Compare commits: devel...stable-2.2
392 commits
Commits (SHA1):

611d34acf3 e72b41d3eb ebae05adf5 b08dc0c056 90623b1ffb 0b16e9257a df14dceb1f 6c82b90587
5d0ccb68af 672a8b3ec9 4d585c9035 d189b668cb 55339cd0b4 df551246a3 6e3f5404f9 6e474bb995
9adfb2d6f5 0cd22c730c 9e6fc2b182 f5be18f409 da260cb013 f0e348f5ee f779227859 341ffef08e
d600266d52 0feb231a9c 8899f5e068 ad6ccf8ea9 7b6557a23b 7cc351a237 34511d1fc9 3f8c56a7e3
b12b5c376d 792832b53e 6ee3a42673 7ff9fa52cf 4a852ec287 df3e71a83f 42afc7dc45 04f65a4d29
9b71a6a0b0 972660968a 2273800f7c 31300bd36b 147c29f0e9 de331a9488 36b462ba85 b33c41dc5f
8b8f0597ed 3ba057d28d ac4ce95810 dae298e429 c40d9e63bb 75c04b6773 6ca528aea4 1a6e27a6ac
d45f2d3288 e9e39e4fd6 1eda29bfa8 6791061395 694011a897 cfd57fcae2 097dbb2daf c459f87a42
547dcf4b9e f47356f5c7 fe3ede881d 5dcce0666a c92ce0c2ca 1262e5fdca cb93ecaef9 6176c95838
ef24d56c8f f1217a9b94 9029547603 81e96c754a c4b09cbcb4 ef1ecfd2c2 39acdde5aa 7b2fcb2d4e
4e257fad84 3551de98c6 a0104cfe81 5f5d143194 04b2e1fbc0 65ee9d2e46 75de5737f7 21106cc95b
89fdca0929 7164956cc6 0240ffe220 db22594955 9e9d202d6f b2942f1084 26f5e4b32d e85f3f4461
f4ecbc4c3f b12256acf2 f649a87a19 2f0dba4f36 6a9572e1e5 5362910000 009ac7b65e c6ef74d81b
0c153146e3 86eadc5814 4b495d7e43 c846e915f8 a93e7506a8 49fc0cfe4f d1c5a39420 06ed25e788
b3daa9dd64 6dee2b21e6 cc4634a5e7 eb8c26c105 240c388e6c 941552d107 d8c9b8d347 ec84ff6de6
56de9d8ae7 3e2f6e5094 577ea88f78 7e10994b6d f6a3c4f071 8439d1813e e223349edc b25c06725a
22f7ca8c97 a0a2392c87 3c4ac877f6 38bf7ab71f 47e16bef08 e05222a4cc 32dad09d2e a04d0f485b
7683715caf 3c7987f3a4 3e72e0c173 10a3053525 29762c87c8 477043c422 f89abc705b 2d8ebbfe8c
e1b459470d 1bdf25561a b1e44d1195 8ad67b44dc cfbb58adae f90a6439c4 d8449b3013 1168524f22
99472c42e3 00378515e2 2639016847 be07fcc6d9 268645c17a e715221a66 fe33c937c4 cbf1f23e7e
0db1c77041 ddc8d3d988 3beac89893 abc0eeac02 886f8d224e 76be9aa693 acad2ba246 47cd4867a1
69301f2823 6025e97d13 01fa3d3024 18aba3ebec b15e1b743f 97444c5e59 4ef8493a11 4854705267
ff60245e2b a7abe4be19 faaabec397 288f6684cf b878e8f0f0 4d7760c0b1 b9a1b2836a 8dee7f3138
f4a2332d48 a60a7279d0 f1f6752686 42e0efbbbf ac076dfc12 0bed5d4d85 b39f48121d bf8902f371
27be8a1022 f62224497e 529adb574a 528426ce0c a34793d7fb a91788e25a 74bb122598 66779698f0
d4e8cdc84f ead92bee3d 23b5764b57 3ce19f4c58 585c57fca4 aed616ab31 f6b47c53d7 8696ce9e00
d637559825 c2f9846278 35e198a616 0871d955fe 159399fea4 47d5f0f0a8 bc539adddc ecbac4cf73
1a247de0b6 30d5d5fa67 39cb6797a3 6f5ec79e91 42fb088807 7602a2a030 413f6ab7f1 832cc5bb5b
28883975bf c23d99b786 0cde6fdaca fad7f1de7c 6bd4bec9de debfb798dd dfad25bd38 b482cdcf03
c920c8bc3b 9499ed5360 caba50c778 58d8a0fca0 aaedf0bd73 366bfe14c3 780d2c4bc4 1cc26cf7cd
de7fbd407a 9d81ad6423 5bf1269aaf 3f25088bf0 09fc911a48 2b9659945c 182943f3b3 b229898f80
5c4a4703d9 06599f49eb 3a577966ba 6824b1ea1e 00bdada50e 32971e8639 70824e06b5 e13f3e3c07
6adbc7d64a 325bf617e9 2c572ba786 503537eb25 cdec853e37 23812ab87d 1f80e35312 c07f6d1bdd
d559355b29 219a20277f 3de9d8373b 9d4ce0a94e c5d4134f37 95a8bbdbda 1ebc94f290 a2df07ade3
6b603b026c deb1e3ebc7 79e43925b1 0eb23f5a86 066a360a36 731422a6dc 96d3f06743 eafb4043c9
35938b907d bab1ac1d5c f3fc029726 e4ebe721f5 2fa12438dd 9d82a3aa0c fef9de30d9 5169252641
4d5368e93b 02ed599035 bf503e4ff2 9022862624 6c118252b6 e97a00de9e f5240d2953 92c851a894
84485c29ee 806fc1ac74 b702d3810e 8c6d749ad9 8dbc564fc6 e8c97768b7 90d3824678 5bd6a9b76c
a15d3106e9 3ee4effb7a 90b06bc8b4 b6e51d670a dcc6a15ce3 25e4398d5b f15ec38788 ddbc01dfe5
6e36d1899c ba8e1f88a9 1dfa0e06eb e9b7d42205 d2998f0811 fa8f9e9ead 257182e46a bce9bfce51
cc91c34f36 a0a4d0e3f4 b9e8aa72be fb921042db 885b218a7b c5b155ba1a f6295677c9 1e54f424ec
b373f67368 924e0726df 69ff46b8ca 19516d8c19 531023ad1f 1af1cca59f dde882c91f 7ee14f466e
9a9f767857 d4b8178b4b 141ed26e02 5bf850568a 24d7555d0b d3dd82f3cf e1101f78bd 3d3ebbf98e
261013354f 15f4b83564 747a5ef791 c1a34b5eff b8c25d8f70 05dfed7575 b6e317c045 a94db01b89
6d909bd65c ab3d4731a3 ed9d0cdf4a cb8c28870c 2accc28d14 d8155cc4fa edff94f96d 1acd258931
7034a34ce4 4cd32ee1ac 9f4a656929 3db274ac21 3a822faeae 3c9966d6fc 2a7f728fdf aafa7ab471
f59430aba8 448cac16db ede5eb78ab 7063ed8ceb be6396d5e9 e4efe0b2f1 dea2cabe94 08b646684b
255b9364ab 9596b9218c 66ebe7a461 6a76d7fbef b878c47d5e f7d3ed6eb3 8456686f4b fb4c0a085f
6b85c31fdf 767dba8f24 5109d50adb f4b1d87ec0 ae52943719 6ba009f913 52173e7707 44faad0593
224 changed files with 4918 additions and 1501 deletions
.gitignore (vendored): 2 changes

@@ -43,7 +43,7 @@ docsite/htmlout
 docs-api/rst/
 docs-api/_build/
 # deb building stuff...
-debian/
+/debian/
 deb-build
 # Vim swap files
 *.swp
CHANGELOG.md: 95 changes

@@ -1,10 +1,88 @@
 Ansible Changes By Release
 ==========================

-## 2.2 "The Battle of Evermore" - ACTIVE DEVELOPMENT
+## 2.2.4 "The Battle of Evermore" - TBD
+* avoid vault view writing to logs
+* moved htpasswd module to use LooseVersion vs StrictVersion to make usable on Debian
+* fix for adhoc not obeying callback options
+
+## 2.2.3 "The Battle of Evermore" - 05-09-2017
+
+### Major Changes:
+
+* [SECURITY] (HIGH): fix for CVE-2017-7466, which was caused by an incomplete cherry-picking of commits related to CVE-2016-9587. This can lead to some jinja2 syntax not being stripped out of templated results.
+* [SECURITY] (MODERATE): fix for CVE-2017-7481, in which data for lookup plugins used as variables was not being correctly marked as "unsafe".
+
+### Minor Changes:
+
+* Fixes a bug when using YAML inventory where hosts were not put in the 'all' group, and some other 'ungrouped' issues in inventory.
+* Fixes a bug when using ansible commands without a tty for stdin.
+* Split on newlines when searching for become prompt.
+* Fix crash upon pass prompt in py3 when using the paramiko connection type.
+
+## 2.2.2 "The Battle of Evermore" - 03-27-2017
+
+### Major Changes:
+
+* [SECURITY] (HIGH): (continued fix for CVE-2016-9587) Handle some additional corner cases in the way conditionals are parsed and evaluated.
+* [SECURITY] (LOW): properly filter passwords out of URLs when displaying output from some modules.
+
+### Minor Changes:
+
+* Fix azure_rm version checks (#22270).
+* Fix for traceback when we encounter non-utf8 characters when using --diff.
+* Ensure ssh hostkey checks respect server port.
+* Use proper PyYAML classes for safe loading YAML files.
+* Fix for bug related to when statements for older jinja2 versions.
+* Fix a bug/traceback when using to_yaml/to_nice_yaml.
+* Properly clean data of jinja2-like syntax, even if that data came from an unsafe source.
+* Fix bug regarding undefined entries in HostVars.
+* Skip fact gathering if the entire play was included via conditional which evaluates to False.
+* Fixed a performance regression when using a large number of items in a with loop.
+* Fixed a bug in the way the end of role was detected, which in some cases could cause a role to be run more than once.
+* Add jinja2 groupby filter override to cast namedtuple to tuple to handle a non-compatible change in jinja2 2.9.4-2.9.5.
+* Fixed several bugs related to temp directory creation on remote systems when using shell expansions and become privilege escalation.
+* Fixed a bug related to spliting/parsing the output of a become privilege escalation when looking for a password prompt.
+* Several unicode/bytes fixes.
+
+## 2.2.1 "The Battle of Evermore" - 01-16-2017
+
+### Major Changes:
+
+* Security fix for CVE-2016-9587 - An attacker with control over a client system being managed by Ansible and the ability to send facts back to the Ansible server could use this flaw to execute arbitrary code on the Ansible server as the user and group Ansible is running as.
+
+### Minor Changes:
+
+* Fixes a bug where undefined variables in with_* loops would cause a task failure even if the when condition would cause the task to be skipped.
+* Fixed a bug related to roles where in certain situations a role may be run more than once despite not allowing duplicates.
+* Fixed some additional bugs related to atomic_move for modules.
+* Fixes multiple bugs related to field/attribute inheritance in nested blocks and includes, as well as task iteration logic during failures.
+* Fixed pip installing packages into virtualenvs using the system pip instead of the virtualenv pip.
+* Fixed dnf on systems with dnf-2.0.x (some changes in the API).
+* Fixed traceback with dnf install of groups.
+* Fixes a bug in which include_vars was not working with failed_when.
+* Fix for include_vars only loading files with .yml, .yaml, and .json extensions. This was only supposed to apply to loading a directory of vars files.
+* Fixes several bugs related to properly incrementing the failed count in the host statistics.
+* Fixes a bug with listening handlers which did not specify a `name` field.
+* Fixes a bug with the `play_hosts` internal variable, so that it properly reflects the current list of hosts.
+* Fixes a bug related to the v2_playbook_on_start callback method and legacy (v1) plugins.
+* Fixes an openssh related process exit race condition, related to the fact that connections using ControlPersist do not close stderr.
+* Improvements and fixes to OpenBSD fact gathering.
+* Updated `make deb` to use pbuilder. Use `make local_deb` for the previous non-pbuilder build.
+* Fixed Windows async to avoid blocking due to handle inheritance.
+* Fixed bugs in the mount module on older Linux kernels and *BSDs
+* Fix regression in jinja2 include search path.
+* Various minor fixes for Python 3
+* Inserted some checks for jinja2-2.9, which can cause some issues with Ansible currently.
+
+## 2.2 "The Battle of Evermore" - 11-01-2016

 ###Major Changes:

 * Security fix for CVE-2016-8628 - Command injection by compromised server via fact variables. In some situations, facts returned by modules could overwrite connection-based facts or some other special variables, leading to injected commands running on the Ansible controller as the user running Ansible (or via escalated permissions).
 * Security fix for CVE-2016-8614 - apt_key module not properly validating keys in some situations.
 * Added the `listen` feature for modules. This feature allows tasks to more easily notify multiple handlers, as well as making it easier for handlers from decoupled roles to be notified.
 * Major performance improvements.
 * Added support for binary modules

@@ -33,16 +111,15 @@ Ansible Changes By Release
 * Tech Preview: Work has been done to get Ansible running under Python3. This work is not complete enough to depend upon in production environments but it is enough to begin testing it.
   * Most of the controller side should now work. Users should be able to run python3 /usr/bin/ansible and python3 /usr/bin/ansible-playbook and have core features of ansible work.
   * A few of the most essential modules have been audited and are known to work. Others work out of the box.
-  * We are using unit and integration tests to help us port code and not regress later. Even if you are not famiriar with python you can still help by contributing integration tests (just ansible roles) that exercise more of the code to make sure it continues to run on both Python2 and Python3.
+  * We are using unit and integration tests to help us port code and not regress later. Even if you are not familiar with python you can still help by contributing integration tests (just ansible roles) that exercise more of the code to make sure it continues to run on both Python2 and Python3.
 * scp_if_ssh now supports True, False and "smart". "smart" is the default and will retry failed sftp transfers with scp.
 * Network:
-  * Refactored all network modules to remove dulicate code and take advantage of Ansiballz implementation
+  * Refactored all network modules to remove duplicate code and take advantage of Ansiballz implementation
   * All functionality from *_template network modules have been combined into *_config module
   * Network *_command modules not longer allow configuration mode statements

 ####New Modules:
 - apache2_mod_proxy
 - archive
 - asa
   * asa_acl
   * asa_command

@@ -283,6 +360,13 @@ Ansible Changes By Release
 * Fix a problem with the pip module updating the python pip package itself.
+* ansible_play_hosts is a new magic variable to provide a list of hosts in scope for the current play. Unlike play_hosts it is not subject to the 'serial' keyword.
+* ansible_play_batch is a new magic variable meant to substitute the current play_hosts.
+* The subversion module from core now marks its password parameter as no_log so
+  the password is obscured when logging.
+* The postgresql_lang and postgresql_ext modules from extras now mark
+  login_password as no_log so the password is obscured when logging.
+* Fix for yum module incorrectly thinking it succeeded in installing packages
+* Make the default ansible_managed template string into a static string since
+  all of the replacable values lead to non-idempotent behaviour.

 ###For custom front ends using the API:
 * ansible.parsing.vault:

@@ -292,7 +376,7 @@ Ansible Changes By Release
   Ansible. The feature it was intended to support has now been implemented
   without using this.
 * VaultAES, the older, insecure encrypted format that debuted in Ansible-1.5
-  and was relaced by VaultAES256 less than a week later, now has a deprecation
+  and was replaced by VaultAES256 less than a week later, now has a deprecation
   warning. **It will be removed in 2.3**. In the unlikely event that you
   wrote a vault file in that 1 week window and have never modified the file
   since (ansible-vault automatically re-encrypts the file using VaultAES256

@@ -308,7 +392,6 @@ Ansible Changes By Release
 ###Deprecations
 Notice given that the following will be removed in Ansible 2.4:
 * Modules
-  * asa_template
   * eos_template
   * ios_template
   * iosxr_template
Makefile: 36 changes

@@ -47,7 +47,7 @@ else
 GITINFO = ""
 endif

-ifeq ($(shell echo $(OS) | egrep -c 'Darwin|FreeBSD|OpenBSD'),1)
+ifeq ($(shell echo $(OS) | egrep -c 'Darwin|FreeBSD|OpenBSD|DragonFly'),1)
 DATE := $(shell date -j -r $(shell git log -n 1 --format="%at") +%Y%m%d%H%M)
 else
 DATE := $(shell date --utc --date="$(GIT_DATE)" +%Y%m%d%H%M)

@@ -77,6 +77,12 @@ DEB_PPA ?= ppa
 # Choose the desired Ubuntu release: lucid precise saucy trusty
 DEB_DIST ?= unstable

+# pbuilder parameters
+PBUILDER_ARCH ?= amd64
+PBUILDER_CACHE_DIR = /var/cache/pbuilder
+PBUILDER_BIN ?= pbuilder
+PBUILDER_OPTS ?= --debootstrapopts --variant=buildd --architecture $(PBUILDER_ARCH) --debbuildopts -b
+
 # RPM build parameters
 RPMSPECDIR= packaging/rpm
 RPMSPEC = $(RPMSPECDIR)/ansible.spec

@@ -85,6 +91,10 @@ RPMRELEASE = $(RELEASE)
 ifneq ($(OFFICIAL),yes)
     RPMRELEASE = 100.git$(DATE)$(GITINFO)
 endif
+ifeq ($(PUBLISH),nightly)
+    # https://fedoraproject.org/wiki/Packaging:Versioning#Snapshots
+    RPMRELEASE = $(RELEASE).$(DATE)git.$(GIT_HASH)
+endif
 RPMNVR = "$(NAME)-$(VERSION)-$(RPMRELEASE)$(RPMDIST)"

 # MOCK build parameters

@@ -176,17 +186,17 @@ sdist_upload: clean docs
 rpmcommon: $(MANPAGES) sdist
 	@mkdir -p rpm-build
 	@cp dist/*.gz rpm-build/
-	@sed -e 's#^Version:.*#Version: $(VERSION)#' -e 's#^Release:.*#Release: $(RPMRELEASE)%{?dist}#' $(RPMSPEC) >rpm-build/$(NAME).spec
+	@sed -e 's#^Version:.*#Version: $(VERSION)#' -e 's#^Release:.*#Release: $(RPMRELEASE)%{?dist}$(REPOTAG)#' $(RPMSPEC) >rpm-build/$(NAME).spec

 mock-srpm: /etc/mock/$(MOCK_CFG).cfg rpmcommon
-	$(MOCK_BIN) -r $(MOCK_CFG) --resultdir rpm-build/ --buildsrpm --spec rpm-build/$(NAME).spec --sources rpm-build/
+	$(MOCK_BIN) -r $(MOCK_CFG) $(MOCK_ARGS) --resultdir rpm-build/ --buildsrpm --spec rpm-build/$(NAME).spec --sources rpm-build/
 	@echo "#############################################"
 	@echo "Ansible SRPM is built:"
 	@echo rpm-build/*.src.rpm
 	@echo "#############################################"

 mock-rpm: /etc/mock/$(MOCK_CFG).cfg mock-srpm
-	$(MOCK_BIN) -r $(MOCK_CFG) --resultdir rpm-build/ --rebuild rpm-build/$(NAME)-*.src.rpm
+	$(MOCK_BIN) -r $(MOCK_CFG) $(MOCK_ARGS) --resultdir rpm-build/ --rebuild rpm-build/$(NAME)-*.src.rpm
 	@echo "#############################################"
 	@echo "Ansible RPM is built:"
 	@echo rpm-build/*.noarch.rpm

@@ -230,7 +240,23 @@ debian: sdist
 	sed -ie "s|%VERSION%|$(VERSION)|g;s|%RELEASE%|$(DEB_RELEASE)|;s|%DIST%|$${DIST}|g;s|%DATE%|$(DEB_DATE)|g" deb-build/$${DIST}/$(NAME)-$(VERSION)/debian/changelog ; \
 	done

-deb: debian
+deb: deb-src
+	@for DIST in $(DEB_DIST) ; do \
+	    PBUILDER_OPTS="$(PBUILDER_OPTS) --distribution $${DIST} --basetgz $(PBUILDER_CACHE_DIR)/$${DIST}-$(PBUILDER_ARCH)-base.tgz --buildresult $(CURDIR)/deb-build/$${DIST}" ; \
+	    $(PBUILDER_BIN) create $${PBUILDER_OPTS} --othermirror "deb http://archive.ubuntu.com/ubuntu $${DIST} universe" ; \
+	    $(PBUILDER_BIN) update $${PBUILDER_OPTS} ; \
+	    $(PBUILDER_BIN) build $${PBUILDER_OPTS} deb-build/$${DIST}/$(NAME)_$(VERSION)-$(DEB_RELEASE)~$${DIST}.dsc ; \
+	done
+	@echo "#############################################"
+	@echo "Ansible DEB artifacts:"
+	@for DIST in $(DEB_DIST) ; do \
+	    echo deb-build/$${DIST}/$(NAME)_$(VERSION)-$(DEB_RELEASE)~$${DIST}_amd64.changes ; \
+	done
+	@echo "#############################################"
+
+# Build package outside of pbuilder, with locally installed dependencies.
+# Install BuildRequires as noted in packaging/debian/control.
+local_deb: debian
 	@for DIST in $(DEB_DIST) ; do \
 	    (cd deb-build/$${DIST}/$(NAME)-$(VERSION)/ && $(DEBUILD) -b) ; \
 	done
README.md: 20 changes

@@ -1,5 +1,5 @@
 [![PyPI version](https://img.shields.io/pypi/v/ansible.svg)](https://pypi.python.org/pypi/ansible)
-[![Build Status](https://api.shippable.com/projects/573f79d02a8192902e20e34b/badge?branch=devel)](https://app.shippable.com/projects/573f79d02a8192902e20e34b)
+[![Build Status](https://api.shippable.com/projects/573f79d02a8192902e20e34b/badge?branch=stable-2.2)](https://app.shippable.com/projects/573f79d02a8192902e20e34b)

 Ansible

@@ -7,13 +7,13 @@ Ansible
 Ansible is a radically simple IT automation system. It handles configuration-management, application deployment, cloud provisioning, ad-hoc task-execution, and multinode orchestration - including trivializing things like zero downtime rolling updates with load balancers.

-Read the documentation and more at http://ansible.com/
+Read the documentation and more at https://ansible.com/

 Many users run straight from the development branch (it's generally fine to do so), but you might also wish to consume a release.

-You can find instructions [here](http://docs.ansible.com/intro_getting_started.html) for a variety of platforms. If you decide to go with the development branch, be sure to run `git submodule update --init --recursive` after doing a checkout.
+You can find instructions [here](https://docs.ansible.com/intro_getting_started.html) for a variety of platforms. If you decide to go with the development branch, be sure to run `git submodule update --init --recursive` after doing a checkout.

-If you want to download a tarball of a release, go to [releases.ansible.com](http://releases.ansible.com/ansible), though most users use `yum` (using the EPEL instructions linked above), `apt` (using the PPA instructions linked above), or `pip install ansible`.
+If you want to download a tarball of a release, go to [releases.ansible.com](https://releases.ansible.com/ansible), though most users use `yum` (using the EPEL instructions linked above), `apt` (using the PPA instructions linked above), or `pip install ansible`.

 Design Principles
 =================

@@ -31,11 +31,11 @@ Design Principles
 Get Involved
 ============

-* Read [Community Information](http://docs.ansible.com/community.html) for all kinds of ways to contribute to and interact with the project, including mailing list information and how to submit bug reports and code to Ansible.
+* Read [Community Information](https://docs.ansible.com/community.html) for all kinds of ways to contribute to and interact with the project, including mailing list information and how to submit bug reports and code to Ansible.
 * All code submissions are done through pull requests. Take care to make sure no merge commits are in the submission, and use `git rebase` vs `git merge` for this reason. If submitting a large code change (other than modules), it's probably a good idea to join ansible-devel and talk about what you would like to do or add first and to avoid duplicate efforts. This not only helps everyone know what's going on, it also helps save time and effort if we decide some changes are needed.
-* Users list: [ansible-project](http://groups.google.com/group/ansible-project)
-* Development list: [ansible-devel](http://groups.google.com/group/ansible-devel)
-* Announcement list: [ansible-announce](http://groups.google.com/group/ansible-announce) - read only
+* Users list: [ansible-project](https://groups.google.com/group/ansible-project)
+* Development list: [ansible-devel](https://groups.google.com/group/ansible-devel)
+* Announcement list: [ansible-announce](https://groups.google.com/group/ansible-announce) - read only
 * irc.freenode.net: #ansible

 Branch Info

@@ -45,13 +45,13 @@ Branch Info
 * The devel branch corresponds to the release actively under development.
 * As of 1.8, modules are kept in different repos, you'll want to follow [core](https://github.com/ansible/ansible-modules-core) and [extras](https://github.com/ansible/ansible-modules-extras)
 * Various release-X.Y branches exist for previous releases.
-* We'd love to have your contributions, read [Community Information](http://docs.ansible.com/community.html) for notes on how to get started.
+* We'd love to have your contributions, read [Community Information](https://docs.ansible.com/community.html) for notes on how to get started.

 Authors
 =======

 Ansible was created by [Michael DeHaan](https://github.com/mpdehaan) (michael.dehaan/gmail/com) and has contributions from over 1000 users (and growing). Thanks everyone!

-Ansible is sponsored by [Ansible, Inc](http://ansible.com)
+Ansible is sponsored by [Ansible, Inc](https://ansible.com)
RELEASES.txt: 147 changes

@@ -1,74 +1,81 @@
 Ansible Releases at a Glance
 ============================

-Active Development
-++++++++++++++++++
-
-2.2 TBD - in progress
-
-Released
-++++++++
-
-2.1.0 "The Song Remains the Same" in progress
-2.0.2 "Over the Hills and Far Away" 04-19-2016
-2.0.1 "Over the Hills and Far Away" 02-24-2016
-2.0.0 "Over the Hills and Far Away" 01-12-2016
-1.9.6 "Dancing In the Streets" 04-15-2016
-1.9.5 "Dancing In the Streets" 03-21-2016
-1.9.4 "Dancing In the Streets" 10-09-2015
-1.9.3 "Dancing In the Streets" 09-03-2015
-1.9.2 "Dancing In the Streets" 06-24-2015
-1.9.1 "Dancing In the Streets" 04-27-2015
-1.9.0 "Dancing In the Streets" 03-25-2015
-1.8.4 "You Really Got Me" ---- 02-19-2015
-1.8.3 "You Really Got Me" ---- 02-17-2015
-1.8.2 "You Really Got Me" ---- 12-04-2014
-1.8.1 "You Really Got Me" ---- 11-26-2014
-1.7.2 "Summer Nights" -------- 09-24-2014
-1.7.1 "Summer Nights" -------- 08-14-2014
-1.7 "Summer Nights" -------- 08-06-2014
-1.6.10 "The Cradle Will Rock" - 07-25-2014
-1.6.9 "The Cradle Will Rock" - 07-24-2014
-1.6.8 "The Cradle Will Rock" - 07-22-2014
-1.6.7 "The Cradle Will Rock" - 07-21-2014
-1.6.6 "The Cradle Will Rock" - 07-01-2014
-1.6.5 "The Cradle Will Rock" - 06-25-2014
-1.6.4 "The Cradle Will Rock" - 06-25-2014
-1.6.3 "The Cradle Will Rock" - 06-09-2014
-1.6.2 "The Cradle Will Rock" - 05-23-2014
-1.6.1 "The Cradle Will Rock" - 05-07-2014
-1.6 "The Cradle Will Rock" - 05-05-2014
-1.5.5 "Love Walks In" -------- 04-18-2014
-1.5.4 "Love Walks In" -------- 04-01-2014
-1.5.3 "Love Walks In" -------- 03-13-2014
-1.5.2 "Love Walks In" -------- 03-11-2014
-1.5.1 "Love Walks In" -------- 03-10-2014
-1.5 "Love Walks In" -------- 02-28-2014
-1.4.5 "Could This Be Magic?" - 02-12-2014
-1.4.4 "Could This Be Magic?" - 01-06-2014
-1.4.3 "Could This Be Magic?" - 12-20-2013
-1.4.2 "Could This Be Magic?" - 12-18-2013
-1.4.1 "Could This Be Magic?" - 11-27-2013
-1.4 "Could This Be Magic?" - 11-21-2013
-1.3.4 "Top of the World" ----- 10-29-2013
-1.3.3 "Top of the World" ----- 10-09-2013
-1.3.2 "Top of the World" ----- 09-19-2013
-1.3.1 "Top of the World" ----- 09-16-2013
-1.3 "Top of the World" ----- 09-13-2013
-1.2.3 "Hear About It Later" -- 08-21-2013
-1.2.2 "Hear About It Later" -- 07-05-2013
-1.2.1 "Hear About It Later" -- 07-04-2013
-1.2 "Right Now" ------------ 06-10-2013
-1.1 "Mean Street" ---------- 04-02-2013
-1.0 "Eruption" ------------- 02-01-2013
-0.9 "Dreams" --------------- 11-30-2012
-0.8 "Cathedral" ------------ 10-19-2012
-0.7 "Panama" --------------- 09-06-2012
-0.6 "Cabo" ----------------- 08-06-2012
-0.5 "Amsterdam" ------------ 07-04-2012
-0.4 "Unchained" ------------ 05-23-2012
-0.3 "Baluchitherium" ------- 04-23-2012
-0.0.2 Untitled
-0.0.1 Untitled
+VERSION    RELEASE      CODE NAME
+++++++++++++++++++++++++++++++
+
+2.4.0      TBD          "Dancing Days"
+2.3.0      04-12-2017   "Ramble On"
+2.2.3      05-09-2017   "The Battle of Evermore"
+2.2.2      03-27-2017   "The Battle of Evermore"
+2.2.1      01-16-2017   "The Battle of Evermore"
+2.2.0      11-01-2016   "The Battle of Evermore"
+2.1.5      03-27-2017   "The Song Remains the Same"
+2.1.4      01-16-2017   "The Song Remains the Same"
+2.1.3      11-04-2016   "The Song Remains the Same"
+2.1.2      09-29-2016   "The Song Remains the Same"
+2.1.1      07-28-2016   "The Song Remains the Same"
+2.1.0      05-25-2016   "The Song Remains the Same"
+2.0.2      04-19-2016   "Over the Hills and Far Away"
+2.0.1      02-24-2016   "Over the Hills and Far Away"
+2.0.0      01-12-2016   "Over the Hills and Far Away"
+1.9.6      04-15-2016   "Dancing In the Streets"
+1.9.5      03-21-2016   "Dancing In the Streets"
+1.9.4      10-09-2015   "Dancing In the Streets"
+1.9.3      09-03-2015   "Dancing In the Streets"
+1.9.2      06-24-2015   "Dancing In the Streets"
+1.9.1      04-27-2015   "Dancing In the Streets"
+1.9.0      03-25-2015   "Dancing In the Streets"
+1.8.4      02-19-2015   "You Really Got Me"
+1.8.3      02-17-2015   "You Really Got Me"
+1.8.2      12-04-2014   "You Really Got Me"
+1.8.1      11-26-2014   "You Really Got Me"
+1.8.0      11-25-2014   "You Really Got Me"
+1.7.2      09-24-2014   "Summer Nights"
+1.7.1      08-14-2014   "Summer Nights"
+1.7.0      08-06-2014   "Summer Nights"
+1.6.10     07-25-2014   "The Cradle Will Rock"
+1.6.9      07-24-2014   "The Cradle Will Rock"
+1.6.8      07-22-2014   "The Cradle Will Rock"
+1.6.7      07-21-2014   "The Cradle Will Rock"
+1.6.6      07-01-2014   "The Cradle Will Rock"
+1.6.5      06-25-2014   "The Cradle Will Rock"
+1.6.4      06-25-2014   "The Cradle Will Rock"
+1.6.3      06-09-2014   "The Cradle Will Rock"
+1.6.2      05-23-2014   "The Cradle Will Rock"
+1.6.1      05-07-2014   "The Cradle Will Rock"
+1.6.0      05-05-2014   "The Cradle Will Rock"
+1.5.5      04-18-2014   "Love Walks In"
+1.5.4      04-01-2014   "Love Walks In"
+1.5.3      03-13-2014   "Love Walks In"
+1.5.2      03-11-2014   "Love Walks In"
+1.5.1      03-10-2014   "Love Walks In"
+1.5.0      02-28-2014   "Love Walks In"
+1.4.5      02-12-2014   "Could This Be Magic?"
+1.4.4      01-06-2014   "Could This Be Magic?"
+1.4.3      12-20-2013   "Could This Be Magic?"
+1.4.2      12-18-2013   "Could This Be Magic?"
+1.4.1      11-27-2013   "Could This Be Magic?"
+1.4.0      11-21-2013   "Could This Be Magic?"
+1.3.4      10-29-2013   "Top of the World"
+1.3.3      10-09-2013   "Top of the World"
+1.3.2      09-19-2013   "Top of the World"
+1.3.1      09-16-2013   "Top of the World"
+1.3.0      09-13-2013   "Top of the World"
+1.2.3      08-21-2013   "Right Now"
+1.2.2      07-05-2013   "Right Now"
+1.2.1      07-04-2013   "Right Now"
+1.2.0      06-10-2013   "Right Now"
+1.1.0      04-02-2013   "Mean Street"
+1.0.0      02-01-2013   "Eruption"
+0.9.0      11-30-2012   "Dreams"
+0.8.0      10-19-2012   "Cathedral"
+0.7.0      09-06-2012   "Panama"
+0.6.0      08-06-2012   "Cabo"
+0.5.0      07-04-2012   "Amsterdam"
+0.4.0      05-23-2012   "Unchained"
+0.3.0      04-23-2012   "Baluchitherium"
+0.2.0      ?            "Untitled"
+0.1.0      ?            "Untitled"
+0.0.2      ?            "Untitled"
+0.0.1      ?            "Untitled"
VERSION: 2 changes

@@ -1 +1 @@
-2.2.0 0.0.devel
+2.2.3.0 1
@@ -126,9 +126,11 @@ if __name__ == '__main__':
         have_cli_options = cli is not None and cli.options is not None
         display.error("Unexpected Exception: %s" % to_text(e), wrap_text=False)
         if not have_cli_options or have_cli_options and cli.options.verbosity > 2:
-            display.display(u"the full traceback was:\n\n%s" % to_text(traceback.format_exc()))
+            log_only = False
         else:
             display.display("to see the full traceback, use -vvv")
+            log_only = True
+        display.display(u"the full traceback was:\n\n%s" % to_text(traceback.format_exc()), log_only=log_only)
         exit_code = 250
     finally:
         # Remove ansible tempdir
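The hunk above reworks the top-level exception handler so the full traceback is always written to the log, but only shown on screen at verbosity 3 or higher. A minimal standalone sketch of the same gating pattern (plain Python; the function and names here are illustrative, not Ansible's actual Display API):

    import sys
    import traceback

    def report_unexpected(exc, verbosity, log):
        # Mirrors the fixed handler: the screen sees the traceback only when
        # verbose, but the log always receives it.
        sys.stderr.write("Unexpected Exception: %s\n" % exc)
        if verbosity > 2:
            print("the full traceback was:\n\n%s" % traceback.format_exc())
        else:
            print("to see the full traceback, use -vvv")
        log.append(traceback.format_exc())

    log = []
    try:
        raise RuntimeError("boom")
    except RuntimeError as e:
        report_unexpected(e, verbosity=0, log=log)
        # log[0] holds the full traceback even though the screen did not show it
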
@@ -23,7 +23,7 @@
 Azure External Inventory Script
 ===============================
 Generates dynamic inventory by making API requests to the Azure Resource
-Manager using the AAzure Python SDK. For instruction on installing the
+Manager using the Azure Python SDK. For instruction on installing the
 Azure Python SDK see http://azure-sdk-for-python.readthedocs.org/

 Authentication

@@ -32,7 +32,7 @@ The order of precedence is command line arguments, environment variables,
 and finally the [default] profile found in ~/.azure/credentials.

 If using a credentials file, it should be an ini formatted file with one or
-more sections, which we refer to as profiles.  The script looks for a
+more sections, which we refer to as profiles. The script looks for a
 [default] section, if a profile is not specified either on the command line
 or with an environment variable. The keys in a profile will match the
 list of command line arguments below.

@@ -42,7 +42,7 @@ in your ~/.azure/credentials file, or a service principal or Active Directory
 user.

 Command line arguments:
-- profile 
+- profile
 - client_id
 - secret
 - subscription_id

@@ -61,7 +61,7 @@ Environment variables:

 Run for Specific Host
 -----------------------
-When run for a specific host using the --host option, a resource group is 
+When run for a specific host using the --host option, a resource group is
 required. For a specific host, this script returns the following variables:

 {

@@ -191,7 +191,7 @@ import os
 import re
 import sys

-from distutils.version import LooseVersion
+from packaging.version import Version

 from os.path import expanduser

@@ -309,7 +309,7 @@ class AzureRM(object):

     def _get_env_credentials(self):
         env_credentials = dict()
-        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems():
+        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
             env_credentials[attribute] = os.environ.get(env_variable, None)

         if env_credentials['profile'] is not None:

@@ -328,7 +328,7 @@ class AzureRM(object):
         self.log('Getting credentials')

         arg_credentials = dict()
-        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems():
+        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
             arg_credentials[attribute] = getattr(params, attribute)

         # try module params

@@ -362,7 +362,11 @@ class AzureRM(object):
             resource_client = self.rm_client
             resource_client.providers.register(key)
         except Exception as exc:
-            self.fail("One-time registration of {0} failed - {1}".format(key, str(exc)))
+            self.log("One-time registration of {0} failed - {1}".format(key, str(exc)))
+            self.log("You might need to register {0} using an admin account".format(key))
+            self.log(("To register a provider using the Python CLI: "
+                      "https://docs.microsoft.com/azure/azure-resource-manager/"
+                      "resource-manager-common-deployment-errors#noregisteredproviderfound"))

     @property
     def network_client(self):

@@ -442,7 +446,7 @@ class AzureInventory(object):
     def _parse_cli_args(self):
         # Parse command line arguments
         parser = argparse.ArgumentParser(
-                description='Produce an Ansible Inventory file for an Azure subscription')
+            description='Produce an Ansible Inventory file for an Azure subscription')
         parser.add_argument('--list', action='store_true', default=True,
                             help='List instances (default: True)')
         parser.add_argument('--debug', action='store_true', default=False,

@@ -664,7 +668,7 @@ class AzureInventory(object):
             self._inventory['azure'].append(host_name)

         if self.group_by_tag and vars.get('tags'):
-            for key, value in vars['tags'].iteritems():
+            for key, value in vars['tags'].items():
                 safe_key = self._to_safe(key)
                 safe_value = safe_key + '_' + self._to_safe(value)
                 if not self._inventory.get(safe_key):

@@ -724,7 +728,7 @@ class AzureInventory(object):

     def _get_env_settings(self):
         env_settings = dict()
-        for attribute, env_variable in AZURE_CONFIG_SETTINGS.iteritems():
+        for attribute, env_variable in AZURE_CONFIG_SETTINGS.items():
             env_settings[attribute] = os.environ.get(env_variable, None)
         return env_settings

@@ -786,11 +790,11 @@ class AzureInventory(object):

 def main():
     if not HAS_AZURE:
-        sys.exit("The Azure python sdk is not installed (try 'pip install azure==2.0.0rc5') - {0}".format(HAS_AZURE_EXC))
+        sys.exit("The Azure python sdk is not installed (try `pip install 'azure>=2.0.0rc5' --upgrade`) - {0}".format(HAS_AZURE_EXC))

-    if LooseVersion(azure_compute_version) != LooseVersion(AZURE_MIN_VERSION):
+    if Version(azure_compute_version) < Version(AZURE_MIN_VERSION):
         sys.exit("Expecting azure.mgmt.compute.__version__ to be {0}. Found version {1} "
-                 "Do you have Azure == 2.0.0rc5 installed?".format(AZURE_MIN_VERSION, azure_compute_version))
+                 "Do you have Azure >= 2.0.0rc5 installed? (try `pip install 'azure>=2.0.0rc5' --upgrade`)".format(AZURE_MIN_VERSION, azure_compute_version))

     AzureInventory()
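Two things change in the version check above: the exact-equality test becomes an ordered comparison, and `distutils.LooseVersion` is swapped for `packaging.version.Version`, which implements PEP 440 ordering (pre-releases such as `rc5` sort below the final release). A quick illustration, assuming the `packaging` library is installed:

    from packaging.version import Version

    AZURE_MIN_VERSION = "2.0.0rc5"

    # PEP 440 ordering: the rc5 pre-release sorts below the 2.0.0 final,
    # so an ordered check accepts the minimum and everything newer.
    for installed in ("2.0.0rc4", "2.0.0rc5", "2.0.0", "2.1.0"):
        ok = Version(installed) >= Version(AZURE_MIN_VERSION)
        print("%-10s %s" % (installed, "ok" if ok else "too old"))
    # Only 2.0.0rc4 fails; the old '!=' test rejected every version
    # except 2.0.0rc5 itself.
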
@@ -3,6 +3,8 @@

 [collins]

+# You should not have a trailing slash or collins
+# will not properly match the URI
 host = http://localhost:9000

 username = blake
@@ -201,7 +201,8 @@ class CollinsInventory(object):
             response = open_url(query_url,
                                 timeout=self.collins_timeout_secs,
                                 url_username=self.collins_username,
-                                url_password=self.collins_password)
+                                url_password=self.collins_password,
+                                force_basic_auth=True)
             json_response = json.loads(response.read())
             # Adds any assets found to the array of assets.
             assets += json_response['data']['Data']
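`force_basic_auth=True` tells `open_url` to send the `Authorization` header preemptively instead of waiting for a 401 challenge, which some APIs never issue. A rough stdlib equivalent of preemptive basic auth (the URL and credentials below are placeholders):

    import base64
    import urllib.request

    url = "http://localhost:9000/api/assets"  # placeholder Collins endpoint
    credentials = b"blake:secret"             # placeholder user:pass

    request = urllib.request.Request(url)
    # Preemptive basic auth: attach the header on the first request rather
    # than replying to a 401 challenge the server may never send.
    token = base64.b64encode(credentials).decode("ascii")
    request.add_header("Authorization", "Basic %s" % token)
    # response = urllib.request.urlopen(request)  # would perform the call
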
@@ -260,7 +260,7 @@ or environment variables (DO_API_TOKEN)\n''')

         # Private IP Address
         if config.has_option('digital_ocean', 'use_private_network'):
-            self.use_private_network = config.get('digital_ocean', 'use_private_network')
+            self.use_private_network = config.getboolean('digital_ocean', 'use_private_network')

         # Group variables
         if config.has_option('digital_ocean', 'group_variables'):
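The `get`-to-`getboolean` fix matters because `ConfigParser.get()` returns the raw string, and any non-empty string, including `'False'`, is truthy. A small demonstration (shown with the Python 3 `configparser`; the inventory script itself imports the Python 2 `ConfigParser`):

    import configparser

    config = configparser.ConfigParser()
    config.read_dict({"digital_ocean": {"use_private_network": "False"}})

    as_string = config.get("digital_ocean", "use_private_network")
    as_bool = config.getboolean("digital_ocean", "use_private_network")

    print(bool(as_string))  # True  -- the non-empty string 'False' is truthy
    print(as_bool)          # False -- getboolean parses true/false/yes/no/1/0
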
@@ -1313,7 +1313,7 @@ class Ec2Inventory(object):
             elif key == 'ec2_tags':
                 for k, v in value.items():
                     if self.expand_csv_tags and ',' in v:
-                        v = map(lambda x: x.strip(), v.split(','))
+                        v = list(map(lambda x: x.strip(), v.split(',')))
                     key = self.to_safe('ec2_tag_' + k)
                     instance_vars[key] = v
             elif key == 'ec2_groups':
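Wrapping `map()` in `list()` keeps behavior consistent across interpreters: on Python 3, `map()` returns a lazy one-shot iterator that `json.dumps` rejects and that empties after a single pass. A sketch of the difference:

    import json

    tags = "web, db ,cache"
    v = map(lambda x: x.strip(), tags.split(","))

    try:
        json.dumps(v)  # Python 3: map objects are not JSON serializable
    except TypeError as err:
        print("bare map():", err)

    v = list(map(lambda x: x.strip(), tags.split(",")))
    print(json.dumps(v))  # ["web", "db", "cache"]
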
@@ -261,7 +261,7 @@ class GceInventory(object):
         if inst is None:
             return {}

-        if inst.extra['metadata'].has_key('items'):
+        if 'items' in inst.extra['metadata']:
             for entry in inst.extra['metadata']['items']:
                 md[entry['key']] = entry['value']

@@ -326,7 +326,7 @@ class GceInventory(object):
             if zones and zone not in zones:
                 continue

-            if groups.has_key(zone): groups[zone].append(name)
+            if zone in groups: groups[zone].append(name)
             else: groups[zone] = [name]

             tags = node.extra['tags']

@@ -335,25 +335,25 @@ class GceInventory(object):
                     tag = t[6:]
                 else:
                     tag = 'tag_%s' % t
-                if groups.has_key(tag): groups[tag].append(name)
+                if tag in groups: groups[tag].append(name)
                 else: groups[tag] = [name]

             net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
             net = 'network_%s' % net
-            if groups.has_key(net): groups[net].append(name)
+            if net in groups: groups[net].append(name)
             else: groups[net] = [name]

             machine_type = node.size
-            if groups.has_key(machine_type): groups[machine_type].append(name)
+            if machine_type in groups: groups[machine_type].append(name)
             else: groups[machine_type] = [name]

             image = node.image and node.image or 'persistent_disk'
-            if groups.has_key(image): groups[image].append(name)
+            if image in groups: groups[image].append(name)
             else: groups[image] = [name]

             status = node.extra['status']
             stat = 'status_%s' % status.lower()
-            if groups.has_key(stat): groups[stat].append(name)
+            if stat in groups: groups[stat].append(name)
             else: groups[stat] = [name]

         groups["_meta"] = meta
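`dict.has_key()` was removed in Python 3; the `in` operator does the same membership test on both major versions and is idiomatic even on Python 2. All of the edits above follow this one pattern:

    groups = {"us-central1-a": ["node-1"]}
    zone, name = "us-central1-b", "node-2"

    # Python 2 only (gone in Python 3): if groups.has_key(zone): ...
    # Portable on both interpreters:
    if zone in groups:
        groups[zone].append(name)
    else:
        groups[zone] = [name]

    # setdefault expresses the same append-or-create in one call:
    groups.setdefault("us-east1-b", []).append("node-3")
    print(groups)
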
@@ -26,7 +26,7 @@ import re
 import os
 import ConfigParser
 from novaclient import client as nova_client
-from six import iteritems
+from six import iteritems, itervalues

 try:
     import json

@@ -105,7 +105,7 @@ def get_ips(server, access_ip=True):
     # Iterate through each servers network(s), get addresses and get type
     addresses = getattr(server, 'addresses', {})
     if len(addresses) > 0:
-        for network in addresses.itervalues():
+        for network in itervalues(addresses):
             for address in network:
                 if address.get('OS-EXT-IPS:type', False) == 'fixed':
                     private.append(address['addr'])
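`six.itervalues(d)` dispatches to `d.itervalues()` on Python 2 and `d.values()` on Python 3, which is why the loop is rewritten through the shim rather than simply renamed. A self-contained sketch of the fixed loop, assuming the `six` package is installed (the address data below is made up):

    from six import itervalues

    addresses = {
        "private-net": [{"addr": "10.0.0.5", "OS-EXT-IPS:type": "fixed"}],
        "public-net": [{"addr": "203.0.113.7", "OS-EXT-IPS:type": "floating"}],
    }

    private, public = [], []
    # itervalues(d) -> d.itervalues() on py2, d.values() on py3
    for network in itervalues(addresses):
        for address in network:
            if address.get("OS-EXT-IPS:type", False) == "fixed":
                private.append(address["addr"])
            else:
                public.append(address["addr"])

    print(private, public)  # ['10.0.0.5'] ['203.0.113.7']
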
@@ -201,7 +201,7 @@ class NSoTInventory(object):
             _inventory_group()
         '''
         inventory = dict()
-        for group, contents in self.config.iteritems():
+        for group, contents in self.config.items():
             group_response = self._inventory_group(group, contents)
             inventory.update(group_response)
         inventory.update({'_meta': self._meta})
@@ -211,7 +211,7 @@ class OVirtInventory(object):
             'ovirt_uuid': inst.get_id(),
             'ovirt_id': inst.get_id(),
             'ovirt_image': inst.get_os().get_type(),
-            'ovirt_machine_type': inst.get_instance_type(),
+            'ovirt_machine_type': self.get_machine_type(inst),
             'ovirt_ips': ips,
             'ovirt_name': inst.get_name(),
             'ovirt_description': inst.get_description(),

@@ -230,6 +230,11 @@ class OVirtInventory(object):
         """
         return [x.get_name() for x in inst.get_tags().list()]

+    def get_machine_type(self,inst):
+        inst_type = inst.get_instance_type()
+        if inst_type:
+            return self.driver.instancetypes.get(id=inst_type.id).name
+
     # noinspection PyBroadException,PyUnusedLocal
     def get_instance(self, instance_name):
         """Gets details about a specific instance """
@@ -14,7 +14,7 @@ class RackhdInventory(object):
         for nodeid in nodeids:
             self._load_inventory_data(nodeid)
         inventory = {}
-        for nodeid,info in self._inventory.iteritems():
+        for nodeid,info in self._inventory.items():
            inventory[nodeid]= (self._format_output(nodeid, info))
         print(json.dumps(inventory))

@@ -24,7 +24,7 @@ class RackhdInventory(object):
         info['lookup'] = RACKHD_URL + '/api/common/lookups/?q={0}'.format(nodeid)

         results = {}
-        for key,url in info.iteritems():
+        for key,url in info.items():
             r = requests.get( url, verify=False)
             results[key] = r.text
         self._inventory[nodeid] = results

@@ -36,7 +36,7 @@ class RackhdInventory(object):
             if len(node_info) > 0:
                 ipaddress = node_info[0]['ipAddress']
             output = { 'hosts':[ipaddress],'vars':{}}
-            for key,result in info.iteritems():
+            for key,result in info.items():
                 output['vars'][key] = json.loads(result)
             output['vars']['ansible_ssh_user'] = 'monorail'
         except KeyError:
@@ -210,7 +210,7 @@ class VMWareInventory(object):
         config.read(vmware_ini_path)

         # apply defaults
-        for k,v in defaults['vmware'].iteritems():
+        for k,v in defaults['vmware'].items():
             if not config.has_option('vmware', k):
                 config.set('vmware', k, str(v))

@@ -356,7 +356,7 @@ class VMWareInventory(object):

         # Reset the inventory keys
-        for k,v in name_mapping.iteritems():
+        for k,v in name_mapping.items():

             if not host_mapping or not k in host_mapping:
                 continue

@@ -389,7 +389,7 @@ class VMWareInventory(object):
                 continue
             self.debugl('# filter: %s' % hf)
             filter_map = self.create_template_mapping(inventory, hf, dtype='boolean')
-            for k,v in filter_map.iteritems():
+            for k,v in filter_map.items():
                 if not v:
                     # delete this host
                     inventory['all']['hosts'].remove(k)

@@ -402,7 +402,7 @@ class VMWareInventory(object):
         # Create groups
         for gbp in self.groupby_patterns:
             groupby_map = self.create_template_mapping(inventory, gbp)
-            for k,v in groupby_map.iteritems():
+            for k,v in groupby_map.items():
                 if v not in inventory:
                     inventory[v] = {}
                     inventory[v]['hosts'] = []

@@ -417,7 +417,7 @@ class VMWareInventory(object):
         ''' Return a hash of uuid to templated string from pattern '''

         mapping = {}
-        for k,v in inventory['_meta']['hostvars'].iteritems():
+        for k,v in inventory['_meta']['hostvars'].items():
             t = jinja2.Template(pattern)
             newkey = None
             try:
@@ -251,7 +251,7 @@ Tower Support Questions

 Ansible `Tower <http://ansible.com/tower>`_ is a UI, Server, and REST endpoint for Ansible, produced by Ansible, Inc.

-If you have a question about Tower, email `support@ansible.com <mailto:support@ansible.com>`_ rather than using the IRC
+If you have a question about Tower, visit `support.ansible.com <https://support.ansible.com/>`_ rather than using the IRC
 channel or the general project mailing list.

 IRC Channel
@@ -85,6 +85,20 @@ different locations::
 Most users will not need to use this feature. See :doc:`developing_plugins` for more details.


+.. _allow_unsafe_lookups:
+
+allow_unsafe_lookups
+====================
+
+.. versionadded:: 2.2.3, 2.3.1
+
+When enabled, this option allows lookup plugins (whether used in variables as `{{lookup('foo')}}` or as a loop as `with_foo`) to return data that is **not** marked "unsafe". By default, such data is marked as unsafe to prevent the templating engine from evaluating any jinja2 templating language, as this could represent a security risk.
+
+This option is provided to allow for backwards-compatibility, however users should first consider adding `allow_unsafe=True` to any lookups which may be expected to contain data which may be run through the templating engine later. For example::
+
+    {{lookup('pipe', '/path/to/some/command', allow_unsafe=True)}}
+
 .. _allow_world_readable_tmpfiles:

 allow_world_readable_tmpfiles

@@ -443,6 +457,20 @@ implications and wish to disable it, you may do so here by setting the value to

     host_key_checking=True

+.. _internal_poll_interval:
+
+internal_poll_interval
+======================
+
+.. versionadded:: 2.2
+
+This sets the interval (in seconds) of Ansible internal processes polling each other.
+Lower values improve performance with large playbooks at the expense of extra CPU load.
+Higher values are more suitable for Ansible usage in automation scenarios, when UI responsiveness is not required but CPU usage might be a concern.
+Default corresponds to the value hardcoded in Ansible <= 2.1::
+
+    internal_poll_interval=0.001
+
 .. _inventory_file:

 inventory
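The `allow_unsafe_lookups` documentation added above turns on behavior Ansible deliberately disabled: when a value flows back through the templating engine, any jinja2 syntax inside it gets evaluated. A plain-jinja2 sketch of the difference between substituting a string and templating it again (an analogy for Ansible's recursive templating, not Ansible's own code):

    from jinja2 import Template

    # Stand-in for text a lookup plugin read from an untrusted source.
    lookup_result = "hello {{ 6 * 7 }}"

    # Substituting the string into a template does not evaluate it:
    print(Template("{{ value }}").render(value=lookup_result))  # hello {{ 6 * 7 }}

    # But running the string itself through the engine does, which is what
    # happens when an unmarked value is templated again:
    print(Template(lookup_result).render())  # hello 42
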
@@ -13,8 +13,8 @@

 #inventory = /etc/ansible/hosts
 #library = /usr/share/my_modules/
-#remote_tmp = $HOME/.ansible/tmp
-#local_tmp = $HOME/.ansible/tmp
+#remote_tmp = ~/.ansible/tmp
+#local_tmp = ~/.ansible/tmp
 #forks = 5
 #poll_interval = 15
 #sudo_user = root

@@ -70,6 +70,9 @@
 #task_includes_static = True
 #handler_includes_static = True

+# Controls if a missing handler for a notification event is an error or a warning
+#error_on_missing_handler = True
+
 # change this for alternative sudo implementations
 #sudo_exe = sudo

@@ -121,8 +124,9 @@
 # templates indicates to users editing templates files will be replaced.
 # replacing {file}, {host} and {uid} and strftime codes with proper values.
-#ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
-# This short version is better used in templates as it won't flag the file as changed every run.
-#ansible_managed = Ansible managed: {file} on {host}
+# {file}, {host}, {uid}, and the timestamp can all interfere with idempotence
+# in some situations so the default is a static string:
+#ansible_managed = Ansible managed

 # by default, ansible-playbook will display "Skipping [host]" if it determines a task
 # should not be run on a host. Set this to "False" if you don't want to see these "Skipping"

@@ -261,6 +265,12 @@
 # set to 0 for unlimited (RAM may suffer!).
 #max_diff_size = 1048576

+# When enabled, this option allows lookups (via variables like {{lookup('foo')}} or when used as
+# a loop with `with_foo`) to return data that is not marked "unsafe". This means the data may contain
+# jinja2 templating language which will be run through the templating engine.
+# ENABLING THIS COULD BE A SECURITY RISK
+#allow_unsafe_lookups = False
+
 [privilege_escalation]
 #become=True
 #become_method=sudo
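The `ansible_managed` hunk above trades information for idempotence: a banner containing strftime codes renders differently on every run, so a checksum-based template comparison reports "changed" each time. A small sketch of why the static string fixes that (the banner text is illustrative):

    import time

    old = "Ansible managed: site.cfg.j2 modified on %Y-%m-%d %H:%M:%S"
    new = "Ansible managed"

    first = time.strftime(old)
    time.sleep(1)
    second = time.strftime(old)
    print(first == second)  # False: the banner (and file checksum) drifts

    # The static default renders identically on every run:
    print(time.strftime(new) == time.strftime(new))  # True
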
@@ -4,16 +4,16 @@
 set HACKING_DIR (dirname (status -f))
 set FULL_PATH (python -c "import os; print(os.path.realpath('$HACKING_DIR'))")
 set ANSIBLE_HOME (dirname $FULL_PATH)
-set PREFIX_PYTHONPATH $ANSIBLE_HOME/lib
-set PREFIX_PATH $ANSIBLE_HOME/bin
+set PREFIX_PYTHONPATH $ANSIBLE_HOME/lib
+set PREFIX_PATH $ANSIBLE_HOME/bin
 set PREFIX_MANPATH $ANSIBLE_HOME/docs/man

 # set quiet flag
-if set -q argv
+if test (count $argv) -ge 1
     switch $argv
-        case '-q' '--quiet'
-            set QUIET "true"
-        case '*'
+    case '-q' '--quiet'
+        set QUIET "true"
+    case '*'
     end
 end

@@ -49,15 +49,14 @@ set -gx ANSIBLE_LIBRARY $ANSIBLE_HOME/library

 # Generate egg_info so that pkg_resources works
 pushd $ANSIBLE_HOME
-if test -e $PREFIX_PYTHONPATH/ansible*.egg-info
-    rm -r $PREFIX_PYTHONPATH/ansible*.egg-info
-end
 if [ $QUIET ]
     python setup.py -q egg_info
 else
     python setup.py egg_info
 end
+if test -e $PREFIX_PYTHONPATH/ansible*.egg-info
+    rm -r $PREFIX_PYTHONPATH/ansible*.egg-info
+end
 mv ansible*egg-info $PREFIX_PYTHONPATH
 find . -type f -name "*.pyc" -delete
 popd
@@ -105,6 +105,10 @@ def boilerplate_module(modfile, args, interpreter, check, destfile):
     #included_boilerplate = module_data.find(module_common.REPLACER) != -1 or module_data.find("import ansible.module_utils") != -1

     complex_args = {}
+
+    # default selinux fs list is pass in as _ansible_selinux_special_fs arg
+    complex_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS
+
     if args.startswith("@"):
         # Argument is a YAML file (JSON is a subset of YAML)
         complex_args = utils_vars.combine_vars(complex_args, loader.load_from_file(args[1:]))
@@ -157,33 +157,37 @@ class CLI(object):


     @staticmethod
-    def ask_vault_passwords(ask_new_vault_pass=False, rekey=False):
+    def ask_vault_passwords():
         ''' prompt for vault password and/or password change '''

         vault_pass = None
-        new_vault_pass = None
         try:
-            if rekey or not ask_new_vault_pass:
-                vault_pass = getpass.getpass(prompt="Vault password: ")
-
-            if ask_new_vault_pass:
-                new_vault_pass = getpass.getpass(prompt="New Vault password: ")
-                new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
-                if new_vault_pass != new_vault_pass2:
-                    raise AnsibleError("Passwords do not match")
+            vault_pass = getpass.getpass(prompt="Vault password: ")
         except EOFError:
             pass

         # enforce no newline chars at the end of passwords
         if vault_pass:
-            vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
+            vault_pass = to_bytes(vault_pass, errors='surrogate_or_strict', nonstring='simplerepr').strip()
+
+        return vault_pass
+
+    @staticmethod
+    def ask_new_vault_passwords():
+        new_vault_pass = None
+        try:
+            new_vault_pass = getpass.getpass(prompt="New Vault password: ")
+            new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
+            if new_vault_pass != new_vault_pass2:
+                raise AnsibleError("Passwords do not match")
+        except EOFError:
+            pass

         if new_vault_pass:
-            new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip()
+            new_vault_pass = to_bytes(new_vault_pass, errors='surrogate_or_strict', nonstring='simplerepr').strip()

-        if ask_new_vault_pass and not rekey:
-            vault_pass = new_vault_pass
-
-        return vault_pass, new_vault_pass
+        return new_vault_pass

     def ask_passwords(self):
         ''' prompt for connection and become passwords if needed '''

@@ -515,10 +519,10 @@ class CLI(object):
         ''' find reasonable way to display text '''
         # this is a much simpler form of what is in pydoc.py
         if not sys.stdout.isatty():
-            display.display(text)
+            display.display(text, screen_only=True)
         elif 'PAGER' in os.environ:
             if sys.platform == 'win32':
-                display.display(text)
+                display.display(text, screen_only=True)
             else:
                 self.pager_pipe(text, os.environ['PAGER'])
         else:

@@ -527,7 +531,7 @@ class CLI(object):
             if p.returncode == 0:
                 self.pager_pipe(text, 'less')
             else:
-                display.display(text)
+                display.display(text, screen_only=True)

     @staticmethod
     def pager_pipe(text, cmd):

@@ -573,7 +577,7 @@ class CLI(object):
             stdout, stderr = p.communicate()
             if p.returncode != 0:
                 raise AnsibleError("Vault password script %s returned non-zero (%s): %s" % (this_path, p.returncode, p.stderr))
-            vault_pass = stdout.strip('\r\n')
+            vault_pass = stdout.strip(b'\r\n')
         else:
             try:
                 f = open(this_path, "rb")
@ -107,7 +107,7 @@ class AdHocCLI(CLI):
|
|||
|
||||
sshpass = None
|
||||
becomepass = None
|
||||
vault_pass = None
|
||||
b_vault_pass = None
|
||||
|
||||
self.normalize_become_options()
|
||||
(sshpass, becomepass) = self.ask_passwords()
|
||||
|
@ -117,11 +117,11 @@ class AdHocCLI(CLI):
|
|||
|
||||
if self.options.vault_password_file:
|
||||
# read vault_pass from a file
|
||||
vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=loader)
|
||||
loader.set_vault_password(vault_pass)
|
||||
b_vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=loader)
|
||||
loader.set_vault_password(b_vault_pass)
|
||||
elif self.options.ask_vault_pass:
|
||||
vault_pass = self.ask_vault_passwords()[0]
|
||||
loader.set_vault_password(vault_pass)
|
||||
b_vault_pass = self.ask_vault_passwords()
|
||||
loader.set_vault_password(b_vault_pass)
|
||||
|
||||
variable_manager = VariableManager()
|
||||
variable_manager.extra_vars = load_extra_vars(loader=loader, options=self.options)
|
||||
|
@ -176,6 +176,9 @@ class AdHocCLI(CLI):
|
|||
cb = self.callback
|
||||
elif self.options.one_line:
|
||||
cb = 'oneline'
|
||||
# Respect custom 'stdout_callback' only with enabled 'bin_ansible_callbacks'
|
||||
elif C.DEFAULT_LOAD_CALLBACK_PLUGINS and C.DEFAULT_STDOUT_CALLBACK != 'default':
|
||||
cb = C.DEFAULT_STDOUT_CALLBACK
|
||||
else:
|
||||
cb = 'minimal'
|
||||
|
||||
|
|
|
@ -89,7 +89,7 @@ class DocCLI(CLI):
|
|||
|
||||
try:
|
||||
# if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
|
||||
filename = module_loader.find_plugin(module, mod_type='.py')
|
||||
filename = module_loader.find_plugin(module, mod_type='.py', ignore_deprecated=True)
|
||||
if filename is None:
|
||||
display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader)))
|
||||
continue
|
||||
|
@ -100,7 +100,7 @@ class DocCLI(CLI):
|
|||
try:
|
||||
doc, plainexamples, returndocs = module_docs.get_docstring(filename, verbose=(self.options.verbosity > 0))
|
||||
except:
|
||||
display.vvv(traceback.print_exc())
|
||||
display.vvv(traceback.format_exc())
|
||||
display.error("module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv" % module)
|
||||
continue
|
||||
|
||||
|
@ -133,10 +133,11 @@ class DocCLI(CLI):
|
|||
# probably a quoting issue.
|
||||
raise AnsibleError("Parsing produced an empty object.")
|
||||
except Exception as e:
|
||||
display.vvv(traceback.print_exc())
|
||||
display.vvv(traceback.format_exc())
|
||||
raise AnsibleError("module %s missing documentation (or could not parse documentation): %s\n" % (module, str(e)))
|
||||
|
||||
self.pager(text)
|
||||
if text:
|
||||
self.pager(text)
|
||||
return 0
|
||||
|
||||
def find_modules(self, path):
|
||||
|
@ -173,7 +174,7 @@ class DocCLI(CLI):
|
|||
continue
|
||||
|
||||
# if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
|
||||
filename = module_loader.find_plugin(module, mod_type='.py')
|
||||
filename = module_loader.find_plugin(module, mod_type='.py', ignore_deprecated=True)
|
||||
|
||||
if filename is None:
|
||||
continue
|
||||
|
|
|
@ -94,7 +94,7 @@ class PlaybookCLI(CLI):
|
|||
# Manage passwords
|
||||
sshpass = None
|
||||
becomepass = None
|
||||
vault_pass = None
|
||||
b_vault_pass = None
|
||||
passwords = {}
|
||||
|
||||
# don't deal with privilege escalation or passwords when we don't need to
|
||||
|
@ -107,11 +107,11 @@ class PlaybookCLI(CLI):
|
|||
|
||||
if self.options.vault_password_file:
|
||||
# read vault_pass from a file
|
||||
vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=loader)
|
||||
loader.set_vault_password(vault_pass)
|
||||
b_vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=loader)
|
||||
loader.set_vault_password(b_vault_pass)
|
||||
elif self.options.ask_vault_pass:
|
||||
vault_pass = self.ask_vault_passwords()[0]
|
||||
loader.set_vault_password(vault_pass)
|
||||
b_vault_pass = self.ask_vault_passwords()
|
||||
loader.set_vault_password(b_vault_pass)
|
||||
|
||||
# initial error check, to make sure all specified playbooks are accessible
|
||||
# before we start running anything through the playbook executor
|
||||
|
@ -163,6 +163,12 @@ class PlaybookCLI(CLI):
|
|||
|
||||
display.display('\nplaybook: %s' % p['playbook'])
|
||||
for idx, play in enumerate(p['plays']):
|
||||
if play._included_path is not None:
|
||||
loader.set_basedir(play._included_path)
|
||||
else:
|
||||
pb_dir = os.path.realpath(os.path.dirname(p['playbook']))
|
||||
loader.set_basedir(pb_dir)
|
||||
|
||||
msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name)
|
||||
mytags = set(play.tags)
|
||||
msg += '\tTAGS: [%s]' % (','.join(mytags))
|
||||
|
|
|
@ -42,8 +42,8 @@ class VaultCLI(CLI):
|
|||
|
||||
def __init__(self, args):
|
||||
|
||||
self.vault_pass = None
|
||||
self.new_vault_pass = None
|
||||
self.b_vault_pass = None
|
||||
self.b_new_vault_pass = None
|
||||
super(VaultCLI, self).__init__(args)
|
||||
|
||||
def parse(self):
|
||||
|
@ -99,23 +99,25 @@ class VaultCLI(CLI):
|
|||
|
||||
if self.options.vault_password_file:
|
||||
# read vault_pass from a file
|
||||
self.vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader)
|
||||
else:
|
||||
newpass = False
|
||||
rekey = False
|
||||
if not self.options.new_vault_password_file:
|
||||
newpass = (self.action in ['create', 'rekey', 'encrypt'])
|
||||
rekey = (self.action == 'rekey')
|
||||
self.vault_pass, self.new_vault_pass = self.ask_vault_passwords(ask_new_vault_pass=newpass, rekey=rekey)
|
||||
self.b_vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader)
|
||||
|
||||
if self.options.new_vault_password_file:
|
||||
# for rekey only
|
||||
self.new_vault_pass = CLI.read_vault_password_file(self.options.new_vault_password_file, loader)
|
||||
self.b_new_vault_pass = CLI.read_vault_password_file(self.options.new_vault_password_file, loader)
|
||||
|
||||
if not self.vault_pass:
|
||||
if not self.b_vault_pass or self.options.ask_vault_pass:
|
||||
self.b_vault_pass = self.ask_vault_passwords()
|
||||
|
||||
if not self.b_vault_pass:
|
||||
raise AnsibleOptionsError("A password is required to use Ansible's Vault")
|
||||
|
||||
self.editor = VaultEditor(self.vault_pass)
|
||||
if self.action == 'rekey':
|
||||
if not self.b_new_vault_pass:
|
||||
self.b_new_vault_pass = self.ask_new_vault_passwords()
|
||||
if not self.b_new_vault_pass:
|
||||
raise AnsibleOptionsError("A password is required to rekey Ansible's Vault")
|
||||
|
||||
self.editor = VaultEditor(self.b_vault_pass)
|
||||
|
||||
self.execute()
|
||||
|
||||
|
@ -171,6 +173,6 @@ class VaultCLI(CLI):
|
|||
raise AnsibleError(f + " does not exist")
|
||||
|
||||
for f in self.args:
|
||||
self.editor.rekey_file(f, self.new_vault_pass)
|
||||
self.editor.rekey_file(f, self.b_new_vault_pass)
|
||||
|
||||
display.display("Rekey successful", stderr=True)
|
||||
|
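The b_vault_pass renames above track the fact that vault passwords are handled as bytes end to end in 2.2, with trailing newlines stripped on the way in (see the stdout.strip(b'\r\n') change earlier). A hedged sketch of the normalization a password file read implies; read_password_file is a hypothetical helper, not the actual CLI method:

def read_password_file(path):
    # Read the secret as raw bytes and drop any trailing CR/LF left by
    # an editor, the same cleanup the vault CLI performs before use.
    with open(path, 'rb') as f:
        return f.read().strip(b'\r\n')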
@@ -43,6 +43,7 @@ if PY3:
     class_types = type,
     text_type = str
     binary_type = bytes
+    cmp = lambda a, b: (a > b) - (a < b)

     MAXSIZE = sys.maxsize
 else:

@@ -51,6 +52,7 @@ else:
     class_types = (type, types.ClassType)
     text_type = unicode
     binary_type = str
+    cmp = cmp

     if sys.platform.startswith("java"):
         # Jython always uses 32 bits.
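Python 3 removed the builtin cmp(); the (a > b) - (a < b) shim added above reproduces its -1/0/1 contract, while the Python 2 branch just re-exports the builtin. A quick self-check of the shim:

cmp = lambda a, b: (a > b) - (a < b)  # same expression as the compat shim above

assert cmp(1, 2) == -1
assert cmp(2, 2) == 0
assert cmp(3, 2) == 1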
@@ -146,7 +146,7 @@ DEFAULT_COW_WHITELIST = ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'd
 DEFAULTS='defaults'

+# FIXME: add deprecation warning when these get set
 #### DEPRECATED VARS ####
 # use more sanely named 'inventory'
 DEPRECATED_HOST_LIST = get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts', ispath=True)
 # this is not used since 0.5 but people might still have in config

@@ -157,8 +157,8 @@ DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE
 DEFAULT_HOST_LIST = get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', DEPRECATED_HOST_LIST, ispath=True)
 DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None, ispathlist=True)
 DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', ispathlist=True, expand_relative_paths=True)
-DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
-DEFAULT_LOCAL_TMP = get_config(p, DEFAULTS, 'local_tmp', 'ANSIBLE_LOCAL_TEMP', '$HOME/.ansible/tmp', istmppath=True)
+DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '~/.ansible/tmp')
+DEFAULT_LOCAL_TMP = get_config(p, DEFAULTS, 'local_tmp', 'ANSIBLE_LOCAL_TEMP', '~/.ansible/tmp', istmppath=True)
 DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
 DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True)
 DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '')

@@ -176,7 +176,7 @@ DEFAULT_VAULT_PASSWORD_FILE = get_config(p, DEFAULTS, 'vault_password_file', 'AN
 DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart')
 DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', 'smart')
 DEFAULT_SFTP_BATCH_MODE = get_config(p, 'ssh_connection', 'sftp_batch_mode', 'ANSIBLE_SFTP_BATCH_MODE', True, boolean=True)
-DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}')
+DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed')
 DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
 DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True)
 DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')

@@ -190,6 +190,9 @@ DEFAULT_LOG_PATH = get_config(p, DEFAULTS, 'log_path', 'ANSIB
 DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
 DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE', ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], islist=True)
 DEFAULT_VAR_COMPRESSION_LEVEL = get_config(p, DEFAULTS, 'var_compression_level', 'ANSIBLE_VAR_COMPRESSION_LEVEL', 0, integer=True)
+DEFAULT_INTERNAL_POLL_INTERVAL = get_config(p, DEFAULTS, 'internal_poll_interval', None, 0.001, floating=True)
+DEFAULT_ALLOW_UNSAFE_LOOKUPS = get_config(p, DEFAULTS, 'allow_unsafe_lookups', None, False, boolean=True)
+ERROR_ON_MISSING_HANDLER = get_config(p, DEFAULTS, 'error_on_missing_handler', 'ANSIBLE_ERROR_ON_MISSING_HANDLER', True, boolean=True)

 # static includes
 DEFAULT_TASK_INCLUDES_STATIC = get_config(p, DEFAULTS, 'task_includes_static', 'ANSIBLE_TASK_INCLUDES_STATIC', False, boolean=True)

@@ -1,4 +1,4 @@
-# (c) 2016 - Red Hat, Inc. <support@ansible.com>
+# (c) 2016 - Red Hat, Inc. <info@ansible.com>
 #
 # This file is part of Ansible
 #

@@ -22,6 +22,7 @@ __metaclass__ = type

 import ast
+import base64
 import datetime
 import imp
 import json
 import os

@@ -106,10 +107,21 @@ import __main__
 # Ubuntu15.10 with python2.7    Works
 # Ubuntu15.10 with python3.4    Fails without this
 # Ubuntu16.04.1 with python3.5  Fails without this
+# To test on another platform:
+# * use the copy module (since this shadows the stdlib copy module)
+# * Turn off pipelining
+# * Make sure that the destination file does not exist
+# * ansible ubuntu16-test -m copy -a 'src=/etc/motd dest=/var/tmp/m'
+# This will traceback in shutil.  Looking at the complete traceback will show
+# that shutil is importing copy which finds the ansible module instead of the
+# stdlib module
 scriptdir = None
 try:
     scriptdir = os.path.dirname(os.path.abspath(__main__.__file__))
-except AttributeError:
+except (AttributeError, OSError):
     # Some platforms don't set __file__ when reading from stdin
+    # OSX raises OSError if using abspath() in a directory we don't have
+    # permission to read.
     pass
 if scriptdir is not None:
     sys.path = [p for p in sys.path if p != scriptdir]
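The wrapper above drops the executing script's own directory from sys.path so that a payload module named copy (or any other stdlib name) cannot shadow the standard library when, for example, shutil imports copy. The same defensive pattern in isolation, as a sketch rather than the wrapper itself:

import os
import sys

def drop_script_dir_from_path(main_file):
    # Remove the directory containing the executing script from sys.path,
    # so stdlib imports can't be shadowed by same-named files next to it.
    try:
        scriptdir = os.path.dirname(os.path.abspath(main_file))
    except (AttributeError, OSError):
        # __file__ may be unset when reading from stdin, and abspath() can
        # raise OSError in an unreadable directory, as noted above.
        return
    sys.path = [p for p in sys.path if p != scriptdir]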
@@ -212,12 +224,12 @@ def debug(command, zipped_mod, json_params):
             directory = os.path.dirname(dest_filename)
             if not os.path.exists(directory):
                 os.makedirs(directory)
-            f = open(dest_filename, 'w')
+            f = open(dest_filename, 'wb')
             f.write(z.read(filename))
             f.close()

         # write the args file
-        f = open(args_path, 'w')
+        f = open(args_path, 'wb')
         f.write(json_params)
         f.close()

@@ -317,7 +329,12 @@ if __name__ == '__main__':
             # py3: zipped_mod will be text, py2: it's bytes.  Need bytes at the end
             sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% zipped_mod
             sitecustomize = sitecustomize.encode('utf-8')
-            z.writestr('sitecustomize.py', sitecustomize)
+            # Use a ZipInfo to work around zipfile limitation on hosts with
+            # clocks set to a pre-1980 year (for instance, Raspberry Pi)
+            zinfo = zipfile.ZipInfo()
+            zinfo.filename = 'sitecustomize.py'
+            zinfo.date_time = ( %(year)i, %(month)i, %(day)i, %(hour)i, %(minute)i, %(second)i)
+            z.writestr(zinfo, sitecustomize)
             z.close()

             exitcode = invoke_module(module, zipped_mod, ANSIBALLZ_PARAMS)
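Context for the ZipInfo change: zipfile.writestr() called with a bare filename stamps the entry with the host's current clock, and the ZIP format cannot represent years before 1980, so a host with an unset clock raises ValueError. Passing a ZipInfo with an explicit date_time avoids that. A standalone sketch of the workaround:

import zipfile

def write_with_fixed_date(zip_path, name, data, date_time=(1980, 1, 1, 0, 0, 0)):
    # Supplying an explicit ZipInfo sidesteps the pre-1980 clock problem:
    # the entry's timestamp no longer depends on the host's system time.
    zinfo = zipfile.ZipInfo()
    zinfo.filename = name
    zinfo.date_time = date_time
    with zipfile.ZipFile(zip_path, 'a') as z:
        z.writestr(zinfo, data)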
@@ -680,6 +697,7 @@ def _find_snippet_imports(module_name, module_data, module_path, module_args, ta
             interpreter_parts = interpreter.split(u' ')
             interpreter = u"'{0}'".format(u"', '".join(interpreter_parts))

+        now = datetime.datetime.utcnow()
         output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict(
             zipdata=zipdata,
             ansible_module=module_name,

@@ -687,6 +705,12 @@ def _find_snippet_imports(module_name, module_data, module_path, module_args, ta
             shebang=shebang,
             interpreter=interpreter,
             coding=ENCODING_STRING,
+            year=now.year,
+            month=now.month,
+            day=now.day,
+            hour=now.hour,
+            minute=now.minute,
+            second=now.second,
         )))
         module_data = output.getvalue()

@@ -28,6 +28,7 @@ from ansible import constants as C
 from ansible.errors import AnsibleError
 from ansible.playbook.block import Block
 from ansible.playbook.task import Task
+from ansible.playbook.role_include import IncludeRole

 from ansible.utils.boolean import boolean

@@ -48,7 +49,6 @@ class HostState:
         self.cur_regular_task = 0
         self.cur_rescue_task = 0
         self.cur_always_task = 0
-        self.cur_role = None
         self.cur_dep_chain = None
         self.run_state = PlayIterator.ITERATING_SETUP
         self.fail_state = PlayIterator.FAILED_NONE

@@ -56,6 +56,7 @@ class HostState:
         self.tasks_child_state = None
         self.rescue_child_state = None
         self.always_child_state = None
+        self.did_rescue = False
         self.did_start_at_task = False

     def __repr__(self):

@@ -80,18 +81,18 @@ class HostState:
                 ret.append(states[i])
             return "|".join(ret)

-        return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? %s, rescue child state? %s, always child state? %s, did start at task? %s" % (
+        return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? (%s), rescue child state? (%s), always child state? (%s), did rescue? %s, did start at task? %s" % (
             self.cur_block,
             self.cur_regular_task,
             self.cur_rescue_task,
             self.cur_always_task,
-            self.cur_role,
             _run_state_to_string(self.run_state),
             _failed_state_to_string(self.fail_state),
             self.pending_setup,
             self.tasks_child_state,
             self.rescue_child_state,
             self.always_child_state,
+            self.did_rescue,
             self.did_start_at_task,
         )

@@ -101,7 +102,7 @@ class HostState:

         for attr in (
             '_blocks', 'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task',
-            'cur_role', 'run_state', 'fail_state', 'pending_setup', 'cur_dep_chain',
+            'run_state', 'fail_state', 'pending_setup', 'cur_dep_chain',
             'tasks_child_state', 'rescue_child_state', 'always_child_state'
         ):
             if getattr(self, attr) != getattr(other, attr):

@@ -118,10 +119,10 @@ class HostState:
         new_state.cur_regular_task = self.cur_regular_task
         new_state.cur_rescue_task = self.cur_rescue_task
         new_state.cur_always_task = self.cur_always_task
-        new_state.cur_role = self.cur_role
         new_state.run_state = self.run_state
         new_state.fail_state = self.fail_state
         new_state.pending_setup = self.pending_setup
+        new_state.did_rescue = self.did_rescue
         new_state.did_start_at_task = self.did_start_at_task
         if self.cur_dep_chain is not None:
             new_state.cur_dep_chain = self.cur_dep_chain[:]

@@ -177,6 +178,9 @@ class PlayIterator:
             if gather_timeout:
                 setup_task.args['gather_timeout'] = gather_timeout
             setup_task.set_loader(self._play._loader)
+            # short circuit fact gathering if the entire playbook is conditional
+            if self._play._included_conditional is not None:
+                setup_task.when = self._play._included_conditional[:]
             setup_block.block = [setup_task]

         setup_block = setup_block.filter_tagged_tasks(play_context, all_vars)

@@ -198,7 +202,7 @@ class PlayIterator:
             self._host_states[host.name] = HostState(blocks=self._blocks)
             # if the host's name is in the variable manager's fact cache, then set
             # its _gathered_facts flag to true for smart gathering tests later
-            if host.name in variable_manager._fact_cache and variable_manager._fact_cache.get('module_setup', False):
+            if host.name in variable_manager._fact_cache and variable_manager._fact_cache.get(host.name).get('module_setup', False):
                 host._gathered_facts = True
             # if we're looking to start at a specific task, iterate through
             # the tasks for this host until we find the specified task

@@ -259,19 +263,6 @@ class PlayIterator:
             old_s = s
             (s, task) = self._get_next_task_from_state(s, host=host, peek=peek)

-            def _roles_are_different(ra, rb):
-                if ra != rb:
-                    return True
-                else:
-                    return old_s.cur_dep_chain != task.get_dep_chain()
-
-            if task and task._role:
-                # if we had a current role, mark that role as completed
-                if s.cur_role and _roles_are_different(task._role, s.cur_role) and host.name in s.cur_role._had_task_run and not peek:
-                    s.cur_role._completed[host.name] = True
-                s.cur_role = task._role
-                s.cur_dep_chain = task.get_dep_chain()
-
             if not peek:
                 self._host_states[host.name] = s

@@ -281,7 +272,7 @@ class PlayIterator:
         return (s, task)

-    def _get_next_task_from_state(self, state, host, peek):
+    def _get_next_task_from_state(self, state, host, peek, in_child=False):

         task = None

@@ -347,7 +338,7 @@ class PlayIterator:
                 # have one recurse into it for the next task. If we're done with the child
                 # state, we clear it and drop back to geting the next task from the list.
                 if state.tasks_child_state:
-                    (state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host, peek=peek)
+                    (state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host, peek=peek, in_child=True)
                     if self._check_failed_state(state.tasks_child_state):
                         # failed child state, so clear it and move into the rescue portion
                         state.tasks_child_state = None

@@ -376,7 +367,6 @@ class PlayIterator:
                         if isinstance(task, Block) or state.tasks_child_state is not None:
                             state.tasks_child_state = HostState(blocks=[task])
                             state.tasks_child_state.run_state = self.ITERATING_TASKS
-                            state.tasks_child_state.cur_role = state.cur_role
                             # since we've created the child state, clear the task
                             # so we can pick up the child state on the next pass
                             task = None

@@ -386,7 +376,7 @@ class PlayIterator:
                 # The process here is identical to ITERATING_TASKS, except instead
                 # we move into the always portion of the block.
                 if state.rescue_child_state:
-                    (state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host, peek=peek)
+                    (state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host, peek=peek, in_child=True)
                     if self._check_failed_state(state.rescue_child_state):
                         state.rescue_child_state = None
                         self._set_failed_state(state)

@@ -401,12 +391,12 @@ class PlayIterator:
                         if len(block.rescue) > 0:
                             state.fail_state = self.FAILED_NONE
                             state.run_state = self.ITERATING_ALWAYS
+                            state.did_rescue = True
                     else:
                         task = block.rescue[state.cur_rescue_task]
                         if isinstance(task, Block) or state.rescue_child_state is not None:
                             state.rescue_child_state = HostState(blocks=[task])
                             state.rescue_child_state.run_state = self.ITERATING_TASKS
-                            state.rescue_child_state.cur_role = state.cur_role
                             task = None
                         state.cur_rescue_task += 1

@@ -416,13 +406,14 @@ class PlayIterator:
                 # run state to ITERATING_COMPLETE in the event of any errors, or when we
                 # have hit the end of the list of blocks.
                 if state.always_child_state:
-                    (state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host, peek=peek)
+                    (state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host, peek=peek, in_child=True)
                     if self._check_failed_state(state.always_child_state):
                         state.always_child_state = None
                         self._set_failed_state(state)
                     else:
                         if task is None or state.always_child_state.run_state == self.ITERATING_COMPLETE:
                             state.always_child_state = None
                             continue
                 else:
                     if state.cur_always_task >= len(block.always):
                         if state.fail_state != self.FAILED_NONE:

@@ -436,12 +427,17 @@ class PlayIterator:
                         state.tasks_child_state = None
                         state.rescue_child_state = None
                         state.always_child_state = None
+                        state.did_rescue = False
+
+                        # we're advancing blocks, so if this was an end-of-role block we
+                        # mark the current role complete
+                        if block._eor and host.name in block._role._had_task_run and not in_child:
+                            block._role._completed[host.name] = True
                     else:
                         task = block.always[state.cur_always_task]
                         if isinstance(task, Block) or state.always_child_state is not None:
                             state.always_child_state = HostState(blocks=[task])
                             state.always_child_state.run_state = self.ITERATING_TASKS
-                            state.always_child_state.cur_role = state.cur_role
                             task = None
                         state.cur_always_task += 1

@@ -492,6 +488,7 @@ class PlayIterator:
             s = self._set_failed_state(s)
             display.debug("^ failed state is now: %s" % s)
             self._host_states[host.name] = s
+            self._play._removed_hosts.append(host.name)

     def get_failed_hosts(self):
         return dict((host, True) for (host, state) in iteritems(self._host_states) if self._check_failed_state(state))

@@ -499,23 +496,23 @@ class PlayIterator:
     def _check_failed_state(self, state):
         if state is None:
             return False
-        elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state):
-            return True
-        elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state):
-            return True
         elif state.fail_state != self.FAILED_NONE:
             if state.run_state == self.ITERATING_RESCUE and state.fail_state&self.FAILED_RESCUE == 0:
                 return False
             elif state.run_state == self.ITERATING_ALWAYS and state.fail_state&self.FAILED_ALWAYS == 0:
                 return False
             else:
-                return True
+                return not state.did_rescue
         elif state.run_state == self.ITERATING_TASKS and self._check_failed_state(state.tasks_child_state):
             cur_block = self._blocks[state.cur_block]
             if len(cur_block.rescue) > 0 and state.fail_state & self.FAILED_RESCUE == 0:
                 return False
             else:
                 return True
+        elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state):
+            return True
+        elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state):
+            return True
         return False

     def is_failed(self, host):
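The fail_state tests above are bitmask checks: the FAILED_* constants are distinct bits OR-ed together as sections of a block fail, so `fail_state & self.FAILED_RESCUE == 0` asks whether the rescue section has failed yet (in Python, & binds tighter than ==). A reduced sketch of the same bookkeeping; the constant values here are illustrative, not quoted from the iterator:

FAILED_NONE = 0
FAILED_TASKS = 2
FAILED_RESCUE = 4

fail_state = FAILED_NONE
fail_state |= FAILED_TASKS          # a task in the block failed

# A failure is only fatal at this point if the rescue section has also
# failed, mirroring the `fail_state & self.FAILED_RESCUE == 0` tests above.
if fail_state & FAILED_RESCUE == 0:
    print("tasks failed, but rescue can still run")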
@@ -68,21 +68,25 @@ class WorkerProcess(multiprocessing.Process):
         self._variable_manager = variable_manager
         self._shared_loader_obj = shared_loader_obj

-        # dupe stdin, if we have one
-        self._new_stdin = sys.stdin
-        try:
-            fileno = sys.stdin.fileno()
-            if fileno is not None:
-                try:
-                    self._new_stdin = os.fdopen(os.dup(fileno))
-                except OSError:
-                    # couldn't dupe stdin, most likely because it's
-                    # not a valid file descriptor, so we just rely on
-                    # using the one that was passed in
-                    pass
-        except (AttributeError, ValueError):
-            # couldn't get stdin's fileno, so we just carry on
-            pass
+        if sys.stdin.isatty():
+            # dupe stdin, if we have one
+            self._new_stdin = sys.stdin
+            try:
+                fileno = sys.stdin.fileno()
+                if fileno is not None:
+                    try:
+                        self._new_stdin = os.fdopen(os.dup(fileno))
+                    except OSError:
+                        # couldn't dupe stdin, most likely because it's
+                        # not a valid file descriptor, so we just rely on
+                        # using the one that was passed in
+                        pass
+            except (AttributeError, ValueError):
+                # couldn't get stdin's fileno, so we just carry on
+                pass
+        else:
+            # set to /dev/null
+            self._new_stdin = os.devnull

     def run(self):
         '''

@@ -19,6 +19,8 @@
 from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

+from ansible.utils.vars import merge_hash
+
 class AggregateStats:
     ''' holds stats about per-host activity during playbook runs '''

@@ -31,6 +33,9 @@ class AggregateStats:
         self.changed = {}
         self.skipped = {}

+        # user defined stats, which can be per host or global
+        self.custom = {}
+
     def increment(self, what, host):
         ''' helper function to bump a statistic '''

@@ -49,3 +54,31 @@ class AggregateStats:
             skipped = self.skipped.get(host, 0)
         )
+
+    def set_custom_stats(self, which, what, host=None):
+        ''' allow setting of a custom fact '''
+
+        if host is None:
+            host = '_run'
+        if host not in self.custom:
+            self.custom[host] = {which: what}
+        else:
+            self.custom[host][which] = what
+
+    def update_custom_stats(self, which, what, host=None):
+        ''' allow aggregation of a custom fact '''
+
+        if host is None:
+            host = '_run'
+        if host not in self.custom or which not in self.custom[host]:
+            return self.set_custom_stats(which, what, host)
+
+        # mismatching types
+        if type(what) != type(self.custom[host][which]):
+            return None
+
+        if isinstance(what, dict):
+            self.custom[host][which] = merge_hash(self.custom[host][which], what)
+        else:
+            # let overloaded + take care of other types
+            self.custom[host][which] += what
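update_custom_stats above aggregates per-host values: the first write sets the value, later writes of the same type accumulate with +, dicts are merged recursively via merge_hash, and mismatched types are ignored. A self-contained sketch of that aggregation rule (not the AggregateStats class itself):

stats = {}  # maps host -> {stat_name: value}

def update_stat(host, name, value):
    # First write sets; later writes of the same type aggregate with +.
    # Dicts would be merged recursively instead; mismatched types are skipped.
    bucket = stats.setdefault(host, {})
    if name not in bucket:
        bucket[name] = value
    elif type(bucket[name]) == type(value):
        bucket[name] += value

update_stat('web1', 'restarts', 1)
update_stat('web1', 'restarts', 2)
assert stats['web1']['restarts'] == 3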
@@ -71,6 +71,7 @@ class TaskExecutor:
         self._shared_loader_obj = shared_loader_obj
         self._connection = None
         self._rslt_q = rslt_q
+        self._loop_eval_error = None

         self._task.squash()

@@ -85,10 +86,13 @@ class TaskExecutor:
         display.debug("in run()")

         try:
-            # get search path for this task to pass to lookup plugins
-            self._job_vars['ansible_search_path'] = self._task.get_search_path()
+            try:
+                items = self._get_loop_items()
+            except AnsibleUndefinedVariable as e:
+                # save the error raised here for use later
+                items = None
+                self._loop_eval_error = e

-            items = self._get_loop_items()
             if items is not None:
                 if len(items) > 0:
                     item_results = self._run_loop(items)

@@ -126,17 +130,26 @@ class TaskExecutor:
             if 'changed' not in res:
                 res['changed'] = False

-            def _clean_res(res):
+            def _clean_res(res, errors='surrogate_or_strict'):
                 if isinstance(res, UnsafeProxy):
                     return res._obj
                 elif isinstance(res, binary_type):
-                    return to_text(res, errors='surrogate_or_strict')
+                    return to_text(res, errors=errors)
                 elif isinstance(res, dict):
                     for k in res:
-                        res[k] = _clean_res(res[k])
+                        try:
+                            res[k] = _clean_res(res[k], errors=errors)
+                        except UnicodeError:
+                            if k == 'diff':
+                                # If this is a diff, substitute a replacement character if the value
+                                # is undecodable as utf8.  (Fix #21804)
+                                display.warning("We were unable to decode all characters, replaced some in an effort to return as much as possible")
+                                res[k] = _clean_res(res[k], errors='surrogate_then_replace')
+                            else:
+                                raise
                 elif isinstance(res, list):
                     for idx,item in enumerate(res):
-                        res[idx] = _clean_res(item)
+                        res[idx] = _clean_res(item, errors=errors)
                 return res

             display.debug("dumping result to json")
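The surrogate_then_replace fallback above only relaxes decoding for the diff key, trading character fidelity for a result that can still be serialized. The underlying bytes-to-text behaviour it leans on, in plain standard-library terms:

raw = b'ok \xff bytes'          # \xff is not valid UTF-8

try:
    text = raw.decode('utf-8')  # strict decoding raises UnicodeDecodeError
except UnicodeDecodeError:
    # Fall back to substituting the replacement character, keeping as much
    # of the payload as possible (the diff-key strategy above).
    text = raw.decode('utf-8', errors='replace')

assert u'\ufffd' in text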
@@ -173,6 +186,10 @@ class TaskExecutor:
                 old_vars[k] = self._job_vars[k]
                 self._job_vars[k] = play_context_vars[k]

+        # get search path for this task to pass to lookup plugins
+        self._job_vars['ansible_search_path'] = self._task.get_search_path()
+
         templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)
         items = None
         if self._task.loop:

@@ -212,6 +229,11 @@ class TaskExecutor:
             for idx, item in enumerate(items):
                 if item is not None and not isinstance(item, UnsafeProxy):
                     items[idx] = UnsafeProxy(item)

+        # ensure basedir is always in (dwim already searches here but we need to display it)
+        if self._loader.get_basedir() not in self._job_vars['ansible_search_path']:
+            self._job_vars['ansible_search_path'].append(self._loader.get_basedir())
+
         return items

     def _run_loop(self, items):

@@ -396,6 +418,11 @@ class TaskExecutor:
             if not self._task.evaluate_conditional(templar, variables):
                 display.debug("when evaluation failed, skipping this task")
                 return dict(changed=False, skipped=True, skip_reason='Conditional check failed', _ansible_no_log=self._play_context.no_log)
+
+            # since we're not skipping, if there was a loop evaluation error
+            # raised earlier we need to raise it now to halt the execution of
+            # this task
+            if self._loop_eval_error is not None:
+                raise self._loop_eval_error
         except AnsibleError:
             # skip conditional exception in the case of includes as the vars needed might not be avaiable except in the included tasks or due to tags
             if self._task.action not in ['include', 'include_role']:

@@ -416,14 +443,10 @@ class TaskExecutor:
             include_file = templar.template(include_file)
             return dict(include=include_file, include_variables=include_variables)

+        # TODO: not needed?
         # if this task is a IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host
         elif self._task.action == 'include_role':
             include_variables = self._task.args.copy()
-            role = templar.template(self._task._role_name)
-            if not role:
-                return dict(failed=True, msg="No role was specified to include")
-            return dict(include_role=role, include_variables=include_variables)
+            return dict(include_role=self._task, include_variables=include_variables)

         # Now we do final validation on the task, which sets all fields to their final values.
         self._task.post_validate(templar=templar)

@@ -499,7 +522,7 @@ class TaskExecutor:
             vars_copy[self._task.register] = wrap_var(result.copy())

         if self._task.async > 0:
-            if self._task.poll > 0:
+            if self._task.poll > 0 and not result.get('skipped'):
                 result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy)

             # ensure no log is preserved

@@ -138,8 +138,8 @@ class TaskQueueManager:

         # then initialize it with the given handler list
         for handler in handler_list:
-            if handler not in self._notified_handlers:
-                self._notified_handlers[handler] = []
+            if handler._uuid not in self._notified_handlers:
+                self._notified_handlers[handler._uuid] = []
             if handler.listen:
                 listeners = handler.listen
                 if not isinstance(listeners, list):

@@ -147,7 +147,7 @@ class TaskQueueManager:
                 for listener in listeners:
                     if listener not in self._listening_handlers:
                         self._listening_handlers[listener] = []
-                    self._listening_handlers[listener].append(handler.get_name())
+                    self._listening_handlers[listener].append(handler._uuid)

     def load_callbacks(self):
         '''

@@ -222,7 +222,7 @@ class TaskQueueManager:
         )

         # Fork # of forks, # of hosts or serial, whichever is lowest
-        num_hosts = len(self._inventory.get_hosts(new_play.hosts))
+        num_hosts = len(self._inventory.get_hosts(new_play.hosts, ignore_restrictions=True))

         max_serial = 0
         if new_play.serial:

@@ -353,17 +353,20 @@ class TaskQueueManager:

         for method in methods:
             try:
-                # temporary hack, required due to a change in the callback API, so
-                # we don't break backwards compatibility with callbacks which were
-                # designed to use the original API
+                # Previously, the `v2_playbook_on_start` callback API did not accept
+                # any arguments. In recent versions of the v2 callback API, the play-
+                # book that started execution is given. In order to support both of
+                # these method signatures, we need to use this `inspect` hack to send
+                # no arguments to the methods that don't accept them. This way, we can
+                # not break backwards compatibility until that API is deprecated.
                 # FIXME: target for removal and revert to the original code here after a year (2017-01-14)
                 if method_name == 'v2_playbook_on_start':
                     import inspect
-                    (f_args, f_varargs, f_keywords, f_defaults) = inspect.getargspec(method)
-                    if 'playbook' in f_args:
-                        method(*args, **kwargs)
-                    else:
+                    argspec = inspect.getargspec(method)
+                    if argspec.args == ['self']:
                         method()
+                    else:
+                        method(*args, **kwargs)
                 else:
                     method(*args, **kwargs)
             except Exception as e:
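The inspect hack above dispatches on the callback's declared parameters, so old no-argument v2_playbook_on_start implementations keep working alongside new ones that accept the playbook. The same technique in miniature; note inspect.getargspec is the era-appropriate API here (it was removed in Python 3.11 in favour of inspect.signature):

import inspect

def call_compat(method, *args, **kwargs):
    # Old-style callbacks declared only `self`; new-style ones also accept
    # the playbook argument. Inspect the bound method and call accordingly.
    argspec = inspect.getargspec(method)
    if argspec.args == ['self']:
        return method()
    return method(*args, **kwargs)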
@@ -62,11 +62,13 @@ class TaskResult:
         return self._check_key('unreachable')

     def _check_key(self, key):
-        if self._result.get('results', []):
+        '''get a specific key from the result or it's items'''
+
+        if isinstance(self._result, dict) and key in self._result:
+            return self._result.get(key, False)
+        else:
             flag = False
             for res in self._result.get('results', []):
                 if isinstance(res, dict):
                     flag |= res.get(key, False)
             return flag
-        else:
-            return self._result.get(key, False)
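For loop results, _check_key above ORs the flag across the per-item results list, so a single changed or failed item marks the whole task. A sketch of the same aggregation:

def check_key(result, key):
    # A top-level key wins; otherwise OR the flag across per-item results,
    # as _check_key does for tasks that ran with a loop.
    if isinstance(result, dict) and key in result:
        return result.get(key, False)
    flag = False
    for res in result.get('results', []):
        if isinstance(res, dict):
            flag |= res.get(key, False)
    return flag

assert check_key({'results': [{'changed': False}, {'changed': True}]}, 'changed') is True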
@@ -21,7 +21,6 @@ __metaclass__ = type

 import fnmatch
 import os
-import subprocess
 import sys
 import re
 import itertools

@@ -38,6 +37,7 @@ from ansible.module_utils._text import to_bytes, to_text
 from ansible.parsing.utils.addresses import parse_address
 from ansible.plugins import vars_loader
 from ansible.utils.vars import combine_vars
+from ansible.utils.path import unfrackpath

 try:
     from __main__ import display

@@ -58,7 +58,7 @@ class Inventory(object):

         # the host file file, or script path, or list of hosts
         # if a list, inventory data will NOT be loaded
-        self.host_list = host_list
+        self.host_list = unfrackpath(host_list, follow=False)
         self._loader = loader
         self._variable_manager = variable_manager
         self.localhost = None

@@ -158,17 +158,39 @@ class Inventory(object):

         self._vars_plugins = [ x for x in vars_loader.all(self) ]

+        ### POST PROCESS groups and hosts after specific parser was invoked
+
+        hosts = []
+        group_names = set()
+
         # set group vars from group_vars/ files and vars plugins
         for g in self.groups:
             group = self.groups[g]
-            group.vars = combine_vars(group.vars, self.get_group_variables(group.name))
+            self.get_group_vars(group)
+            group_names.add(group.name)
+            hosts.extend(group.get_hosts())

         # get host vars from host_vars/ files and vars plugins
-        for host in self.get_hosts(ignore_limits=True, ignore_restrictions=True):
-            host.vars = combine_vars(host.vars, self.get_host_variables(host.name))
+        for host in hosts:
+            self.get_host_vars(host)
+
+            mygroups = host.get_groups()
+
+            # ensure hosts are always in 'all'
+            if all not in mygroups:
+                all.add_host(host)
+
+            if ungrouped in mygroups:
+                # clear ungrouped of any incorrectly stored by parser
+                if set(mygroups).difference(set([all, ungrouped])):
+                    host.remove_group(ungrouped)
+            else:
+                # add ungrouped hosts to ungrouped
+                length = len(mygroups)
+                if length == 0 or (length == 1 and all in mygroups):
+                    ungrouped.add_host(host)

     def _match(self, str, pattern_str):
         try:
             if pattern_str.startswith('~'):

@@ -780,7 +802,10 @@ class Inventory(object):
         path = os.path.realpath(os.path.join(basedir, 'group_vars'))
         found_vars = set()
         if os.path.exists(path):
-            found_vars = set(os.listdir(to_text(path)))
+            if os.path.isdir(path):
+                found_vars = set(os.listdir(to_text(path)))
+            else:
+                display.warning("Found group_vars that is not a directory, skipping: %s" % path)
         return found_vars

     def _find_host_vars_files(self, basedir):

@@ -25,6 +25,7 @@ import os
 from ansible import constants as C
 from ansible.errors import AnsibleError
 from ansible.utils.vars import combine_vars
+from ansible.module_utils._text import to_native

 #FIXME: make into plugins
 from ansible.inventory.ini import InventoryParser as InventoryINIParser

@@ -44,11 +45,10 @@ def get_file_parser(hostsfile, groups, loader):
     parser = None

     try:
-        inv_file = open(hostsfile)
-        first_line = inv_file.readlines()[0]
-        inv_file.close()
-        if first_line.startswith('#!'):
-            shebang_present = True
+        with open(hostsfile, 'rb') as inv_file:
+            initial_chars = inv_file.read(2)
+            if initial_chars.startswith(b'#!'):
+                shebang_present = True
     except:
         pass

@@ -59,7 +59,7 @@ def get_file_parser(hostsfile, groups, loader):
             parser = InventoryScript(loader=loader, groups=groups, filename=hostsfile)
             processed = True
         except Exception as e:
-            myerr.append(str(e))
+            myerr.append('Attempted to execute "%s" as inventory script: %s' % (hostsfile, to_native(e)))
     elif shebang_present:

         myerr.append("The inventory file \'%s\' looks like it should be an executable inventory script, but is not marked executable. Perhaps you want to correct this with `chmod +x %s`?" % (hostsfile, hostsfile))

@@ -70,7 +70,7 @@ def get_file_parser(hostsfile, groups, loader):
             parser = InventoryYAMLParser(loader=loader, groups=groups, filename=hostsfile)
             processed = True
         except Exception as e:
-            myerr.append(str(e))
+            myerr.append('Attempted to read "%s" as YAML: %s' % (to_native(hostsfile), to_native(e)))

     # ini
     if not processed and not shebang_present:

@@ -78,7 +78,7 @@ def get_file_parser(hostsfile, groups, loader):
             parser = InventoryINIParser(loader=loader, groups=groups, filename=hostsfile)
             processed = True
         except Exception as e:
-            myerr.append(str(e))
+            myerr.append('Attempted to read "%s" as ini file: %s ' % (to_native(hostsfile), to_native(e)))

     if not processed and myerr:
         raise AnsibleError('\n'.join(myerr))

@@ -114,6 +114,12 @@ class Group:
             host.add_group(self)
             self.clear_hosts_cache()

+    def remove_host(self, host):
+
+        self.hosts.remove(host)
+        host.remove_group(self)
+        self.clear_hosts_cache()
+
     def set_variable(self, key, value):

         self.vars[key] = value

@@ -112,6 +112,10 @@ class Host:

             self.groups.append(group)

+    def remove_group(self, group):
+
+        self.groups.remove(group)
+
     def set_variable(self, key, value):

         self.vars[key]=value

@@ -138,6 +142,6 @@ class Host:
     def get_group_vars(self):
         results = {}
         groups = self.get_groups()
-        for group in sorted(groups, key=lambda g: g.depth):
+        for group in sorted(groups, key=lambda g: (g.depth, g.name)):
            results = combine_vars(results, group.get_vars())
         return results
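Sorting groups by (depth, name) rather than depth alone makes variable precedence deterministic when two groups share a depth: combine_vars lets later merges override earlier ones, so without the name tiebreaker the winner could change between runs. A toy illustration:

class Group:
    def __init__(self, name, depth):
        self.name, self.depth = name, depth

groups = [Group('zebra', 1), Group('alpha', 1), Group('parent', 0)]

# The merge order is now stable: parent, then alpha, then zebra, so
# 'zebra' (merged last) wins ties at depth 1 on every run.
ordered = sorted(groups, key=lambda g: (g.depth, g.name))
assert [g.name for g in ordered] == ['parent', 'alpha', 'zebra']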
@@ -38,9 +38,10 @@ class InventoryParser(object):
     Takes an INI-format inventory file and builds a list of groups and subgroups
     with their associated hosts and variable settings.
     """
+    _COMMENT_MARKERS = frozenset((u';', u'#'))
+    b_COMMENT_MARKERS = frozenset((b';', b'#'))

     def __init__(self, loader, groups, filename=C.DEFAULT_HOST_LIST):
         self._loader = loader
         self.filename = filename

         # Start with an empty host list and whatever groups we're passed in

@@ -52,13 +53,28 @@ class InventoryParser(object):

         # Read in the hosts, groups, and variables defined in the
         # inventory file.

         if loader:
-            (data, private) = loader._get_file_contents(filename)
+            (b_data, private) = loader._get_file_contents(filename)
         else:
-            with open(filename) as fh:
-                data = to_text(fh.read())
-        data = data.split('\n')
+            with open(filename, 'rb') as fh:
+                b_data = fh.read()
+
+        try:
+            # Faster to do to_text once on a long string than many
+            # times on smaller strings
+            data = to_text(b_data, errors='surrogate_or_strict').splitlines()
+        except UnicodeError:
+            # Handle non-utf8 in comment lines: https://github.com/ansible/ansible/issues/17593
+            data = []
+            for line in b_data.splitlines():
+                if line and line[0] in self.b_COMMENT_MARKERS:
+                    # Replace is okay for comment lines
+                    #data.append(to_text(line, errors='surrogate_or_replace'))
+                    # Currently we only need these lines for accurate lineno in errors
+                    data.append(u'')
+                else:
+                    # Non-comment lines still have to be valid uf-8
+                    data.append(to_text(line, errors='surrogate_or_strict'))

         self._parse(data)

@@ -89,7 +105,7 @@ class InventoryParser(object):
             line = line.strip()

             # Skip empty lines and comments
-            if line == '' or line.startswith(";") or line.startswith("#"):
+            if not line or line[0] in self._COMMENT_MARKERS:
                 continue

             # Is this a [section] header? That tells us what group we're parsing

@@ -70,7 +70,7 @@ class InventoryParser(object):
         # 'all' at the time it was created.
         for group in self.groups.values():
             if group.depth == 0 and group.name not in ('all', 'ungrouped'):
-                self.groups['all'].add_child_group(Group(group_name))
+                self.groups['all'].add_child_group(group)

     def _parse_groups(self, group, group_data):

@@ -173,12 +173,6 @@ def to_text(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
         else:
             errors = 'strict'

-    if errors is None:
-        if PY3:
-            errors = 'surrogateescape'
-        else:
-            errors = 'replace'
-
     if isinstance(obj, binary_type):
         return obj.decode(encoding, errors)

@@ -35,8 +35,6 @@ from ansible.module_utils.network import to_list
 from ansible.module_utils.shell import CliBase
 from ansible.module_utils.netcli import Command

-add_argument('show_command', dict(default='show running-config',
-              choices=['show running-config', 'more system:running-config']))
 add_argument('context', dict(required=False))

@@ -64,8 +62,13 @@ class Cli(CliBase):

     def authorize(self, params, **kwargs):
         passwd = params['auth_pass']
+        errors = self.shell.errors
+        # Disable errors (if already in enable mode)
+        self.shell.errors = []
         cmd = Command('enable', prompt=self.NET_PASSWD_RE, response=passwd)
         self.execute([cmd, 'no terminal pager'])
+        # Reapply error handling
+        self.shell.errors = errors

     def change_context(self, params):
         context = params['context']

@@ -88,10 +91,16 @@ class Cli(CliBase):
         responses = self.execute(cmds)
         return responses[1:]

-    def get_config(self, include_defaults=False):
-        cmd = 'show running-config'
-        if include_defaults:
-            cmd += ' all'
+    def get_config(self, include=None):
+        if include not in [None, 'defaults', 'passwords']:
+            raise ValueError('include must be one of None, defaults, passwords')
+        if include == 'passwords':
+            cmd = 'more system:running-config'
+        elif include == 'defaults':
+            cmd = 'show running-config all'
+        else:
+            cmd = 'show running-config'
         return self.run_commands(cmd)[0]

     def load_config(self, commands):

@@ -27,7 +27,7 @@ import copy
 import importlib
 import inspect

-from distutils.version import LooseVersion
+from packaging.version import Version
 from os.path import expanduser
 from ansible.module_utils.basic import *

@@ -72,9 +72,18 @@ AZURE_FAILED_STATE = "Failed"
 HAS_AZURE = True
 HAS_AZURE_EXC = None

+HAS_MSRESTAZURE = True
+HAS_MSRESTAZURE_EXC = None
+
+# NB: packaging issue sometimes cause msrestazure not to be installed, check it separately
+try:
+    from msrest.serialization import Serializer
+except ImportError as exc:
+    HAS_MSRESTAZURE_EXC = exc
+    HAS_MSRESTAZURE = False
+
 try:
     from enum import Enum
-    from msrest.serialization import Serializer
     from msrestazure.azure_exceptions import CloudError
     from azure.mgmt.network.models import PublicIPAddress, NetworkSecurityGroup, SecurityRule, NetworkInterface, \
         NetworkInterfaceIPConfiguration, Subnet

@@ -92,7 +101,6 @@ except ImportError as exc:
     HAS_AZURE_EXC = exc
     HAS_AZURE = False

-
 def azure_id_to_dict(id):
     pieces = re.sub(r'^\/', '', id).split('/')
     result = {}

@@ -112,14 +120,6 @@ AZURE_EXPECTED_VERSIONS = dict(

 AZURE_MIN_RELEASE = '2.0.0rc5'

-
-def check_client_version(client_name, client_version, expected_version):
-    # Pinning Azure modules to 2.0.0rc5.
-    if LooseVersion(client_version) != LooseVersion(expected_version):
-        self.fail("Installed {0} client version is {1}. The supported version is {2}. Try "
-                  "`pip install azure=={3}`".format(client_name, client_version, expected_version,
-                                                    AZURE_MIN_RELEASE))
-
 class AzureRMModuleBase(object):

     def __init__(self, derived_arg_spec, bypass_checks=False, no_log=False,

@@ -150,8 +150,12 @@ class AzureRMModuleBase(object):
                                     supports_check_mode=supports_check_mode,
                                     required_if=merged_required_if)

+        if not HAS_MSRESTAZURE:
+            self.fail("Do you have msrestazure installed? Try `pip install msrestazure`"
+                      "- {0}".format(HAS_MSRESTAZURE_EXC))
+
         if not HAS_AZURE:
-            self.fail("Do you have azure=={1} installed? Try `pip install azure=={1}`"
+            self.fail("Do you have azure>={1} installed? Try `pip install 'azure>={1}' --upgrade`"
                       "- {0}".format(HAS_AZURE_EXC, AZURE_MIN_RELEASE))

         self._network_client = None

@@ -192,6 +196,13 @@ class AzureRMModuleBase(object):
         res = self.exec_module(**self.module.params)
         self.module.exit_json(**res)

+    def check_client_version(self, client_name, client_version, expected_version):
+        # Ensure Azure modules are at least 2.0.0rc5.
+        if Version(client_version) < Version(expected_version):
+            self.fail("Installed {0} client version is {1}. The supported version is {2}. Try "
+                      "`pip install azure>={3} --upgrade`".format(client_name, client_version, expected_version,
+                                                                  AZURE_MIN_RELEASE))
+
     def exec_module(self, **kwargs):
         self.fail("Error: {0} failed to implement exec_module method.".format(self.__class__.__name__))
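Replacing distutils' LooseVersion with packaging.version.Version also changes the comparison from an exact pin to a minimum: pre-release ordering is explicit, so '2.0.0rc5' correctly sorts below '2.0.0'. A quick sketch, assuming the third-party packaging library is importable:

from packaging.version import Version

assert Version('2.0.0rc5') < Version('2.0.0')

def needs_upgrade(installed, minimum):
    # The check_client_version method above fails the module when this is True.
    return Version(installed) < Version(minimum)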
|
@ -241,12 +252,12 @@ class AzureRMModuleBase(object):
|
|||
new_tags = copy.copy(tags) if isinstance(tags, dict) else dict()
|
||||
changed = False
|
||||
if isinstance(self.module.params.get('tags'), dict):
|
||||
for key, value in self.module.params['tags'].iteritems():
|
||||
for key, value in self.module.params['tags'].items():
|
||||
if not new_tags.get(key) or new_tags[key] != value:
|
||||
changed = True
|
||||
new_tags[key] = value
|
||||
if isinstance(tags, dict):
|
||||
for key, value in tags.iteritems():
|
||||
for key, value in tags.items():
|
||||
if not self.module.params['tags'].get(key):
|
||||
new_tags.pop(key)
|
||||
changed = True
|
||||
|
@ -319,7 +330,7 @@ class AzureRMModuleBase(object):
|
|||
|
||||
def _get_env_credentials(self):
|
||||
env_credentials = dict()
|
||||
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems():
|
||||
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
|
||||
env_credentials[attribute] = os.environ.get(env_variable, None)
|
||||
|
||||
if env_credentials['profile']:
|
||||
|
@ -338,7 +349,7 @@ class AzureRMModuleBase(object):
|
|||
self.log('Getting credentials')
|
||||
|
||||
arg_credentials = dict()
|
||||
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems():
|
||||
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
|
||||
arg_credentials[attribute] = params.get(attribute, None)
|
||||
|
||||
# try module params
|
||||
|
@ -574,7 +585,7 @@ class AzureRMModuleBase(object):
|
|||
def storage_client(self):
|
||||
self.log('Getting storage client...')
|
||||
if not self._storage_client:
|
||||
check_client_version('storage', storage_client_version, AZURE_EXPECTED_VERSIONS['storage_client_version'])
|
||||
self.check_client_version('storage', storage_client_version, AZURE_EXPECTED_VERSIONS['storage_client_version'])
|
||||
self._storage_client = StorageManagementClient(self.azure_credentials, self.subscription_id)
|
||||
self._register('Microsoft.Storage')
|
||||
return self._storage_client
|
||||
|
@ -583,7 +594,7 @@ class AzureRMModuleBase(object):
|
|||
def network_client(self):
|
||||
self.log('Getting network client')
|
||||
if not self._network_client:
|
||||
check_client_version('network', network_client_version, AZURE_EXPECTED_VERSIONS['network_client_version'])
|
||||
self.check_client_version('network', network_client_version, AZURE_EXPECTED_VERSIONS['network_client_version'])
|
||||
self._network_client = NetworkManagementClient(self.azure_credentials, self.subscription_id)
|
||||
self._register('Microsoft.Network')
|
||||
return self._network_client
|
||||
|
@ -592,7 +603,7 @@ class AzureRMModuleBase(object):
|
|||
def rm_client(self):
|
||||
self.log('Getting resource manager client')
|
||||
if not self._resource_client:
|
||||
check_client_version('resource', resource_client_version, AZURE_EXPECTED_VERSIONS['resource_client_version'])
|
||||
self.check_client_version('resource', resource_client_version, AZURE_EXPECTED_VERSIONS['resource_client_version'])
|
||||
self._resource_client = ResourceManagementClient(self.azure_credentials, self.subscription_id)
|
||||
return self._resource_client
|
||||
|
||||
|
@ -600,7 +611,7 @@ class AzureRMModuleBase(object):
|
|||
def compute_client(self):
|
||||
self.log('Getting compute client')
|
||||
if not self._compute_client:
|
||||
check_client_version('compute', compute_client_version, AZURE_EXPECTED_VERSIONS['compute_client_version'])
|
||||
self.check_client_version('compute', compute_client_version, AZURE_EXPECTED_VERSIONS['compute_client_version'])
|
||||
self._compute_client = ComputeManagementClient(self.azure_credentials, self.subscription_id)
|
||||
self._register('Microsoft.Compute')
|
||||
return self._compute_client
|
||||
|
|
|
@@ -5,6 +5,7 @@
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,

@@ -143,6 +144,8 @@ from ansible.module_utils.six import (PY2, PY3, b, binary_type, integer_types,
from ansible.module_utils.six.moves import map, reduce
from ansible.module_utils._text import to_native, to_bytes, to_text

PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)

_NUMBERTYPES = tuple(list(integer_types) + [float])

# Deprecated compat. Only kept in case another module used these names Using

@@ -634,6 +637,7 @@ class AnsibleModule(object):
see library/* for examples
'''

self._name = os.path.basename(__file__) #initialize name until we can parse from options
self.argument_spec = argument_spec
self.supports_check_mode = supports_check_mode
self.check_mode = False

@@ -713,7 +717,7 @@ class AnsibleModule(object):

self._set_defaults(pre=False)

if not self.no_log and self._verbosity >= 3:
if not self.no_log:
self._log_invocation()

# finally, make sure we're in a sane working dir

@@ -1664,16 +1668,17 @@ class AnsibleModule(object):
# TODO: generalize a separate log function and make log_invocation use it
# Sanitize possible password argument when logging.
log_args = dict()
passwd_keys = ['password', 'login_password']

for param in self.params:
canon = self.aliases.get(param, param)
arg_opts = self.argument_spec.get(canon, {})
no_log = arg_opts.get('no_log', False)
arg_type = arg_opts.get('type', 'str')

if self.boolean(no_log):
log_args[param] = 'NOT_LOGGING_PARAMETER'
elif param in passwd_keys:
# try to capture all passwords/passphrase named fields
elif arg_type != 'bool' and PASSWORD_MATCH.search(param):
log_args[param] = 'NOT_LOGGING_PASSWORD'
else:
param_val = self.params[param]

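The hunk above drops the fixed passwd_keys list in favour of the PASSWORD_MATCH regex, so any non-boolean parameter whose name merely looks password-like gets masked in the logged invocation. A small self-contained check of what the pattern catches (the names are chosen for illustration):

    import re

    PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)

    for name in ('password', 'login_password', 'passphrase', 'ftp_passwd', 'bypass', 'compass'):
        print(name, bool(PASSWORD_MATCH.search(name)))
    # the first four print True; 'bypass' and 'compass' print False because
    # any prefix must be separated from 'pass' by '-', '_' or whitespace
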
@@ -1843,7 +1848,7 @@ class AnsibleModule(object):
(filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))

blocksize = 64 * 1024
infile = open(filename, 'rb')
infile = open(os.path.realpath(filename), 'rb')
block = infile.read(blocksize)
while block:
digest_method.update(block)

@@ -1924,18 +1929,6 @@ class AnsibleModule(object):

creating = not os.path.exists(b_dest)

try:
login_name = os.getlogin()
except OSError:
# not having a tty can cause the above to fail, so
# just get the LOGNAME environment variable instead
login_name = os.environ.get('LOGNAME', None)

# if the original login_name doesn't match the currently
# logged-in user, or if the SUDO_USER environment variable
# is set, then this user has switched their credentials
switched_user = login_name and login_name != pwd.getpwuid(os.getuid())[0] or os.environ.get('SUDO_USER')

try:
# Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
os.rename(b_src, b_dest)

@@ -1959,19 +1952,28 @@ class AnsibleModule(object):
except (OSError, IOError):
e = get_exception()
self.fail_json(msg='The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), e))
except TypeError:
# We expect that this is happening because python3.4.x and
# below can't handle byte strings in mkstemp(). Traceback
# would end in something like:
# file = _os.path.join(dir, pre + name + suf)
# TypeError: can't concat bytes to str
self.fail_json(msg='Failed creating temp file for atomic move. This usually happens when using Python3 less than Python3.5. Please use Python2.x or Python3.5 or greater.', exception=sys.exc_info())

b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')

try:
try:
# close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
os.close(tmp_dest_fd)
# leaves tmp file behind when sudo and not root
if switched_user and os.getuid() != 0:
# leaves tmp file behind when sudo and not root
try:
shutil.move(b_src, b_tmp_dest_name)
except OSError:
# cleanup will happen by 'rm' of tempdir
# copy2 will preserve some metadata
shutil.copy2(b_src, b_tmp_dest_name)
else:
shutil.move(b_src, b_tmp_dest_name)

if self.selinux_enabled():
self.set_context_if_different(
b_tmp_dest_name, context, False)

@@ -1986,12 +1988,14 @@ class AnsibleModule(object):
try:
os.rename(b_tmp_dest_name, b_dest)
except (shutil.Error, OSError, IOError):
e = get_exception()
if unsafe_writes:
self._unsafe_writes(b_tmp_dest_name, b_dest, get_exception())
self._unsafe_writes(b_tmp_dest_name, b_dest, e)
else:
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, exception))
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e))
except (shutil.Error, OSError, IOError):
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, exception))
e = get_exception()
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e))
finally:
self.cleanup(b_tmp_dest_name)

@@ -2001,8 +2005,12 @@ class AnsibleModule(object):
umask = os.umask(0)
os.umask(umask)
os.chmod(b_dest, DEFAULT_PERM & ~umask)
if switched_user:
os.chown(b_dest, os.getuid(), os.getgid())
try:
os.chown(b_dest, os.geteuid(), os.getegid())
except OSError:
# We're okay with trying our best here. If the user is not
# root (or old Unices) they won't be able to chown.
pass

if self.selinux_enabled():
# rename might not preserve context

@@ -2030,7 +2038,7 @@ class AnsibleModule(object):
else:
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, exception))

def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None):
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict'):
'''
Execute a command, returns rc, stdout, and stderr.

@@ -2052,8 +2060,27 @@ class AnsibleModule(object):
:kw prompt_regex: Regex string (not a compiled regex) which can be
used to detect prompts in the stdout which would otherwise cause
the execution to hang (especially if no input data is specified)
:kwarg environ_update: dictionary to *update* os.environ with
:kw environ_update: dictionary to *update* os.environ with
:kw umask: Umask to be used when running the command. Default None
:kw encoding: Since we return native strings, on python3 we need to
know the encoding to use to transform from bytes to text. If you
want to always get bytes back, use encoding=None. The default is
"utf-8". This does not affect transformation of strings given as
args.
:kw errors: Since we return native strings, on python3 we need to
transform stdout and stderr from bytes to text. If the bytes are
undecodable in the ``encoding`` specified, then use this error
handler to deal with them. The default is ``surrogate_or_strict``
which means that the bytes will be decoded using the
surrogateescape error handler if available (available on all
python3 versions we support) otherwise a UnicodeError traceback
will be raised. This does not affect transformations of strings
given as args.
:returns: A 3-tuple of return code (integer), stdout (native string),
and stderr (native string). On python2, stdout and stderr are both
byte strings. On python3, stdout and stderr are text strings converted
according to the encoding and errors parameters. If you want byte
strings on python3, use encoding=None to turn decoding to text off.
'''

shell = False

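Per the docstring added above, callers now get native strings back by default and can opt out of decoding. A short usage sketch (module is assumed to be an already-constructed AnsibleModule; the command is arbitrary):

    rc, out, err = module.run_command(['/bin/ls', '/tmp'])
    # out and err are native strings: bytes on python2, utf-8 text on python3

    rc, b_out, b_err = module.run_command(['/bin/ls', '/tmp'], encoding=None)
    # encoding=None skips decoding, so python3 callers get raw bytes back

    if rc != 0:
        module.fail_json(msg='ls failed: %s' % err)
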
@@ -2167,14 +2194,13 @@ class AnsibleModule(object):
stderr=subprocess.PIPE,
)

if cwd and os.path.isdir(cwd):
kwargs['cwd'] = cwd

# store the pwd
prev_dir = os.getcwd()

# make sure we're in the right working directory
if cwd and os.path.isdir(cwd):
cwd = os.path.abspath(os.path.expanduser(cwd))
kwargs['cwd'] = cwd
try:
os.chdir(cwd)
except (OSError, IOError):

@@ -2186,13 +2212,8 @@ class AnsibleModule(object):
old_umask = os.umask(umask)

try:

if self._debug:
if isinstance(args, list):
running = ' '.join(args)
else:
running = args
self.log('Executing: ' + running)
self.log('Executing: ' + clean_args)
cmd = subprocess.Popen(args, **kwargs)

# the communication logic here is essentially taken from that

@@ -2225,6 +2246,10 @@ class AnsibleModule(object):
# if we're checking for prompts, do it now
if prompt_re:
if prompt_re.search(stdout) and not data:
if encoding:
stdout = to_native(stdout, encoding=encoding, errors=errors)
else:
stdout = stdout
return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
# only break out if no pipes are left to read or
# the pipes are completely read and

@@ -2246,9 +2271,11 @@ class AnsibleModule(object):
rc = cmd.returncode
except (OSError, IOError):
e = get_exception()
self.log("Error Executing CMD:%s Exception:%s" % (clean_args, to_native(e)))
self.fail_json(rc=e.errno, msg=to_native(e), cmd=clean_args)
except Exception:
e = get_exception()
self.log("Error Executing CMD:%s Exception:%s" % (clean_args,to_native(traceback.format_exc())))
self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=clean_args)

# Restore env settings

@@ -2268,6 +2295,9 @@ class AnsibleModule(object):
# reset the pwd
os.chdir(prev_dir)

if encoding is not None:
return (rc, to_native(stdout, encoding=encoding, errors=errors),
to_native(stderr, encoding=encoding, errors=errors))
return (rc, stdout, stderr)

def append_to_file(self, filename, str):

@@ -28,6 +28,7 @@
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import time
from ansible.module_utils.six import iteritems

try:
from cs import CloudStack, CloudStackException, read_config

@@ -148,7 +149,7 @@ class AnsibleCloudStack(object):


def has_changed(self, want_dict, current_dict, only_keys=None):
for key, value in want_dict.iteritems():
for key, value in want_dict.items():

# Optionally limit by a list of keys
if only_keys and key not in only_keys:

@@ -343,6 +344,9 @@ class AnsibleCloudStack(object):
zone = self.module.params.get('zone')
zones = self.cs.listZones()

if not zones:
self.module.fail_json(msg="No zones available. Please create a zone first")

# use the first zone if no zone param given
if not zone:
self.zone = zones['zone'][0]

@@ -510,12 +514,12 @@ class AnsibleCloudStack(object):
if resource:
returns = self.common_returns.copy()
returns.update(self.returns)
for search_key, return_key in returns.iteritems():
for search_key, return_key in returns.items():
if search_key in resource:
self.result[return_key] = resource[search_key]

# Bad bad API does not always return int when it should.
for search_key, return_key in self.returns_to_int.iteritems():
for search_key, return_key in self.returns_to_int.items():
if search_key in resource:
self.result[return_key] = int(resource[search_key])

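This file, like the EC2, Exoscale and OpenStack utilities further down, swaps dict.iteritems() for dict.items(): iteritems() no longer exists on python3, while items() works on both (on python2 it builds a list rather than an iterator, which is fine for dicts of this size). A two-line illustration:

    want = {'displaytext': 'web', 'size': 20}
    current = {'displaytext': 'db', 'size': 20}
    print([k for k, v in want.items() if current.get(k) != v])  # ['displaytext'], on py2 and py3 alike
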
@@ -42,8 +42,9 @@ def get_config(module):
if not contents:
contents = module.config.get_config()
module.params['config'] = contents

return NetworkConfig(indent=1, contents=contents[0])
return NetworkConfig(indent=1, contents=contents[0])
else:
return NetworkConfig(indent=1, contents=contents)


def get_sublevel_config(running_config, module):

@@ -54,11 +55,13 @@ def get_sublevel_config(running_config, module):
contents = obj.children
contents[:0] = module.params['parents']

indent = 0
for c in contents:
if isinstance(c, str):
current_config_contents.append(c)
current_config_contents.append(c.rjust(len(c) + indent, ' '))
if isinstance(c, ConfigLine):
current_config_contents.append(c.raw)
indent = indent + 1
sublevel_config = '\n'.join(current_config_contents)

return sublevel_config

@@ -38,28 +38,25 @@ from ansible.module_utils.netcfg import NetworkConfig, ConfigLine, ignore_line,

def get_config(module):
contents = module.params['config']

if not contents:
contents = module.config.get_config()
module.params['config'] = contents

return Dellos6NetworkConfig(indent=0, contents=contents[0])
return Dellos6NetworkConfig(indent=0, contents=contents[0])
else:
return Dellos6NetworkConfig(indent=0, contents=contents)


def get_sublevel_config(running_config, module):
contents = list()
current_config_contents = list()
sublevel_config = Dellos6NetworkConfig(indent=0)

obj = running_config.get_object(module.params['parents'])
if obj:
contents = obj.children

for c in contents:
if isinstance(c, ConfigLine):
current_config_contents.append(c.raw)
sublevel_config.add(current_config_contents, module.params['parents'])

return sublevel_config


@@ -68,6 +65,7 @@ def os6_parse(lines, indent=None, comment_tokens=None):
re.compile(r'^vlan.*$'),
re.compile(r'^stack.*$'),
re.compile(r'^interface.*$'),
re.compile(r'datacenter-bridging.*$'),
re.compile(r'line (console|telnet|ssh).*$'),
re.compile(r'ip ssh !(server).*$'),
re.compile(r'ip (dhcp|vrf).*$'),

@@ -85,54 +83,66 @@ def os6_parse(lines, indent=None, comment_tokens=None):
re.compile(r'banner motd.*$'),
re.compile(r'openflow.*$'),
re.compile(r'support-assist.*$'),
re.compile(r'template.*$'),
re.compile(r'address-family.*$'),
re.compile(r'spanning-tree mst configuration.*$'),
re.compile(r'logging.*$'),
re.compile(r'(radius-server|tacacs-server) host.*$')]

childline = re.compile(r'^exit$')

config = list()
inSubLevel = False
parent = None
children = list()
subcommandcount = 0

parent = list()
children = []
parent_match = False
for line in str(lines).split('\n'):
text = str(re.sub(r'([{};])', '', line)).strip()

cfg = ConfigLine(text)
cfg.raw = line

if not text or ignore_line(text, comment_tokens):
parent = None
children = list()
inSubLevel = False
parent = list()
children = []
continue

if inSubLevel is False:
else:
parent_match = False
# handle sublevel parent
for pr in sublevel_cmds:
if pr.match(line):
parent = cfg
config.append(parent)
inSubLevel = True
if len(parent) != 0:
cfg.parents.extend(parent)
parent.append(cfg)
config.append(cfg)
if children:
children.insert(len(parent) - 1, [])
children[len(parent) - 2].append(cfg)
parent_match = True
continue
if parent is None:
# handle exit
if childline.match(line):
if children:
parent[len(children) - 1].children.extend(children[len(children) - 1])
if len(children) > 1:
parent[len(children) - 2].children.extend(parent[len(children) - 1].children)
cfg.parents.extend(parent)
children.pop()
parent.pop()
if not children:
children = list()
if parent:
cfg.parents.extend(parent)
parent = list()
config.append(cfg)
# handle sublevel children
elif parent_match is False and len(parent) > 0:
if not children:
cfglist = [cfg]
children.append(cfglist)
else:
children[len(parent) - 1].append(cfg)
cfg.parents.extend(parent)
config.append(cfg)
# handle global commands
elif not parent:
config.append(cfg)

# handle sub level commands
elif inSubLevel and childline.match(line):
parent.children = children
inSubLevel = False
children = list()
parent = None

# handle top level commands
elif inSubLevel:
children.append(cfg)
cfg.parents = [parent]
config.append(cfg)

else: # global level
config.append(cfg)

return config

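The rewritten os6_parse above replaces the single-level inSubLevel flag with a stack: parent holds the chain of enclosing sublevel commands, children holds one list per nesting depth, and each literal exit pops a level, so nested blocks keep their full parent chain. A deliberately condensed sketch of that stack idea, with made-up predicates instead of the real ConfigLine bookkeeping and sublevel_cmds regex list:

    def parse_nested(lines, is_parent, is_exit):
        # returns (parent chain, line) pairs; the chain grows on parent lines
        # and shrinks on exit lines, mirroring the loop above
        config = []
        parents = []
        for line in lines:
            if is_exit(line):
                if parents:
                    parents.pop()
            elif is_parent(line):
                config.append((tuple(parents), line))
                parents.append(line)
            else:
                config.append((tuple(parents), line))
        return config

    demo = ['interface Te1/0/1', 'mtu 9216', 'exit', 'hostname sw1']
    print(parse_nested(demo,
                       is_parent=lambda l: l.startswith('interface'),
                       is_exit=lambda l: l == 'exit'))
    # [((), 'interface Te1/0/1'), (('interface Te1/0/1',), 'mtu 9216'), ((), 'hostname sw1')]
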
@@ -141,6 +151,18 @@ class Dellos6NetworkConfig(NetworkConfig):
def load(self, contents):
self._config = os6_parse(contents, self.indent, DEFAULT_COMMENT_TOKENS)

def diff_line(self, other, path=None):
diff = list()
for item in self.items:
if str(item) == "exit":
for diff_item in diff:
if item.parents == diff_item.parents:
diff.append(item)
break
elif item not in other:
diff.append(item)
return diff


class Cli(CliBase):

@@ -160,11 +182,9 @@ class Cli(CliBase):
re.compile(r"[^\r\n]+ not found", re.I),
re.compile(r"'[^']' +returned error code: ?\d+")]


def connect(self, params, **kwargs):
super(Cli, self).connect(params, kickstart=False, **kwargs)


def authorize(self, params, **kwargs):
passwd = params['auth_pass']
self.run_commands(

@@ -172,7 +192,6 @@ class Cli(CliBase):
)
self.run_commands('terminal length 0')


def configure(self, commands, **kwargs):
cmds = ['configure terminal']
cmds.extend(to_list(commands))

@@ -181,17 +200,13 @@ class Cli(CliBase):
responses.pop(0)
return responses


def get_config(self, **kwargs):
return self.execute(['show running-config'])


def load_config(self, commands, **kwargs):
return self.configure(commands)


def save_config(self):
self.execute(['copy running-config startup-config'])


Cli = register_transport('cli', default=True)(Cli)

@@ -42,8 +42,9 @@ def get_config(module):
if not contents:
contents = module.config.get_config()
module.params['config'] = contents

return NetworkConfig(indent=1, contents=contents[0])
return NetworkConfig(indent=1, contents=contents[0])
else:
return NetworkConfig(indent=1, contents=contents)


def get_sublevel_config(running_config, module):

@@ -54,11 +55,13 @@ def get_sublevel_config(running_config, module):
contents = obj.children
contents[:0] = module.params['parents']

indent = 0
for c in contents:
if isinstance(c, str):
current_config_contents.append(c)
current_config_contents.append(c.rjust(len(c) + indent, ' '))
if isinstance(c, ConfigLine):
current_config_contents.append(c.raw)
indent = indent + 1
sublevel_config = '\n'.join(current_config_contents)

return sublevel_config

@@ -21,10 +21,10 @@ import re
import json
import sys
import copy

from distutils.version import LooseVersion
from urlparse import urlparse
from ansible.module_utils.basic import *

from ansible.module_utils.basic import AnsibleModule, BOOLEANS_TRUE, BOOLEANS_FALSE
from ansible.module_utils.six.moves.urllib.parse import urlparse

HAS_DOCKER_PY = True
HAS_DOCKER_ERROR = None

@@ -446,8 +446,7 @@ class AnsibleDockerClient(Client):
'''
self.log("Pulling image %s:%s" % (name, tag))
try:
for line in self.pull(name, tag=tag, stream=True):
line = json.loads(line)
for line in self.pull(name, tag=tag, stream=True, decode=True):
self.log(line, pretty_print=True)
if line.get('error'):
if line.get('errorDetail'):

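The pull hunk drops the manual json.loads in favour of docker-py's decode=True, so each streamed status line already arrives as a dict. A minimal sketch of the same call outside the module (assumes the docker-py 1.x client this file builds on and a reachable Docker daemon):

    from docker import Client

    client = Client()
    for line in client.pull('busybox', tag='latest', stream=True, decode=True):
        # each line is a parsed dict such as {'status': 'Pulling fs layer', 'id': '...'}
        if line.get('error'):
            raise RuntimeError(line['error'])
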
@@ -52,7 +52,7 @@ try:
except:
HAS_LOOSE_VERSION = False

from ansible.module_utils.six import string_types
from ansible.module_utils.six import string_types, binary_type, text_type

class AnsibleAWSError(Exception):
pass

@@ -232,8 +232,8 @@ def get_aws_connection_info(module, boto3=False):
boto_params['validate_certs'] = validate_certs

for param, value in boto_params.items():
if isinstance(value, str):
boto_params[param] = unicode(value, 'utf-8', 'strict')
if isinstance(value, binary_type):
boto_params[param] = text_type(value, 'utf-8', 'strict')

return region, ec2_url, boto_params

@@ -343,7 +343,7 @@ def camel_dict_to_snake_dict(camel_dict):


snake_dict = {}
for k, v in camel_dict.iteritems():
for k, v in camel_dict.items():
if isinstance(v, dict):
snake_dict[camel_to_snake(k)] = camel_dict_to_snake_dict(v)
elif isinstance(v, list):

@@ -378,7 +378,7 @@ def ansible_dict_to_boto3_filter_list(filters_dict):
"""

filters_list = []
for k,v in filters_dict.iteritems():
for k,v in filters_dict.items():
filter_dict = {'Name': k}
if isinstance(v, string_types):
filter_dict['Values'] = [v]

@@ -443,7 +443,7 @@ def ansible_dict_to_boto3_tag_list(tags_dict):
"""

tags_list = []
for k,v in tags_dict.iteritems():
for k,v in tags_dict.items():
tags_list.append({'Key': k, 'Value': v})

return tags_list

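The conversion helpers above translate plain Ansible dicts into the list-of-dicts shape boto3 expects. A self-contained restatement of the filter-list conversion for illustration (same logic, standalone names):

    def to_boto3_filter_list(filters_dict):
        # scalars become one-element Values lists; lists pass through
        filters_list = []
        for k, v in filters_dict.items():
            filters_list.append({'Name': k, 'Values': [v] if isinstance(v, str) else v})
        return filters_list

    print(to_boto3_filter_list({'instance-state-name': 'running', 'tag:Name': ['web1', 'web2']}))
    # [{'Name': 'instance-state-name', 'Values': ['running']}, {'Name': 'tag:Name', 'Values': ['web1', 'web2']}]
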
@@ -35,6 +35,7 @@ from ansible.module_utils.network import add_argument, register_transport, to_li
from ansible.module_utils.netcli import Command
from ansible.module_utils.shell import CliBase
from ansible.module_utils.urls import fetch_url, url_argument_spec
from ansible.module_utils._text import to_native

EAPI_FORMATS = ['json', 'text']

@@ -50,7 +51,7 @@ class EosConfigMixin(object):
cmds = ['configure terminal']
cmds.extend(to_list(commands))
cmds.append('end')
responses = self.execute(commands)
responses = self.execute(cmds)
return responses[1:-1]

def get_config(self, include_defaults=False, **kwargs):

@@ -60,6 +61,12 @@ class EosConfigMixin(object):
return self.execute([cmd])[0]

def load_config(self, config, commit=False, replace=False):
if self.supports_sessions():
return self.load_config_session(config, commit, replace)
else:
return self.configure(config)

def load_config_session(self, config, commit=False, replace=False):
""" Loads the configuration into the remote device
"""
session = 'ansible_%s' % int(time.time())

@@ -82,7 +89,7 @@ class EosConfigMixin(object):
self.execute(['no configure session %s' % session])
except NetworkError:
exc = get_exception()
if 'timeout trying to send command' in exc.message:
if 'timeout trying to send command' in to_native(exc):
# try to get control back and get out of config mode
if isinstance(self, Cli):
self.execute(['\x03', 'end'])

@@ -116,6 +123,17 @@ class EosConfigMixin(object):
commands = ['configure session %s' % session, 'abort']
self.execute(commands)

def supports_sessions(self):
try:
if isinstance(self, Eapi):
self.execute(['show configuration sessions'], output='text')
else:
self.execute('show configuration sessions')
return True
except NetworkError:
return False


class Eapi(EosConfigMixin):

@@ -148,6 +166,7 @@ class Eapi(EosConfigMixin):
self.url_args.params['url_username'] = params['username']
self.url_args.params['url_password'] = params['password']
self.url_args.params['validate_certs'] = params['validate_certs']
self.url_args.params['timeout'] = params['timeout']

if params['use_ssl']:
proto = 'https'

@@ -187,10 +206,11 @@ class Eapi(EosConfigMixin):
data = json.dumps(body)

headers = {'Content-Type': 'application/json-rpc'}
timeout = self.url_args.params['timeout']

response, headers = fetch_url(
self.url_args, self.url, data=data, headers=headers,
method='POST'
method='POST', timeout=timeout
)

if headers['status'] != 200:

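The new load_config above probes once for config-session support and quietly falls back to a plain configure on EOS images that lack sessions. The shape of that probe-and-fallback, reduced to a runnable standalone sketch (execute here is a stand-in for the transport call; the real code raises and catches NetworkError):

    class SessionAwareLoader(object):
        def __init__(self, has_sessions):
            self._has_sessions = has_sessions

        def execute(self, command):
            # stand-in for the CLI/eAPI transport call
            if not self._has_sessions:
                raise RuntimeError('% Invalid input')
            return ''

        def supports_sessions(self):
            try:
                self.execute('show configuration sessions')
                return True
            except Exception:
                return False

        def load_config(self, config):
            if self.supports_sessions():
                return 'loaded via session'
            return 'loaded via configure terminal'

    print(SessionAwareLoader(True).load_config(['hostname sw1']))   # loaded via session
    print(SessionAwareLoader(False).load_config(['hostname sw1']))  # loaded via configure terminal
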
@@ -143,7 +143,7 @@ class ExoDns(object):

def has_changed(self, want_dict, current_dict, only_keys=None):
changed = False
for key, value in want_dict.iteritems():
for key, value in want_dict.items():
# Optionally limit by a list of keys
if only_keys and key not in only_keys:
continue

@@ -34,29 +34,8 @@ import pwd

from ansible.module_utils.basic import get_all_subclasses
from ansible.module_utils.six import PY3, iteritems
from ansible.module_utils._text import to_native

# py2 vs py3; replace with six via ansiballz
try:
# python2
import ConfigParser as configparser
except ImportError:
# python3
import configparser

try:
# python2
from StringIO import StringIO
except ImportError:
# python3
from io import StringIO

try:
# python2
from string import maketrans
except ImportError:
# python3
maketrans = str.maketrans # TODO: is this really identical?
from ansible.module_utils.six.moves import configparser, StringIO, reduce
from ansible.module_utils._text import to_native, to_text

try:
import selinux

@@ -261,9 +240,8 @@ class Facts(object):
# try to read it as json first
# if that fails read it with ConfigParser
# if that fails, skip it
rc, out, err = self.module.run_command(fn)
try:
out = out.decode('utf-8', 'strict')
rc, out, err = self.module.run_command(fn)
except UnicodeError:
fact = 'error loading fact - output of running %s was not utf-8' % fn
local[fact_base] = fact

@@ -350,11 +328,15 @@ class Facts(object):
else:
proc_1 = os.path.basename(proc_1)

# The ps command above may return "COMMAND" if the user cannot read /proc, e.g. with grsecurity
if proc_1 == "COMMAND\n":
proc_1 = None

if proc_1 is not None:
proc_1 = to_native(proc_1)
proc_1 = proc_1.strip()

if proc_1 == 'init' or proc_1.endswith('sh'):
if proc_1 is not None and (proc_1 == 'init' or proc_1.endswith('sh')):
# many systems return init, so this cannot be trusted, if it ends in 'sh' it probably is a shell in a container
proc_1 = None

@@ -394,9 +376,8 @@ class Facts(object):
def get_lsb_facts(self):
lsb_path = self.module.get_bin_path('lsb_release')
if lsb_path:
rc, out, err = self.module.run_command([lsb_path, "-a"])
rc, out, err = self.module.run_command([lsb_path, "-a"], errors='surrogate_or_replace')
if rc == 0:
out = out.decode('utf-8', 'replace')
self.facts['lsb'] = {}
for line in out.split('\n'):
if len(line) < 1 or ':' not in line:

@@ -466,8 +447,7 @@ class Facts(object):
def get_caps_facts(self):
capsh_path = self.module.get_bin_path('capsh')
if capsh_path:
rc, out, err = self.module.run_command([capsh_path, "--print"])
out = out.decode('utf-8', 'replace')
rc, out, err = self.module.run_command([capsh_path, "--print"], errors='surrogate_or_replace')
enforced_caps = []
enforced = 'NA'
for line in out.split('\n'):

@@ -648,13 +628,13 @@ class Distribution(object):
OS_FAMILY = dict(
RedHat = 'RedHat', Fedora = 'RedHat', CentOS = 'RedHat', Scientific = 'RedHat',
SLC = 'RedHat', Ascendos = 'RedHat', CloudLinux = 'RedHat', PSBM = 'RedHat',
OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat',
OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat', Virtuozzo = 'RedHat',
XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', Raspbian = 'Debian', Slackware = 'Slackware', SLES = 'Suse',
SLED = 'Suse', openSUSE = 'Suse', SuSE = 'Suse', SLES_SAP = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo',
SLED = 'Suse', openSUSE = 'Suse', openSUSE_Tumbleweed = 'Suse', SuSE = 'Suse', SLES_SAP = 'Suse', SUSE_LINUX = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo',
Archlinux = 'Archlinux', Manjaro = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake', Altlinux = 'Altlinux',
Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris',
SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin',
FreeBSD = 'FreeBSD', HPUX = 'HP-UX', openSUSE_Leap = 'Suse'
FreeBSD = 'FreeBSD', HPUX = 'HP-UX', openSUSE_Leap = 'Suse', Neon = 'Debian'
)

def __init__(self, module):

@@ -1026,11 +1006,11 @@ class LinuxHardware(Hardware):
key = data[0]
if key in self.ORIGINAL_MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
self.facts["%s_mb" % key.lower()] = int(val) / 1024
self.facts["%s_mb" % key.lower()] = int(val) // 1024

if key in self.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
memstats[key.lower()] = int(val) / 1024
memstats[key.lower()] = int(val) // 1024

if None not in (memstats.get('memtotal'), memstats.get('memfree')):
memstats['real:used'] = memstats['memtotal'] - memstats['memfree']

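This hunk and the long run of hardware hunks below change / to // in the memory arithmetic: on python3 (and on python2 with division imported from __future__) the single slash always produces a float, while these facts are meant to be whole mebibyte counts. A two-line worked example:

    kib = 2074636
    print(kib / 1024)   # python3: 2026.01171875, a float
    print(kib // 1024)  # 2026 on python2 and python3 alike
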
@@ -1230,7 +1210,11 @@ class LinuxHardware(Hardware):
self.facts[k] = 'NA'

def _run_lsblk(self, lsblk_path):
args = ['--list', '--noheadings', '--paths', '--output', 'NAME,UUID']
# call lsblk and collect all uuids
# --exclude 2 makes lsblk ignore floppy disks, which are slower to answer than typical timeouts
# this uses the linux major device number
# for details see https://www.kernel.org/doc/Documentation/devices.txt
args = ['--list', '--noheadings', '--paths', '--output', 'NAME,UUID', '--exclude', '2']
cmd = [lsblk_path] + args
rc, out, err = self.module.run_command(cmd)
return rc, out, err

@@ -1268,7 +1252,7 @@ class LinuxHardware(Hardware):
def _run_findmnt(self, findmnt_path):
args = ['--list', '--noheadings', '--notruncate']
cmd = [findmnt_path] + args
rc, out, err = self.module.run_command(cmd)
rc, out, err = self.module.run_command(cmd, errors='surrogate_or_replace')
return rc, out, err

def _find_bind_mounts(self):

@@ -1280,7 +1264,6 @@ class LinuxHardware(Hardware):
rc, out, err = self._run_findmnt(findmnt_path)
if rc != 0:
return bind_mounts
out = out.decode('utf-8', 'replace')

# find bind mounts, in case /etc/mtab is a symlink to /proc/mounts
for line in out.splitlines():

@@ -1359,8 +1342,7 @@ class LinuxHardware(Hardware):
self.facts['devices'] = {}
lspci = self.module.get_bin_path('lspci')
if lspci:
rc, pcidata, err = self.module.run_command([lspci, '-D'])
pcidata = pcidata.decode('utf-8', 'replace')
rc, pcidata, err = self.module.run_command([lspci, '-D'], errors='surrogate_or_replace')
else:
pcidata = None

@@ -1557,10 +1539,10 @@ class SunOSHardware(Hardware):
reserved = int(out.split()[5][:-1])
used = int(out.split()[8][:-1])
free = int(out.split()[10][:-1])
self.facts['swapfree_mb'] = free / 1024
self.facts['swaptotal_mb'] = (free + used) / 1024
self.facts['swap_allocated_mb'] = allocated / 1024
self.facts['swap_reserved_mb'] = reserved / 1024
self.facts['swapfree_mb'] = free // 1024
self.facts['swaptotal_mb'] = (free + used) // 1024
self.facts['swap_allocated_mb'] = allocated // 1024
self.facts['swap_reserved_mb'] = reserved // 1024

@timeout(10)
def get_mount_facts(self):

@@ -1589,7 +1571,6 @@ class OpenBSDHardware(Hardware):
- devices
"""
platform = 'OpenBSD'
DMESG_BOOT = '/var/run/dmesg.boot'

def populate(self):
self.sysctl = self.get_sysctl()

@@ -1631,8 +1612,8 @@ class OpenBSDHardware(Hardware):
# 0 0 0 47512 28160 51 0 0 0 0 0 1 0 116 89 17 0 1 99
rc, out, err = self.module.run_command("/usr/bin/vmstat")
if rc == 0:
self.facts['memfree_mb'] = int(out.splitlines()[-1].split()[4]) / 1024
self.facts['memtotal_mb'] = int(self.sysctl['hw.usermem']) / 1024 / 1024
self.facts['memfree_mb'] = int(out.splitlines()[-1].split()[4]) // 1024
self.facts['memtotal_mb'] = int(self.sysctl['hw.usermem']) // 1024 // 1024

# Get swapctl info. swapctl output looks like:
# total: 69268 1K-blocks allocated, 0 used, 69268 available

@@ -1640,26 +1621,26 @@ class OpenBSDHardware(Hardware):
# total: 69268k bytes allocated = 0k used, 69268k available
rc, out, err = self.module.run_command("/sbin/swapctl -sk")
if rc == 0:
swaptrans = maketrans(' ', ' ')
data = out.split()
self.facts['swapfree_mb'] = int(data[-2].translate(swaptrans, "kmg")) / 1024
self.facts['swaptotal_mb'] = int(data[1].translate(swaptrans, "kmg")) / 1024
swaptrans = { ord(u'k'): None, ord(u'm'): None, ord(u'g'): None}
data = to_text(out, errors='surrogate_or_strict').split()
self.facts['swapfree_mb'] = int(data[-2].translate(swaptrans)) // 1024
self.facts['swaptotal_mb'] = int(data[1].translate(swaptrans)) // 1024

def get_processor_facts(self):
processor = []
dmesg_boot = get_file_content(OpenBSDHardware.DMESG_BOOT)
if not dmesg_boot:
rc, dmesg_boot, err = self.module.run_command("/sbin/dmesg")
i = 0
for line in dmesg_boot.splitlines():
if line.split(' ', 1)[0] == 'cpu%i:' % i:
processor.append(line.split(' ', 1)[1])
i = i + 1
processor_count = i
for i in range(int(self.sysctl['hw.ncpu'])):
processor.append(self.sysctl['hw.model'])

self.facts['processor'] = processor
self.facts['processor_count'] = processor_count
# I found no way to figure out the number of Cores per CPU in OpenBSD
self.facts['processor_cores'] = 'NA'
# The following is partly a lie because there is no reliable way to
# determine the number of physical CPUs in the system. We can only
# query the number of logical CPUs, which hides the number of cores.
# On amd64/i386 we could try to inspect the smt/core/package lines in
# dmesg, however even those have proven to be unreliable.
# So take a shortcut and report the logical number of processors in
# 'processor_count' and 'processor_cores' and leave it at that.
self.facts['processor_count'] = self.sysctl['hw.ncpu']
self.facts['processor_cores'] = self.sysctl['hw.ncpu']

def get_device_facts(self):
devices = []

@@ -1718,8 +1699,8 @@ class FreeBSDHardware(Hardware):
pagecount = int(data[1])
if 'vm.stats.vm.v_free_count' in line:
freecount = int(data[1])
self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
self.facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
self.facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
# Get swapinfo. swapinfo output looks like:
# Device 1M-blocks Used Avail Capacity
# /dev/ada0p3 314368 0 314368 0%

@@ -1730,8 +1711,8 @@ class FreeBSDHardware(Hardware):
lines.pop()
data = lines[-1].split()
if data[0] != 'Device':
self.facts['swaptotal_mb'] = int(data[1]) / 1024
self.facts['swapfree_mb'] = int(data[3]) / 1024
self.facts['swaptotal_mb'] = int(data[1]) // 1024
self.facts['swapfree_mb'] = int(data[3]) // 1024

@timeout(10)
def get_mount_facts(self):

@@ -1860,7 +1841,7 @@ class NetBSDHardware(Hardware):
key = data[0]
if key in NetBSDHardware.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
self.facts["%s_mb" % key.lower()] = int(val) / 1024
self.facts["%s_mb" % key.lower()] = int(val) // 1024

@timeout(10)
def get_mount_facts(self):

@@ -1930,8 +1911,8 @@ class AIX(Hardware):
pagecount = int(data[0])
if 'free pages' in line:
freecount = int(data[0])
self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
self.facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
self.facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
# Get swapinfo. swapinfo output looks like:
# Device 1M-blocks Used Avail Capacity
# /dev/ada0p3 314368 0 314368 0%

@@ -2072,12 +2053,12 @@ class HPUX(Hardware):
pagesize = 4096
rc, out, err = self.module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True)
data = int(re.sub(' +',' ',out).split(' ')[5].strip())
self.facts['memfree_mb'] = pagesize * data / 1024 / 1024
self.facts['memfree_mb'] = pagesize * data // 1024 // 1024
if self.facts['architecture'] == '9000/800':
try:
rc, out, err = self.module.run_command("grep Physical /var/adm/syslog/syslog.log")
data = re.search('.*Physical: ([0-9]*) Kbytes.*',out).groups()[0].strip()
self.facts['memtotal_mb'] = int(data) / 1024
self.facts['memtotal_mb'] = int(data) // 1024
except AttributeError:
#For systems where memory details aren't sent to syslog or the log has rotated, use parsed
#adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root.

@@ -2168,11 +2149,11 @@ class Darwin(Hardware):
self.facts['processor_cores'] = self.sysctl['hw.physicalcpu']

def get_memory_facts(self):
self.facts['memtotal_mb'] = int(self.sysctl['hw.memsize']) / 1024 / 1024
self.facts['memtotal_mb'] = int(self.sysctl['hw.memsize']) // 1024 // 1024

rc, out, err = self.module.run_command("sysctl hw.usermem")
if rc == 0:
self.facts['memfree_mb'] = int(out.splitlines()[-1].split()[1]) / 1024 / 1024
self.facts['memfree_mb'] = int(out.splitlines()[-1].split()[1]) // 1024 // 1024


class Network(Facts):

@@ -2254,8 +2235,7 @@ class LinuxNetwork(Network):
continue
if v == 'v6' and not socket.has_ipv6:
continue
rc, out, err = self.module.run_command(command[v])
out = out.decode('utf-8', 'replace')
rc, out, err = self.module.run_command(command[v], errors='surrogate_or_replace')
if not out:
# v6 routing may result in
# RTNETLINK answers: Invalid argument

@@ -2425,12 +2405,10 @@ class LinuxNetwork(Network):
ip_path = self.module.get_bin_path("ip")

args = [ip_path, 'addr', 'show', 'primary', device]
rc, stdout, stderr = self.module.run_command(args)
primary_data = stdout.decode('utf-8', 'replace')
rc, primary_data, stderr = self.module.run_command(args, errors='surrogate_or_replace')

args = [ip_path, 'addr', 'show', 'secondary', device]
rc, stdout, stderr = self.module.run_command(args)
secondary_data = stdout.decode('utf-8', 'decode')
rc, secondary_data, stderr = self.module.run_command(args, errors='surrogate_or_replace')

parse_ip_output(primary_data)
parse_ip_output(secondary_data, secondary=True)

@@ -2452,8 +2430,7 @@ class LinuxNetwork(Network):
ethtool_path = self.module.get_bin_path("ethtool")
if ethtool_path:
args = [ethtool_path, '-k', device]
rc, stdout, stderr = self.module.run_command(args)
stdout = stdout.decode('utf-8', 'replace')
rc, stdout, stderr = self.module.run_command(args, errors='surrogate_or_replace')
if rc == 0:
for line in stdout.strip().split('\n'):
if not line or line.endswith(":"):

@@ -3365,21 +3342,22 @@ class SunOSVirtual(Virtual):

else:
smbios = self.module.get_bin_path('smbios')
rc, out, err = self.module.run_command(smbios)
if rc == 0:
for line in out.split('\n'):
if 'VMware' in line:
self.facts['virtualization_type'] = 'vmware'
self.facts['virtualization_role'] = 'guest'
elif 'Parallels' in line:
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
elif 'VirtualBox' in line:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
elif 'HVM domU' in line:
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
if smbios:
rc, out, err = self.module.run_command(smbios)
if rc == 0:
for line in out.split('\n'):
if 'VMware' in line:
self.facts['virtualization_type'] = 'vmware'
self.facts['virtualization_role'] = 'guest'
elif 'Parallels' in line:
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
elif 'VirtualBox' in line:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
elif 'HVM domU' in line:
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'

class Ohai(Facts):
"""

@@ -111,6 +111,7 @@ class Netconf(object):
try:
self.device = Device(host, **kwargs)
self.device.open()
self.device.timeout = params['timeout']
except ConnectError:
exc = get_exception()
self.raise_exc('unable to connect to %s: %s' % (host, str(exc)))

@@ -183,8 +184,8 @@ class Netconf(object):
merge = False
overwrite = False
elif overwrite:
merge = True
overwrite = False
merge = False
overwrite = True
else:
merge = True
overwrite = False

@@ -28,6 +28,7 @@

import os
import hmac
import re

try:
import urlparse

@@ -41,23 +42,26 @@ except ImportError:

HASHED_KEY_MAGIC = "|1|"


def add_git_host_key(module, url, accept_hostkey=True, create_dir=True):

""" idempotently add a git url hostkey """

if is_ssh_url(url):

fqdn = get_fqdn(url)
fqdn, port = get_fqdn_and_port(url)

if fqdn:
known_host = check_hostkey(module, fqdn)
if not known_host:
if accept_hostkey:
rc, out, err = add_host_key(module, fqdn, create_dir=create_dir)
rc, out, err = add_host_key(module, fqdn, port=port, create_dir=create_dir)
if rc != 0:
module.fail_json(msg="failed to add %s hostkey: %s" % (fqdn, out + err))
else:
module.fail_json(msg="%s has an unknown hostkey. Set accept_hostkey to True or manually add the hostkey prior to running the git module" % fqdn)
module.fail_json(msg="%s has an unknown hostkey. Set accept_hostkey to True "
"or manually add the hostkey prior to running the git module" % fqdn)


def is_ssh_url(url):

@@ -70,45 +74,51 @@ def is_ssh_url(url):
return True
return False

def get_fqdn(repo_url):

""" chop the hostname out of a url """
def get_fqdn_and_port(repo_url):

result = None
""" chop the hostname and port out of a url """

fqdn = None
port = None
ipv6_re = re.compile('(\[[^]]*\])(?::([0-9]+))?')
if "@" in repo_url and "://" not in repo_url:
# most likely a user@host:path or user@host/path type URL
repo_url = repo_url.split("@", 1)[1]
if repo_url.startswith('['):
result = repo_url.split(']', 1)[0] + ']'
match = ipv6_re.match(repo_url)
# For this type of URL, colon specifies the path, not the port
if match:
fqdn, path = match.groups()
elif ":" in repo_url:
result = repo_url.split(":")[0]
fqdn = repo_url.split(":")[0]
elif "/" in repo_url:
result = repo_url.split("/")[0]
fqdn = repo_url.split("/")[0]
elif "://" in repo_url:
# this should be something we can parse with urlparse
parts = urlparse.urlparse(repo_url)
# parts[1] will be empty on python2.4 on ssh:// or git:// urls, so
# ensure we actually have a parts[1] before continuing.
if parts[1] != '':
result = parts[1]
if "@" in result:
result = result.split("@", 1)[1]
fqdn = parts[1]
if "@" in fqdn:
fqdn = fqdn.split("@", 1)[1]
match = ipv6_re.match(fqdn)
if match:
fqdn, port = match.groups()
elif ":" in fqdn:
fqdn, port = fqdn.split(":")[0:2]
return fqdn, port

if result[0].startswith('['):
result = result.split(']', 1)[0] + ']'
elif ":" in result:
result = result.split(":")[0]
return result

def check_hostkey(module, fqdn):
return not not_in_host_file(module, fqdn)


# this is a variant of code found in connection_plugins/paramiko.py and we should modify
# the paramiko code to import and use this.

def not_in_host_file(self, host):


if 'USER' in os.environ:
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
else:

@@ -159,7 +169,7 @@ def not_in_host_file(self, host):
return True


def add_host_key(module, fqdn, key_type="rsa", create_dir=False):
def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False):

""" use ssh-keyscan to add the hostkey """

@@ -184,10 +194,15 @@ def add_host_key(module, fqdn, key_type="rsa", create_dir=False):
elif not os.path.isdir(user_ssh_dir):
module.fail_json(msg="%s is not a directory" % user_ssh_dir)

this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn)
if port:
this_cmd = "%s -t %s -p %s %s" % (keyscan_cmd, key_type, port, fqdn)
else:
this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn)

rc, out, err = module.run_command(this_cmd)
# ssh-keyscan gives a 0 exit code and prints nothing on timeout
if rc != 0 or not out:
module.fail_json(msg='failed to get the hostkey for %s' % fqdn)
module.append_to_file(user_host_file, out)

return rc, out, err

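The rewritten parser above returns a (fqdn, port) pair and treats bracketed IPv6 literals specially: in scp-style URLs the colon after the bracket introduces a path, while in ssh:// URLs a trailing :NNNN is a port. A runnable check of the bracket regex, plus illustrative expected results:

    import re

    ipv6_re = re.compile('(\[[^]]*\])(?::([0-9]+))?')
    print(ipv6_re.match('[2001:db8::1]:2222').groups())  # ('[2001:db8::1]', '2222')
    print(ipv6_re.match('[2001:db8::1]:path').groups())  # ('[2001:db8::1]', None) - a non-numeric suffix is not a port

    # expected behaviour of get_fqdn_and_port on common URL shapes:
    #   'git@github.com:ansible/ansible.git'  -> ('github.com', None)
    #   'ssh://git@example.com:2222/repo.git' -> ('example.com', '2222')
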
@@ -121,6 +121,8 @@ class LXDClient(object):
if resp_type == 'error':
if ok_error_codes is not None and resp_json['error_code'] in ok_error_codes:
return resp_json
if resp_json['error'] == "Certificate already in trust store":
return resp_json
self._raise_err_from_json(resp_json)
return resp_json
except socket.error as e:

@@ -32,6 +32,7 @@ from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback, get_exception
from ansible.module_utils.netcli import Cli, Command
from ansible.module_utils.netcfg import Config
from ansible.module_utils._text import to_native

NET_TRANSPORT_ARGS = dict(
host=dict(required=True),

@@ -105,7 +106,7 @@ class NetworkModule(AnsibleModule):
self.fail_json(msg='Unknown transport or no default transport specified')
except (TypeError, NetworkError):
exc = get_exception()
self.fail_json(msg=exc.message)
self.fail_json(msg=to_native(exc))

if connect_on_load:
self.connect()

@@ -147,17 +148,20 @@ class NetworkModule(AnsibleModule):
self.connection.connect(self.params)
if self.params['authorize']:
self.connection.authorize(self.params)
self.log('connected to %s:%s using %s' % (self.params['host'],
self.params['port'], self.params['transport']))
except NetworkError:
exc = get_exception()
self.fail_json(msg=exc.message)
self.fail_json(msg=to_native(exc))

def disconnect(self):
try:
if self.connected:
self.connection.disconnect()
self.log('disconnected from %s' % self.params['host'])
except NetworkError:
exc = get_exception()
self.fail_json(msg=exc.message)
self.fail_json(msg=to_native(exc))

def register_transport(transport, default=False):
def register(cls):

@@ -28,6 +28,7 @@

import os

from ansible.module_utils.six import iteritems

def openstack_argument_spec():
# DEPRECATED: This argument spec is only used for the deprecated old

@@ -61,7 +62,7 @@ def openstack_argument_spec():
def openstack_find_nova_addresses(addresses, ext_tag, key_name=None):

ret = []
for (k, v) in addresses.iteritems():
for (k, v) in iteritems(addresses):
if key_name and k == key_name:
ret.extend([addrs['addr'] for addrs in v])
else:

@@ -27,4 +27,24 @@
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

# This file is a placeholder for common code for the future split 'service' modules.
import os
import glob

def sysv_is_enabled(name):
return bool(glob.glob('/etc/rc?.d/S??%s' % name))

def get_sysv_script(name):

if name.startswith('/'):
result = name
else:
result = '/etc/init.d/%s' % name

return result

def sysv_exists(name):
return os.path.exists(get_sysv_script(name))

def fail_if_missing(module, found, service, msg=''):
if not found:
module.fail_json(msg='Could not find the requested service %s: %s' % (service, msg))

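These helpers give the planned split-out service modules one shared way to probe SysV init scripts: an absolute name is taken as the script path, anything else is looked up under /etc/init.d, and "enabled" means an S?? start link exists in some runlevel directory. A usage sketch (the service name 'ssh' is just an example; results depend on the machine this runs on):

    print(get_sysv_script('ssh'))                        # '/etc/init.d/ssh'
    print(get_sysv_script('/usr/local/etc/rc.d/myapp'))  # absolute paths pass through unchanged
    print(sysv_exists('ssh'))                            # True only if /etc/init.d/ssh exists here
    print(sysv_is_enabled('ssh'))                        # True only if some /etc/rc?.d/S??ssh link exists
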
@@ -21,11 +21,6 @@ import re
import socket
import time

# py2 vs py3; replace with six via ansiballz
try:
from StringIO import StringIO
except ImportError:
from io import StringIO

try:
import paramiko

@@ -36,6 +31,8 @@ except ImportError:

from ansible.module_utils.basic import get_exception
from ansible.module_utils.network import NetworkError
from ansible.module_utils.six.moves import StringIO
from ansible.module_utils._text import to_native

ANSI_RE = [
re.compile(r'(\x1b\[\?1h\x1b=)'),

@@ -55,7 +52,6 @@ class ShellError(Exception):

def __init__(self, msg, command=None):
super(ShellError, self).__init__(msg)
self.message = msg
self.command = command


@@ -106,6 +102,8 @@ class Shell(object):
raise ShellError("unable to resolve host name")
except AuthenticationException:
raise ShellError('Unable to authenticate to remote device')
except socket.timeout:
raise ShellError("timeout trying to connect to remote device")
except socket.error:
exc = get_exception()
if exc.errno == 60:

@@ -157,7 +155,7 @@ class Shell(object):
raise ShellError("timeout trying to send command: %s" % cmd)
except socket.error:
exc = get_exception()
raise ShellError("problem sending command to host: %s" % exc.message)
raise ShellError("problem sending command to host: %s" % to_native(exc))
return responses

def close(self):

@@ -230,7 +228,7 @@ class CliBase(object):
except ShellError:
exc = get_exception()
raise NetworkError(
msg='failed to connect to %s:%s' % (host, port), exc=str(exc)
msg='failed to connect to %s:%s' % (host, port), exc=to_native(exc)
)

self._connected = True

@@ -249,7 +247,7 @@ class CliBase(object):
return self.shell.send(commands)
except ShellError:
exc = get_exception()
raise NetworkError(exc.message, commands=commands)
raise NetworkError(to_native(exc), commands=commands)

def run_commands(self, commands):
return self.execute(to_list(commands))

@@ -38,6 +38,7 @@ if PY3:
class_types = type,
text_type = str
binary_type = bytes
cmp = lambda a, b: (a > b) - (a < b)

MAXSIZE = sys.maxsize
else:

@@ -46,6 +47,7 @@ else:
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
cmp = cmp

if sys.platform.startswith("java"):
# Jython always uses 32 bits.

@ -105,8 +105,6 @@ import platform
|
|||
import tempfile
|
||||
import base64
|
||||
|
||||
from ansible.module_utils.basic import get_distribution, get_exception
|
||||
|
||||
try:
|
||||
import httplib
|
||||
except ImportError:
|
||||
|
@ -115,7 +113,9 @@ except ImportError:
|
|||
|
||||
import ansible.module_utils.six.moves.urllib.request as urllib_request
|
||||
import ansible.module_utils.six.moves.urllib.error as urllib_error
|
||||
from ansible.module_utils.basic import get_distribution, get_exception
|
||||
from ansible.module_utils.six import b
|
||||
from ansible.module_utils._text import to_bytes, to_text
|
||||
|
||||
try:
|
||||
# python3
|
||||
|
@ -182,6 +182,8 @@ if not HAS_SSLCONTEXT and HAS_SSL:
|
|||
del libssl
|
||||
|
||||
|
||||
LOADED_VERIFY_LOCATIONS = set()
|
||||
|
||||
HAS_MATCH_HOSTNAME = True
|
||||
try:
|
||||
from ssl import match_hostname, CertificateError
|
||||
|
@ -307,7 +309,7 @@ if not HAS_MATCH_HOSTNAME:
|
|||
# ca cert, regardless of validity, for Python on Mac OS to use the
|
||||
# keychain functionality in OpenSSL for validating SSL certificates.
|
||||
# See: http://mercurial.selenic.com/wiki/CACertificates#Mac_OS_X_10.6_and_higher
|
||||
DUMMY_CA_CERT = """-----BEGIN CERTIFICATE-----
|
||||
b_DUMMY_CA_CERT = b("""-----BEGIN CERTIFICATE-----
|
||||
MIICvDCCAiWgAwIBAgIJAO8E12S7/qEpMA0GCSqGSIb3DQEBBQUAMEkxCzAJBgNV
|
||||
BAYTAlVTMRcwFQYDVQQIEw5Ob3J0aCBDYXJvbGluYTEPMA0GA1UEBxMGRHVyaGFt
|
||||
MRAwDgYDVQQKEwdBbnNpYmxlMB4XDTE0MDMxODIyMDAyMloXDTI0MDMxNTIyMDAy
|
||||
|
@ -324,7 +326,7 @@ MUB80IR6knq9K/tY+hvPsZer6eFMzO3JGkRFBh2kn6JdMDnhYGX7AXVHGflrwNQH
|
|||
qFy+aenWXsC0ZvrikFxbQnX8GVtDADtVznxOi7XzFw7JOxdsVrpXgSN0eh0aMzvV
|
||||
zKPZsZ2miVGclicJHzm5q080b1p/sZtuKIEZk6vZqEg=
|
||||
-----END CERTIFICATE-----
|
||||
"""
|
||||
""")
|
||||
|
||||
#
|
||||
# Exceptions
|
||||
|
@ -510,9 +512,15 @@ def RedirectHandlerFactory(follow_redirects=None, validate_certs=True):
|
|||
newheaders = dict((k,v) for k,v in req.headers.items()
|
||||
if k.lower() not in ("content-length", "content-type")
|
||||
)
|
||||
try:
|
||||
# Python 2-3.3
|
||||
origin_req_host = req.get_origin_req_host()
|
||||
except AttributeError:
|
||||
# Python 3.4+
|
||||
origin_req_host = req.origin_req_host
|
||||
return urllib_request.Request(newurl,
|
||||
headers=newheaders,
|
||||
origin_req_host=req.get_origin_req_host(),
|
||||
origin_req_host=origin_req_host,
|
||||
unverifiable=True)
|
||||
else:
|
||||
raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp)
|
||||
|
@@ -568,21 +576,21 @@ class SSLValidationHandler(urllib_request.BaseHandler):
        ca_certs = []
        paths_checked = []

        system = platform.system()
        system = to_text(platform.system(), errors='surrogate_or_strict')
        # build a list of paths to check for .crt/.pem files
        # based on the platform type
        paths_checked.append('/etc/ssl/certs')
        if system == 'Linux':
        if system == u'Linux':
            paths_checked.append('/etc/pki/ca-trust/extracted/pem')
            paths_checked.append('/etc/pki/tls/certs')
            paths_checked.append('/usr/share/ca-certificates/cacert.org')
        elif system == 'FreeBSD':
        elif system == u'FreeBSD':
            paths_checked.append('/usr/local/share/certs')
        elif system == 'OpenBSD':
        elif system == u'OpenBSD':
            paths_checked.append('/etc/ssl')
        elif system == 'NetBSD':
        elif system == u'NetBSD':
            ca_certs.append('/etc/openssl/certs')
        elif system == 'SunOS':
        elif system == u'SunOS':
            paths_checked.append('/opt/local/etc/openssl/certs')

        # fall back to a user-deployed cert in a standard

@@ -590,10 +598,12 @@ class SSLValidationHandler(urllib_request.BaseHandler):
        paths_checked.append('/etc/ansible')

        tmp_fd, tmp_path = tempfile.mkstemp()
        to_add_fd, to_add_path = tempfile.mkstemp()
        to_add = False

        # Write the dummy ca cert if we are running on Mac OS X
        if system == 'Darwin':
            os.write(tmp_fd, DUMMY_CA_CERT)
        if system == u'Darwin':
            os.write(tmp_fd, b_DUMMY_CA_CERT)
            # Default Homebrew path for OpenSSL certs
            paths_checked.append('/usr/local/etc/openssl')

@@ -608,13 +618,21 @@ class SSLValidationHandler(urllib_request.BaseHandler):
                if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt','.pem'):
                    try:
                        cert_file = open(full_path, 'rb')
                        os.write(tmp_fd, cert_file.read())
                        os.write(tmp_fd, b('\n'))
                        cert = cert_file.read()
                        cert_file.close()
                        os.write(tmp_fd, cert)
                        os.write(tmp_fd, b('\n'))
                        if full_path not in LOADED_VERIFY_LOCATIONS:
                            to_add = True
                            os.write(to_add_fd, cert)
                            os.write(to_add_fd, b('\n'))
                            LOADED_VERIFY_LOCATIONS.add(full_path)
                    except (OSError, IOError):
                        pass

        return (tmp_path, paths_checked)
        if not to_add:
            to_add_path = None
        return (tmp_path, to_add_path, paths_checked)

    def validate_proxy_response(self, response, valid_codes=[200]):
        '''
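Note: the get_ca_certs() hunks above scan well-known platform directories, append
every .crt/.pem file into one temp bundle, and use the module-level
LOADED_VERIFY_LOCATIONS set so each cert path is only handed to an SSLContext once.
A simplified, runnable sketch of just the scan-and-concatenate step (dedup
bookkeeping omitted; the directory path is an example):

    import os
    import tempfile

    def collect_pem_bundle(directory):
        # Concatenate all .crt/.pem files under `directory` into one temp
        # file, newline-separated, and return its path.
        fd, path = tempfile.mkstemp()
        try:
            for name in sorted(os.listdir(directory)):
                full = os.path.join(directory, name)
                if os.path.isfile(full) and os.path.splitext(name)[1] in ('.crt', '.pem'):
                    with open(full, 'rb') as f:
                        os.write(fd, f.read())
                    os.write(fd, b'\n')
        finally:
            os.close(fd)
        return path

    bundle = collect_pem_bundle('/etc/ssl/certs')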
@@ -643,17 +661,18 @@ class SSLValidationHandler(urllib_request.BaseHandler):
            return False
        return True

    def _make_context(self, tmp_ca_cert_path):
    def _make_context(self, to_add_ca_cert_path):
        context = create_default_context()
        context.load_verify_locations(tmp_ca_cert_path)
        if to_add_ca_cert_path:
            context.load_verify_locations(to_add_ca_cert_path)
        return context

    def http_request(self, req):
        tmp_ca_cert_path, paths_checked = self.get_ca_certs()
        tmp_ca_cert_path, to_add_ca_cert_path, paths_checked = self.get_ca_certs()
        https_proxy = os.environ.get('https_proxy')
        context = None
        if HAS_SSLCONTEXT:
            context = self._make_context(tmp_ca_cert_path)
            context = self._make_context(to_add_ca_cert_path)

        # Detect if 'no_proxy' environment variable is set and if our URL is included
        use_proxy = self.detect_no_proxy(req.get_full_url())
@@ -672,9 +691,14 @@ class SSLValidationHandler(urllib_request.BaseHandler):
                    s.sendall(self.CONNECT_COMMAND % (self.hostname, self.port))
                    if proxy_parts.get('username'):
                        credentials = "%s:%s" % (proxy_parts.get('username',''), proxy_parts.get('password',''))
                        s.sendall('Proxy-Authorization: Basic %s\r\n' % credentials.encode('base64').strip())
                    s.sendall('\r\n')
                    connect_result = s.recv(4096)
                        s.sendall(b('Proxy-Authorization: Basic %s\r\n') % base64.b64encode(to_bytes(credentials, errors='surrogate_or_strict')).strip())
                    s.sendall(b('\r\n'))
                    connect_result = b("")
                    while connect_result.find(b("\r\n\r\n")) <= 0:
                        connect_result += s.recv(4096)
                        # 128 kilobytes of headers should be enough for everyone.
                        if len(connect_result) > 131072:
                            raise ProxyError('Proxy sent too verbose headers. Only 128KiB allowed.')
                    self.validate_proxy_response(connect_result)
                    if context:
                        ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
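Note: the hunk above stops treating a single recv() as the whole proxy reply and
instead loops until the CONNECT response's header block is complete (HTTP headers
end at the first b"\r\n\r\n"), with a 128 KiB cap as a safety valve against a proxy
that never terminates its headers. A standalone sketch of that read loop (any
socket-like object works):

    def read_connect_response(sock, limit=131072):
        # Accumulate bytes until the blank line that terminates the headers;
        # bail out if the peer sends an implausible amount of header data.
        response = b""
        while response.find(b"\r\n\r\n") <= 0:
            chunk = sock.recv(4096)
            if not chunk:
                break  # peer closed before finishing its headers
            response += chunk
            if len(response) > limit:
                raise ValueError('proxy sent more than %d bytes of headers' % limit)
        return response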
@@ -714,6 +738,14 @@ class SSLValidationHandler(urllib_request.BaseHandler):
            except:
                pass

            try:
                # cleanup the temp file created, don't worry
                # if it fails for some reason
                if to_add_ca_cert_path:
                    os.remove(to_add_ca_cert_path)
            except:
                pass

        return req

    https_request = http_request
@@ -793,9 +825,11 @@ def open_url(url, data=None, headers=None, method=None, use_proxy=True,
            # use this username/password combination for urls
            # for which `theurl` is a super-url
            authhandler = urllib_request.HTTPBasicAuthHandler(passman)
            digest_authhandler = urllib_request.HTTPDigestAuthHandler(passman)

            # create the AuthHandler
            handlers.append(authhandler)
            handlers.append(digest_authhandler)

        elif username and force_basic_auth:
            headers["Authorization"] = basic_auth_header(username, password)
@@ -836,6 +870,7 @@ def open_url(url, data=None, headers=None, method=None, use_proxy=True,
    opener = urllib_request.build_opener(*handlers)
    urllib_request.install_opener(opener)

    data = to_bytes(data, nonstring='passthru')
    if method:
        if method.upper() not in ('OPTIONS','GET','HEAD','POST','PUT','DELETE','TRACE','CONNECT','PATCH'):
            raise ConnectionError('invalid HTTP request method; %s' % method.upper())
@@ -848,13 +883,14 @@ def open_url(url, data=None, headers=None, method=None, use_proxy=True,
    if http_agent:
        request.add_header('User-agent', http_agent)

    # if we're ok with getting a 304, set the timestamp in the
    # header, otherwise make sure we don't get a cached copy
    if last_mod_time and not force:
    # Cache control
    # Either we directly force a cache refresh
    if force:
        request.add_header('cache-control', 'no-cache')
    # or we do it if the original is more recent than our copy
    elif last_mod_time:
        tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000')
        request.add_header('If-Modified-Since', tstamp)
    else:
        request.add_header('cache-control', 'no-cache')

    # user defined headers now, which may override things we've set above
    if headers:
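Note: the cache-control hunk above reorders the logic so an explicit `force` always
wins, a supplied last-modified time produces a conditional request, and everything
else opts out of caches. The If-Modified-Since value is the RFC 1123-style
timestamp the strftime call builds, e.g.:

    import datetime

    last_mod_time = datetime.datetime(2016, 11, 1, 12, 0, 0)
    tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000')
    # tstamp == 'Tue, 01 Nov 2016 12:00:00 +0000'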
@@ -878,7 +914,10 @@ def open_url(url, data=None, headers=None, method=None, use_proxy=True,


def basic_auth_header(username, password):
    return "Basic %s" % base64.b64encode("%s:%s" % (username, password))
    """Takes a username and password and returns a byte string suitable for
    using as value of an Authorization header to do basic auth.
    """
    return b("Basic %s") % base64.b64encode(to_bytes("%s:%s" % (username, password), errors='surrogate_or_strict'))


def url_argument_spec():
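Note: the basic_auth_header() rewrite above makes the return value an explicit byte
string, since base64.b64encode() only accepts and returns bytes on Python 3. What
the helper produces, reduced to the standard library:

    import base64

    value = b"Basic %s" % base64.b64encode(b"alice:secret")
    # value == b'Basic YWxpY2U6c2VjcmV0', ready for an Authorization header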
@@ -893,7 +932,7 @@ def url_argument_spec():
        use_proxy=dict(default='yes', type='bool'),
        validate_certs=dict(default='yes', type='bool'),
        url_username=dict(required=False),
        url_password=dict(required=False),
        url_password=dict(required=False, no_log=True),
        force_basic_auth=dict(required=False, type='bool', default='no'),
    )
@@ -1 +1 @@
Subproject commit 6c4d71a7fab60601846363506bc8eebe9c52c240
Subproject commit ebdd66c2b6cb15e2364b2ecc41a9da9c1d02d64f

@@ -1 +1 @@
Subproject commit df35d324d62e6034ab86db0fb4a56d3ca122d4b2
Subproject commit 0cfb1c4c3492045d891cdaa2bbb9636ec683636f
@@ -31,7 +31,7 @@ from ansible.errors import AnsibleFileNotFound, AnsibleParserError, AnsibleError
from ansible.errors.yaml_strings import YAML_SYNTAX_ERROR
from ansible.module_utils.basic import is_executable
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.parsing.vault import VaultLib, is_encrypted, is_encrypted_file
from ansible.parsing.vault import VaultLib, b_HEADER, is_encrypted, is_encrypted_file
from ansible.parsing.quoting import unquote
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleUnicode
@@ -71,9 +71,9 @@ class DataLoader():
        # initialize the vault stuff with an empty password
        self.set_vault_password(None)

    def set_vault_password(self, vault_password):
        self._vault_password = vault_password
        self._vault = VaultLib(password=vault_password)
    def set_vault_password(self, b_vault_password):
        self._b_vault_password = b_vault_password
        self._vault = VaultLib(b_password=b_vault_password)

    def load(self, data, file_name='<string>', show_content=True):
        '''
@@ -116,7 +119,9 @@ class DataLoader():
            parsed_data = self._FILE_CACHE[file_name]
        else:
            # read the file contents and load the data structure from them
            (file_data, show_content) = self._get_file_contents(file_name)
            (b_file_data, show_content) = self._get_file_contents(file_name)

            file_data = to_text(b_file_data, errors='surrogate_or_strict')
            parsed_data = self.load(data=file_data, file_name=file_name, show_content=show_content)

            # cache the file contents for next time
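Note: the hunk above makes the bytes/text boundary explicit: file contents come back
from _get_file_contents() as bytes and are decoded exactly once before parsing. The
same one-liner pattern outside the class (the file name is an example):

    from ansible.module_utils._text import to_text

    with open('playbook.yml', 'rb') as f:
        b_data = f.read()
    text = to_text(b_data, errors='surrogate_or_strict')  # raises on undecodable bytes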
@@ -149,7 +151,7 @@ class DataLoader():
    def _safe_load(self, stream, file_name=None):
        ''' Implements yaml.safe_load(), except using our custom loader class. '''

        loader = AnsibleLoader(stream, file_name, self._vault_password)
        loader = AnsibleLoader(stream, file_name, self._b_vault_password)
        try:
            return loader.get_single_data()
        finally:
@@ -178,7 +180,6 @@ class DataLoader():
                    data = self._vault.decrypt(data, filename=b_file_name)
                    show_content = False

                data = to_text(data, errors='surrogate_or_strict')
                return (data, show_content)

        except (IOError, OSError) as e:
@@ -300,6 +301,7 @@ class DataLoader():
            result = test_path
        else:
            search = []
            display.debug(u'evaluation_path:\n\t%s' % '\n\t'.join(paths))
            for path in paths:
                upath = unfrackpath(path)
                b_upath = to_bytes(upath, errors='surrogate_or_strict')
@@ -313,15 +315,21 @@ class DataLoader():
                    search.append(os.path.join(os.path.dirname(b_mydir), b_dirname, b_source))
                    search.append(os.path.join(b_mydir, b_source))
                else:
                    search.append(os.path.join(b_upath, b_dirname, b_source))
                    search.append(os.path.join(b_upath, b'tasks', b_source))
                    # don't add dirname if user already is using it in source
                    if b_source.split(b'/')[0] != b_dirname:
                        search.append(os.path.join(b_upath, b_dirname, b_source))
                    search.append(os.path.join(b_upath, b_source))

            elif b_dirname not in b_source.split(b'/'):
                # don't add dirname if user already is using it in source
                search.append(os.path.join(b_upath, b_dirname, b_source))
                if b_source.split(b'/')[0] != dirname:
                    search.append(os.path.join(b_upath, b_dirname, b_source))
                search.append(os.path.join(b_upath, b_source))

            # always append basedir as last resort
            search.append(os.path.join(to_bytes(self.get_basedir()), b_dirname, b_source))
            # don't add dirname if user already is using it in source
            if b_source.split(b'/')[0] != dirname:
                search.append(os.path.join(to_bytes(self.get_basedir()), b_dirname, b_source))
            search.append(os.path.join(to_bytes(self.get_basedir()), b_source))

        display.debug(u'search_path:\n\t%s' % to_text(b'\n\t'.join(search)))
@@ -351,7 +359,7 @@ class DataLoader():
                raise AnsibleError("Problem running vault password script %s (%s)."
                                   " If this is not a script, remove the executable bit from the file." % (' '.join(this_path), to_native(e)))
            stdout, stderr = p.communicate()
            self.set_vault_password(stdout.strip('\r\n'))
            self.set_vault_password(stdout.strip(b'\r\n'))
        else:
            try:
                f = open(this_path, "rb")
@@ -389,18 +397,21 @@ class DataLoader():
            raise AnsibleFileNotFound("the file_name '%s' does not exist, or is not readable" % to_native(file_path))

        if not self._vault:
            self._vault = VaultLib(password="")
            self._vault = VaultLib(b_password="")

        real_path = self.path_dwim(file_path)

        try:
            with open(to_bytes(real_path), 'rb') as f:
                if is_encrypted_file(f):
                # Limit how much of the file is read since we do not know
                # whether this is a vault file and therefore it could be very
                # large.
                if is_encrypted_file(f, count=len(b_HEADER)):
                    # if the file is encrypted and no password was specified,
                    # the decrypt call would throw an error, but we check first
                    # since the decrypt function doesn't know the file name
                    data = f.read()
                    if not self._vault_password:
                    if not self._b_vault_password:
                        raise AnsibleParserError("A vault password must be specified to decrypt %s" % file_path)

                    data = self._vault.decrypt(data, filename=real_path)
@@ -83,9 +83,9 @@ try:
except ImportError:
    pass
except Exception as e:
    display.warning("Optional dependency 'cryptography' raised an exception, falling back to 'Crypto'")
    display.vvvv("Optional dependency 'cryptography' raised an exception, falling back to 'Crypto'.")
    import traceback
    display.debug("Traceback from import of cryptography was {0}".format(traceback.format_exc()))
    display.vvvv("Traceback from import of cryptography was {0}".format(traceback.format_exc()))

HAS_ANY_PBKDF2HMAC = HAS_PBKDF2 or HAS_PBKDF2HMAC
@@ -164,8 +164,8 @@ def is_encrypted_file(file_obj, start_pos=0, count=-1):

class VaultLib:

    def __init__(self, password):
        self.b_password = to_bytes(password, errors='strict', encoding='utf-8')
    def __init__(self, b_password):
        self.b_password = to_bytes(b_password, errors='strict', encoding='utf-8')
        self.cipher_name = None
        self.b_version = b'1.1'
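Note: renaming the parameter to b_password documents the contract that callers hand
VaultLib an already-encoded byte string (the to_bytes() call stays as a safety net).
A usage sketch under the new keyword, matching the call sites elsewhere in this
diff:

    from ansible.module_utils._text import to_bytes
    from ansible.parsing.vault import VaultLib

    vault = VaultLib(b_password=to_bytes('my-vault-pass'))
    b_ciphertext = vault.encrypt(b'secret: value\n')  # plaintext is bytes too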
@@ -311,8 +311,8 @@ class VaultLib:

class VaultEditor:

    def __init__(self, password):
        self.vault = VaultLib(password)
    def __init__(self, b_password):
        self.vault = VaultLib(b_password)

    # TODO: mv shred file stuff to it's own class
    def _shred_file_custom(self, tmp_path):
@@ -398,18 +398,18 @@ class VaultEditor:
            self._shred_file(tmp_path)
            raise

        tmpdata = self.read_data(tmp_path)
        b_tmpdata = self.read_data(tmp_path)

        # Do nothing if the content has not changed
        if existing_data == tmpdata and not force_save:
        if existing_data == b_tmpdata and not force_save:
            self._shred_file(tmp_path)
            return

        # encrypt new data and write out to tmp
        # An existing vaultfile will always be UTF-8,
        # so decode to unicode here
        enc_data = self.vault.encrypt(tmpdata.decode())
        self.write_data(enc_data, tmp_path)
        b_ciphertext = self.vault.encrypt(b_tmpdata)
        self.write_data(b_ciphertext, tmp_path)

        # shuffle tmp file into place
        self.shuffle_files(tmp_path, filename)
@@ -420,9 +420,9 @@ class VaultEditor:

        # A file to be encrypted into a vaultfile could be any encoding
        # so treat the contents as a byte string.
        plaintext = self.read_data(filename)
        ciphertext = self.vault.encrypt(plaintext)
        self.write_data(ciphertext, output_file or filename)
        b_plaintext = self.read_data(filename)
        b_ciphertext = self.vault.encrypt(b_plaintext)
        self.write_data(b_ciphertext, output_file or filename)

    def decrypt_file(self, filename, output_file=None):
@@ -475,7 +475,7 @@ class VaultEditor:

        return plaintext

    def rekey_file(self, filename, new_password):
    def rekey_file(self, filename, b_new_password):

        check_prereqs()
@@ -486,7 +486,11 @@ class VaultEditor:
        except AnsibleError as e:
            raise AnsibleError("%s for %s" % (to_bytes(e),to_bytes(filename)))

        new_vault = VaultLib(new_password)
        # This is more or less an assert, see #18247
        if b_new_password is None:
            raise AnsibleError('The value for the new_password to rekey %s with is not valid' % filename)

        new_vault = VaultLib(b_new_password)
        new_ciphertext = new_vault.encrypt(plaintext)

        self.write_data(new_ciphertext, filename)
@@ -19,7 +19,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from yaml.constructor import Constructor, ConstructorError
from yaml.constructor import SafeConstructor, ConstructorError
from yaml.nodes import MappingNode

from ansible.module_utils._text import to_bytes
@@ -35,13 +35,13 @@ except ImportError:
    display = Display()


class AnsibleConstructor(Constructor):
    def __init__(self, file_name=None, vault_password=None):
        self._vault_password = vault_password
class AnsibleConstructor(SafeConstructor):
    def __init__(self, file_name=None, b_vault_password=None):
        self._b_vault_password = b_vault_password
        self._ansible_file_name = file_name
        super(AnsibleConstructor, self).__init__()
        self._vaults = {}
        self._vaults['default'] = VaultLib(password=self._vault_password)
        self._vaults['default'] = VaultLib(b_password=self._b_vault_password)

    def construct_yaml_map(self, node):
        data = AnsibleMapping()
@@ -98,7 +98,7 @@ class AnsibleConstructor(Constructor):
        value = self.construct_scalar(node)
        ciphertext_data = to_bytes(value)

        if self._vault_password is None:
        if self._b_vault_password is None:
            raise ConstructorError(None, None,
                                   "found vault but no vault password provided", node.start_mark)
@@ -25,6 +25,7 @@ from ansible.compat.six import PY3
from ansible.parsing.yaml.objects import AnsibleUnicode, AnsibleSequence, AnsibleMapping
from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
from ansible.vars.hostvars import HostVars
from ansible.vars.unsafe_proxy import AnsibleUnsafeText


class AnsibleDumper(yaml.SafeDumper):
@@ -51,6 +52,11 @@ AnsibleDumper.add_representer(
    represent_unicode,
)

AnsibleDumper.add_representer(
    AnsibleUnsafeText,
    represent_unicode,
)

AnsibleDumper.add_representer(
    HostVars,
    represent_hostvars,
@@ -34,7 +34,7 @@ if HAVE_PYYAML_C:
    class AnsibleLoader(CParser, AnsibleConstructor, Resolver):
        def __init__(self, stream, file_name=None, vault_password=None):
            CParser.__init__(self, stream)
            AnsibleConstructor.__init__(self, file_name=file_name, vault_password=vault_password)
            AnsibleConstructor.__init__(self, file_name=file_name, b_vault_password=vault_password)
            Resolver.__init__(self)
else:
    from yaml.composer import Composer
@@ -48,5 +48,5 @@ else:
            Scanner.__init__(self)
            Parser.__init__(self)
            Composer.__init__(self)
            AnsibleConstructor.__init__(self, file_name=file_name, vault_password=vault_password)
            AnsibleConstructor.__init__(self, file_name=file_name, b_vault_password=vault_password)
            Resolver.__init__(self)
@@ -132,3 +132,6 @@ class AnsibleVaultEncryptedUnicode(yaml.YAMLObject, AnsibleUnicode):

    def __unicode__(self):
        return unicode(self.data)

    def encode(self, encoding=None, errors=None):
        return self.data.encode(encoding, errors)
@@ -21,11 +21,12 @@ __metaclass__ = type

import os

from ansible import constants as C
from ansible.errors import AnsibleParserError
from ansible.module_utils._text import to_text
from ansible.playbook.play import Play
from ansible.playbook.playbook_include import PlaybookInclude
from ansible.plugins import get_all_plugin_loaders
from ansible import constants as C

try:
    from __main__ import display
@@ -43,7 +44,7 @@ class Playbook:
        # Entries in the datastructure of a playbook may
        # be either a play or an include statement
        self._entries = []
        self._basedir = os.getcwd()
        self._basedir = to_text(os.getcwd(), errors='surrogate_or_strict')
        self._loader = loader
        self._file_name = None
@@ -111,7 +111,7 @@ class BaseMeta(type):
                    method = "_get_attr_%s" % attr_name
                    if method in src_dict or method in dst_dict:
                        getter = partial(_generic_g_method, attr_name)
                    elif '_get_parent_attribute' in dst_dict and value.inherit:
                    elif ('_get_parent_attribute' in dst_dict or '_get_parent_attribute' in src_dict) and value.inherit:
                        getter = partial(_generic_g_parent, attr_name)
                    else:
                        getter = partial(_generic_g, attr_name)
@@ -131,7 +131,9 @@ class BaseMeta(type):
            for parent in parents:
                if hasattr(parent, '__dict__'):
                    _create_attrs(parent.__dict__, dst_dict)
                    _process_parents(parent.__bases__, dst_dict)
                    new_dst_dict = parent.__dict__.copy()
                    new_dst_dict.update(dst_dict)
                    _process_parents(parent.__bases__, new_dst_dict)

        # create some additional class attributes
        dct['_attributes'] = dict()
@@ -480,7 +482,7 @@ class Base(with_metaclass(BaseMeta, object)):
        except TypeError as e:
            raise AnsibleParserError("Invalid variable name in vars specified for %s: %s" % (self.__class__.__name__, e), obj=ds)

    def _extend_value(self, value, new_value):
    def _extend_value(self, value, new_value, prepend=False):
        '''
        Will extend the value given with new_value (and will turn both
        into lists if they are not so already). The values are run through
@@ -492,7 +494,12 @@ class Base(with_metaclass(BaseMeta, object)):
        if not isinstance(new_value, list):
            new_value = [ new_value ]

        return [i for i,_ in itertools.groupby(value + new_value) if i is not None]
        if prepend:
            combined = new_value + value
        else:
            combined = value + new_value

        return [i for i,_ in itertools.groupby(combined) if i is not None]

    def serialize(self):
        '''
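Note: the _extend_value() hunk above adds a prepend switch so inherited values can
take precedence, and keeps the existing itertools.groupby() pass, which drops None
entries and collapses *adjacent* duplicates only. The combine step in isolation:

    import itertools

    value, new_value = ['b', 'c'], ['a', 'b']
    combined = new_value + value   # prepend=True puts the new values first
    result = [i for i, _ in itertools.groupby(combined) if i is not None]
    # result == ['a', 'b', 'c']; the adjacent 'b', 'b' pair collapsed to one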
@@ -49,6 +49,9 @@ class Block(Base, Become, Conditional, Taggable):
        self._use_handlers = use_handlers
        self._implicit = implicit

        # end of role flag
        self._eor = False

        if task_include:
            self._parent = task_include
        elif parent_block:
@@ -56,6 +59,9 @@ class Block(Base, Become, Conditional, Taggable):

        super(Block, self).__init__()

    def __repr__(self):
        return "BLOCK(uuid=%s)(id=%s)(parent=%s)" % (self._uuid, id(self), self._parent)

    def get_vars(self):
        '''
        Blocks do not store variables directly, however they may be a member
@@ -175,6 +181,7 @@ class Block(Base, Become, Conditional, Taggable):
        new_me = super(Block, self).copy()
        new_me._play = self._play
        new_me._use_handlers = self._use_handlers
        new_me._eor = self._eor

        if self._dep_chain is not None:
            new_me._dep_chain = self._dep_chain[:]
@@ -207,6 +214,7 @@ class Block(Base, Become, Conditional, Taggable):
            data[attr] = getattr(self, attr)

        data['dep_chain'] = self.get_dep_chain()
        data['eor'] = self._eor

        if self._role is not None:
            data['role'] = self._role.serialize()
@@ -234,6 +242,7 @@ class Block(Base, Become, Conditional, Taggable):
            setattr(self, attr, data.get(attr))

        self._dep_chain = data.get('dep_chain', None)
        self._eor = data.get('eor', False)

        # if there was a serialized role, unpack it too
        role_data = data.get('role')
@@ -255,17 +264,6 @@ class Block(Base, Become, Conditional, Taggable):
            self._parent = p
            self._dep_chain = self._parent.get_dep_chain()

    def evaluate_conditional(self, templar, all_vars):
        dep_chain = self.get_dep_chain()
        if dep_chain:
            for dep in dep_chain:
                if not dep.evaluate_conditional(templar, all_vars):
                    return False
        if self._parent is not None:
            if not self._parent.evaluate_conditional(templar, all_vars):
                return False
        return super(Block, self).evaluate_conditional(templar, all_vars)

    def set_loader(self, loader):
        self._loader = loader
        if self._parent:
@@ -279,9 +277,9 @@ class Block(Base, Become, Conditional, Taggable):
                dep.set_loader(loader)

    def _get_attr_environment(self):
        return self._get_parent_attribute('environment', extend=True)
        return self._get_parent_attribute('environment', extend=True, prepend=True)

    def _get_parent_attribute(self, attr, extend=False):
    def _get_parent_attribute(self, attr, extend=False, prepend=False):
        '''
        Generic logic to get the attribute or parent attribute for a block value.
        '''
@@ -294,7 +292,7 @@ class Block(Base, Become, Conditional, Taggable):
            try:
                parent_value = getattr(self._parent, attr, None)
                if extend:
                    value = self._extend_value(value, parent_value)
                    value = self._extend_value(value, parent_value, prepend)
                else:
                    value = parent_value
            except AttributeError:
@@ -303,7 +301,7 @@ class Block(Base, Become, Conditional, Taggable):
            try:
                parent_value = getattr(self._role, attr, None)
                if extend:
                    value = self._extend_value(value, parent_value)
                    value = self._extend_value(value, parent_value, prepend)
                else:
                    value = parent_value
@@ -313,7 +311,7 @@ class Block(Base, Become, Conditional, Taggable):
                    for dep in dep_chain:
                        dep_value = getattr(dep, attr, None)
                        if extend:
                            value = self._extend_value(value, dep_value)
                            value = self._extend_value(value, dep_value, prepend)
                        else:
                            value = dep_value
@@ -325,7 +323,7 @@ class Block(Base, Become, Conditional, Taggable):
            try:
                parent_value = getattr(self._play, attr, None)
                if extend:
                    value = self._extend_value(value, parent_value)
                    value = self._extend_value(value, parent_value, prepend)
                else:
                    value = parent_value
            except AttributeError:
@@ -19,14 +19,23 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import ast
import re

from jinja2.compiler import generate
from jinja2.exceptions import UndefinedError

from ansible.compat.six import text_type
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.playbook.attribute import FieldAttribute
from ansible.template import Templar
from ansible.template.safe_eval import safe_eval
from ansible.module_utils._text import to_native

LOOKUP_REGEX = re.compile(r'lookup\s*\(')
VALID_VAR_REGEX = re.compile("^[_A-Za-z][_a-zA-Z0-9]*$")
DEFINED_REGEX = re.compile(r'(hostvars\[.+\]|[\w_]+)\s+(not\s+is|is|is\s+not)\s+(defined|undefined)')

class Conditional:

    '''
@@ -51,6 +60,29 @@ class Conditional:
            if not isinstance(value, list):
                setattr(self, name, [ value ])

    def _get_attr_when(self):
        '''
        Override for the 'tags' getattr fetcher, used from Base.
        '''
        when = self._attributes['when']
        if when is None:
            when = []
        if hasattr(self, '_get_parent_attribute'):
            when = self._get_parent_attribute('when', extend=True, prepend=True)
        return when

    def extract_defined_undefined(self, conditional):
        results = []

        cond = conditional
        m = DEFINED_REGEX.search(cond)
        while m:
            results.append(m.groups())
            cond = cond[m.end():]
            m = DEFINED_REGEX.search(cond)

        return results

    def evaluate_conditional(self, templar, all_vars):
        '''
        Loops through the conditionals set on this object, returning
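Note: extract_defined_undefined() above repeatedly applies DEFINED_REGEX to peel
every "<var> is [not] defined/undefined" test out of a conditional string; the
error handler later in this file compares those tuples against the variable named
in an undefined-variable exception. What the regex captures:

    import re

    DEFINED_REGEX = re.compile(r'(hostvars\[.+\]|[\w_]+)\s+(not\s+is|is|is\s+not)\s+(defined|undefined)')
    cond = "foo is defined and hostvars['bar'] is not defined"
    print(DEFINED_REGEX.findall(cond))
    # [('foo', 'is', 'defined'), ("hostvars['bar']", 'is not', 'defined')]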
@@ -73,7 +105,9 @@ class Conditional:
                if not self._check_conditional(conditional, templar, all_vars):
                    return False
            except Exception as e:
                raise AnsibleError("The conditional check '%s' failed. The error was: %s" % (to_native(conditional), to_native(e)), obj=ds)
                raise AnsibleError(
                    "The conditional check '%s' failed. The error was: %s" % (to_native(conditional), to_native(e)), obj=ds
                )

        return True
@@ -88,21 +122,75 @@ class Conditional:
        if conditional is None or conditional == '':
            return True

        if conditional in all_vars and '-' not in text_type(all_vars[conditional]):
        # pull the "bare" var out, which allows for nested conditionals
        # and things like:
        # - assert:
        #     that:
        #     - item
        #   with_items:
        #   - 1 == 1
        if conditional in all_vars and VALID_VAR_REGEX.match(conditional):
            conditional = all_vars[conditional]

        # make sure the templar is using the variables specified with this method
        templar.set_available_variables(variables=all_vars)

        try:
            conditional = templar.template(conditional)
            # if the conditional is "unsafe", disable lookups
            disable_lookups = hasattr(conditional, '__UNSAFE__')
            conditional = templar.template(conditional, disable_lookups=disable_lookups)
            if not isinstance(conditional, text_type) or conditional == "":
                return conditional

            # a Jinja2 evaluation that results in something Python can eval!
            # update the lookups flag, as the string returned above may now be unsafe
            # and we don't want future templating calls to do unsafe things
            disable_lookups |= hasattr(conditional, '__UNSAFE__')

            # First, we do some low-level jinja2 parsing involving the AST format of the
            # statement to ensure we don't do anything unsafe (using the disable_lookup flag above)
            class CleansingNodeVisitor(ast.NodeVisitor):
                def generic_visit(self, node, inside_call=False, inside_yield=False):
                    if isinstance(node, ast.Call):
                        inside_call = True
                    elif isinstance(node, ast.Yield):
                        inside_yield = True
                    elif isinstance(node, ast.Str):
                        if disable_lookups:
                            if inside_call and node.s.startswith("__"):
                                # calling things with a dunder is generally bad at this point...
                                raise AnsibleError(
                                    "Invalid access found in the conditional: '%s'" % conditional
                                )
                            elif inside_yield:
                                # we're inside a yield, so recursively parse and traverse the AST
                                # of the result to catch forbidden syntax from executing
                                parsed = ast.parse(node.s, mode='exec')
                                cnv = CleansingNodeVisitor()
                                cnv.visit(parsed)
                    # iterate over all child nodes
                    for child_node in ast.iter_child_nodes(node):
                        self.generic_visit(
                            child_node,
                            inside_call=inside_call,
                            inside_yield=inside_yield
                        )
            try:
                e = templar.environment.overlay()
                e.filters.update(templar._get_filters())
                e.tests.update(templar._get_tests())

                res = e._parse(conditional, None, None)
                res = generate(res, e, None, None)
                parsed = ast.parse(res, mode='exec')

                cnv = CleansingNodeVisitor()
                cnv.visit(parsed)
            except Exception as e:
                raise AnsibleError("Invalid conditional detected: %s" % to_native(e))

            # and finally we generate and template the presented string and look at the resulting string
            presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
            conditional = templar.template(presented)
            val = conditional.strip()
            val = templar.template(presented, disable_lookups=disable_lookups).strip()
            if val == "True":
                return True
            elif val == "False":
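Note: the large hunk above adds two layers of defense before a conditional string is
ever rendered: lookups are disabled when the input carries the __UNSAFE__ marker,
and the Jinja2-generated code is parsed into a Python AST so a NodeVisitor can
reject suspicious string constants. A reduced sketch of that second screen (the
real CleansingNodeVisitor also tracks yields and recurses into them):

    import ast

    class DunderStringCheck(ast.NodeVisitor):
        # Reject any string constant starting with "__" that appears
        # anywhere inside a call expression.
        def visit_Call(self, node):
            for sub in ast.walk(node):
                if isinstance(sub, ast.Str) and sub.s.startswith('__'):
                    raise ValueError("Invalid access found: %r" % sub.s)
            self.generic_visit(node)

    DunderStringCheck().visit(ast.parse("getattr(x, '__class__')"))  # raises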
@@ -110,14 +198,33 @@ class Conditional:
            else:
                raise AnsibleError("unable to evaluate conditional: %s" % original)
        except (AnsibleUndefinedVariable, UndefinedError) as e:
            # the templating failed, meaning most likely a
            # variable was undefined. If we happened to be
            # looking for an undefined variable, return True,
            # otherwise fail
            if "is undefined" in original:
                return True
            elif "is defined" in original:
                return False
            else:
                raise AnsibleError("error while evaluating conditional (%s): %s" % (original, e))
            # the templating failed, meaning most likely a variable was undefined. If we happened
            # to be looking for an undefined variable, return True, otherwise fail
            try:
                # first we extract the variable name from the error message
                var_name = re.compile(r"'(hostvars\[.+\]|[\w_]+)' is undefined").search(str(e)).groups()[0]
                # next we extract all defined/undefined tests from the conditional string
                def_undef = self.extract_defined_undefined(conditional)
                # then we loop through these, comparing the error variable name against
                # each def/undef test we found above. If there is a match, we determine
                # whether the logic/state mean the variable should exist or not and return
                # the corresponding True/False
                for (du_var, logic, state) in def_undef:
                    # when we compare the var names, normalize quotes because something
                    # like hostvars['foo'] may be tested against hostvars["foo"]
                    if var_name.replace("'", '"') == du_var.replace("'", '"'):
                        # the should exist is a xor test between a negation in the logic portion
                        # against the state (defined or undefined)
                        should_exist = ('not' in logic) != (state == 'defined')
                        if should_exist:
                            return False
                        else:
                            return True
                # as nothing above matched the failed var name, re-raise here to
                # trigger the AnsibleUndefinedVariable exception again below
                raise
            except Exception as new_e:
                raise AnsibleUndefinedVariable(
                    "error while evaluating conditional (%s): %s" % (original, e)
                )
@@ -40,6 +40,8 @@ def load_list_of_blocks(ds, play, parent_block=None, role=None, task_include=Non

    # we import here to prevent a circular dependency with imports
    from ansible.playbook.block import Block
    from ansible.playbook.task_include import TaskInclude
    from ansible.playbook.role_include import IncludeRole

    assert isinstance(ds, (list, type(None)))
@@ -54,14 +56,17 @@ def load_list_of_blocks(ds, play, parent_block=None, role=None, task_include=Non
            task_include=task_include,
            use_handlers=use_handlers,
            variable_manager=variable_manager,
            loader=loader
            loader=loader,
        )
        # Implicit blocks are created by bare tasks listed in a play without
        # an explicit block statement. If we have two implicit blocks in a row,
        # squash them down to a single block to save processing time later.
        if b._implicit and len(block_list) > 0 and block_list[-1]._implicit:
            for t in b.block:
                t._block = block_list[-1]
                if isinstance(t._parent, (TaskInclude, IncludeRole)):
                    t._parent._parent = block_list[-1]
                else:
                    t._parent = block_list[-1]
            block_list[-1].block.extend(b.block)
        else:
            block_list.append(b)
@@ -197,7 +202,7 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
                    # the same fashion used by the on_include callback. We also do it here,
                    # because the recursive nature of helper methods means we may be loading
                    # nested includes, and we want the include order printed correctly
                    display.display("statically included: %s" % include_file, color=C.COLOR_SKIP)
                    display.vv("statically included: %s" % include_file)
                except AnsibleFileNotFound:
                    if t.static or \
                       C.DEFAULT_TASK_INCLUDES_STATIC or \
@@ -214,11 +219,13 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
                    task_list.append(t)
                    continue

                ti_copy = t.copy(exclude_parent=True)
                ti_copy._parent = block
                included_blocks = load_list_of_blocks(
                    data,
                    play=play,
                    parent_block=None,
                    task_include=t.copy(),
                    task_include=ti_copy,
                    role=role,
                    use_handlers=use_handlers,
                    loader=loader,
@@ -228,12 +235,12 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
                # pop tags out of the include args, if they were specified there, and assign
                # them to the include. If the include already had tags specified, we raise an
                # error so that users know not to specify them both ways
                tags = t.vars.pop('tags', [])
                tags = ti_copy.vars.pop('tags', [])
                if isinstance(tags, string_types):
                    tags = tags.split(',')

                if len(tags) > 0:
                    if len(t.tags) > 0:
                    if len(ti_copy.tags) > 0:
                        raise AnsibleParserError(
                            "Include tasks should not specify tags in more than one way (both via args and directly on the task). " \
                            "Mixing styles in which tags are specified is prohibited for whole import hierarchy, not only for single import statement",
@@ -242,7 +249,7 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
                        )
                    display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option")
                else:
                    tags = t.tags[:]
                    tags = ti_copy.tags[:]

                # now we extend the tags on each of the included blocks
                for b in included_blocks:
@@ -289,6 +296,7 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
                            (use_handlers and C.DEFAULT_HANDLER_INCLUDES_STATIC) or \
                            (not needs_templating and ir.all_parents_static() and not ir.loop)
                display.debug('Determined that if include_role static is %s' % str(is_static))

                if is_static:
                    # uses compiled list from object
                    t = task_list.extend(ir.get_block_list(variable_manager=variable_manager, loader=loader))
@@ -95,7 +95,9 @@ class Play(Base, Taggable, Become):
    def __init__(self):
        super(Play, self).__init__()

        self._included_conditional = None
        self._included_path = None
        self._removed_hosts = []
        self.ROLE_CACHE = {}

    def __repr__(self):
@@ -202,7 +204,7 @@ class Play(Base, Taggable, Become):
            for prompt_data in new_ds:
                if 'name' not in prompt_data:
                    display.deprecated("Using the 'short form' for vars_prompt has been deprecated")
                    for vname, prompt in prompt_data.iteritems():
                    for vname, prompt in prompt_data.items():
                        vars_prompts.append(dict(
                            name = vname,
                            prompt = prompt,
@@ -327,5 +329,6 @@ class Play(Base, Taggable, Become):
    def copy(self):
        new_me = super(Play, self).copy()
        new_me.ROLE_CACHE = self.ROLE_CACHE.copy()
        new_me._included_conditional = self._included_conditional
        new_me._included_path = self._included_path
        return new_me
@@ -31,6 +31,7 @@ import string
from ansible.compat.six import iteritems, string_types
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.utils.boolean import boolean
@@ -84,38 +85,39 @@ MAGIC_VARIABLE_MAPPING = dict(
    module_compression = ('ansible_module_compression',),
)

SU_PROMPT_LOCALIZATIONS = [
    'Password',
    '암호',
    'パスワード',
    'Adgangskode',
    'Contraseña',
    'Contrasenya',
    'Hasło',
    'Heslo',
    'Jelszó',
    'Lösenord',
    'Mật khẩu',
    'Mot de passe',
    'Parola',
    'Parool',
    'Pasahitza',
    'Passord',
    'Passwort',
    'Salasana',
    'Sandi',
    'Senha',
    'Wachtwoord',
    'ססמה',
    'Лозинка',
    'Парола',
    'Пароль',
    'गुप्तशब्द',
    'शब्दकूट',
    'సంకేతపదము',
    'හස්පදය',
    '密码',
    '密碼',
b_SU_PROMPT_LOCALIZATIONS = [
    to_bytes('Password'),
    to_bytes('암호'),
    to_bytes('パスワード'),
    to_bytes('Adgangskode'),
    to_bytes('Contraseña'),
    to_bytes('Contrasenya'),
    to_bytes('Hasło'),
    to_bytes('Heslo'),
    to_bytes('Jelszó'),
    to_bytes('Lösenord'),
    to_bytes('Mật khẩu'),
    to_bytes('Mot de passe'),
    to_bytes('Parola'),
    to_bytes('Parool'),
    to_bytes('Pasahitza'),
    to_bytes('Passord'),
    to_bytes('Passwort'),
    to_bytes('Salasana'),
    to_bytes('Sandi'),
    to_bytes('Senha'),
    to_bytes('Wachtwoord'),
    to_bytes('ססמה'),
    to_bytes('Лозинка'),
    to_bytes('Парола'),
    to_bytes('Пароль'),
    to_bytes('गुप्तशब्द'),
    to_bytes('शब्दकूट'),
    to_bytes('సంకేతపదము'),
    to_bytes('හස්පදය'),
    to_bytes('密码'),
    to_bytes('密碼'),
    to_bytes('口令'),
]

TASK_ATTRIBUTE_OVERRIDES = (
@@ -515,21 +517,24 @@ class PlayContext(Base):
        elif self.become_method == 'su':

            # passing code ref to examine prompt as simple string comparisson isn't good enough with su
            def detect_su_prompt(data):
                SU_PROMPT_LOCALIZATIONS_RE = re.compile("|".join(['(\w+\'s )?' + x + ' ?: ?' for x in SU_PROMPT_LOCALIZATIONS]), flags=re.IGNORECASE)
                return bool(SU_PROMPT_LOCALIZATIONS_RE.match(data))
            def detect_su_prompt(b_data):
                b_password_string = b"|".join([b'(\w+\'s )?' + x for x in b_SU_PROMPT_LOCALIZATIONS])
                # Colon or unicode fullwidth colon
                b_password_string = b_password_string + to_bytes(u' ?(:|：) ?')
                b_SU_PROMPT_LOCALIZATIONS_RE = re.compile(b_password_string, flags=re.IGNORECASE)
                return bool(b_SU_PROMPT_LOCALIZATIONS_RE.match(b_data))
            prompt = detect_su_prompt

            becomecmd = '%s %s %s -c %s' % (exe, flags, self.become_user, pipes.quote(command))

        elif self.become_method == 'pbrun':

            prompt='assword:'
            becomecmd = '%s -b %s -u %s %s' % (exe, flags, self.become_user, success_cmd)
            prompt='Password:'
            becomecmd = '%s %s -u %s %s' % (exe, flags, self.become_user, success_cmd)

        elif self.become_method == 'ksu':
            def detect_ksu_prompt(data):
                return re.match("Kerberos password for .*@.*:", data)
            def detect_ksu_prompt(b_data):
                return re.match(b"Kerberos password for .*@.*:", b_data)

            prompt = detect_ksu_prompt
            becomecmd = '%s %s %s -e %s' % (exe, self.become_user, flags, command)
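Note: detect_su_prompt() above moves the prompt matching to the byte level and
accepts either an ASCII colon or the fullwidth colon some localized su builds
print. The same regex construction against a truncated localization table (the
full table is b_SU_PROMPT_LOCALIZATIONS):

    import re

    b_localizations = [b'Password', u'Пароль'.encode('utf-8')]
    b_pattern = b"|".join([b"(\\w+'s )?" + x for x in b_localizations])
    b_pattern += u' ?(:|：) ?'.encode('utf-8')  # ASCII or fullwidth colon
    prompt_re = re.compile(b_pattern, flags=re.IGNORECASE)

    assert prompt_re.match(b'Password: ')
    assert prompt_re.match(u'Пароль:'.encode('utf-8'))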
@@ -49,6 +49,7 @@ class PlaybookInclude(Base, Conditional, Taggable):

        # import here to avoid a dependency loop
        from ansible.playbook import Playbook
        from ansible.playbook.play import Play

        # first, we use the original parent method to correctly load the object
        # via the load_data/preprocess_data system we normally use for other
@@ -61,15 +62,6 @@ class PlaybookInclude(Base, Conditional, Taggable):

        templar = Templar(loader=loader, variables=all_vars)

        try:
            forward_conditional = False
            if not new_obj.evaluate_conditional(templar=templar, all_vars=all_vars):
                return None
        except AnsibleError:
            # conditional evaluation raised an error, so we set a flag to indicate
            # we need to forward the conditionals on to the included play(s)
            forward_conditional = True

        # then we use the object to load a Playbook
        pb = Playbook(loader=loader)
@@ -82,6 +74,11 @@ class PlaybookInclude(Base, Conditional, Taggable):
        # finally, update each loaded playbook entry with any variables specified
        # on the included playbook and/or any tags which may have been set
        for entry in pb._entries:

            # conditional includes on a playbook need a marker to skip gathering
            if new_obj.when and isinstance(entry, Play):
                entry._included_conditional = new_obj.when[:]

            temp_vars = entry.vars.copy()
            temp_vars.update(new_obj.vars)
            param_tags = temp_vars.pop('tags', None)
@@ -95,9 +92,9 @@ class PlaybookInclude(Base, Conditional, Taggable):
            # Check to see if we need to forward the conditionals on to the included
            # plays. If so, we can take a shortcut here and simply prepend them to
            # those attached to each block (if any)
            if forward_conditional:
                for task_block in entry.pre_tasks + entry.roles + entry.tasks + entry.post_tasks:
                    task_block.when = self.when[:] + task_block.when
            if new_obj.when:
                for task_block in (entry.pre_tasks + entry.roles + entry.tasks + entry.post_tasks):
                    task_block._attributes['when'] = new_obj.when[:] + task_block.when[:]

        return pb
@@ -19,10 +19,10 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.compat.six import iteritems

import collections
import os

from ansible.compat.six import iteritems, binary_type, text_type
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
@@ -41,25 +41,54 @@ __all__ = ['Role', 'hash_params']
# the role due to the fact that it would require the use of self
# in a static method. This is also used in the base class for
# strategies (ansible/plugins/strategy/__init__.py)

def hash_params(params):
    if not isinstance(params, dict):
        if isinstance(params, list):
            return frozenset(params)
    """
    Construct a data structure of parameters that is hashable.

    This requires changing any mutable data structures into immutable ones.
    We chose a frozenset because role parameters have to be unique.

    .. warning:: this does not handle unhashable scalars.  Two things
        mitigate that limitation:

        1) There shouldn't be any unhashable scalars specified in the yaml
        2) Our only choice would be to return an error anyway.
    """
    # Any container is unhashable if it contains unhashable items (for
    # instance, tuple() is a Hashable subclass but if it contains a dict, it
    # cannot be hashed)
    if isinstance(params, collections.Container) and not isinstance(params, (text_type, binary_type)):
        if isinstance(params, collections.Mapping):
            try:
                # Optimistically hope the contents are all hashable
                new_params = frozenset(params.items())
            except TypeError:
                new_params = set()
                for k, v in params.items():
                    # Hash each entry individually
                    new_params.update((k, hash_params(v)))
                new_params = frozenset(new_params)

        elif isinstance(params, (collections.Set, collections.Sequence)):
            try:
                # Optimistically hope the contents are all hashable
                new_params = frozenset(params)
            except TypeError:
                new_params = set()
                for v in params:
                    # Hash each entry individually
                    new_params.update(hash_params(v))
                new_params = frozenset(new_params)
        else:
            return params
        else:
            s = set()
            for k,v in iteritems(params):
                if isinstance(v, dict):
                    s.update((k, hash_params(v)))
                elif isinstance(v, list):
                    things = []
                    for item in v:
                        things.append(hash_params(item))
                    s.update((k, tuple(things)))
                else:
                    s.update((k, v))
            return frozenset(s)
            # This is just a guess.
            new_params = frozenset(params)
        return new_params

    # Note: We do not handle unhashable scalars but our only choice would be
    # to raise an error there anyway.
    return frozenset((params,))


class Role(Base, Become, Conditional, Taggable):
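Note: the rewritten hash_params() above recurses through mappings, sets, and
sequences, converting each level into a frozenset so the result can serve as a
dictionary key (Ansible uses it to de-duplicate role invocations). A behaviour
sketch:

    params = {'tags': ['web', 'db'], 'port': 8080}
    hashed = hash_params(params)       # a frozenset, built recursively
    cache = {hashed: 'compiled role'}  # usable as a dict key, unlike params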
@@ -207,6 +236,8 @@ class Role(Base, Become, Conditional, Taggable):
            main_file = self._resolve_main(file_path, main)
            if self._loader.path_exists(main_file):
                return self._loader.load_from_file(main_file)
            elif main is not None:
                raise AnsibleParserError("Could not find specified file in role: %s" % main)
        return None

    def _resolve_main(self, basepath, main=None):
@@ -377,12 +408,14 @@ class Role(Base, Become, Conditional, Taggable):
                dep_blocks = dep.compile(play=play, dep_chain=new_dep_chain)
                block_list.extend(dep_blocks)

        for task_block in self._task_blocks:
        for idx, task_block in enumerate(self._task_blocks):
            new_task_block = task_block.copy(exclude_parent=True)
            if task_block._parent:
                new_task_block._parent = task_block._parent.copy()
            new_task_block._dep_chain = new_dep_chain
            new_task_block._play = play
            if idx == len(self._task_blocks) - 1:
                new_task_block._eor = True
            block_list.append(new_task_block)

        return block_list
@@ -34,6 +34,12 @@ from ansible.playbook.taggable import Taggable
from ansible.template import Templar
from ansible.utils.path import unfrackpath

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()


__all__ = ['RoleDefinition']
@@ -138,18 +144,22 @@ class RoleDefinition(Base, Become, Conditional, Taggable):
        # we always start the search for roles in the base directory of the playbook
        role_search_paths = [
            os.path.join(self._loader.get_basedir(), u'roles'),
            self._loader.get_basedir(),
        ]

        # also search in the configured roles path
        if C.DEFAULT_ROLES_PATH:
            role_search_paths.extend(C.DEFAULT_ROLES_PATH)

        # finally, append the roles basedir, if it was set, so we can
        # next, append the roles basedir, if it was set, so we can
        # search relative to that directory for dependent roles
        if self._role_basedir:
            role_search_paths.append(self._role_basedir)

        # finally as a last resort we look in the current basedir as set
        # in the loader (which should be the playbook dir itself) but without
        # the roles/ dir appended
        role_search_paths.append(self._loader.get_basedir())

        # create a templar class to template the dependency names, in
        # case they contain variables
        if self._variable_manager is not None:
@@ -193,6 +203,11 @@ class RoleDefinition(Base, Become, Conditional, Taggable):
            # or make this list more automatic in some way so we don't have to
            # remember to update it manually.
            if key not in base_attribute_names or key in ('connection', 'port', 'remote_user'):
                if key in ('connection', 'port', 'remote_user'):
                    display.deprecated("Using '%s' as a role param has been deprecated. " % key + \
                                       "In the future, these values should be entered in the `vars:` " + \
                                       "section for roles, but for now we'll store it as both a param and an attribute.")
                    role_def[key] = value
                # this key does not match a field attribute, so it must be a role param
                role_params[key] = value
            else:
@@ -28,6 +28,7 @@ from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.role.definition import RoleDefinition
from ansible.playbook.role.requirement import RoleRequirement
from ansible.module_utils._text import to_native


__all__ = ['RoleInclude']
@@ -49,7 +50,9 @@ class RoleInclude(RoleDefinition):
    @staticmethod
    def load(data, play, current_role_path=None, parent_role=None, variable_manager=None, loader=None):

        assert isinstance(data, string_types) or isinstance(data, dict) or isinstance(data, AnsibleBaseYAMLObject)
        if not (isinstance(data, string_types) or isinstance(data, dict) or isinstance(data, AnsibleBaseYAMLObject)):
            raise AnsibleParserError("Invalid role definition: %s" % to_native(data))

        if isinstance(data, string_types) and ',' in data:
            data = RoleRequirement.role_spec_parse(data)
@@ -21,6 +21,7 @@ __metaclass__ = type

from os.path import basename

from ansible.errors import AnsibleParserError
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.task import Task
from ansible.playbook.role import Role
@@ -46,8 +47,9 @@ class IncludeRole(Task):
    # ATTRIBUTES

    # private as this is a 'module options' vs a task property
    _static = FieldAttribute(isa='bool', default=None, private=True)
    _allow_duplicates = FieldAttribute(isa='bool', default=True, private=True)
    _private = FieldAttribute(isa='bool', default=None, private=True)
    _static = FieldAttribute(isa='bool', default=None)

    def __init__(self, block=None, role=None, task_include=None):
@@ -56,6 +58,7 @@ class IncludeRole(Task):
        self.statically_loaded = False
        self._from_files = {}
        self._parent_role = role
        self._role_name = None


    def get_block_list(self, play=None, variable_manager=None, loader=None):
|
|||
else:
|
||||
myplay = play
|
||||
|
||||
ri = RoleInclude.load(self.name, play=myplay, variable_manager=variable_manager, loader=loader)
|
||||
ri = RoleInclude.load(self._role_name, play=myplay, variable_manager=variable_manager, loader=loader)
|
||||
ri.vars.update(self.vars)
|
||||
|
||||
# build role
|
||||
actual_role = Role.load(ri, myplay, parent_role=self._parent_role, from_files=self._from_files)
|
||||
actual_role._metadata.allow_duplicates = self.allow_duplicates
|
||||
|
||||
# compile role
|
||||
blocks = actual_role.compile(play=myplay)
|
||||
# compile role with parent roles as dependencies to ensure they inherit
|
||||
# variables
|
||||
if not self._parent_role:
|
||||
dep_chain = []
|
||||
else:
|
||||
dep_chain = list(self._parent_role._parents)
|
||||
dep_chain.append(self._parent_role)
|
||||
|
||||
# set parent to ensure proper inheritance
|
||||
blocks = actual_role.compile(play=myplay, dep_chain=dep_chain)
|
||||
for b in blocks:
|
||||
b._parent = self
|
||||
|
||||
|
@@ -89,19 +98,25 @@ class IncludeRole(Task):

        ir = IncludeRole(block, role, task_include=task_include).load_data(data, variable_manager=variable_manager, loader=loader)

        # set built in's
        attributes = frozenset(ir._valid_attrs.keys())
        for builtin in attributes:
            if ir.args.get(builtin):
                setattr(ir, builtin, ir.args.get(builtin))
        ### Process options
        # name is needed, or use role as alias
        ir._role_name = ir.args.get('name', ir.args.get('role'))
        if ir._role_name is None:
            raise AnsibleParserError("'name' is a required field for include_role.")

        # build options for role includes
        for key in ['tasks', 'vars', 'defaults']:
            from_key = key + '_from'
            from_key ='%s_from' % key
            if ir.args.get(from_key):
                ir._from_files[key] = basename(ir.args.get(from_key))

        return ir.load_data(data, variable_manager=variable_manager, loader=loader)
        #FIXME: find a way to make this list come from object ( attributes does not work as per below)
        # manual list as otherwise the options would set other task parameters we don't want.
        for option in ['private', 'allow_duplicates']:
            if option in ir.args:
                setattr(ir, option, ir.args.get(option))

        return ir

    def copy(self, exclude_parent=False, exclude_tasks=False):
@ -109,6 +124,7 @@ class IncludeRole(Task):
|
|||
new_me.statically_loaded = self.statically_loaded
|
||||
new_me._from_files = self._from_files.copy()
|
||||
new_me._parent_role = self._parent_role
|
||||
new_me._role_name = self._role_name
|
||||
|
||||
return new_me
|
||||
|
||||
|
|
|
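The load() rewrite above stops copying arbitrary task attributes out of the raw args and instead whitelists the include_role options ('private', 'allow_duplicates') plus the '*_from' file overrides. A minimal standalone sketch of that whitelist pattern follows; the function and argument names are illustrative, not Ansible's API:

from os.path import basename

def split_include_options(args):
    """Split raw include_role args into role options and per-part file overrides."""
    options = {name: args[name] for name in ('private', 'allow_duplicates') if name in args}
    from_files = {}
    for key in ('tasks', 'vars', 'defaults'):
        from_key = '%s_from' % key
        if args.get(from_key):
            # only the basename is kept, mirroring the diff above
            from_files[key] = basename(args[from_key])
    return options, from_files

options, from_files = split_include_options(
    {'name': 'myrole', 'tasks_from': 'setup.yml', 'allow_duplicates': False})
assert options == {'allow_duplicates': False}
assert from_files == {'tasks': 'setup.yml'}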
@@ -21,8 +21,8 @@ __metaclass__ = type

 import os

-from ansible.compat.six import iteritems, string_types
-from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable
+from ansible.module_utils.six import iteritems, string_types
 from ansible.module_utils._text import to_native
 from ansible.parsing.mod_args import ModuleArgsParser
 from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping, AnsibleUnicode

@@ -259,25 +259,39 @@ class Task(Base, Conditional, Taggable, Become):
         Override post validation of vars on the play, as we don't want to
         template these too early.
         '''
-        if value is None:
-            return dict()
-
-        elif isinstance(value, list):
-            if len(value) == 1:
-                return templar.template(value[0], convert_bare=True)
-            else:
-                env = []
-                for env_item in value:
-                    if isinstance(env_item, (string_types, AnsibleUnicode)) and env_item in templar._available_variables.keys():
-                        env[env_item] = templar.template(env_item, convert_bare=False)
-        elif isinstance(value, dict):
-            env = dict()
-            for env_item in value:
-                if isinstance(env_item, (string_types, AnsibleUnicode)) and env_item in templar._available_variables.keys():
-                    env[env_item] = templar.template(value[env_item], convert_bare=False)
-
-        # at this point it should be a simple string
-        return templar.template(value, convert_bare=True)
+        env = {}
+        if value is not None:
+
+            def _parse_env_kv(k, v):
+                try:
+                    env[k] = templar.template(v, convert_bare=False)
+                except AnsibleUndefinedVariable as e:
+                    if self.action in ('setup', 'gather_facts') and 'ansible_env' in to_native(e):
+                        # ignore as fact gathering sets ansible_env
+                        pass
+
+            if isinstance(value, list):
+                for env_item in value:
+                    if isinstance(env_item, dict):
+                        for k in env_item:
+                            _parse_env_kv(k, env_item[k])
+                    else:
+                        isdict = templar.template(env_item, convert_bare=False)
+                        if isinstance(isdict, dict):
+                            env.update(isdict)
+                        else:
+                            display.warning("could not parse environment value, skipping: %s" % value)
+
+            elif isinstance(value, dict):
+                # should not really happen
+                env = dict()
+                for env_item in value:
+                    _parse_env_kv(env_item, value[env_item])
+            else:
+                # at this point it should be a simple string, also should not happen
+                env = templar.template(value, convert_bare=False)
+
+        return env

     def _post_validate_changed_when(self, attr, value, templar):
         '''

@@ -376,12 +390,6 @@ class Task(Base, Conditional, Taggable, Become):

         super(Task, self).deserialize(data)

-    def evaluate_conditional(self, templar, all_vars):
-        if self._parent is not None:
-            if not self._parent.evaluate_conditional(templar, all_vars):
-                return False
-        return super(Task, self).evaluate_conditional(templar, all_vars)
-
     def set_loader(self, loader):
         '''
         Sets the loader on this object and recursively on parent, child objects.

@@ -394,7 +402,7 @@ class Task(Base, Conditional, Taggable, Become):
         if self._parent:
             self._parent.set_loader(loader)

-    def _get_parent_attribute(self, attr, extend=False):
+    def _get_parent_attribute(self, attr, extend=False, prepend=False):
         '''
         Generic logic to get the attribute or parent attribute for a task value.
         '''

@@ -405,7 +413,7 @@ class Task(Base, Conditional, Taggable, Become):
             if self._parent and (value is None or extend):
                 parent_value = getattr(self._parent, attr, None)
                 if extend:
-                    value = self._extend_value(value, parent_value)
+                    value = self._extend_value(value, parent_value, prepend)
                 else:
                     value = parent_value
         except KeyError:

@@ -417,7 +425,7 @@ class Task(Base, Conditional, Taggable, Become):
         '''
         Override for the 'tags' getattr fetcher, used from Base.
         '''
-        return self._get_parent_attribute('environment', extend=True)
+        return self._get_parent_attribute('environment', extend=True, prepend=True)

     def get_dep_chain(self):
         if self._parent:
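The new _post_validate_environment above accepts either a dict or a list of dicts/strings for a task's environment: and folds everything into one mapping. A rough standalone model of that merge, using a plain dict lookup as a stand-in for the templar (no undefined-variable handling, no Ansible types):

def merge_environment(value, variables):
    """Fold an 'environment:' value (dict, or list of dicts/str) into one dict."""
    env = {}
    if value is None:
        return env
    items = value if isinstance(value, list) else [value]
    for item in items:
        if isinstance(item, dict):
            env.update(item)
        elif isinstance(item, str):
            # a bare string is treated as the name of a dict-valued variable
            resolved = variables.get(item)
            if isinstance(resolved, dict):
                env.update(resolved)
    return env

print(merge_environment(
    [{'PATH': '/opt/bin'}, 'proxy_env'],
    {'proxy_env': {'http_proxy': 'http://proxy:3128'}}))
# {'PATH': '/opt/bin', 'http_proxy': 'http://proxy:3128'}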
@@ -221,7 +221,7 @@ class PluginLoader:
             self._extra_dirs.append(directory)
             self._paths = None

-    def find_plugin(self, name, mod_type=''):
+    def find_plugin(self, name, mod_type='', ignore_deprecated=False):
         ''' Find a plugin named name '''

         if mod_type:

@@ -297,7 +297,7 @@ class PluginLoader:
             alias_name = '_' + name
             # We've already cached all the paths at this point
             if alias_name in pull_cache:
-                if not os.path.islink(pull_cache[alias_name]):
+                if not ignore_deprecated and not os.path.islink(pull_cache[alias_name]):
                     display.deprecated('%s is kept for backwards compatibility '
                                        'but usage is discouraged. The module '
                                        'documentation details page may explain '

@@ -373,6 +373,7 @@ class PluginLoader:
     def all(self, *args, **kwargs):
         ''' instantiates all plugins with the same arguments '''

+        path_only = kwargs.pop('path_only', False)
         class_only = kwargs.pop('class_only', False)
         all_matches = []
         found_in_cache = True

@@ -385,6 +386,10 @@ class PluginLoader:
             if '__init__' in name:
                 continue

+            if path_only:
+                yield path
+                continue
+
             if path not in self._module_cache:
                 self._module_cache[path] = self._load_module_source(name, path)
                 found_in_cache = False
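The new path_only flag lets callers (such as the fact-scrubbing code later in this diff) enumerate plugin file paths without importing each module. A small sketch of the same short-circuiting generator shape, with the loader internals faked:

import os

def iter_plugins(paths, path_only=False):
    """Yield plugin paths, or (name, path) pairs that would require a module load."""
    for path in paths:
        name = os.path.splitext(os.path.basename(path))[0]
        if '__init__' in name:
            continue
        if path_only:
            yield path          # skip the expensive import entirely
            continue
        yield (name, path)      # stand-in for loading the module source

paths = ['/plugins/connection/ssh.py', '/plugins/connection/__init__.py']
assert list(iter_plugins(paths, path_only=True)) == ['/plugins/connection/ssh.py']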
@@ -30,15 +30,16 @@ import tempfile
 import time
 from abc import ABCMeta, abstractmethod

-from ansible.compat.six import binary_type, text_type, iteritems, with_metaclass
-
 from ansible import constants as C
+from ansible.compat.six import binary_type, string_types, text_type, iteritems, with_metaclass
 from ansible.errors import AnsibleError, AnsibleConnectionFailure
 from ansible.executor.module_common import modify_module
 from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.json_utils import _filter_non_json_lines
 from ansible.parsing.utils.jsonify import jsonify
+from ansible.playbook.play_context import MAGIC_VARIABLE_MAPPING
 from ansible.release import __version__
 from ansible.vars.unsafe_proxy import wrap_var


 try:

@@ -217,7 +218,12 @@ class ActionBase(with_metaclass(ABCMeta, object)):

         tmp_mode = 0o700

-        cmd = self._connection._shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
+        if use_system_tmp:
+            tmpdir = None
+        else:
+            tmpdir = self._remote_expand_user(C.DEFAULT_REMOTE_TMP, sudoable=False)
+
+        cmd = self._connection._shell.mkdtemp(basefile, use_system_tmp, tmp_mode, tmpdir)
         result = self._low_level_execute_command(cmd, sudoable=False)

         # error handling on this seems a little aggressive?

@@ -358,11 +364,16 @@ class ActionBase(with_metaclass(ABCMeta, object)):
         # Try to use file system acls to make the files readable for sudo'd
         # user
         if execute:
-            mode = 'rx'
+            chmod_mode = 'rx'
+            setfacl_mode = 'r-x'
         else:
-            mode = 'rX'
+            chmod_mode = 'rX'
+            ### Note: this form fails silently on freebsd.  We currently
+            # never call _fixup_perms2() with execute=False but if we
+            # start to we'll have to fix this.
+            setfacl_mode = 'r-X'

-        res = self._remote_set_user_facl(remote_paths, self._play_context.become_user, mode)
+        res = self._remote_set_user_facl(remote_paths, self._play_context.become_user, setfacl_mode)
         if res['rc'] != 0:
             # File system acls failed; let's try to use chown next
             # Set executable bit first as on some systems an

@@ -370,7 +381,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
             if execute:
                 res = self._remote_chmod(remote_paths, 'u+x')
                 if res['rc'] != 0:
-                    raise AnsibleError('Failed to set file mode on remote temporary files (rc: {0}, err: {1})'.format(res['rc'], res['stderr']))
+                    raise AnsibleError('Failed to set file mode on remote temporary files (rc: {0}, err: {1})'.format(res['rc'], to_native(res['stderr'])))

             res = self._remote_chown(remote_paths, self._play_context.become_user)
             if res['rc'] != 0 and remote_user == 'root':

@@ -384,20 +395,20 @@ class ActionBase(with_metaclass(ABCMeta, object)):
                 display.warning('Using world-readable permissions for temporary files Ansible needs to create when becoming an unprivileged user.'
                                 ' This may be insecure. For information on securing this, see'
                                 ' https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user')
-                res = self._remote_chmod(remote_paths, 'a+%s' % mode)
+                res = self._remote_chmod(remote_paths, 'a+%s' % chmod_mode)
                 if res['rc'] != 0:
-                    raise AnsibleError('Failed to set file mode on remote files (rc: {0}, err: {1})'.format(res['rc'], res['stderr']))
+                    raise AnsibleError('Failed to set file mode on remote files (rc: {0}, err: {1})'.format(res['rc'], to_native(res['stderr'])))
             else:
                 raise AnsibleError('Failed to set permissions on the temporary files Ansible needs to create when becoming an unprivileged user'
                                    ' (rc: {0}, err: {1}). For information on working around this,'
-                                   ' see https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user'.format(res['rc'], res['stderr']))
+                                   ' see https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user'.format(res['rc'], to_native(res['stderr'])))
         elif execute:
             # Can't depend on the file being transferred with execute
             # permissions.  Only need user perms because no become was
             # used here
             res = self._remote_chmod(remote_paths, 'u+x')
             if res['rc'] != 0:
-                raise AnsibleError('Failed to set file mode on remote files (rc: {0}, err: {1})'.format(res['rc'], res['stderr']))
+                raise AnsibleError('Failed to set file mode on remote files (rc: {0}, err: {1})'.format(res['rc'], to_native(res['stderr'])))

         return remote_paths

@@ -448,6 +459,8 @@ class ActionBase(with_metaclass(ABCMeta, object)):
         # happens sometimes when it is a dir and not on bsd
         if 'checksum' not in mystat['stat']:
             mystat['stat']['checksum'] = ''
+        elif not isinstance(mystat['stat']['checksum'], string_types):
+            raise AnsibleError("Invalid checksum returned by stat: expected a string type but got %s" % type(mystat['stat']['checksum']))

         return mystat['stat']

@@ -477,7 +490,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
         finally:
             return x

-    def _remote_expand_user(self, path):
+    def _remote_expand_user(self, path, sudoable=True):
         ''' takes a remote path and performs tilde expansion on the remote host '''
         if not path.startswith('~'):  # FIXME: Windows paths may start with "~ instead of just ~
             return path

@@ -485,13 +498,11 @@ class ActionBase(with_metaclass(ABCMeta, object)):
         # FIXME: Can't use os.path.sep for Windows paths.
         split_path = path.split(os.path.sep, 1)
         expand_path = split_path[0]
-        if expand_path == '~':
-            if self._play_context.become and self._play_context.become_user:
-                expand_path = '~%s' % self._play_context.become_user
+        if sudoable and expand_path == '~' and self._play_context.become and self._play_context.become_user:
+            expand_path = '~%s' % self._play_context.become_user

         cmd = self._connection._shell.expand_user(expand_path)
         data = self._low_level_execute_command(cmd, sudoable=False)
-        #initial_fragment = utils.last_non_blank_line(data['stdout'])
         initial_fragment = data['stdout'].strip().splitlines()[-1]

         if not initial_fragment:

@@ -663,6 +674,39 @@ class ActionBase(with_metaclass(ABCMeta, object)):
         display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
         return data

+    def _clean_returned_data(self, data):
+        remove_keys = set()
+        fact_keys = set(data.keys())
+        # first we add all of our magic variable names to the set of
+        # keys we want to remove from facts
+        for magic_var in MAGIC_VARIABLE_MAPPING:
+            remove_keys.update(fact_keys.intersection(MAGIC_VARIABLE_MAPPING[magic_var]))
+        # next we remove any connection plugin specific vars
+        for conn_path in self._shared_loader_obj.connection_loader.all(path_only=True):
+            try:
+                conn_name = os.path.splitext(os.path.basename(conn_path))[0]
+                re_key = re.compile('^ansible_%s_' % conn_name)
+                for fact_key in fact_keys:
+                    if re_key.match(fact_key):
+                        remove_keys.add(fact_key)
+            except AttributeError:
+                pass
+
+        # remove some KNOWN keys
+        for hard in ['ansible_rsync_path', 'ansible_playbook_python']:
+            if hard in fact_keys:
+                remove_keys.add(hard)
+
+        # finally, we search for interpreter keys to remove
+        re_interp = re.compile('^ansible_.*_interpreter$')
+        for fact_key in fact_keys:
+            if re_interp.match(fact_key):
+                remove_keys.add(fact_key)
+        # then we remove them (except for ssh host keys)
+        for r_key in remove_keys:
+            if not r_key.startswith('ansible_ssh_host_key_'):
+                del data[r_key]
+
     def _parse_returned_data(self, res):
         try:
             filtered_output, warnings = _filter_non_json_lines(res.get('stdout', u''))

@@ -670,6 +714,12 @@ class ActionBase(with_metaclass(ABCMeta, object)):
                 display.warning(w)
             data = json.loads(filtered_output)
             data['_ansible_parsed'] = True
+            if 'ansible_facts' in data and isinstance(data['ansible_facts'], dict):
+                self._clean_returned_data(data['ansible_facts'])
+                data['ansible_facts'] = wrap_var(data['ansible_facts'])
+            if 'add_host' in data and isinstance(data['add_host'].get('host_vars', None), dict):
+                self._clean_returned_data(data['add_host']['host_vars'])
+                data['add_host'] = wrap_var(data['add_host'])
         except ValueError:
             # not valid json, lets try to capture error
             data = dict(failed=True, _ansible_parsed=False)

@@ -721,8 +771,8 @@ class ActionBase(with_metaclass(ABCMeta, object)):

         # Change directory to basedir of task for command execution when connection is local
         if self._connection.transport == 'local':
-            os.chdir(self._loader.get_basedir())
-
+            cwd = os.getcwd()
+            os.chdir(self._loader.get_basedir())
         try:
             rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)
         finally:

@@ -817,6 +867,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
         to get back the first existing file found.
         '''

+        # dwim already deals with playbook basedirs
         path_stack = self._task.get_search_path()

         result = self._loader.path_dwim_relative_stack(path_stack, dirname, needle)
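The added _clean_returned_data strips magic, connection-specific, and interpreter variables out of returned facts so a module result cannot override them, with ssh host keys explicitly exempted. A simplified standalone version of the same regex-based scrub (the key lists here are abbreviated stand-ins, not the full MAGIC_VARIABLE_MAPPING):

import re

KNOWN_BAD = {'ansible_rsync_path', 'ansible_playbook_python'}
RE_CONN = re.compile(r'^ansible_(ssh|winrm|docker)_')   # abbreviated connection list
RE_INTERP = re.compile(r'^ansible_.*_interpreter$')

def clean_facts(facts):
    """Drop keys a module must not set, keeping ssh host keys."""
    remove = {k for k in facts
              if k in KNOWN_BAD or RE_CONN.match(k) or RE_INTERP.match(k)}
    for key in remove:
        if not key.startswith('ansible_ssh_host_key_'):
            del facts[key]
    return facts

facts = {'ansible_ssh_user': 'evil', 'ansible_python_interpreter': '/tmp/x',
         'ansible_ssh_host_key_rsa_public': 'AAAA...', 'os_family': 'Debian'}
clean_facts(facts)
assert set(facts) == {'ansible_ssh_host_key_rsa_public', 'os_family'}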
@@ -103,13 +103,8 @@ class ActionModule(ActionBase):
             return result

         remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
-        if not tmp:
-            tmp = self._make_tmp_path(remote_user)
-            self._cleanup_remote_tmp = True
-
         if boolean(remote_src):
-            result.update(self._execute_module(tmp=tmp, task_vars=task_vars, delete_remote_tmp=False))
-            self._remove_tmp_path(tmp)
+            result.update(self._execute_module(tmp=tmp, task_vars=task_vars))
             return result
         else:
             try:

@@ -119,6 +114,10 @@ class ActionModule(ActionBase):
             result['msg'] = to_native(e)
             return result

+        if not tmp:
+            tmp = self._make_tmp_path(remote_user)
+            self._cleanup_remote_tmp = True
+
         if not os.path.isdir(src):
             result['failed'] = True
             result['msg'] = u"Source (%s) is not a directory" % src
@@ -91,7 +91,15 @@ class ActionModule(ActionBase):
         async_limit = self._task.async
         async_jid = str(random.randint(0, 999999999999))

-        async_cmd = [env_string, remote_async_module_path, async_jid, async_limit, remote_module_path]
+        # call the interpreter for async_wrapper directly
+        # this permits use of a script for an interpreter on non-Linux platforms
+        # TODO: re-implement async_wrapper as a regular module to avoid this special case
+        interpreter = shebang.replace('#!', '').strip()
+        async_cmd = [interpreter, remote_async_module_path, async_jid, async_limit, remote_module_path]
+
+        if env_string:
+            async_cmd.insert(0, env_string)

         if argsfile:
             async_cmd.append(argsfile)
         else:
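The async change above invokes async_wrapper through the interpreter parsed from its shebang line rather than executing the file directly, so hosts whose interpreter is itself a script still work. Roughly, with illustrative values:

def build_async_cmd(shebang, wrapper, jid, limit, module_path, env_string=None):
    """Assemble the async_wrapper invocation from the module's shebang line."""
    interpreter = shebang.replace('#!', '').strip()
    cmd = [interpreter, wrapper, jid, limit, module_path]
    if env_string:
        cmd.insert(0, env_string)
    return cmd

print(build_async_cmd('#!/usr/bin/python', '/tmp/async_wrapper.py',
                      '123', '45', '/tmp/module.py'))
# ['/usr/bin/python', '/tmp/async_wrapper.py', '123', '45', '/tmp/module.py']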
@@ -86,7 +86,7 @@ class ActionModule(ActionBase):
         # if we have first_available_file in our vars
         # look up the files and use the first one we find as src
         elif remote_src:
-            result.update(self._execute_module(module_name='copy', module_args=self._task.args, task_vars=task_vars, delete_remote_tmp=False))
+            result.update(self._execute_module(module_name='copy', module_args=self._task.args, task_vars=task_vars))
             return result
         else:  # find in expected paths
             try:
@@ -22,7 +22,7 @@ from os import path, walk
 import re

 from ansible.errors import AnsibleError
-from ansible.module_utils._text import to_native
+from ansible.module_utils._text import to_native, to_text
 from ansible.plugins.action import ActionBase

@@ -137,8 +137,8 @@ class ActionModule(ActionBase):
             results.update(updated_results)

         except AnsibleError as e:
-            failed = True
             err_msg = to_native(e)
+            raise AnsibleError(err_msg)

         if self.return_results_as_name:
             scope = dict()

@@ -226,7 +226,7 @@ class ActionModule(ActionBase):
             return success
         return success

-    def _load_files(self, filename):
+    def _load_files(self, filename, validate_extensions=False):
         """ Loads a file and converts the output into a valid Python dict.
         Args:
             filename (str): The source file.

@@ -237,7 +237,7 @@ class ActionModule(ActionBase):
         results = dict()
         failed = False
         err_msg = ''
-        if not self._is_valid_file_ext(filename):
+        if validate_extensions and not self._is_valid_file_ext(filename):
             failed = True
             err_msg = (
                 '{0} does not have a valid extension: {1}'

@@ -245,7 +245,9 @@ class ActionModule(ActionBase):
             )
             return failed, err_msg, results

-        data, show_content = self._loader._get_file_contents(filename)
+        b_data, show_content = self._loader._get_file_contents(filename)
+        data = to_text(b_data, errors='surrogate_or_strict')
+
         self.show_content = show_content
         data = self._loader.load(data, show_content)
         if not data:

@@ -287,7 +289,7 @@ class ActionModule(ActionBase):

             if not stop_iter and not failed:
                 if path.exists(filepath) and not self._ignore_file(filename):
-                    failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)
+                    failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)
                     if not failed:
                         results.update(loaded_data)
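include_vars now decodes the raw bytes from _get_file_contents to text before parsing, and 'surrogate_or_strict' turns undecodable bytes into a hard error instead of silent corruption. A dependency-free sketch of that decode-then-parse step, using JSON and plain UTF-8 decoding as stand-ins for Ansible's YAML loader and to_text:

import json

def load_vars_bytes(b_data):
    """Decode raw file bytes first, then parse; mirrors the to_text() step added above."""
    data = b_data.decode('utf-8', errors='strict')  # stand-in for to_text(..., errors='surrogate_or_strict')
    return json.loads(data)

assert load_vars_bytes(b'{"greeting": "hello"}') == {'greeting': 'hello'}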
@@ -125,8 +125,9 @@ class ActionModule(ActionBase):
         fd = None
         try:
             fd = self._connection._new_stdin.fileno()
-        except ValueError:
-            # someone is using a closed file descriptor as stdin
+        except (ValueError, AttributeError):
+            # ValueError: someone is using a closed file descriptor as stdin
+            # AttributeError: someone is using a null file descriptor as stdin on windoez
             pass
         if fd is not None:
             if isatty(fd):
@@ -26,7 +26,7 @@ class ActionModule(ActionBase):
     TRANSFERS_FILES = False

     UNUSED_PARAMS = {
-        'systemd': ['pattern', 'runlevels', 'sleep', 'arguments'],
+        'systemd': ['pattern', 'runlevel', 'sleep', 'arguments', 'args'],
     }

     def run(self, tmp=None, task_vars=None):
lib/ansible/plugins/action/set_stats.py (new file, 73 lines)

@@ -0,0 +1,73 @@
+# Copyright 2016 Ansible (RedHat, Inc)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.compat.six import iteritems, string_types
+from ansible.constants import mk_boolean as boolean
+from ansible.plugins.action import ActionBase
+from ansible.utils.vars import isidentifier
+
+
+class ActionModule(ActionBase):
+
+    TRANSFERS_FILES = False
+
+    #TODO: document this in non-empty set_stats.py module
+    def run(self, tmp=None, task_vars=None):
+        if task_vars is None:
+            task_vars = dict()
+
+        result = super(ActionModule, self).run(tmp, task_vars)
+
+        stats = {'data': {}, 'per_host': False, 'aggregate': True}
+
+        if self._task.args:
+            data = self._task.args.get('data', {})
+
+            if not isinstance(data, dict):
+                data = self._templar.template(data, convert_bare=False, fail_on_undefined=True)
+
+            if not isinstance(data, dict):
+                result['failed'] = True
+                result['msg'] = "The 'data' option needs to be a dictionary/hash"
+                return result
+
+            # set boolean options, defaults are set above in stats init
+            for opt in ['per_host', 'aggregate']:
+                val = self._task.args.get(opt, None)
+                if val is not None:
+                    if not isinstance(val, bool):
+                        stats[opt] = boolean(self._templar.template(val))
+                    else:
+                        stats[opt] = val
+
+            for (k, v) in iteritems(data):
+
+                k = self._templar.template(k)
+
+                if not isidentifier(k):
+                    result['failed'] = True
+                    result['msg'] = "The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only letters, numbers and underscores." % k
+                    return result
+
+                stats['data'][k] = self._templar.template(v)
+
+        result['changed'] = False
+        result['ansible_stats'] = stats
+
+        return result
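The new set_stats action validates each templated stat name as a legal variable identifier and coerces its boolean options. A rough standalone version of those two checks; both functions approximate Ansible's helpers (isidentifier, mk_boolean) rather than reproducing them:

import keyword

def isidentifier_like(name):
    """Approximation of ansible.utils.vars.isidentifier."""
    return isinstance(name, str) and name.isidentifier() and not keyword.iskeyword(name)

def coerce_bool(val):
    """Rough stand-in for constants.mk_boolean; accepted strings abbreviated."""
    if isinstance(val, bool):
        return val
    return str(val).lower() in ('yes', 'on', '1', 'true')

assert isidentifier_like('run_count') and not isidentifier_like('2fast')
assert coerce_bool('yes') and not coerce_bool('no')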
@@ -114,7 +114,7 @@ class ActionModule(ActionBase):
         # connection to the remote host
         if 'ansible_syslog_facility' in task_vars:
             del task_vars['ansible_syslog_facility']
-        for key in task_vars.keys():
+        for key in list(task_vars.keys()):
             if key.startswith("ansible_") and key.endswith("_interpreter"):
                 del task_vars[key]
@@ -23,6 +23,7 @@ import pwd
 import time

 from ansible import constants as C
 from ansible.compat.six import string_types
 from ansible.errors import AnsibleError
 from ansible.module_utils._text import to_bytes, to_native, to_text
 from ansible.plugins.action import ActionBase
@@ -115,19 +116,28 @@ class ActionModule(ActionBase):
                 time.localtime(os.path.getmtime(b_source))
             )

-            # Create a new searchpath list to assign to the templar environment's file
-            # loader, so that it knows about the other paths to find template files
-            searchpath = [self._loader._basedir, os.path.dirname(source)]
-            if self._task._role is not None:
-                if C.DEFAULT_ROLES_PATH:
-                    searchpath[:0] = C.DEFAULT_ROLES_PATH
-                searchpath.insert(1, self._task._role._role_path)
+            searchpath = []
+            # set jinja2 internal search path for includes
+            if 'ansible_search_path' in task_vars:
+                searchpath = task_vars['ansible_search_path']
+                # our search paths aren't actually the proper ones for jinja includes.
+
+            searchpath.extend([self._loader._basedir, os.path.dirname(source)])
+
+            # We want to search into the 'templates' subdir of each search path in
+            # addition to our original search paths.
+            newsearchpath = []
+            for p in searchpath:
+                newsearchpath.append(os.path.join(p, 'templates'))
+                newsearchpath.append(p)
+            searchpath = newsearchpath

             self._templar.environment.loader.searchpath = searchpath

             old_vars = self._templar._available_variables
             self._templar.set_available_variables(temp_vars)
-            resultant = self._templar.template(template_data, preserve_trailing_newlines=True, escape_backslashes=False, convert_data=False)
+            resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
             self._templar.set_available_variables(old_vars)
         except Exception as e:
             result['failed'] = True
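The template action now derives its Jinja2 include search path from ansible_search_path and doubles each entry with its templates/ subdirectory, most specific first. The list-building step in isolation, with illustrative paths:

import os

def build_searchpath(search_path, basedir, source):
    """Interleave each candidate dir with its 'templates' subdir."""
    searchpath = list(search_path)
    searchpath.extend([basedir, os.path.dirname(source)])
    newsearchpath = []
    for p in searchpath:
        newsearchpath.append(os.path.join(p, 'templates'))
        newsearchpath.append(p)
    return newsearchpath

print(build_searchpath(['/play/roles/web'], '/play', '/play/templates/site.conf.j2'))
# ['/play/roles/web/templates', '/play/roles/web', '/play/templates', '/play',
#  '/play/templates/templates', '/play/templates']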
lib/ansible/plugins/cache/jsonfile.py (9 changed lines)

@@ -63,8 +63,11 @@ class CacheModule(BaseCacheModule):
             try:
                 os.makedirs(self._cache_dir)
             except (OSError,IOError) as e:
-                display.warning("error in 'jsonfile' cache plugin while trying to create cache dir %s : %s" % (self._cache_dir, to_bytes(e)))
-                return None
+                raise AnsibleError("error in 'jsonfile' cache plugin while trying to create cache dir %s : %s" % (self._cache_dir, to_bytes(e)))
+        else:
+            for x in (os.R_OK, os.W_OK, os.X_OK):
+                if not os.access(self._cache_dir, x):
+                    raise AnsibleError("error in '%s' cache, configured path (%s) does not have necessary permissions (rwx), disabling plugin" % (self.plugin_name, self._cache_dir))

     def get(self, key):
         """ This checks the in memory cache first as the fact was not expired at 'gather time'

@@ -124,7 +127,7 @@ class CacheModule(BaseCacheModule):
                 return False
             else:
                 display.warning("error in 'jsonfile' cache plugin while trying to stat %s : %s" % (cachefile, to_bytes(e)))
-                pass
+                return False

         if time.time() - st.st_mtime <= self._timeout:
             return False
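The jsonfile expiry test above compares the cache file's mtime against the configured timeout. The same check in isolation, with a timeout of 0 treated as "never expire" (which, as I read the plugin, is how it handles that value):

import os
import time

def has_expired(cachefile, timeout):
    """True when the cache file is older than `timeout` seconds (0 disables expiry)."""
    if timeout == 0:
        return False
    try:
        st = os.stat(cachefile)
    except OSError:
        return False  # a missing file is the caller's cache-miss case, not expiry
    return time.time() - st.st_mtime > timeout

print(has_expired(__file__, 3600))  # this script's own mtime as a demo input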
@@ -65,7 +65,7 @@ class CallbackModule(CallbackBase):
         else:
             self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR)

-        if result._task.ignore_errors:
+        if ignore_errors:
             self._display.display("...ignoring", color=C.COLOR_SKIP)

     def v2_runner_on_ok(self, result):
Some files were not shown because too many files have changed in this diff.