Compare commits
454 commits
devel...stable-2.1
SHA1:
7baf994f2c e06e9b40b7 a54a4fb78a fd30f53289 d68a911141 813497bbda
0c692484e6 3e9d4607ce 39ce8c6610 bd8f81b13b f5dbb555fa 1805f6b6c0
8f5212befe b25f0f4e00 109010c078 cdd5ed5f3d f40e1bc289 ad7c55dd9d
68dbed2a5e 20d67cc562 b4b65c6bd2 2546e8b4e5 36d8612ac0 799b180e2d
44026f8d7b 978311bf3f 59407cf2cb 2c832c736f 71c8ab6ce8 8bbb2b3691
400a3b984e 6c6570583f b5daf2286c 7ec84394f9 35480106e5 51559b0a51
bd1ba1e21a 596765c605 d69f096f3e d7dd41146a f7ff28336d 20df246504
f821d3a9f6 a8421e2424 75cdac0d19 ff0b525608 a36ac1ad1b d6ab369916
78e47b4bbd c3fbe526ca eea3051a12 f99060b3b9 00c48637b0 0f6b318b29
4cadc98049 394dd2be04 0bb04e132a b056a5823f b5c95ea6fa 12a38bc75f
c03bdbfad0 e2926bd398 72cd60a1cd 14e021496d aadf342b46 60a2da09de
e00dc01803 28016bf1a0 768c1e2e58 a18e2976c6 3f7ac72427 4ea3cd3a38
264b33da6b 3e58f08155 972379c907 6db31bb4c6 a7d0cc6e61 ea5e2d46ee
e72d1e995f 2a234c1ff9 751865d68c ec1f19ac3a 29f2f26278 5f23d09e7c
b989004a0b fd256a79a1 c3257027b9 4c845b4ef0 3d8dac2084 2b7761cd37
60a6c83abc ee4ba3ceca 66ffe199e5 5635670191 a4611ff509 cc71765d9e
b5b8a756fc 4c9ed1f4fb e917a0bd7a 9de21e1eaa eaba2152f3 adc158a499
01439aafaf b7168d2ac8 c004ae578d efe5bb122e 27c621950c 7bfa36eb8b
fb71a9dfd9 d86b2ec225 7ea342d8ec 6ba8926889 2e06f0b427 a900a0b4e3
c536ca399a 2587d2aaf9 e83840c3fd e8dddc3679 da3fd2d588 c9b212c5bd
478283f571 5035b8a8bf 559fcbe531 0cce86cac9 f80c981ef6 fc3efdb057
7d6df50e0e b2fe1b39df 719c73afa2 bf929ac532 9478e41394 29dd1635fe
ea7a038f6f cf0eb42ad5 77fe1ac7af 8312df1512 2de8d2ece7 d35377dac7
616a51ac00 3749d44cd5 1601e24593 69d66727ca 6fe13bbb47 e71cce7776
f695dd6892 9255a618e3 cf9ef724e9 f956ff9619 0000b76a0a 7bd9128848
9f645cdbdb baaa1d3013 e464237894 08ae111757 df16e37ad7 db4e661fef
167a12003d 3b6b4f6ce4 780c363482 32b6114bef 02389d6c51 044547034d
6ca9b406ec 3ec6e95e47 53c348c89d 3e964dbfae 505a1de605 ed959d72f1
35da6ba9d1 cf6f46683a b7479a1dc6 fb6e58e888 87dcec9fc5 137385059c
293723f4f6 bac0028350 ad3128e661 e7690b0dd1 637f6f23e1 2398ca917f
628a67563f 909d629c73 c6faf106f8 eff49c968a 4b86191a24 1f74df5c91
9eb32357de 56737da097 11eefdc486 5b9f795140 8a2f54bcee 4dfc0edbc1
ef5a504b8b 68232d10cf e5235e151d feebe73ede ff601f4161 6b84306f70
5e04dcf026 16c3fc5cec 75a3526680 5982a0632f 18ea3f1178 fbf8f5bccb
6bcdb575e8 8659f255df 6755e9c848 75fa80f73c a598f26006 3fd94769dc
9e5fc8f08f e826d3c7d7 b70d83fe1d 3489dcfd94 ba97d6069a c80d8b97e8
dd15a15edc 57eb60757c 25c5314f2e 084a3e13b0 eeba5e2c3e e877ea7f2b
7da2265e10 cb520bd86a 62b3909ef5 5f1d50d53d a7199abb74 a7f93be2b6
05536b3be5 4d4bbcbb33 bb43d4d00b d6fef6f9a6 7083d0b5ea cd348abaa6
9d91d2c6b8 552172b141 55f2829e0c 8ec11e9d6c 5d03a65ee8 8223b83758
877347ad20 8fc5a6a6f5 9a00b2d4af d0cf725ef8 d88264ee3b 969c6a4443
7248314233 73a3a5839b 43d1ea0cfc f6727aff39 304493e795 75beca22a5
fc8d8b5cde 97a9eaa299 6e2651ce07 9cbb1a196b 4c59c0b31d 467ef193fd
2982b99b8e 6b286ee0c8 efed4e577c 9bb754c0a7 d65e2aec81 e971a63c33
553ceefe81 c9b125d21a 00cf1cbac1 2e003adbc8 d4c78b84f0 405f636cc7
a2c1247dff 3ced6d3e90 a9c9cd773f 313d94cc71 aa66551352 66d54dec58
6fc2660194 8fd7e970a1 263e51095f e1765c9d0d d14b29efc2 8b3ce600d0
945fb3411e e2ebae5522 6f0bc4dd82 247d4ebb8d 86516eae05 df33ff6c65
73b250ecf7 d77ff116c2 6c80be47a2 77dc6a36fd 217f8fd824 dee38ceb95
66f8da7258 c1fe7aa322 4cabc94f09 7725c58315 9936d7355c 0ba9a6a875
7287effb5c b950f75489 535a436703 41dde7259b 31f6e26009 6e6dd98b86
a538b222dc 55d40b50e4 6f5d1456bc 7ddf6dbbd4 9f32dcdd28 1b5ec51988
71350e022a ff9641a6a1 a59478f4f4 2452bd2135 0de111be00 2399dd32a8
ccbcb4b5e9 4304574c00 a44743d142 4bf9cf6e41 f3c9672fc0 52c9e2ffcf
b3ca832025 09fa05373b a6bff1e49c faf85ec57c d3367dd722 1867adfabc
9e622dcf31 68807c461b 3bef107872 6b964a1594 36f2312071 b970e2ca80
036547b4dd eae26891ea b5ccbf2986 7bfc3df933 1aa39ae78b a6150dbc14
92e4f25066 b3676392a8 41f45336a9 710a96956e 7855612805 b0259a8631
5e9b13cb94 46e9e4c4da 9602e43952 c901c9144c 76cd7cadfe 94e4e4105d
309aba128c ff346a199c 2259b90827 cd2991c02d d9fa5a3b80 e0112a3db3
8f4243c3ec 956829f0f0 af5195e336 03e7f54071 0ad0b3d83b b40948acd9
cc347dcaa9 4c5dddb25a b181fe0bc2 71c7476199 9121ca2f8e cae6240e5e
2f302e26f4 1968bc5952 047d62cce3 1c2f0ae8f7 3a052654f3 2a512affde
90fb1fb3fa 8cd0c432e7 151df71464 d22898a27c 22b86bbb63 00b04ab794
af257c20da 0040c6a4d2 8a84ef80e2 e9406bcfd3 1c21baa706 8a72972360
b7cab0533e 6e8b12690b e840816da2 fc4edcbedc 31f17d1191 d2e8e8d6a7
6f99f40f37 be28443943 9620eadc03 73dd183394 506e6255b2 4574a0e82d
761cdc794e da1e62a6d9 198f57b61e 90fef85143 a355ebcbb8 2c8715178f
7662b06d5b e52caba801 efbcd8bda0 86768fdcc8 e1b7d4cb4d 641b4cc952
7cc7684abd 16c1f10e18 4abadead76 c7cae3b08c 40c0f34c41 48fa4f842e
145d8626d0 e27b22162a 92c299cfd5 5909a4473d d5585220a4 1e5708514b
a582664bc6 08ce27edfb bea94efd73 92f058b1f5 7ed6270808 6db3d4887e
5e7e55830f 8f31634977 688a7d3668 8bef5ccae9 74b4d7a2ae 5a9e826647
3c42724d96 069e032dcd 9535feb90a 0b28f708c4 10c3472699 b6c59f89d2
8222a83bcd 430bef907a d3de771d2e 459c4ec124 1c447016c3 41f5ce80fa
07e0fceb61 caa8fffa01 5954a82dd6 7f7e010a32
228 changed files with 6236 additions and 1844 deletions
.gitignore (vendored, 1 line changed)

@@ -61,3 +61,4 @@ venv
 Vagrantfile
 .vagrant
 ansible.egg-info/
+/shippable/

.travis.yml (40 lines changed)

@@ -1,40 +0,0 @@
-dist: trusty
-sudo: required
-services:
-  - docker
-language: python
-matrix:
-  include:
-    - env: TARGET=centos6
-    - env: TARGET=centos7 TARGET_OPTIONS="--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro"
-    - env: TARGET=fedora23 TARGET_OPTIONS="--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro"
-    - env: TARGET=fedora-rawhide TARGET_OPTIONS="--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro"
-    - env: TARGET=ubuntu1204
-    - env: TARGET=ubuntu1404
-    - env: TARGET=sanity TOXENV=py26
-      python: 2.6
-    - env: TARGET=sanity TOXENV=py27
-      python: 2.7
-    - env: TARGET=sanity TOXENV=py34
-      python: 3.4
-    - env: TARGET=sanity TOXENV=py35
-      python: 3.5
-    - env: TARGET=sanity TOXENV=py24
-addons:
-  apt:
-    sources:
-      - deadsnakes
-    packages:
-      - python2.4
-install:
-  - pip install tox
-script:
-  - ./test/utils/run_tests.sh
-notifications:
-  irc:
-    channels:
-      - "chat.freenode.net#ansible-notices"
-    on_success: change
-    on_failure: always
-    skip_join: true
-    nick: ansibletravis

CHANGELOG.md (144 lines changed)

@@ -1,7 +1,134 @@
 Ansible Changes By Release
 ==========================
 
-## 2.1 "The Song Remains the Same" - ACTIVE DEVELOPMENT
+## 2.1.6 "The Song Remains the Same" - 06-01-2017
+
+* Security fix for CVE-2017-7481 - data for lookup plugins used as variables was not being correctly marked as "unsafe".
+
+## 2.1.5 "The Song Remains the Same" - 03-27-2017
+
+* Security continued fix for CVE-2016-9587 - Handle some additional corner cases in the way conditionals are parsed and evaluated.
+
+## 2.1.4 "The Song Remains the Same" - 01-16-2017
+
+* Security fix for CVE-2016-9587 - An attacker with control over a client system being managed by Ansible and the ability to send facts back to the Ansible server could use this flaw to execute arbitrary code on the Ansible server as the user and group Ansible is running as.
+* Fixed a bug with conditionals in loops, where undefined variables and other errors will defer raising the error until the conditional has been evaluated.
+* Added a version check for jinja2-2.9, which does not fully work with Ansible currently.
+
+## 2.1.3 "The Song Remains the Same" - 11-04-2016
+
+* Security fix for CVE-2016-8628 - Command injection by compromised server via fact variables. In some situations, facts returned by modules could overwrite connection-based facts or some other special variables, leading to injected commands running on the Ansible controller as the user running Ansible (or via escalated permissions).
+* Security fix for CVE-2016-8614 - apt_key module not properly validating keys in some situations.
+
+###Minor Changes:
+* The subversion module from core now marks its password parameter as no_log so
+  the password is obscured when logging.
+* The postgresql_lang and postgresql_ext modules from extras now mark
+  login_password as no_log so the password is obscured when logging.
+* Fixed several bugs related to locating files relative to role/playbook directories.
+* Fixed a bug in the way hosts were tested for failed states, resulting in incorrectly skipped block sessions.
+* Fixed a bug in the way our custom JSON encoder is used for the to_json* filters.
+* Fixed some bugs related to the use of non-ascii characters in become passwords.
+* Fixed a bug with Azure modules which may be using the latest rc6 library.
+* Backported some docker_common fixes.
+
+## 2.1.2 "The Song Remains the Same" - 09-29-2016
+
+###Minor Changes:
+* Fixed a bug related to creation of retry files (#17456)
+* Fixed a bug in the way include params are used when an include task is dynamic (#17064)
+* Fixed a bug related to including blocks in an include task (#15963)
+* Fixed a bug related to the use of hostvars internally when creating the connection plugin. This prevents things like variables using lookups from being evaluated unnecessarily (#17024)
+* Fixed a bug where using a variable containing a list for the `hosts` of a play resulted in an list of lists (#16583)
+* Fixed a bug where integer values would cause an error if a module param was of type `float` (no issue)
+* Fixed a bug with net_template failing if src was not specified (#17726)
+* Fixed a bug in "ansible-galaxy import" (#17417)
+* Fixed a bug in which INI files incorrectly treated a hosts range as a section header (#15331)
+* Fixed a bug in which the max_fail_percentage calculation erroneously caused a series of plays to stop executing (#15954)
+* Fixed a bug in which the task names were not properly templated (#16295)
+* Fixed a bug causing "squashed" loops (ie. yum, apt) to incorrectly report results (ansible-modules-core#4214)
+* Fixed several bugs related to includes:
+  - when including statically, make sure that all parents were also included statically (issue #16990)
+  - properly resolve nested static include paths
+  - print a message when a file is statically included
+* Fixed a bug in which module params expected to be float types were not converted from integers (only strings) (#17325)
+* Fixed a bug introduced by static includes in 2.1, which prevented notifications from going to the "top level" handler name.
+* Fixed a bug where a group_vars or host_vars directory in the current working directory would be used (and would take precedence) over those in the inventory and/or playbook directory.
+* Fixed a bug which could occur when the result of an async task did not parse as valid JSON.
+* (re)-allowed the use of ansible_python_interpreter lines with more than one argument.
+* Fixed several bugs related to the creation of the implicit localhost in inventory.
+* Fixed a bug related to an unspecified number of retries when using until.
+* Fixed a race-condition bug when creating temp directories before the worker process is forked.
+* Fix a bug with async's poll keyword not making use of ansible_python_interpreter to run (and thus breaking when /usr/bin/python is not present on the remote machine.)
+* Fix a bug where hosts that started with a range in inventory were being treated as an invalid section header.
+
+Module fixes:
+* Fixed a bug where the temporary CA files created by the module helper code were not being deleted properly in some situations (#17073)
+* Fixed many bugs in the unarchive module
+* Fixes for module ec2:
+  - Fixed a bug related to source_dest_check when used with non-vpc instances (core#3243)
+  - Fixed a bug in ec2 where instances were not powering of when referenced via tags only (core#4765)
+  - Fixed a bug where instances with multiple interfaces were not powering up/down correctly (core#3234)
+* Fixes for module get_url:
+  - Fixed a bug in get_url module to force a download if there is a checksum mismatch regardless of the last modified time (core#4262)
+  - Fixed a bug in get_url module to properly process FTP results (core#3661 and core#4601)
+* Fixed a bug in win_user related to users with disabled accounts/expired passwords (core#4369)
+* ini_file:
+  - Fixed a bug where option lines are now inserted before blank lines.
+  - Fixed a bug where leading whitespace prevented matches on options.
+* Fixed a bug in iam_cert when dup_ok is used as a string.
+* Fixed a bug in postgresql_db related to the changed logic when state=absent.
+* Fixed a bug where single_transaction and quick were not passed into db_dump for the mysql_db module.
+* Fixed a bug where the fetch module was not idempotent when retrieving the target of a symlink.
+* Many minor fixes for bugs in extras modules.
+
+###Deprecations:
+
+* Deprecated the use of `_fixup_perms`. Use `_fixup_perms2` instead.
+  This change only impacts custom action plugins using `_fixup_perms`.
+
+###Incompatible Changes:
+
+* Use of `_fixup_perms` with `recursive=True` (the default) is no longer supported.
+  Custom action plugins using `_fixup_perms` will require changes unless they already use `recursive=False`.
+  Use `_fixup_perms2` if support for previous releases is not required.
+  Otherwise use `_fixup_perms` with `recursive=False`.
+
+## 2.1.1 "The Song Remains the Same" - 07-28-2016
+
+###Minor Changes:
+
+* If the user is not using paramiko or vault, allow Ansible to run if pycrypto is not installed.
+* Fixed a bug in pkg_util module that caused "update_catalog must be one of" error if 'update_catalog' arg was used.
+* Fixed a bug where psuedo-connection vars (eg, ansible_winrm_transport) defined in group_vars or host_vars were not getting passed to the connection.
+* Fixed a bug where temp file permissions on BSDs were not using filesystem acls when available.
+* Fixed some bugs in variable dependency resolution. These were mainly related to includes and roles, to bringe the VariableManager code in-line with our documentation.
+* Fixed a bug in unarchive, when the destination was a symlinked directory.
+* Fixed a bug related to performance when loading a large number of groups.
+* Fixed bugs related to the way host and group vars are loaded, which (for large sets of inventory variables) can reduce CPU and memory usage by 50%.
+* Fixed a bug where includes were not being implicitly evaluated as static when no loop or variables were being used.
+* Fixed several more bugs in relation to the way play execution continues or halts when hosts fail, to bringe the behavior more in line with 1.9.x.
+* Fixed bugs related to the use of the underlying shell executable with the script and raw modules.
+* Fixed several bugs in relation to the way ssh keys were used with various networking modules.
+* Fixed a bug related to the way handlers are tracked internally, which could cause bugs when roles are reused within the same play (allow_duplicates: yes) or when the role dependencies formed a "diamond" pattern.
+* Fixed a bug related to setfacl on platforms which do not support the -R option for recursive changes.
+* Several fixes to the way async works to prevent race conditions and other bugs
+* More fixes to the way failed and unreachable hosts affect future plays
+* Fixed a bug in the way the to_json filter encoded some objects
+* Fixed a bug in the way roles and dependencies are loaded, and how they inherit params from parent roles.
+* Fixed a bug in which the number of retries in a do/until loop was off by one
+* Fixed a bug in the way the passwd lookup deals with salts
+* When using the local connection, internally the remote_user value is now forced to be the current user even if remote_user is specified, to prevent issues with become settings
+* Fix for Azure modules to work with most recent Azure python library (2.0.0rc5)
+* Fix for bug related to unreachable hosts and any_errors_fatal in the linear strategy
+* Fix for error handling in relation to killed/dead worker processes. If workers are killed via SIGKILL or SIGTERM, this will halt execution of the playbook.
+* Fixed a regression in the way we handle variables from dependent roles.
+* Added better handling for certain errors thrown from the cryptography.
+* Fixed a typo in the azure_rm_storageaccount module.
+* Fixed some minor bugs in the os_user_role and cs_volume modules.
+* Fixed a bug related to the return value of a low-level inventory API call related to getting variables for hosts and groups.
+
+## 2.1 "The Song Remains the Same" - 05-25-2016
 
 ###Major Changes:
 

@@ -38,13 +165,21 @@ Ansible Changes By Release
 - azure:
   * azure_rm_deployment
   * azure_rm_networkinterface
+  * azure_rm_networkinterface_facts (TECH PREVIEW)
   * azure_rm_publicipaddress
+  * azure_rm_publicipaddress_facts (TECH PREVIEW)
   * azure_rm_resourcegroup
+  * azure_rm_resourcegroup_facts (TECH PREVIEW)
   * azure_rm_securitygroup
+  * azure_rm_securitygroup_facts (TECH PREVIEW)
   * azure_rm_storageaccount
+  * azure_rm_storageaccount_facts (TECH PREVIEW)
   * azure_rm_storageblob
   * azure_rm_subnet
   * azure_rm_virtualmachine
+  * azure_rm_virtualmachineimage_facts (TECH PREVIEW)
   * azure_rm_virtualnetwork
+  * azure_rm_virtualnetwork_facts (TECH PREVIEW)
 - cloudflare_dns
 - cloudstack
   * cs_cluster

@@ -70,6 +205,7 @@ Ansible Changes By Release
 * eos_config
 * eos_eapi
 * eos_template
+- git_config
 - gitlab
   * gitlab_group
   * gitlab_project

@@ -160,7 +296,7 @@ Ansible Changes By Release
 
 ###Minor Changes:
 
 * Added support for pipelining mode to more connection plugins, which helps prevent
   module data from being written to disk.
 * Added a new '!unsafe' YAML decorator, which can be used in playbooks to ensure a
   string is not templated. For example: `foo: !unsafe "Don't template {{me}}"`.

@@ -172,12 +308,16 @@ Ansible Changes By Release
   two custom callback plugins to run in a certain order you can name them
   10-first-callback.py and 20-second-callback.py.
 * Added (alpha) Centirfy's dzdo as another become meethod (privilege escalation)
+* Fixes for unarchive when filenames contain non-ascii characters
+* Fixes for s3_bucket when setting an s3_url.
+* Fix for connections which return extra data after the module's done sending its information.
 
 ###Deprecations:
 
 * Deprecated the use of "bare" variables in loops (ie. `with_items: foo`, where `foo` is a variable).
   The full jinja2 variable syntax of `{{foo}}` should always be used instead. This warning will be removed
   completely in 2.3, after which time it will be an error.
+* Deprecated accelerated mode.
 
 ## 2.0.2 "Over the Hills and Far Away"
 

Makefile (4 lines changed)

@@ -67,7 +67,7 @@ ifeq ($(OFFICIAL),yes)
 DEBUILD_OPTS += -k$(DEBSIGN_KEYID)
 endif
 else
-DEB_RELEASE = 0.git$(DATE)$(GITINFO)
+DEB_RELEASE = 100.git$(DATE)$(GITINFO)
 # Do not sign unofficial builds
 DEBUILD_OPTS += -uc -us
 DPUT_OPTS += -u

@@ -83,7 +83,7 @@ RPMSPEC = $(RPMSPECDIR)/ansible.spec
 RPMDIST = $(shell rpm --eval '%{?dist}')
 RPMRELEASE = $(RELEASE)
 ifneq ($(OFFICIAL),yes)
-RPMRELEASE = 0.git$(DATE)$(GITINFO)
+RPMRELEASE = 100.git$(DATE)$(GITINFO)
 endif
 RPMNVR = "$(NAME)-$(VERSION)-$(RPMRELEASE)$(RPMDIST)"

README.md

@@ -1,7 +1,6 @@
 [![PyPI version](https://img.shields.io/pypi/v/ansible.svg)](https://pypi.python.org/pypi/ansible)
 [![PyPI downloads](https://img.shields.io/pypi/dm/ansible.svg)](https://pypi.python.org/pypi/ansible)
-[![Build Status](https://travis-ci.org/ansible/ansible.svg?branch=devel)](https://travis-ci.org/ansible/ansible)
-
+[![Build Status](https://api.shippable.com/projects/573f79d02a8192902e20e34b/badge?branch=stable-2.1)](https://app.shippable.com/projects/573f79d02a8192902e20e34b)
 
 Ansible
 =======

RELEASES.txt (148 lines changed)

@@ -1,73 +1,83 @@
 Ansible Releases at a Glance
 ============================
 
-Active Development
-++++++++++++++++++
-
-2.1 "The Song Remains the Same" - in progress
-
-Released
-++++++++
-
-2.0.2 "Over the Hills and Far Away" 04-19-2015
-2.0.1 "Over the Hills and Far Away" 02-24-2016
-2.0.0 "Over the Hills and Far Away" 01-12-2016
-1.9.6 "Dancing In the Streets" 04-15-2016
-1.9.5 "Dancing In the Streets" 03-21-2016
-1.9.4 "Dancing In the Streets" 10-09-2015
-1.9.3 "Dancing In the Streets" 09-03-2015
-1.9.2 "Dancing In the Streets" 06-24-2015
-1.9.1 "Dancing In the Streets" 04-27-2015
-1.9.0 "Dancing In the Streets" 03-25-2015
-1.8.4 "You Really Got Me" ---- 02-19-2015
-1.8.3 "You Really Got Me" ---- 02-17-2015
-1.8.2 "You Really Got Me" ---- 12-04-2014
-1.8.1 "You Really Got Me" ---- 11-26-2014
-1.7.2 "Summer Nights" -------- 09-24-2014
-1.7.1 "Summer Nights" -------- 08-14-2014
-1.7 "Summer Nights" -------- 08-06-2014
-1.6.10 "The Cradle Will Rock" - 07-25-2014
-1.6.9 "The Cradle Will Rock" - 07-24-2014
-1.6.8 "The Cradle Will Rock" - 07-22-2014
-1.6.7 "The Cradle Will Rock" - 07-21-2014
-1.6.6 "The Cradle Will Rock" - 07-01-2014
-1.6.5 "The Cradle Will Rock" - 06-25-2014
-1.6.4 "The Cradle Will Rock" - 06-25-2014
-1.6.3 "The Cradle Will Rock" - 06-09-2014
-1.6.2 "The Cradle Will Rock" - 05-23-2014
-1.6.1 "The Cradle Will Rock" - 05-07-2014
-1.6 "The Cradle Will Rock" - 05-05-2014
-1.5.5 "Love Walks In" -------- 04-18-2014
-1.5.4 "Love Walks In" -------- 04-01-2014
-1.5.3 "Love Walks In" -------- 03-13-2014
-1.5.2 "Love Walks In" -------- 03-11-2014
-1.5.1 "Love Walks In" -------- 03-10-2014
-1.5 "Love Walks In" -------- 02-28-2014
-1.4.5 "Could This Be Magic?" - 02-12-2014
-1.4.4 "Could This Be Magic?" - 01-06-2014
-1.4.3 "Could This Be Magic?" - 12-20-2013
-1.4.2 "Could This Be Magic?" - 12-18-2013
-1.4.1 "Could This Be Magic?" - 11-27-2013
-1.4 "Could This Be Magic?" - 11-21-2013
-1.3.4 "Top of the World" ----- 10-29-2013
-1.3.3 "Top of the World" ----- 10-09-2013
-1.3.2 "Top of the World" ----- 09-19-2013
-1.3.1 "Top of the World" ----- 09-16-2013
-1.3 "Top of the World" ----- 09-13-2013
-1.2.3 "Hear About It Later" -- 08-21-2013
-1.2.2 "Hear About It Later" -- 07-05-2013
-1.2.1 "Hear About It Later" -- 07-04-2013
-1.2 "Right Now" ------------ 06-10-2013
-1.1 "Mean Street" ---------- 04-02-2013
-1.0 "Eruption" ------------- 02-01-2013
-0.9 "Dreams" --------------- 11-30-2012
-0.8 "Cathedral" ------------ 10-19-2012
-0.7 "Panama" --------------- 09-06-2012
-0.6 "Cabo" ----------------- 08-06-2012
-0.5 "Amsterdam" ------------ 07-04-2012
-0.4 "Unchained" ------------ 05-23-2012
-0.3 "Baluchitherium" ------- 04-23-2012
-0.0.2 Untitled
-0.0.1 Untitled
 
+VERSION  RELEASE     CODE NAME
+++++++++++++++++++++++++++++++
+
+2.4.0    TBD         "Dancing Days"
+2.3.1    06-01-2017  "Ramble On"
+2.3.0    04-12-2017  "Ramble On"
+2.2.3    05-09-2017  "The Battle of Evermore"
+2.2.2    03-27-2017  "The Battle of Evermore"
+2.2.1    01-16-2017  "The Battle of Evermore"
+2.2.0    11-01-2016  "The Battle of Evermore"
+2.1.6    06-01-2017  "The Song Remains the Same"
+2.1.5    03-27-2017  "The Song Remains the Same"
+2.1.4    01-16-2017  "The Song Remains the Same"
+2.1.3    11-04-2016  "The Song Remains the Same"
+2.1.2    09-29-2016  "The Song Remains the Same"
+2.1.1    07-28-2016  "The Song Remains the Same"
+2.1.0    05-25-2016  "The Song Remains the Same"
+2.0.2    04-19-2016  "Over the Hills and Far Away"
+2.0.1    02-24-2016  "Over the Hills and Far Away"
+2.0.0    01-12-2016  "Over the Hills and Far Away"
+1.9.6    04-15-2016  "Dancing In the Streets"
+1.9.5    03-21-2016  "Dancing In the Streets"
+1.9.4    10-09-2015  "Dancing In the Streets"
+1.9.3    09-03-2015  "Dancing In the Streets"
+1.9.2    06-24-2015  "Dancing In the Streets"
+1.9.1    04-27-2015  "Dancing In the Streets"
+1.9.0    03-25-2015  "Dancing In the Streets"
+1.8.4    02-19-2015  "You Really Got Me"
+1.8.3    02-17-2015  "You Really Got Me"
+1.8.2    12-04-2014  "You Really Got Me"
+1.8.1    11-26-2014  "You Really Got Me"
+1.8.0    11-25-2014  "You Really Got Me"
+1.7.2    09-24-2014  "Summer Nights"
+1.7.1    08-14-2014  "Summer Nights"
+1.7.0    08-06-2014  "Summer Nights"
+1.6.10   07-25-2014  "The Cradle Will Rock"
+1.6.9    07-24-2014  "The Cradle Will Rock"
+1.6.8    07-22-2014  "The Cradle Will Rock"
+1.6.7    07-21-2014  "The Cradle Will Rock"
+1.6.6    07-01-2014  "The Cradle Will Rock"
+1.6.5    06-25-2014  "The Cradle Will Rock"
+1.6.4    06-25-2014  "The Cradle Will Rock"
+1.6.3    06-09-2014  "The Cradle Will Rock"
+1.6.2    05-23-2014  "The Cradle Will Rock"
+1.6.1    05-07-2014  "The Cradle Will Rock"
+1.6.0    05-05-2014  "The Cradle Will Rock"
+1.5.5    04-18-2014  "Love Walks In"
+1.5.4    04-01-2014  "Love Walks In"
+1.5.3    03-13-2014  "Love Walks In"
+1.5.2    03-11-2014  "Love Walks In"
+1.5.1    03-10-2014  "Love Walks In"
+1.5.0    02-28-2014  "Love Walks In"
+1.4.5    02-12-2014  "Could This Be Magic?"
+1.4.4    01-06-2014  "Could This Be Magic?"
+1.4.3    12-20-2013  "Could This Be Magic?"
+1.4.2    12-18-2013  "Could This Be Magic?"
+1.4.1    11-27-2013  "Could This Be Magic?"
+1.4.0    11-21-2013  "Could This Be Magic?"
+1.3.4    10-29-2013  "Top of the World"
+1.3.3    10-09-2013  "Top of the World"
+1.3.2    09-19-2013  "Top of the World"
+1.3.1    09-16-2013  "Top of the World"
+1.3.0    09-13-2013  "Top of the World"
+1.2.3    08-21-2013  "Right Now"
+1.2.2    07-05-2013  "Right Now"
+1.2.1    07-04-2013  "Right Now"
+1.2.0    06-10-2013  "Right Now"
+1.1.0    04-02-2013  "Mean Street"
+1.0.0    02-01-2013  "Eruption"
+0.9.0    11-30-2012  "Dreams"
+0.8.0    10-19-2012  "Cathedral"
+0.7.0    09-06-2012  "Panama"
+0.6.0    08-06-2012  "Cabo"
+0.5.0    07-04-2012  "Amsterdam"
+0.4.0    05-23-2012  "Unchained"
+0.3.0    04-23-2012  "Baluchitherium"
+0.2.0    ?           "Untitled"
+0.1.0    ?           "Untitled"
+0.0.2    ?           "Untitled"
+0.0.1    ?           "Untitled"

VERSION (2 lines changed)

@@ -1 +1 @@
-2.1.0
+2.1.6.0 1

contrib/inventory/azure_rm.ini

@@ -9,6 +9,9 @@
 # Control which tags are included. Set tags to a comma separated list of keys or key:value pairs
 #tags=
 
+# Control which locations are included. Set locations to a comma separated list (e.g. eastus,eastus2,westus)
+#locations=
+
 # Include powerstate. If you don't need powerstate information, turning it off improves runtime performance.
 include_powerstate=yes
 
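A minimal sketch of how a comma-separated setting such as the new locations key can be read from azure_rm.ini and split into a list, mirroring the split(',') handling visible in the script diff below. The section name 'azure' is assumed here purely for illustration; the real script maps ini keys through its AZURE_CONFIG_SETTINGS table.

```python
# Sketch only: read a comma-separated ini value and split it into a list.
# The '[azure]' section name is a hypothetical placeholder.
import ConfigParser  # Python 2, matching the inventory script

config = ConfigParser.ConfigParser()
config.read('azure_rm.ini')

locations = None
if config.has_option('azure', 'locations'):
    # 'eastus,eastus2,westus' -> ['eastus', 'eastus2', 'westus']
    locations = config.get('azure', 'locations').split(',')
print(locations)
```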
contrib/inventory/azure_rm.py

@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
 #
 # Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
 #                    Chris Houseknecht, <house@redhat.com>

@@ -23,7 +23,7 @@
 Azure External Inventory Script
 ===============================
 Generates dynamic inventory by making API requests to the Azure Resource
-Manager using the AAzure Python SDK. For instruction on installing the
+Manager using the Azure Python SDK. For instruction on installing the
 Azure Python SDK see http://azure-sdk-for-python.readthedocs.org/

@@ -32,7 +32,7 @@ The order of precedence is command line arguments, environment variables,
 and finally the [default] profile found in ~/.azure/credentials.
 
 If using a credentials file, it should be an ini formatted file with one or
 more sections, which we refer to as profiles. The script looks for a
 [default] section, if a profile is not specified either on the command line
 or with an environment variable. The keys in a profile will match the
 list of command line arguments below.

@@ -42,7 +42,7 @@ in your ~/.azure/credentials file, or a service principal or Active Directory
 user.
 
 Command line arguments:
 - profile
 - client_id
 - secret
 - subscription_id

@@ -61,7 +61,7 @@ Environment variables:
 
 Run for Specific Host
 -----------------------
 When run for a specific host using the --host option, a resource group is
 required. For a specific host, this script returns the following variables:
 
 {

@@ -76,7 +76,7 @@ required. For a specific host, this script returns the following variables:
   "version": "latest"
 },
 "location": "westus",
-"mac_address": "00-0D-3A-31-2C-EC",
+"mac_address": "00-00-5E-00-53-FE",
 "name": "object-name",
 "network_interface": "interface-name",
 "network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1",

@@ -130,6 +130,10 @@ Select hosts for specific tag key by assigning a comma separated list of tag key
 
 AZURE_TAGS=key1,key2,key3
 
+Select hosts for specific locations:
+
+AZURE_LOCATIONS=eastus,westus,eastus2
+
 Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to:
 
 AZURE_TAGS=key1:value1,key2:value2

@@ -138,11 +142,13 @@ If you don't need the powerstate, you can improve performance by turning off pow
 AZURE_INCLUDE_POWERSTATE=no
 
 azure_rm.ini
-----------------------
-As mentioned above you can control execution using environment variables or an .ini file. A sample
+------------
+As mentioned above, you can control execution using environment variables or a .ini file. A sample
 azure_rm.ini is included. The name of the .ini file is the basename of the inventory script (in this case
-'azure_rm') with a .ini extension. This provides you with the flexibility of copying and customizing this
-script and having matching .ini files. Go forth and customize your Azure inventory!
+'azure_rm') with a .ini extension. It also assumes the .ini file is alongside the script. To specify
+a different path for the .ini file, define the AZURE_INI_PATH environment variable:
+
+    export AZURE_INI_PATH=/path/to/custom.ini
 
 Powerstate:
 -----------

@@ -152,13 +158,13 @@ up. If the value is anything other than 'running', the machine is down, and will
 Examples:
 ---------
 Execute /bin/uname on all instances in the galaxy-qa resource group
-$ ansible -i azure_rm_inventory.py galaxy-qa -m shell -a "/bin/uname -a"
+$ ansible -i azure_rm.py galaxy-qa -m shell -a "/bin/uname -a"
 
 Use the inventory script to print instance specific information
-$ contrib/inventory/azure_rm_inventory.py --host my_instance_host_name --pretty
+$ contrib/inventory/azure_rm.py --host my_instance_host_name --pretty
 
 Use with a playbook
-$ ansible-playbook -i contrib/inventory/azure_rm_inventory.py my_playbook.yml --limit galaxy-qa
+$ ansible-playbook -i contrib/inventory/azure_rm.py my_playbook.yml --limit galaxy-qa
 
 
 Insecure Platform Warning

@@ -180,11 +186,13 @@ Version: 1.0.0
 
 import argparse
 import ConfigParser
 import json
 import os
 import re
 import sys
+
+from packaging.version import Version
 
 from os.path import expanduser
 
 HAS_AZURE = True

@@ -195,12 +203,9 @@ try:
     from azure.mgmt.compute import __version__ as azure_compute_version
     from azure.common import AzureMissingResourceHttpError, AzureHttpError
     from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
-    from azure.mgmt.network.network_management_client import NetworkManagementClient,\
-        NetworkManagementClientConfiguration
-    from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient,\
-        ResourceManagementClientConfiguration
-    from azure.mgmt.compute.compute_management_client import ComputeManagementClient,\
-        ComputeManagementClientConfiguration
+    from azure.mgmt.network.network_management_client import NetworkManagementClient
+    from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient
+    from azure.mgmt.compute.compute_management_client import ComputeManagementClient
 except ImportError as exc:
     HAS_AZURE_EXC = exc
     HAS_AZURE = False

@@ -219,6 +224,7 @@ AZURE_CREDENTIAL_ENV_MAPPING = dict(
 AZURE_CONFIG_SETTINGS = dict(
     resource_groups='AZURE_RESOURCE_GROUPS',
     tags='AZURE_TAGS',
+    locations='AZURE_LOCATIONS',
     include_powerstate='AZURE_INCLUDE_POWERSTATE',
     group_by_resource_group='AZURE_GROUP_BY_RESOURCE_GROUP',
     group_by_location='AZURE_GROUP_BY_LOCATION',

@@ -226,7 +232,7 @@ AZURE_CONFIG_SETTINGS = dict(
     group_by_tag='AZURE_GROUP_BY_TAG'
 )
 
-AZURE_MIN_VERSION = "2016-03-30"
+AZURE_MIN_VERSION = "0.30.0rc5"
 
 
 def azure_id_to_dict(id):

@@ -303,7 +309,7 @@ class AzureRM(object):
 
     def _get_env_credentials(self):
         env_credentials = dict()
-        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems():
+        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
             env_credentials[attribute] = os.environ.get(env_variable, None)
 
         if env_credentials['profile'] is not None:

@@ -322,7 +328,7 @@ class AzureRM(object):
         self.log('Getting credentials')
 
         arg_credentials = dict()
-        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems():
+        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
             arg_credentials[attribute] = getattr(params, attribute)
 
         # try module params

@@ -356,14 +362,17 @@ class AzureRM(object):
             resource_client = self.rm_client
             resource_client.providers.register(key)
         except Exception as exc:
-            self.fail("One-time registration of {0} failed - {1}".format(key, str(exc)))
+            self.log("One-time registration of {0} failed - {1}".format(key, str(exc)))
+            self.log("You might need to register {0} using an admin account".format(key))
+            self.log(("To register a provider using the Python CLI: "
+                      "https://docs.microsoft.com/azure/azure-resource-manager/"
+                      "resource-manager-common-deployment-errors#noregisteredproviderfound"))
 
     @property
     def network_client(self):
         self.log('Getting network client')
         if not self._network_client:
-            self._network_client = NetworkManagementClient(
-                NetworkManagementClientConfiguration(self.azure_credentials, self.subscription_id))
+            self._network_client = NetworkManagementClient(self.azure_credentials, self.subscription_id)
             self._register('Microsoft.Network')
         return self._network_client

@@ -371,16 +380,14 @@ class AzureRM(object):
     def rm_client(self):
         self.log('Getting resource manager client')
         if not self._resource_client:
-            self._resource_client = ResourceManagementClient(
-                ResourceManagementClientConfiguration(self.azure_credentials, self.subscription_id))
+            self._resource_client = ResourceManagementClient(self.azure_credentials, self.subscription_id)
         return self._resource_client
 
     @property
     def compute_client(self):
         self.log('Getting compute client')
         if not self._compute_client:
-            self._compute_client = ComputeManagementClient(
-                ComputeManagementClientConfiguration(self.azure_credentials, self.subscription_id))
+            self._compute_client = ComputeManagementClient(self.azure_credentials, self.subscription_id)
             self._register('Microsoft.Compute')
         return self._compute_client

@@ -403,6 +410,7 @@ class AzureInventory(object):
 
         self.resource_groups = []
         self.tags = None
+        self.locations = None
         self.replace_dash_in_groups = False
         self.group_by_resource_group = True
         self.group_by_location = True

@@ -425,6 +433,9 @@ class AzureInventory(object):
         if self._args.tags:
             self.tags = self._args.tags.split(',')
 
+        if self._args.locations:
+            self.locations = self._args.locations.split(',')
+
         if self._args.no_powerstate:
             self.include_powerstate = False

@@ -435,7 +446,7 @@ class AzureInventory(object):
     def _parse_cli_args(self):
         # Parse command line arguments
         parser = argparse.ArgumentParser(
             description='Produce an Ansible Inventory file for an Azure subscription')
         parser.add_argument('--list', action='store_true', default=True,
                             help='List instances (default: True)')
         parser.add_argument('--debug', action='store_true', default=False,

@@ -462,6 +473,8 @@ class AzureInventory(object):
                             help='Return inventory for comma separated list of resource group names')
         parser.add_argument('--tags', action='store',
                             help='Return inventory for comma separated list of tag key:value pairs')
+        parser.add_argument('--locations', action='store',
+                            help='Return inventory for comma separated list of locations')
         parser.add_argument('--no-powerstate', action='store_true', default=False,
                             help='Do not include the power state of each virtual host')
         return parser.parse_args()

@@ -487,7 +500,7 @@ class AzureInventory(object):
         except Exception as exc:
             sys.exit("Error: fetching virtual machines - {0}".format(str(exc)))
 
-        if self._args.host or self.tags > 0:
+        if self._args.host or self.tags or self.locations:
             selected_machines = self._selected_machines(virtual_machines)
             self._load_machines(selected_machines)
         else:

@@ -524,7 +537,7 @@ class AzureInventory(object):
             resource_group=resource_group,
             mac_address=None,
             plan=(machine.plan.name if machine.plan else None),
-            virtual_machine_size=machine.hardware_profile.vm_size.value,
+            virtual_machine_size=machine.hardware_profile.vm_size,
             computer_name=machine.os_profile.computer_name,
             provisioning_state=machine.provisioning_state,
         )

@@ -576,7 +589,7 @@ class AzureInventory(object):
             host_vars['mac_address'] = network_interface.mac_address
             for ip_config in network_interface.ip_configurations:
                 host_vars['private_ip'] = ip_config.private_ip_address
-                host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method.value
+                host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method
                 if ip_config.public_ip_address:
                     public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id)
                     public_ip_address = self._network_client.public_ip_addresses.get(

@@ -585,7 +598,7 @@ class AzureInventory(object):
                     host_vars['ansible_host'] = public_ip_address.ip_address
                     host_vars['public_ip'] = public_ip_address.ip_address
                     host_vars['public_ip_name'] = public_ip_address.name
-                    host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method.value
+                    host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method
                     host_vars['public_ip_id'] = public_ip_address.id
                     if public_ip_address.dns_settings:
                         host_vars['fqdn'] = public_ip_address.dns_settings.fqdn

@@ -599,6 +612,8 @@ class AzureInventory(object):
                 selected_machines.append(machine)
             if self.tags and self._tags_match(machine.tags, self.tags):
                 selected_machines.append(machine)
+            if self.locations and machine.location in self.locations:
+                selected_machines.append(machine)
         return selected_machines
 
     def _get_security_groups(self, resource_group):

@@ -653,7 +668,7 @@ class AzureInventory(object):
             self._inventory['azure'].append(host_name)
 
         if self.group_by_tag and vars.get('tags'):
-            for key, value in vars['tags'].iteritems():
+            for key, value in vars['tags'].items():
                 safe_key = self._to_safe(key)
                 safe_value = safe_key + '_' + self._to_safe(value)
                 if not self._inventory.get(safe_key):

@@ -676,17 +691,17 @@ class AzureInventory(object):
         file_settings = self._load_settings()
         if file_settings:
             for key in AZURE_CONFIG_SETTINGS:
-                if key in ('resource_groups', 'tags') and file_settings.get(key, None) is not None:
+                if key in ('resource_groups', 'tags', 'locations') and file_settings.get(key):
                     values = file_settings.get(key).split(',')
                     if len(values) > 0:
                         setattr(self, key, values)
-                elif file_settings.get(key, None) is not None:
+                elif file_settings.get(key):
                     val = self._to_boolean(file_settings[key])
                     setattr(self, key, val)
         else:
             env_settings = self._get_env_settings()
             for key in AZURE_CONFIG_SETTINGS:
-                if key in('resource_groups', 'tags') and env_settings.get(key, None) is not None:
+                if key in('resource_groups', 'tags', 'locations') and env_settings.get(key):
                     values = env_settings.get(key).split(',')
                     if len(values) > 0:
                         setattr(self, key, values)

@@ -713,13 +728,14 @@ class AzureInventory(object):
 
     def _get_env_settings(self):
         env_settings = dict()
-        for attribute, env_variable in AZURE_CONFIG_SETTINGS.iteritems():
+        for attribute, env_variable in AZURE_CONFIG_SETTINGS.items():
            env_settings[attribute] = os.environ.get(env_variable, None)
         return env_settings
 
     def _load_settings(self):
         basename = os.path.splitext(os.path.basename(__file__))[0]
-        path = basename + '.ini'
+        default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini'))
+        path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_INI_PATH', default_path)))
         config = None
         settings = None
         try:

@@ -774,11 +790,11 @@ class AzureInventory(object):
 
 def main():
     if not HAS_AZURE:
-        sys.exit("The Azure python sdk is not installed (try 'pip install azure') - {0}".format(HAS_AZURE_EXC))
+        sys.exit("The Azure python sdk is not installed (try `pip install 'azure>=2.0.0rc5' --upgrade`) - {0}".format(HAS_AZURE_EXC))
 
-    if azure_compute_version < AZURE_MIN_VERSION:
-        sys.exit("Expecting azure.mgmt.compute.__version__ to be >= {0}. Found version {1} "
-                 "Do you have Azure >= 2.0.0rc2 installed?".format(AZURE_MIN_VERSION, azure_compute_version))
+    if Version(azure_compute_version) < Version(AZURE_MIN_VERSION):
+        sys.exit("Expecting azure.mgmt.compute.__version__ to be {0}. Found version {1} "
+                 "Do you have Azure >= 2.0.0rc5 installed? (try `pip install 'azure>=2.0.0rc5' --upgrade`)".format(AZURE_MIN_VERSION, azure_compute_version))
 
     AzureInventory()
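The final hunk above replaces a plain string comparison of version numbers with packaging.version.Version. A minimal sketch of why that matters; the right-hand version value is illustrative only:

```python
# String comparison orders version numbers lexicographically, so
# "0.30.0rc5" sorts before "0.4.0" even though 0.30.0 is the newer
# release; packaging.version.Version compares release components
# numerically and gets it right.
from packaging.version import Version

print("0.30.0rc5" < "0.4.0")                    # True  (wrong answer)
print(Version("0.30.0rc5") < Version("0.4.0"))  # False (correct)
```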
contrib/inventory/collins.ini

@@ -3,6 +3,8 @@
 
 [collins]
 
+# You should not have a trailing slash or collins
+# will not properly match the URI
 host = http://localhost:9000
 
 username = blake
contrib/inventory/collins.py

@@ -201,7 +201,8 @@ class CollinsInventory(object):
             response = open_url(query_url,
                                 timeout=self.collins_timeout_secs,
                                 url_username=self.collins_username,
-                                url_password=self.collins_password)
+                                url_password=self.collins_password,
+                                force_basic_auth=True)
             json_response = json.loads(response.read())
             # Adds any assets found to the array of assets.
             assets += json_response['data']['Data']
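For context on the force_basic_auth change above: by default, urllib-based clients only send credentials after the server answers with a 401 challenge, so an API that responds 403 or returns an empty result instead of a challenge never receives them. A minimal standard-library sketch of the preemptive Authorization header that the flag enables; the URL and credentials below are hypothetical:

```python
# Sketch of preemptive HTTP basic auth, which is what force_basic_auth=True
# asks open_url to do; all values here are hypothetical placeholders.
import base64
import urllib2  # Python 2, matching the inventory script

query_url = 'http://localhost:9000/api/assets'
token = base64.b64encode('blake:secret')  # 'username:password'
request = urllib2.Request(query_url)
request.add_header('Authorization', 'Basic %s' % token)
response = urllib2.urlopen(request, timeout=120)
print(response.getcode())
```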
@ -24,9 +24,16 @@ DOCUMENTATION = '''
|
|||
|
||||
Docker Inventory Script
|
||||
=======================
|
||||
Generates dynamic inventory by making API requests to one or more Docker daemons. Communicates with the API
|
||||
by way of docker-py (https://docker-py.readthedocs.org/en/stable/). So before running the script, you will need to
|
||||
install docker-py:
|
||||
The inventory script generates dynamic inventory by making API requests to one or more Docker APIs. It's dynamic
|
||||
because the inventory is generated at run-time rather than being read from a static file. The script generates the
|
||||
inventory by connecting to one or many Docker APIs and inspecting the containers it finds at each API. Which APIs the
|
||||
script contacts can be defined using environment variables or a configuration file.
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
Using the docker modules requires having docker-py <https://docker-py.readthedocs.org/en/stable/>
|
||||
installed on the host running Ansible. To install docker-py:
|
||||
|
||||
pip install docker-py
|
||||
|
||||
|
@ -197,126 +204,123 @@ When run in --list mode (the default), container instances are grouped by:
|
|||
Configuration:
|
||||
--------------
|
||||
You can control the behavior of the inventory script by passing arguments, defining environment variables, or
|
||||
creating a docker.yml file (sample provided in ansible/contrib/inventory). The order of precedence is command
|
||||
line args, then the docker.yml file and finally environment variables.
|
||||
creating a configuration file named docker.yml (sample provided in ansible/contrib/inventory). The order of precedence
|
||||
is command line args, then the docker.yml file and finally environment variables.
|
||||
|
||||
Environment variables:
|
||||
;;;;;;;;;;;;;;;;;;;;;;
|
||||
......................
|
||||
|
||||
DOCKER_CONFIG_FILE
|
||||
description: path to docker inventory configuration file.
|
||||
default: ./docker.yml
|
||||
To connect to a single Docker API the following variables can be defined in the environment to control the connection
|
||||
options. These are the same environment variables used by the Docker modules.
|
||||
|
||||
DOCKER_HOST
|
||||
description: Docker daemon URL or Unix socket path.
|
||||
default: unix://var/run/docker.sock
|
||||
DOCKER_HOST
|
||||
The URL or Unix socket path used to connect to the Docker API. Defaults to unix://var/run/docker.sock.
|
||||
|
||||
DOCKER_TLS_HOSTNAME:
|
||||
description: When DOCKER_TLS_VERIFY is true, provide the expected name of the host.
|
||||
default: localhost
|
||||
DOCKER_API_VERSION:
|
||||
The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
|
||||
by docker-py.
|
||||
|
||||
DOCKER_API_VERSION:
|
||||
description: Version of the Docker API the client will use.
|
||||
default: DEFAULT_DOCKER_API_VERSION as defined in docker-py
|
||||
DOCKER_TIMEOUT:
|
||||
The maximum amount of time in seconds to wait on a response fromm the API. Defaults to 60 seconds.
|
||||
|
||||
DOCKER_CERT_PATH:
|
||||
description: Path to the directory containing the client certificate and key files.
|
||||
default: None
|
||||
DOCKER_TLS:
|
||||
Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server.
|
||||
Defaults to False.
|
||||
|
||||
DOCKER_SSL_VERSION:
|
||||
description: Version of TLS supported by Docker daemon.
|
||||
default: None
|
||||
DOCKER_TLS_VERIFY:
|
||||
Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
|
||||
Default is False
|
||||
|
||||
DOCKER_TLS:
|
||||
description: Use TLS when sending requests to Docker daemon. Set to 1, 0, true, false, True, False, yes, no.
|
||||
default: False
|
||||
DOCKER_TLS_HOSTNAME:
|
||||
When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults
|
||||
to localhost.
|
||||
|
||||
DOCKER_TLS_VERIFY:
|
||||
description: Verify hostname found in TLS certs. Set to 1, 0, true, false, True, False, yes, no.
|
||||
default: False
|
||||
DOCKER_CERT_PATH:
|
||||
Path to the directory containing the client certificate, client key and CA certificate.
|
||||
|
||||
DOCKER_TIMEOUT:
|
||||
description: Docker request timeout in seconds.
|
||||
default: Value of DOCKER_TIMEOUT as defined in docker-py
|
||||
DOCKER_SSL_VERSION:
|
||||
Provide a valid SSL version number. Default value determined by docker-py, which at the time of this writing
|
||||
was 1.0
|
||||
|
||||
DOCKER_PRIVATE_SSH_PORT:
|
||||
description: The private port (container port) on which SSH is listening for connections
|
||||
default: 22
|
||||
In addition to the connection variables there are a couple variables used to control the execution and output of the
|
||||
script:
|
||||
|
||||
DOCKER_DEFAULT_IP:
|
||||
description: This environment variable overrides the container SSH connection
|
||||
IP address (aka, 'ansible_ssh_host').
|
||||
DOCKER_CONFIG_FILE
|
||||
Path to the configuration file. Defaults to ./docker.yml.
|
||||
|
||||
This option allows one to override the ansible_ssh_host whenever Docker has exercised its default behavior of
|
||||
binding private ports to all interfaces of the Docker host. This behavior, when dealing with remote Docker hosts,
|
||||
does not allow Ansible to determine a proper host IP address on which to connect via SSH to containers. By
|
||||
default, this inventory module assumes all 0.0.0.0-exposed ports to be bound to localhost:<port>. To override
|
||||
this behavior, for example, to bind a container's SSH port to the public interface of its host, one must
|
||||
manually set this IP.
|
||||
DOCKER_PRIVATE_SSH_PORT:
|
||||
The private port (container port) on which SSH is listening for connections. Defaults to 22.
|
||||
|
||||
It is preferable to begin to launch Docker containers with ports exposed on publicly accessible IP addresses,
|
||||
particularly if the containers are to be targeted by Ansible for remote configuration, not accessible via
|
||||
localhost SSH connections. Docker containers can be explicitly exposed on IP addresses by
|
||||
a) starting the daemon with the --ip argument
|
||||
b) running containers with the -P/--publish ip::containerPort
|
||||
argument
|
||||
default: 127.0.0.1 if port exposed on 0.0.0.0
|
||||
DOCKER_DEFAULT_IP:
|
||||
The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'.
|
||||
|
||||
|
||||
docker.yml
|
||||
;;;;;;;;;;;;;;;;;;;;
|
||||
Configuration File
|
||||
..................
|
||||
|
||||
A sample docker.yml file is included in the ansible/contrib/inventory. Using this file is not required. If
|
||||
the file is not found, environment variables will be used.
|
||||
Using a configuration file provides a means for defining a set of Docker APIs from which to build an inventory.
|
||||
|
||||
The default name of the file is derived from the name of the inventory script. By default the script will look for
|
||||
basename of the script (i.e. docker) with an extension of '.yml'. You can override the default name by passing a
|
||||
command line argument or setting DOCKER_CONFIG_FILE in the environment.
|
||||
basename of the script (i.e. docker) with an extension of '.yml'.
|
||||
|
||||
You can also override the default name of the script by defining DOCKER_CONFIG_FILE in the environment.
|
||||
|
||||
Here's what you can define in docker_inventory.yml:
|
||||
|
||||
* defaults: Defines a default connnection. Defaults will be taken from this and applied to any values not provided
  for a host defined in the hosts list.
defaults
    Defines a default connection. Defaults will be taken from this and applied to any values not provided
    for a host defined in the hosts list.

* hosts: If you wish to get inventory from more than one Docker daemon hosts, define a hosts list.
hosts
    If you wish to get inventory from more than one Docker host, define a hosts list.

For a host defined in defaults or hosts, you can provided the following attributes. The only required attribute is host.
For the default host and each host in the hosts list define the following attributes:

host:
    description: The URL or Unix socket path for the host.
    description: The URL or Unix socket path used to connect to the Docker API.
    required: yes

tls:
    description: Connect using https://
    description: Connect using TLS without verifying the authenticity of the Docker host server.
    default: false
    required: false

tls_verify:
    description: Connect using https:// and verify the host name matches the host name found in the certificate.
    description: Connect using TLS and verify the authenticity of the Docker host server.
    default: false
    required: false

cert_path:
    description: Path to the host's certificate .pem file.
    description: Path to the client's TLS certificate file.
    default: null
    required: false

cacert_path:
    description: Path to the host's Certificate Authority .pem file.
    description: Use a CA certificate when performing server verification by providing the path to a CA certificate file.
    default: null
    required: false

key_path:
    description: Path to the host's encryption key .pem file
    description: Path to the client's TLS key file.
    default: null
    required: false

version:
    description: The API version.
    description: The Docker API version.
    required: false
    default: will be supplied by the docker-py module.

timeout:
    description: The amount of time in seconds to wait on an API response.
    required: false
    default: will be supplied by the docker-py module.
    default: 60

default_ip:
    description: The IP address to assign to ansilbe_host when the container's SSH port is mappped to 0.0.0.0
    description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface
    '0.0.0.0'.
    required: false
    default: 1267.0.0.1
    default: 127.0.0.1

private_ssh_port:
    description: The port containers use for SSH
    required: false
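For orientation, a minimal sketch of such a file (the socket path is the Docker default; the TCP address is a hypothetical example, not part of the shipped sample):

    defaults:
        host: unix:///var/run/docker.sock
    hosts:
        - host: tcp://10.0.0.10:4243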
@ -324,28 +328,25 @@ For a host defined in defaults or hosts, you can provided the following attribut

Examples
--------
# Run the script with Env vars (for when you have Docker toolbox installed)
./docker_inventory.py --pretty

# Connect to docker instance on localhost port 4243
DOCKER_HOST=tcp://localhost:4243 ./docker.py --pretty
# Connect to the Docker API on localhost port 4243 and format the JSON output
DOCKER_HOST=tcp://localhost:4243 ./docker.py --pretty

# Any container's ssh port exposed on 0.0.0.0 will mapped to
#another IP address (where Ansible will attempt to connect via SSH)
DOCKER_DEFAULT_IP=1.2.3.4 ./docker.py --pretty
# Any container's ssh port exposed on 0.0.0.0 will be mapped to
# another IP address (where Ansible will attempt to connect via SSH)
DOCKER_DEFAULT_IP=1.2.3.4 ./docker.py --pretty

# Run as input to a playbook:
ansible-playbook -i ~/projects/ansible/contrib/inventory/docker_inventory.py docker_inventory_test.yml
# Run as input to a playbook:
ansible-playbook -i ~/projects/ansible/contrib/inventory/docker.py docker_inventory_test.yml

# Simple playbook to invoke with the above example:

- name: Test docker_inventory
  hosts: all
  connection: local
  gather_facts: no
  tasks:
    - debug: msg="Container - {{ inventory_hostname }}"

'''
@ -1,52 +1,65 @@

# This is the configuration file for the Docker inventory script: docker_inventory.py.
#
# defaults: Defines a default connnection. Defaults will be taken from this and applied to any values not provided
# for a host defined in the hosts list.
# You can define the following in this file:
#
# hosts: If you wish to get inventory from more than one Docker daemon hosts, define a hosts list.
#
# For a host defined in defaults or hosts, you can provided the following attributes. The only required attribute is host.
# defaults
# Defines a default connection. Defaults will be taken from this and applied to any values not provided
# for a host defined in the hosts list.
#
# host:
# description: The URL or Unix socket path for the host.
# required: yes
# tls:
# description: Connect using https://
# default: false
# required: false
# tls_verify:
# description: Connect using https:// and verify the host name matches the host name found in the certificate.
# default: false
# required: false
# cert_path:
# description: Path to the client's certificate .pem file.
# default: null
# required: false
# cacert_path:
# description: Path to the client's Certificate Authority .pem file.
# default: null
# required: false
# key_path:
# description: Path to the client's encryption key .pem file
# default: null
# required: false
# version:
# description: The API version the client will use.
# required: false
# default: will be supplied by the docker-py module.
# timeout:
# description: The amount of time in seconds to wait on an API response.
# required: false
# default: will be supplied by the docker-py module.
# default_ip:
# description: The IP address to assign to ansilbe_host when the container's SSH port is mappped to 0.0.0.0
# required: false
# default: 1267.0.0.1
# private_ssh_port:
# description: The port containers use for SSH
# required: false
# default: 22
#
# hosts
# If you wish to get inventory from more than one Docker host, define a hosts list.
#
# For the default host and each host in the hosts list define the following attributes:
#
# host:
# description: The URL or Unix socket path used to connect to the Docker API.
# required: yes
#
# tls:
# description: Connect using TLS without verifying the authenticity of the Docker host server.
# default: false
# required: false
#
# tls_verify:
# description: Connect using TLS and verify the authenticity of the Docker host server.
# default: false
# required: false
#
# cert_path:
# description: Path to the client's TLS certificate file.
# default: null
# required: false
#
# cacert_path:
# description: Use a CA certificate when performing server verification by providing the path to a CA certificate file.
# default: null
# required: false
#
# key_path:
# description: Path to the client's TLS key file.
# default: null
# required: false
#
# version:
# description: The Docker API version.
# required: false
# default: will be supplied by the docker-py module.
#
# timeout:
# description: The amount of time in seconds to wait on an API response.
# required: false
# default: 60
#
# default_ip:
# description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface
# '0.0.0.0'.
# required: false
# default: 127.0.0.1
#
# private_ssh_port:
# description: The port containers use for SSH
# required: false
# default: 22

#defaults:
#   host: unix:///var/run/docker.sock
@ -45,3 +45,11 @@ gce_service_account_email_address =

gce_service_account_pem_file_path =
gce_project_id =

[inventory]
# The 'inventory_ip_type' parameter specifies whether 'ansible_ssh_host' should
# contain the instance internal or external address. Values may be either
# 'internal' or 'external'. If 'external' is specified but no external instance
# address exists, the internal address will be used.
# The INVENTORY_IP_TYPE environment variable will override this value.
inventory_ip_type =
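As a quick sketch (not part of the sample file): because the environment variable wins over the ini setting, a one-off run can switch address types without editing gce.ini:

    # prefer internal addresses for this invocation only
    INVENTORY_IP_TYPE=internal contrib/inventory/gce.py --pretty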
@ -69,7 +69,8 @@ Examples:

    $ contrib/inventory/gce.py --host my_instance

Author: Eric Johnson <erjohnso@google.com>
Version: 0.0.1
Contributors: Matt Hite <mhite@hotmail.com>
Version: 0.0.2
'''

__requires__ = ['pycrypto>=2.6']

@ -83,7 +84,7 @@ except ImportError:

    pass

USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
USER_AGENT_VERSION="v1"
USER_AGENT_VERSION="v2"

import sys
import os

@ -111,7 +112,11 @@ class GceInventory(object):

    def __init__(self):
        # Read settings and parse CLI arguments
        self.parse_cli_args()
        self.config = self.get_config()
        self.driver = self.get_gce_driver()
        self.ip_type = self.get_inventory_options()
        if self.ip_type:
            self.ip_type = self.ip_type.lower()

        # Just display data for specific host
        if self.args.host:

@ -125,9 +130,13 @@ class GceInventory(object):

            pretty=self.args.pretty))
        sys.exit(0)

    def get_gce_driver(self):
        """Determine the GCE authorization settings and return a
        libcloud driver.
    def get_config(self):
        """
        Populates a SafeConfigParser object with defaults and
        attempts to read an .ini-style configuration from the filename
        specified in GCE_INI_PATH. If the environment variable is
        not present, the filename defaults to gce.ini in the current
        working directory.
        """
        gce_ini_default_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "gce.ini")

@ -142,14 +151,32 @@ class GceInventory(object):

            'gce_service_account_pem_file_path': '',
            'gce_project_id': '',
            'libcloud_secrets': '',
            'inventory_ip_type': '',
        })
        if 'gce' not in config.sections():
            config.add_section('gce')
        config.read(gce_ini_path)
        if 'inventory' not in config.sections():
            config.add_section('inventory')

        config.read(gce_ini_path)
        return config

    def get_inventory_options(self):
        """Determine inventory options. Environment variables always
        take precedence over configuration files."""
        ip_type = self.config.get('inventory', 'inventory_ip_type')
        # If the appropriate environment variables are set, they override
        # other configuration
        ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
        return ip_type

    def get_gce_driver(self):
        """Determine the GCE authorization settings and return a
        libcloud driver.
        """
        # Attempt to get GCE params from a configuration file, if one
        # exists.
        secrets_path = config.get('gce', 'libcloud_secrets')
        secrets_path = self.config.get('gce', 'libcloud_secrets')
        secrets_found = False
        try:
            import secrets

@ -175,10 +202,10 @@ class GceInventory(object):

            pass
        if not secrets_found:
            args = [
                config.get('gce','gce_service_account_email_address'),
                config.get('gce','gce_service_account_pem_file_path')
                self.config.get('gce','gce_service_account_email_address'),
                self.config.get('gce','gce_service_account_pem_file_path')
            ]
            kwargs = {'project': config.get('gce', 'gce_project_id')}
            kwargs = {'project': self.config.get('gce', 'gce_project_id')}

            # If the appropriate environment variables are set, they override
            # other configuration; process those into our args and kwargs.

@ -218,6 +245,12 @@ class GceInventory(object):

                md[entry['key']] = entry['value']

        net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
        # default to external IP unless user has specified they prefer internal
        if self.ip_type == 'internal':
            ssh_host = inst.private_ips[0]
        else:
            ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]

        return {
            'gce_uuid': inst.uuid,
            'gce_id': inst.id,

@ -233,7 +266,7 @@ class GceInventory(object):

            'gce_metadata': md,
            'gce_network': net,
            # Hosts don't have a public name, so we add an IP
            'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
            'ansible_ssh_host': ssh_host
        }

    def get_instance(self, instance_name):
@ -63,7 +63,7 @@ CREATE

The *create* sub-command is used to initialize a new encrypted file.

After providing a password, the tool will launch whatever editor you have defined
with $EDITOR, and defaults to vim. Once you are done with the editor session, the
with $EDITOR, and defaults to vi. Once you are done with the editor session, the
file will be saved as encrypted data.

The default cipher is AES (which is shared-secret based).
@ -42,7 +42,7 @@ The 'ARGUMENTS' to pass to the module.

Use privilege escalation (specific one depends on become_method),
this does not imply prompting for passwords.

*K*, *--ask-become-pass*::
*-K*, *--ask-become-pass*::

Ask for privilege escalation password.
@ -251,7 +251,7 @@ Tower Support Questions

Ansible `Tower <http://ansible.com/tower>`_ is a UI, Server, and REST endpoint for Ansible, produced by Ansible, Inc.

If you have a question about tower, email `support@ansible.com <mailto:support@ansible.com>`_ rather than using the IRC
If you have a question about Tower, visit `support.ansible.com <https://support.ansible.com/>`_ rather than using the IRC
channel or the general project mailing list.

IRC Channel
@ -8,12 +8,12 @@ Requirements

------------

Using the Azure Resource Manager modules requires having `Azure Python SDK <https://github.com/Azure/azure-sdk-for-python>`_
installed on the host running Ansible. You will need to have >= v2.0.0RC2 installed. The simplest way to install the
installed on the host running Ansible. You will need to have == v2.0.0RC5 installed. The simplest way to install the
SDK is via pip:

.. code-block:: bash

    $ pip install azure==2.0.0rc2
    $ pip install "azure==2.0.0rc5"

Authenticating with Azure
@ -320,6 +320,10 @@ Select hosts for specific tag key by assigning a comma separated list of tag key

* AZURE_TAGS=key1,key2,key3

Select hosts for specific locations by assigning a comma separated list of locations to:

* AZURE_LOCATIONS=eastus,eastus2,westus

Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to:

* AZURE_TAGS=key1:value1,key2:value2
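For example (a sketch; the script name azure_rm.py and its --pretty flag are assumed from the contrib inventory conventions, not stated in this section):

    AZURE_LOCATIONS=eastus,westus ./azure_rm.py --pretty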
@ -340,6 +344,9 @@ file will contain the following:

# Control which tags are included. Set tags to a comma separated list of keys or key:value pairs
#tags=

# Control which locations are included. Set locations to a comma separated list of locations.
#locations=

# Include powerstate. If you don't need powerstate information, turning it off improves runtime performance.
# Valid values: yes, no, true, false, True, False, 0, 1.
include_powerstate=yes
docsite/rst/guide_docker.rst (new file, 317 lines)

@ -0,0 +1,317 @@
Getting Started with Docker
===========================

Ansible offers the following modules for orchestrating Docker containers:

docker_service
    Use your existing Docker compose files to orchestrate containers on a single Docker daemon or on
    Swarm. Supports compose versions 1 and 2.

docker_container
    Manages the container lifecycle by providing the ability to create, update, stop, start and destroy a
    container.

docker_image
    Provides full control over images, including: build, pull, push, tag and remove.

docker_image_facts
    Inspects one or more images in the Docker host's image cache, providing the information as facts for making
    decisions or assertions in a playbook.

docker_login
    Authenticates with Docker Hub or any Docker registry and updates the Docker Engine config file, which
    in turn provides password-free pushing and pulling of images to and from the registry.

docker (dynamic inventory)
    Dynamically builds an inventory of all the available containers from a set of one or more Docker hosts.

Ansible 2.1.0 includes major updates to the Docker modules, marking the start of a project to create a complete and
integrated set of tools for orchestrating containers. In addition to the above modules, we are also working on the
following:

Still using Dockerfile to build images? Check out `ansible-container <https://github.com/ansible/ansible-container>`_,
and start building images from your Ansible playbooks.

Use the *shipit* command in `ansible-container <https://github.com/ansible/ansible-container>`_
to launch your docker-compose file on `OpenShift <https://www.openshift.org/>`_. Go from an app on your laptop to a fully
scalable app in the cloud in just a few moments.

There's more planned. See the latest ideas and thinking at the `Ansible proposal repo <https://github.com/ansible/proposals/tree/master/docker>`_.
Requirements
------------

Using the docker modules requires having `docker-py <https://docker-py.readthedocs.org/en/stable/>`_
installed on the host running Ansible. You will need to have >= 1.7.0 installed.

.. code-block:: bash

    $ pip install 'docker-py>=1.7.0'

The docker_service module also requires `docker-compose <https://github.com/docker/compose>`_

.. code-block:: bash

    $ pip install 'docker-compose>=1.7.0'
Connecting to the Docker API
----------------------------

You can connect to a local or remote API using parameters passed to each task or by setting environment variables.
The order of precedence is command line parameters and then environment variables. If neither a command line
option nor an environment variable is found, a default value will be used. The default values are provided under
`Parameters`_

Parameters
..........

Control how modules connect to the Docker API by passing the following parameters:

docker_host
    The URL or Unix socket path used to connect to the Docker API. Defaults to ``unix://var/run/docker.sock``.
    To connect to a remote host, provide the TCP connection string. For example: ``tcp://192.168.99.100:2376``. If
    TLS is used to encrypt the connection to the API, then the module will automatically replace 'tcp' in the
    connection URL with 'https'.

api_version
    The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
    by docker-py.

timeout
    The maximum amount of time in seconds to wait on a response from the API. Defaults to 60 seconds.

tls
    Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server.
    Defaults to False.

tls_verify
    Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
    Default is False.

cacert_path
    Use a CA certificate when performing server verification by providing the path to a CA certificate file.

cert_path
    Path to the client's TLS certificate file.

key_path
    Path to the client's TLS key file.

tls_hostname
    When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults
    to 'localhost'.

ssl_version
    Provide a valid SSL version number. Default value determined by docker-py, which at the time of this writing
    was 1.0
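For instance, a task can carry its own connection settings. Here is a minimal sketch using the parameters above; the address and certificate paths are illustrative values, not prescribed ones:

.. code-block:: yaml

    - name: Start a container on a remote, TLS-verified daemon
      docker_container:
        name: web
        image: nginx
        docker_host: tcp://192.168.99.100:2376
        tls_verify: true
        cacert_path: /path/to/ca.pem
        cert_path: /path/to/cert.pem
        key_path: /path/to/key.pem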
Environment Variables
.....................

Control how the modules connect to the Docker API by setting the following variables in the environment of the host
running Ansible:

DOCKER_HOST
    The URL or Unix socket path used to connect to the Docker API.

DOCKER_API_VERSION
    The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
    by docker-py.

DOCKER_TIMEOUT
    The maximum amount of time in seconds to wait on a response from the API.

DOCKER_CERT_PATH
    Path to the directory containing the client certificate, client key and CA certificate.

DOCKER_SSL_VERSION
    Provide a valid SSL version number.

DOCKER_TLS
    Secure the connection to the API by using TLS without verifying the authenticity of the Docker Host.

DOCKER_TLS_VERIFY
    Secure the connection to the API by using TLS and verify the authenticity of the Docker Host.
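For example, pointing every module at the same remote daemon might look like this (the address, certificate directory and playbook name are assumptions for illustration):

.. code-block:: bash

    $ export DOCKER_HOST=tcp://192.168.99.100:2376
    $ export DOCKER_TLS_VERIFY=1
    $ export DOCKER_CERT_PATH=~/.docker/machine/machines/default
    $ ansible-playbook docker_play.yml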
Dynamic Inventory Script
------------------------

The inventory script generates dynamic inventory by making API requests to one or more Docker APIs. It's dynamic
because the inventory is generated at run-time rather than being read from a static file. The script generates the
inventory by connecting to one or many Docker APIs and inspecting the containers it finds at each API. Which APIs the
script contacts can be defined using environment variables or a configuration file.

Groups
......

The script will create the following host groups:

- container id
- container name
- container short id
- image_name (image_<image name>)
- docker_host
- running
- stopped
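These groups make it easy to target subsets of containers. A minimal sketch of a play aimed only at the running ones:

.. code-block:: yaml

    - name: Report every running container
      hosts: running
      connection: local
      gather_facts: no
      tasks:
        - debug: msg="{{ inventory_hostname }} is running"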
Examples
........

You can run the script interactively from the command line or pass it as the inventory to a playbook. Here are a few
examples to get you started:

.. code-block:: bash

    # Connect to the Docker API on localhost port 4243 and format the JSON output
    DOCKER_HOST=tcp://localhost:4243 ./docker.py --pretty

    # Any container's ssh port exposed on 0.0.0.0 will be mapped to
    # another IP address (where Ansible will attempt to connect via SSH)
    DOCKER_DEFAULT_IP=1.2.3.4 ./docker.py --pretty

    # Run as input to a playbook:
    ansible-playbook -i ~/projects/ansible/contrib/inventory/docker.py docker_inventory_test.yml

    # Simple playbook to invoke with the above example:

    - name: Test docker_inventory
      hosts: all
      connection: local
      gather_facts: no
      tasks:
        - debug: msg="Container - {{ inventory_hostname }}"
Configuration
.............

You can control the behavior of the inventory script by defining environment variables, or
creating a docker.yml file (sample provided in ansible/contrib/inventory). The order of precedence is the docker.yml
file and then environment variables.
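A quick sketch of pointing the script at a non-default configuration file (the path is hypothetical):

.. code-block:: bash

    DOCKER_CONFIG_FILE=/etc/ansible/docker.yml ./docker.py --pretty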
Environment Variables
;;;;;;;;;;;;;;;;;;;;;;

To connect to a single Docker API the following variables can be defined in the environment to control the connection
options. These are the same environment variables used by the Docker modules.

DOCKER_HOST
    The URL or Unix socket path used to connect to the Docker API. Defaults to unix://var/run/docker.sock.

DOCKER_API_VERSION
    The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
    by docker-py.

DOCKER_TIMEOUT
    The maximum amount of time in seconds to wait on a response from the API. Defaults to 60 seconds.

DOCKER_TLS
    Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server.
    Defaults to False.

DOCKER_TLS_VERIFY
    Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
    Default is False.

DOCKER_TLS_HOSTNAME
    When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults
    to localhost.

DOCKER_CERT_PATH
    Path to the directory containing the client certificate, client key and CA certificate.

DOCKER_SSL_VERSION
    Provide a valid SSL version number. Default value determined by docker-py, which at the time of this writing
    was 1.0
In addition to the connection variables there are a couple of variables used to control the execution and output of the
script:

DOCKER_CONFIG_FILE
    Path to the configuration file. Defaults to ./docker.yml.

DOCKER_PRIVATE_SSH_PORT
    The private port (container port) on which SSH is listening for connections. Defaults to 22.

DOCKER_DEFAULT_IP
    The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'.

Configuration File
;;;;;;;;;;;;;;;;;;

Using a configuration file provides a means for defining a set of Docker APIs from which to build an inventory.

The default name of the file is derived from the name of the inventory script. By default the script will look for
the basename of the script (i.e. docker) with an extension of '.yml'.

You can also override the default name of the file by defining DOCKER_CONFIG_FILE in the environment.

Here's what you can define in docker.yml:
defaults
    Defines a default connection. Defaults will be taken from this and applied to any values not provided
    for a host defined in the hosts list.

hosts
    If you wish to get inventory from more than one Docker host, define a hosts list.

For the default host and each host in the hosts list define the following attributes:

.. code-block:: yaml
    host:
        description: The URL or Unix socket path used to connect to the Docker API.
        required: yes

    tls:
        description: Connect using TLS without verifying the authenticity of the Docker host server.
        default: false
        required: false

    tls_verify:
        description: Connect using TLS and verify the authenticity of the Docker host server.
        default: false
        required: false

    cert_path:
        description: Path to the client's TLS certificate file.
        default: null
        required: false

    cacert_path:
        description: Use a CA certificate when performing server verification by providing the path to a CA certificate file.
        default: null
        required: false

    key_path:
        description: Path to the client's TLS key file.
        default: null
        required: false

    version:
        description: The Docker API version.
        required: false
        default: will be supplied by the docker-py module.

    timeout:
        description: The amount of time in seconds to wait on an API response.
        required: false
        default: 60

    default_ip:
        description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface
        '0.0.0.0'.
        required: false
        default: 127.0.0.1

    private_ssh_port:
        description: The port containers use for SSH
        required: false
        default: 22
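A complete sample docker.yml combining these attributes might look like the following; this is a sketch with hypothetical addresses and certificate paths:

.. code-block:: yaml

    defaults:
      host: unix:///var/run/docker.sock

    hosts:
      - host: tcp://10.0.0.10:2376
        tls_verify: true
        cacert_path: /path/to/ca.pem
        cert_path: /path/to/cert.pem
        key_path: /path/to/key.pem
        default_ip: 10.0.0.10
      - host: tcp://10.0.0.11:4243
        private_ssh_port: 2022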
@ -11,7 +11,7 @@ Introduction

Ansible contains modules for managing Google Compute Engine resources, including creating instances, controlling network access, working with persistent disks, and managing
load balancers. Additionally, there is an inventory plugin that can automatically suck down all of your GCE instances into Ansible dynamic inventory, and create groups by tag and other properties.

The GCE modules all require the apache-libcloud module, which you can install from pip:
The GCE modules all require the apache-libcloud module which you can install from pip:

.. code-block:: bash

@ -22,16 +22,19 @@ The GCE modules all require the apache-libcloud module, which you can install fr

Credentials
-----------

To work with the GCE modules, you'll first need to get some credentials. You can create new one from the `console <https://console.developers.google.com/>`_ by going to the "APIs and Auth" section and choosing to create a new client ID for a service account. Once you've created a new client ID and downloaded (you must click **Generate new P12 Key**) the generated private key (in the `pkcs12 format <http://en.wikipedia.org/wiki/PKCS_12>`_), you'll need to convert the key by running the following command:
To work with the GCE modules, you'll first need to get some credentials in the
JSON format:

.. code-block:: bash
1. `Create a Service Account <https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount>`_
2. `Download JSON credentials <https://support.google.com/cloud/answer/6158849?hl=en&ref_topic=6262490#serviceaccounts>`_

    $ openssl pkcs12 -in pkey.pkcs12 -passin pass:notasecret -nodes -nocerts | openssl rsa -out pkey.pem
There are three different ways to provide credentials to Ansible so that it can talk with Google Cloud for provisioning and configuration actions:

There are two different ways to provide credentials to Ansible so that it can talk with Google Cloud for provisioning and configuration actions:
.. note:: If you would like to use JSON credentials you must have libcloud >= 0.17.0

* by providing to the modules directly
* by populating a ``secrets.py`` file
* by setting environment variables

Calling Modules By Passing Credentials
``````````````````````````````````````
@ -39,7 +42,7 @@ Calling Modules By Passing Credentials

For the GCE modules you can specify the credentials as arguments:

* ``service_account_email``: email associated with the project
* ``pem_file``: path to the pem file
* ``credentials_file``: path to the JSON credentials file
* ``project_id``: id of the project

For example, to create a new instance using the cloud module, you can use the following configuration:

@ -48,12 +51,12 @@ For example, to create a new instance using the cloud module, you can use the fo

- name: Create instance(s)
  hosts: localhost
  connection: local
  gather_facts: no

  vars:
    service_account_email: unique-id@developer.gserviceaccount.com
    pem_file: /path/to/project.pem
    credentials_file: /path/to/project.json
    project_id: project-id
    machine_type: n1-standard-1
    image: debian-7

@ -61,28 +64,50 @@ For example, to create a new instance using the cloud module, you can use the fo

  tasks:

    - name: Launch instances
      gce:
          instance_names: dev
          machine_type: "{{ machine_type }}"
          image: "{{ image }}"
          service_account_email: "{{ service_account_email }}"
          pem_file: "{{ pem_file }}"
          credentials_file: "{{ credentials_file }}"
          project_id: "{{ project_id }}"
Calling Modules with secrets.py
```````````````````````````````
When running Ansible inside a GCE VM you can use the service account credentials from the local metadata server by
setting both ``service_account_email`` and ``credentials_file`` to a blank string.

Configuring Modules with secrets.py
```````````````````````````````````

Create a file ``secrets.py`` looking like the following, and put it in some folder which is in your ``$PYTHONPATH``:

.. code-block:: python

    GCE_PARAMS = ('i...@project.googleusercontent.com', '/path/to/project.pem')
    GCE_PARAMS = ('i...@project.googleusercontent.com', '/path/to/project.json')
    GCE_KEYWORD_PARAMS = {'project': 'project_id'}

Ensure to enter the email address from the created service account and not the one from your main account.

Now the modules can be used as above, but the account information can be omitted.

If you are running Ansible from inside a GCE VM with an authorized service account you can set the email address and
credentials path as follows so that they get automatically picked up:

.. code-block:: python

    GCE_PARAMS = ('', '')
    GCE_KEYWORD_PARAMS = {'project': 'project_id'}

Configuring Modules with Environment Variables
``````````````````````````````````````````````

Set the following environment variables before running Ansible in order to configure your credentials:

.. code-block:: bash

    GCE_EMAIL
    GCE_PROJECT
    GCE_CREDENTIALS_FILE_PATH
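For instance, a session might export the values used throughout this guide before invoking a play (the playbook name is hypothetical):

.. code-block:: bash

    $ export GCE_EMAIL=unique-id@developer.gserviceaccount.com
    $ export GCE_PROJECT=project-id
    $ export GCE_CREDENTIALS_FILE_PATH=/path/to/project.json
    $ ansible-playbook gce_play.yml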
GCE Dynamic Inventory
---------------------

@ -171,7 +196,7 @@ A playbook would looks like this:

      machine_type: n1-standard-1 # default
      image: debian-7
      service_account_email: unique-id@developer.gserviceaccount.com
      pem_file: /path/to/project.pem
      credentials_file: /path/to/project.json
      project_id: project-id

  tasks:

@ -181,7 +206,7 @@ A playbook would looks like this:

        machine_type: "{{ machine_type }}"
        image: "{{ image }}"
        service_account_email: "{{ service_account_email }}"
        pem_file: "{{ pem_file }}"
        credentials_file: "{{ credentials_file }}"
        project_id: "{{ project_id }}"
        tags: webserver
      register: gce

@ -224,7 +249,7 @@ a basic example of what is possible::

      machine_type: n1-standard-1 # default
      image: debian-7
      service_account_email: unique-id@developer.gserviceaccount.com
      pem_file: /path/to/project.pem
      credentials_file: /path/to/project.json
      project_id: project-id

  roles:

@ -238,13 +263,12 @@ a basic example of what is possible::

      args:
        fwname: "all-http"
        name: "default"
        allowed: "tcp:80"
        state: "present"
        service_account_email: "{{ service_account_email }}"
        pem_file: "{{ pem_file }}"
        credentials_file: "{{ credentials_file }}"
        project_id: "{{ project_id }}"

By pointing your browser to the IP of the server, you should see a page welcoming you.

Upgrades to this documentation are welcome, hit the github link at the top right of this page if you would like to make additions!
@ -1,7 +1,7 @@

Detailed Guides
```````````````

This section is new and evolving. The idea here is explore particular use cases in greater depth and provide a more "top down" explanation of some basic features.
This section is new and evolving. The idea here is to explore particular use cases in greater depth and provide a more "top down" explanation of some basic features.

.. toctree::
   :maxdepth: 1

@ -13,5 +13,6 @@ This section is new and evolving. The idea here is explore particular use cases

   guide_cloudstack
   guide_vagrant
   guide_rolling_upgrade
   guide_docker

Pending topics may include: Docker, Jenkins, Google Compute Engine, Linode/DigitalOcean, Continuous Deployment, and more.
@ -74,6 +74,34 @@ different locations::

Most users will not need to use this feature. See :doc:`developing_plugins` for more details.

.. _allow_unsafe_lookups:

allow_unsafe_lookups
====================

.. versionadded:: 2.2.3, 2.3.1

When enabled, this option allows lookup plugins (whether used in variables as `{{lookup('foo')}}` or as a loop as `with_foo`) to return data that is **not** marked "unsafe". By default, such data is marked as unsafe to prevent the templating engine from evaluating any jinja2 templating language, as this could represent a security risk.

This option is provided to allow for backwards-compatibility, however users should first consider adding `allow_unsafe=True` to any lookups which may be expected to contain data which may be run through the templating engine later. For example::

    {{lookup('pipe', '/path/to/some/command', allow_unsafe=True)}}

.. _allow_world_readable_tmpfiles:

allow_world_readable_tmpfiles
=============================

.. versionadded:: 2.1
This makes the temporary files created on the machine world readable, and will issue a warning instead of failing the task.

It is useful when becoming an unprivileged user::

    allow_world_readable_tmpfiles=True

.. _ansible_managed:

ansible_managed
@ -149,7 +177,7 @@ Callbacks are pieces of code in ansible that get called on specific events, perm

This is a developer-centric feature that allows low-level extensions around Ansible to be loaded from
different locations::

    callback_plugins = ~/.ansible/plugins/callback_plugins/:/usr/share/ansible_plugins/callback_plugins
    callback_plugins = ~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback

Most users will not need to use this feature. See :doc:`developing_plugins` for more details
@ -801,11 +801,11 @@ In 2.x, we have made the order of precedence more specific (with the last listed

* playbook group_vars
* playbook host_vars
* host facts
* registered vars
* set_facts
* play vars
* play vars_prompt
* play vars_files
* registered vars
* set_facts
* role and include vars
* block vars (only for tasks in block)
* task vars (only for the task)
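Since registered vars and set_facts moved below play vars in 2.x, a fact set at runtime now wins over a play-level variable; a minimal sketch::

    - hosts: localhost
      gather_facts: no
      vars:
        greeting: "from play vars"
      tasks:
        - set_fact:
            greeting: "from set_fact"
        - debug: msg="{{ greeting }}"   # prints "from set_fact" under the 2.x ordering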
@ -29,7 +29,7 @@ To create a new encrypted data file, run the following command::

First you will be prompted for a password. The password used with vault currently must be the same for all files you wish to use together at the same time.

After providing a password, the tool will launch whatever editor you have defined with $EDITOR, and defaults to vim. Once you are done with the editor session, the file will be saved as encrypted data.
After providing a password, the tool will launch whatever editor you have defined with $EDITOR, and defaults to vi (before 2.1 the default was vim). Once you are done with the editor session, the file will be saved as encrypted data.

The default cipher is AES (which is shared-secret based).
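To try it, the create command with an explicit editor choice looks like this (the file name is an example)::

    EDITOR=nano ansible-vault create secret_vars.yml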
@ -252,6 +252,12 @@

# set to 0 for unlimited (RAM may suffer!).
#max_diff_size = 1048576

# When enabled, this option allows lookups (via variables like {{lookup('foo')}} or when used as
# a loop with `with_foo`) to return data that is not marked "unsafe". This means the data may contain
# jinja2 templating language which will be run through the templating engine.
# ENABLING THIS COULD BE A SECURITY RISK
#allow_unsafe_lookups = False

[privilege_escalation]
#become=True
#become_method=sudo
@ -331,7 +337,7 @@

#libvirt_lxc_noseclabel = yes

[colors]
#higlight = white
#highlight = white
#verbose = blue
#warn = bright purple
#error = red
@ -8,6 +8,15 @@ set PREFIX_PYTHONPATH $ANSIBLE_HOME/lib

set PREFIX_PATH $ANSIBLE_HOME/bin
set PREFIX_MANPATH $ANSIBLE_HOME/docs/man

# set quiet flag
if set -q argv
    switch $argv
        case '-q' '--quiet'
            set QUIET "true"
        case '*'
    end
end

# Set PYTHONPATH
if not set -q PYTHONPATH
    set -gx PYTHONPATH $PREFIX_PYTHONPATH

@ -15,7 +24,9 @@ else

    switch PYTHONPATH
        case "$PREFIX_PYTHONPATH*"
        case "*"
            echo "Appending PYTHONPATH"
            if not [ $QUIET ]
                echo "Appending PYTHONPATH"
            end
            set -gx PYTHONPATH "$PREFIX_PYTHONPATH:$PYTHONPATH"
    end
end

@ -38,7 +49,11 @@ set -gx ANSIBLE_LIBRARY $ANSIBLE_HOME/library

# Generate egg_info so that pkg_resources works
pushd $ANSIBLE_HOME
python setup.py egg_info
if [ $QUIET ]
    python setup.py -q egg_info
else
    python setup.py egg_info
end
if test -e $PREFIX_PYTHONPATH/ansible*.egg-info
    rm -r $PREFIX_PYTHONPATH/ansible*.egg-info
end

@ -47,22 +62,19 @@ find . -type f -name "*.pyc" -delete

popd

if set -q argv
    switch $argv
        case '-q' '--quiet'
        case '*'
            echo ""
            echo "Setting up Ansible to run out of checkout..."
            echo ""
            echo "PATH=$PATH"
            echo "PYTHONPATH=$PYTHONPATH"
            echo "ANSIBLE_LIBRARY=$ANSIBLE_LIBRARY"
            echo "MANPATH=$MANPATH"
            echo ""
            echo "Remember, you may wish to specify your host file with -i"
            echo ""
            echo "Done!"
            echo ""
    end
if not [ $QUIET ]
    echo ""
    echo "Setting up Ansible to run out of checkout..."
    echo ""
    echo "PATH=$PATH"
    echo "PYTHONPATH=$PYTHONPATH"
    echo "ANSIBLE_LIBRARY=$ANSIBLE_LIBRARY"
    echo "MANPATH=$MANPATH"
    echo ""
    echo "Remember, you may wish to specify your host file with -i"
    echo ""
    echo "Done!"
    echo ""
end

set -e QUIET
@ -175,7 +175,7 @@ def ziploader_setup(modfile, modname):

    print("* ziploader module detected; extracted module source to: %s" % debug_dir)
    return modfile, argsfile

def runtest(modstyle, modfile, argspath, modname, module_style):
def runtest(modfile, argspath, modname, module_style):
    """Test run a module, piping it's output for reporting."""
    if module_style == 'ziploader':
        modfile, argspath = ziploader_setup(modfile, modname)
@ -19,5 +19,10 @@

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

__version__ = '2.1.0'
__author__ = 'Ansible, Inc.'
# Note: Do not add any code to this file. The ansible module may be
# a namespace package when using Ansible-2.1+ Anything in this file may not be
# available if one of the other packages in the namespace is loaded first.
#
# This is for backwards compat. Code should be ported to get these from
# ansible.release instead of from here.
from ansible.release import __version__, __author__
@ -30,7 +30,7 @@ import getpass

import signal
import subprocess

from ansible import __version__
from ansible.release import __version__
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.utils.unicode import to_bytes, to_unicode

@ -90,7 +90,10 @@ class CLI(object):

            break

        if not self.action:
            raise AnsibleOptionsError("Missing required action")
            # no action is needed if only version/help was requested
            tmp_options, tmp_args = self.parser.parse_args()
            if not(hasattr(tmp_options, 'help') and tmp_options.help) or (hasattr(tmp_options, 'version') and tmp_options.version):
                raise AnsibleOptionsError("Missing required action")

    def execute(self):
        """

@ -476,7 +479,7 @@ class CLI(object):

            display.display(text)
        else:
            self.pager_pipe(text, os.environ['PAGER'])
    elif subprocess.call('(less --version) 2> /dev/null', shell = True) == 0:
    elif subprocess.call('(less --version) &> /dev/null', shell = True) == 0:
        self.pager_pipe(text, 'less')
    else:
        display.display(text)
@ -130,7 +130,7 @@ class AdHocCLI(CLI):

        variable_manager.set_inventory(inventory)

        no_hosts = False
        if len(inventory.list_hosts(pattern)) == 0:
        if len(inventory.list_hosts()) == 0:
            # Empty inventory
            display.warning("provided hosts list is empty, only localhost is available")
            no_hosts = True

@ -139,7 +139,7 @@ class AdHocCLI(CLI):

        hosts = inventory.list_hosts(pattern)
        if len(hosts) == 0 and no_hosts is False:
            # Invalid limit
            raise AnsibleError("Specified --limit does not match any hosts")
            raise AnsibleError("Specified hosts and/or --limit does not match any hosts")

        if self.options.listhosts:
            display.display('  hosts (%d):' % len(hosts))
@ -38,7 +38,7 @@ import sys

from ansible import constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.errors import AnsibleError

from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.inventory import Inventory

@ -301,7 +301,7 @@ class ConsoleCLI(CLI, cmd.Cmd):

    def do_become(self, arg):
        """Toggle whether plays run with become"""
        if arg:
            self.options.become_user = arg
            self.options.become = C.mk_boolean(arg)
            display.v("become changed to %s" % self.options.become)
            self.set_prompt()
        else:

@ -419,13 +419,19 @@ class ConsoleCLI(CLI, cmd.Cmd):

        self.inventory = Inventory(loader=self.loader, variable_manager=self.variable_manager, host_list=self.options.inventory)
        self.variable_manager.set_inventory(self.inventory)

        if len(self.inventory.list_hosts(self.pattern)) == 0:
        no_hosts = False
        if len(self.inventory.list_hosts()) == 0:
            # Empty inventory
            no_hosts = True
            display.warning("provided hosts list is empty, only localhost is available")

        self.inventory.subset(self.options.subset)
        hosts = self.inventory.list_hosts(self.pattern)
        if len(hosts) == 0 and not no_hosts:
            raise AnsibleError("Specified hosts and/or --limit does not match any hosts")

        self.groups = self.inventory.list_groups()
        self.hosts = [x.name for x in self.inventory.list_hosts(self.pattern)]
        self.hosts = [x.name for x in hosts]

        # This hack is to work around readline issues on a mac:
        # http://stackoverflow.com/a/7116997/541202
@ -51,7 +51,7 @@ class GalaxyCLI(CLI):
|
|||
|
||||
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" )
|
||||
VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup")
|
||||
|
||||
|
||||
def __init__(self, args):
|
||||
self.api = None
|
||||
self.galaxy = None
|
||||
|
@ -65,95 +65,71 @@ class GalaxyCLI(CLI):
|
|||
epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
|
||||
)
|
||||
|
||||
|
||||
self.set_action()
|
||||
|
||||
# options specific to actions
|
||||
# common
|
||||
self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination')
|
||||
self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS, help='Ignore SSL certificate validation errors.')
|
||||
|
||||
# specific to actions
|
||||
if self.action == "delete":
|
||||
self.parser.set_usage("usage: %prog delete [options] github_user github_repo")
|
||||
elif self.action == "import":
|
||||
self.parser.set_usage("usage: %prog import [options] github_user github_repo")
|
||||
self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True,
|
||||
help='Don\'t wait for import results.')
|
||||
self.parser.add_option('--branch', dest='reference',
|
||||
help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
|
||||
self.parser.add_option('--status', dest='check_status', action='store_true', default=False,
|
||||
help='Check the status of the most recent import request for given github_user/github_repo.')
|
||||
self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.')
|
||||
self.parser.add_option('--branch', dest='reference', help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
|
||||
self.parser.add_option('--status', dest='check_status', action='store_true', default=False, help='Check the status of the most recent import request for given github_user/github_repo.')
|
||||
elif self.action == "info":
|
||||
self.parser.set_usage("usage: %prog info [options] role_name[,version]")
|
||||
elif self.action == "init":
|
||||
self.parser.set_usage("usage: %prog init [options] role_name")
|
||||
self.parser.add_option('-p', '--init-path', dest='init_path', default="./",
|
||||
help='The path in which the skeleton role will be created. The default is the current working directory.')
|
||||
self.parser.add_option(
|
||||
'--offline', dest='offline', default=False, action='store_true',
|
||||
help="Don't query the galaxy API when creating roles")
|
||||
self.parser.add_option('-p', '--init-path', dest='init_path', default="./", help='The path in which the skeleton role will be created. The default is the current working directory.')
|
||||
elif self.action == "install":
|
||||
self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
|
||||
self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
|
||||
help='Ignore errors and continue with the next specified role.')
|
||||
self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
|
||||
help='Don\'t download roles listed as dependencies')
|
||||
self.parser.add_option('-r', '--role-file', dest='role_file',
|
||||
help='A file containing a list of roles to be imported')
|
||||
self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False, help='Ignore errors and continue with the next specified role.')
|
||||
self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies')
|
||||
self.parser.add_option('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported')
|
||||
elif self.action == "remove":
|
||||
self.parser.set_usage("usage: %prog remove role1 role2 ...")
|
||||
elif self.action == "list":
|
||||
self.parser.set_usage("usage: %prog list [role_name]")
|
||||
elif self.action == "login":
|
||||
self.parser.set_usage("usage: %prog login [options]")
|
||||
self.parser.add_option('--github-token', dest='token', default=None,
|
||||
help='Identify with github token rather than username and password.')
|
||||
            self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.')
        elif self.action == "search":
            self.parser.add_option('--platforms', dest='platforms',
                help='list of OS platforms to filter by')
            self.parser.add_option('--galaxy-tags', dest='tags',
                help='list of galaxy tags to filter by')
            self.parser.add_option('--author', dest='author',
                help='GitHub username')
            self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] [--author username]")
            self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by')
            self.parser.add_option('--galaxy-tags', dest='tags', help='list of galaxy tags to filter by')
            self.parser.add_option('--author', dest='author', help='GitHub username')
        elif self.action == "setup":
            self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret")
            self.parser.add_option('--remove', dest='remove_id', default=None,
                help='Remove the integration matching the provided ID value. Use --list to see ID values.')
            self.parser.add_option('--list', dest="setup_list", action='store_true', default=False,
                help='List all of your integrations.')
            self.parser.add_option('--remove', dest='remove_id', default=None, help='Remove the integration matching the provided ID value. Use --list to see ID values.')
            self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.')

        # options that apply to more than one action
        if self.action in ['init', 'info']:
            self.parser.add_option( '--offline', dest='offline', default=False, action='store_true', help="Don't query the galaxy API when creating roles")

        if not self.action in ("delete","import","init","login","setup"):
            # NOTE: while the option type=str, the default is a list, and the
            # callback will set the value to a list.
            self.parser.add_option('-p', '--roles-path', dest='roles_path',
                action="callback", callback=CLI.expand_paths,
                type=str, default=C.DEFAULT_ROLES_PATH,
                help='The path to the directory containing your roles. '
                     'The default is the roles_path configured in your '
                     'ansible.cfg file (/etc/ansible/roles if not configured)')

        if self.action in ("import","info","init","install","login","search","setup","delete"):
            self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER,
                help='The API server destination')
            self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=False,
                help='Ignore SSL certificate validation errors.')
            self.parser.add_option('-p', '--roles-path', dest='roles_path', action="callback", callback=CLI.expand_paths, type=str, default=C.DEFAULT_ROLES_PATH,
                help='The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg file (/etc/ansible/roles if not configured)')

        if self.action in ("init","install"):
            self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False,
                help='Force overwriting an existing role')
            self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role')

        self.options, self.args = self.parser.parse_args()
        display.verbosity = self.options.verbosity
        self.galaxy = Galaxy(self.options)

        return True

    def run(self):

        super(GalaxyCLI, self).run()

        # if not offline, connect to the galaxy api
        if self.action in ("import","info","install","search","login","setup","delete") or \
           (self.action == 'init' and not self.options.offline):
            self.api = GalaxyAPI(self.galaxy)

        self.api = GalaxyAPI(self.galaxy)
        self.execute()

    def exit_without_ignore(self, rc=1):
@@ -242,7 +218,7 @@ class GalaxyCLI(CLI):
        # platforms included (but commented out), the galaxy_tags
        # list, and the dependencies section
        platforms = []
        if not offline and self.api:
        if not offline:
            platforms = self.api.get_list("platforms") or []

        # group the list of platforms from the api based

@@ -315,7 +291,7 @@ class GalaxyCLI(CLI):
            role_info.update(install_info)

        remote_data = False
        if self.api:
        if not self.options.offline:
            remote_data = self.api.lookup_role_by_name(role, False)

        if remote_data:

@@ -489,22 +465,23 @@ class GalaxyCLI(CLI):
        else:
            # show all valid roles in the roles_path directory
            roles_path = self.get_opt('roles_path')
            roles_path = os.path.expanduser(roles_path)
            if not os.path.exists(roles_path):
                raise AnsibleOptionsError("- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path)
            elif not os.path.isdir(roles_path):
                raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path)
            path_files = os.listdir(roles_path)
            for path_file in path_files:
                gr = GalaxyRole(self.galaxy, path_file)
                if gr.metadata:
                    install_info = gr.install_info
                    version = None
                    if install_info:
                        version = install_info.get("version", None)
                    if not version:
                        version = "(unknown version)"
                    display.display("- %s, %s" % (path_file, version))
            for path in roles_path:
                role_path = os.path.expanduser(path)
                if not os.path.exists(role_path):
                    raise AnsibleOptionsError("- the path %s does not exist. Please specify a valid path with --roles-path" % role_path)
                elif not os.path.isdir(role_path):
                    raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % role_path)
                path_files = os.listdir(role_path)
                for path_file in path_files:
                    gr = GalaxyRole(self.galaxy, path_file)
                    if gr.metadata:
                        install_info = gr.install_info
                        version = None
                        if install_info:
                            version = install_info.get("version", None)
                        if not version:
                            version = "(unknown version)"
                        display.display("- %s, %s" % (path_file, version))
            return 0

    def execute_search(self):
@@ -158,6 +158,12 @@ class PlaybookCLI(CLI):

                display.display('\nplaybook: %s' % p['playbook'])
                for idx, play in enumerate(p['plays']):
                    if play._included_path is not None:
                        loader.set_basedir(play._included_path)
                    else:
                        pb_dir = os.path.realpath(os.path.dirname(p['playbook']))
                        loader.set_basedir(pb_dir)

                    msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name)
                    mytags = set(play.tags)
                    msg += '\tTAGS: [%s]' % (','.join(mytags))
@@ -28,6 +28,7 @@ from ansible.compat.six.moves import configparser

from ansible.parsing.quoting import unquote
from ansible.errors import AnsibleOptionsError
from ansible.utils.path import makedirs_safe

# copied from utils, avoid circular reference fun :)
def mk_boolean(value):

@@ -39,16 +40,22 @@ def mk_boolean(value):
    else:
        return False

def shell_expand(path):
def shell_expand(path, expand_relative_paths=False):
    '''
    shell_expand is needed as os.path.expanduser does not work
    when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE
    '''
    if path:
        path = os.path.expanduser(os.path.expandvars(path))
        if expand_relative_paths and not path.startswith('/'):
            # paths are always 'relative' to the config?
            if 'CONFIG_FILE' in globals():
                CFGDIR = os.path.dirname(CONFIG_FILE)
                path = os.path.join(CFGDIR, path)
            path = os.path.abspath(path)
    return path

def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False, isnone=False, ispath=False, ispathlist=False, istmppath=False):
def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False, isnone=False, ispath=False, ispathlist=False, istmppath=False, expand_relative_paths=False):
    ''' return a configuration variable with casting '''
    value = _get_config(p, section, key, env_var, default)
    if boolean:

@@ -69,11 +76,13 @@ def get_config(p, section, key, env_var, default, boolean=False, integer=False,
    elif istmppath:
        value = shell_expand(value)
        if not os.path.exists(value):
            os.makedirs(value, 0o700)
        value = tempfile.mkdtemp(prefix='ansible-local-tmp', dir=value)
        makedirs_safe(value, 0o700)
        prefix = 'ansible-local-%s' % os.getpid()
        value = tempfile.mkdtemp(prefix=prefix, dir=value)
    elif ispathlist:
        if isinstance(value, string_types):
            value = [shell_expand(x) for x in value.split(os.pathsep)]
            value = [shell_expand(x, expand_relative_paths=expand_relative_paths) \
                     for x in value.split(os.pathsep)]
    elif isinstance(value, string_types):
        value = unquote(value)
    return value
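The expand_relative_paths flag added above anchors bare entries such as `roles` at the directory containing the loaded config file. A minimal runnable sketch of that resolution rule follows; the CONFIG_FILE value here is an assumed stand-in for wherever ansible.cfg was actually found:

    import os

    CONFIG_FILE = '/etc/ansible/ansible.cfg'   # assumed stand-in for the detected config

    def expand(path, expand_relative_paths=False):
        # mirror shell_expand: ~ and $VARS are expanded first
        path = os.path.expanduser(os.path.expandvars(path))
        if expand_relative_paths and not path.startswith('/'):
            # relative entries resolve against the config file's directory
            path = os.path.abspath(os.path.join(os.path.dirname(CONFIG_FILE), path))
        return path

    print(expand('roles', expand_relative_paths=True))           # -> /etc/ansible/roles
    print(expand('~/galaxy_roles', expand_relative_paths=True))  # absolute after ~ expansion, untouched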
@@ -140,7 +149,7 @@ DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, None)
DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True)
DEFAULT_HOST_LIST = get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', DEPRECATED_HOST_LIST, ispath=True)
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None, ispathlist=True)
DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', ispathlist=True)
DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', ispathlist=True, expand_relative_paths=True)
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
DEFAULT_LOCAL_TMP = get_config(p, DEFAULTS, 'local_tmp', 'ANSIBLE_LOCAL_TEMP', '$HOME/.ansible/tmp', istmppath=True)
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')

@@ -173,6 +182,7 @@ DEFAULT_LOG_PATH = get_config(p, DEFAULTS, 'log_path', 'ANSIB
DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE', ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], islist=True)
DEFAULT_VAR_COMPRESSION_LEVEL = get_config(p, DEFAULTS, 'var_compression_level', 'ANSIBLE_VAR_COMPRESSION_LEVEL', 0, integer=True)
DEFAULT_ALLOW_UNSAFE_LOOKUPS = get_config(p, DEFAULTS, 'allow_unsafe_lookups', None, False, boolean=True)

# static includes
DEFAULT_TASK_INCLUDES_STATIC = get_config(p, DEFAULTS, 'task_includes_static', 'ANSIBLE_TASK_INCLUDES_STATIC', False, boolean=True)

@@ -302,7 +312,7 @@ COLOR_DEPRECATE = get_config(p, 'colors', 'deprecate', 'ANSIBLE_COLOR_DEPRECAT
COLOR_SKIP = get_config(p, 'colors', 'skip', 'ANSIBLE_COLOR_SKIP', 'cyan')
COLOR_UNREACHABLE = get_config(p, 'colors', 'unreachable', 'ANSIBLE_COLOR_UNREACHABLE', 'bright red')
COLOR_OK = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_OK', 'green')
COLOR_CHANGED = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_CHANGED', 'yellow')
COLOR_CHANGED = get_config(p, 'colors', 'changed', 'ANSIBLE_COLOR_CHANGED', 'yellow')
COLOR_DIFF_ADD = get_config(p, 'colors', 'diff_add', 'ANSIBLE_COLOR_DIFF_ADD', 'green')
COLOR_DIFF_REMOVE = get_config(p, 'colors', 'diff_remove', 'ANSIBLE_COLOR_DIFF_REMOVE', 'red')
COLOR_DIFF_LINES = get_config(p, 'colors', 'diff_lines', 'ANSIBLE_COLOR_DIFF_LINES', 'cyan')
@@ -30,11 +30,14 @@ import zipfile
from io import BytesIO

# from Ansible
from ansible import __version__
from ansible.release import __version__, __author__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.utils.unicode import to_bytes, to_unicode
from ansible.plugins.strategy import action_write_locks
# Must import strategy and use write_locks from there
# If we import write_locks directly then we end up binding a
# variable to the object and then it never gets updated.
from ansible.plugins import strategy

try:
    from __main__ import display

@@ -118,7 +121,7 @@ def invoke_module(module, modlib_path, json_params):
    else:
        os.environ['PYTHONPATH'] = modlib_path

    p = subprocess.Popen(['%(interpreter)s', module], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    p = subprocess.Popen([%(interpreter)s, module], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    (stdout, stderr) = p.communicate(json_params)

    if not isinstance(stderr, (bytes, unicode)):

@@ -215,7 +218,7 @@ def debug(command, zipped_mod, json_params):
    else:
        os.environ['PYTHONPATH'] = basedir

    p = subprocess.Popen(['%(interpreter)s', script_path, args_path], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    p = subprocess.Popen([%(interpreter)s, script_path, args_path], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    (stdout, stderr) = p.communicate()

    if not isinstance(stderr, (bytes, unicode)):

@@ -267,18 +270,33 @@ if __name__ == '__main__':
        # remote_tmpdir and this module executing under async. So we cannot
        # store this in remote_tmpdir (use system tempdir instead)
        temp_path = tempfile.mkdtemp(prefix='ansible_')

        zipped_mod = os.path.join(temp_path, 'ansible_modlib.zip')
        modlib = open(zipped_mod, 'wb')
        modlib.write(base64.b64decode(ZIPDATA))
        modlib.close()

        if len(sys.argv) == 2:
            exitcode = debug(sys.argv[1], zipped_mod, ZIPLOADER_PARAMS)
        else:
            z = zipfile.ZipFile(zipped_mod)
            z = zipfile.ZipFile(zipped_mod, mode='r')
            module = os.path.join(temp_path, 'ansible_module_%(ansible_module)s.py')
            f = open(module, 'wb')
            f.write(z.read('ansible_module_%(ansible_module)s.py'))
            f.close()

            # When installed via setuptools (including python setup.py install),
            # ansible may be installed with an easy-install.pth file. That file
            # may load the system-wide install of ansible rather than the one in
            # the module. sitecustomize is the only way to override that setting.
            z = zipfile.ZipFile(zipped_mod, mode='a')
            # py3: zipped_mod will be text, py2: it's bytes. Need bytes at the end
            z = zipfile.ZipFile(zipped_mod, mode='a')
            sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% zipped_mod
            sitecustomize = sitecustomize.encode('utf-8')
            z.writestr('sitecustomize.py', sitecustomize)
            z.close()

            exitcode = invoke_module(module, zipped_mod, ZIPLOADER_PARAMS)
    finally:
        try:
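The sitecustomize.py appended to the zip above is what defeats a system-wide easy-install.pth: Python imports sitecustomize automatically at interpreter startup, so the embedded copy of ansible lands at the front of sys.path before the module body runs. A standalone sketch of the same trick, using a hypothetical zip path:

    import zipfile

    def add_sitecustomize(zip_path):
        # reopen in append mode so the module payload written earlier survives
        z = zipfile.ZipFile(zip_path, mode='a')
        # insert the zip itself at the front of sys.path at interpreter startup
        body = ('import sys\nsys.path.insert(0, "%s")\n' % zip_path).encode('utf-8')
        z.writestr('sitecustomize.py', body)
        z.close()

    add_sitecustomize('/tmp/ansible_modlib.zip')   # hypothetical path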
@@ -372,12 +390,12 @@ def _get_shebang(interpreter, task_vars, args=tuple()):
    file rather than trust that we reformatted what they already have
    correctly.
    """
    interpreter_config = u'ansible_%s_interpreter' % os.path.basename(interpreter)
    interpreter_config = u'ansible_%s_interpreter' % os.path.basename(interpreter).strip()

    if interpreter_config not in task_vars:
        return (None, interpreter)

    interpreter = task_vars[interpreter_config]
    interpreter = task_vars[interpreter_config].strip()
    shebang = u'#!' + interpreter

    if args:

@@ -385,12 +403,6 @@ def _get_shebang(interpreter, task_vars, args=tuple()):

    return (shebang, interpreter)

def _get_facility(task_vars):
    facility = C.DEFAULT_SYSLOG_FACILITY
    if 'ansible_syslog_facility' in task_vars:
        facility = task_vars['ansible_syslog_facility']
    return facility

def recursive_finder(name, data, py_module_names, py_module_cache, zf):
    """
    Using ModuleDepFinder, make sure we have all of the module_utils files that

@@ -529,14 +541,7 @@ def _find_snippet_imports(module_name, module_data, module_path, module_args, ta
    py_module_names = set()

    if module_substyle == 'python':
        # ziploader for new-style python classes
        constants = dict(
            SELINUX_SPECIAL_FS=C.DEFAULT_SELINUX_SPECIAL_FS,
            SYSLOG_FACILITY=_get_facility(task_vars),
        )
        params = dict(ANSIBLE_MODULE_ARGS=module_args,
                      ANSIBLE_MODULE_CONSTANTS=constants,
                      )
        params = dict(ANSIBLE_MODULE_ARGS=module_args,)
        python_repred_params = to_bytes(repr(json.dumps(params)), errors='strict')

        try:
@@ -551,19 +556,34 @@ def _find_snippet_imports(module_name, module_data, module_path, module_args, ta
            zipdata = None
            # Optimization -- don't lock if the module has already been cached
            if os.path.exists(cached_module_filename):
                display.debug('ZIPLOADER: using cached module: %s' % cached_module_filename)
                zipdata = open(cached_module_filename, 'rb').read()
                # Fool the check later... I think we should just remove the check
                py_module_names.add(('basic',))
            else:
                with action_write_locks[module_name]:
                if module_name in strategy.action_write_locks:
                    display.debug('ZIPLOADER: Using lock for %s' % module_name)
                    lock = strategy.action_write_locks[module_name]
                else:
                    # If the action plugin directly invokes the module (instead of
                    # going through a strategy) then we don't have a cross-process
                    # Lock specifically for this module. Use the "unexpected
                    # module" lock instead
                    display.debug('ZIPLOADER: Using generic lock for %s' % module_name)
                    lock = strategy.action_write_locks[None]

                display.debug('ZIPLOADER: Acquiring lock')
                with lock:
                    display.debug('ZIPLOADER: Lock acquired: %s' % id(lock))
                    # Check that no other process has created this while we were
                    # waiting for the lock
                    if not os.path.exists(cached_module_filename):
                        display.debug('ZIPLOADER: Creating module')
                        # Create the module zip data
                        zipoutput = BytesIO()
                        zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
                        zf.writestr('ansible/__init__.py', b''.join((b"__version__ = '", to_bytes(__version__), b"'\n")))
                        zf.writestr('ansible/module_utils/__init__.py', b'')
                        zf.writestr('ansible/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\ntry:\n from ansible.release import __version__,__author__\nexcept ImportError:\n __version__="' + to_bytes(__version__) + b'"\n __author__="' + to_bytes(__author__) + b'"\n')
                        zf.writestr('ansible/module_utils/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n')

                        zf.writestr('ansible_module_%s.py' % module_name, module_data)
@@ -579,15 +599,19 @@ def _find_snippet_imports(module_name, module_data, module_path, module_args, ta
                            # Note -- if we have a global function to setup, that would
                            # be a better place to run this
                            os.mkdir(lookup_path)
                        display.debug('ZIPLOADER: Writing module')
                        with open(cached_module_filename + '-part', 'w') as f:
                            f.write(zipdata)

                        # Rename the file into its final position in the cache so
                        # future users of this module can read it off the
                        # filesystem instead of constructing from scratch.
                        display.debug('ZIPLOADER: Renaming module')
                        os.rename(cached_module_filename + '-part', cached_module_filename)
                        display.debug('ZIPLOADER: Done creating module')

                if zipdata is None:
                    display.debug('ZIPLOADER: Reading module after lock')
                    # Another process wrote the file while we were waiting for
                    # the write lock. Go ahead and read the data from disk
                    # instead of re-creating it.

@@ -601,6 +625,12 @@ def _find_snippet_imports(module_name, module_data, module_path, module_args, ta
        shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars)
        if shebang is None:
            shebang = u'#!/usr/bin/python'

        # Enclose the parts of the interpreter in quotes because we're
        # substituting it into the template as a Python string
        interpreter_parts = interpreter.split(u' ')
        interpreter = u"'{0}'".format(u"', '".join(interpreter_parts))

        output.write(to_bytes(ACTIVE_ZIPLOADER_TEMPLATE % dict(
            zipdata=zipdata,
            ansible_module=module_name,

@@ -656,7 +686,7 @@ def _find_snippet_imports(module_name, module_data, module_path, module_args, ta
    # The main event -- substitute the JSON args string into the module
    module_data = module_data.replace(REPLACER_JSONARGS, module_args_json)

    facility = b'syslog.' + to_bytes(_get_facility(task_vars), errors='strict')
    facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='strict')
    module_data = module_data.replace(b'syslog.LOG_USER', facility)

    return (module_data, module_style, shebang)
@@ -80,7 +80,7 @@ class HostState:
                ret.append(states[i])
            return "|".join(ret)

        return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? %s, rescue child state? %s, always child state? %s, did start at task? %s" % (
        return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? (%s), rescue child state? (%s), always child state? (%s), did start at task? %s" % (
            self.cur_block,
            self.cur_regular_task,
            self.cur_rescue_task,

@@ -216,10 +216,13 @@ class PlayIterator:
            self._play.handlers.extend(play.compile_roles_handlers())

    def get_host_state(self, host):
        try:
            return self._host_states[host.name].copy()
        except KeyError:
            raise AnsibleError("invalid host (%s) specified for playbook iteration" % host)
        # Since we're using the PlayIterator to carry forward failed hosts,
        # in the event that a previous host was not in the current inventory
        # we create a stub state for it now
        if host.name not in self._host_states:
            self._host_states[host.name] = HostState(blocks=[])

        return self._host_states[host.name].copy()

    def get_next_task_for_host(self, host, peek=False):
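A toy illustration of why get_host_state now stubs missing hosts: failed hosts are carried forward across plays, and a host that has since left the inventory should produce an empty state rather than an exception. HostState here is a pared-down stand-in for the real class:

    class HostState(object):
        # pared-down stand-in for ansible.executor.play_iterator.HostState
        def __init__(self, blocks):
            self.blocks = blocks
        def copy(self):
            return HostState(list(self.blocks))

    _host_states = {}

    def get_host_state(host_name):
        # a host failed in a previous run but absent from the current inventory
        # gets an empty stub state instead of raising a KeyError
        if host_name not in _host_states:
            _host_states[host_name] = HostState(blocks=[])
        return _host_states[host_name].copy()

    print(get_host_state('decommissioned-host').blocks)   # [] rather than an error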
@@ -295,10 +298,10 @@ class PlayIterator:
                setup_block = self._blocks[0]
                if setup_block.has_tasks() and len(setup_block.block) > 0:
                    task = setup_block.block[0]
                if not peek:
                    # mark the host as having gathered facts, because we're
                    # returning the setup task to be executed
                    host.set_gathered_facts(True)
                    if not peek:
                        # mark the host as having gathered facts, because we're
                        # returning the setup task to be executed
                        host.set_gathered_facts(True)
            else:
                # This is the second trip through ITERATING_SETUP, so we clear
                # the flag and move onto the next block in the list while setting

@@ -326,8 +329,7 @@ class PlayIterator:
                    if self._check_failed_state(state.tasks_child_state):
                        # failed child state, so clear it and move into the rescue portion
                        state.tasks_child_state = None
                        state.fail_state |= self.FAILED_TASKS
                        state.run_state = self.ITERATING_RESCUE
                        self._set_failed_state(state)
                    else:
                        # get the next task recursively
                        if task is None or state.tasks_child_state.run_state == self.ITERATING_COMPLETE:

@@ -365,8 +367,7 @@ class PlayIterator:
                    (state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host, peek=peek)
                    if self._check_failed_state(state.rescue_child_state):
                        state.rescue_child_state = None
                        state.fail_state |= self.FAILED_RESCUE
                        state.run_state = self.ITERATING_ALWAYS
                        self._set_failed_state(state)
                    else:
                        if task is None or state.rescue_child_state.run_state == self.ITERATING_COMPLETE:
                            state.rescue_child_state = None

@@ -396,8 +397,7 @@ class PlayIterator:
                    (state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host, peek=peek)
                    if self._check_failed_state(state.always_child_state):
                        state.always_child_state = None
                        state.fail_state |= self.FAILED_ALWAYS
                        state.run_state = self.ITERATING_COMPLETE
                        self._set_failed_state(state)
                    else:
                        if task is None or state.always_child_state.run_state == self.ITERATING_COMPLETE:
                            state.always_child_state = None

@@ -466,18 +466,25 @@ class PlayIterator:

    def mark_host_failed(self, host):
        s = self.get_host_state(host)
        display.debug("marking host %s failed, current state: %s" % (host, s))
        s = self._set_failed_state(s)
        display.debug("^ failed state is now: %s" % s)
        self._host_states[host.name] = s

    def get_failed_hosts(self):
        return dict((host, True) for (host, state) in iteritems(self._host_states) if state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE)
        return dict((host, True) for (host, state) in iteritems(self._host_states) if self._check_failed_state(state))

    def _check_failed_state(self, state):
        if state is None:
            return False
        elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state):
            return True
        elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state):
            return True
        elif state.fail_state != self.FAILED_NONE:
            if state.run_state == self.ITERATING_RESCUE and state.fail_state&self.FAILED_RESCUE == 0 or \
               state.run_state == self.ITERATING_ALWAYS and state.fail_state&self.FAILED_ALWAYS == 0:
            if state.run_state == self.ITERATING_RESCUE and state.fail_state&self.FAILED_RESCUE == 0:
                return False
            elif state.run_state == self.ITERATING_ALWAYS and state.fail_state&self.FAILED_ALWAYS == 0:
                return False
            else:
                return True

@@ -487,10 +494,6 @@ class PlayIterator:
                return False
            else:
                return True
        elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state):
            return True
        elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state):
            return True
        return False

    def is_failed(self, host):
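The fail_state checks above are bitmask tests: a host that failed its tasks but is still iterating a rescue (or always) section is not counted as failed until that section itself fails. A self-contained sketch of the decision, using the FAILED_* flag values from the source and string stand-ins for the run-state constants:

    # flag values as defined on PlayIterator
    FAILED_NONE, FAILED_SETUP, FAILED_TASKS, FAILED_RESCUE, FAILED_ALWAYS = 0, 1, 2, 4, 8

    ITERATING_RESCUE = 'rescue'   # string stand-ins for the iterator's int constants
    ITERATING_ALWAYS = 'always'

    def check_failed_state(run_state, fail_state):
        if fail_state == FAILED_NONE:
            return False
        # a failure is masked while the corresponding recovery section still runs
        if run_state == ITERATING_RESCUE and not fail_state & FAILED_RESCUE:
            return False
        if run_state == ITERATING_ALWAYS and not fail_state & FAILED_ALWAYS:
            return False
        return True

    assert not check_failed_state(ITERATING_RESCUE, FAILED_TASKS)   # rescue may still save it
    assert check_failed_state(ITERATING_RESCUE, FAILED_TASKS | FAILED_RESCUE)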
@ -27,7 +27,8 @@ from ansible import constants as C
|
|||
from ansible.executor.task_queue_manager import TaskQueueManager
|
||||
from ansible.playbook import Playbook
|
||||
from ansible.template import Templar
|
||||
from ansible.utils.unicode import to_unicode
|
||||
from ansible.utils.path import makedirs_safe
|
||||
from ansible.utils.unicode import to_unicode, to_str
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
|
@ -70,7 +71,7 @@ class PlaybookExecutor:
|
|||
try:
|
||||
for playbook_path in self._playbooks:
|
||||
pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
|
||||
self._inventory.set_playbook_basedir(os.path.dirname(playbook_path))
|
||||
self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))
|
||||
|
||||
if self._tqm is None: # we are doing a listing
|
||||
entry = {'playbook': playbook_path}
|
||||
|
@ -128,6 +129,10 @@ class PlaybookExecutor:
|
|||
else:
|
||||
self._tqm._unreachable_hosts.update(self._unreachable_hosts)
|
||||
|
||||
previously_failed = len(self._tqm._failed_hosts)
|
||||
previously_unreachable = len(self._tqm._unreachable_hosts)
|
||||
|
||||
break_play = False
|
||||
# we are actually running plays
|
||||
for batch in self._get_serialized_batches(new_play):
|
||||
if len(batch) == 0:
|
||||
|
@ -140,24 +145,31 @@ class PlaybookExecutor:
|
|||
# and run it...
|
||||
result = self._tqm.run(play=play)
|
||||
|
||||
# break the play if the result equals the special return code
|
||||
if result == self._tqm.RUN_FAILED_BREAK_PLAY:
|
||||
result = self._tqm.RUN_FAILED_HOSTS
|
||||
break_play = True
|
||||
|
||||
# check the number of failures here, to see if they're above the maximum
|
||||
# failure percentage allowed, or if any errors are fatal. If either of those
|
||||
# conditions are met, we break out, otherwise we only break out if the entire
|
||||
# batch failed
|
||||
failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts)
|
||||
if new_play.max_fail_percentage is not None and \
|
||||
int((new_play.max_fail_percentage)/100.0 * len(batch)) > int((len(batch) - failed_hosts_count) / len(batch) * 100.0):
|
||||
break
|
||||
elif len(batch) == failed_hosts_count:
|
||||
failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \
|
||||
(previously_failed + previously_unreachable)
|
||||
|
||||
if len(batch) == failed_hosts_count:
|
||||
break_play = True
|
||||
break
|
||||
|
||||
# clear the failed hosts dictionaires in the TQM for the next batch
|
||||
# update the previous counts so they don't accumulate incorrectly
|
||||
# over multiple serial batches
|
||||
previously_failed += len(self._tqm._failed_hosts) - previously_failed
|
||||
previously_unreachable += len(self._tqm._unreachable_hosts) - previously_unreachable
|
||||
|
||||
# save the unreachable hosts from this batch
|
||||
self._unreachable_hosts.update(self._tqm._unreachable_hosts)
|
||||
self._tqm.clear_failed_hosts()
|
||||
|
||||
# if the last result wasn't zero or 3 (some hosts were unreachable),
|
||||
# break out of the serial batch loop
|
||||
if result not in (0, 3):
|
||||
if break_play:
|
||||
break
|
||||
|
||||
i = i + 1 # per play
|
||||
|
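The subtraction above is the heart of the fix: failures inherited from earlier serial batches must not count against the current batch when deciding whether the whole batch failed. A minimal sketch of the accounting, with plain integers standing in for the TQM's failed/unreachable host dicts:

    def batch_failed_count(total_failed, total_unreachable,
                           previously_failed, previously_unreachable):
        # only failures that happened in *this* batch count against it
        return (total_failed + total_unreachable
                - (previously_failed + previously_unreachable))

    # two hosts failed before this batch; one more failed now -> count is 1, not 4
    assert batch_failed_count(3, 1, 2, 1) == 1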
@@ -174,8 +186,10 @@ class PlaybookExecutor:
            if len(retries) > 0:
                if C.RETRY_FILES_SAVE_PATH:
                    basedir = C.shell_expand(C.RETRY_FILES_SAVE_PATH)
                elif playbook_path:
                    basedir = os.path.dirname(os.path.abspath(playbook_path))
                else:
                    basedir = os.path.dirname(playbook_path)
                    basedir = '~/'

                (retry_name, _) = os.path.splitext(os.path.basename(playbook_path))
                filename = os.path.join(basedir, "%s.retry" % retry_name)

@@ -247,13 +261,13 @@ class PlaybookExecutor:
        re-running on ONLY the failed hosts. This may duplicate some variable
        information in group_vars/host_vars but that is ok, and expected.
        '''

        try:
            makedirs_safe(os.path.dirname(retry_path))
            with open(retry_path, 'w') as fd:
                for x in replay_hosts:
                    fd.write("%s\n" % x)
        except Exception as e:
            display.error("Could not create retry file '%s'. The error was: %s" % (retry_path, e))
            display.warning("Could not create retry file '%s'.\n\t%s" % (retry_path, to_str(e)))
            return False

        return True
@@ -173,7 +173,7 @@ class ResultProcess(multiprocessing.Process):
                        # if this task is registering facts, do that now
                        loop_var = 'item'
                        if result._task.loop_control:
                            loop_var = result._task.loop_control.get('loop_var') or 'item'
                            loop_var = result._task.loop_control.loop_var or 'item'
                        item = result_item.get(loop_var, None)
                        if result._task.action == 'include_vars':
                            for (key, value) in iteritems(result_item['ansible_facts']):
@@ -77,21 +77,25 @@ class WorkerProcess(multiprocessing.Process):
        self._variable_manager = variable_manager
        self._shared_loader_obj = shared_loader_obj

        # dupe stdin, if we have one
        self._new_stdin = sys.stdin
        try:
            fileno = sys.stdin.fileno()
            if fileno is not None:
                try:
                    self._new_stdin = os.fdopen(os.dup(fileno))
                except OSError:
                    # couldn't dupe stdin, most likely because it's
                    # not a valid file descriptor, so we just rely on
                    # using the one that was passed in
                    pass
        except ValueError:
            # couldn't get stdin's fileno, so we just carry on
            pass
        if sys.stdin.isatty():
            # dupe stdin, if we have one
            self._new_stdin = sys.stdin
            try:
                fileno = sys.stdin.fileno()
                if fileno is not None:
                    try:
                        self._new_stdin = os.fdopen(os.dup(fileno))
                    except OSError:
                        # couldn't dupe stdin, most likely because it's
                        # not a valid file descriptor, so we just rely on
                        # using the one that was passed in
                        pass
            except (AttributeError, ValueError):
                # couldn't get stdin's fileno, so we just carry on
                pass
        else:
            # set to /dev/null
            self._new_stdin = os.devnull

    def run(self):
        '''
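A condensed sketch of the stdin handling above: only a real tty is duplicated for the worker; anything else falls back to the original stream or /dev/null:

    import os
    import sys

    def dupe_stdin():
        if not sys.stdin.isatty():
            return os.devnull            # non-interactive: hand the worker /dev/null
        new_stdin = sys.stdin
        try:
            fileno = sys.stdin.fileno()
            if fileno is not None:
                try:
                    new_stdin = os.fdopen(os.dup(fileno))
                except OSError:
                    pass                 # not a dup-able descriptor; keep the original
        except (AttributeError, ValueError):
            pass                         # stdin was replaced and has no usable fileno
        return new_stdin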
@@ -71,6 +71,7 @@ class TaskExecutor:
        self._shared_loader_obj = shared_loader_obj
        self._connection = None
        self._rslt_q = rslt_q
        self._loop_eval_error = None

    def run(self):
        '''

@@ -90,7 +91,13 @@ class TaskExecutor:
                    roledir = self._task._role._role_path
                self._job_vars['roledir'] = roledir

            items = self._get_loop_items()
            try:
                items = self._get_loop_items()
            except AnsibleUndefinedVariable as e:
                # save the error raised here for use later
                items = None
                self._loop_eval_error = e

            if items is not None:
                if len(items) > 0:
                    item_results = self._run_loop(items)

@@ -232,7 +239,7 @@ class TaskExecutor:
                loop_var = self._task.loop_control.loop_var or 'item'

            if loop_var in task_vars:
                raise AnsibleError("the loop variable '%s' is already in use. You should set the `loop_var` value in the `loop_control` option for the task to something else to avoid variable collisions" % loop_var)
                display.warning("The loop variable '%s' is already in use. You should set the `loop_var` value in the `loop_control` option for the task to something else to avoid variable collisions and unexpected behavior." % loop_var)

            items = self._squash_items(items, loop_var, task_vars)
            for item in items:
@@ -269,59 +276,68 @@ class TaskExecutor:
        Squash items down to a comma-separated list for certain modules which support it
        (typically package management modules).
        '''
        # _task.action could contain templatable strings (via action: and
        # local_action:) Template it before comparing. If we don't end up
        # optimizing it here, the templatable string might use template vars
        # that aren't available until later (it could even use vars from the
        # with_items loop) so don't make the templated string permanent yet.
        templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
        task_action = self._task.action
        if templar._contains_vars(task_action):
            task_action = templar.template(task_action, fail_on_undefined=False)
        name = None
        try:
            # _task.action could contain templatable strings (via action: and
            # local_action:) Template it before comparing. If we don't end up
            # optimizing it here, the templatable string might use template vars
            # that aren't available until later (it could even use vars from the
            # with_items loop) so don't make the templated string permanent yet.
            templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
            task_action = self._task.action
            if templar._contains_vars(task_action):
                task_action = templar.template(task_action, fail_on_undefined=False)

        if len(items) > 0 and task_action in self.SQUASH_ACTIONS:
            if all(isinstance(o, string_types) for o in items):
                final_items = []
            if len(items) > 0 and task_action in self.SQUASH_ACTIONS:
                if all(isinstance(o, string_types) for o in items):
                    final_items = []

                name = None
                for allowed in ['name', 'pkg', 'package']:
                    name = self._task.args.pop(allowed, None)
                    if name is not None:
                        break
                    for allowed in ['name', 'pkg', 'package']:
                        name = self._task.args.pop(allowed, None)
                        if name is not None:
                            break

                # This gets the information to check whether the name field
                # contains a template that we can squash for
                template_no_item = template_with_item = None
                if name:
                    if templar._contains_vars(name):
                        variables[loop_var] = '\0$'
                        template_no_item = templar.template(name, variables, cache=False)
                        variables[loop_var] = '\0@'
                        template_with_item = templar.template(name, variables, cache=False)
                        del variables[loop_var]
                    # This gets the information to check whether the name field
                    # contains a template that we can squash for
                    template_no_item = template_with_item = None
                    if name:
                        if templar._contains_vars(name):
                            variables[loop_var] = '\0$'
                            template_no_item = templar.template(name, variables, cache=False)
                            variables[loop_var] = '\0@'
                            template_with_item = templar.template(name, variables, cache=False)
                            del variables[loop_var]

                # Check if the user is doing some operation that doesn't take
                # name/pkg or the name/pkg field doesn't have any variables
                # and thus the items can't be squashed
                if template_no_item != template_with_item:
                    for item in items:
                        variables[loop_var] = item
                        if self._task.evaluate_conditional(templar, variables):
                            new_item = templar.template(name, cache=False)
                            final_items.append(new_item)
                    self._task.args['name'] = final_items
                    # Wrap this in a list so that the calling function loop
                    # executes exactly once
                    return [final_items]
                else:
                    # Restore the name parameter
                    self._task.args['name'] = name
        #elif:
            # Right now we only optimize single entries. In the future we
            # could optimize more types:
            # * lists can be squashed together
            # * dicts could squash entries that match in all cases except the
            #   name or pkg field.
                    # Check if the user is doing some operation that doesn't take
                    # name/pkg or the name/pkg field doesn't have any variables
                    # and thus the items can't be squashed
                    if template_no_item != template_with_item:
                        for item in items:
                            variables[loop_var] = item
                            if self._task.evaluate_conditional(templar, variables):
                                new_item = templar.template(name, cache=False)
                                final_items.append(new_item)
                        self._task.args['name'] = final_items
                        # Wrap this in a list so that the calling function loop
                        # executes exactly once
                        return [final_items]
                    else:
                        # Restore the name parameter
                        self._task.args['name'] = name
            #elif:
                # Right now we only optimize single entries. In the future we
                # could optimize more types:
                # * lists can be squashed together
                # * dicts could squash entries that match in all cases except the
                #   name or pkg field.
        except:
            # Squashing is an optimization. If it fails for any reason,
            # simply use the unoptimized list of items.

            # Restore the name parameter
            if name is not None:
                self._task.args['name'] = name
            pass
        return items

    def _execute(self, variables=None):
@@ -368,6 +384,11 @@ class TaskExecutor:
            if not self._task.evaluate_conditional(templar, variables):
                display.debug("when evaluation failed, skipping this task")
                return dict(changed=False, skipped=True, skip_reason='Conditional check failed', _ansible_no_log=self._play_context.no_log)
            # since we're not skipping, if there was a loop evaluation error
            # raised earlier we need to raise it now to halt the execution of
            # this task
            if self._loop_eval_error is not None:
                raise self._loop_eval_error
        except AnsibleError:
            # skip conditional exception in the case of includes as the vars needed might not be available except in the included tasks or due to tags
            if self._task.action != 'include':

@@ -400,7 +421,17 @@ class TaskExecutor:
        # get the connection and the handler for this execution
        if not self._connection or not getattr(self._connection, 'connected', False) or self._play_context.remote_addr != self._connection._play_context.remote_addr:
            self._connection = self._get_connection(variables=variables, templar=templar)
            self._connection.set_host_overrides(host=self._host)
            hostvars = variables.get('hostvars', None)
            if hostvars:
                try:
                    target_hostvars = hostvars.raw_get(self._host.name)
                except:
                    # FIXME: this should catch the j2undefined error here
                    # specifically instead of all exceptions
                    target_hostvars = dict()
            else:
                target_hostvars = dict()
            self._connection.set_host_overrides(host=self._host, hostvars=target_hostvars)
        else:
            # if connection is reused, its _play_context is no longer valid and needs
            # to be replaced with the one templated above, in case other data changed

@@ -414,10 +445,14 @@ class TaskExecutor:
        self._task.args = dict((i[0], i[1]) for i in iteritems(self._task.args) if i[1] != omit_token)

        # Read some values from the task, so that we can modify them if need be
        if self._task.until is not None:
        if self._task.until:
            retries = self._task.retries
            if retries <= 0:
            if retries is None:
                retries = 3
            elif retries <= 0:
                retries = 1
            else:
                retries += 1
        else:
            retries = 1

@@ -431,7 +466,7 @@ class TaskExecutor:

        display.debug("starting attempt loop")
        result = None
        for attempt in range(retries):
        for attempt in range(1, retries + 1):
            display.debug("running the handler")
            try:
                result = self._handler.run(task_vars=variables)

@@ -448,17 +483,8 @@ class TaskExecutor:
                vars_copy[self._task.register] = wrap_var(result.copy())

            if self._task.async > 0:
                # the async_wrapper module returns dumped JSON via its stdout
                # response, so we parse it here and replace the result
                try:
                    if 'skipped' in result and result['skipped'] or 'failed' in result and result['failed']:
                        return result
                    result = json.loads(result.get('stdout'))
                except (TypeError, ValueError) as e:
                    return dict(failed=True, msg=u"The async task did not return valid JSON: %s" % to_unicode(e))

                if self._task.poll > 0:
                    result = self._poll_async_result(result=result, templar=templar)
                    result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy)

            # ensure no log is preserved
            result["_ansible_no_log"] = self._play_context.no_log
@@ -494,23 +520,23 @@ class TaskExecutor:
                _evaluate_changed_when_result(result)
                _evaluate_failed_when_result(result)

            if attempt < retries - 1:
            if retries > 1:
                cond = Conditional(loader=self._loader)
                cond.when = self._task.until
                if cond.evaluate_conditional(templar, vars_copy):
                    break
                else:
                    # no conditional check, or it failed, so sleep for the specified time
                    result['attempts'] = attempt + 1
                    result['retries'] = retries
                    result['_ansible_retry'] = True
                    display.debug('Retrying task, attempt %d of %d' % (attempt + 1, retries))
                    self._rslt_q.put(TaskResult(self._host, self._task, result), block=False)
                    time.sleep(delay)
                    if attempt < retries:
                        result['attempts'] = attempt
                        result['_ansible_retry'] = True
                        result['retries'] = retries
                        display.debug('Retrying task, attempt %d of %d' % (attempt, retries))
                        self._rslt_q.put(TaskResult(self._host, self._task, result), block=False)
                        time.sleep(delay)
                    else:
                        if retries > 1:
                            # we ran out of attempts, so mark the result as failed
                            result['attempts'] = retries
                            result['failed'] = True

        # do the final update of the local variables here, for both registered
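The retry rework above turns `retries` into a total attempt count: an `until:` without `retries:` now defaults to three attempts, and an explicit value gains one for the initial try. A small sketch of that normalization:

    def normalize_retries(until, retries):
        if until:
            if retries is None:
                return 3          # until: with no retries: defaults to three attempts
            elif retries <= 0:
                return 1
            return retries + 1    # user-visible retries plus the initial attempt
        return 1                  # no until: means exactly one attempt

    assert normalize_retries(until=True, retries=None) == 3
    assert normalize_retries(until=True, retries=2) == 3    # 1 try + 2 retries
    assert normalize_retries(until=False, retries=5) == 1
    # the attempt counter then runs: for attempt in range(1, retries + 1)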
@@ -542,11 +568,14 @@ class TaskExecutor:
        display.debug("attempt loop complete, returning result")
        return result

    def _poll_async_result(self, result, templar):
    def _poll_async_result(self, result, templar, task_vars=None):
        '''
        Polls for the specified JID to be complete
        '''

        if task_vars is None:
            task_vars = self._job_vars

        async_jid = result.get('ansible_job_id')
        if async_jid is None:
            return dict(failed=True, msg="No job id was returned by the async task")

@@ -574,14 +603,22 @@ class TaskExecutor:
        while time_left > 0:
            time.sleep(self._task.poll)

            async_result = normal_handler.run()
            if int(async_result.get('finished', 0)) == 1 or 'failed' in async_result or 'skipped' in async_result:
            async_result = normal_handler.run(task_vars=task_vars)
            # We do not bail out of the loop in cases where the failure
            # is associated with a parsing error. The async_runner can
            # have issues which result in a half-written/unparseable result
            # file on disk, which manifests to the user as a timeout happening
            # before it's time to timeout.
            if int(async_result.get('finished', 0)) == 1 or ('failed' in async_result and async_result.get('_ansible_parsed', False)) or 'skipped' in async_result:
                break

            time_left -= self._task.poll

        if int(async_result.get('finished', 0)) != 1:
            return dict(failed=True, msg="async task did not complete within the requested time")
            if async_result.get('_ansible_parsed'):
                return dict(failed=True, msg="async task did not complete within the requested time")
            else:
                return dict(failed=True, msg="async task produced unparseable results", async_result=async_result)
        else:
            return async_result
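The polling change above keeps waiting through unparseable ("half-written") status files instead of reporting a premature failure. A reduced sketch of the loop; check_status is an assumed callable standing in for normal_handler.run():

    import time

    def poll_async(check_status, poll_interval, timeout):
        time_left = timeout
        async_result = {}
        while time_left > 0:
            time.sleep(poll_interval)
            async_result = check_status()
            # a half-written status file surfaces as an unparsed failure;
            # keep polling instead of treating it as a real failure
            if int(async_result.get('finished', 0)) == 1 \
                    or ('failed' in async_result and async_result.get('_ansible_parsed', False)) \
                    or 'skipped' in async_result:
                break
            time_left -= poll_interval
        if int(async_result.get('finished', 0)) != 1:
            if async_result.get('_ansible_parsed'):
                return {'failed': True, 'msg': 'async task did not complete within the requested time'}
            return {'failed': True, 'msg': 'async task produced unparseable results',
                    'async_result': async_result}
        return async_result

    print(poll_async(lambda: {'finished': 1, '_ansible_parsed': True}, 1, 10))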
@@ -595,14 +632,14 @@ class TaskExecutor:
            # since we're delegating, we don't want to use interpreter values
            # which would have been set for the original target host
            for i in variables.keys():
                if i.startswith('ansible_') and i.endswith('_interpreter'):
                if isinstance(i, string_types) and i.startswith('ansible_') and i.endswith('_interpreter'):
                    del variables[i]
            # now replace the interpreter values with those that may have come
            # from the delegated-to host
            delegated_vars = variables.get('ansible_delegated_vars', dict()).get(self._task.delegate_to, dict())
            if isinstance(delegated_vars, dict):
                for i in delegated_vars:
                    if i.startswith("ansible_") and i.endswith("_interpreter"):
                    if isinstance(i, string_types) and i.startswith("ansible_") and i.endswith("_interpreter"):
                        variables[i] = delegated_vars[i]

        conn_type = self._play_context.connection

@@ -629,6 +666,8 @@ class TaskExecutor:
            raise AnsibleError("the connection plugin '%s' was not found" % conn_type)

        if self._play_context.accelerate:
            # accelerate is deprecated as of 2.1...
            display.deprecated('Accelerated mode is deprecated. Consider using SSH with ControlPersist and pipelining enabled instead')
            # launch the accelerated daemon here
            ssh_connection = connection
            handler = self._shared_loader_obj.action_loader.get(
@@ -58,6 +58,13 @@ class TaskQueueManager:
    which dispatches the Play's tasks to hosts.
    '''

    RUN_OK = 0
    RUN_ERROR = 1
    RUN_FAILED_HOSTS = 2
    RUN_UNREACHABLE_HOSTS = 3
    RUN_FAILED_BREAK_PLAY = 4
    RUN_UNKNOWN_ERROR = 255

    def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None, run_additional_callbacks=True, run_tree=False):

        self._inventory = inventory

@@ -107,7 +114,7 @@ class TaskQueueManager:
        self._result_prc = ResultProcess(self._final_q, self._workers)
        self._result_prc.start()

    def _initialize_notified_handlers(self, handlers):
    def _initialize_notified_handlers(self, play):
        '''
        Clears and initializes the shared notified handlers dict with entries
        for each handler in the play, which is an empty array that will contain

@@ -116,8 +123,7 @@ class TaskQueueManager:

        # Zero the dictionary first by removing any entries there.
        # Proxied dicts don't support iteritems, so we have to use keys()
        for key in self._notified_handlers.keys():
            del self._notified_handlers[key]
        self._notified_handlers.clear()

        def _process_block(b):
            temp_list = []

@@ -129,12 +135,13 @@ class TaskQueueManager:
            return temp_list

        handler_list = []
        for handler_block in handlers:
        for handler_block in play.handlers:
            handler_list.extend(_process_block(handler_block))

        # then initialize it with the handler names from the handler list
        # then initialize it with the given handler list
        for handler in handler_list:
            self._notified_handlers[handler.get_name()] = []
            if handler not in self._notified_handlers:
                self._notified_handlers[handler] = []

    def load_callbacks(self):
        '''

@@ -199,6 +206,7 @@ class TaskQueueManager:

        new_play = play.copy()
        new_play.post_validate(templar)
        new_play.handlers = new_play.compile_roles_handlers() + new_play.handlers

        self.hostvars = HostVars(
            inventory=self._inventory,

@@ -219,7 +227,7 @@ class TaskQueueManager:
        self.send_callback('v2_playbook_on_play_start', new_play)

        # initialize the shared dictionary containing the notified handlers
        self._initialize_notified_handlers(new_play.handlers)
        self._initialize_notified_handlers(new_play)

        # load the specified strategy (or the default linear one)
        strategy = strategy_loader.get(new_play.strategy, self)

@@ -236,6 +244,16 @@ class TaskQueueManager:
            start_at_done = self._start_at_done,
        )

        # Because the TQM may survive multiple play runs, we start by marking
        # any hosts as failed in the iterator here which may have been marked
        # as failed in previous runs. Then we clear the internal list of failed
        # hosts so we know what failed this round.
        for host_name in self._failed_hosts.keys():
            host = self._inventory.get_host(host_name)
            iterator.mark_host_failed(host)

        self.clear_failed_hosts()

        # during initialization, the PlayContext will clear the start_at_task
        # field to signal that a matching task was found, so check that here
        # and remember it so we don't try to skip tasks on future plays

@@ -244,6 +262,11 @@ class TaskQueueManager:

        # and run the play using the strategy and cleanup on way out
        play_return = strategy.run(iterator, play_context)

        # now re-save the hosts that failed from the iterator to our internal list
        for host_name in iterator.get_failed_hosts():
            self._failed_hosts[host_name] = True

        self._cleanup_processes()
        return play_return

@@ -286,6 +309,18 @@ class TaskQueueManager:
    def terminate(self):
        self._terminated = True

    def has_dead_workers(self):

        # [<WorkerProcess(WorkerProcess-2, stopped[SIGKILL])>,
        # <WorkerProcess(WorkerProcess-2, stopped[SIGTERM])>

        defunct = False
        for idx,x in enumerate(self._workers):
            if hasattr(x[0], 'exitcode'):
                if x[0].exitcode in [-9, -15]:
                    defunct = True
        return defunct

    def send_callback(self, method_name, *args, **kwargs):
        for callback_plugin in [self._stdout_callback] + self._callback_plugins:
            # a plugin that set self.disabled to True will not be called
@@ -40,14 +40,16 @@ class TaskResult:
        return self._check_key('changed')

    def is_skipped(self):
        # loop results
        if 'results' in self._result and self._task.loop:
            flag = True
            for res in self._result.get('results', []):
                if isinstance(res, dict):
                    flag &= res.get('skipped', False)
            return flag
        else:
            return self._result.get('skipped', False)
            results = self._result['results']
            # Loop tasks are only considered skipped if all items were skipped.
            # some squashed results (eg, yum) are not dicts and can't be skipped individually
            if results and all(isinstance(res, dict) and res.get('skipped', False) for res in results):
                return True

        # regular tasks and squashed non-dict results
        return self._result.get('skipped', False)

    def is_failed(self):
        if 'failed_when_result' in self._result or \
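The rewritten is_skipped treats a loop task as skipped only when every item result is a dict marked skipped, which squashed results (e.g. from yum) may not be. A compact sketch of the rule:

    def is_skipped(result, is_loop):
        if is_loop and result.get('results'):
            items = result['results']
            return all(isinstance(res, dict) and res.get('skipped', False) for res in items)
        return result.get('skipped', False)

    assert is_skipped({'results': [{'skipped': True}, {'skipped': True}]}, is_loop=True)
    assert not is_skipped({'results': [{'skipped': True}, {'changed': True}]}, is_loop=True)
    assert not is_skipped({'results': ['squashed yum result']}, is_loop=True)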
@@ -60,7 +62,7 @@ class TaskResult:
        return self._check_key('unreachable')

    def _check_key(self, key):
        if 'results' in self._result and self._task.loop:
        if self._result.get('results', []) and self._task.loop:
            flag = False
            for res in self._result.get('results', []):
                if isinstance(res, dict):
@@ -28,7 +28,6 @@ import json
import urllib

from urllib2 import quote as urlquote, HTTPError
from urlparse import urlparse

import ansible.constants as C
from ansible.errors import AnsibleError

@@ -41,6 +40,21 @@ except ImportError:
    from ansible.utils.display import Display
    display = Display()

def g_connect(method):
    ''' wrapper to lazily initialize connection info to galaxy '''
    def wrapped(self, *args, **kwargs):
        if not self.initialized:
            display.vvvv("Initial connection to galaxy_server: %s" % self._api_server)
            server_version = self._get_server_api_version()
            if not server_version in self.SUPPORTED_VERSIONS:
                raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version)

            self.baseurl = '%s/api/%s' % (self._api_server, server_version)
            self.version = server_version # for future use
            display.vvvv("Base API: %s" % self.baseurl)
            self.initialized = True
        return method(self, *args, **kwargs)
    return wrapped

class GalaxyAPI(object):
    ''' This class is meant to be used as an API client for an Ansible Galaxy server '''
@@ -51,25 +65,17 @@ class GalaxyAPI(object):
        self.galaxy = galaxy
        self.token = GalaxyToken()
        self._api_server = C.GALAXY_SERVER
        self._validate_certs = not C.GALAXY_IGNORE_CERTS
        self._validate_certs = not galaxy.options.ignore_certs
        self.baseurl = None
        self.version = None
        self.initialized = False

        # set validate_certs
        if galaxy.options.ignore_certs:
            self._validate_certs = False
        display.vvv('Validate TLS certificates: %s' % self._validate_certs)
        display.debug('Validate TLS certificates: %s' % self._validate_certs)

        # set the API server
        if galaxy.options.api_server != C.GALAXY_SERVER:
            self._api_server = galaxy.options.api_server
        display.vvv("Connecting to galaxy_server: %s" % self._api_server)

        server_version = self.get_server_api_version()
        if not server_version in self.SUPPORTED_VERSIONS:
            raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version)

        self.baseurl = '%s/api/%s' % (self._api_server, server_version)
        self.version = server_version # for future use
        display.vvv("Base API: %s" % self.baseurl)

    def __auth_header(self):
        token = self.token.get()

@@ -77,6 +83,7 @@ class GalaxyAPI(object):
            raise AnsibleError("No access token. You must first use login to authenticate and obtain an access token.")
        return {'Authorization': 'Token ' + token}

    @g_connect
    def __call_galaxy(self, url, args=None, headers=None, method=None):
        if args and not headers:
            headers = self.__auth_header()

@@ -91,13 +98,13 @@ class GalaxyAPI(object):

    @property
    def api_server(self):
        return self._api_server

        return self._api_server

    @property
    def validate_certs(self):
        return self._validate_certs

    def get_server_api_version(self):
    def _get_server_api_version(self):
        """
        Fetches the Galaxy API current version to ensure
        the API server is up and reachable.

@@ -107,8 +114,9 @@ class GalaxyAPI(object):
            data = json.load(open_url(url, validate_certs=self._validate_certs))
            return data['current_version']
        except Exception as e:
            raise AnsibleError("The API server (%s) is not responding, please try again later." % url)

            raise AnsibleError("The API server (%s) is not responding, please try again later" % url)

    @g_connect
    def authenticate(self, github_token):
        """
        Retrieve an authentication token

@@ -119,6 +127,7 @@ class GalaxyAPI(object):
        data = json.load(resp)
        return data

    @g_connect
    def create_import_task(self, github_user, github_repo, reference=None):
        """
        Post an import request

@@ -134,6 +143,7 @@ class GalaxyAPI(object):
            return data['results']
        return data

    @g_connect
    def get_import_task(self, task_id=None, github_user=None, github_repo=None):
        """
        Check the status of an import task.

@@ -145,10 +155,11 @@ class GalaxyAPI(object):
            url = "%s?github_user=%s&github_repo=%s" % (url,github_user,github_repo)
        else:
            raise AnsibleError("Expected task_id or github_user and github_repo")

        data = self.__call_galaxy(url)
        return data['results']

    @g_connect
    def lookup_role_by_name(self, role_name, notify=True):
        """
        Find a role by name.

@@ -170,6 +181,7 @@ class GalaxyAPI(object):
                return data["results"][0]
        return None

    @g_connect
    def fetch_role_related(self, related, role_id):
        """
        Fetch the list of related items for the given role.

@@ -190,6 +202,7 @@ class GalaxyAPI(object):
        except:
            return None

    @g_connect
    def get_list(self, what):
        """
        Fetch the list of items specified.

@@ -213,6 +226,7 @@ class GalaxyAPI(object):
        except Exception as error:
            raise AnsibleError("Failed to download the %s list: %s" % (what, str(error)))

    @g_connect
    def search_roles(self, search, **kwargs):

        search_url = self.baseurl + '/search/roles/?'

@@ -228,7 +242,7 @@ class GalaxyAPI(object):
        if tags and isinstance(tags, basestring):
            tags = tags.split(',')
            search_url += '&tags_autocomplete=' + '+'.join(tags)

        if platforms and isinstance(platforms, basestring):
            platforms = platforms.split(',')
            search_url += '&platforms_autocomplete=' + '+'.join(platforms)

@@ -238,10 +252,11 @@ class GalaxyAPI(object):

        if author:
            search_url += '&username_autocomplete=%s' % author

        data = self.__call_galaxy(search_url)
        return data

    @g_connect
    def add_secret(self, source, github_user, github_repo, secret):
        url = "%s/notification_secrets/" % self.baseurl
        args = urllib.urlencode({

@@ -253,16 +268,19 @@ class GalaxyAPI(object):
        data = self.__call_galaxy(url, args=args)
        return data

    @g_connect
    def list_secrets(self):
        url = "%s/notification_secrets" % self.baseurl
        data = self.__call_galaxy(url, headers=self.__auth_header())
        return data

    @g_connect
    def remove_secret(self, secret_id):
        url = "%s/notification_secrets/%s/" % (self.baseurl, secret_id)
        data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE')
        return data

    @g_connect
    def delete_role(self, github_user, github_repo):
        url = "%s/removerole/?github_user=%s&github_repo=%s" % (self.baseurl,github_user,github_repo)
        data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE')
@@ -54,13 +54,9 @@ class GalaxyRole(object):

        self._metadata = None
        self._install_info = None
        self._validate_certs = not galaxy.options.ignore_certs

        self._validate_certs = not C.GALAXY_IGNORE_CERTS

        # set validate_certs
        if galaxy.options.ignore_certs:
            self._validate_certs = False
        display.vvv('Validate TLS certificates: %s' % self._validate_certs)
        display.debug('Validate TLS certificates: %s' % self._validate_certs)

        self.options = galaxy.options
        self.galaxy = galaxy

@@ -34,11 +34,10 @@ from ansible.inventory.dir import InventoryDirectory, get_file_parser
from ansible.inventory.group import Group
from ansible.inventory.host import Host
from ansible.plugins import vars_loader
from ansible.utils.unicode import to_unicode
from ansible.utils.unicode import to_unicode, to_bytes
from ansible.utils.vars import combine_vars
from ansible.parsing.utils.addresses import parse_address

HOSTS_PATTERNS_CACHE = {}
from ansible.utils.path import unfrackpath

try:
    from __main__ import display

@@ -46,6 +45,8 @@ except ImportError:
    from ansible.utils.display import Display
    display = Display()

HOSTS_PATTERNS_CACHE = {}

class Inventory(object):
    """
    Host inventory for ansible.

@@ -55,9 +56,10 @@ class Inventory(object):

        # the host file, or script path, or list of hosts
        # if a list, inventory data will NOT be loaded
        self.host_list = host_list
        self.host_list = unfrackpath(host_list, follow=False)
        self._loader = loader
        self._variable_manager = variable_manager
        self.localhost = None

        # caching to avoid repeated calculations, particularly with
        # external inventory scripts.

@@ -68,6 +70,12 @@ class Inventory(object):
        self._pattern_cache = {}
        self._vars_plugins = []

        self._basedir = self.basedir()

        # Contains set of filenames under group_vars directories
        self._group_vars_files = self._find_group_vars_files(self._basedir)
        self._host_vars_files = self._find_host_vars_files(self._basedir)

        # to be set by calling set_playbook_basedir by playbook code
        self._playbook_basedir = None

@@ -119,7 +127,15 @@ class Inventory(object):
                    display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_unicode(e))
                    host = h
                    port = None
                all.add_host(Host(host, port))

                new_host = Host(host, port)
                if h in C.LOCALHOST:
                    # set default localhost from inventory to avoid creating an implicit one. Last localhost defined 'wins'.
                    if self.localhost is not None:
                        display.warning("A duplicate localhost-like entry was found (%s). First found localhost was %s" % (h, self.localhost.name))
                    display.vvvv("Set default localhost to %s" % h)
                    self.localhost = new_host
                all.add_host(new_host)
        elif self._loader.path_exists(host_list):
            #TODO: switch this to a plugin loader and a 'condition' per plugin on which it should be tried, restoring 'inventory plugins'
            if self.is_directory(host_list):

@@ -128,7 +144,7 @@ class Inventory(object):
                self.parser = InventoryDirectory(loader=self._loader, groups=self.groups, filename=host_list)
            else:
                self.parser = get_file_parser(host_list, self.groups, self._loader)
                vars_loader.add_directory(self.basedir(), with_subdir=True)
                vars_loader.add_directory(self._basedir, with_subdir=True)

            if not self.parser:
                # should never happen, but JIC

@@ -142,10 +158,12 @@ class Inventory(object):
        for g in self.groups:
            group = self.groups[g]
            group.vars = combine_vars(group.vars, self.get_group_variables(group.name))
            self.get_group_vars(group)

        # set host vars from host_vars/ files and vars plugins
        for host in self.get_hosts():
        # get host vars from host_vars/ files and vars plugins
        for host in self.get_hosts(ignore_limits_and_restrictions=True):
            host.vars = combine_vars(host.vars, self.get_host_variables(host.name))
            self.get_host_vars(host)

    def _match(self, str, pattern_str):
        try:

@@ -204,7 +222,7 @@ class Inventory(object):

        # exclude hosts mentioned in any restriction (ex: failed hosts)
        if self._restriction is not None:
            hosts = [ h for h in hosts if h in self._restriction ]
            hosts = [ h for h in hosts if h.name in self._restriction ]

        seen = set()
        HOSTS_PATTERNS_CACHE[pattern_hash] = [x for x in hosts if x not in seen and not seen.add(x)]

@@ -439,10 +457,14 @@ class Inventory(object):
        for group in groups.values():
            if pattern == 'all':
                for host in group.get_hosts():
                    if host.implicit:
                        continue
                    __append_host_to_results(host)
            else:
                if self._match(group.name, pattern) and group.name not in ('all', 'ungrouped'):
                    for host in group.get_hosts():
                        if host.implicit:
                            continue
                        __append_host_to_results(host)
                else:
                    matching_hosts = self._match_list(group.get_hosts(), 'name', pattern)

@@ -455,13 +477,24 @@ class Inventory(object):
        return results

    def _create_implicit_localhost(self, pattern):
        new_host = Host(pattern)
        new_host.address = "127.0.0.1"
        new_host.vars = self.get_host_vars(new_host)
        new_host.set_variable("ansible_connection", "local")
        if "ansible_python_interpreter" not in new_host.vars:
            new_host.set_variable("ansible_python_interpreter", sys.executable)
        self.get_group("ungrouped").add_host(new_host)

        if self.localhost:
            new_host = self.localhost
        else:
            new_host = Host(pattern)
            new_host.address = "127.0.0.1"
            new_host.implicit = True
            new_host.vars = self.get_host_vars(new_host)
            new_host.set_variable("ansible_connection", "local")
            if "ansible_python_interpreter" not in new_host.vars:
                py_interp = sys.executable
                if not py_interp:
                    # sys.executable is not set in some cornercases. #13585
                    display.warning('Unable to determine python interpreter from sys.executable. Using /usr/bin/python default. You can correct this by setting ansible_python_interpreter for localhost')
                    py_interp = '/usr/bin/python'
                new_host.set_variable("ansible_python_interpreter", py_interp)
            self.get_group("ungrouped").add_host(new_host)
            self.localhost = new_host
        return new_host

    def clear_pattern_cache(self):

@@ -482,23 +515,31 @@ class Inventory(object):
    def get_host(self, hostname):
        if hostname not in self._hosts_cache:
            self._hosts_cache[hostname] = self._get_host(hostname)
            if hostname in C.LOCALHOST:
                for host in C.LOCALHOST.difference((hostname,)):
                    self._hosts_cache[host] = self._hosts_cache[hostname]
        return self._hosts_cache[hostname]

    def _get_host(self, hostname):
        if hostname in C.LOCALHOST:
            for host in self.get_group('all').get_hosts():
                if host.name in C.LOCALHOST:
                    return host
            return self._create_implicit_localhost(hostname)
        matching_host = None
        for group in self.groups.values():
            for host in group.get_hosts():
                if hostname == host.name:
                    matching_host = host
                self._hosts_cache[host.name] = host
        if hostname in C.LOCALHOST:
            if self.localhost:
                matching_host = self.localhost
            else:
                for host in self.get_group('all').get_hosts():
                    if host.name in C.LOCALHOST:
                        matching_host = host
                        break
                if not matching_host:
                    matching_host = self._create_implicit_localhost(hostname)
            # update caches
            self._hosts_cache[hostname] = matching_host
            for host in C.LOCALHOST.difference((hostname,)):
                self._hosts_cache[host] = self._hosts_cache[hostname]
        else:
            for group in self.groups.values():
                for host in group.get_hosts():
                    if host not in self._hosts_cache:
                        self._hosts_cache[host.name] = host
                    if hostname == host.name:
                        matching_host = host
        return matching_host

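Note on the caching above: once any name in C.LOCALHOST ('localhost', '127.0.0.1', '::1') is looked up, every alias resolves to the same Host object. A minimal standalone sketch of that aliasing (the dict stands in for the real Host cache):

    # Sketch of the localhost alias caching in get_host()/_get_host().
    LOCALHOST = frozenset(('localhost', '127.0.0.1', '::1'))
    _hosts_cache = {}

    def get_host(hostname):
        if hostname not in _hosts_cache:
            _hosts_cache[hostname] = {'name': hostname}  # stand-in for a Host object
            if hostname in LOCALHOST:
                for alias in LOCALHOST.difference((hostname,)):
                    _hosts_cache[alias] = _hosts_cache[hostname]
        return _hosts_cache[hostname]

    print(get_host('localhost') is get_host('127.0.0.1'))  # True: one shared cache entry
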
    def get_group(self, groupname):

@@ -567,9 +608,6 @@ class Inventory(object):
        if self.parser is not None:
            vars = combine_vars(vars, self.parser.get_host_variables(host))

        # Read host_vars/ files
        vars = combine_vars(vars, self.get_host_vars(host))

        return vars

    def add_group(self, group):

@@ -600,7 +638,7 @@ class Inventory(object):
            return
        elif not isinstance(restriction, list):
            restriction = [ restriction ]
        self._restriction = restriction
        self._restriction = [ h.name for h in restriction ]

    def subset(self, subset_pattern):
        """

@@ -680,6 +718,12 @@ class Inventory(object):
        """
        # Only update things if dir is a different playbook basedir
        if dir_name != self._playbook_basedir:
            # we're changing the playbook basedir, so if we had set one previously
            # clear the host/group vars entries from the VariableManager so they're
            # not incorrectly used by playbooks from different directories
            if self._playbook_basedir:
                self._variable_manager.clear_playbook_hostgroup_vars_files(self._playbook_basedir)

            self._playbook_basedir = dir_name
            # get group vars from group_vars/ files
            # TODO: excluding the new_pb_basedir directory may result in group_vars

@@ -687,26 +731,51 @@ class Inventory(object):
            # we do this shouldn't be too much of an issue. Still, this should
            # be fixed at some point to allow a "first load" to touch all of the
            # directories, then later runs only touch the new basedir specified
            for group in self.groups.values():
                #group.vars = combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True))
                group.vars = combine_vars(group.vars, self.get_group_vars(group))
            # get host vars from host_vars/ files
            for host in self.get_hosts():
                #host.vars = combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True))
                host.vars = combine_vars(host.vars, self.get_host_vars(host))
            found_group_vars = self._find_group_vars_files(self._playbook_basedir)
            if found_group_vars:
                self._group_vars_files = self._group_vars_files.union(found_group_vars)
                for group in self.groups.values():
                    self.get_group_vars(group)

            found_host_vars = self._find_host_vars_files(self._playbook_basedir)
            if found_host_vars:
                self._host_vars_files = self._host_vars_files.union(found_host_vars)
                # get host vars from host_vars/ files
                for host in self.get_hosts():
                    self.get_host_vars(host)
            # invalidate cache
            self._vars_per_host = {}
            self._vars_per_group = {}

    def get_host_vars(self, host, new_pb_basedir=False):
    def get_host_vars(self, host, new_pb_basedir=False, return_results=False):
        """ Read host_vars/ files """
        return self._get_hostgroup_vars(host=host, group=None, new_pb_basedir=new_pb_basedir)
        return self._get_hostgroup_vars(host=host, group=None, new_pb_basedir=new_pb_basedir, return_results=return_results)

    def get_group_vars(self, group, new_pb_basedir=False):
    def get_group_vars(self, group, new_pb_basedir=False, return_results=False):
        """ Read group_vars/ files """
        return self._get_hostgroup_vars(host=None, group=group, new_pb_basedir=new_pb_basedir)
        return self._get_hostgroup_vars(host=None, group=group, new_pb_basedir=new_pb_basedir, return_results=return_results)

    def _get_hostgroup_vars(self, host=None, group=None, new_pb_basedir=False):
    def _find_group_vars_files(self, basedir):
        """ Find group_vars/ files """
        if basedir in ('', None):
            basedir = './'
        path = os.path.realpath(os.path.join(basedir, 'group_vars'))
        found_vars = set()
        if os.path.exists(path):
            found_vars = set(os.listdir(unicode(path)))
        return found_vars

    def _find_host_vars_files(self, basedir):
        """ Find host_vars/ files """
        if basedir in ('', None):
            basedir = './'
        path = os.path.realpath(os.path.join(basedir, 'host_vars'))
        found_vars = set()
        if os.path.exists(path):
            found_vars = set(os.listdir(unicode(path)))
        return found_vars

    def _get_hostgroup_vars(self, host=None, group=None, new_pb_basedir=False, return_results=False):
        """
        Loads variables from group_vars/<groupname> and host_vars/<hostname> in directories parallel
        to the inventory base directory or in the same directory as the playbook. Variables in the playbook

@@ -715,14 +784,15 @@ class Inventory(object):

        results = {}
        scan_pass = 0
        _basedir = self.basedir()
        _basedir = self._basedir
        _playbook_basedir = self._playbook_basedir

        # look in both the inventory base directory and the playbook base directory
        # unless we do an update for a new playbook base dir
        if not new_pb_basedir:
            basedirs = [_basedir, self._playbook_basedir]
        if not new_pb_basedir and _playbook_basedir:
            basedirs = [_basedir, _playbook_basedir]
        else:
            basedirs = [self._playbook_basedir]
            basedirs = [_basedir]

        for basedir in basedirs:
            # this can happen from particular API usages, particularly if not run

@@ -737,17 +807,22 @@ class Inventory(object):
                continue

            # save work of second scan if the directories are the same
            if _basedir == self._playbook_basedir and scan_pass != 1:
            if _basedir == _playbook_basedir and scan_pass != 1:
                continue

            if group and host is None:
            # Before trying to load vars from file, check that the directory contains relevant file names
            if host is None and any(map(lambda ext: group.name + ext in self._group_vars_files, C.YAML_FILENAME_EXTENSIONS)):
                # load vars in dir/group_vars/name_of_group
                base_path = os.path.abspath(os.path.join(to_unicode(basedir, errors='strict'), "group_vars/%s" % group.name))
                results = combine_vars(results, self._variable_manager.add_group_vars_file(base_path, self._loader))
            elif host and group is None:
                base_path = to_unicode(os.path.abspath(os.path.join(to_bytes(basedir), b"group_vars/" + to_bytes(group.name))), errors='strict')
                host_results = self._variable_manager.add_group_vars_file(base_path, self._loader)
                if return_results:
                    results = combine_vars(results, host_results)
            elif group is None and any(map(lambda ext: host.name + ext in self._host_vars_files, C.YAML_FILENAME_EXTENSIONS)):
                # same for hostvars in dir/host_vars/name_of_host
                base_path = os.path.abspath(os.path.join(to_unicode(basedir, errors='strict'), "host_vars/%s" % host.name))
                results = combine_vars(results, self._variable_manager.add_host_vars_file(base_path, self._loader))
                base_path = to_unicode(os.path.abspath(os.path.join(to_bytes(basedir), b"host_vars/" + to_bytes(host.name))), errors='strict')
                group_results = self._variable_manager.add_host_vars_file(base_path, self._loader)
                if return_results:
                    results = combine_vars(results, group_results)

        # all done, results is a dictionary of variables for this particular host.
        return results

@@ -140,10 +140,14 @@ class Group:
            for kk in kid_hosts:
                if kk not in seen:
                    seen[kk] = 1
                    if self.name == 'all' and kk.implicit:
                        continue
                    hosts.append(kk)
        for mine in self.hosts:
            if mine not in seen:
                seen[mine] = 1
                if self.name == 'all' and mine.implicit:
                    continue
                hosts.append(mine)
        return hosts

@@ -60,6 +60,7 @@ class Host:
            uuid=self._uuid,
            gathered_facts=self._gathered_facts,
            groups=groups,
            implicit=self.implicit,
        )

    def deserialize(self, data):

@@ -69,6 +70,7 @@ class Host:
        self.vars = data.get('vars', dict())
        self.address = data.get('address', '')
        self._uuid = data.get('uuid', uuid.uuid4())
        self.implicit = data.get('implicit', False)

        groups = data.get('groups', [])
        for group_data in groups:

@@ -89,6 +91,7 @@ class Host:

        self._gathered_facts = False
        self._uuid = uuid.uuid4()
        self.implicit = False

    def __repr__(self):
        return self.get_name()

@@ -124,7 +124,7 @@ class InventoryParser(object):
                del pending_declarations[groupname]

            continue
        elif line.startswith('['):
        elif line.startswith('[') and line.endswith(']'):
            self._raise_error("Invalid section entry: '%s'. Please make sure that there are no spaces" % line + \
                "in the section entry, and that there are no other invalid characters")

@@ -27,6 +27,7 @@ from ansible.inventory.group import Group
from ansible.inventory.expand_hosts import detect_range
from ansible.inventory.expand_hosts import expand_hostname_range
from ansible.parsing.utils.addresses import parse_address
from ansible.compat.six import string_types

class InventoryParser(object):
    """

@@ -77,6 +78,11 @@ class InventoryParser(object):
            self.groups[group] = Group(name=group)

        if isinstance(group_data, dict):
            # make sure they are dicts
            for section in ['vars', 'children', 'hosts']:
                if section in group_data and isinstance(group_data[section], string_types):
                    group_data[section] = { group_data[section]: None }

            if 'vars' in group_data:
                for var in group_data['vars']:
                    if var != 'ansible_group_priority':

@@ -15,3 +15,6 @@
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Note: Do not add any code to this file. module_utils may be a namespace
# package when using Ansible-2.1+ Anything in this file may not be available
# if one of the other packages in the namespace is loaded first.

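The practical effect behind that warning (first package portion found on sys.path wins, so this file's contents may never execute) can be reproduced with a throwaway package. A rough, hedged illustration; the package name 'pkg' and the paths are invented:

    # Two sys.path entries each provide pkg/__init__.py; only the first
    # one found is executed, so symbols from the other vanish silently.
    import os, sys, tempfile

    site_a = tempfile.mkdtemp()
    site_b = tempfile.mkdtemp()
    os.makedirs(os.path.join(site_a, 'pkg'))
    os.makedirs(os.path.join(site_b, 'pkg'))
    with open(os.path.join(site_a, 'pkg', '__init__.py'), 'w') as f:
        f.write('CONSTANT = 1\n')
    open(os.path.join(site_b, 'pkg', '__init__.py'), 'w').close()  # empty portion

    sys.path[:0] = [site_b, site_a]      # site_b is found first and wins
    import pkg
    print(getattr(pkg, 'CONSTANT', 'missing'))   # -> 'missing'
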
@@ -24,7 +24,10 @@ import os
import re
import sys
import copy
import importlib
import inspect

from packaging.version import Version
from os.path import expanduser
from ansible.module_utils.basic import *

@@ -66,33 +69,38 @@ CIDR_PATTERN = re.compile("(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){
AZURE_SUCCESS_STATE = "Succeeded"
AZURE_FAILED_STATE = "Failed"

AZURE_MIN_VERSION = "2016-03-30"

HAS_AZURE = True
HAS_AZURE_EXC = None

HAS_MSRESTAZURE = True
HAS_MSRESTAZURE_EXC = None

# NB: packaging issue sometimes cause msrestazure not to be installed, check it separately
try:
    from msrest.serialization import Serializer
except ImportError as exc:
    HAS_MSRESTAZURE_EXC = exc
    HAS_MSRESTAZURE = False

try:
    from enum import Enum
    from msrest.serialization import Serializer
    from msrestazure.azure_exceptions import CloudError
    from azure.mgmt.compute import __version__ as azure_compute_version
    from azure.mgmt.network.models import PublicIPAddress, NetworkSecurityGroup, SecurityRule, NetworkInterface, \
        NetworkInterfaceIPConfiguration, Subnet
    from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
    from azure.mgmt.network.network_management_client import NetworkManagementClient,\
        NetworkManagementClientConfiguration
    from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient,\
        ResourceManagementClientConfiguration
    from azure.mgmt.storage.storage_management_client import StorageManagementClient,\
        StorageManagementClientConfiguration
    from azure.mgmt.compute.compute_management_client import ComputeManagementClient,\
        ComputeManagementClientConfiguration
    from azure.mgmt.network.version import VERSION as network_client_version
    from azure.mgmt.storage.version import VERSION as storage_client_version
    from azure.mgmt.compute.version import VERSION as compute_client_version
    from azure.mgmt.resource.version import VERSION as resource_client_version
    from azure.mgmt.network.network_management_client import NetworkManagementClient
    from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient
    from azure.mgmt.storage.storage_management_client import StorageManagementClient
    from azure.mgmt.compute.compute_management_client import ComputeManagementClient
    from azure.storage.cloudstorageaccount import CloudStorageAccount
except ImportError, exc:
except ImportError as exc:
    HAS_AZURE_EXC = exc
    HAS_AZURE = False


def azure_id_to_dict(id):
    pieces = re.sub(r'^\/', '', id).split('/')
    result = {}

@@ -103,6 +111,15 @@ def azure_id_to_dict(id):
    return result


AZURE_EXPECTED_VERSIONS = dict(
    storage_client_version="0.30.0rc5",
    compute_client_version="0.30.0rc5",
    network_client_version="0.30.0rc5",
    resource_client_version="0.30.0rc5"
)

AZURE_MIN_RELEASE = '2.0.0rc5'

class AzureRMModuleBase(object):

    def __init__(self, derived_arg_spec, bypass_checks=False, no_log=False,

@@ -133,12 +150,13 @@ class AzureRMModuleBase(object):
            supports_check_mode=supports_check_mode,
            required_if=merged_required_if)

        if not HAS_AZURE:
            self.fail("The Azure Python SDK is not installed (try 'pip install azure') - {0}".format(HAS_AZURE_EXC))
        if not HAS_MSRESTAZURE:
            self.fail("Do you have msrestazure installed? Try `pip install msrestazure`"
                      "- {0}".format(HAS_MSRESTAZURE_EXC))

        if azure_compute_version < AZURE_MIN_VERSION:
            self.fail("Expecting azure.mgmt.compute.__version__ to be >= {0}. Found version {1} "
                      "Do you have Azure >= 2.0.0rc2 installed?".format(AZURE_MIN_VERSION, azure_compute_version))
        if not HAS_AZURE:
            self.fail("Do you have azure>={1} installed? Try `pip install 'azure>={1}' --upgrade`"
                      "- {0}".format(HAS_AZURE_EXC, AZURE_MIN_RELEASE))

        self._network_client = None
        self._storage_client = None

@@ -146,7 +164,7 @@ class AzureRMModuleBase(object):
        self._compute_client = None
        self.check_mode = self.module.check_mode
        self.facts_module = facts_module
        self.debug = self.module.params.get('debug')
        # self.debug = self.module.params.get('debug')

        # authenticate
        self.credentials = self._get_credentials(self.module.params)

@@ -178,6 +196,13 @@ class AzureRMModuleBase(object):
        res = self.exec_module(**self.module.params)
        self.module.exit_json(**res)

    def check_client_version(self, client_name, client_version, expected_version):
        # Ensure Azure modules are at least 2.0.0rc5.
        if Version(client_version) < Version(expected_version):
            self.fail("Installed {0} client version is {1}. The supported version is {2}. Try "
                      "`pip install azure>={3} --upgrade`".format(client_name, client_version, expected_version,
                                                                  AZURE_MIN_RELEASE))

    def exec_module(self, **kwargs):
        self.fail("Error: {0} failed to implement exec_module method.".format(self.__class__.__name__))

@@ -194,12 +219,12 @@ class AzureRMModuleBase(object):
    def log(self, msg, pretty_print=False):
        pass
        # Use only during module development
        # if self.debug:
        #     log_file = open('azure_rm.log', 'a')
        #     if pretty_print:
        #         log_file.write(json.dumps(msg, indent=4, sort_keys=True))
        #     else:
        #         log_file.write(msg + u'\n')
        #if self.debug:
        #    log_file = open('azure_rm.log', 'a')
        #    if pretty_print:
        #        log_file.write(json.dumps(msg, indent=4, sort_keys=True))
        #    else:
        #        log_file.write(msg + u'\n')

    def validate_tags(self, tags):
        '''

@@ -215,52 +240,6 @@ class AzureRMModuleBase(object):
            if not isinstance(value, str):
                self.fail("Tags values must be strings. Found {0}:{1}".format(str(key), str(value)))

    def _tag_purge(self, tags):
        '''
        Remove metadata tags not found in user provided tags parameter. Returns tuple
        with bool indicating something changed and dict of new tags to be assigned to
        the object.

        :param tags: object metadata tags
        :return: bool, dict of tags
        '''
        if not self.module.params.get('tags'):
            # purge all tags
            return True, dict()
        new_tags = copy.copy(tags)
        changed = False
        for key in tags:
            if not self.module.params['tags'].get(key):
                # key not found in user provided parameters
                new_tags.pop(key)
                changed = True
        if changed:
            self.log('CHANGED: purged tags')
        return changed, new_tags

    def _tag_update(self, tags):
        '''
        Update metadata tags with values in user provided tags parameter. Returns
        tuple with bool indicating something changed and dict of new tags to be
        assigned to the object.

        :param tags: object metadata tags
        :return: bool, dict of tags
        '''
        if isinstance(tags, dict):
            new_tags = copy.copy(tags)
        else:
            new_tags = dict()
        changed = False
        if self.module.params.get('tags'):
            for key, value in self.module.params['tags'].items():
                if not (new_tags.get(key) and new_tags[key] == value):
                    changed = True
                    new_tags[key] = value
        if changed:
            self.log('CHANGED: updated tags')
        return changed, new_tags

    def update_tags(self, tags):
        '''
        Call from the module to update metadata tags. Returns tuple

@@ -270,15 +249,18 @@ class AzureRMModuleBase(object):
        :param tags: metadata tags from the object
        :return: bool, dict
        '''
        new_tags = copy.copy(tags) if isinstance(tags, dict) else dict()
        changed = False
        updated, new_tags = self._tag_update(tags)
        if updated:
            changed = True

        if not self.module.params['append_tags']:
            purged, new_tags = self._tag_purge(new_tags)
            if purged:
                changed = True
        if isinstance(self.module.params.get('tags'), dict):
            for key, value in self.module.params['tags'].iteritems():
                if not new_tags.get(key) or new_tags[key] != value:
                    changed = True
                    new_tags[key] = value
        if isinstance(tags, dict):
            for key, value in tags.iteritems():
                if not self.module.params['tags'].get(key):
                    new_tags.pop(key)
                    changed = True
        return changed, new_tags

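The rework above folds the deleted _tag_update()/_tag_purge() pair into a single pass: requested tags are merged in, and keys the user did not request are dropped. A standalone sketch of those semantics (names and data are illustrative; the real method also honours an append_tags module parameter):

    def update_tags(current, requested, append_tags=True):
        new_tags = dict(current)
        changed = False
        for key, value in requested.items():      # update/add pass
            if new_tags.get(key) != value:
                new_tags[key] = value
                changed = True
        if not append_tags:                       # purge pass
            for key in list(new_tags):
                if key not in requested:
                    del new_tags[key]
                    changed = True
        return changed, new_tags

    print(update_tags({'env': 'dev', 'owner': 'ops'}, {'env': 'prod'}, append_tags=False))
    # -> (True, {'env': 'prod'})
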
    def has_tags(self, obj_tags, tag_list):

@@ -323,7 +305,7 @@ class AzureRMModuleBase(object):
            return self.rm_client.resource_groups.get(resource_group)
        except CloudError:
            self.fail("Parameter error: resource group {0} not found".format(resource_group))
        except Exception, exc:
        except Exception as exc:
            self.fail("Error retrieving resource group {0} - {1}".format(resource_group, str(exc)))

    def _get_profile(self, profile="default"):

@@ -331,7 +313,7 @@ class AzureRMModuleBase(object):
        try:
            config = ConfigParser.ConfigParser()
            config.read(path)
        except Exception, exc:
        except Exception as exc:
            self.fail("Failed to access {0}. Check that the file exists and you have read "
                      "access. {1}".format(path, str(exc)))
        credentials = dict()

@@ -341,21 +323,21 @@ class AzureRMModuleBase(object):
        except:
            pass

        if credentials.get('client_id') is not None or credentials.get('ad_user') is not None:
        if credentials.get('subscription_id'):
            return credentials

        return None

    def _get_env_credentials(self):
        env_credentials = dict()
        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems():
            env_credentials[attribute] = os.environ.get(env_variable, None)

        if env_credentials['profile'] is not None:
        if env_credentials['profile']:
            credentials = self._get_profile(env_credentials['profile'])
            return credentials

        if env_credentials['client_id'] is not None:
        if env_credentials.get('subscription_id') is not None:
            return env_credentials

        return None

@@ -367,7 +349,7 @@ class AzureRMModuleBase(object):
        self.log('Getting credentials')

        arg_credentials = dict()
        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems():
            arg_credentials[attribute] = params.get(attribute, None)

        # try module params

@@ -376,7 +358,7 @@ class AzureRMModuleBase(object):
            credentials = self._get_profile(arg_credentials['profile'])
            return credentials

        if arg_credentials['client_id'] is not None:
        if arg_credentials['subscription_id']:
            self.log('Received credentials from parameters.')
            return arg_credentials

@@ -394,18 +376,27 @@ class AzureRMModuleBase(object):

        return None

    def serialize_obj(self, obj, class_name):
    def serialize_obj(self, obj, class_name, enum_modules=[]):
        '''
        Return a JSON representation of an Azure object.

        :param obj: Azure object
        :param class_name: Name of the object's class
        :param enum_modules: List of module names to build enum dependencies from.
        :return: serialized result
        '''
        serializer = Serializer()
        dependencies = dict()
        if enum_modules:
            for module_name in enum_modules:
                mod = importlib.import_module(module_name)
                for mod_class_name, mod_class_obj in inspect.getmembers(mod, predicate=inspect.isclass):
                    dependencies[mod_class_name] = mod_class_obj
            self.log("dependencies: ")
            self.log(str(dependencies))
        serializer = Serializer(classes=dependencies)
        return serializer.body(obj, class_name)

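serialize_obj() now lets callers name the modules whose classes (typically msrest enum types) the Serializer must be able to resolve. The importlib/inspect collection pattern in isolation; 'collections.abc' is only a stand-in for an azure.mgmt.*.models module:

    import importlib
    import inspect

    def collect_classes(module_names):
        dependencies = {}
        for module_name in module_names:
            mod = importlib.import_module(module_name)
            for cls_name, cls_obj in inspect.getmembers(mod, predicate=inspect.isclass):
                dependencies[cls_name] = cls_obj
        return dependencies

    deps = collect_classes(['collections.abc'])
    print('Mapping' in deps)  # True; Serializer(classes=deps) receives a mapping like this
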
    def get_poller_result(self, poller, wait=20):
    def get_poller_result(self, poller, wait=5):
        '''
        Consistent method of waiting on and retrieving results from Azure's long poller

@@ -418,7 +409,7 @@ class AzureRMModuleBase(object):
            self.log("Waiting for {0} sec".format(delay))
            poller.wait(timeout=delay)
            return poller.result()
        except Exception, exc:
        except Exception as exc:
            self.log(str(exc))
            raise

@@ -463,15 +454,13 @@ class AzureRMModuleBase(object):
            # Get keys from the storage account
            self.log('Getting keys')
            account_keys = self.storage_client.storage_accounts.list_keys(resource_group_name, storage_account_name)
            keys['key1'] = account_keys.key1
            keys['key2'] = account_keys.key2
        except Exception, exc:
        except Exception as exc:
            self.fail("Error getting keys for account {0} - {1}".format(storage_account_name, str(exc)))

        try:
            self.log('Create blob service')
            return CloudStorageAccount(storage_account_name, keys['key1']).create_block_blob_service()
        except Exception, exc:
            return CloudStorageAccount(storage_account_name, account_keys.keys[0].value).create_block_blob_service()
        except Exception as exc:
            self.fail("Error creating blob service client for storage account {0} - {1}".format(storage_account_name,
                                                                                                 str(exc)))

@@ -508,7 +497,7 @@ class AzureRMModuleBase(object):
        self.log('Creating default public IP {0}'.format(public_ip_name))
        try:
            poller = self.network_client.public_ip_addresses.create_or_update(resource_group, public_ip_name, params)
        except Exception, exc:
        except Exception as exc:
            self.fail("Error creating {0} - {1}".format(public_ip_name, str(exc)))

        return self.get_poller_result(poller)

@@ -578,7 +567,7 @@ class AzureRMModuleBase(object):
            poller = self.network_client.network_security_groups.create_or_update(resource_group,
                                                                                  security_group_name,
                                                                                  parameters)
        except Exception, exc:
        except Exception as exc:
            self.fail("Error creating default security rule {0} - {1}".format(security_group_name, str(exc)))

        return self.get_poller_result(poller)

@@ -589,16 +578,15 @@ class AzureRMModuleBase(object):
            # time we attempt to use the requested client.
            resource_client = self.rm_client
            resource_client.providers.register(key)
        except Exception, exc:
        except Exception as exc:
            self.fail("One-time registration of {0} failed - {1}".format(key, str(exc)))

    @property
    def storage_client(self):
        self.log('Getting storage client...')
        if not self._storage_client:
            config = StorageManagementClientConfiguration(self.azure_credentials, self.subscription_id)
            config.add_user_agent(ANSIBLE_USER_AGENT)
            self._storage_client = StorageManagementClient(config)
            self.check_client_version('storage', storage_client_version, AZURE_EXPECTED_VERSIONS['storage_client_version'])
            self._storage_client = StorageManagementClient(self.azure_credentials, self.subscription_id)
        self._register('Microsoft.Storage')
        return self._storage_client

@@ -606,9 +594,8 @@ class AzureRMModuleBase(object):
    def network_client(self):
        self.log('Getting network client')
        if not self._network_client:
            config = NetworkManagementClientConfiguration(self.azure_credentials, self.subscription_id)
            config.add_user_agent(ANSIBLE_USER_AGENT)
            self._network_client = NetworkManagementClient(config)
            self.check_client_version('network', network_client_version, AZURE_EXPECTED_VERSIONS['network_client_version'])
            self._network_client = NetworkManagementClient(self.azure_credentials, self.subscription_id)
        self._register('Microsoft.Network')
        return self._network_client

@@ -616,17 +603,15 @@ class AzureRMModuleBase(object):
    def rm_client(self):
        self.log('Getting resource manager client')
        if not self._resource_client:
            config = ResourceManagementClientConfiguration(self.azure_credentials, self.subscription_id)
            config.add_user_agent(ANSIBLE_USER_AGENT)
            self._resource_client = ResourceManagementClient(config)
            self.check_client_version('resource', resource_client_version, AZURE_EXPECTED_VERSIONS['resource_client_version'])
            self._resource_client = ResourceManagementClient(self.azure_credentials, self.subscription_id)
        return self._resource_client

    @property
    def compute_client(self):
        self.log('Getting compute client')
        if not self._compute_client:
            config = ComputeManagementClientConfiguration(self.azure_credentials, self.subscription_id)
            config.add_user_agent(ANSIBLE_USER_AGENT)
            self._compute_client = ComputeManagementClient(config)
            self.check_client_version('compute', compute_client_version, AZURE_EXPECTED_VERSIONS['compute_client_version'])
            self._compute_client = ComputeManagementClient(self.azure_credentials, self.subscription_id)
        self._register('Microsoft.Compute')
        return self._compute_client

@@ -27,8 +27,8 @@
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1, True]
BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0, False]
BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 'True', 1, True]
BOOLEANS_FALSE = ['no', 'off', '0', 'false', 'False', 0, False]
BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE

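The two new entries cover the capitalized spellings that YAML-processed values arrive as, so 'True'/'False' are now accepted wherever loose booleans are allowed. The membership test in isolation (lists copied from the new values above; to_bool is a simplified stand-in for the real conversion):

    BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 'True', 1, True]
    BOOLEANS_FALSE = ['no', 'off', '0', 'false', 'False', 0, False]

    def to_bool(value):
        if value in BOOLEANS_TRUE:
            return True
        if value in BOOLEANS_FALSE:
            return False
        raise ValueError('not a boolean: %r' % (value,))

    print(to_bool('True'))   # True  (rejected before this change)
    print(to_bool('off'))    # False
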
# ansible modules can be written in any language. To simplify

@@ -136,10 +136,10 @@ except ImportError:
    try:
        import simplejson as json
    except ImportError:
        print('{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}')
        print('\n{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}')
        sys.exit(1)
    except SyntaxError:
        print('{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}')
        print('\n{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}')
        sys.exit(1)

HAVE_SELINUX=False

@@ -178,6 +178,8 @@ except ImportError:
    except ImportError:
        pass

PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)

try:
    from ast import literal_eval
except ImportError:

@@ -219,14 +221,10 @@ except ImportError:

    _literal_eval = literal_eval

from ansible import __version__
# Backwards compat. New code should just import and use __version__
ANSIBLE_VERSION = __version__

# Internal global holding passed in params and constants. This is consulted
# in case multiple AnsibleModules are created. Otherwise each AnsibleModule
# would attempt to read from stdin. Other code should not use this directly
# as it is an internal implementation detail
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None

FILE_COMMON_ARGUMENTS=dict(

@@ -524,9 +522,59 @@ def is_executable(path):
        or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
        or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])

class AnsibleFallbackNotFound(Exception):
    pass

def _load_params():
    ''' read the modules parameters and store them globally.

    This function may be needed for certain very dynamic custom modules which
    want to process the parameters that are being handed the module. Since
    this is so closely tied to the implementation of modules we cannot
    guarantee API stability for it (it may change between versions) however we
    will try not to break it gratuitously. It is certainly more future-proof
    to call this function and consume its outputs than to implement the logic
    inside it as a copy in your own code.
    '''
    global _ANSIBLE_ARGS
    if _ANSIBLE_ARGS is not None:
        buffer = _ANSIBLE_ARGS
    else:
        # debug overrides to read args from file or cmdline

        # Avoid tracebacks when locale is non-utf8
        # We control the args and we pass them as utf8
        if len(sys.argv) > 1:
            if os.path.isfile(sys.argv[1]):
                fd = open(sys.argv[1], 'rb')
                buffer = fd.read()
                fd.close()
            else:
                buffer = sys.argv[1]
                if sys.version_info >= (3,):
                    buffer = buffer.encode('utf-8', errors='surrogateescape')
        # default case, read from stdin
        else:
            if sys.version_info < (3,):
                buffer = sys.stdin.read()
            else:
                buffer = sys.stdin.buffer.read()
        _ANSIBLE_ARGS = buffer

    try:
        params = json.loads(buffer.decode('utf-8'))
    except ValueError:
        # This helper used too early for fail_json to work.
        print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
        sys.exit(1)

    if sys.version_info < (3,):
        params = json_dict_unicode_to_bytes(params)

    try:
        return params['ANSIBLE_MODULE_ARGS']
    except KeyError:
        # This helper does not have access to fail_json so we have to print
        # json output on our own.
        print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", "failed": true}')
        sys.exit(1)

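With _load_params() exposed at module level, a very dynamic custom module can inspect its raw arguments before constructing its argument_spec, exactly as the docstring above suggests. A hedged sketch of that call pattern; the 'opt_' naming convention is invented, and this only runs inside Ansible's module wrapper:

    from ansible.module_utils.basic import AnsibleModule, _load_params

    params = _load_params()                 # raw dict parsed from stdin/argv
    dynamic = [k for k in params if k.startswith('opt_')]

    argument_spec = dict(name=dict(required=True))
    for key in dynamic:
        argument_spec[key] = dict(type='str')   # accept the dynamic options

    module = AnsibleModule(argument_spec=argument_spec)
    module.exit_json(changed=False, dynamic_options=dynamic)
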
def env_fallback(*args, **kwargs):
    ''' Load value from environment '''

@@ -536,6 +584,23 @@ def env_fallback(*args, **kwargs):
    else:
        raise AnsibleFallbackNotFound

def _lenient_lowercase(lst):
    """Lowercase elements of a list.

    If an element is not a string, pass it through untouched.
    """
    lowered = []
    for value in lst:
        try:
            lowered.append(value.lower())
        except AttributeError:
            lowered.append(value)
    return lowered


class AnsibleFallbackNotFound(Exception):
    pass


class AnsibleModule(object):
    def __init__(self, argument_spec, bypass_checks=False, no_log=False,

@@ -562,7 +627,7 @@ class AnsibleModule(object):
        self.run_command_environ_update = {}

        self.aliases = {}
        self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity']
        self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity', '_ansible_selinux_special_fs', '_ansible_version', '_ansible_syslog_facility']

        if add_file_common_args:
            for k, v in FILE_COMMON_ARGUMENTS.items():

@@ -578,7 +643,7 @@ class AnsibleModule(object):
        except Exception:
            e = get_exception()
            # Use exceptions here because it isn't safe to call fail_json until no_log is processed
            print('{"failed": true, "msg": "Module alias error: %s"}' % str(e))
            print('\n{"failed": true, "msg": "Module alias error: %s"}' % str(e))
            sys.exit(1)

        # Save parameter values that should never be logged

@@ -613,6 +678,7 @@ class AnsibleModule(object):
            'float': self._check_type_float,
            'path': self._check_type_path,
            'raw': self._check_type_raw,
            'jsonarg': self._check_type_jsonarg,
        }
        if not bypass_checks:
            self._check_required_arguments()

@@ -764,7 +830,7 @@ class AnsibleModule(object):
        return (uid, gid)

    def find_mount_point(self, path):
        path = os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
        path = os.path.realpath(os.path.expanduser(os.path.expandvars(path)))
        while not os.path.ismount(path):
            path = os.path.dirname(path)
        return path

@@ -785,7 +851,7 @@ class AnsibleModule(object):
            (device, mount_point, fstype, options, rest) = line.split(' ', 4)

            if path_mount_point == mount_point:
                for fs in self.constants['SELINUX_SPECIAL_FS']:
                for fs in self._selinux_special_fs:
                    if fs in fstype:
                        special_context = self.selinux_context(path_mount_point)
                        return (True, special_context)

@@ -1178,7 +1244,8 @@ class AnsibleModule(object):
        return aliases_results

    def _check_arguments(self, check_invalid_arguments):
        for (k,v) in self.params.items():
        self._syslog_facility = 'LOG_USER'
        for (k,v) in list(self.params.items()):

            if k == '_ansible_check_mode' and v:
                if not self.supports_check_mode:

@@ -1197,6 +1264,15 @@ class AnsibleModule(object):
            elif k == '_ansible_verbosity':
                self._verbosity = v

            elif k == '_ansible_selinux_special_fs':
                self._selinux_special_fs = v

            elif k == '_ansible_syslog_facility':
                self._syslog_facility = v

            elif k == '_ansible_version':
                self.ansible_version = v

            elif check_invalid_arguments and k not in self._legal_inputs:
                self.fail_json(msg="unsupported parameter for module: %s" % k)

@@ -1270,9 +1346,28 @@ class AnsibleModule(object):
            if type(choices) == list:
                if k in self.params:
                    if self.params[k] not in choices:
                        choices_str=",".join([str(c) for c in choices])
                        msg="value of %s must be one of: %s, got: %s" % (k, choices_str, self.params[k])
                        self.fail_json(msg=msg)
                    # PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking the value. If we can't figure this out, module author is responsible.
                    lowered_choices = None
                    if self.params[k] == 'False':
                        lowered_choices = _lenient_lowercase(choices)
                        FALSEY = frozenset(BOOLEANS_FALSE)
                        overlap = FALSEY.intersection(choices)
                        if len(overlap) == 1:
                            # Extract from a set
                            (self.params[k],) = overlap

                    if self.params[k] == 'True':
                        if lowered_choices is None:
                            lowered_choices = _lenient_lowercase(choices)
                        TRUTHY = frozenset(BOOLEANS_TRUE)
                        overlap = TRUTHY.intersection(choices)
                        if len(overlap) == 1:
                            (self.params[k],) = overlap

                    if self.params[k] not in choices:
                        choices_str=",".join([str(c) for c in choices])
                        msg="value of %s must be one of: %s, got: %s" % (k, choices_str, self.params[k])
                        self.fail_json(msg=msg)
            else:
                self.fail_json(msg="internal error: do not know how to interpret argument_spec")

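The guard above only rewrites 'True'/'False' back to a declared choice when exactly one truthy or falsey spelling appears in choices; anything else would be ambiguous. A worked example of both cases (the choices lists are hypothetical):

    BOOLEANS_FALSE = ['no', 'off', '0', 'false', 'False', 0, False]

    # A play said `mode: no`; YAML treated it as a boolean and the module
    # received the string 'False', while the module's choices say 'no':
    print(frozenset(BOOLEANS_FALSE).intersection(['yes', 'no', 'maybe']))
    # frozenset({'no'}) -- one falsey choice, safe to rewrite 'False' -> 'no'

    print(frozenset(BOOLEANS_FALSE).intersection(['no', 'off', 'on']))
    # two falsey choices -> ambiguous, so the value is left alone
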
@@ -1390,7 +1485,7 @@ class AnsibleModule(object):
        if isinstance(value, float):
            return value

        if isinstance(value, basestring):
        if isinstance(value, (bytes, unicode, int)):
            return float(value)

        raise TypeError('%s cannot be converted to a float' % type(value))

@@ -1399,6 +1494,16 @@ class AnsibleModule(object):
        value = self._check_type_str(value)
        return os.path.expanduser(os.path.expandvars(value))

    def _check_type_jsonarg(self, value):
        # Return a jsonified string. Sometimes the controller turns a json
        # string into a dict/list so transform it back into json here
        if isinstance(value, (unicode, bytes)):
            return value.strip()
        else:
            if isinstance(value, (list, tuple, dict)):
                return json.dumps(value)
        raise TypeError('%s cannot be converted to a json string' % type(value))

    def _check_type_raw(self, value):
        return value

@@ -1460,54 +1565,18 @@ class AnsibleModule(object):
                continue

    def _load_params(self):
        ''' read the input and set the params attribute. Sets the constants as well.'''
        ''' read the input and set the params attribute.

        This method is for backwards compatibility. The guts of the function
        were moved out in 2.1 so that custom modules could read the parameters.
        '''
        # debug overrides to read args from file or cmdline

        global _ANSIBLE_ARGS
        if _ANSIBLE_ARGS is not None:
            buffer = _ANSIBLE_ARGS
        else:
            # Avoid tracebacks when locale is non-utf8
            # We control the args and we pass them as utf8
            if len(sys.argv) > 1:
                if os.path.isfile(sys.argv[1]):
                    fd = open(sys.argv[1], 'rb')
                    buffer = fd.read()
                    fd.close()
                else:
                    buffer = sys.argv[1]
                    if sys.version_info >= (3,):
                        buffer = buffer.encode('utf-8', errors='surrogateescape')
            # default case, read from stdin
            else:
                if sys.version_info < (3,):
                    buffer = sys.stdin.read()
                else:
                    buffer = sys.stdin.buffer.read()
            _ANSIBLE_ARGS = buffer

        try:
            params = json.loads(buffer.decode('utf-8'))
        except ValueError:
            # This helper used too early for fail_json to work.
            print('{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
            sys.exit(1)

        if sys.version_info < (3,):
            params = json_dict_unicode_to_bytes(params)

        try:
            self.params = params['ANSIBLE_MODULE_ARGS']
            self.constants = params['ANSIBLE_MODULE_CONSTANTS']
        except KeyError:
            # This helper used too early for fail_json to work.
            print('{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS and ANSIBLE_MODULE_CONSTANTS in json data from stdin. Unable to figure out what parameters were passed", "failed": true}')
            sys.exit(1)
        self.params = _load_params()

    def _log_to_syslog(self, msg):
        if HAS_SYSLOG:
            module = 'ansible-%s' % os.path.basename(__file__)
            facility = getattr(syslog, self.constants.get('SYSLOG_FACILITY', 'LOG_USER'), syslog.LOG_USER)
            facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
            syslog.openlog(str(module), 0, facility)
            syslog.syslog(syslog.LOG_INFO, msg)

@@ -1560,16 +1629,17 @@ class AnsibleModule(object):
        # TODO: generalize a separate log function and make log_invocation use it
        # Sanitize possible password argument when logging.
        log_args = dict()
        passwd_keys = ['password', 'login_password']

        for param in self.params:
            canon = self.aliases.get(param, param)
            arg_opts = self.argument_spec.get(canon, {})
            no_log = arg_opts.get('no_log', False)
            arg_type = arg_opts.get('type', 'str')

            if self.boolean(no_log):
                log_args[param] = 'NOT_LOGGING_PARAMETER'
            elif param in passwd_keys:
            # try to capture all passwords/passphrase named fields
            elif arg_type != 'bool' and PASSWORD_MATCH.search(param):
                log_args[param] = 'NOT_LOGGING_PASSWORD'
            else:
                param_val = self.params[param]

@@ -1693,7 +1763,7 @@ class AnsibleModule(object):
            kwargs['invocation'] = {'module_args': self.params}
        kwargs = remove_values(kwargs, self.no_log_values)
        self.do_cleanup_files()
        print(self.jsonify(kwargs))
        print('\n%s' % self.jsonify(kwargs))
        sys.exit(0)

    def fail_json(self, **kwargs):

@@ -1705,7 +1775,7 @@ class AnsibleModule(object):
            kwargs['invocation'] = {'module_args': self.params}
        kwargs = remove_values(kwargs, self.no_log_values)
        self.do_cleanup_files()
        print(self.jsonify(kwargs))
        print('\n%s' % self.jsonify(kwargs))
        sys.exit(1)

    def fail_on_missing_params(self, required_params=None):

@@ -1739,7 +1809,7 @@ class AnsibleModule(object):
                              (filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))

        blocksize = 64 * 1024
        infile = open(filename, 'rb')
        infile = open(os.path.realpath(filename), 'rb')
        block = infile.read(blocksize)
        while block:
            digest_method.update(block)

@@ -1975,6 +2045,21 @@ class AnsibleModule(object):
            old_env_vals['PATH'] = os.environ['PATH']
            os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])

        # If using test-module and explode, the remote lib path will resemble ...
        #   /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
        # If using ansible or ansible-playbook with a remote system ...
        #   /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py

        # Clean out python paths set by ziploader
        if 'PYTHONPATH' in os.environ:
            pypaths = os.environ['PYTHONPATH'].split(':')
            pypaths = [x for x in pypaths \
                        if not x.endswith('/ansible_modlib.zip') \
                        and not x.endswith('/debug_dir')]
            os.environ['PYTHONPATH'] = ':'.join(pypaths)
            if not os.environ['PYTHONPATH']:
                del os.environ['PYTHONPATH']

        # create a printable version of the command for use
        # in reporting later, which strips out things like
        # passwords from the args list

@@ -2016,7 +2101,6 @@ class AnsibleModule(object):
            stdin=st_in,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=os.environ,
        )

        if cwd and os.path.isdir(cwd):

@@ -93,6 +93,9 @@ class AnsibleCloudStack(object):
        # these keys will be compared case sensitive in self.has_changed()
        self.case_sensitive_keys = [
            'id',
            'displaytext',
            'displayname',
            'description',
        ]

        self.module = module

@@ -154,12 +157,27 @@ class AnsibleCloudStack(object):
                continue

            if key in current_dict:
                if self.case_sensitive_keys and key in self.case_sensitive_keys:
                    if str(value) != str(current_dict[key]):
                if isinstance(value, (int, float, long, complex)):
                    # ensure we compare the same type
                    if isinstance(value, int):
                        current_dict[key] = int(current_dict[key])
                    elif isinstance(value, float):
                        current_dict[key] = float(current_dict[key])
                    elif isinstance(value, long):
                        current_dict[key] = long(current_dict[key])
                    elif isinstance(value, complex):
                        current_dict[key] = complex(current_dict[key])

                    if value != current_dict[key]:
                        return True
                else:
                    if self.case_sensitive_keys and key in self.case_sensitive_keys:
                        if value != current_dict[key].encode('utf-8'):
                            return True

                    # Test for diff in case insensitive way
                    elif value.lower() != current_dict[key].encode('utf-8').lower():
                        return True
                # Test for diff in case insensitive way
                elif str(value).lower() != str(current_dict[key]).lower():
                    return True
            else:
                return True
        return False

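The numeric branch above exists because the CloudStack API reports numbers as strings, so a naive 42 != '42' comparison would flag a spurious change. The same-type comparison in isolation (simplified sketch; Python 2's long is omitted):

    def differs(requested, current):
        if isinstance(requested, (int, float, complex)) and not isinstance(requested, bool):
            return requested != type(requested)(current)   # coerce '42' -> 42 first
        return str(requested).lower() != str(current).lower()

    print(differs(42, '42'))      # False: no spurious change
    print(differs('Web', 'web'))  # False: strings compare case-insensitively
    print(differs(8080, '80'))    # True
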
@@ -245,6 +263,9 @@ class AnsibleCloudStack(object):
        zone = self.module.params.get('zone')
        zones = self.cs.listZones()

        if not zones:
            self.module.fail_json(msg="No zones available. Please create a zone first")

        # use the first zone if no zone param given
        if not zone:
            self.zone = zones['zone'][0]

@@ -22,6 +22,7 @@ import json
import sys
import copy

from distutils.version import LooseVersion
from urlparse import urlparse
from ansible.module_utils.basic import *
@@ -37,7 +38,7 @@ try:
    from docker.constants import DEFAULT_TIMEOUT_SECONDS, DEFAULT_DOCKER_API_VERSION
    from docker.utils.types import Ulimit, LogConfig
    from docker import auth
except ImportError, exc:
except ImportError as exc:
    HAS_DOCKER_ERROR = str(exc)
    HAS_DOCKER_PY = False
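The `except ImportError, exc:` form above is Python 2-only syntax; `except ImportError as exc:` parses on Python 2.6+ and Python 3 alike, which is why these hunks swap the comma form out throughout. A minimal sketch of the guarded-import pattern the module relies on (module name illustrative):

    # record an optional dependency's import failure instead of crashing at import time
    HAS_DOCKER_PY = True
    HAS_DOCKER_ERROR = None
    try:
        import docker  # may be absent on the target host
    except ImportError as exc:
        HAS_DOCKER_ERROR = str(exc)
        HAS_DOCKER_PY = False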
@@ -151,7 +152,7 @@ class AnsibleDockerClient(Client):
        if not HAS_DOCKER_PY:
            self.fail("Failed to import docker-py - %s. Try `pip install docker-py`" % HAS_DOCKER_ERROR)

        if docker_version < MIN_DOCKER_VERSION:
        if LooseVersion(docker_version) < LooseVersion(MIN_DOCKER_VERSION):
            self.fail("Error: docker-py version is %s. Minimum version required is %s." % (docker_version,
                                                                                           MIN_DOCKER_VERSION))
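Comparing version strings with `<` falls back to lexicographic ordering, which misranks multi-digit components; LooseVersion splits the string into numeric parts first. A quick check of the difference:

    from distutils.version import LooseVersion

    assert '1.10' < '1.9'                              # plain strings compare char by char
    assert LooseVersion('1.10') > LooseVersion('1.9')  # versions compare component-wise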
@ -161,9 +162,9 @@ class AnsibleDockerClient(Client):
|
|||
|
||||
try:
|
||||
super(AnsibleDockerClient, self).__init__(**self._connect_params)
|
||||
except APIError, exc:
|
||||
except APIError as exc:
|
||||
self.fail("Docker API error: %s" % exc)
|
||||
except Exception, exc:
|
||||
except Exception as exc:
|
||||
self.fail("Error connecting: %s" % exc)
|
||||
|
||||
def log(self, msg, pretty_print=False):
|
||||
|
@@ -233,7 +234,7 @@ class AnsibleDockerClient(Client):
            tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
                                         'DOCKER_TLS_HOSTNAME', 'localhost'),
            api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
                                        DEFAULT_DOCKER_API_VERSION),
                                        'auto'),
            cacert_path=self._get_value('cacert_path', params['cacert_path'], 'DOCKER_CERT_PATH', None),
            cert_path=self._get_value('cert_path', params['cert_path'], 'DOCKER_CERT_PATH', None),
            key_path=self._get_value('key_path', params['key_path'], 'DOCKER_CERT_PATH', None),
@@ -262,7 +263,7 @@ class AnsibleDockerClient(Client):
        try:
            tls_config = TLSConfig(**kwargs)
            return tls_config
        except TLSParameterError, exc:
        except TLSParameterError as exc:
            self.fail("TLS config error: %s" % exc)

    def _get_connect_params(self):
@@ -372,9 +373,9 @@ class AnsibleDockerClient(Client):
                if container['Id'] == name:
                    result = container
                    break
        except SSLError, exc:
        except SSLError as exc:
            self._handle_ssl_error(exc)
        except Exception, exc:
        except Exception as exc:
            self.fail("Error retrieving container list: %s" % exc)

        if result is not None:
@@ -382,7 +383,7 @@ class AnsibleDockerClient(Client):
                self.log("Inspecting container Id %s" % result['Id'])
                result = self.inspect_container(container=result['Id'])
                self.log("Completed container inspection")
            except Exception, exc:
            except Exception as exc:
                self.fail("Error inspecting container: %s" % exc)

        return result
@@ -411,7 +412,7 @@ class AnsibleDockerClient(Client):
        if len(images) == 1:
            try:
                inspection = self.inspect_image(images[0]['Id'])
            except Exception, exc:
            except Exception as exc:
                self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
            return inspection
@@ -431,9 +432,10 @@ class AnsibleDockerClient(Client):
        images = response
        if tag:
            lookup = "%s:%s" % (name, tag)
            images = []
            for image in response:
                self.log(image, pretty_print=True)
                if image.get('RepoTags') and lookup in image.get('RepoTags'):
                tags = image.get('RepoTags')
                if tags and lookup in tags:
                    images = [image]
                    break
        return images
@@ -444,8 +446,7 @@ class AnsibleDockerClient(Client):
        '''
        self.log("Pulling image %s:%s" % (name, tag))
        try:
            for line in self.pull(name, tag=tag, stream=True):
                line = json.loads(line)
            for line in self.pull(name, tag=tag, stream=True, decode=True):
                self.log(line, pretty_print=True)
                if line.get('error'):
                    if line.get('errorDetail'):
@@ -455,7 +456,7 @@ class AnsibleDockerClient(Client):
                                                                 error_detail.get('message')))
                    else:
                        self.fail("Error pulling %s - %s" % (name, line.get('error')))
        except Exception, exc:
        except Exception as exc:
            self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))

        return self.find_image(name, tag)
@@ -1,20 +1,29 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
# Copyright (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# This file is part of Ansible
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

import re
@@ -31,7 +31,13 @@ import struct
import datetime
import getpass
import pwd
import ConfigParser

try:
    # python2
    import ConfigParser as configparser
except ImportError:
    # python3
    import configparser
from ansible.module_utils.basic import get_all_subclasses

# py2 vs py3; replace with six via ziploader
@@ -40,7 +46,12 @@ try:
except ImportError:
    from io import StringIO

from string import maketrans
try:
    # python2
    from string import maketrans
except ImportError:
    # python3
    maketrans = str.maketrans

try:
    import selinux
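Both facts.py hunks above use the same probe-and-fallback idiom: try the Python 2 name, and on ImportError bind the Python 3 equivalent, so one file runs under either interpreter. The two shims combined into a runnable sketch:

    try:
        import ConfigParser as configparser   # Python 2 module name
    except ImportError:
        import configparser                   # renamed in Python 3

    try:
        from string import maketrans          # Python 2 location
    except ImportError:
        maketrans = str.maketrans             # moved onto str in Python 3

    table = maketrans('ab', 'ba')
    print('abba'.translate(table))            # -> baab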
@@ -147,6 +158,7 @@ class Facts(object):
        { 'path' : '/usr/sbin/urpmi',      'name' : 'urpmi' },
        { 'path' : '/usr/bin/pacman',      'name' : 'pacman' },
        { 'path' : '/bin/opkg',            'name' : 'opkg' },
        { 'path' : '/usr/pkg/bin/pkgin',   'name' : 'pkgin' },
        { 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' },
        { 'path' : '/opt/local/bin/port',  'name' : 'macports' },
        { 'path' : '/usr/local/bin/brew',  'name' : 'homebrew' },
@@ -260,10 +272,10 @@ class Facts(object):
                    fact = json.loads(out)
                except ValueError:
                    # load raw ini
                    cp = ConfigParser.ConfigParser()
                    cp = configparser.ConfigParser()
                    try:
                        cp.readfp(StringIO(out))
                    except ConfigParser.Error:
                    except configparser.Error:
                        fact = "error loading fact - please check content"
                else:
                    fact = {}
@@ -1127,6 +1139,8 @@ class LinuxHardware(Hardware):
        mtab = get_file_content('/etc/mtab', '')
        for line in mtab.split('\n'):
            fields = line.rstrip('\n').split()
            if len(fields) < 4:
                continue
            if fields[0].startswith('/') or ':/' in fields[0]:
                if(fields[2] != 'none'):
                    size_total, size_available = self._get_mount_size_facts(fields[1])
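The added `len(fields) < 4` check skips blank or truncated /etc/mtab entries before the loop indexes fields[2]; without it an empty trailing line raises IndexError. The guard in isolation, on sample data:

    mtab = "proc /proc proc rw 0 0\n/dev/sda1 / ext4 rw,relatime 0 0\n\n"
    for line in mtab.split('\n'):
        fields = line.split()
        if len(fields) < 4:          # blank or malformed entry
            continue
        device, mountpoint, fstype = fields[0], fields[1], fields[2]
        print(device, mountpoint, fstype)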
@@ -1176,7 +1190,8 @@ class LinuxHardware(Hardware):
            sysfs_no_links = 0
            try:
                path = os.readlink(os.path.join("/sys/block/", block))
            except OSError, e:
            except OSError:
                e = sys.exc_info()[1]
                if e.errno == errno.EINVAL:
                    path = block
                    sysfs_no_links = 1
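`except OSError, e` is a syntax error under Python 3, while `except OSError as e` does not parse on the very old Python 2 interpreters modules still had to support, so this hunk binds the exception through sys.exc_info() instead, which is valid everywhere:

    import errno
    import sys

    try:
        raise OSError(errno.EINVAL, 'Invalid argument')
    except OSError:
        e = sys.exc_info()[1]   # portable handle on the active exception
        print(e.errno == errno.EINVAL)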
@@ -27,18 +27,29 @@
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

import json
import os
import traceback
from distutils.version import LooseVersion

from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
try:
    from libcloud.compute.types import Provider
    import libcloud
    from libcloud.compute.providers import get_driver
    HAS_LIBCLOUD_BASE = True
except ImportError:
    HAS_LIBCLOUD_BASE = False

USER_AGENT_PRODUCT="Ansible-gce"
USER_AGENT_VERSION="v1"

def gce_connect(module, provider=None):
    """Return a Google Cloud Engine connection."""
    if not HAS_LIBCLOUD_BASE:
        module.fail_json(msg='libcloud must be installed to use this module')

    service_account_email = module.params.get('service_account_email', None)
    credentials_file = module.params.get('credentials_file', None)
    pem_file = module.params.get('pem_file', None)
    project_id = module.params.get('project_id', None)
@@ -50,6 +61,8 @@ def gce_connect(module, provider=None):
        project_id = os.environ.get('GCE_PROJECT', None)
    if not pem_file:
        pem_file = os.environ.get('GCE_PEM_FILE_PATH', None)
    if not credentials_file:
        credentials_file = os.environ.get('GCE_CREDENTIALS_FILE_PATH', pem_file)

    # If we still don't have one or more of our credentials, attempt to
    # get the remaining values from the libcloud secrets file.
@@ -62,25 +75,41 @@ def gce_connect(module, provider=None):
        if hasattr(secrets, 'GCE_PARAMS'):
            if not service_account_email:
                service_account_email = secrets.GCE_PARAMS[0]
            if not pem_file:
                pem_file = secrets.GCE_PARAMS[1]
            if not credentials_file:
                credentials_file = secrets.GCE_PARAMS[1]
        keyword_params = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
        if not project_id:
            project_id = keyword_params.get('project', None)

    # If we *still* don't have the credentials we need, then it's time to
    # just fail out.
    if service_account_email is None or pem_file is None or project_id is None:
    if service_account_email is None or credentials_file is None or project_id is None:
        module.fail_json(msg='Missing GCE connection parameters in libcloud '
                             'secrets file.')
        return None
    else:
        # We have credentials but lets make sure that if they are JSON we have the minimum
        # libcloud requirement met
        try:
            # Try to read credentials as JSON
            with open(credentials_file) as credentials:
                json.loads(credentials.read())
            # If the credentials are proper JSON and we do not have the minimum
            # required libcloud version, bail out and return a descriptive error
            if LooseVersion(libcloud.__version__) < '0.17.0':
                module.fail_json(msg='Using JSON credentials but libcloud minimum version not met. '
                                     'Upgrade to libcloud>=0.17.0.')
                return None
        except ValueError, e:
            # Not JSON
            pass

    # Allow for passing in libcloud Google DNS (e.g, Provider.GOOGLE)
    if provider is None:
        provider = Provider.GCE

    try:
        gce = get_driver(provider)(service_account_email, pem_file,
        gce = get_driver(provider)(service_account_email, credentials_file,
                                   datacenter=module.params.get('zone', None),
                                   project=project_id)
        gce.connection.user_agent_append("%s/%s" % (
@@ -77,9 +77,14 @@ class Cli(object):
        key_filename = self.module.params['ssh_keyfile']
        timeout = self.module.params['timeout']

        allow_agent = (key_filename is not None) or (key_filename is None and password is None)

        try:
            self.shell = Shell(kickstart=False, prompts_re=CLI_PROMPTS_RE, errors_re=CLI_ERRORS_RE)
            self.shell.open(host, port=port, username=username, password=password, key_filename=key_filename, timeout=timeout)
            self.shell = Shell(kickstart=False, prompts_re=CLI_PROMPTS_RE,
                               errors_re=CLI_ERRORS_RE)
            self.shell.open(host, port=port, username=username,
                            password=password, key_filename=key_filename,
                            allow_agent=allow_agent, timeout=timeout)
        except ShellError:
            e = get_exception()
            msg = 'failed to connect to %s:%s - %s' % (host, port, str(e))
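The new allow_agent flag permits SSH-agent authentication only when a key file was named explicitly, or when neither key nor password was supplied, so a password-only login never stalls on agent keys. The expression's truth table, checked directly:

    def allow_agent(key_filename, password):
        return (key_filename is not None) or (key_filename is None and password is None)

    assert allow_agent('~/.ssh/id_rsa', None) is True   # explicit key: agent allowed
    assert allow_agent(None, 'secret') is False         # password only: agent kept out
    assert allow_agent(None, None) is True              # nothing given: agent is last resort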
@@ -1,20 +1,29 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
# Copyright (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# This file is part of Ansible
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

import re
@@ -93,9 +93,12 @@ class Cli(object):
        password = self.module.params['password']
        key_filename = self.module.params['ssh_keyfile']

        allow_agent = (key_filename is not None) or (key_filename is None and password is None)

        try:
            self.shell = Shell()
            self.shell.open(host, port=port, username=username, password=password, key_filename=key_filename)
            self.shell.open(host, port=port, username=username, password=password,
                            key_filename=key_filename, allow_agent=allow_agent)
        except ShellError:
            e = get_exception()
            msg = 'failed to connect to %s:%s - %s' % (host, port, str(e))
@@ -152,9 +155,10 @@ class Netconf(object):

        user = self.module.params['username']
        passwd = self.module.params['password']
        key_filename = self.module.params['ssh_keyfile']

        self.device = Device(host, user=user, passwd=passwd, port=port,
                             gather_facts=False).open()
                             gather_facts=False, ssh_private_key_file=key_filename).open()

        self.config = Config(self.device)
@@ -350,6 +354,8 @@ def get_module(**kwargs):
        module.fail_json(msg='paramiko is required but does not appear to be installed')
    elif module.params['transport'] == 'netconf' and not HAS_PYEZ:
        module.fail_json(msg='junos-eznc >= 1.2.2 is required but does not appear to be installed')
    elif module.params['transport'] == 'netconf' and not HAS_JXMLEASE:
        module.fail_json(msg='jxmlease is required but does not appear to be installed')

    module.connect()
    return module
@@ -1,24 +1,32 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
# Copyright (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# This file is part of Ansible
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

import re
import collections
import itertools
import shlex
@@ -109,10 +117,9 @@ class NetworkConfig(object):
        return self._config

    def __str__(self):
        config = collections.OrderedDict()
        for item in self._config:
            self.expand(item, config)
        return '\n'.join(self.flatten(config))
        if self._device_os == 'junos':
            return self.to_lines(self.expand(self.items))
        return self.to_block(self.expand(self.items))

    def load(self, contents):
        self._config = parse(contents, indent=self.indent)
@@ -154,26 +161,29 @@ class NetworkConfig(object):
        regexp = r'%s' % regexp
        return re.findall(regexp, str(self))

    def expand(self, obj, items):
        block = [item.raw for item in obj.parents]
        block.append(obj.raw)
    def to_lines(self, section):
        lines = list()
        for entry in section[1:]:
            line = ['set']
            line.extend([p.text for p in entry.parents])
            line.append(entry.text)
            lines.append(' '.join(line))
        return lines

        current_level = items
        for b in block:
            if b not in current_level:
                current_level[b] = collections.OrderedDict()
            current_level = current_level[b]
        for c in obj.children:
            if c.raw not in current_level:
                current_level[c.raw] = collections.OrderedDict()
    def to_block(self, section):
        return '\n'.join([item.raw for item in section])

    def flatten(self, data, obj=None):
        if obj is None:
            obj = list()
        for k, v in data.items():
            obj.append(k)
            self.flatten(v, obj)
        return obj
    def expand(self, objs):
        visited = set()
        expanded = list()
        for o in objs:
            for p in o.parents:
                if p not in visited:
                    visited.add(p)
                    expanded.append(p)
            expanded.append(o)
            visited.add(o)
        return expanded

    def get_object(self, path):
        for item in self.items:
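The rewritten expand() flattens matched config objects into parents-first order, with the visited set ensuring a parent shared by several children is emitted only once. The same shape on plain tuples (data layout assumed for illustration):

    def expand(objs):
        # objs: (text, parents) pairs, parents ordered outermost-first
        visited = set()
        expanded = []
        for text, parents in objs:
            for p in parents:
                if p not in visited:
                    visited.add(p)
                    expanded.append(p)
            expanded.append(text)
            visited.add(text)
        return expanded

    items = [('ip address 1.1.1.1/24', ('interface eth0',)),
             ('no shutdown', ('interface eth0',))]
    print(expand(items))
    # ['interface eth0', 'ip address 1.1.1.1/24', 'no shutdown']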
@@ -229,13 +239,20 @@ class NetworkConfig(object):
        if self._device_os == 'junos':
            return updates

        diffs = dict()
        changes = list()
        for update in updates:
            if replace == 'block' and update.parents:
                update = update.parents[-1]
            self.expand(update, diffs)
            if replace == 'block':
                if update.parents:
                    changes.append(update.parents[-1])
                    for child in update.parents[-1].children:
                        changes.append(child)
                else:
                    changes.append(update)
            else:
                changes.append(update)
        updates = self.expand(changes)

        return self.flatten(diffs)
        return [item.text for item in self.expand(updates)]

    def _build_children(self, children, parents=None, offset=0):
        for item in children:
@@ -259,6 +276,8 @@ class NetworkConfig(object):
                    config.append(line)
                    if parent:
                        parent.children.append(line)
                        if parent.parents:
                            line.parents.append(*parent.parents)
                        line.parents.append(parent)
                    parent = line
                    offset += self.indent
@@ -382,7 +401,7 @@ class Conditional(object):
        return self.number(value) <= self.value

    def contains(self, value):
        return self.value in value
        return str(self.value) in value
@@ -35,7 +35,8 @@ NET_COMMON_ARGS = dict(
    transport=dict(default='cli', choices=['cli', 'nxapi']),
    use_ssl=dict(default=False, type='bool'),
    validate_certs=dict(default=True, type='bool'),
    provider=dict(type='dict')
    provider=dict(type='dict'),
    timeout=dict(default=10, type='int')
)

NXAPI_COMMAND_TYPES = ['cli_show', 'cli_show_ascii', 'cli_conf', 'bash']
@@ -168,11 +169,17 @@ class Cli(object):

        username = self.module.params['username']
        password = self.module.params['password']
        timeout = self.module.params['timeout']
        key_filename = self.module.params['ssh_keyfile']

        allow_agent = (key_filename is not None) or (key_filename is None and password is None)

        try:
            self.shell = Shell(kickstart=False, prompts_re=CLI_PROMPTS_RE, errors_re=CLI_ERRORS_RE)
            self.shell.open(host, port=port, username=username, password=password, key_filename=key_filename)
            self.shell = Shell(kickstart=False, prompts_re=CLI_PROMPTS_RE,
                               errors_re=CLI_ERRORS_RE)
            self.shell.open(host, port=port, username=username,
                            password=password, key_filename=key_filename,
                            allow_agent=allow_agent, timeout=timeout)
        except ShellError:
            e = get_exception()
            msg = 'failed to connect to %s:%s - %s' % (host, port, str(e))
@@ -114,7 +114,20 @@ class Rest(object):
        if not port:
            port = 80

        self.baseurl = '%s://%s:%s/rest/v1' % (proto, host, port)
        baseurl = '%s://%s:%s' % (proto, host, port)
        headers = dict({'Content-Type': 'application/x-www-form-urlencoded'})
        # Get a cookie and save it the rest of the operations.
        url = '%s/%s' % (baseurl, 'login')
        data = 'username=%s&password=%s' % (self.module.params['username'],
                                            self.module.params['password'])
        resp, hdrs = fetch_url(self.module, url, data=data,
                               headers=headers, method='POST')

        # Update the base url for the rest of the operations.
        self.baseurl = '%s/rest/v1' % (baseurl)
        self.headers = dict({'Content-Type': 'application/json',
                             'Accept': 'application/json',
                             'Cookie': resp.headers.get('Set-Cookie')})

    def _url_builder(self, path):
        if path[0] == '/':
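The rewritten constructor logs in once with a form-encoded POST, captures the Set-Cookie header, and replays it on every later JSON call, so each Rest instance carries an authenticated session. The same flow with only the standard library (host and paths are illustrative, not the device's real endpoints):

    import urllib.request

    login = urllib.request.Request(
        'http://switch.example:80/login',
        data=b'username=admin&password=secret',
        headers={'Content-Type': 'application/x-www-form-urlencoded'},
        method='POST')
    resp = urllib.request.urlopen(login)
    cookie = resp.headers.get('Set-Cookie')   # session token issued at login

    req = urllib.request.Request(
        'http://switch.example:80/rest/v1/system',
        headers={'Accept': 'application/json', 'Cookie': cookie})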
@@ -127,7 +140,7 @@ class Rest(object):

        if headers is None:
            headers = dict()
        headers.update({'Content-Type': 'application/json'})
        headers.update(self.headers)

        resp, hdrs = fetch_url(self.module, url, data=data, headers=headers,
                               method=method)
@@ -32,7 +32,6 @@ import os
import re
from uuid import UUID

from ansible import __version__
from ansible.module_utils.basic import BOOLEANS

FINAL_STATUSES = ('ACTIVE', 'ERROR')
@@ -264,7 +263,7 @@ def rax_required_together():

def setup_rax_module(module, rax_module, region_required=True):
    """Set up pyrax in a standard way for all modules"""
    rax_module.USER_AGENT = 'ansible/%s %s' % (__version__,
    rax_module.USER_AGENT = 'ansible/%s %s' % (module.ansible_version,
                                               rax_module.USER_AGENT)

    api_key = module.params.get('api_key')
@@ -27,6 +27,7 @@ except ImportError:

try:
    import paramiko
    from paramiko.ssh_exception import AuthenticationException
    HAS_PARAMIKO = True
except ImportError:
    HAS_PARAMIKO = False
@@ -101,12 +102,17 @@ class Shell(object):
        if not look_for_keys:
            look_for_keys = password is None

        self.ssh.connect(host, port=port, username=username, password=password,
                         timeout=timeout, look_for_keys=look_for_keys, pkey=pkey,
                         key_filename=key_filename, allow_agent=allow_agent)
        try:
            self.ssh.connect(host, port=port, username=username, password=password,
                             timeout=timeout, look_for_keys=look_for_keys, pkey=pkey,
                             key_filename=key_filename, allow_agent=allow_agent)

        self.shell = self.ssh.invoke_shell()
        self.shell.settimeout(timeout)
            self.shell = self.ssh.invoke_shell()
            self.shell.settimeout(timeout)
        except socket.gaierror:
            raise ShellError("unable to resolve host name")
        except AuthenticationException:
            raise ShellError('Unable to authenticate to remote device')

        if self.kickstart:
            self.shell.sendall("\n")
@@ -81,7 +81,6 @@
# agrees to be bound by the terms and conditions of this License
# Agreement.

import httplib
import netrc
import os
import re
@@ -91,7 +90,13 @@ import platform
import tempfile
import base64

from ansible.module_utils.basic import get_distribution
from ansible.module_utils.basic import get_distribution, get_exception

try:
    import httplib
except ImportError:
    # Python 3
    import http.client as httplib

try:
    import urllib2
@@ -626,6 +631,13 @@ class SSLValidationHandler(urllib2.BaseHandler):
        use_proxy = self.detect_no_proxy(req.get_full_url())

        if not use_proxy:
            try:
                # cleanup the temp file created, don't worry
                # if it fails for some reason
                os.remove(tmp_ca_cert_path)
            except:
                pass

            # ignore proxy settings for this host request
            return req
@@ -664,7 +676,8 @@ class SSLValidationHandler(urllib2.BaseHandler):
                # close the ssl connection
                #ssl_s.unwrap()
                s.close()
            except (ssl.SSLError, socket.error), e:
            except (ssl.SSLError, socket.error):
                e = get_exception()
                # fail if we tried all of the certs but none worked
                if 'connection refused' in str(e).lower():
                    raise ConnectionError('Failed to connect to %s:%s.' % (self.hostname, self.port))
@@ -887,27 +900,34 @@ def fetch_url(module, url, data=None, headers=None, method=None,
                        url_password=password, http_agent=http_agent, force_basic_auth=force_basic_auth,
                        follow_redirects=follow_redirects)
        info.update(r.info())
        info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), url=r.geturl(), status=r.getcode()))
    except NoSSLError, e:
        info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), url=r.geturl(), status=r.code))
    except NoSSLError:
        e = get_exception()
        distribution = get_distribution()
        if distribution is not None and distribution.lower() == 'redhat':
            module.fail_json(msg='%s. You can also install python-ssl from EPEL' % str(e))
        else:
            module.fail_json(msg='%s' % str(e))
    except (ConnectionError, ValueError), e:
    except (ConnectionError, ValueError):
        e = get_exception()
        module.fail_json(msg=str(e))
    except urllib2.HTTPError, e:
    except urllib2.HTTPError:
        e = get_exception()
        try:
            body = e.read()
        except AttributeError:
            body = ''
        info.update(dict(msg=str(e), status=e.code, body=body, **e.info()))
    except urllib2.URLError, e:
        info.update(dict(msg=str(e), body=body, **e.info()))
        info['status'] = e.code
    except urllib2.URLError:
        e = get_exception()
        code = int(getattr(e, 'code', -1))
        info.update(dict(msg="Request failed: %s" % str(e), status=code))
    except socket.error, e:
    except socket.error:
        e = get_exception()
        info.update(dict(msg="Connection failure: %s" % str(e), status=-1))
    except Exception, e:
    except Exception:
        e = get_exception()
        info.update(dict(msg="An unknown error occurred: %s" % str(e), status=-1))

    return r, info
@@ -46,7 +46,8 @@ def vca_argument_spec():
        api_version=dict(default=DEFAULT_VERSION),
        service_type=dict(default=DEFAULT_SERVICE_TYPE, choices=SERVICE_MAP.keys()),
        vdc_name=dict(),
        gateway_name=dict(default='gateway')
        gateway_name=dict(default='gateway'),
        verify_certs=dict(type='bool', default=True)
    )

class VcaAnsibleModule(AnsibleModule):
@@ -110,7 +111,7 @@ class VcaAnsibleModule(AnsibleModule):

    def create_instance(self):
        service_type = self.params.get('service_type', DEFAULT_SERVICE_TYPE)
        if service_type == 'vcd':
        if service_type == 'vcd':
            host = self.params['host']
        else:
            host = LOGIN_HOST[service_type]
@@ -130,8 +131,12 @@ class VcaAnsibleModule(AnsibleModule):
        service_type = self.params['service_type']
        password = self.params['password']

        if not self.vca.login(password=password):
            self.fail('Login to VCA failed', response=self.vca.response.content)
        login_org = None
        if service_type == 'vcd':
            login_org = self.params['org']

        if not self.vca.login(password=password, org=login_org):
            self.fail('Login to VCA failed', response=self.vca.response)

        try:
            method_name = 'login_%s' % service_type
@@ -140,7 +145,7 @@ class VcaAnsibleModule(AnsibleModule):
        except AttributeError:
            self.fail('no login method exists for service_type %s' % service_type)
        except VcaError, e:
            self.fail(e.message, response=self.vca.response.content, **e.kwargs)
            self.fail(e.message, response=self.vca.response, **e.kwargs)

    def login_vca(self):
        instance_id = self.params['instance_id']
@@ -155,14 +160,14 @@ class VcaAnsibleModule(AnsibleModule):

        org = self.params['org']
        if not org:
            raise VcaError('missing required or for service_type vchs')
            raise VcaError('missing required org for service_type vchs')

        self.vca.login_to_org(service_id, org)

    def login_vcd(self):
        org = self.params['org']
        if not org:
            raise VcaError('missing required or for service_type vchs')
            raise VcaError('missing required org for service_type vcd')

        if not self.vca.token:
            raise VcaError('unable to get token for service_type vcd')
@@ -1 +1 @@
Subproject commit 9eb2b557cd08f2a6d381ec0360fa47750146b65a
Subproject commit 29dfc6a5a14d4f9bda21276d04c492149b81b8b8

@@ -1 +1 @@
Subproject commit d9caac037cf10f0abaeff1430605387ab011d54f
Subproject commit 72f961ab96e863ad6bd1ca22fbc04563e783c6c5
@@ -160,12 +160,13 @@ class DataLoader():
        if not file_name or not isinstance(file_name, string_types):
            raise AnsibleParserError("Invalid filename: '%s'" % str(file_name))

        if not self.path_exists(file_name) or not self.is_file(file_name):
        b_file_name = to_bytes(file_name)
        if not self.path_exists(b_file_name) or not self.is_file(b_file_name):
            raise AnsibleFileNotFound("the file_name '%s' does not exist, or is not readable" % file_name)

        show_content = True
        try:
            with open(file_name, 'rb') as f:
            with open(b_file_name, 'rb') as f:
                data = f.read()
                if self._vault.is_encrypted(data):
                    data = self._vault.decrypt(data)
@@ -330,11 +331,11 @@ class DataLoader():

        try:
            with open(to_bytes(real_path), 'rb') as f:
                data = f.read()
                if self._vault.is_encrypted(data):
                if self._vault.is_encrypted(f):
                    # if the file is encrypted and no password was specified,
                    # the decrypt call would throw an error, but we check first
                    # since the decrypt function doesn't know the file name
                    data = f.read()
                    if not self._vault_password:
                        raise AnsibleParserError("A vault password must be specified to decrypt %s" % file_path)
@@ -30,6 +30,12 @@ from hashlib import sha256
from binascii import hexlify
from binascii import unhexlify

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()

# Note: Only used for loading obsolete VaultAES files. All files are written
# using the newer VaultAES256 which does not require md5
from hashlib import md5
@@ -70,6 +76,10 @@ try:
    HAS_PBKDF2HMAC = True
except ImportError:
    pass
except Exception as e:
    display.warning("Optional dependency 'cryptography' raised an exception, falling back to 'Crypto'")
    import traceback
    display.debug("Traceback from import of cryptography was {0}".format(traceback.format_exc()))

from ansible.compat.six import PY3
from ansible.utils.unicode import to_unicode, to_bytes
@@ -105,6 +115,12 @@ class VaultLib:
        :returns: True if it is recognized. Otherwise, False.
        """

        if hasattr(data, 'read'):
            current_position = data.tell()
            header_part = data.read(len(b_HEADER))
            data.seek(current_position)
            return self.is_encrypted(header_part)

        if to_bytes(data, errors='strict', encoding='utf-8').startswith(b_HEADER):
            return True
        return False
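is_encrypted() now accepts either bytes or an open file object; for the latter it reads just enough bytes to compare against the vault header and seeks back, so the caller's read position is untouched. The peek idiom by itself:

    import io

    HEADER = b'$ANSIBLE_VAULT'

    def peek_is_vault(f):
        pos = f.tell()                # remember the caller's position
        head = f.read(len(HEADER))    # read only the header-sized prefix
        f.seek(pos)                   # rewind before returning
        return head == HEADER

    buf = io.BytesIO(b'$ANSIBLE_VAULT;1.1;AES256\n...')
    print(peek_is_vault(buf), buf.tell())   # True 0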
@@ -445,7 +461,7 @@ class VaultEditor:
            os.chown(dest, prev.st_uid, prev.st_gid)

    def _editor_shell_command(self, filename):
        EDITOR = os.environ.get('EDITOR','vim')
        EDITOR = os.environ.get('EDITOR','vi')
        editor = shlex.split(EDITOR)
        editor.append(filename)
@@ -471,7 +487,7 @@ class VaultFile(object):
    # VaultFile a context manager instead (implement __enter__ and __exit__)
    def __del__(self):
        self.filehandle.close()
        os.unlink(self.tmplfile)
        os.unlink(self.tmpfile)

    def is_encrypted(self):
        peak = self.filehandle.readline()
@@ -19,7 +19,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from yaml.constructor import Constructor, ConstructorError
from yaml.constructor import SafeConstructor, ConstructorError
from yaml.nodes import MappingNode
from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode
from ansible.vars.unsafe_proxy import wrap_var
@@ -31,7 +31,7 @@ except ImportError:
    display = Display()


class AnsibleConstructor(Constructor):
class AnsibleConstructor(SafeConstructor):
    def __init__(self, file_name=None):
        self._ansible_file_name = file_name
        super(AnsibleConstructor, self).__init__()
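Switching the base class to SafeConstructor is a hardening change: PyYAML's full Constructor honors python/object tags and will call arbitrary Python callables named in untrusted YAML, while the safe variant confines documents to plain data types. A quick illustration of the difference at the library level:

    import yaml

    doc = '!!python/object/apply:os.getcwd []'
    print(yaml.load(doc, Loader=yaml.Loader))   # full loader: actually calls os.getcwd()
    yaml.load(doc, Loader=yaml.SafeLoader)      # safe loader: raises ConstructorError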
@@ -24,6 +24,7 @@ from ansible.compat.six import PY3

from ansible.parsing.yaml.objects import AnsibleUnicode, AnsibleSequence, AnsibleMapping
from ansible.vars.hostvars import HostVars
from ansible.vars.unsafe_proxy import AnsibleUnsafeText

class AnsibleDumper(yaml.SafeDumper):
    '''
@@ -45,6 +46,11 @@ AnsibleDumper.add_representer(
    represent_unicode,
)

AnsibleDumper.add_representer(
    AnsibleUnsafeText,
    represent_unicode,
)

AnsibleDumper.add_representer(
    HostVars,
    represent_hostvars,
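add_representer() teaches a Dumper subclass to serialize a type it would otherwise reject; here the new AnsibleUnsafeText entry makes the unsafe-text marker class dump as an ordinary string. The mechanism reduced to a standalone sketch:

    import yaml

    class UnsafeText(str):
        """marker subclass carrying no extra state"""

    def represent_unsafe(dumper, data):
        # emit the subclass exactly like a plain string
        return dumper.represent_str(str(data))

    yaml.SafeDumper.add_representer(UnsafeText, represent_unsafe)
    print(yaml.dump({'msg': UnsafeText('hello')}, Dumper=yaml.SafeDumper))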
@@ -61,6 +61,7 @@ class Playbook:
            self._basedir = os.path.normpath(os.path.join(self._basedir, os.path.dirname(file_name)))

        # set the loaders basedir
        cur_basedir = self._loader.get_basedir()
        self._loader.set_basedir(self._basedir)

        self._file_name = file_name
@@ -74,6 +75,8 @@ class Playbook:

        ds = self._loader.load_from_file(os.path.basename(file_name))
        if not isinstance(ds, list):
            # restore the basedir in case this error is caught and handled
            self._loader.set_basedir(cur_basedir)
            raise AnsibleParserError("playbooks must be a list of plays", obj=ds)

        # Parse the playbook entries. For plays, we simply parse them
@@ -81,6 +84,8 @@ class Playbook:
        # PlaybookInclude() object
        for entry in ds:
            if not isinstance(entry, dict):
                # restore the basedir in case this error is caught and handled
                self._loader.set_basedir(cur_basedir)
                raise AnsibleParserError("playbook entries must be either a valid play or an include statement", obj=entry)

            if 'include' in entry:
@@ -93,6 +98,9 @@ class Playbook:
                entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader)
                self._entries.append(entry_obj)

        # we're done, so restore the old basedir in the loader
        self._loader.set_basedir(cur_basedir)

    def get_loader(self):
        return self._loader
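Taken together, the three Playbook hunks save the loader's previous basedir, restore it on both early error paths, and restore it again after a successful parse. The guarantee they hand-roll is the one try/finally states directly; a compact sketch of the equivalent shape:

    def load_with_basedir(loader, new_basedir, parse):
        cur_basedir = loader.get_basedir()
        loader.set_basedir(new_basedir)
        try:
            return parse()                    # may raise a parser error
        finally:
            loader.set_basedir(cur_basedir)   # restored on success and failure alike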
@@ -31,7 +31,7 @@ from ansible.compat.six import iteritems, string_types

from jinja2.exceptions import UndefinedError

from ansible.errors import AnsibleParserError
from ansible.errors import AnsibleParserError, AnsibleUndefinedVariable
from ansible.parsing.dataloader import DataLoader
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.utils.boolean import boolean
@@ -86,6 +86,19 @@ class Base:
        # and init vars, avoid using defaults in field declaration as it lives across plays
        self.vars = dict()

    def dump_me(self, depth=0):
        if depth == 0:
            print("DUMPING OBJECT ------------------------------------------------------")
        print("%s- %s (%s, id=%s)" % (" " * depth, self.__class__.__name__, self, id(self)))
        if hasattr(self, '_block') and self.__class__.__name__ == 'Task' and self._block:
            self._block.dump_me(depth+2)
        for attr_name in ('_parent_block', '_task_include'):
            if hasattr(self, attr_name):
                attr = getattr(self, attr_name)
                if attr is not None:
                    attr.dump_me(depth+2)
        if hasattr(self, '_play') and self._play:
            self._play.dump_me(depth+2)

    # The following three functions are used to programatically define data
    # descriptors (aka properties) for the Attributes of all of the playbook
@@ -386,7 +399,7 @@ class Base:
            except (TypeError, ValueError) as e:
                raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s."
                                         " Error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds())
            except UndefinedError as e:
            except (AnsibleUndefinedVariable, UndefinedError) as e:
                if templar._fail_on_undefined_errors and name != 'name':
                    raise AnsibleParserError("the field '%s' has an invalid value, which appears to include a variable that is undefined."
                                             " The error was: %s" % (name,e), obj=self.get_ds())
@@ -65,8 +65,6 @@ class Block(Base, Become, Conditional, Taggable):

        all_vars = self.vars.copy()

        if self._role:
            all_vars.update(self._role.get_vars(self._dep_chain))
        if self._parent_block:
            all_vars.update(self._parent_block.get_vars())
        if self._task_include:
@@ -271,9 +269,6 @@ class Block(Base, Become, Conditional, Taggable):
        if self._parent_block is not None:
            if not self._parent_block.evaluate_conditional(templar, all_vars):
                return False
        elif self._role is not None:
            if not self._role.evaluate_conditional(templar, all_vars):
                return False
        return super(Block, self).evaluate_conditional(templar, all_vars)

    def set_loader(self, loader):
@@ -388,3 +383,24 @@ class Block(Base, Become, Conditional, Taggable):
    def has_tasks(self):
        return len(self.block) > 0 or len(self.rescue) > 0 or len(self.always) > 0

    def get_include_params(self):
        if self._parent:
            return self._parent.get_include_params()
        else:
            return dict()

    def all_parents_static(self):
        '''
        Determine if all of the parents of this block were statically loaded
        or not. Since Task/TaskInclude objects may be in the chain, they simply
        call their parents all_parents_static() method. Only Block objects in
        the chain check the statically_loaded value of the parent.
        '''
        from ansible.playbook.task_include import TaskInclude
        if self._task_include and not self._task_include.statically_loaded:
            return False
        elif self._parent_block:
            return self._parent_block.all_parents_static()

        return True
@@ -19,6 +19,10 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import ast
import re

from jinja2.compiler import generate
from jinja2.exceptions import UndefinedError

from ansible.compat.six import text_type
@@ -26,6 +30,10 @@ from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.playbook.attribute import FieldAttribute
from ansible.template import Templar

LOOKUP_REGEX = re.compile(r'lookup\s*\(')
VALID_VAR_REGEX = re.compile("^[_A-Za-z][_a-zA-Z0-9]*$")
DEFINED_REGEX = re.compile(r'(hostvars\[.+\]|[\w_]+)\s+(not\s+is|is|is\s+not)\s+(defined|undefined)')

class Conditional:

    '''
@@ -50,6 +58,18 @@ class Conditional:
        if not isinstance(value, list):
            setattr(self, name, [ value ])

    def extract_defined_undefined(self, conditional):
        results = []

        cond = conditional
        m = DEFINED_REGEX.search(cond)
        while m:
            results.append(m.groups())
            cond = cond[m.end():]
            m = DEFINED_REGEX.search(cond)

        return results

    def evaluate_conditional(self, templar, all_vars):
        '''
        Loops through the conditionals set on this object, returning
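extract_defined_undefined() repeatedly applies DEFINED_REGEX, collecting a (variable, logic, state) triple for each defined/undefined test in the conditional and advancing past each match so text is not rescanned. Its behavior on a sample conditional:

    import re

    DEFINED_REGEX = re.compile(r'(hostvars\[.+\]|[\w_]+)\s+(not\s+is|is|is\s+not)\s+(defined|undefined)')

    def extract_defined_undefined(conditional):
        results = []
        cond = conditional
        m = DEFINED_REGEX.search(cond)
        while m:
            results.append(m.groups())
            cond = cond[m.end():]          # continue after the previous match
            m = DEFINED_REGEX.search(cond)
        return results

    print(extract_defined_undefined("foo is defined and bar is not defined"))
    # [('foo', 'is', 'defined'), ('bar', 'is not', 'defined')]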
@@ -83,21 +103,75 @@ class Conditional:
        if conditional is None or conditional == '':
            return True

        if conditional in all_vars and '-' not in text_type(all_vars[conditional]):
        # pull the "bare" var out, which allows for nested conditionals
        # and things like:
        # - assert:
        #     that:
        #     - item
        #   with_items:
        #   - 1 == 1
        if conditional in all_vars and VALID_VAR_REGEX.match(conditional):
            conditional = all_vars[conditional]

        # make sure the templar is using the variables specified with this method
        templar.set_available_variables(variables=all_vars)

        try:
            conditional = templar.template(conditional)
            # if the conditional is "unsafe", disable lookups
            disable_lookups = hasattr(conditional, '__UNSAFE__')
            conditional = templar.template(conditional, disable_lookups=disable_lookups)
            if not isinstance(conditional, text_type) or conditional == "":
                return conditional

            # a Jinja2 evaluation that results in something Python can eval!
            # update the lookups flag, as the string returned above may now be unsafe
            # and we don't want future templating calls to do unsafe things
            disable_lookups |= hasattr(conditional, '__UNSAFE__')

            # First, we do some low-level jinja2 parsing involving the AST format of the
            # statement to ensure we don't do anything unsafe (using the disable_lookup flag above)
            class CleansingNodeVisitor(ast.NodeVisitor):
                def generic_visit(self, node, inside_call=False, inside_yield=False):
                    if isinstance(node, ast.Call):
                        inside_call = True
                    elif isinstance(node, ast.Yield):
                        inside_yield = True
                    elif isinstance(node, ast.Str):
                        if disable_lookups:
                            if inside_call and node.s.startswith("__"):
                                # calling things with a dunder is generally bad at this point...
                                raise AnsibleError(
                                    "Invalid access found in the conditional: '%s'" % conditional
                                )
                            elif inside_yield:
                                # we're inside a yield, so recursively parse and traverse the AST
                                # of the result to catch forbidden syntax from executing
                                parsed = ast.parse(node.s, mode='exec')
                                cnv = CleansingNodeVisitor()
                                cnv.visit(parsed)
                    # iterate over all child nodes
                    for child_node in ast.iter_child_nodes(node):
                        self.generic_visit(
                            child_node,
                            inside_call=inside_call,
                            inside_yield=inside_yield
                        )
            try:
                e = templar.environment.overlay()
                e.filters.update(templar._get_filters())
                e.tests.update(templar._get_tests())

                res = e._parse(conditional, None, None)
                res = generate(res, e, None, None)
                parsed = ast.parse(res, mode='exec')

                cnv = CleansingNodeVisitor()
                cnv.visit(parsed)
            except Exception as e:
                raise AnsibleError("Invalid conditional detected: %s" % e)

            # and finally we generate and template the presented string and look at the resulting string
            presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
            conditional = templar.template(presented)
            val = conditional.strip()
            val = templar.template(presented, disable_lookups=disable_lookups).strip()
            if val == "True":
                return True
            elif val == "False":
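After the AST pass, the conditional is wrapped in a literal `{% if %}` template and rendered, so Jinja2 itself performs the boolean evaluation and the Python side only compares the rendered result against the strings 'True' and 'False'. The trick in plain Jinja2:

    from jinja2 import Environment

    def evaluate(conditional, variables):
        env = Environment()
        presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
        val = env.from_string(presented).render(**variables).strip()
        return val == "True"

    print(evaluate("x > 3", {'x': 5}))   # True
    print(evaluate("x > 3", {'x': 1}))   # False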
@@ -105,14 +179,33 @@ class Conditional:
            else:
                raise AnsibleError("unable to evaluate conditional: %s" % original)
        except (AnsibleUndefinedVariable, UndefinedError) as e:
            # the templating failed, meaning most likely a
            # variable was undefined. If we happened to be
            # looking for an undefined variable, return True,
            # otherwise fail
            if "is undefined" in original:
                return True
            elif "is defined" in original:
                return False
            else:
                raise AnsibleError("error while evaluating conditional (%s): %s" % (original, e))
            # the templating failed, meaning most likely a variable was undefined. If we happened
            # to be looking for an undefined variable, return True, otherwise fail
            try:
                # first we extract the variable name from the error message
                var_name = re.compile(r"'(hostvars\[.+\]|[\w_]+)' is undefined").search(str(e)).groups()[0]
                # next we extract all defined/undefined tests from the conditional string
                def_undef = self.extract_defined_undefined(conditional)
                # then we loop through these, comparing the error variable name against
                # each def/undef test we found above. If there is a match, we determine
                # whether the logic/state mean the variable should exist or not and return
                # the corresponding True/False
                for (du_var, logic, state) in def_undef:
                    # when we compare the var names, normalize quotes because something
                    # like hostvars['foo'] may be tested against hostvars["foo"]
                    if var_name.replace("'", '"') == du_var.replace("'", '"'):
                        # the should exist is a xor test between a negation in the logic portion
                        # against the state (defined or undefined)
                        should_exist = ('not' in logic) != (state == 'defined')
                        if should_exist:
                            return False
                        else:
                            return True
                # as nothing above matched the failed var name, re-raise here to
                # trigger the AnsibleUndefinedVariable exception again below
                raise
            except Exception as new_e:
                raise AnsibleUndefinedVariable(
                    "error while evaluating conditional (%s): %s" % (original, e)
                )
lib/ansible/playbook/handler_task_include.py (new file, 33 lines)
@@ -0,0 +1,33 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.errors import AnsibleError
#from ansible.inventory.host import Host
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.handler import Handler

class HandlerTaskInclude(Handler, TaskInclude):

    @staticmethod
    def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
        t = HandlerTaskInclude(block=block, role=role, task_include=task_include)
        return t.load_data(data, variable_manager=variable_manager, loader=loader)
@@ -38,7 +38,7 @@ def load_list_of_blocks(ds, play, parent_block=None, role=None, task_include=Non
    return a list of Block() objects, where implicit blocks
    are created for each bare Task.
    '''


    # we import here to prevent a circular dependency with imports
    from ansible.playbook.block import Block
@@ -81,6 +81,7 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
    from ansible.playbook.handler import Handler
    from ansible.playbook.task import Task
    from ansible.playbook.task_include import TaskInclude
    from ansible.playbook.handler_task_include import HandlerTaskInclude
    from ansible.template import Templar

    assert isinstance(ds, list)
@@ -95,7 +96,7 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
                play=play,
                parent_block=block,
                role=role,
                task_include=task_include,
                task_include=None,
                use_handlers=use_handlers,
                variable_manager=variable_manager,
                loader=loader,
@@ -103,50 +104,56 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
            task_list.append(t)
        else:
            if 'include' in task_ds:
                t = TaskInclude.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)

                if use_handlers:
                    t = HandlerTaskInclude.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
                else:
                    t = TaskInclude.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
                all_vars = variable_manager.get_vars(loader=loader, play=play, task=t)
                templar = Templar(loader=loader, variables=all_vars)

                # check to see if this include is static, which can be true if:
                # 1. the user set the 'static' option to true
                # check to see if this include is dynamic or static:
                # 1. the user has set the 'static' option to false or true
                # 2. one of the appropriate config options was set
                # 3. the included file name contains no variables, and has no loop
                is_static = t.static or \
                            C.DEFAULT_TASK_INCLUDES_STATIC or \
                            (use_handlers and C.DEFAULT_HANDLER_INCLUDES_STATIC) or \
                            not templar._contains_vars(t.args.get('_raw_params')) and t.loop is None
                if t.static is not None:
                    is_static = t.static
                else:
                    is_static = C.DEFAULT_TASK_INCLUDES_STATIC or \
                                (use_handlers and C.DEFAULT_HANDLER_INCLUDES_STATIC) or \
                                (not templar._contains_vars(t.args['_raw_params']) and t.all_parents_static() and not t.loop)

                if is_static:
                    if t.loop is not None:
                        raise AnsibleParserError("You cannot use 'static' on an include with a loop", obj=task_ds)

                    # FIXME: all of this code is very similar (if not identical) to that in
                    #        plugins/strategy/__init__.py, and should be unified to avoid
                    #        patches only being applied to one or the other location
                    if task_include:
                        # handle relative includes by walking up the list of parent include
                        # tasks and checking the relative result to see if it exists
                        parent_include = task_include
                        cumulative_path = None
                        while parent_include is not None:
                            parent_include_dir = templar.template(os.path.dirname(parent_include.args.get('_raw_params')))
                            if cumulative_path is None:
                                cumulative_path = parent_include_dir
                            elif not os.path.isabs(cumulative_path):
                                cumulative_path = os.path.join(parent_include_dir, cumulative_path)
                            include_target = templar.template(t.args['_raw_params'])
                            if t._role:
                                new_basedir = os.path.join(t._role._role_path, 'tasks', cumulative_path)
                                include_file = loader.path_dwim_relative(new_basedir, 'tasks', include_target)
                            else:
                                include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target)
                    # we set a flag to indicate this include was static
                    t.statically_loaded = True

                            if os.path.exists(include_file):
                                break
                            else:
                                parent_include = parent_include._task_include
                    else:
                    # handle relative includes by walking up the list of parent include
                    # tasks and checking the relative result to see if it exists
                    parent_include = task_include
                    cumulative_path = None

                    found = False
                    while parent_include is not None:
                        parent_include_dir = templar.template(os.path.dirname(parent_include.args.get('_raw_params')))
                        if cumulative_path is None:
                            cumulative_path = parent_include_dir
                        elif not os.path.isabs(cumulative_path):
                            cumulative_path = os.path.join(parent_include_dir, cumulative_path)
                        include_target = templar.template(t.args['_raw_params'])
                        if t._role:
                            new_basedir = os.path.join(t._role._role_path, 'tasks', cumulative_path)
                            include_file = loader.path_dwim_relative(new_basedir, 'tasks', include_target)
                        else:
                            include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target)

                        if os.path.exists(include_file):
                            found = True
                            break
                        else:
                            parent_include = parent_include._task_include

                    if not found:
                        try:
                            include_target = templar.template(t.args['_raw_params'])
                        except AnsibleUndefinedVariable as e:
@@ -171,7 +178,13 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
                         if data is None:
                             return []
                         elif not isinstance(data, list):
-                            raise AnsibleError("included task files must contain a list of tasks", obj=data)
+                            raise AnsibleParserError("included task files must contain a list of tasks", obj=data)
+
+                        # since we can't send callbacks here, we display a message directly in
+                        # the same fashion used by the on_include callback. We also do it here,
+                        # because the recursive nature of helper methods means we may be loading
+                        # nested includes, and we want the include order printed correctly
+                        display.vv("statically included: %s" % include_file)
                     except AnsibleFileNotFound as e:
                         if t.static or \
                            C.DEFAULT_TASK_INCLUDES_STATIC or \
@@ -223,7 +236,6 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
                                 b.tags = list(set(b.tags).union(tags))
                     # END FIXME

-                    # FIXME: send callback here somehow...
                     # FIXME: handlers shouldn't need this special handling, but do
                     #        right now because they don't iterate blocks correctly
                     if use_handlers:
@@ -233,11 +245,11 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
                         task_list.extend(included_blocks)
                 else:
                     task_list.append(t)
-            elif use_handlers:
-                t = Handler.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
-                task_list.append(t)
             else:
-                t = Task.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
+                if use_handlers:
+                    t = Handler.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
+                else:
+                    t = Task.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
                 task_list.append(t)

     return task_list
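Condensing the new three-step resolution above into a standalone sketch (the flag parameters are stand-ins for the C.DEFAULT_* config constants; `t` mimics the parsed include task):

def resolve_is_static(t, use_handlers, templar,
                      task_includes_static=False,
                      handler_includes_static=False):
    # 1. an explicit `static: yes/no` on the include always wins
    if t.static is not None:
        return t.static
    # 2. otherwise the global config switches can force static
    if task_includes_static or (use_handlers and handler_includes_static):
        return True
    # 3. otherwise static only when nothing dynamic remains: no variables
    #    in the file name, no loop, and every parent include already static
    return (not templar._contains_vars(t.args['_raw_params'])
            and t.all_parents_static()
            and not t.loop)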
@@ -136,28 +136,6 @@ class Play(Base, Taggable, Become):

         return super(Play, self).preprocess_data(ds)

-    def _load_hosts(self, attr, ds):
-        '''
-        Loads the hosts from the given datastructure, which might be a list
-        or a simple string. We also switch integers in this list back to strings,
-        as the YAML parser will turn things that look like numbers into numbers.
-        '''
-
-        if isinstance(ds, (string_types, int)):
-            ds = [ ds ]
-
-        if not isinstance(ds, list):
-            raise AnsibleParserError("'hosts' must be specified as a list or a single pattern", obj=ds)
-
-        # YAML parsing of things that look like numbers may have
-        # resulted in integers showing up in the list, so convert
-        # them back to strings to prevent problems
-        for idx,item in enumerate(ds):
-            if isinstance(item, int):
-                ds[idx] = "%s" % item
-
-        return ds
-
     def _load_tasks(self, attr, ds):
         '''
         Loads a list of blocks from a list which may be mixed tasks/blocks.

@@ -265,7 +243,7 @@ class Play(Base, Taggable, Become):

         if len(self.roles) > 0:
             for r in self.roles:
-                block_list.extend(r.get_handler_blocks())
+                block_list.extend(r.get_handler_blocks(play=self))

         return block_list
@@ -21,7 +21,9 @@
 from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

+import os
 import pipes
+import pwd
 import random
 import re
 import string

@@ -29,11 +31,9 @@ import string
 from ansible.compat.six import iteritems, string_types
 from ansible import constants as C
 from ansible.errors import AnsibleError
-from ansible.playbook.attribute import Attribute, FieldAttribute
+from ansible.playbook.attribute import FieldAttribute
 from ansible.playbook.base import Base
 from ansible.template import Templar
-from ansible.utils.boolean import boolean
 from ansible.utils.unicode import to_unicode

 __all__ = ['PlayContext']

@@ -356,7 +356,7 @@ class PlayContext(Base):

         # and likewise for the remote user
         for user_var in MAGIC_VARIABLE_MAPPING.get('remote_user'):
-            if user_var in delegated_vars:
+            if user_var in delegated_vars and delegated_vars[user_var]:
                 break
         else:
             delegated_vars['ansible_user'] = task.remote_user or self.remote_user

@@ -411,6 +411,12 @@ class PlayContext(Base):
         if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None:
             new_info.port = int(C.DEFAULT_REMOTE_PORT)

+        # if the final connection type is local, reset the remote_user value
+        # to that of the currently logged in user, to ensure any become settings
+        # are obeyed correctly
+        if new_info.connection == 'local':
+            new_info.remote_user = pwd.getpwuid(os.getuid()).pw_name
+
         # special overrides for the connection setting
         if len(delegated_vars) > 0:
             # in the event that we were using local before make sure to reset the

@@ -447,16 +453,21 @@ class PlayContext(Base):
         success_key = None
         self.prompt = None

+        if executable is None:
+            executable = self.executable
+
         if self.become:
-
-            if not executable:
-                executable = self.executable
-
             becomecmd = None
             randbits = ''.join(random.choice(string.ascii_lowercase) for x in range(32))
             success_key = 'BECOME-SUCCESS-%s' % randbits
             success_cmd = pipes.quote('echo %s; %s' % (success_key, cmd))

+            if executable:
+                command = '%s -c %s' % (executable, success_cmd)
+            else:
+                command = success_cmd
+
             # set executable to use for the privilege escalation method, with various overrides
             exe = self.become_exe or \
                 getattr(self, '%s_exe' % self.become_method, None) or \

@@ -485,9 +496,9 @@ class PlayContext(Base):
                 # force quick error if password is required but not supplied, should prevent sudo hangs.
                 if self.become_pass:
                     prompt = '[sudo via ansible, key=%s] password: ' % randbits
-                    becomecmd = '%s %s -p "%s" -u %s %s -c %s' % (exe, flags.replace('-n',''), prompt, self.become_user, executable, success_cmd)
+                    becomecmd = '%s %s -p "%s" -u %s %s' % (exe, flags.replace('-n',''), prompt, self.become_user, command)
                 else:
-                    becomecmd = '%s %s -u %s %s -c %s' % (exe, flags, self.become_user, executable, success_cmd)
+                    becomecmd = '%s %s -u %s %s' % (exe, flags, self.become_user, command)

             elif self.become_method == 'su':

@@ -498,7 +509,7 @@ class PlayContext(Base):
                         return bool(SU_PROMPT_LOCALIZATIONS_RE.match(data))
                 prompt = detect_su_prompt

-                becomecmd = '%s %s %s -c %s' % (exe, flags, self.become_user, pipes.quote('%s -c %s' % (executable, success_cmd)))
+                becomecmd = '%s %s %s -c %s' % (exe, flags, self.become_user, pipes.quote(command))

             elif self.become_method == 'pbrun':

@@ -534,7 +545,7 @@ class PlayContext(Base):

                 exe = self.become_exe or 'dzdo'

-                becomecmd = '%s -u %s %s -c %s' % (exe, self.become_user, executable, success_cmd)
+                becomecmd = '%s -u %s %s' % (exe, self.become_user, command)

             else:
                 raise AnsibleError("Privilege escalation method not found: %s" % self.become_method)
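How the single `command` value now threads through every become method, shown with invented inputs; the success key is what lets Ansible spot a successful escalation in the output stream:

import pipes  # same stdlib helper play_context.py uses

cmd = 'echo hello'                       # hypothetical module command
success_key = 'BECOME-SUCCESS-example'   # normally 32 random letters
executable = '/bin/sh'

success_cmd = pipes.quote('echo %s; %s' % (success_key, cmd))
command = '%s -c %s' % (executable, success_cmd) if executable else success_cmd

# sudo appends `command` directly; su quotes the whole thing exactly once,
# where the old code double-wrapped executable and success_cmd:
print('su root -c %s' % pipes.quote(command))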
@@ -96,7 +96,7 @@ class PlaybookInclude(Base, Conditional, Taggable):
         # plays. If so, we can take a shortcut here and simply prepend them to
         # those attached to each block (if any)
         if forward_conditional:
-            for task_block in entry.tasks:
+            for task_block in entry.pre_tasks + entry.roles + entry.tasks + entry.post_tasks:
                 task_block.when = self.when[:] + task_block.when

         return pb
@@ -118,16 +118,6 @@ class Role(Base, Become, Conditional, Taggable):
         if role_include.role not in play.ROLE_CACHE:
             play.ROLE_CACHE[role_include.role] = dict()

-        if parent_role:
-            if parent_role.when:
-                new_when = parent_role.when[:]
-                new_when.extend(r.when or [])
-                r.when = new_when
-            if parent_role.tags:
-                new_tags = parent_role.tags[:]
-                new_tags.extend(r.tags or [])
-                r.tags = new_tags
-
         play.ROLE_CACHE[role_include.role][hashed_params] = r
         return r

@@ -311,12 +301,24 @@ class Role(Base, Become, Conditional, Taggable):
     def get_task_blocks(self):
         return self._task_blocks[:]

-    def get_handler_blocks(self):
+    def get_handler_blocks(self, play, dep_chain=None):
         block_list = []
+
+        # update the dependency chain here
+        if dep_chain is None:
+            dep_chain = []
+        new_dep_chain = dep_chain + [self]
+
         for dep in self.get_direct_dependencies():
-            dep_blocks = dep.get_handler_blocks()
+            dep_blocks = dep.get_handler_blocks(play=play, dep_chain=new_dep_chain)
             block_list.extend(dep_blocks)
-        block_list.extend(self._handler_blocks)
+
+        for task_block in self._handler_blocks:
+            new_task_block = task_block.copy()
+            new_task_block._dep_chain = new_dep_chain
+            new_task_block._play = play
+            block_list.append(new_task_block)
+
         return block_list

     def has_run(self, host):
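The behavioral shift above is that handler blocks are now handed out as per-play copies carrying their own dependency chain and play reference, instead of the shared block objects being reused. A minimal model of that copying (class and variable names are invented):

import copy

class FakeBlock(object):          # stand-in for playbook.block.Block
    def __init__(self):
        self._dep_chain = None
        self._play = None
    def copy(self):
        return copy.copy(self)

def get_handler_blocks_sketch(handler_blocks, play, dep_chain):
    new_chain = dep_chain + ['this_role']    # placeholder for `self`
    out = []
    for block in handler_blocks:
        nb = block.copy()                    # never mutate the shared block
        nb._dep_chain = new_chain
        nb._play = play
        out.append(nb)
    return out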
@@ -138,18 +138,22 @@ class RoleDefinition(Base, Become, Conditional, Taggable):
         # we always start the search for roles in the base directory of the playbook
         role_search_paths = [
             os.path.join(self._loader.get_basedir(), u'roles'),
-            self._loader.get_basedir(),
         ]

         # also search in the configured roles path
         if C.DEFAULT_ROLES_PATH:
             role_search_paths.extend(C.DEFAULT_ROLES_PATH)

-        # finally, append the roles basedir, if it was set, so we can
+        # next, append the roles basedir, if it was set, so we can
         # search relative to that directory for dependent roles
         if self._role_basedir:
             role_search_paths.append(self._role_basedir)

+        # finally as a last resort we look in the current basedir as set
+        # in the loader (which should be the playbook dir itself) but without
+        # the roles/ dir appended
+        role_search_paths.append(self._loader.get_basedir())
+
         # create a templar class to template the dependency names, in
         # case they contain variables
         if self._variable_manager is not None:
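Net effect of the reordering, with placeholder paths: the bare playbook directory drops from second place to a last resort, so roles/ and the configured paths win ties:

import os

playbook_dir = '/srv/playbook'                 # hypothetical loader basedir
configured = ['/etc/ansible/roles']            # hypothetical DEFAULT_ROLES_PATH
role_basedir = '/srv/playbook/roles/parent'    # set when resolving dependencies

role_search_paths = [os.path.join(playbook_dir, u'roles')]
role_search_paths.extend(configured)
if role_basedir:
    role_search_paths.append(role_basedir)
role_search_paths.append(playbook_dir)         # moved to last resort

print(role_search_paths)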
@@ -84,7 +84,7 @@ class Task(Base, Conditional, Taggable, Become):
     _notify = FieldAttribute(isa='list')
     _poll = FieldAttribute(isa='int')
     _register = FieldAttribute(isa='string')
-    _retries = FieldAttribute(isa='int', default=3)
+    _retries = FieldAttribute(isa='int')
     _until = FieldAttribute(isa='list', default=[])

     def __init__(self, block=None, role=None, task_include=None):

@@ -105,7 +105,7 @@ class Task(Base, Conditional, Taggable, Become):
     def get_name(self):
         ''' return the name of the task '''

-        if self._role and self.name:
+        if self._role and self.name and ("%s : " % self._role._role_name) not in self.name:
             return "%s : %s" % (self._role.get_name(), self.name)
         elif self.name:
             return self.name

@@ -196,7 +196,7 @@ class Task(Base, Conditional, Taggable, Become):
         if 'vars' in ds:
             # _load_vars is defined in Base, and is used to load a dictionary
             # or list of dictionaries in a standard way
-            new_ds['vars'] = self._load_vars(None, ds.pop('vars'))
+            new_ds['vars'] = self._load_vars(None, ds.get('vars'))
         else:
             new_ds['vars'] = dict()

@@ -245,13 +245,6 @@ class Task(Base, Conditional, Taggable, Become):

         super(Task, self).post_validate(templar)

-    def _post_validate_register(self, attr, value, templar):
-        '''
-        Override post validation for the register args field, which is not
-        supposed to be templated
-        '''
-        return value
-
     def _post_validate_loop_args(self, attr, value, templar):
         '''
         Override post validation for the loop args field, which is templated

@@ -454,3 +447,42 @@ class Task(Base, Conditional, Taggable, Become):
         '''
         return self._get_parent_attribute('any_errors_fatal')
+
+    def _get_attr_loop(self):
+        return self._attributes['loop']
+
+    def _get_attr_loop_control(self):
+        return self._attributes['loop_control']
+
+    def get_dep_chain(self):
+        if self._parent:
+            return self._parent.get_dep_chain()
+        else:
+            return None
+
+    def get_search_path(self):
+        '''
+        Return the list of paths you should search for files, in order.
+        This follows role/playbook dependency chain.
+        '''
+        path_stack = []
+
+        dep_chain = self.get_dep_chain()
+        # inside role: add the dependency chain from current to dependant
+        if dep_chain:
+            path_stack.extend(reversed([x._role_path for x in dep_chain]))
+
+        # add path of task itself, unless it is already in the list
+        task_dir = os.path.dirname(self.get_path())
+        if task_dir not in path_stack:
+            path_stack.append(task_dir)
+
+        return path_stack
+
+    def all_parents_static(self):
+        if self._task_include and not self._task_include.statically_loaded:
+            return False
+        elif self._block:
+            return self._block.all_parents_static()
+
+        return True
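What the new get_search_path() yields for an assumed two-role dependency chain: the current role's path is consulted first, the task's own directory last (all paths invented):

import os

dep_chain_paths = ['/srv/roles/common', '/srv/roles/web']  # hypothetical _role_path values
task_path = '/srv/roles/web/tasks/main.yml'                # hypothetical get_path()

path_stack = list(reversed(dep_chain_paths))
task_dir = os.path.dirname(task_path)
if task_dir not in path_stack:
    path_stack.append(task_dir)

print(path_stack)
# ['/srv/roles/web', '/srv/roles/common', '/srv/roles/web/tasks']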
@@ -41,13 +41,22 @@ class TaskInclude(Task):
     # =================================================================================
     # ATTRIBUTES

-    _static = FieldAttribute(isa='bool', default=False)
+    _static = FieldAttribute(isa='bool', default=None)
+
+    def __init__(self, block=None, role=None, task_include=None):
+        super(TaskInclude, self).__init__(block=block, role=role, task_include=task_include)
+        self.statically_loaded = False

     @staticmethod
     def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
         t = TaskInclude(block=block, role=role, task_include=task_include)
         return t.load_data(data, variable_manager=variable_manager, loader=loader)

+    def copy(self, exclude_block=False):
+        new_me = super(TaskInclude, self).copy(exclude_block=exclude_block)
+        new_me.statically_loaded = self.statically_loaded
+        return new_me
+
     def get_vars(self):
         '''
         We override the parent Task() classes get_vars here because
@@ -145,15 +145,15 @@ class PluginLoader:
     def _get_package_paths(self):
         ''' Gets the path of a Python package '''

-        paths = []
         if not self.package:
             return []
         if not hasattr(self, 'package_path'):
             m = __import__(self.package)
             parts = self.package.split('.')[1:]
-            self.package_path = os.path.join(os.path.dirname(m.__file__), *parts)
-        paths.extend(self._all_directories(self.package_path))
-        return paths
+            for parent_mod in parts:
+                m = getattr(m, parent_mod)
+            self.package_path = os.path.dirname(m.__file__)
+        return self._all_directories(self.package_path)

     def _get_paths(self):
         ''' Return a list of paths to search for plugins in '''

@@ -353,6 +353,7 @@ class PluginLoader:
     def all(self, *args, **kwargs):
         ''' instantiates all plugins with the same arguments '''

+        path_only = kwargs.pop('path_only', False)
         class_only = kwargs.pop('class_only', False)
         all_matches = []

@@ -364,6 +365,10 @@ class PluginLoader:
             if '__init__' in name:
                 continue

+            if path_only:
+                yield path
+                continue
+
             if path not in self._module_cache:
                 self._module_cache[path] = self._load_module_source(name, path)
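The new path_only flag short-circuits all() into yielding plugin file paths without importing each module; the _clean_returned_data change later in this diff relies on it. A usage sketch, assuming the module-level connection_loader instance this file exposes:

from ansible.plugins import connection_loader

for conn_path in connection_loader.all(path_only=True):
    print(conn_path)   # e.g. .../plugins/connection/ssh.py, no import performed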
@@ -30,13 +30,16 @@ import tempfile
 import time
 from abc import ABCMeta, abstractmethod

-from ansible.compat.six import binary_type, text_type, iteritems, with_metaclass
-
 from ansible import constants as C
+from ansible.compat.six import binary_type, string_types, text_type, iteritems, with_metaclass
 from ansible.errors import AnsibleError, AnsibleConnectionFailure
 from ansible.executor.module_common import modify_module
+from ansible.playbook.play_context import MAGIC_VARIABLE_MAPPING
+from ansible.release import __version__
 from ansible.parsing.utils.jsonify import jsonify
 from ansible.utils.unicode import to_bytes, to_unicode
+from ansible.vars.unsafe_proxy import wrap_var


 try:
     from __main__ import display

@@ -240,7 +243,8 @@ class ActionBase(with_metaclass(ABCMeta, object)):
             raise AnsibleConnectionFailure(output)

         try:
-            rc = self._connection._shell.join_path(result['stdout'].strip(), u'').splitlines()[-1]
+            stdout_parts = result['stdout'].strip().split('%s=' % basefile, 1)
+            rc = self._connection._shell.join_path(stdout_parts[-1], u'').splitlines()[-1]
         except IndexError:
             # stdout was empty or just space, set to / to trigger error in next if
             rc = '/'
@@ -291,7 +295,29 @@ class ActionBase(with_metaclass(ABCMeta, object)):

         return remote_path

-    def _fixup_perms(self, remote_path, remote_user, execute=False, recursive=True):
+    def _fixup_perms(self, remote_path, remote_user, execute=True, recursive=True):
         """
         We need the files we upload to be readable (and sometimes executable)
         by the user being sudo'd to but we want to limit other people's access
         (because the files could contain passwords or other private
         information.
+
+        Deprecated in favor of _fixup_perms2. Ansible code has been updated to
+        use _fixup_perms2. This code is maintained to provide partial support
+        for custom actions (non-recursive mode only).
+
+        """
+
+        display.deprecated('_fixup_perms is deprecated. Use _fixup_perms2 instead.', version='2.4', removed=False)
+
+        if recursive:
+            raise AnsibleError('_fixup_perms with recursive=True (the default) is no longer supported. ' +
+                               'Use _fixup_perms2 if support for previous releases is not required. '
+                               'Otherwise use fixup_perms with recursive=False.')
+
+        return self._fixup_perms2([remote_path], remote_user, execute)
+
+    def _fixup_perms2(self, remote_paths, remote_user, execute=True):
+        """
+        We need the files we upload to be readable (and sometimes executable)
+        by the user being sudo'd to but we want to limit other people's access

@@ -299,17 +325,17 @@ class ActionBase(with_metaclass(ABCMeta, object)):
         information. We achieve this in one of these ways:

         * If no sudo is performed or the remote_user is sudo'ing to
-          themselves, we don't have to change permisions.
+          themselves, we don't have to change permissions.
         * If the remote_user sudo's to a privileged user (for instance, root),
           we don't have to change permissions
-        * If the remote_user is a privileged user and sudo's to an
-          unprivileged user then we change the owner of the file to the
-          unprivileged user so they can read it.
-        * If the remote_user is an unprivieged user and we're sudo'ing to
-          a second unprivileged user then we attempt to grant the second
-          unprivileged user access via file system acls.
-        * If granting file system acls fails we can set the file to be world
-          readable so that the second unprivileged user can read the file.
+        * If the remote_user sudo's to an unprivileged user then we attempt to
+          grant the unprivileged user access via file system acls.
+        * If granting file system acls fails we try to change the owner of the
+          file with chown which only works in case the remote_user is
+          privileged or the remote systems allows chown calls by unprivileged
+          users (e.g. HP-UX)
+        * If the chown fails we can set the file to be world readable so that
+          the second unprivileged user can read the file.
           Since this could allow other users to get access to private
           information we only do this ansible is configured with
           "allow_world_readable_tmpfiles" in the ansible.cfg
@@ -317,51 +343,39 @@ class ActionBase(with_metaclass(ABCMeta, object)):
         if self._connection._shell.SHELL_FAMILY == 'powershell':
             # This won't work on Powershell as-is, so we'll just completely skip until
             # we have a need for it, at which point we'll have to do something different.
-            return remote_path
-
-        if remote_path is None:
-            # Sometimes code calls us naively -- it has a var which could
-            # contain a path to a tmp dir but doesn't know if it needs to
-            # exist or not. If there's no path, then there's no need for us
-            # to do work
-            self._display.debug('_fixup_perms called with remote_path==None. Sure this is correct?')
-            return remote_path
+            return remote_paths

         if self._play_context.become and self._play_context.become_user not in ('root', remote_user):
             # Unprivileged user that's different than the ssh user. Let's get
             # to work!

-            # Try chown'ing the file. This will only work if our SSH user has
-            # root privileges, but since we can't reliably determine that from
-            # the username (think "toor" on FreeBSD), let's just try first and
-            # apologize later:
-            res = self._remote_chown(remote_path, self._play_context.become_user, recursive=recursive)
-            if res['rc'] == 0:
-                # root can read things that don't have read bit but can't
-                # execute them without the execute bit, so we might need to
-                # set that even if we're root. We just ran chown successfully,
-                # so apparently we are root.
+            # Try to use file system acls to make the files readable for sudo'd
+            # user
+            if execute:
+                mode = 'rx'
+            else:
+                mode = 'rX'
+
+            res = self._remote_set_user_facl(remote_paths, self._play_context.become_user, mode)
+            if res['rc'] != 0:
+                # File system acls failed; let's try to use chown next
+                # Set executable bit first as on some systems an
+                # unprivileged user can use chown
                 if execute:
-                    res = self._remote_chmod('u+x', remote_path, recursive=recursive)
+                    res = self._remote_chmod(remote_paths, 'u+x')
                     if res['rc'] != 0:
                         raise AnsibleError('Failed to set file mode on remote temporary files (rc: {0}, err: {1})'.format(res['rc'], res['stderr']))

-            elif remote_user == 'root':
-                raise AnsibleError('Failed to change ownership of the temporary files Ansible needs to create despite connecting as root. Unprivileged become user would be unable to read the file.')
-            else:
-                # Chown'ing failed. We're probably lacking root privileges; let's try something else.
-                if execute:
-                    mode = 'rx'
-                else:
-                    mode = 'rX'
-                # Try to use fs acls to solve this problem
-                res = self._remote_set_user_facl(remote_path, self._play_context.become_user, mode, recursive=recursive, sudoable=False)
-                if res['rc'] != 0:
+                res = self._remote_chown(remote_paths, self._play_context.become_user)
+                if res['rc'] != 0 and remote_user == 'root':
+                    # chown failed even if remove_user is root
+                    raise AnsibleError('Failed to change ownership of the temporary files Ansible needs to create despite connecting as root. Unprivileged become user would be unable to read the file.')
+                elif res['rc'] != 0:
                     if C.ALLOW_WORLD_READABLE_TMPFILES:
-                        # fs acls failed -- do things this insecure way only
-                        # if the user opted in in the config file
-                        self._display.warning('Using world-readable permissions for temporary files Ansible needs to create when becoming an unprivileged user which may be insecure. For information on securing this, see https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user')
-                        res = self._remote_chmod('a+%s' % mode, remote_path, recursive=recursive)
+                        # chown and fs acls failed -- do things this insecure
+                        # way only if the user opted in in the config file
+                        display.warning('Using world-readable permissions for temporary files Ansible needs to create when becoming an unprivileged user which may be insecure. For information on securing this, see https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user')
+                        res = self._remote_chmod(remote_paths, 'a+%s' % mode)
                         if res['rc'] != 0:
                             raise AnsibleError('Failed to set file mode on remote files (rc: {0}, err: {1})'.format(res['rc'], res['stderr']))
                     else:
@@ -370,33 +384,33 @@ class ActionBase(with_metaclass(ABCMeta, object)):
             # Can't depend on the file being transferred with execute
             # permissions. Only need user perms because no become was
             # used here
-            res = self._remote_chmod('u+x', remote_path, recursive=recursive)
+            res = self._remote_chmod(remote_paths, 'u+x')
             if res['rc'] != 0:
                 raise AnsibleError('Failed to set file mode on remote files (rc: {0}, err: {1})'.format(res['rc'], res['stderr']))

-        return remote_path
+        return remote_paths

-    def _remote_chmod(self, mode, path, recursive=True, sudoable=False):
+    def _remote_chmod(self, paths, mode, sudoable=False):
         '''
         Issue a remote chmod command
         '''
-        cmd = self._connection._shell.chmod(mode, path, recursive=recursive)
+        cmd = self._connection._shell.chmod(paths, mode)
         res = self._low_level_execute_command(cmd, sudoable=sudoable)
         return res

-    def _remote_chown(self, path, user, group=None, recursive=True, sudoable=False):
+    def _remote_chown(self, paths, user, sudoable=False):
         '''
         Issue a remote chown command
         '''
-        cmd = self._connection._shell.chown(path, user, group, recursive=recursive)
+        cmd = self._connection._shell.chown(paths, user)
         res = self._low_level_execute_command(cmd, sudoable=sudoable)
         return res

-    def _remote_set_user_facl(self, path, user, mode, recursive=True, sudoable=False):
+    def _remote_set_user_facl(self, paths, user, mode, sudoable=False):
         '''
         Issue a remote call to setfacl
         '''
-        cmd = self._connection._shell.set_user_facl(path, user, mode, recursive=recursive)
+        cmd = self._connection._shell.set_user_facl(paths, user, mode)
         res = self._low_level_execute_command(cmd, sudoable=sudoable)
         return res
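Order of operations after the rewrite, reduced to stubs (the real helpers are the _remote_* methods above; their return-code checks are condensed to booleans): acls are now attempted before chown, the reverse of the old code.

def setfacl(paths, user, mode): return False    # stub: acls unsupported here
def chown(paths, user): return False            # stub: chown refused
def chmod(paths, mode): return True             # stub: chmod succeeds

def fixup_perms2_sketch(paths, become_user, execute, world_readable_ok):
    mode = 'rx' if execute else 'rX'
    if setfacl(paths, become_user, mode):       # 1. POSIX acls first
        return paths
    if execute:
        chmod(paths, 'u+x')                     # exec bit before handing off
    if chown(paths, become_user):               # 2. then chown
        return paths
    if world_readable_ok:                       # 3. opt-in last resort
        chmod(paths, 'a+%s' % mode)
        return paths
    raise RuntimeError('cannot make files readable for %s' % become_user)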
@@ -423,10 +437,12 @@ class ActionBase(with_metaclass(ABCMeta, object)):
             # happens sometimes when it is a dir and not on bsd
             if not 'checksum' in mystat['stat']:
                 mystat['stat']['checksum'] = ''
+            elif not isinstance(mystat['stat']['checksum'], string_types):
+                raise AnsibleError("Invalid checksum returned by stat: expected a string type but got %s" % type(mystat['stat']['checksum']))

         return mystat['stat']

-    def _remote_checksum(self, path, all_vars):
+    def _remote_checksum(self, path, all_vars, follow=False):
         '''
         Produces a remote checksum given a path,
         Returns a number 0-4 for specific errors instead of checksum, also ensures it is different

@@ -438,7 +454,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
         '''
         x = "0" # unknown error has occured
         try:
-            remote_stat = self._execute_remote_stat(path, all_vars, follow=False)
+            remote_stat = self._execute_remote_stat(path, all_vars, follow=follow)
             if remote_stat['exists'] and remote_stat['isdir']:
                 x = "3" # its a directory not a file
             else:
@@ -480,21 +496,49 @@ class ActionBase(with_metaclass(ABCMeta, object)):
         else:
             return initial_fragment

-    def _filter_leading_non_json_lines(self, data):
+    @staticmethod
+    def _filter_non_json_lines(data):
         '''
         Used to avoid random output from SSH at the top of JSON output, like messages from
         tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).

-        need to filter anything which starts not with '{', '[', ', '=' or is an empty line.
-        filter only leading lines since multiline JSON is valid.
+        need to filter anything which does not start with '{', '[', or is an empty line.
+        Have to be careful how we filter trailing junk as multiline JSON is valid.
         '''
-        idx = 0
-        for line in data.splitlines(True):
-            if line.startswith((u'{', u'[')):
-                break
-            idx = idx + len(line)
-
-        return data[idx:]
+        # Filter initial junk
+        lines = data.splitlines()
+        for start, line in enumerate(lines):
+            line = line.strip()
+            if line.startswith(u'{'):
+                endchar = u'}'
+                break
+            elif line.startswith(u'['):
+                endchar = u']'
+                break
+        else:
+            display.debug('No start of json char found')
+            raise ValueError('No start of json char found')
+
+        # Filter trailing junk
+        lines = lines[start:]
+        lines.reverse()
+        for end, line in enumerate(lines):
+            if line.strip().endswith(endchar):
+                break
+        else:
+            display.debug('No end of json char found')
+            raise ValueError('No end of json char found')
+
+        if end < len(lines) - 1:
+            # Trailing junk is uncommon and can point to things the user might
+            # want to change. So print a warning if we find any
+            trailing_junk = lines[:end]
+            trailing_junk.reverse()
+            display.warning('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
+
+        lines = lines[end:]
+        lines.reverse()
+        return '\n'.join(lines)

     def _strip_success_message(self, data):
         '''
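Behavioral sketch of the rewrite on an assumed noisy payload: leading junk is cut as before, and now trailing junk after the closing brace is cut too (this mimics, not reuses, the method above):

import json

noisy = 'MOTD from dropbear\n{\n  "changed": false\n}\ntrailing junk'
lines = noisy.splitlines()
start = next(i for i, l in enumerate(lines) if l.strip().startswith('{'))
lines = lines[start:]
end = max(i for i, l in enumerate(lines) if l.strip().endswith('}'))
print(json.loads('\n'.join(lines[:end + 1])))   # {'changed': False}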
@@ -539,7 +583,16 @@ class ActionBase(with_metaclass(ABCMeta, object)):
         module_args['_ansible_diff'] = self._play_context.diff

         # let module know our verbosity
-        module_args['_ansible_verbosity'] = self._display.verbosity
+        module_args['_ansible_verbosity'] = display.verbosity
+
+        # give the module information about the ansible version
+        module_args['_ansible_version'] = __version__
+
+        # set the syslog facility to be used in the module
+        module_args['_ansible_syslog_facility'] = task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY)
+
+        # let module know about filesystems that selinux treats specially
+        module_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS

         (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
         if not shebang:

@@ -566,7 +619,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
             # the remote system, which can be read and parsed by the module
             args_data = ""
             for k,v in iteritems(module_args):
-                args_data += '%s="%s" ' % (k, pipes.quote(text_type(v)))
+                args_data += '%s=%s ' % (k, pipes.quote(text_type(v)))
             self._transfer_data(args_file_path, args_data)
         elif module_style == 'non_native_want_json':
             self._transfer_data(args_file_path, json.dumps(module_args))

@@ -574,9 +627,17 @@ class ActionBase(with_metaclass(ABCMeta, object)):

         environment_string = self._compute_environment_string()

+        remote_files = None
+
+        if args_file_path:
+            remote_files = tmp, remote_module_path, args_file_path
+        elif remote_module_path:
+            remote_files = tmp, remote_module_path
+
         # Fix permissions of the tmp path and tmp files. This should be
         # called after all files have been transferred.
-        self._fixup_perms(tmp, remote_user, recursive=True)
+        if remote_files:
+            self._fixup_perms2(remote_files, remote_user)

         cmd = ""
         in_data = None
@@ -625,12 +686,52 @@ class ActionBase(with_metaclass(ABCMeta, object)):
         display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
         return data

+    def _clean_returned_data(self, data):
+        remove_keys = set()
+        fact_keys = set(data.keys())
+        # first we add all of our magic variable names to the set of
+        # keys we want to remove from facts
+        for magic_var in MAGIC_VARIABLE_MAPPING:
+            remove_keys.update(fact_keys.intersection(MAGIC_VARIABLE_MAPPING[magic_var]))
+        # next we remove any connection plugin specific vars
+        for conn_path in self._shared_loader_obj.connection_loader.all(path_only=True):
+            try:
+                conn_name = os.path.splitext(os.path.basename(conn_path))[0]
+                re_key = re.compile('^ansible_%s_' % conn_name)
+                for fact_key in fact_keys:
+                    if re_key.match(fact_key):
+                        remove_keys.add(fact_key)
+            except AttributeError:
+                pass
+
+        # remove some KNOWN keys
+        for hard in ['ansible_rsync_path', 'ansible_playbook_python']:
+            if hard in fact_keys:
+                remove_keys.add(hard)
+
+        # finally, we search for interpreter keys to remove
+        re_interp = re.compile('^ansible_.*_interpreter$')
+        for fact_key in fact_keys:
+            if re_interp.match(fact_key):
+                remove_keys.add(fact_key)
+        # then we remove them (except for ssh host keys)
+        for r_key in remove_keys:
+            if not r_key.startswith('ansible_ssh_host_key_'):
+                del data[r_key]
+
     def _parse_returned_data(self, res):
         try:
-            data = json.loads(self._filter_leading_non_json_lines(res.get('stdout', u'')))
+            data = json.loads(self._filter_non_json_lines(res.get('stdout', u'')))
             data['_ansible_parsed'] = True
+            if 'ansible_facts' in data and isinstance(data['ansible_facts'], dict):
+                self._clean_returned_data(data['ansible_facts'])
+                data['ansible_facts'] = wrap_var(data['ansible_facts'])
+            if 'add_host' in data and isinstance(data['add_host'].get('host_vars', None), dict):
+                self._clean_returned_data(data['add_host']['host_vars'])
+                data['add_host'] = wrap_var(data['add_host'])
         except ValueError:
             # not valid json, lets try to capture error
-            data = dict(failed=True, parsed=False)
+            data = dict(failed=True, _ansible_parsed=False)
             data['msg'] = "MODULE FAILURE"
             data['module_stdout'] = res.get('stdout', u'')
             if 'stderr' in res:
@@ -669,11 +770,23 @@ class ActionBase(with_metaclass(ABCMeta, object)):
         if self._connection.allow_executable:
             if executable is None:
                 executable = self._play_context.executable
+                # mitigation for SSH race which can drop stdout (https://github.com/ansible/ansible/issues/13876)
+                # only applied for the default executable to avoid interfering with the raw action
+                cmd = self._connection._shell.append_command(cmd, 'sleep 0')
             if executable:
                 cmd = executable + ' -c ' + pipes.quote(cmd)

         display.debug("_low_level_execute_command(): executing: %s" % (cmd,))
-        rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+        # Change directory to basedir of task for command execution when connection is local
+        if self._connection.transport == 'local':
+            cwd = os.getcwd()
+            os.chdir(self._loader.get_basedir())
+        try:
+            rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)
+        finally:
+            if self._connection.transport == 'local':
+                os.chdir(cwd)

         # stdout and stderr may be either a file-like or a bytes object.
         # Convert either one to a text type
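Shape of the new local-connection guard, factored into a standalone helper (name invented); the try/finally is what guarantees the working directory is restored:

import os

def run_in_basedir(basedir, fn):
    cwd = os.getcwd()
    os.chdir(basedir)   # relative paths in the task resolve against basedir
    try:
        return fn()
    finally:
        os.chdir(cwd)   # restored even when fn() raises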
@@ -153,7 +153,7 @@ class ActionModule(ActionBase):
             xfered = self._transfer_file(path, remote_path)

             # fix file permissions when the copy is done as a different user
-            self._fixup_perms(tmp, remote_user, recursive=True)
+            self._fixup_perms2((tmp, remote_path), remote_user)

             new_module_args.update( dict( src=xfered,))
@@ -70,17 +70,13 @@ class ActionModule(ActionBase):
             args_data += '%s="%s" ' % (k, pipes.quote(to_unicode(v)))
         argsfile = self._transfer_data(self._connection._shell.join_path(tmp, 'arguments'), args_data)

-        self._fixup_perms(tmp, remote_user, execute=True, recursive=True)
-        # Only the following two files need to be executable but we'd have to
-        # make three remote calls if we wanted to just set them executable.
-        # There's not really a problem with marking too many of the temp files
-        # executable so we go ahead and mark them all as executable in the
-        # line above (the line above is needed in any case [although
-        # execute=False is okay if we uncomment the lines below] so that all
-        # the files are readable in case the remote_user and become_user are
-        # different and both unprivileged)
-        #self._fixup_perms(remote_module_path, remote_user, execute=True, recursive=False)
-        #self._fixup_perms(async_module_path, remote_user, execute=True, recursive=False)
+        remote_paths = tmp, remote_module_path, async_module_path
+
+        # argsfile doesn't need to be executable, but this saves an extra call to the remote host
+        if argsfile:
+            remote_paths += argsfile,
+
+        self._fixup_perms2(remote_paths, remote_user, execute=True)

         async_limit = self._task.async
         async_jid = str(random.randint(0, 999999999999))

@@ -96,4 +92,11 @@ class ActionModule(ActionBase):

         result['changed'] = True

+        if 'skipped' in result and result['skipped'] or 'failed' in result and result['failed']:
+            return result
+
+        # the async_wrapper module returns dumped JSON via its stdout
+        # response, so we parse it here and replace the result
+        result = self._parse_returned_data(result)
+
         return result
@@ -217,8 +217,10 @@ class ActionModule(ActionBase):
             # Define a remote directory that we will copy the file to.
             tmp_src = self._connection._shell.join_path(tmp, 'source')

+            remote_path = None
+
             if not raw:
-                self._transfer_file(source_full, tmp_src)
+                remote_path = self._transfer_file(source_full, tmp_src)
             else:
                 self._transfer_file(source_full, dest_file)

@@ -227,7 +229,8 @@ class ActionModule(ActionBase):
                 self._loader.cleanup_tmp_file(source_full)

             # fix file permissions when the copy is done as a different user
-            self._fixup_perms(tmp, remote_user, recursive=True)
+            if remote_path:
+                self._fixup_perms2((tmp, remote_path), remote_user)

             if raw:
                 # Continue to next iteration if raw is defined.

@@ -245,6 +248,8 @@ class ActionModule(ActionBase):
                     original_basename=source_rel,
                 )
             )
+            if 'content' in new_module_args:
+                del new_module_args['content']

             module_return = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=delete_remote_tmp)
             module_executed = True
@@ -54,7 +54,11 @@ class ActionModule(ActionBase):
             try:
                 results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=True, bare_deprecated=False)
                 if results == self._task.args['var']:
-                    raise AnsibleUndefinedVariable
+                    # if results is not str/unicode type, raise an exception
+                    if type(results) not in [str, unicode]:
+                        raise AnsibleUndefinedVariable
+                    # If var name is same as result, try to template it
+                    results = self._templar.template("{{" + results + "}}", convert_bare=True, fail_on_undefined=True)
             except AnsibleUndefinedVariable:
                 results = "VARIABLE IS NOT DEFINED!"
@@ -64,7 +64,8 @@ class ActionModule(ActionBase):
         remote_checksum = None
         if not self._play_context.become:
             # calculate checksum for the remote file, don't bother if using become as slurp will be used
-            remote_checksum = self._remote_checksum(source, all_vars=task_vars)
+            # Force remote_checksum to follow symlinks because fetch always follows symlinks
+            remote_checksum = self._remote_checksum(source, all_vars=task_vars, follow=True)

         # use slurp if permissions are lacking or privilege escalation is needed
         remote_data = None
@@ -34,6 +34,8 @@ class ActionModule(ActionBase):
         result = super(ActionModule, self).run(tmp, task_vars)

         source = self._task.args.get('_raw_params')
+        if source is None:
+            raise AnsibleError("No filename was specified to include.", self._task._ds)

         if self._task._role:
             source = self._loader.path_dwim_relative(self._task._role._role_path, 'vars', source)
@@ -75,6 +75,9 @@ class ActionModule(ActionBase):

     def _handle_template(self):
         src = self._task.args.get('src')
+        if not src:
+            return
+
         working_path = self._get_working_path()

         if os.path.isabs(src) or urlparse.urlsplit('src').scheme:

@@ -93,6 +96,17 @@ class ActionModule(ActionBase):
         except IOError:
             return dict(failed=True, msg='unable to load src file')

+        # Create a template search path in the following order:
+        # [working_path, self_role_path, dependent_role_paths, dirname(source)]
+        searchpath = [working_path]
+        if self._task._role is not None:
+            searchpath.append(self._task._role._role_path)
+            dep_chain = self._task._block.get_dep_chain()
+            if dep_chain is not None:
+                for role in dep_chain:
+                    searchpath.append(role._role_path)
+        searchpath.append(os.path.dirname(source))
+        self._templar.environment.loader.searchpath = searchpath
         self._task.args['src'] = self._templar.template(template_data)
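Resulting lookup order for the Jinja2 environment, with placeholder paths; entries are consulted left to right, so the working path shadows role and dependency templates:

import os

working_path = '/srv/net'                       # hypothetical
role_path = '/srv/net/roles/router'             # hypothetical _role_path
dep_paths = ['/srv/net/roles/base']             # hypothetical dep chain paths
source = '/srv/net/templates/config.j2'         # hypothetical src file

searchpath = [working_path, role_path] + dep_paths + [os.path.dirname(source)]
print(searchpath)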
@@ -59,7 +59,7 @@ class ActionModule(ActionBase):
         tmp_src = self._connection._shell.join_path(tmp, os.path.basename(src))
         self._transfer_file(src, tmp_src)

-        self._fixup_perms(tmp, remote_user, recursive=True)
+        self._fixup_perms2((tmp, tmp_src), remote_user)

         new_module_args = self._task.args.copy()
         new_module_args.update(
@@ -123,8 +123,9 @@ class ActionModule(ActionBase):
         fd = None
         try:
             fd = self._connection._new_stdin.fileno()
-        except ValueError:
-            # someone is using a closed file descriptor as stdin
+        except (ValueError, AttributeError):
+            # ValueError: someone is using a closed file descriptor as stdin
+            # AttributeError: someone is using a null file descriptor as stdin on windoez
             pass
         if fd is not None:
             if isatty(fd):
@@ -37,7 +37,7 @@ class ActionModule(ActionBase):
             result['skipped'] = True
             return result

-        executable = self._task.args.get('executable')
+        executable = self._task.args.get('executable', False)
         result.update(self._low_level_execute_command(self._task.args.get('_raw_params'), executable=executable))

         return result
@@ -19,7 +19,6 @@ __metaclass__ = type

 import os

-from ansible import constants as C
 from ansible.plugins.action import ActionBase


@@ -79,9 +78,8 @@ class ActionModule(ActionBase):
         tmp_src = self._connection._shell.join_path(tmp, os.path.basename(source))
         self._transfer_file(source, tmp_src)

-        sudoable = True
         # set file permissions, more permissive when the copy is done as a different user
-        self._fixup_perms(tmp, remote_user, execute=True, recursive=True)
+        self._fixup_perms2((tmp, tmp_src), remote_user, execute=True)

         # add preparation steps to one ssh roundtrip executing the script
         env_string = self._compute_environment_string()
Some files were not shown because too many files have changed in this diff.