mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-04-29 09:56:53 +00:00
Compare commits
482 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f7276b152b | ||
|
|
5e503bfcc7 | ||
|
|
d2b4151b7a | ||
|
|
57b73e0d5e | ||
|
|
5f47d47f27 | ||
|
|
7e2f20f482 | ||
|
|
3023312334 | ||
|
|
fe3de232f0 | ||
|
|
2024dc37af | ||
|
|
b8469a5c28 | ||
|
|
2a0ec9c572 | ||
|
|
fa92f8efb0 | ||
|
|
03e068e298 | ||
|
|
8f084ac065 | ||
|
|
e02568d28a | ||
|
|
73580d09e0 | ||
|
|
093036a2ae | ||
|
|
7f1de4869e | ||
|
|
8df1b93531 | ||
|
|
c055ea2bcc | ||
|
|
27c094a095 | ||
|
|
86a5b4f28c | ||
|
|
4e14c429c7 | ||
|
|
c34fb01462 | ||
|
|
2086977af6 | ||
|
|
587d221376 | ||
|
|
5ed6b38477 | ||
|
|
2c0cfe4d16 | ||
|
|
c344d20a9a | ||
|
|
53480b25c8 | ||
|
|
cfffaa5b6f | ||
|
|
726918930b | ||
|
|
614a84d0f2 | ||
|
|
a11022e896 | ||
|
|
491196937d | ||
|
|
af7a6dc29f | ||
|
|
16ffb4ba10 | ||
|
|
31c3865251 | ||
|
|
53e0bf8297 | ||
|
|
9b80b14956 | ||
|
|
be763e6ed2 | ||
|
|
4375280497 | ||
|
|
ebda14ba41 | ||
|
|
c16a5f3780 | ||
|
|
f6c1566924 | ||
|
|
bffed2fda5 | ||
|
|
440804fd62 | ||
|
|
a915a4b7c5 | ||
|
|
ed69bde7a9 | ||
|
|
77700e7110 | ||
|
|
91d445ab35 | ||
|
|
19c2af03b7 | ||
|
|
58a5463ddb | ||
|
|
84941d0a7f | ||
|
|
87880da6da | ||
|
|
7acc0b897a | ||
|
|
5174fc98d2 | ||
|
|
d9ad386a13 | ||
|
|
739719a3b1 | ||
|
|
311b618016 | ||
|
|
70820cab5d | ||
|
|
a75a12227f | ||
|
|
6959847701 | ||
|
|
ad93c40d40 | ||
|
|
5bfbd65115 | ||
|
|
71de1ee1d5 | ||
|
|
ad4efaeb31 | ||
|
|
786ea68016 | ||
|
|
dd878f931f | ||
|
|
8f03511d9c | ||
|
|
004e6d06c3 | ||
|
|
25f46caefb | ||
|
|
0a733c60ca | ||
|
|
f006aa4cf6 | ||
|
|
72e0d8c310 | ||
|
|
b96aaffeae | ||
|
|
5bd5de4281 | ||
|
|
4aebefcf9e | ||
|
|
62f9a5b0a9 | ||
|
|
3d03eda99e | ||
|
|
c01ce10b4b | ||
|
|
16aa776c93 | ||
|
|
d7d1659e34 | ||
|
|
5b9b99384f | ||
|
|
f898279c8c | ||
|
|
2215c6d360 | ||
|
|
ca3948858a | ||
|
|
f14e566cc7 | ||
|
|
a2c93f5e99 | ||
|
|
67a2abcab2 | ||
|
|
2e4864db7f | ||
|
|
1f0b2a5173 | ||
|
|
25482000f0 | ||
|
|
c0f3aa14cf | ||
|
|
1ef104be61 | ||
|
|
773df88a41 | ||
|
|
d77e256088 | ||
|
|
2917389779 | ||
|
|
59af80235b | ||
|
|
aec52198e3 | ||
|
|
cbe4490c9e | ||
|
|
9de059b44d | ||
|
|
c72a23a5f1 | ||
|
|
0b9d9c0fdb | ||
|
|
67eaf9405f | ||
|
|
5de05a6243 | ||
|
|
46b4b9a6de | ||
|
|
10146aae1c | ||
|
|
d2ec7053c5 | ||
|
|
51fcacae08 | ||
|
|
29211b970c | ||
|
|
5c1fa53558 | ||
|
|
2348f3d439 | ||
|
|
46a051d168 | ||
|
|
b2212bc8ef | ||
|
|
e05e3aed67 | ||
|
|
a13541299e | ||
|
|
221067e708 | ||
|
|
db6458bd93 | ||
|
|
f342243fb0 | ||
|
|
37f2b06c3c | ||
|
|
0a8a41966d | ||
|
|
263c5ba9de | ||
|
|
cfc28a3f6a | ||
|
|
b495035923 | ||
|
|
d4637e9b1c | ||
|
|
7842dc0dea | ||
|
|
314a0bc553 | ||
|
|
34b7876e4f | ||
|
|
65c10de630 | ||
|
|
a86f31ac0f | ||
|
|
bc82fe36be | ||
|
|
7c810a6186 | ||
|
|
9d468fb078 | ||
|
|
2c79d42eb4 | ||
|
|
d95c3a738f | ||
|
|
839880d711 | ||
|
|
bc0edf7d55 | ||
|
|
68458fd8aa | ||
|
|
4aa70ab48f | ||
|
|
739210c6b9 | ||
|
|
8c23d0e345 | ||
|
|
cde4a1a099 | ||
|
|
10b3381f21 | ||
|
|
0ccd52b63a | ||
|
|
c76e598d61 | ||
|
|
ad3efa9719 | ||
|
|
b9c8d2bee5 | ||
|
|
2c167547f6 | ||
|
|
73de447489 | ||
|
|
134f6132ce | ||
|
|
f229c800da | ||
|
|
52a0970ef8 | ||
|
|
11e0797650 | ||
|
|
e6bbbac6a0 | ||
|
|
d78f3dd7c4 | ||
|
|
737f8340e4 | ||
|
|
f87ab7046d | ||
|
|
4c100aef47 | ||
|
|
2b6bbd9f91 | ||
|
|
0484abdddd | ||
|
|
3f119aa9b6 | ||
|
|
277329a6fe | ||
|
|
955eb531a3 | ||
|
|
9ac2918d49 | ||
|
|
740180d4a5 | ||
|
|
5d6c539373 | ||
|
|
09e2f77289 | ||
|
|
7aaf8cf496 | ||
|
|
664bd70294 | ||
|
|
9651bca396 | ||
|
|
f3375c638e | ||
|
|
14d663029a | ||
|
|
9297802089 | ||
|
|
5b4dc4ace2 | ||
|
|
15a72418ac | ||
|
|
7278bdcf9d | ||
|
|
b9aab568f7 | ||
|
|
e01d014c36 | ||
|
|
e5bdc028c4 | ||
|
|
67f7184234 | ||
|
|
b42ab6b45d | ||
|
|
67eafdd20b | ||
|
|
3cc11bfd42 | ||
|
|
a367fba315 | ||
|
|
00583448e2 | ||
|
|
c0cae2b27e | ||
|
|
07a9efd54f | ||
|
|
9dc8f2b05d | ||
|
|
baf726b389 | ||
|
|
2d7302ba12 | ||
|
|
7caefbd420 | ||
|
|
1a3c221995 | ||
|
|
0eecd48ea8 | ||
|
|
d71c10da27 | ||
|
|
664a09b277 | ||
|
|
e11bf7d788 | ||
|
|
889989aa96 | ||
|
|
0feb38f2b1 | ||
|
|
e6323433ff | ||
|
|
0a9a853abf | ||
|
|
3da1119e41 | ||
|
|
4255c0d2fc | ||
|
|
066975e5d1 | ||
|
|
8a59d6306c | ||
|
|
c4dc911d26 | ||
|
|
e30bb0958a | ||
|
|
67640e5431 | ||
|
|
9e7bcae370 | ||
|
|
c461e3cf71 | ||
|
|
3495823a72 | ||
|
|
f493110651 | ||
|
|
548758a878 | ||
|
|
f915cf5df2 | ||
|
|
6d147d748f | ||
|
|
1349d38c73 | ||
|
|
552207ea13 | ||
|
|
9cbe572c22 | ||
|
|
b485d23a05 | ||
|
|
a59a15e56c | ||
|
|
98251abfa9 | ||
|
|
e16bd2d015 | ||
|
|
bdafa31851 | ||
|
|
231f9c0283 | ||
|
|
0a5b29a744 | ||
|
|
405b4f34c0 | ||
|
|
c51b10eb9b | ||
|
|
6e172f37af | ||
|
|
c31424a924 | ||
|
|
6f0be41e1e | ||
|
|
8d035be233 | ||
|
|
35de2377f2 | ||
|
|
ad8cd8efb3 | ||
|
|
1dc03685b5 | ||
|
|
c81ea00a97 | ||
|
|
ca39c45bd4 | ||
|
|
2326d72cf7 | ||
|
|
9bd3627796 | ||
|
|
8a8e6c8058 | ||
|
|
d85d31ba3c | ||
|
|
15998c9f72 | ||
|
|
93027a33b9 | ||
|
|
c5f17f2184 | ||
|
|
172e8bb161 | ||
|
|
8da1ff3c90 | ||
|
|
7aa1c1a338 | ||
|
|
378687503c | ||
|
|
90be1cc838 | ||
|
|
8cee29b8f6 | ||
|
|
5a71909770 | ||
|
|
9d0af30702 | ||
|
|
9dc21447cc | ||
|
|
940130c959 | ||
|
|
0b239199e7 | ||
|
|
f0d6fcb3fa | ||
|
|
e1aad0db30 | ||
|
|
7701ea0293 | ||
|
|
9afb84c8f3 | ||
|
|
1746d11749 | ||
|
|
ea3b8eeee7 | ||
|
|
8c9add3d15 | ||
|
|
9244d0ae47 | ||
|
|
22591fb6e1 | ||
|
|
166fa1a7fa | ||
|
|
9e541a6f11 | ||
|
|
dbb37194d4 | ||
|
|
3cd7b0ec25 | ||
|
|
1c84389f50 | ||
|
|
61de9ce51c | ||
|
|
7ccd5c9116 | ||
|
|
e3cea35f2c | ||
|
|
94f58d1920 | ||
|
|
0f884bbadc | ||
|
|
6ca3e78d11 | ||
|
|
a09d70daa0 | ||
|
|
c2a3cf35c7 | ||
|
|
ee5ff3b31b | ||
|
|
18b7333f93 | ||
|
|
3197ef2e38 | ||
|
|
5f971e677a | ||
|
|
1b5d91153b | ||
|
|
2ce9ea8c54 | ||
|
|
eec4861c36 | ||
|
|
82e7e931a8 | ||
|
|
4b59174063 | ||
|
|
58d8469759 | ||
|
|
6d5dbfd455 | ||
|
|
6357048068 | ||
|
|
a861149a0e | ||
|
|
9a9b0b04a5 | ||
|
|
0a4e9379e2 | ||
|
|
7d1abf5d6a | ||
|
|
7ef25be10c | ||
|
|
0d0194fdf8 | ||
|
|
36f64367cf | ||
|
|
d827601c95 | ||
|
|
2efd31bacf | ||
|
|
6eaf047739 | ||
|
|
80268b0828 | ||
|
|
1e848c56f2 | ||
|
|
f74756d7fc | ||
|
|
138b57230a | ||
|
|
d32193afef | ||
|
|
ef8aa73dab | ||
|
|
28007079a4 | ||
|
|
f0b7233e8d | ||
|
|
48cc39a2b1 | ||
|
|
c34dc24d3a | ||
|
|
0760f60ca5 | ||
|
|
48b1bc7d47 | ||
|
|
769233808d | ||
|
|
7361ca5430 | ||
|
|
2322937a4a | ||
|
|
82225e5850 | ||
|
|
2d237987ae | ||
|
|
dc14070e08 | ||
|
|
feb1c1081e | ||
|
|
20bda07aaf | ||
|
|
1f6aa62210 | ||
|
|
5308f61b78 | ||
|
|
29636c1cc8 | ||
|
|
830734d6cf | ||
|
|
0296c200c7 | ||
|
|
2b435a591d | ||
|
|
ec2c793b08 | ||
|
|
a6bffa274c | ||
|
|
b653a9a84a | ||
|
|
8c209bdedc | ||
|
|
d4b4370ec4 | ||
|
|
e34276fa92 | ||
|
|
59d7850900 | ||
|
|
151b482fe6 | ||
|
|
ea04bb97cb | ||
|
|
5dd64a45de | ||
|
|
4df33d26b1 | ||
|
|
766f2dfe46 | ||
|
|
8a7128997d | ||
|
|
b598ca28f9 | ||
|
|
c943f7aa56 | ||
|
|
9565be5e50 | ||
|
|
725e670b47 | ||
|
|
ffdef00a6a | ||
|
|
92ccc6f013 | ||
|
|
f0c1b1065a | ||
|
|
a44356c966 | ||
|
|
33f9f0b05f | ||
|
|
f0f0704d64 | ||
|
|
55fe140230 | ||
|
|
ac543f5ef0 | ||
|
|
dbc0fe8859 | ||
|
|
42a1318fe3 | ||
|
|
d25352dc06 | ||
|
|
55682c52df | ||
|
|
46781d9fd1 | ||
|
|
4545d1c91e | ||
|
|
6570dfeb7d | ||
|
|
94c368f7df | ||
|
|
4cba1e60d9 | ||
|
|
321fb6c974 | ||
|
|
eb4d7a4199 | ||
|
|
4b07d45b7e | ||
|
|
d4a33433b4 | ||
|
|
e30b91cb8d | ||
|
|
b2b65c431b | ||
|
|
9ade4f6dd6 | ||
|
|
635d4f2138 | ||
|
|
6549e41ab8 | ||
|
|
6faface39e | ||
|
|
3b893ec421 | ||
|
|
65805e2dd6 | ||
|
|
297b50fb96 | ||
|
|
2edadb42fb | ||
|
|
4e1bf2d4ba | ||
|
|
b1a4a0ff21 | ||
|
|
e74ea7c8b8 | ||
|
|
6590f5e082 | ||
|
|
7483f71d31 | ||
|
|
6b215e3a9c | ||
|
|
3723e458d3 | ||
|
|
0f8bb43723 | ||
|
|
f33530dd61 | ||
|
|
8f3043058e | ||
|
|
3987b8a291 | ||
|
|
f7403a0b34 | ||
|
|
0a676406b3 | ||
|
|
5a7d234d80 | ||
|
|
fb9730f75e | ||
|
|
928aeafe1d | ||
|
|
5b68665571 | ||
|
|
e6b84acd1e | ||
|
|
c242993291 | ||
|
|
4f3de5658e | ||
|
|
301fcc3b7e | ||
|
|
0f0e9b2dca | ||
|
|
ed0636dc27 | ||
|
|
057321c6c6 | ||
|
|
1a4814de53 | ||
|
|
89b67a014b | ||
|
|
57bfbdc407 | ||
|
|
e19dffbf29 | ||
|
|
113e7cdfa0 | ||
|
|
c12be67a69 | ||
|
|
3a076fd585 | ||
|
|
4ef05a6483 | ||
|
|
936dd28395 | ||
|
|
e3b47899c5 | ||
|
|
fd8193e0bd | ||
|
|
fa477ebb35 | ||
|
|
43e766dd44 | ||
|
|
b25e0f360c | ||
|
|
658e95c5ca | ||
|
|
26c2876f50 | ||
|
|
62043463f3 | ||
|
|
f1dab6d4a7 | ||
|
|
d43764da79 | ||
|
|
de2feb2567 | ||
|
|
6e56bae0f3 | ||
|
|
1f7047e725 | ||
|
|
b2e4485567 | ||
|
|
b78254fe24 | ||
|
|
38aa0ec8ad | ||
|
|
42f28048a8 | ||
|
|
b699aaff7b | ||
|
|
af85b6c203 | ||
|
|
ec2e7cad3e | ||
|
|
7753fa4219 | ||
|
|
69ea487005 | ||
|
|
048f15fe68 | ||
|
|
aa1aa1d540 | ||
|
|
e78517ca93 | ||
|
|
bf185573a6 | ||
|
|
145435cdd9 | ||
|
|
6013c77c2b | ||
|
|
ad5482f63d | ||
|
|
f5594aefd5 | ||
|
|
ab5b379b30 | ||
|
|
1c5e44c649 | ||
|
|
23da67cc72 | ||
|
|
4032dd6b08 | ||
|
|
4cb6f39a80 | ||
|
|
3539957bac | ||
|
|
e05769d4bf | ||
|
|
19c03cff96 | ||
|
|
703660c81d | ||
|
|
fd32af1ac3 | ||
|
|
80fbcf2f98 | ||
|
|
a722e038cc | ||
|
|
19c8d2164d | ||
|
|
d4656ffca2 | ||
|
|
b49607f12d | ||
|
|
af0ce4284f | ||
|
|
f5f862617a | ||
|
|
a1a4ba4337 | ||
|
|
b0b783f8ff | ||
|
|
e670ca666a | ||
|
|
49b991527e | ||
|
|
e6cc671a0d | ||
|
|
797ea23e50 | ||
|
|
4d23b7a48b | ||
|
|
020b47a1a9 | ||
|
|
0da9d956a0 | ||
|
|
5691e3aff3 | ||
|
|
007333dbfe | ||
|
|
05666b0e4d | ||
|
|
c934d9aeb5 | ||
|
|
5b15e4089a | ||
|
|
a6379e45ce | ||
|
|
b95176dbc8 | ||
|
|
b752fea121 | ||
|
|
cf50990fed | ||
|
|
45343e6bc0 | ||
|
|
51540f6345 | ||
|
|
74eba52028 | ||
|
|
b920e8abf2 | ||
|
|
75c0004e1e | ||
|
|
be42fd4af7 | ||
|
|
1c05908ff6 | ||
|
|
ea42b75378 | ||
|
|
0330f4b52c | ||
|
|
1d8c659ba2 | ||
|
|
e784254679 | ||
|
|
d5e1edd284 |
@@ -206,14 +206,14 @@ stages:
|
||||
parameters:
|
||||
testFormat: devel/{0}
|
||||
targets:
|
||||
- name: macOS 12.0
|
||||
test: macos/12.0
|
||||
- name: macOS 11.1
|
||||
test: macos/11.1
|
||||
- name: RHEL 7.9
|
||||
test: rhel/7.9
|
||||
- name: RHEL 8.5
|
||||
test: rhel/8.5
|
||||
- name: FreeBSD 12.3
|
||||
test: freebsd/12.3
|
||||
- name: FreeBSD 12.2
|
||||
test: freebsd/12.2
|
||||
- name: FreeBSD 13.0
|
||||
test: freebsd/13.0
|
||||
groups:
|
||||
|
||||
68
.github/BOTMETA.yml
vendored
68
.github/BOTMETA.yml
vendored
@@ -118,8 +118,6 @@ files:
|
||||
$doc_fragments/xenserver.py:
|
||||
maintainers: bvitnik
|
||||
labels: xenserver
|
||||
$filters/counter.py:
|
||||
maintainers: keilr
|
||||
$filters/dict.py:
|
||||
maintainers: felixfontein
|
||||
$filters/dict_kv.py:
|
||||
@@ -165,8 +163,6 @@ files:
|
||||
keywords: opennebula dynamic inventory script
|
||||
$inventories/proxmox.py:
|
||||
maintainers: $team_virt ilijamt
|
||||
$inventories/xen_orchestra.py:
|
||||
maintainers: ddelnano shinuza
|
||||
$inventories/icinga2.py:
|
||||
maintainers: BongoEADGC6
|
||||
$inventories/scaleway.py:
|
||||
@@ -179,8 +175,6 @@ files:
|
||||
labels: lookups
|
||||
$lookups/cartesian.py: {}
|
||||
$lookups/chef_databag.py: {}
|
||||
$lookups/collection_version.py:
|
||||
maintainers: felixfontein
|
||||
$lookups/consul_kv.py: {}
|
||||
$lookups/credstash.py: {}
|
||||
$lookups/cyberarkpassword.py:
|
||||
@@ -211,6 +205,9 @@ files:
|
||||
$lookups/manifold.py:
|
||||
maintainers: galanoff
|
||||
labels: manifold
|
||||
$lookups/nios:
|
||||
maintainers: $team_networking sganesh-infoblox
|
||||
labels: infoblox networking
|
||||
$lookups/onepass:
|
||||
maintainers: samdoran
|
||||
labels: onepassword
|
||||
@@ -223,12 +220,8 @@ files:
|
||||
maintainers: Akasurde
|
||||
$lookups/random_string.py:
|
||||
maintainers: Akasurde
|
||||
$lookups/random_words.py:
|
||||
maintainers: konstruktoid
|
||||
$lookups/redis.py:
|
||||
maintainers: $team_ansible_core jpmens
|
||||
$lookups/revbitspss.py:
|
||||
maintainers: RevBits
|
||||
$lookups/shelvefile.py: {}
|
||||
$lookups/tss.py:
|
||||
maintainers: amigus endlesstrax
|
||||
@@ -260,6 +253,9 @@ files:
|
||||
$module_utils/module_helper.py:
|
||||
maintainers: russoz
|
||||
labels: module_helper
|
||||
$module_utils/net_tools/nios/api.py:
|
||||
maintainers: $team_networking sganesh-infoblox
|
||||
labels: infoblox networking
|
||||
$module_utils/oracle/oci_utils.py:
|
||||
maintainers: $team_oracle
|
||||
labels: cloud
|
||||
@@ -485,16 +481,11 @@ files:
|
||||
maintainers: paginabianca
|
||||
$modules/database/misc/redis_data.py:
|
||||
maintainers: paginabianca
|
||||
$modules/database/misc/redis_data_incr.py:
|
||||
maintainers: paginabianca
|
||||
$modules/database/misc/riak.py:
|
||||
maintainers: drewkerrigan jsmartin
|
||||
$modules/database/mssql/mssql_db.py:
|
||||
maintainers: vedit Jmainguy kenichi-ogawa-1988
|
||||
labels: mssql_db
|
||||
$modules/database/mssql/mssql_script.py:
|
||||
maintainers: kbudde
|
||||
labels: mssql_script
|
||||
$modules/database/saphana/hana_query.py:
|
||||
maintainers: rainerleber
|
||||
$modules/database/vertica/:
|
||||
@@ -540,8 +531,6 @@ files:
|
||||
maintainers: adamgoossens
|
||||
$modules/identity/keycloak/keycloak_identity_provider.py:
|
||||
maintainers: laurpaum
|
||||
$modules/identity/keycloak/keycloak_realm_info.py:
|
||||
maintainers: fynncfchen
|
||||
$modules/identity/keycloak/keycloak_realm.py:
|
||||
maintainers: kris2kris
|
||||
$modules/identity/keycloak/keycloak_role.py:
|
||||
@@ -627,8 +616,6 @@ files:
|
||||
labels: cloudflare_dns
|
||||
$modules/net_tools/dnsimple.py:
|
||||
maintainers: drcapulet
|
||||
$modules/net_tools/dnsimple_info.py:
|
||||
maintainers: edhilgendorf
|
||||
$modules/net_tools/dnsmadeeasy.py:
|
||||
maintainers: briceburg
|
||||
$modules/net_tools/gandi_livedns.py:
|
||||
@@ -664,6 +651,31 @@ files:
|
||||
maintainers: amasolov nerzhul
|
||||
$modules/net_tools/pritunl/:
|
||||
maintainers: Lowess
|
||||
$modules/net_tools/nios/:
|
||||
maintainers: $team_networking
|
||||
labels: infoblox networking
|
||||
$modules/net_tools/nios/nios_a_record.py:
|
||||
maintainers: brampling
|
||||
$modules/net_tools/nios/nios_aaaa_record.py:
|
||||
maintainers: brampling
|
||||
$modules/net_tools/nios/nios_cname_record.py:
|
||||
maintainers: brampling
|
||||
$modules/net_tools/nios/nios_fixed_address.py:
|
||||
maintainers: sjaiswal
|
||||
$modules/net_tools/nios/nios_member.py:
|
||||
maintainers: krisvasudevan
|
||||
$modules/net_tools/nios/nios_mx_record.py:
|
||||
maintainers: brampling
|
||||
$modules/net_tools/nios/nios_naptr_record.py:
|
||||
maintainers: brampling
|
||||
$modules/net_tools/nios/nios_nsgroup.py:
|
||||
maintainers: ebirn sjaiswal
|
||||
$modules/net_tools/nios/nios_ptr_record.py:
|
||||
maintainers: clementtrebuchet
|
||||
$modules/net_tools/nios/nios_srv_record.py:
|
||||
maintainers: brampling
|
||||
$modules/net_tools/nios/nios_txt_record.py:
|
||||
maintainers: coreywan
|
||||
$modules/net_tools/nmcli.py:
|
||||
maintainers: alcamie101
|
||||
$modules/net_tools/snmp_facts.py:
|
||||
@@ -730,8 +742,6 @@ files:
|
||||
maintainers: mwarkentin
|
||||
$modules/packaging/language/bundler.py:
|
||||
maintainers: thoiberg
|
||||
$modules/packaging/language/cargo.py:
|
||||
maintainers: radek-sprta
|
||||
$modules/packaging/language/composer.py:
|
||||
maintainers: dmtrs
|
||||
ignore: resmo
|
||||
@@ -769,8 +779,6 @@ files:
|
||||
maintainers: evgkrsk
|
||||
$modules/packaging/os/copr.py:
|
||||
maintainers: schlupov
|
||||
$modules/packaging/os/dnf_versionlock.py:
|
||||
maintainers: moreda
|
||||
$modules/packaging/os/flatpak.py:
|
||||
maintainers: $team_flatpak
|
||||
$modules/packaging/os/flatpak_remote.py:
|
||||
@@ -867,9 +875,6 @@ files:
|
||||
$modules/packaging/os/snap.py:
|
||||
maintainers: angristan vcarceler
|
||||
labels: snap
|
||||
$modules/packaging/os/snap_alias.py:
|
||||
maintainers: russoz
|
||||
labels: snap
|
||||
$modules/packaging/os/sorcery.py:
|
||||
maintainers: vaygr
|
||||
$modules/packaging/os/svr4pkg.py:
|
||||
@@ -967,8 +972,6 @@ files:
|
||||
maintainers: SamyCoenen
|
||||
$modules/source_control/gitlab/gitlab_user.py:
|
||||
maintainers: LennertMertens stgrace
|
||||
$modules/source_control/gitlab/gitlab_branch.py:
|
||||
maintainers: paytroff
|
||||
$modules/source_control/hg.py:
|
||||
maintainers: yeukhon
|
||||
$modules/storage/emc/emc_vnx_sg_member.py:
|
||||
@@ -1022,8 +1025,6 @@ files:
|
||||
$modules/system/gconftool2.py:
|
||||
maintainers: Akasurde kevensen
|
||||
labels: gconftool2
|
||||
$modules/system/homectl.py:
|
||||
maintainers: jameslivulpi
|
||||
$modules/system/interfaces_file.py:
|
||||
maintainers: obourdon hryamzik
|
||||
labels: interfaces_file
|
||||
@@ -1106,8 +1107,6 @@ files:
|
||||
keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
|
||||
$modules/system/ssh_config.py:
|
||||
maintainers: gaqzi Akasurde
|
||||
$modules/system/sudoers.py:
|
||||
maintainers: JonEllis
|
||||
$modules/system/svc.py:
|
||||
maintainers: bcoca
|
||||
$modules/system/syspatch.py:
|
||||
@@ -1197,8 +1196,6 @@ files:
|
||||
maintainers: inetfuture mattupstate
|
||||
$modules/web_infrastructure/taiga_issue.py:
|
||||
maintainers: lekum
|
||||
$tests/a_module.py:
|
||||
maintainers: felixfontein
|
||||
#########################
|
||||
tests/:
|
||||
labels: tests
|
||||
@@ -1225,7 +1222,6 @@ macros:
|
||||
module_utils: plugins/module_utils
|
||||
modules: plugins/modules
|
||||
terminals: plugins/terminal
|
||||
tests: plugins/test
|
||||
team_ansible_core:
|
||||
team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross
|
||||
team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo
|
||||
@@ -1246,7 +1242,7 @@ macros:
|
||||
team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding
|
||||
team_oracle: manojmeda mross22 nalsaber
|
||||
team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
|
||||
team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06
|
||||
team_redfish: mraineri tomasg2012 xmadsen renxulei
|
||||
team_rhn: FlossWare alikins barnabycourt vritant
|
||||
team_scaleway: remyleone abarbare
|
||||
team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
|
||||
|
||||
14
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
14
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
@@ -62,20 +62,6 @@ body:
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Community.general Version
|
||||
description: >-
|
||||
Paste verbatim output from "ansible-galaxy collection list community.general"
|
||||
between tripple backticks.
|
||||
value: |
|
||||
```console (paste below)
|
||||
$ ansible-galaxy collection list community.general
|
||||
|
||||
```
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Configuration
|
||||
|
||||
14
.github/ISSUE_TEMPLATE/documentation_report.yml
vendored
14
.github/ISSUE_TEMPLATE/documentation_report.yml
vendored
@@ -62,20 +62,6 @@ body:
|
||||
validations:
|
||||
required: false
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Community.general Version
|
||||
description: >-
|
||||
Paste verbatim output from "ansible-galaxy collection list community.general"
|
||||
between tripple backticks.
|
||||
value: |
|
||||
```console (paste below)
|
||||
$ ansible-galaxy collection list community.general
|
||||
|
||||
```
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Configuration
|
||||
|
||||
2
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
2
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
@@ -21,7 +21,7 @@ body:
|
||||
placeholder: >-
|
||||
I am trying to do X with the collection from the main branch on GitHub and
|
||||
I think that implementing a feature Y would be very helpful for me and
|
||||
every other user of community.general because of Z.
|
||||
every other user of ansible-core because of Z.
|
||||
validations:
|
||||
required: true
|
||||
|
||||
|
||||
6
.github/dependabot.yml
vendored
6
.github/dependabot.yml
vendored
@@ -1,6 +0,0 @@
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
interval:
|
||||
schedule: "weekly"
|
||||
1768
CHANGELOG.rst
1768
CHANGELOG.rst
File diff suppressed because it is too large
Load Diff
@@ -24,7 +24,7 @@ Also, consider taking up a valuable, reviewed, but abandoned pull request which
|
||||
|
||||
* Try committing your changes with an informative but short commit message.
|
||||
* Do not squash your commits and force-push to your branch if not needed. Reviews of your pull request are much easier with individual commits to comprehend the pull request history. All commits of your pull request branch will be squashed into one commit by GitHub upon merge.
|
||||
* Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the repository checkout.
|
||||
* Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the respository checkout.
|
||||
* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#changelogs-how-to). (You must not include a fragment for new modules or new plugins, except for test and filter plugins. Also you shouldn't include one for docs-only changes. If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) )
|
||||
* Avoid reformatting unrelated parts of the codebase in your PR. These types of changes will likely be requested for reversion, create additional work for reviewers, and may cause approval to be delayed.
|
||||
|
||||
@@ -36,54 +36,6 @@ If you want to test a PR locally, refer to [our testing guide](https://github.co
|
||||
|
||||
If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it.
|
||||
|
||||
## Run sanity, unit or integration tests locally
|
||||
|
||||
You have to check out the repository into a specific path structure to be able to run `ansible-test`. The path to the git checkout must end with `.../ansible_collections/community/general`. Please see [our testing guide](https://github.com/ansible/community-docs/blob/main/test_pr_locally_guide.rst) for instructions on how to check out the repository into a correct path structure. The short version of these instructions is:
|
||||
|
||||
```.bash
|
||||
mkdir -p ~/dev/ansible_collections/community
|
||||
git clone https://github.com/ansible-collections/community.general.git ~/dev/ansible_collections/community/general
|
||||
cd ~/dev/ansible_collections/community/general
|
||||
```
|
||||
|
||||
Then you can run `ansible-test` (which is a part of [ansible-core](https://pypi.org/project/ansible-core/)) inside the checkout. The following example commands expect that you have installed Docker or Podman. Note that Podman has only been supported by more recent ansible-core releases. If you are using Docker, the following will work with Ansible 2.9+.
|
||||
|
||||
The following commands show how to run sanity tests:
|
||||
|
||||
```.bash
|
||||
# Run sanity tests for all files in the collection:
|
||||
ansible-test sanity --docker -v
|
||||
|
||||
# Run sanity tests for the given files and directories:
|
||||
ansible-test sanity --docker -v plugins/modules/system/pids.py tests/integration/targets/pids/
|
||||
```
|
||||
|
||||
The following commands show how to run unit tests:
|
||||
|
||||
```.bash
|
||||
# Run all unit tests:
|
||||
ansible-test units --docker -v
|
||||
|
||||
# Run all unit tests for one Python version (a lot faster):
|
||||
ansible-test units --docker -v --python 3.8
|
||||
|
||||
# Run a specific unit test (for the nmcli module) for one Python version:
|
||||
ansible-test units --docker -v --python 3.8 tests/unit/plugins/modules/net_tools/test_nmcli.py
|
||||
```
|
||||
|
||||
The following commands show how to run integration tests:
|
||||
|
||||
```.bash
|
||||
# Run integration tests for the interfaces_files module in a Docker container using the
|
||||
# fedora35 operating system image (the supported images depend on your ansible-core version):
|
||||
ansible-test integration --docker fedora35 -v interfaces_file
|
||||
|
||||
# Run integration tests for the flattened lookup **without any isolation**:
|
||||
ansible-test integration -v lookup_flattened
|
||||
```
|
||||
|
||||
If you are unsure about the integration test target name for a module or plugin, you can take a look in `tests/integration/targets/`. Tests for plugins have the plugin type prepended.
|
||||
|
||||
## Creating new modules or plugins
|
||||
|
||||
Creating new modules and plugins requires a bit more work than other Pull Requests.
|
||||
|
||||
12
README.md
12
README.md
@@ -1,6 +1,6 @@
|
||||
# Community General Collection
|
||||
|
||||
[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
|
||||
[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
|
||||
[](https://codecov.io/gh/ansible-collections/community.general)
|
||||
|
||||
This repository contains the `community.general` Ansible Collection. The collection is a part of the Ansible package and includes many modules and plugins supported by Ansible community which are not part of more specialized community collections.
|
||||
@@ -64,13 +64,13 @@ We are actively accepting new contributors.
|
||||
|
||||
All types of contributions are very welcome.
|
||||
|
||||
You don't know how to start? Refer to our [contribution guide](https://github.com/ansible-collections/community.general/blob/stable-4/CONTRIBUTING.md)!
|
||||
You don't know how to start? Refer to our [contribution guide](https://github.com/ansible-collections/community.general/blob/main/CONTRIBUTING.md)!
|
||||
|
||||
The current maintainers are listed in the [commit-rights.md](https://github.com/ansible-collections/community.general/blob/stable-4/commit-rights.md#people) file. If you have questions or need help, feel free to mention them in the proposals.
|
||||
The current maintainers are listed in the [commit-rights.md](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md#people) file. If you have questions or need help, feel free to mention them in the proposals.
|
||||
|
||||
You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).
|
||||
|
||||
Also for some notes specific to this collection see [our CONTRIBUTING documentation](https://github.com/ansible-collections/community.general/blob/stable-4/CONTRIBUTING.md).
|
||||
Also for some notes specific to this collection see [our CONTRIBUTING documentation](https://github.com/ansible-collections/community.general/blob/main/CONTRIBUTING.md).
|
||||
|
||||
### Running tests
|
||||
|
||||
@@ -80,7 +80,7 @@ See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collectio
|
||||
|
||||
To learn how to maintain / become a maintainer of this collection, refer to:
|
||||
|
||||
* [Committer guidelines](https://github.com/ansible-collections/community.general/blob/stable-4/commit-rights.md).
|
||||
* [Committer guidelines](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md).
|
||||
* [Maintainer guidelines](https://github.com/ansible/community-docs/blob/main/maintaining.rst).
|
||||
|
||||
It is necessary for maintainers of this collection to be subscribed to:
|
||||
@@ -108,7 +108,7 @@ See the [Releasing guidelines](https://github.com/ansible/community-docs/blob/ma
|
||||
|
||||
## Release notes
|
||||
|
||||
See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-4/CHANGELOG.rst).
|
||||
See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-3/CHANGELOG.rst).
|
||||
|
||||
## Roadmap
|
||||
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -3,4 +3,3 @@ sections:
|
||||
- title: Guides
|
||||
toctree:
|
||||
- filter_guide
|
||||
- test_guide
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
{% for i in examples %}
|
||||
{{ i.label }}
|
||||
|
||||
.. code-block:: {{ i.lang }}
|
||||
|
||||
{{ lookup('file', source_path ~ i.file)|indent(2) }}
|
||||
|
||||
{% endfor %}
|
||||
@@ -1,38 +0,0 @@
|
||||
---
|
||||
examples:
|
||||
- label: 'Example ``list_merge=replace`` (default):'
|
||||
file: example-003.yml
|
||||
lang: 'yaml+jinja'
|
||||
- label: 'This produces:'
|
||||
file: example-003.out
|
||||
lang: 'yaml'
|
||||
- label: 'Example ``list_merge=keep``:'
|
||||
file: example-004.yml
|
||||
lang: 'yaml+jinja'
|
||||
- label: 'This produces:'
|
||||
file: example-004.out
|
||||
lang: 'yaml'
|
||||
- label: 'Example ``list_merge=append``:'
|
||||
file: example-005.yml
|
||||
lang: 'yaml+jinja'
|
||||
- label: 'This produces:'
|
||||
file: example-005.out
|
||||
lang: 'yaml'
|
||||
- label: 'Example ``list_merge=prepend``:'
|
||||
file: example-006.yml
|
||||
lang: 'yaml+jinja'
|
||||
- label: 'This produces:'
|
||||
file: example-006.out
|
||||
lang: 'yaml'
|
||||
- label: 'Example ``list_merge=append_rp``:'
|
||||
file: example-007.yml
|
||||
lang: 'yaml+jinja'
|
||||
- label: 'This produces:'
|
||||
file: example-007.out
|
||||
lang: 'yaml'
|
||||
- label: 'Example ``list_merge=prepend_rp``:'
|
||||
file: example-008.yml
|
||||
lang: 'yaml+jinja'
|
||||
- label: 'This produces:'
|
||||
file: example-008.out
|
||||
lang: 'yaml'
|
||||
@@ -1,41 +0,0 @@
|
||||
---
|
||||
# The following runs all examples:
|
||||
#
|
||||
# ANSIBLE_STDOUT_CALLBACK=community.general.yaml ansible-playbook playbook.yml -e examples=true
|
||||
#
|
||||
# You need to copy the YAML output of example-XXX.yml into example-XXX.out.
|
||||
#
|
||||
# The following generates examples.rst out of the .out files:
|
||||
#
|
||||
# ansible-playbook playbook.yml -e template=true
|
||||
- hosts: localhost
|
||||
gather_facts: false
|
||||
vars:
|
||||
source_path: ../../rst/examples/lists_mergeby/
|
||||
tasks:
|
||||
|
||||
- block:
|
||||
- import_tasks: '{{ source_path }}example-001.yml'
|
||||
tags: t001
|
||||
- import_tasks: '{{ source_path }}example-002.yml'
|
||||
tags: t002
|
||||
- import_tasks: '{{ source_path }}example-003.yml'
|
||||
tags: t003
|
||||
- import_tasks: '{{ source_path }}example-004.yml'
|
||||
tags: t004
|
||||
- import_tasks: '{{ source_path }}example-005.yml'
|
||||
tags: t005
|
||||
- import_tasks: '{{ source_path }}example-006.yml'
|
||||
tags: t006
|
||||
- import_tasks: '{{ source_path }}example-007.yml'
|
||||
tags: t007
|
||||
- import_tasks: '{{ source_path }}example-008.yml'
|
||||
tags: t008
|
||||
when: examples|d(false)|bool
|
||||
|
||||
- block:
|
||||
- include_vars: examples.yml
|
||||
- template:
|
||||
src: examples.rst.j2
|
||||
dest: examples.rst
|
||||
when: template|d(false)|bool
|
||||
@@ -1,10 +0,0 @@
|
||||
list3:
|
||||
- extra: false
|
||||
name: bar
|
||||
- name: baz
|
||||
path: /baz
|
||||
- extra: true
|
||||
name: foo
|
||||
path: /foo
|
||||
- extra: true
|
||||
name: meh
|
||||
@@ -1,20 +0,0 @@
|
||||
---
|
||||
- name: Merge two lists by common attribute 'name'
|
||||
set_fact:
|
||||
list3: "{{ list1|
|
||||
community.general.lists_mergeby(list2, 'name') }}"
|
||||
vars:
|
||||
list1:
|
||||
- name: foo
|
||||
extra: true
|
||||
- name: bar
|
||||
extra: false
|
||||
- name: meh
|
||||
extra: true
|
||||
list2:
|
||||
- name: foo
|
||||
path: /foo
|
||||
- name: baz
|
||||
path: /baz
|
||||
- debug:
|
||||
var: list3
|
||||
@@ -1,10 +0,0 @@
|
||||
list3:
|
||||
- extra: false
|
||||
name: bar
|
||||
- name: baz
|
||||
path: /baz
|
||||
- extra: true
|
||||
name: foo
|
||||
path: /foo
|
||||
- extra: true
|
||||
name: meh
|
||||
@@ -1,20 +0,0 @@
|
||||
---
|
||||
- name: Merge two lists by common attribute 'name'
|
||||
set_fact:
|
||||
list3: "{{ [list1, list2]|
|
||||
community.general.lists_mergeby('name') }}"
|
||||
vars:
|
||||
list1:
|
||||
- name: foo
|
||||
extra: true
|
||||
- name: bar
|
||||
extra: false
|
||||
- name: meh
|
||||
extra: true
|
||||
list2:
|
||||
- name: foo
|
||||
path: /foo
|
||||
- name: baz
|
||||
path: /baz
|
||||
- debug:
|
||||
var: list3
|
||||
@@ -1,14 +0,0 @@
|
||||
list3:
|
||||
- name: myname01
|
||||
param01:
|
||||
list:
|
||||
- patch_value
|
||||
x: default_value
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
- name: myname02
|
||||
param01:
|
||||
- 3
|
||||
- 4
|
||||
- 4
|
||||
- key: value
|
||||
@@ -1,28 +0,0 @@
|
||||
---
|
||||
- name: Merge recursive by 'name', replace lists (default)
|
||||
set_fact:
|
||||
list3: "{{ [list1, list2]|
|
||||
community.general.lists_mergeby('name',
|
||||
recursive=true) }}"
|
||||
vars:
|
||||
list1:
|
||||
- name: myname01
|
||||
param01:
|
||||
x: default_value
|
||||
y: default_value
|
||||
list:
|
||||
- default_value
|
||||
- name: myname02
|
||||
param01: [1, 1, 2, 3]
|
||||
|
||||
list2:
|
||||
- name: myname01
|
||||
param01:
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
list:
|
||||
- patch_value
|
||||
- name: myname02
|
||||
param01: [3, 4, 4, {key: value}]
|
||||
- debug:
|
||||
var: list3
|
||||
@@ -1,14 +0,0 @@
|
||||
list3:
|
||||
- name: myname01
|
||||
param01:
|
||||
list:
|
||||
- default_value
|
||||
x: default_value
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
- name: myname02
|
||||
param01:
|
||||
- 1
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
@@ -1,29 +0,0 @@
|
||||
---
|
||||
- name: Merge recursive by 'name', keep lists
|
||||
set_fact:
|
||||
list3: "{{ [list1, list2]|
|
||||
community.general.lists_mergeby('name',
|
||||
recursive=true,
|
||||
list_merge='keep') }}"
|
||||
vars:
|
||||
list1:
|
||||
- name: myname01
|
||||
param01:
|
||||
x: default_value
|
||||
y: default_value
|
||||
list:
|
||||
- default_value
|
||||
- name: myname02
|
||||
param01: [1, 1, 2, 3]
|
||||
|
||||
list2:
|
||||
- name: myname01
|
||||
param01:
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
list:
|
||||
- patch_value
|
||||
- name: myname02
|
||||
param01: [3, 4, 4, {key: value}]
|
||||
- debug:
|
||||
var: list3
|
||||
@@ -1,19 +0,0 @@
|
||||
list3:
|
||||
- name: myname01
|
||||
param01:
|
||||
list:
|
||||
- default_value
|
||||
- patch_value
|
||||
x: default_value
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
- name: myname02
|
||||
param01:
|
||||
- 1
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 3
|
||||
- 4
|
||||
- 4
|
||||
- key: value
|
||||
@@ -1,29 +0,0 @@
|
||||
---
|
||||
- name: Merge recursive by 'name', append lists
|
||||
set_fact:
|
||||
list3: "{{ [list1, list2]|
|
||||
community.general.lists_mergeby('name',
|
||||
recursive=true,
|
||||
list_merge='append') }}"
|
||||
vars:
|
||||
list1:
|
||||
- name: myname01
|
||||
param01:
|
||||
x: default_value
|
||||
y: default_value
|
||||
list:
|
||||
- default_value
|
||||
- name: myname02
|
||||
param01: [1, 1, 2, 3]
|
||||
|
||||
list2:
|
||||
- name: myname01
|
||||
param01:
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
list:
|
||||
- patch_value
|
||||
- name: myname02
|
||||
param01: [3, 4, 4, {key: value}]
|
||||
- debug:
|
||||
var: list3
|
||||
@@ -1,19 +0,0 @@
|
||||
list3:
|
||||
- name: myname01
|
||||
param01:
|
||||
list:
|
||||
- patch_value
|
||||
- default_value
|
||||
x: default_value
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
- name: myname02
|
||||
param01:
|
||||
- 3
|
||||
- 4
|
||||
- 4
|
||||
- key: value
|
||||
- 1
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
@@ -1,29 +0,0 @@
|
||||
---
|
||||
- name: Merge recursive by 'name', prepend lists
|
||||
set_fact:
|
||||
list3: "{{ [list1, list2]|
|
||||
community.general.lists_mergeby('name',
|
||||
recursive=true,
|
||||
list_merge='prepend') }}"
|
||||
vars:
|
||||
list1:
|
||||
- name: myname01
|
||||
param01:
|
||||
x: default_value
|
||||
y: default_value
|
||||
list:
|
||||
- default_value
|
||||
- name: myname02
|
||||
param01: [1, 1, 2, 3]
|
||||
|
||||
list2:
|
||||
- name: myname01
|
||||
param01:
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
list:
|
||||
- patch_value
|
||||
- name: myname02
|
||||
param01: [3, 4, 4, {key: value}]
|
||||
- debug:
|
||||
var: list3
|
||||
@@ -1,18 +0,0 @@
|
||||
list3:
|
||||
- name: myname01
|
||||
param01:
|
||||
list:
|
||||
- default_value
|
||||
- patch_value
|
||||
x: default_value
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
- name: myname02
|
||||
param01:
|
||||
- 1
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
- 4
|
||||
- key: value
|
||||
@@ -1,29 +0,0 @@
|
||||
---
|
||||
- name: Merge recursive by 'name', append lists 'remove present'
|
||||
set_fact:
|
||||
list3: "{{ [list1, list2]|
|
||||
community.general.lists_mergeby('name',
|
||||
recursive=true,
|
||||
list_merge='append_rp') }}"
|
||||
vars:
|
||||
list1:
|
||||
- name: myname01
|
||||
param01:
|
||||
x: default_value
|
||||
y: default_value
|
||||
list:
|
||||
- default_value
|
||||
- name: myname02
|
||||
param01: [1, 1, 2, 3]
|
||||
|
||||
list2:
|
||||
- name: myname01
|
||||
param01:
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
list:
|
||||
- patch_value
|
||||
- name: myname02
|
||||
param01: [3, 4, 4, {key: value}]
|
||||
- debug:
|
||||
var: list3
|
||||
@@ -1,18 +0,0 @@
|
||||
list3:
|
||||
- name: myname01
|
||||
param01:
|
||||
list:
|
||||
- patch_value
|
||||
- default_value
|
||||
x: default_value
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
- name: myname02
|
||||
param01:
|
||||
- 3
|
||||
- 4
|
||||
- 4
|
||||
- key: value
|
||||
- 1
|
||||
- 1
|
||||
- 2
|
||||
@@ -1,29 +0,0 @@
|
||||
---
|
||||
- name: Merge recursive by 'name', prepend lists 'remove present'
|
||||
set_fact:
|
||||
list3: "{{ [list1, list2]|
|
||||
community.general.lists_mergeby('name',
|
||||
recursive=true,
|
||||
list_merge='prepend_rp') }}"
|
||||
vars:
|
||||
list1:
|
||||
- name: myname01
|
||||
param01:
|
||||
x: default_value
|
||||
y: default_value
|
||||
list:
|
||||
- default_value
|
||||
- name: myname02
|
||||
param01: [1, 1, 2, 3]
|
||||
|
||||
list2:
|
||||
- name: myname01
|
||||
param01:
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
list:
|
||||
- patch_value
|
||||
- name: myname02
|
||||
param01: [3, 4, 4, {key: value}]
|
||||
- debug:
|
||||
var: list3
|
||||
@@ -1,4 +1,3 @@
|
||||
|
||||
.. _ansible_collections.community.general.docsite.filter_guide:
|
||||
|
||||
community.general Filter Guide
|
||||
@@ -6,14 +5,780 @@ community.general Filter Guide
|
||||
|
||||
The :ref:`community.general collection <plugins_in_community.general>` offers several useful filter plugins.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
.. contents:: Topics
|
||||
|
||||
filter_guide_paths
|
||||
filter_guide_abstract_informations
|
||||
filter_guide_working_with_times
|
||||
filter_guide_working_with_versions
|
||||
filter_guide_creating_identifiers
|
||||
filter_guide_conversions
|
||||
filter_guide_selecting_json_data
|
||||
filter_guide_working_with_unicode
|
||||
Paths
|
||||
-----
|
||||
|
||||
The ``path_join`` filter has been added in ansible-base 2.10. If you want to use this filter, but also need to support Ansible 2.9, you can use ``community.general``'s ``path_join`` shim, ``community.general.path_join``. This filter redirects to ``path_join`` for ansible-base 2.10 and ansible-core 2.11 or newer, and re-implements the filter for Ansible 2.9.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
# ansible-base 2.10 or newer:
|
||||
path: {{ ('/etc', path, 'subdir', file) | path_join }}
|
||||
|
||||
# Also works with Ansible 2.9:
|
||||
path: {{ ('/etc', path, 'subdir', file) | community.general.path_join }}
|
||||
|
||||
.. versionadded:: 3.0.0
|
||||
|
||||
Abstract transformations
|
||||
------------------------
|
||||
|
||||
Dictionaries
|
||||
^^^^^^^^^^^^
|
||||
|
||||
You can use the ``dict_kv`` filter to create a single-entry dictionary with ``value | community.general.dict_kv(key)``:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Create a single-entry dictionary
|
||||
debug:
|
||||
msg: "{{ myvar | community.general.dict_kv('thatsmyvar') }}"
|
||||
vars:
|
||||
myvar: myvalue
|
||||
|
||||
- name: Create a list of dictionaries where the 'server' field is taken from a list
|
||||
debug:
|
||||
msg: >-
|
||||
{{ myservers | map('community.general.dict_kv', 'server')
|
||||
| map('combine', common_config) }}
|
||||
vars:
|
||||
common_config:
|
||||
type: host
|
||||
database: all
|
||||
myservers:
|
||||
- server1
|
||||
- server2
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Create a single-entry dictionary] **************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": {
|
||||
"thatsmyvar": "myvalue"
|
||||
}
|
||||
}
|
||||
|
||||
TASK [Create a list of dictionaries where the 'server' field is taken from a list] *******
|
||||
ok: [localhost] => {
|
||||
"msg": [
|
||||
{
|
||||
"database": "all",
|
||||
"server": "server1",
|
||||
"type": "host"
|
||||
},
|
||||
{
|
||||
"database": "all",
|
||||
"server": "server2",
|
||||
"type": "host"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
.. versionadded:: 2.0.0
|
||||
|
||||
If you need to convert a list of key-value pairs to a dictionary, you can use the ``dict`` function. Unfortunately, this function cannot be used with ``map``. For this, the ``community.general.dict`` filter can be used:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Create a dictionary with the dict function
|
||||
debug:
|
||||
msg: "{{ dict([[1, 2], ['a', 'b']]) }}"
|
||||
|
||||
- name: Create a dictionary with the community.general.dict filter
|
||||
debug:
|
||||
msg: "{{ [[1, 2], ['a', 'b']] | community.general.dict }}"
|
||||
|
||||
- name: Create a list of dictionaries with map and the community.general.dict filter
|
||||
debug:
|
||||
msg: >-
|
||||
{{ values | map('zip', ['k1', 'k2', 'k3'])
|
||||
| map('map', 'reverse')
|
||||
| map('community.general.dict') }}
|
||||
vars:
|
||||
values:
|
||||
- - foo
|
||||
- 23
|
||||
- a
|
||||
- - bar
|
||||
- 42
|
||||
- b
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Create a dictionary with the dict function] ****************************************
|
||||
ok: [localhost] => {
|
||||
"msg": {
|
||||
"1": 2,
|
||||
"a": "b"
|
||||
}
|
||||
}
|
||||
|
||||
TASK [Create a dictionary with the community.general.dict filter] ************************
|
||||
ok: [localhost] => {
|
||||
"msg": {
|
||||
"1": 2,
|
||||
"a": "b"
|
||||
}
|
||||
}
|
||||
|
||||
TASK [Create a list of dictionaries with map and the community.general.dict filter] ******
|
||||
ok: [localhost] => {
|
||||
"msg": [
|
||||
{
|
||||
"k1": "foo",
|
||||
"k2": 23,
|
||||
"k3": "a"
|
||||
},
|
||||
{
|
||||
"k1": "bar",
|
||||
"k2": 42,
|
||||
"k3": "b"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
.. versionadded:: 3.0.0
|
||||
|
||||
Grouping
|
||||
^^^^^^^^
|
||||
|
||||
If you have a list of dictionaries, the Jinja2 ``groupby`` filter allows to group the list by an attribute. This results in a list of ``(grouper, list)`` namedtuples, where ``list`` contains all dictionaries where the selected attribute equals ``grouper``. If you know that for every ``grouper``, there will be a most one entry in that list, you can use the ``community.general.groupby_as_dict`` filter to convert the original list into a dictionary which maps ``grouper`` to the corresponding dictionary.
|
||||
|
||||
One example is ``ansible_facts.mounts``, which is a list of dictionaries where each has one ``device`` element to indicate the device which is mounted. Therefore, ``ansible_facts.mounts | community.general.groupby_as_dict('device')`` is a dictionary mapping a device to the mount information:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Output mount facts grouped by device name
|
||||
debug:
|
||||
var: ansible_facts.mounts | community.general.groupby_as_dict('device')
|
||||
|
||||
- name: Output mount facts grouped by mount point
|
||||
debug:
|
||||
var: ansible_facts.mounts | community.general.groupby_as_dict('mount')
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Output mount facts grouped by device name] ******************************************
|
||||
ok: [localhost] => {
|
||||
"ansible_facts.mounts | community.general.groupby_as_dict('device')": {
|
||||
"/dev/sda1": {
|
||||
"block_available": 2000,
|
||||
"block_size": 4096,
|
||||
"block_total": 2345,
|
||||
"block_used": 345,
|
||||
"device": "/dev/sda1",
|
||||
"fstype": "ext4",
|
||||
"inode_available": 500,
|
||||
"inode_total": 512,
|
||||
"inode_used": 12,
|
||||
"mount": "/boot",
|
||||
"options": "rw,relatime,data=ordered",
|
||||
"size_available": 56821,
|
||||
"size_total": 543210,
|
||||
"uuid": "ab31cade-d9c1-484d-8482-8a4cbee5241a"
|
||||
},
|
||||
"/dev/sda2": {
|
||||
"block_available": 1234,
|
||||
"block_size": 4096,
|
||||
"block_total": 12345,
|
||||
"block_used": 11111,
|
||||
"device": "/dev/sda2",
|
||||
"fstype": "ext4",
|
||||
"inode_available": 1111,
|
||||
"inode_total": 1234,
|
||||
"inode_used": 123,
|
||||
"mount": "/",
|
||||
"options": "rw,relatime",
|
||||
"size_available": 42143,
|
||||
"size_total": 543210,
|
||||
"uuid": "abcdef01-2345-6789-0abc-def012345678"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TASK [Output mount facts grouped by mount point] ******************************************
|
||||
ok: [localhost] => {
|
||||
"ansible_facts.mounts | community.general.groupby_as_dict('mount')": {
|
||||
"/": {
|
||||
"block_available": 1234,
|
||||
"block_size": 4096,
|
||||
"block_total": 12345,
|
||||
"block_used": 11111,
|
||||
"device": "/dev/sda2",
|
||||
"fstype": "ext4",
|
||||
"inode_available": 1111,
|
||||
"inode_total": 1234,
|
||||
"inode_used": 123,
|
||||
"mount": "/",
|
||||
"options": "rw,relatime",
|
||||
"size_available": 42143,
|
||||
"size_total": 543210,
|
||||
"uuid": "bdf50b7d-4859-40af-8665-c637ee7a7808"
|
||||
},
|
||||
"/boot": {
|
||||
"block_available": 2000,
|
||||
"block_size": 4096,
|
||||
"block_total": 2345,
|
||||
"block_used": 345,
|
||||
"device": "/dev/sda1",
|
||||
"fstype": "ext4",
|
||||
"inode_available": 500,
|
||||
"inode_total": 512,
|
||||
"inode_used": 12,
|
||||
"mount": "/boot",
|
||||
"options": "rw,relatime,data=ordered",
|
||||
"size_available": 56821,
|
||||
"size_total": 543210,
|
||||
"uuid": "ab31cade-d9c1-484d-8482-8a4cbee5241a"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
.. versionadded: 3.0.0
|
||||
|
||||
Merging lists of dictionaries
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If you have two lists of dictionaries and want to combine them into a list of merged dictionaries, where two dictionaries are merged if they coincide in one attribute, you can use the ``lists_mergeby`` filter.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Merge two lists by common attribute 'name'
|
||||
debug:
|
||||
var: list1 | community.general.lists_mergeby(list2, 'name')
|
||||
vars:
|
||||
list1:
|
||||
- name: foo
|
||||
extra: true
|
||||
- name: bar
|
||||
extra: false
|
||||
- name: meh
|
||||
extra: true
|
||||
list2:
|
||||
- name: foo
|
||||
path: /foo
|
||||
- name: baz
|
||||
path: /bazzz
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Merge two lists by common attribute 'name'] ****************************************
|
||||
ok: [localhost] => {
|
||||
"list1 | community.general.lists_mergeby(list2, 'name')": [
|
||||
{
|
||||
"extra": false,
|
||||
"name": "bar"
|
||||
},
|
||||
{
|
||||
"name": "baz",
|
||||
"path": "/bazzz"
|
||||
},
|
||||
{
|
||||
"extra": true,
|
||||
"name": "foo",
|
||||
"path": "/foo"
|
||||
},
|
||||
{
|
||||
"extra": true,
|
||||
"name": "meh"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
.. versionadded: 2.0.0
|
||||
|
||||
Working with times
|
||||
------------------
|
||||
|
||||
The ``to_time_unit`` filter allows to convert times from a human-readable string to a unit. For example, ``'4h 30min 12second' | community.general.to_time_unit('hour')`` gives the number of hours that correspond to 4 hours, 30 minutes and 12 seconds.
|
||||
|
||||
There are shorthands to directly convert to various units, like ``to_hours``, ``to_minutes``, ``to_seconds``, and so on. The following table lists all units that can be used:
|
||||
|
||||
.. list-table:: Units
|
||||
:widths: 25 25 25 25
|
||||
:header-rows: 1
|
||||
|
||||
* - Unit name
|
||||
- Unit value in seconds
|
||||
- Unit strings for filter
|
||||
- Shorthand filter
|
||||
* - Millisecond
|
||||
- 1/1000 second
|
||||
- ``ms``, ``millisecond``, ``milliseconds``, ``msec``, ``msecs``, ``msecond``, ``mseconds``
|
||||
- ``to_milliseconds``
|
||||
* - Second
|
||||
- 1 second
|
||||
- ``s``, ``sec``, ``secs``, ``second``, ``seconds``
|
||||
- ``to_seconds``
|
||||
* - Minute
|
||||
- 60 seconds
|
||||
- ``m``, ``min``, ``mins``, ``minute``, ``minutes``
|
||||
- ``to_minutes``
|
||||
* - Hour
|
||||
- 60*60 seconds
|
||||
- ``h``, ``hour``, ``hours``
|
||||
- ``to_hours``
|
||||
* - Day
|
||||
- 24*60*60 seconds
|
||||
- ``d``, ``day``, ``days``
|
||||
- ``to_days``
|
||||
* - Week
|
||||
- 7*24*60*60 seconds
|
||||
- ``w``, ``week``, ``weeks``
|
||||
- ``to_weeks``
|
||||
* - Month
|
||||
- 30*24*60*60 seconds
|
||||
- ``mo``, ``month``, ``months``
|
||||
- ``to_months``
|
||||
* - Year
|
||||
- 365*24*60*60 seconds
|
||||
- ``y``, ``year``, ``years``
|
||||
- ``to_years``
|
||||
|
||||
Note that months and years are using a simplified representation: a month is 30 days, and a year is 365 days. If you need different definitions of months or years, you can pass them as keyword arguments. For example, if you want a year to be 365.25 days, and a month to be 30.5 days, you can write ``'11months 4' | community.general.to_years(year=365.25, month=30.5)``. These keyword arguments can be specified to ``to_time_unit`` and to all shorthand filters.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Convert string to seconds
|
||||
debug:
|
||||
msg: "{{ '30h 20m 10s 123ms' | community.general.to_time_unit('seconds') }}"
|
||||
|
||||
- name: Convert string to hours
|
||||
debug:
|
||||
msg: "{{ '30h 20m 10s 123ms' | community.general.to_hours }}"
|
||||
|
||||
- name: Convert string to years (using 365.25 days == 1 year)
|
||||
debug:
|
||||
msg: "{{ '400d 15h' | community.general.to_years(year=365.25) }}"
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Convert string to seconds] **********************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": "109210.123"
|
||||
}
|
||||
|
||||
TASK [Convert string to hours] ************************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": "30.336145277778"
|
||||
}
|
||||
|
||||
TASK [Convert string to years (using 365.25 days == 1 year)] ******************************
|
||||
ok: [localhost] => {
|
||||
"msg": "1.096851471595"
|
||||
}
|
||||
|
||||
.. versionadded: 0.2.0
|
||||
|
||||
Working with versions
|
||||
---------------------
|
||||
|
||||
If you need to sort a list of version numbers, the Jinja ``sort`` filter is problematic. Since it sorts lexicographically, ``2.10`` will come before ``2.9``. To treat version numbers correctly, you can use the ``version_sort`` filter:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Sort list by version number
|
||||
debug:
|
||||
var: ansible_versions | community.general.version_sort
|
||||
vars:
|
||||
ansible_versions:
|
||||
- '2.8.0'
|
||||
- '2.11.0'
|
||||
- '2.7.0'
|
||||
- '2.10.0'
|
||||
- '2.9.0'
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Sort list by version number] ********************************************************
|
||||
ok: [localhost] => {
|
||||
"ansible_versions | community.general.version_sort": [
|
||||
"2.7.0",
|
||||
"2.8.0",
|
||||
"2.9.0",
|
||||
"2.10.0",
|
||||
"2.11.0"
|
||||
]
|
||||
}
|
||||
|
||||
.. versionadded: 2.2.0
|
||||
|
||||
Creating identifiers
|
||||
--------------------
|
||||
|
||||
The following filters allow to create identifiers.
|
||||
|
||||
Hashids
|
||||
^^^^^^^
|
||||
|
||||
`Hashids <https://hashids.org/>`_ allow to convert sequences of integers to short unique string identifiers. This filter needs the `hashids Python library <https://pypi.org/project/hashids/>`_ installed on the controller.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: "Create hashid"
|
||||
debug:
|
||||
msg: "{{ [1234, 5, 6] | community.general.hashids_encode }}"
|
||||
|
||||
- name: "Decode hashid"
|
||||
debug:
|
||||
msg: "{{ 'jm2Cytn' | community.general.hashids_decode }}"
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Create hashid] **********************************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": "jm2Cytn"
|
||||
}
|
||||
|
||||
TASK [Decode hashid] **********************************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": [
|
||||
1234,
|
||||
5,
|
||||
6
|
||||
]
|
||||
}
|
||||
|
||||
The hashids filters accept keyword arguments to allow fine-tuning the hashids generated:
|
||||
|
||||
:salt: String to use as salt when hashing.
|
||||
:alphabet: String of 16 or more unique characters to produce a hash.
|
||||
:min_length: Minimum length of hash produced.
|
||||
|
||||
.. versionadded: 3.0.0
|
||||
|
||||
Random MACs
|
||||
^^^^^^^^^^^
|
||||
|
||||
You can use the ``random_mac`` filter to complete a partial `MAC address <https://en.wikipedia.org/wiki/MAC_address>`_ to a random 6-byte MAC address.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: "Create a random MAC starting with ff:"
|
||||
debug:
|
||||
msg: "{{ 'FF' | community.general.random_mac }}"
|
||||
|
||||
- name: "Create a random MAC starting with 00:11:22:"
|
||||
debug:
|
||||
msg: "{{ '00:11:22' | community.general.random_mac }}"
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Create a random MAC starting with ff:] **********************************************
|
||||
ok: [localhost] => {
|
||||
"msg": "ff:69:d3:78:7f:b4"
|
||||
}
|
||||
|
||||
TASK [Create a random MAC starting with 00:11:22:] ****************************************
|
||||
ok: [localhost] => {
|
||||
"msg": "00:11:22:71:5d:3b"
|
||||
}
|
||||
|
||||
You can also initialize the random number generator from a seed to create random-but-idempotent MAC addresses:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
"{{ '52:54:00' | community.general.random_mac(seed=inventory_hostname) }}"
|
||||
|
||||
Conversions
|
||||
-----------
|
||||
|
||||
Parsing CSV files
|
||||
^^^^^^^^^^^^^^^^^
|
||||
|
||||
Ansible offers the :ref:`community.general.read_csv module <ansible_collections.community.general.read_csv_module>` to read CSV files. Sometimes you need to convert strings to CSV files instead. For this, the ``from_csv`` filter exists.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: "Parse CSV from string"
|
||||
debug:
|
||||
msg: "{{ csv_string | community.general.from_csv }}"
|
||||
vars:
|
||||
csv_string: |
|
||||
foo,bar,baz
|
||||
1,2,3
|
||||
you,this,then
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Parse CSV from string] **************************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": [
|
||||
{
|
||||
"bar": "2",
|
||||
"baz": "3",
|
||||
"foo": "1"
|
||||
},
|
||||
{
|
||||
"bar": "this",
|
||||
"baz": "then",
|
||||
"foo": "you"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
The ``from_csv`` filter has several keyword arguments to control its behavior:
|
||||
|
||||
:dialect: Dialect of the CSV file. Default is ``excel``. Other possible choices are ``excel-tab`` and ``unix``. If one of ``delimiter``, ``skipinitialspace`` or ``strict`` is specified, ``dialect`` is ignored.
|
||||
:fieldnames: A set of column names to use. If not provided, the first line of the CSV is assumed to contain the column names.
|
||||
:delimiter: Sets the delimiter to use. Default depends on the dialect used.
|
||||
:skipinitialspace: Set to ``true`` to ignore space directly after the delimiter. Default depends on the dialect used (usually ``false``).
|
||||
:strict: Set to ``true`` to error out on invalid CSV input.
|
||||
|
||||
.. versionadded: 3.0.0
|
||||
|
||||
Converting to JSON
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
`JC <https://pypi.org/project/jc/>`_ is a CLI tool and Python library which allows to interpret output of various CLI programs as JSON. It is also available as a filter in community.general. This filter needs the `jc Python library <https://pypi.org/project/jc/>`_ installed on the controller.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Run 'ls' to list files in /
|
||||
command: ls /
|
||||
register: result
|
||||
|
||||
- name: Parse the ls output
|
||||
debug:
|
||||
msg: "{{ result.stdout | community.general.jc('ls') }}"
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Run 'ls' to list files in /] ********************************************************
|
||||
changed: [localhost]
|
||||
|
||||
TASK [Parse the ls output] ****************************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": [
|
||||
{
|
||||
"filename": "bin"
|
||||
},
|
||||
{
|
||||
"filename": "boot"
|
||||
},
|
||||
{
|
||||
"filename": "dev"
|
||||
},
|
||||
{
|
||||
"filename": "etc"
|
||||
},
|
||||
{
|
||||
"filename": "home"
|
||||
},
|
||||
{
|
||||
"filename": "lib"
|
||||
},
|
||||
{
|
||||
"filename": "proc"
|
||||
},
|
||||
{
|
||||
"filename": "root"
|
||||
},
|
||||
{
|
||||
"filename": "run"
|
||||
},
|
||||
{
|
||||
"filename": "tmp"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
.. versionadded: 2.0.0
|
||||
|
||||
.. _ansible_collections.community.general.docsite.json_query_filter:
|
||||
|
||||
Selecting JSON data: JSON queries
|
||||
---------------------------------
|
||||
|
||||
To select a single element or a data subset from a complex data structure in JSON format (for example, Ansible facts), use the ``json_query`` filter. The ``json_query`` filter lets you query a complex JSON structure and iterate over it using a loop structure.
|
||||
|
||||
.. note:: You must manually install the **jmespath** dependency on the Ansible controller before using this filter. This filter is built upon **jmespath**, and you can use the same syntax. For examples, see `jmespath examples <http://jmespath.org/examples.html>`_.
|
||||
|
||||
Consider this data structure:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
{
|
||||
"domain_definition": {
|
||||
"domain": {
|
||||
"cluster": [
|
||||
{
|
||||
"name": "cluster1"
|
||||
},
|
||||
{
|
||||
"name": "cluster2"
|
||||
}
|
||||
],
|
||||
"server": [
|
||||
{
|
||||
"name": "server11",
|
||||
"cluster": "cluster1",
|
||||
"port": "8080"
|
||||
},
|
||||
{
|
||||
"name": "server12",
|
||||
"cluster": "cluster1",
|
||||
"port": "8090"
|
||||
},
|
||||
{
|
||||
"name": "server21",
|
||||
"cluster": "cluster2",
|
||||
"port": "9080"
|
||||
},
|
||||
{
|
||||
"name": "server22",
|
||||
"cluster": "cluster2",
|
||||
"port": "9090"
|
||||
}
|
||||
],
|
||||
"library": [
|
||||
{
|
||||
"name": "lib1",
|
||||
"target": "cluster1"
|
||||
},
|
||||
{
|
||||
"name": "lib2",
|
||||
"target": "cluster2"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
To extract all clusters from this structure, you can use the following query:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all cluster names
|
||||
ansible.builtin.debug:
|
||||
var: item
|
||||
loop: "{{ domain_definition | community.general.json_query('domain.cluster[*].name') }}"
|
||||
|
||||
To extract all server names:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all server names
|
||||
ansible.builtin.debug:
|
||||
var: item
|
||||
loop: "{{ domain_definition | community.general.json_query('domain.server[*].name') }}"
|
||||
|
||||
To extract ports from cluster1:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all ports from cluster1
|
||||
ansible.builtin.debug:
|
||||
var: item
|
||||
loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}"
|
||||
vars:
|
||||
server_name_cluster1_query: "domain.server[?cluster=='cluster1'].port"
|
||||
|
||||
.. note:: You can use a variable to make the query more readable.
|
||||
|
||||
To print out the ports from cluster1 in a comma separated string:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all ports from cluster1 as a string
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ domain_definition | community.general.json_query('domain.server[?cluster==`cluster1`].port') | join(', ') }}"
|
||||
|
||||
.. note:: In the example above, quoting literals using backticks avoids escaping quotes and maintains readability.
|
||||
|
||||
You can use YAML `single quote escaping <https://yaml.org/spec/current.html#id2534365>`_:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all ports from cluster1
|
||||
ansible.builtin.debug:
|
||||
var: item
|
||||
loop: "{{ domain_definition | community.general.json_query('domain.server[?cluster==''cluster1''].port') }}"
|
||||
|
||||
.. note:: Escaping single quotes within single quotes in YAML is done by doubling the single quote.
|
||||
|
||||
To get a hash map with all ports and names of a cluster:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all server ports and names from cluster1
|
||||
ansible.builtin.debug:
|
||||
var: item
|
||||
loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}"
|
||||
vars:
|
||||
server_name_cluster1_query: "domain.server[?cluster=='cluster2'].{name: name, port: port}"
|
||||
|
||||
To extract ports from all clusters with name starting with 'server1':
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all ports from cluster1
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}"
|
||||
vars:
|
||||
server_name_query: "domain.server[?starts_with(name,'server1')].port"
|
||||
|
||||
To extract ports from all clusters with name containing 'server1':
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all ports from cluster1
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}"
|
||||
vars:
|
||||
server_name_query: "domain.server[?contains(name,'server1')].port"
|
||||
|
||||
.. note:: while using ``starts_with`` and ``contains``, you have to use `` to_json | from_json `` filter for correct parsing of data structure.
|
||||
|
||||
Working with Unicode
|
||||
---------------------
|
||||
|
||||
`Unicode <https://unicode.org/main.html>`_ makes it possible to produce two strings which may be visually equivalent, but are comprised of distinctly different characters/character sequences. To address this ``Unicode`` defines `normalization forms <https://unicode.org/reports/tr15/>`_ which avoid these distinctions by choosing a unique character sequence for a given visual representation.
|
||||
|
||||
You can use the ``community.general.unicode_normalize`` filter to normalize ``Unicode`` strings within your playbooks.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Compare Unicode representations
|
||||
debug:
|
||||
msg: "{{ with_combining_character | community.general.unicode_normalize == without_combining_character }}"
|
||||
vars:
|
||||
with_combining_character: "{{ 'Mayagu\u0308ez' }}"
|
||||
without_combining_character: Mayagüez
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Compare Unicode representations] ********************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": true
|
||||
}
|
||||
|
||||
The ``community.general.unicode_normalize`` filter accepts a keyword argument to select the ``Unicode`` form used to normalize the input string.
|
||||
|
||||
:form: One of ``'NFC'`` (default), ``'NFD'``, ``'NFKC'``, or ``'NFKD'``. See the `Unicode reference <https://unicode.org/reports/tr15/>`_ for more information.
|
||||
|
||||
.. versionadded:: 3.7.0
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
Abstract transformations
|
||||
------------------------
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
filter_guide_abstract_informations_dictionaries
|
||||
filter_guide_abstract_informations_grouping
|
||||
filter_guide_abstract_informations_merging_lists_of_dictionaries
|
||||
filter_guide_abstract_informations_counting_elements_in_sequence
|
||||
@@ -1,77 +0,0 @@
|
||||
Counting elements in a sequence
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The ``community.general.counter`` filter plugin allows you to count (hashable) elements in a sequence. Elements are returned as dictionary keys and their counts are stored as dictionary values.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Count character occurrences in a string
|
||||
debug:
|
||||
msg: "{{ 'abccbaabca' | community.general.counter }}"
|
||||
|
||||
- name: Count items in a list
|
||||
debug:
|
||||
msg: "{{ ['car', 'car', 'bike', 'plane', 'bike'] | community.general.counter }}"
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Count character occurrences in a string] ********************************************
|
||||
ok: [localhost] => {
|
||||
"msg": {
|
||||
"a": 4,
|
||||
"b": 3,
|
||||
"c": 3
|
||||
}
|
||||
}
|
||||
|
||||
TASK [Count items in a list] **************************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": {
|
||||
"bike": 2,
|
||||
"car": 2,
|
||||
"plane": 1
|
||||
}
|
||||
}
|
||||
|
||||
This plugin is useful for selecting resources based on current allocation:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Get ID of SCSI controller(s) with less than 4 disks attached and choose the one with the least disks
|
||||
debug:
|
||||
msg: >-
|
||||
{{
|
||||
( disks | dict2items | map(attribute='value.adapter') | list
|
||||
| community.general.counter | dict2items
|
||||
| rejectattr('value', '>=', 4) | sort(attribute='value') | first
|
||||
).key
|
||||
}}
|
||||
vars:
|
||||
disks:
|
||||
sda:
|
||||
adapter: scsi_1
|
||||
sdb:
|
||||
adapter: scsi_1
|
||||
sdc:
|
||||
adapter: scsi_1
|
||||
sdd:
|
||||
adapter: scsi_1
|
||||
sde:
|
||||
adapter: scsi_2
|
||||
sdf:
|
||||
adapter: scsi_3
|
||||
sdg:
|
||||
adapter: scsi_3
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Get ID of SCSI controller(s) with less than 4 disks attached and choose the one with the least disks]
|
||||
ok: [localhost] => {
|
||||
"msg": "scsi_2"
|
||||
}
|
||||
|
||||
.. versionadded:: 4.3.0
|
||||
@@ -1,119 +0,0 @@
|
||||
Dictionaries
|
||||
^^^^^^^^^^^^
|
||||
|
||||
You can use the ``dict_kv`` filter to create a single-entry dictionary with ``value | community.general.dict_kv(key)``:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Create a single-entry dictionary
|
||||
debug:
|
||||
msg: "{{ myvar | community.general.dict_kv('thatsmyvar') }}"
|
||||
vars:
|
||||
myvar: myvalue
|
||||
|
||||
- name: Create a list of dictionaries where the 'server' field is taken from a list
|
||||
debug:
|
||||
msg: >-
|
||||
{{ myservers | map('community.general.dict_kv', 'server')
|
||||
| map('combine', common_config) }}
|
||||
vars:
|
||||
common_config:
|
||||
type: host
|
||||
database: all
|
||||
myservers:
|
||||
- server1
|
||||
- server2
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Create a single-entry dictionary] **************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": {
|
||||
"thatsmyvar": "myvalue"
|
||||
}
|
||||
}
|
||||
|
||||
TASK [Create a list of dictionaries where the 'server' field is taken from a list] *******
|
||||
ok: [localhost] => {
|
||||
"msg": [
|
||||
{
|
||||
"database": "all",
|
||||
"server": "server1",
|
||||
"type": "host"
|
||||
},
|
||||
{
|
||||
"database": "all",
|
||||
"server": "server2",
|
||||
"type": "host"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
.. versionadded:: 2.0.0
|
||||
|
||||
If you need to convert a list of key-value pairs to a dictionary, you can use the ``dict`` function. Unfortunately, this function cannot be used with ``map``. For this, the ``community.general.dict`` filter can be used:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Create a dictionary with the dict function
|
||||
debug:
|
||||
msg: "{{ dict([[1, 2], ['a', 'b']]) }}"
|
||||
|
||||
- name: Create a dictionary with the community.general.dict filter
|
||||
debug:
|
||||
msg: "{{ [[1, 2], ['a', 'b']] | community.general.dict }}"
|
||||
|
||||
- name: Create a list of dictionaries with map and the community.general.dict filter
|
||||
debug:
|
||||
msg: >-
|
||||
{{ values | map('zip', ['k1', 'k2', 'k3'])
|
||||
| map('map', 'reverse')
|
||||
| map('community.general.dict') }}
|
||||
vars:
|
||||
values:
|
||||
- - foo
|
||||
- 23
|
||||
- a
|
||||
- - bar
|
||||
- 42
|
||||
- b
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Create a dictionary with the dict function] ****************************************
|
||||
ok: [localhost] => {
|
||||
"msg": {
|
||||
"1": 2,
|
||||
"a": "b"
|
||||
}
|
||||
}
|
||||
|
||||
TASK [Create a dictionary with the community.general.dict filter] ************************
|
||||
ok: [localhost] => {
|
||||
"msg": {
|
||||
"1": 2,
|
||||
"a": "b"
|
||||
}
|
||||
}
|
||||
|
||||
TASK [Create a list of dictionaries with map and the community.general.dict filter] ******
|
||||
ok: [localhost] => {
|
||||
"msg": [
|
||||
{
|
||||
"k1": "foo",
|
||||
"k2": 23,
|
||||
"k3": "a"
|
||||
},
|
||||
{
|
||||
"k1": "bar",
|
||||
"k2": 42,
|
||||
"k3": "b"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
.. versionadded:: 3.0.0
|
||||
@@ -1,98 +0,0 @@
|
||||
Grouping
|
||||
^^^^^^^^
|
||||
|
||||
If you have a list of dictionaries, the Jinja2 ``groupby`` filter allows to group the list by an attribute. This results in a list of ``(grouper, list)`` namedtuples, where ``list`` contains all dictionaries where the selected attribute equals ``grouper``. If you know that for every ``grouper``, there will be a most one entry in that list, you can use the ``community.general.groupby_as_dict`` filter to convert the original list into a dictionary which maps ``grouper`` to the corresponding dictionary.
|
||||
|
||||
One example is ``ansible_facts.mounts``, which is a list of dictionaries where each has one ``device`` element to indicate the device which is mounted. Therefore, ``ansible_facts.mounts | community.general.groupby_as_dict('device')`` is a dictionary mapping a device to the mount information:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Output mount facts grouped by device name
|
||||
debug:
|
||||
var: ansible_facts.mounts | community.general.groupby_as_dict('device')
|
||||
|
||||
- name: Output mount facts grouped by mount point
|
||||
debug:
|
||||
var: ansible_facts.mounts | community.general.groupby_as_dict('mount')
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Output mount facts grouped by device name] ******************************************
|
||||
ok: [localhost] => {
|
||||
"ansible_facts.mounts | community.general.groupby_as_dict('device')": {
|
||||
"/dev/sda1": {
|
||||
"block_available": 2000,
|
||||
"block_size": 4096,
|
||||
"block_total": 2345,
|
||||
"block_used": 345,
|
||||
"device": "/dev/sda1",
|
||||
"fstype": "ext4",
|
||||
"inode_available": 500,
|
||||
"inode_total": 512,
|
||||
"inode_used": 12,
|
||||
"mount": "/boot",
|
||||
"options": "rw,relatime,data=ordered",
|
||||
"size_available": 56821,
|
||||
"size_total": 543210,
|
||||
"uuid": "ab31cade-d9c1-484d-8482-8a4cbee5241a"
|
||||
},
|
||||
"/dev/sda2": {
|
||||
"block_available": 1234,
|
||||
"block_size": 4096,
|
||||
"block_total": 12345,
|
||||
"block_used": 11111,
|
||||
"device": "/dev/sda2",
|
||||
"fstype": "ext4",
|
||||
"inode_available": 1111,
|
||||
"inode_total": 1234,
|
||||
"inode_used": 123,
|
||||
"mount": "/",
|
||||
"options": "rw,relatime",
|
||||
"size_available": 42143,
|
||||
"size_total": 543210,
|
||||
"uuid": "abcdef01-2345-6789-0abc-def012345678"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TASK [Output mount facts grouped by mount point] ******************************************
|
||||
ok: [localhost] => {
|
||||
"ansible_facts.mounts | community.general.groupby_as_dict('mount')": {
|
||||
"/": {
|
||||
"block_available": 1234,
|
||||
"block_size": 4096,
|
||||
"block_total": 12345,
|
||||
"block_used": 11111,
|
||||
"device": "/dev/sda2",
|
||||
"fstype": "ext4",
|
||||
"inode_available": 1111,
|
||||
"inode_total": 1234,
|
||||
"inode_used": 123,
|
||||
"mount": "/",
|
||||
"options": "rw,relatime",
|
||||
"size_available": 42143,
|
||||
"size_total": 543210,
|
||||
"uuid": "bdf50b7d-4859-40af-8665-c637ee7a7808"
|
||||
},
|
||||
"/boot": {
|
||||
"block_available": 2000,
|
||||
"block_size": 4096,
|
||||
"block_total": 2345,
|
||||
"block_used": 345,
|
||||
"device": "/dev/sda1",
|
||||
"fstype": "ext4",
|
||||
"inode_available": 500,
|
||||
"inode_total": 512,
|
||||
"inode_used": 12,
|
||||
"mount": "/boot",
|
||||
"options": "rw,relatime,data=ordered",
|
||||
"size_available": 56821,
|
||||
"size_total": 543210,
|
||||
"uuid": "ab31cade-d9c1-484d-8482-8a4cbee5241a"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
.. versionadded:: 3.0.0
|
||||
@@ -1,433 +0,0 @@
|
||||
Merging lists of dictionaries
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If you have two or more lists of dictionaries and want to combine them into a list of merged dictionaries, where the dictionaries are merged by an attribute, you can use the ``lists_mergeby`` filter.
|
||||
|
||||
.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ref:`the documentation for the community.general.yaml callback plugin <ansible_collections.community.general.yaml_callback>`.
|
||||
|
||||
In the example below the lists are merged by the attribute ``name``:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
---
|
||||
- name: Merge two lists by common attribute 'name'
|
||||
set_fact:
|
||||
list3: "{{ list1|
|
||||
community.general.lists_mergeby(list2, 'name') }}"
|
||||
vars:
|
||||
list1:
|
||||
- name: foo
|
||||
extra: true
|
||||
- name: bar
|
||||
extra: false
|
||||
- name: meh
|
||||
extra: true
|
||||
list2:
|
||||
- name: foo
|
||||
path: /foo
|
||||
- name: baz
|
||||
path: /baz
|
||||
- debug:
|
||||
var: list3
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
list3:
|
||||
- extra: false
|
||||
name: bar
|
||||
- name: baz
|
||||
path: /baz
|
||||
- extra: true
|
||||
name: foo
|
||||
path: /foo
|
||||
- extra: true
|
||||
name: meh
|
||||
|
||||
.. versionadded:: 2.0.0
|
||||
|
||||
It is possible to use a list of lists as an input of the filter:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
---
|
||||
- name: Merge two lists by common attribute 'name'
|
||||
set_fact:
|
||||
list3: "{{ [list1, list2]|
|
||||
community.general.lists_mergeby('name') }}"
|
||||
vars:
|
||||
list1:
|
||||
- name: foo
|
||||
extra: true
|
||||
- name: bar
|
||||
extra: false
|
||||
- name: meh
|
||||
extra: true
|
||||
list2:
|
||||
- name: foo
|
||||
path: /foo
|
||||
- name: baz
|
||||
path: /baz
|
||||
- debug:
|
||||
var: list3
|
||||
|
||||
This produces the same result as in the previous example:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
list3:
|
||||
- extra: false
|
||||
name: bar
|
||||
- name: baz
|
||||
path: /baz
|
||||
- extra: true
|
||||
name: foo
|
||||
path: /foo
|
||||
- extra: true
|
||||
name: meh
|
||||
|
||||
The filter also accepts two optional parameters: ``recursive`` and ``list_merge``. These parameters are only supported when used with ansible-base 2.10 or ansible-core, but not with Ansible 2.9. This is available since community.general 4.4.0.
|
||||
|
||||
**recursive**
|
||||
Is a boolean, defaults to ``False``. Determines whether ``community.general.lists_mergeby`` recursively merges nested hashes. Note: It does not depend on the value of the ``hash_behaviour`` setting in ``ansible.cfg``.
|
||||
|
||||
**list_merge**
|
||||
Is a string, its possible values are ``replace`` (default), ``keep``, ``append``, ``prepend``, ``append_rp`` or ``prepend_rp``. It modifies the behaviour of ``community.general.lists_mergeby`` when the hashes to merge contain arrays/lists.
|
||||
|
||||
The examples below set ``recursive=true`` and display the differences among all six options of ``list_merge``. Functionality of the parameters is exactly the same as in the filter ``combine``. See :ref:`Combining hashes/dictionaries <combine_filter>` to learn details about these options.
|
||||
|
||||
Example ``list_merge=replace`` (default):
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
---
|
||||
- name: Merge recursive by 'name', replace lists (default)
|
||||
set_fact:
|
||||
list3: "{{ [list1, list2]|
|
||||
community.general.lists_mergeby('name',
|
||||
recursive=true) }}"
|
||||
vars:
|
||||
list1:
|
||||
- name: myname01
|
||||
param01:
|
||||
x: default_value
|
||||
y: default_value
|
||||
list:
|
||||
- default_value
|
||||
- name: myname02
|
||||
param01: [1, 1, 2, 3]
|
||||
|
||||
list2:
|
||||
- name: myname01
|
||||
param01:
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
list:
|
||||
- patch_value
|
||||
- name: myname02
|
||||
param01: [3, 4, 4, {key: value}]
|
||||
- debug:
|
||||
var: list3
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
list3:
|
||||
- name: myname01
|
||||
param01:
|
||||
list:
|
||||
- patch_value
|
||||
x: default_value
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
- name: myname02
|
||||
param01:
|
||||
- 3
|
||||
- 4
|
||||
- 4
|
||||
- key: value
|
||||
|
||||
Example ``list_merge=keep``:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
---
|
||||
- name: Merge recursive by 'name', keep lists
|
||||
set_fact:
|
||||
list3: "{{ [list1, list2]|
|
||||
community.general.lists_mergeby('name',
|
||||
recursive=true,
|
||||
list_merge='keep') }}"
|
||||
vars:
|
||||
list1:
|
||||
- name: myname01
|
||||
param01:
|
||||
x: default_value
|
||||
y: default_value
|
||||
list:
|
||||
- default_value
|
||||
- name: myname02
|
||||
param01: [1, 1, 2, 3]
|
||||
|
||||
list2:
|
||||
- name: myname01
|
||||
param01:
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
list:
|
||||
- patch_value
|
||||
- name: myname02
|
||||
param01: [3, 4, 4, {key: value}]
|
||||
- debug:
|
||||
var: list3
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
list3:
|
||||
- name: myname01
|
||||
param01:
|
||||
list:
|
||||
- default_value
|
||||
x: default_value
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
- name: myname02
|
||||
param01:
|
||||
- 1
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
|
||||
Example ``list_merge=append``:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
---
|
||||
- name: Merge recursive by 'name', append lists
|
||||
set_fact:
|
||||
list3: "{{ [list1, list2]|
|
||||
community.general.lists_mergeby('name',
|
||||
recursive=true,
|
||||
list_merge='append') }}"
|
||||
vars:
|
||||
list1:
|
||||
- name: myname01
|
||||
param01:
|
||||
x: default_value
|
||||
y: default_value
|
||||
list:
|
||||
- default_value
|
||||
- name: myname02
|
||||
param01: [1, 1, 2, 3]
|
||||
|
||||
list2:
|
||||
- name: myname01
|
||||
param01:
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
list:
|
||||
- patch_value
|
||||
- name: myname02
|
||||
param01: [3, 4, 4, {key: value}]
|
||||
- debug:
|
||||
var: list3
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
list3:
|
||||
- name: myname01
|
||||
param01:
|
||||
list:
|
||||
- default_value
|
||||
- patch_value
|
||||
x: default_value
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
- name: myname02
|
||||
param01:
|
||||
- 1
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 3
|
||||
- 4
|
||||
- 4
|
||||
- key: value
|
||||
|
||||
Example ``list_merge=prepend``:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
---
|
||||
- name: Merge recursive by 'name', prepend lists
|
||||
set_fact:
|
||||
list3: "{{ [list1, list2]|
|
||||
community.general.lists_mergeby('name',
|
||||
recursive=true,
|
||||
list_merge='prepend') }}"
|
||||
vars:
|
||||
list1:
|
||||
- name: myname01
|
||||
param01:
|
||||
x: default_value
|
||||
y: default_value
|
||||
list:
|
||||
- default_value
|
||||
- name: myname02
|
||||
param01: [1, 1, 2, 3]
|
||||
|
||||
list2:
|
||||
- name: myname01
|
||||
param01:
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
list:
|
||||
- patch_value
|
||||
- name: myname02
|
||||
param01: [3, 4, 4, {key: value}]
|
||||
- debug:
|
||||
var: list3
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
list3:
|
||||
- name: myname01
|
||||
param01:
|
||||
list:
|
||||
- patch_value
|
||||
- default_value
|
||||
x: default_value
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
- name: myname02
|
||||
param01:
|
||||
- 3
|
||||
- 4
|
||||
- 4
|
||||
- key: value
|
||||
- 1
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
|
||||
Example ``list_merge=append_rp``:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
---
|
||||
- name: Merge recursive by 'name', append lists 'remove present'
|
||||
set_fact:
|
||||
list3: "{{ [list1, list2]|
|
||||
community.general.lists_mergeby('name',
|
||||
recursive=true,
|
||||
list_merge='append_rp') }}"
|
||||
vars:
|
||||
list1:
|
||||
- name: myname01
|
||||
param01:
|
||||
x: default_value
|
||||
y: default_value
|
||||
list:
|
||||
- default_value
|
||||
- name: myname02
|
||||
param01: [1, 1, 2, 3]
|
||||
|
||||
list2:
|
||||
- name: myname01
|
||||
param01:
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
list:
|
||||
- patch_value
|
||||
- name: myname02
|
||||
param01: [3, 4, 4, {key: value}]
|
||||
- debug:
|
||||
var: list3
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
list3:
|
||||
- name: myname01
|
||||
param01:
|
||||
list:
|
||||
- default_value
|
||||
- patch_value
|
||||
x: default_value
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
- name: myname02
|
||||
param01:
|
||||
- 1
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
- 4
|
||||
- key: value
|
||||
|
||||
Example ``list_merge=prepend_rp``:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
---
|
||||
- name: Merge recursive by 'name', prepend lists 'remove present'
|
||||
set_fact:
|
||||
list3: "{{ [list1, list2]|
|
||||
community.general.lists_mergeby('name',
|
||||
recursive=true,
|
||||
list_merge='prepend_rp') }}"
|
||||
vars:
|
||||
list1:
|
||||
- name: myname01
|
||||
param01:
|
||||
x: default_value
|
||||
y: default_value
|
||||
list:
|
||||
- default_value
|
||||
- name: myname02
|
||||
param01: [1, 1, 2, 3]
|
||||
|
||||
list2:
|
||||
- name: myname01
|
||||
param01:
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
list:
|
||||
- patch_value
|
||||
- name: myname02
|
||||
param01: [3, 4, 4, {key: value}]
|
||||
- debug:
|
||||
var: list3
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
list3:
|
||||
- name: myname01
|
||||
param01:
|
||||
list:
|
||||
- patch_value
|
||||
- default_value
|
||||
x: default_value
|
||||
y: patch_value
|
||||
z: patch_value
|
||||
- name: myname02
|
||||
param01:
|
||||
- 3
|
||||
- 4
|
||||
- 4
|
||||
- key: value
|
||||
- 1
|
||||
- 1
|
||||
- 2
|
||||
@@ -1,108 +0,0 @@
|
||||
Conversions
|
||||
-----------
|
||||
|
||||
Parsing CSV files
|
||||
^^^^^^^^^^^^^^^^^
|
||||
|
||||
Ansible offers the :ref:`community.general.read_csv module <ansible_collections.community.general.read_csv_module>` to read CSV files. Sometimes you need to parse CSV data from strings instead. For this, the ``from_csv`` filter exists.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: "Parse CSV from string"
|
||||
debug:
|
||||
msg: "{{ csv_string | community.general.from_csv }}"
|
||||
vars:
|
||||
csv_string: |
|
||||
foo,bar,baz
|
||||
1,2,3
|
||||
you,this,then
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Parse CSV from string] **************************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": [
|
||||
{
|
||||
"bar": "2",
|
||||
"baz": "3",
|
||||
"foo": "1"
|
||||
},
|
||||
{
|
||||
"bar": "this",
|
||||
"baz": "then",
|
||||
"foo": "you"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
The ``from_csv`` filter has several keyword arguments to control its behavior:
|
||||
|
||||
:dialect: Dialect of the CSV file. Default is ``excel``. Other possible choices are ``excel-tab`` and ``unix``. If one of ``delimiter``, ``skipinitialspace`` or ``strict`` is specified, ``dialect`` is ignored.
|
||||
:fieldnames: A set of column names to use. If not provided, the first line of the CSV is assumed to contain the column names.
|
||||
:delimiter: Sets the delimiter to use. Default depends on the dialect used.
|
||||
:skipinitialspace: Set to ``true`` to ignore space directly after the delimiter. Default depends on the dialect used (usually ``false``).
|
||||
:strict: Set to ``true`` to error out on invalid CSV input.
|
||||
|
||||
.. versionadded:: 3.0.0
|
||||
|
||||
Converting to JSON
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
`JC <https://pypi.org/project/jc/>`_ is a CLI tool and Python library which allows you to interpret the output of various CLI programs as JSON. It is also available as a filter in community.general. This filter needs the `jc Python library <https://pypi.org/project/jc/>`_ installed on the controller.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Run 'ls' to list files in /
|
||||
command: ls /
|
||||
register: result
|
||||
|
||||
- name: Parse the ls output
|
||||
debug:
|
||||
msg: "{{ result.stdout | community.general.jc('ls') }}"
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Run 'ls' to list files in /] ********************************************************
|
||||
changed: [localhost]
|
||||
|
||||
TASK [Parse the ls output] ****************************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": [
|
||||
{
|
||||
"filename": "bin"
|
||||
},
|
||||
{
|
||||
"filename": "boot"
|
||||
},
|
||||
{
|
||||
"filename": "dev"
|
||||
},
|
||||
{
|
||||
"filename": "etc"
|
||||
},
|
||||
{
|
||||
"filename": "home"
|
||||
},
|
||||
{
|
||||
"filename": "lib"
|
||||
},
|
||||
{
|
||||
"filename": "proc"
|
||||
},
|
||||
{
|
||||
"filename": "root"
|
||||
},
|
||||
{
|
||||
"filename": "run"
|
||||
},
|
||||
{
|
||||
"filename": "tmp"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
.. versionadded:: 2.0.0
|
||||
@@ -1,80 +0,0 @@
|
||||
Creating identifiers
|
||||
--------------------
|
||||
|
||||
The following filters allow you to create identifiers.
|
||||
|
||||
Hashids
|
||||
^^^^^^^
|
||||
|
||||
`Hashids <https://hashids.org/>`_ allow you to convert sequences of integers to short unique string identifiers. This filter needs the `hashids Python library <https://pypi.org/project/hashids/>`_ installed on the controller.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: "Create hashid"
|
||||
debug:
|
||||
msg: "{{ [1234, 5, 6] | community.general.hashids_encode }}"
|
||||
|
||||
- name: "Decode hashid"
|
||||
debug:
|
||||
msg: "{{ 'jm2Cytn' | community.general.hashids_decode }}"
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Create hashid] **********************************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": "jm2Cytn"
|
||||
}
|
||||
|
||||
TASK [Decode hashid] **********************************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": [
|
||||
1234,
|
||||
5,
|
||||
6
|
||||
]
|
||||
}
|
||||
|
||||
The hashids filters accept keyword arguments to allow fine-tuning the hashids generated:
|
||||
|
||||
:salt: String to use as salt when hashing.
|
||||
:alphabet: String of 16 or more unique characters to produce a hash.
|
||||
:min_length: Minimum length of hash produced.
|
||||
|
||||
.. versionadded:: 3.0.0
|
||||
|
||||
Random MACs
|
||||
^^^^^^^^^^^
|
||||
|
||||
You can use the ``random_mac`` filter to complete a partial `MAC address <https://en.wikipedia.org/wiki/MAC_address>`_ to a random 6-byte MAC address.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: "Create a random MAC starting with ff:"
|
||||
debug:
|
||||
msg: "{{ 'FF' | community.general.random_mac }}"
|
||||
|
||||
- name: "Create a random MAC starting with 00:11:22:"
|
||||
debug:
|
||||
msg: "{{ '00:11:22' | community.general.random_mac }}"
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Create a random MAC starting with ff:] **********************************************
|
||||
ok: [localhost] => {
|
||||
"msg": "ff:69:d3:78:7f:b4"
|
||||
}
|
||||
|
||||
TASK [Create a random MAC starting with 00:11:22:] ****************************************
|
||||
ok: [localhost] => {
|
||||
"msg": "00:11:22:71:5d:3b"
|
||||
}
|
||||
|
||||
You can also initialize the random number generator from a seed to create random-but-idempotent MAC addresses:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
"{{ '52:54:00' | community.general.random_mac(seed=inventory_hostname) }}"
|
||||
@@ -1,14 +0,0 @@
|
||||
Paths
|
||||
-----
|
||||
|
||||
The ``path_join`` filter has been added in ansible-base 2.10. If you want to use this filter, but also need to support Ansible 2.9, you can use ``community.general``'s ``path_join`` shim, ``community.general.path_join``. This filter redirects to ``path_join`` for ansible-base 2.10 and ansible-core 2.11 or newer, and re-implements the filter for Ansible 2.9.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
# ansible-base 2.10 or newer:
|
||||
path: {{ ('/etc', path, 'subdir', file) | path_join }}
|
||||
|
||||
# Also works with Ansible 2.9:
|
||||
path: {{ ('/etc', path, 'subdir', file) | community.general.path_join }}
|
||||
|
||||
.. versionadded:: 3.0.0
|
||||
@@ -1,144 +0,0 @@
|
||||
.. _ansible_collections.community.general.docsite.json_query_filter:
|
||||
|
||||
Selecting JSON data: JSON queries
|
||||
---------------------------------
|
||||
|
||||
To select a single element or a data subset from a complex data structure in JSON format (for example, Ansible facts), use the ``json_query`` filter. The ``json_query`` filter lets you query a complex JSON structure and iterate over it using a loop structure.
|
||||
|
||||
.. note:: You must manually install the **jmespath** dependency on the Ansible controller before using this filter. This filter is built upon **jmespath**, and you can use the same syntax. For examples, see `jmespath examples <http://jmespath.org/examples.html>`_.
|
||||
|
||||
Consider this data structure:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
{
|
||||
"domain_definition": {
|
||||
"domain": {
|
||||
"cluster": [
|
||||
{
|
||||
"name": "cluster1"
|
||||
},
|
||||
{
|
||||
"name": "cluster2"
|
||||
}
|
||||
],
|
||||
"server": [
|
||||
{
|
||||
"name": "server11",
|
||||
"cluster": "cluster1",
|
||||
"port": "8080"
|
||||
},
|
||||
{
|
||||
"name": "server12",
|
||||
"cluster": "cluster1",
|
||||
"port": "8090"
|
||||
},
|
||||
{
|
||||
"name": "server21",
|
||||
"cluster": "cluster2",
|
||||
"port": "9080"
|
||||
},
|
||||
{
|
||||
"name": "server22",
|
||||
"cluster": "cluster2",
|
||||
"port": "9090"
|
||||
}
|
||||
],
|
||||
"library": [
|
||||
{
|
||||
"name": "lib1",
|
||||
"target": "cluster1"
|
||||
},
|
||||
{
|
||||
"name": "lib2",
|
||||
"target": "cluster2"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
To extract all clusters from this structure, you can use the following query:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all cluster names
|
||||
ansible.builtin.debug:
|
||||
var: item
|
||||
loop: "{{ domain_definition | community.general.json_query('domain.cluster[*].name') }}"
|
||||
|
||||
To extract all server names:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all server names
|
||||
ansible.builtin.debug:
|
||||
var: item
|
||||
loop: "{{ domain_definition | community.general.json_query('domain.server[*].name') }}"
|
||||
|
||||
To extract ports from cluster1:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all ports from cluster1
|
||||
ansible.builtin.debug:
|
||||
var: item
|
||||
loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}"
|
||||
vars:
|
||||
server_name_cluster1_query: "domain.server[?cluster=='cluster1'].port"
|
||||
|
||||
.. note:: You can use a variable to make the query more readable.
|
||||
|
||||
To print out the ports from cluster1 in a comma separated string:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all ports from cluster1 as a string
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ domain_definition | community.general.json_query('domain.server[?cluster==`cluster1`].port') | join(', ') }}"
|
||||
|
||||
.. note:: In the example above, quoting literals using backticks avoids escaping quotes and maintains readability.
|
||||
|
||||
You can use YAML `single quote escaping <https://yaml.org/spec/current.html#id2534365>`_:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all ports from cluster1
|
||||
ansible.builtin.debug:
|
||||
var: item
|
||||
loop: "{{ domain_definition | community.general.json_query('domain.server[?cluster==''cluster1''].port') }}"
|
||||
|
||||
.. note:: Escaping single quotes within single quotes in YAML is done by doubling the single quote.
|
||||
|
||||
To get a hash map with all ports and names of a cluster:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all server ports and names from cluster1
|
||||
ansible.builtin.debug:
|
||||
var: item
|
||||
loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}"
|
||||
vars:
|
||||
server_name_cluster1_query: "domain.server[?cluster=='cluster1'].{name: name, port: port}"
|
||||
|
||||
To extract ports from all clusters with name starting with 'server1':
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all ports from cluster1
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}"
|
||||
vars:
|
||||
server_name_query: "domain.server[?starts_with(name,'server1')].port"
|
||||
|
||||
To extract ports from all clusters with name containing 'server1':
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all ports from cluster1
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}"
|
||||
vars:
|
||||
server_name_query: "domain.server[?contains(name,'server1')].port"
|
||||
|
||||
.. note:: While using ``starts_with`` and ``contains``, you have to use the ``to_json | from_json`` filter for correct parsing of the data structure.
|
||||
@@ -1,84 +0,0 @@
|
||||
Working with times
|
||||
------------------
|
||||
|
||||
The ``to_time_unit`` filter allows you to convert times from a human-readable string to a unit. For example, ``'4h 30min 12second' | community.general.to_time_unit('hour')`` gives the number of hours that correspond to 4 hours, 30 minutes and 12 seconds.
|
||||
|
||||
There are shorthands to directly convert to various units, like ``to_hours``, ``to_minutes``, ``to_seconds``, and so on. The following table lists all units that can be used:
|
||||
|
||||
.. list-table:: Units
|
||||
:widths: 25 25 25 25
|
||||
:header-rows: 1
|
||||
|
||||
* - Unit name
|
||||
- Unit value in seconds
|
||||
- Unit strings for filter
|
||||
- Shorthand filter
|
||||
* - Millisecond
|
||||
- 1/1000 second
|
||||
- ``ms``, ``millisecond``, ``milliseconds``, ``msec``, ``msecs``, ``msecond``, ``mseconds``
|
||||
- ``to_milliseconds``
|
||||
* - Second
|
||||
- 1 second
|
||||
- ``s``, ``sec``, ``secs``, ``second``, ``seconds``
|
||||
- ``to_seconds``
|
||||
* - Minute
|
||||
- 60 seconds
|
||||
- ``m``, ``min``, ``mins``, ``minute``, ``minutes``
|
||||
- ``to_minutes``
|
||||
* - Hour
|
||||
- 60*60 seconds
|
||||
- ``h``, ``hour``, ``hours``
|
||||
- ``to_hours``
|
||||
* - Day
|
||||
- 24*60*60 seconds
|
||||
- ``d``, ``day``, ``days``
|
||||
- ``to_days``
|
||||
* - Week
|
||||
- 7*24*60*60 seconds
|
||||
- ``w``, ``week``, ``weeks``
|
||||
- ``to_weeks``
|
||||
* - Month
|
||||
- 30*24*60*60 seconds
|
||||
- ``mo``, ``month``, ``months``
|
||||
- ``to_months``
|
||||
* - Year
|
||||
- 365*24*60*60 seconds
|
||||
- ``y``, ``year``, ``years``
|
||||
- ``to_years``
|
||||
|
||||
Note that months and years are using a simplified representation: a month is 30 days, and a year is 365 days. If you need different definitions of months or years, you can pass them as keyword arguments. For example, if you want a year to be 365.25 days, and a month to be 30.5 days, you can write ``'11months 4days' | community.general.to_years(year=365.25, month=30.5)``. These keyword arguments can be specified to ``to_time_unit`` and to all shorthand filters.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Convert string to seconds
|
||||
debug:
|
||||
msg: "{{ '30h 20m 10s 123ms' | community.general.to_time_unit('seconds') }}"
|
||||
|
||||
- name: Convert string to hours
|
||||
debug:
|
||||
msg: "{{ '30h 20m 10s 123ms' | community.general.to_hours }}"
|
||||
|
||||
- name: Convert string to years (using 365.25 days == 1 year)
|
||||
debug:
|
||||
msg: "{{ '400d 15h' | community.general.to_years(year=365.25) }}"
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Convert string to seconds] **********************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": "109210.123"
|
||||
}
|
||||
|
||||
TASK [Convert string to hours] ************************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": "30.336145277778"
|
||||
}
|
||||
|
||||
TASK [Convert string to years (using 365.25 days == 1 year)] ******************************
|
||||
ok: [localhost] => {
|
||||
"msg": "1.096851471595"
|
||||
}
|
||||
|
||||
.. versionadded:: 0.2.0
|
||||
@@ -1,30 +0,0 @@
|
||||
Working with Unicode
|
||||
---------------------
|
||||
|
||||
`Unicode <https://unicode.org/main.html>`_ makes it possible to produce two strings which may be visually equivalent, but are comprised of distinctly different characters/character sequences. To address this ``Unicode`` defines `normalization forms <https://unicode.org/reports/tr15/>`_ which avoid these distinctions by choosing a unique character sequence for a given visual representation.
|
||||
|
||||
You can use the ``community.general.unicode_normalize`` filter to normalize ``Unicode`` strings within your playbooks.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Compare Unicode representations
|
||||
debug:
|
||||
msg: "{{ with_combining_character | community.general.unicode_normalize == without_combining_character }}"
|
||||
vars:
|
||||
with_combining_character: "{{ 'Mayagu\u0308ez' }}"
|
||||
without_combining_character: Mayagüez
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Compare Unicode representations] ********************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": true
|
||||
}
|
||||
|
||||
The ``community.general.unicode_normalize`` filter accepts a keyword argument to select the ``Unicode`` form used to normalize the input string.
|
||||
|
||||
:form: One of ``'NFC'`` (default), ``'NFD'``, ``'NFKC'``, or ``'NFKD'``. See the `Unicode reference <https://unicode.org/reports/tr15/>`_ for more information.
|
||||
|
||||
.. versionadded:: 3.7.0
|
||||
@@ -1,34 +0,0 @@
|
||||
Working with versions
|
||||
---------------------
|
||||
|
||||
If you need to sort a list of version numbers, the Jinja ``sort`` filter is problematic. Since it sorts lexicographically, ``2.10`` will come before ``2.9``. To treat version numbers correctly, you can use the ``version_sort`` filter:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Sort list by version number
|
||||
debug:
|
||||
var: ansible_versions | community.general.version_sort
|
||||
vars:
|
||||
ansible_versions:
|
||||
- '2.8.0'
|
||||
- '2.11.0'
|
||||
- '2.7.0'
|
||||
- '2.10.0'
|
||||
- '2.9.0'
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Sort list by version number] ********************************************************
|
||||
ok: [localhost] => {
|
||||
"ansible_versions | community.general.version_sort": [
|
||||
"2.7.0",
|
||||
"2.8.0",
|
||||
"2.9.0",
|
||||
"2.10.0",
|
||||
"2.11.0"
|
||||
]
|
||||
}
|
||||
|
||||
.. versionadded:: 2.2.0
|
||||
@@ -1,28 +0,0 @@
|
||||
.. _ansible_collections.community.general.docsite.test_guide:
|
||||
|
||||
community.general Test (Plugin) Guide
|
||||
=====================================
|
||||
|
||||
The :ref:`community.general collection <plugins_in_community.general>` currently offers one test plugin.
|
||||
|
||||
.. contents:: Topics
|
||||
|
||||
Feature Tests
|
||||
-------------
|
||||
|
||||
The ``a_module`` test allows you to check whether a given string refers to an existing module or action plugin. This can be useful in roles, which can use this to ensure that required modules are present ahead of time.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Make sure that community.aws.route53 is available
|
||||
assert:
|
||||
that:
|
||||
- >
|
||||
'community.aws.route53' is community.general.a_module
|
||||
|
||||
- name: Make sure that community.general.does_not_exist is not a module or action plugin
|
||||
assert:
|
||||
that:
|
||||
- "'community.general.does_not_exist' is not community.general.a_module"
|
||||
|
||||
.. versionadded:: 4.0.0
|
||||
@@ -1,6 +1,6 @@
|
||||
namespace: community
|
||||
name: general
|
||||
version: 4.4.0
|
||||
version: 3.8.4
|
||||
readme: README.md
|
||||
authors:
|
||||
- Ansible (https://github.com/ansible)
|
||||
|
||||
105
meta/runtime.yml
105
meta/runtime.yml
@@ -12,11 +12,20 @@ plugin_routing:
|
||||
hashi_vault:
|
||||
redirect: community.hashi_vault.hashi_vault
|
||||
nios:
|
||||
redirect: infoblox.nios_modules.nios_lookup
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios lookup plugin has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_lookup instead.
|
||||
nios_next_ip:
|
||||
redirect: infoblox.nios_modules.nios_next_ip
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_next_ip lookup plugin has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_next_ip instead.
|
||||
nios_next_network:
|
||||
redirect: infoblox.nios_modules.nios_next_network
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_next_network lookup plugin has been
|
||||
deprecated. Please use infoblox.nios_modules.nios_next_network instead.
|
||||
modules:
|
||||
ali_instance_facts:
|
||||
tombstone:
|
||||
@@ -257,37 +266,85 @@ plugin_routing:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.nginx_status_info instead.
|
||||
nios_a_record:
|
||||
redirect: infoblox.nios_modules.nios_a_record
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_a_record module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_a_record instead.
|
||||
nios_aaaa_record:
|
||||
redirect: infoblox.nios_modules.nios_aaaa_record
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_aaaa_record module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_aaaa_record instead.
|
||||
nios_cname_record:
|
||||
redirect: infoblox.nios_modules.nios_cname_record
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_cname_record module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_cname_record instead.
|
||||
nios_dns_view:
|
||||
redirect: infoblox.nios_modules.nios_dns_view
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_dns_view module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_dns_view instead.
|
||||
nios_fixed_address:
|
||||
redirect: infoblox.nios_modules.nios_fixed_address
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_fixed_address module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_fixed_address instead.
|
||||
nios_host_record:
|
||||
redirect: infoblox.nios_modules.nios_host_record
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_host_record module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_host_record instead.
|
||||
nios_member:
|
||||
redirect: infoblox.nios_modules.nios_member
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_member module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_member instead.
|
||||
nios_mx_record:
|
||||
redirect: infoblox.nios_modules.nios_mx_record
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_mx_record module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_mx_record instead.
|
||||
nios_naptr_record:
|
||||
redirect: infoblox.nios_modules.nios_naptr_record
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_naptr_record module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_naptr_record instead.
|
||||
nios_network:
|
||||
redirect: infoblox.nios_modules.nios_network
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_network module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_network instead.
|
||||
nios_network_view:
|
||||
redirect: infoblox.nios_modules.nios_network_view
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_network_view module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_network_view instead.
|
||||
nios_nsgroup:
|
||||
redirect: infoblox.nios_modules.nios_nsgroup
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_nsgroup module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_nsgroup instead.
|
||||
nios_ptr_record:
|
||||
redirect: infoblox.nios_modules.nios_ptr_record
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_ptr_record module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_ptr_record instead.
|
||||
nios_srv_record:
|
||||
redirect: infoblox.nios_modules.nios_srv_record
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_srv_record module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_srv_record instead.
|
||||
nios_txt_record:
|
||||
redirect: infoblox.nios_modules.nios_txt_record
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_txt_record module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_txt_record instead.
|
||||
nios_zone:
|
||||
redirect: infoblox.nios_modules.nios_zone
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_zone module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_zone instead.
|
||||
ome_device_info:
|
||||
redirect: dellemc.openmanage.ome_device_info
|
||||
one_image_facts:
|
||||
@@ -571,7 +628,10 @@ plugin_routing:
|
||||
kubevirt_vm_options:
|
||||
redirect: community.kubevirt.kubevirt_vm_options
|
||||
nios:
|
||||
redirect: infoblox.nios_modules.nios
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios document fragment has been deprecated.
|
||||
Please use infoblox.nios_modules.nios instead.
|
||||
postgresql:
|
||||
redirect: community.postgresql.postgresql
|
||||
module_utils:
|
||||
@@ -590,7 +650,10 @@ plugin_routing:
|
||||
kubevirt:
|
||||
redirect: community.kubevirt.kubevirt
|
||||
net_tools.nios.api:
|
||||
redirect: infoblox.nios_modules.api
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.net_tools.nios.api module_utils has been
|
||||
deprecated. Please use infoblox.nios_modules.api instead.
|
||||
postgresql:
|
||||
redirect: community.postgresql.postgresql
|
||||
remote_management.dellemc.dellemc_idrac:
|
||||
|
||||
@@ -226,15 +226,18 @@ class ElasticSource(object):
|
||||
|
||||
message = "success"
|
||||
status = "success"
|
||||
enriched_error_message = None
|
||||
if host_data.status == 'included':
|
||||
rc = 0
|
||||
else:
|
||||
res = host_data.result._result
|
||||
rc = res.get('rc', 0)
|
||||
if host_data.status == 'failed':
|
||||
message = self.get_error_message(res)
|
||||
enriched_error_message = self.enrich_error_message(res)
|
||||
if res.get('exception') is not None:
|
||||
message = res['exception'].strip().split('\n')[-1]
|
||||
elif 'msg' in res:
|
||||
message = res['msg']
|
||||
else:
|
||||
message = 'failed'
|
||||
status = "failure"
|
||||
elif host_data.status == 'skipped':
|
||||
if 'skip_reason' in res:
|
||||
@@ -256,7 +259,7 @@ class ElasticSource(object):
|
||||
"ansible.task.host.status": host_data.status}) as span:
|
||||
span.outcome = status
|
||||
if 'failure' in status:
|
||||
exception = AnsibleRuntimeError(message="{0}: {1} failed with error message {2}".format(task_data.action, name, enriched_error_message))
|
||||
exception = AnsibleRuntimeError(message="{0}: {1} failed with error message {2}".format(task_data.action, name, message))
|
||||
apm_cli.capture_exception(exc_info=(type(exception), exception, exception.__traceback__), handled=True)
|
||||
|
||||
def init_apm_client(self, apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key):
|
||||
@@ -269,24 +272,6 @@ class ElasticSource(object):
|
||||
use_elastic_traceparent_header=True,
|
||||
debug=True)
|
||||
|
||||
@staticmethod
|
||||
def get_error_message(result):
|
||||
if result.get('exception') is not None:
|
||||
return ElasticSource._last_line(result['exception'])
|
||||
return result.get('msg', 'failed')
|
||||
|
||||
@staticmethod
|
||||
def _last_line(text):
|
||||
lines = text.strip().split('\n')
|
||||
return lines[-1]
|
||||
|
||||
@staticmethod
|
||||
def enrich_error_message(result):
|
||||
message = result.get('msg', 'failed')
|
||||
exception = result.get('exception')
|
||||
stderr = result.get('stderr')
|
||||
return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr)
|
||||
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
"""
|
||||
|
||||
@@ -11,16 +11,14 @@ name: mail
|
||||
type: notification
|
||||
short_description: Sends failure events via email
|
||||
description:
|
||||
- This callback will report failures via email.
|
||||
- This callback will report failures via email
|
||||
author:
|
||||
- Dag Wieers (@dagwieers)
|
||||
requirements:
|
||||
- whitelisting in configuration
|
||||
options:
|
||||
mta:
|
||||
description:
|
||||
- Mail Transfer Agent, server that accepts SMTP.
|
||||
type: str
|
||||
description: Mail Transfer Agent, server that accepts SMTP
|
||||
env:
|
||||
- name: SMTPHOST
|
||||
ini:
|
||||
@@ -28,53 +26,39 @@ options:
|
||||
key: smtphost
|
||||
default: localhost
|
||||
mtaport:
|
||||
description:
|
||||
- Mail Transfer Agent Port.
|
||||
- Port at which server SMTP.
|
||||
type: int
|
||||
description: Mail Transfer Agent Port, port at which server SMTP
|
||||
ini:
|
||||
- section: callback_mail
|
||||
key: smtpport
|
||||
default: 25
|
||||
to:
|
||||
description:
|
||||
- Mail recipient.
|
||||
type: list
|
||||
elements: str
|
||||
description: Mail recipient
|
||||
ini:
|
||||
- section: callback_mail
|
||||
key: to
|
||||
default: [root]
|
||||
default: root
|
||||
sender:
|
||||
description:
|
||||
- Mail sender.
|
||||
- Note that this will be required from community.general 6.0.0 on.
|
||||
type: str
|
||||
description: Mail sender
|
||||
ini:
|
||||
- section: callback_mail
|
||||
key: sender
|
||||
cc:
|
||||
description:
|
||||
- CC'd recipients.
|
||||
type: list
|
||||
elements: str
|
||||
description: CC'd recipient
|
||||
ini:
|
||||
- section: callback_mail
|
||||
key: cc
|
||||
bcc:
|
||||
description:
|
||||
- BCC'd recipients.
|
||||
type: list
|
||||
elements: str
|
||||
description: BCC'd recipient
|
||||
ini:
|
||||
- section: callback_mail
|
||||
key: bcc
|
||||
notes:
|
||||
- "TODO: expand configuration options now that plugins can leverage Ansible's configuration"
|
||||
'''
|
||||
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import email.utils
|
||||
import smtplib
|
||||
|
||||
from ansible.module_utils.six import string_types
|
||||
@@ -104,13 +88,9 @@ class CallbackModule(CallbackBase):
|
||||
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
|
||||
|
||||
self.sender = self.get_option('sender')
|
||||
if self.sender is None:
|
||||
self._display.deprecated(
|
||||
'The sender for the mail callback has not been specified. This will be an error in the future',
|
||||
version='6.0.0', collection_name='community.general')
|
||||
self.to = self.get_option('to')
|
||||
self.smtphost = self.get_option('mta')
|
||||
self.smtpport = self.get_option('mtaport')
|
||||
self.smtpport = int(self.get_option('mtaport'))
|
||||
self.cc = self.get_option('cc')
|
||||
self.bcc = self.get_option('bcc')
|
||||
|
||||
@@ -120,27 +100,28 @@ class CallbackModule(CallbackBase):
|
||||
|
||||
smtp = smtplib.SMTP(self.smtphost, port=self.smtpport)
|
||||
|
||||
content = 'Date: %s\n' % email.utils.formatdate()
|
||||
content += 'From: %s\n' % self.sender
|
||||
if self.to:
|
||||
content += 'To: %s\n' % ','.join(self.to)
|
||||
if self.cc:
|
||||
content += 'Cc: %s\n' % ','.join(self.cc)
|
||||
content += 'Message-ID: %s\n' % email.utils.make_msgid()
|
||||
content += 'Subject: %s\n\n' % subject.strip()
|
||||
content += body
|
||||
b_sender = to_bytes(self.sender)
|
||||
b_to = to_bytes(self.to)
|
||||
b_cc = to_bytes(self.cc)
|
||||
b_bcc = to_bytes(self.bcc)
|
||||
b_subject = to_bytes(subject)
|
||||
b_body = to_bytes(body)
|
||||
|
||||
addresses = self.to
|
||||
b_content = b'From: %s\n' % b_sender
|
||||
b_content += b'To: %s\n' % b_to
|
||||
if self.cc:
|
||||
addresses += self.cc
|
||||
b_content += b'Cc: %s\n' % b_cc
|
||||
b_content += b'Subject: %s\n\n' % b_subject
|
||||
b_content += b_body
|
||||
|
||||
b_addresses = b_to.split(b',')
|
||||
if self.cc:
|
||||
b_addresses += b_cc.split(b',')
|
||||
if self.bcc:
|
||||
addresses += self.bcc
|
||||
b_addresses += b_bcc.split(b',')
|
||||
|
||||
if not addresses:
|
||||
self._display.warning('No receiver has been specified for the mail callback plugin.')
|
||||
|
||||
for address in addresses:
|
||||
smtp.sendmail(self.sender, address, to_bytes(content))
|
||||
for b_address in b_addresses:
|
||||
smtp.sendmail(b_sender, b_address, b_content)
|
||||
|
||||
smtp.quit()
|
||||
|
||||
|
||||
@@ -80,7 +80,6 @@ from os.path import basename
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils.six import raise_from
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlparse
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
|
||||
try:
|
||||
@@ -92,6 +91,8 @@ try:
|
||||
from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
|
||||
from opentelemetry.sdk.trace import TracerProvider
|
||||
from opentelemetry.sdk.trace.export import (
|
||||
ConsoleSpanExporter,
|
||||
SimpleSpanProcessor,
|
||||
BatchSpanProcessor
|
||||
)
|
||||
from opentelemetry.util._time import _time_ns
|
||||
@@ -179,7 +180,7 @@ class OpenTelemetrySource(object):
|
||||
args = None
|
||||
|
||||
if not task.no_log and not hide_task_arguments:
|
||||
args = task.args
|
||||
args = ', '.join(('%s=%s' % a for a in task.args.items()))
|
||||
|
||||
tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args)
|
||||
|
||||
@@ -246,45 +247,34 @@ class OpenTelemetrySource(object):
|
||||
name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
|
||||
|
||||
message = 'success'
|
||||
res = {}
|
||||
rc = 0
|
||||
status = Status(status_code=StatusCode.OK)
|
||||
if host_data.status != 'included':
|
||||
# Support loops
|
||||
if 'results' in host_data.result._result:
|
||||
if host_data.status == 'failed':
|
||||
message = self.get_error_message_from_results(host_data.result._result['results'], task_data.action)
|
||||
enriched_error_message = self.enrich_error_message_from_results(host_data.result._result['results'], task_data.action)
|
||||
else:
|
||||
res = host_data.result._result
|
||||
rc = res.get('rc', 0)
|
||||
message = self.get_error_message(res)
|
||||
enriched_error_message = self.enrich_error_message(res)
|
||||
|
||||
if host_data.status == 'included':
|
||||
rc = 0
|
||||
else:
|
||||
res = host_data.result._result
|
||||
rc = res.get('rc', 0)
|
||||
if host_data.status == 'failed':
|
||||
message = self.get_error_message(res)
|
||||
status = Status(status_code=StatusCode.ERROR, description=message)
|
||||
# Record an exception with the task message
|
||||
span.record_exception(BaseException(enriched_error_message))
|
||||
span.record_exception(BaseException(self.enrich_error_message(res)))
|
||||
elif host_data.status == 'skipped':
|
||||
message = res['skip_reason'] if 'skip_reason' in res else 'skipped'
|
||||
if 'skip_reason' in res:
|
||||
message = res['skip_reason']
|
||||
else:
|
||||
message = 'skipped'
|
||||
status = Status(status_code=StatusCode.UNSET)
|
||||
elif host_data.status == 'ignored':
|
||||
status = Status(status_code=StatusCode.UNSET)
|
||||
|
||||
span.set_status(status)
|
||||
if isinstance(task_data.args, dict) and "gather_facts" not in task_data.action:
|
||||
names = tuple(self.transform_ansible_unicode_to_str(k) for k in task_data.args.keys())
|
||||
values = tuple(self.transform_ansible_unicode_to_str(k) for k in task_data.args.values())
|
||||
self.set_span_attribute(span, ("ansible.task.args.name"), names)
|
||||
self.set_span_attribute(span, ("ansible.task.args.value"), values)
|
||||
self.set_span_attribute(span, "ansible.task.args", task_data.args)
|
||||
self.set_span_attribute(span, "ansible.task.module", task_data.action)
|
||||
self.set_span_attribute(span, "ansible.task.message", message)
|
||||
self.set_span_attribute(span, "ansible.task.name", name)
|
||||
self.set_span_attribute(span, "ansible.task.result", rc)
|
||||
self.set_span_attribute(span, "ansible.task.host.name", host_data.name)
|
||||
self.set_span_attribute(span, "ansible.task.host.status", host_data.status)
|
||||
# This will allow to enrich the service map
|
||||
self.add_attributes_for_service_map_if_possible(span, task_data)
|
||||
span.end(end_time=host_data.finish)
|
||||
|
||||
def set_span_attribute(self, span, attributeName, attributeValue):
|
||||
@@ -296,64 +286,12 @@ class OpenTelemetrySource(object):
|
||||
if attributeValue is not None:
|
||||
span.set_attribute(attributeName, attributeValue)
|
||||
|
||||
def add_attributes_for_service_map_if_possible(self, span, task_data):
|
||||
"""Update the span attributes with the service that the task interacted with, if possible."""
|
||||
|
||||
redacted_url = self.parse_and_redact_url_if_possible(task_data.args)
|
||||
if redacted_url:
|
||||
self.set_span_attribute(span, "http.url", redacted_url.geturl())
|
||||
|
||||
@staticmethod
|
||||
def parse_and_redact_url_if_possible(args):
|
||||
"""Parse and redact the url, if possible."""
|
||||
|
||||
try:
|
||||
parsed_url = urlparse(OpenTelemetrySource.url_from_args(args))
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
if OpenTelemetrySource.is_valid_url(parsed_url):
|
||||
return OpenTelemetrySource.redact_user_password(parsed_url)
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def url_from_args(args):
|
||||
# the order matters
|
||||
url_args = ("url", "api_url", "baseurl", "repo", "server_url", "chart_repo_url", "registry_url")
|
||||
for arg in url_args:
|
||||
if args is not None and args.get(arg):
|
||||
return args.get(arg)
|
||||
return ""
|
||||
|
||||
@staticmethod
|
||||
def redact_user_password(url):
|
||||
return url._replace(netloc=url.hostname) if url.password else url
|
||||
|
||||
@staticmethod
|
||||
def is_valid_url(url):
|
||||
if all([url.scheme, url.netloc, url.hostname]):
|
||||
return "{{" not in url.hostname
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def transform_ansible_unicode_to_str(value):
|
||||
parsed_url = urlparse(str(value))
|
||||
if OpenTelemetrySource.is_valid_url(parsed_url):
|
||||
return OpenTelemetrySource.redact_user_password(parsed_url).geturl()
|
||||
return str(value)
|
||||
|
||||
@staticmethod
|
||||
def get_error_message(result):
|
||||
if result.get('exception') is not None:
|
||||
return OpenTelemetrySource._last_line(result['exception'])
|
||||
return result.get('msg', 'failed')
|
||||
|
||||
@staticmethod
|
||||
def get_error_message_from_results(results, action):
|
||||
for result in results:
|
||||
if result.get('failed', False):
|
||||
return ('{0}({1}) - {2}').format(action, result.get('item', 'none'), OpenTelemetrySource.get_error_message(result))
|
||||
|
||||
@staticmethod
|
||||
def _last_line(text):
|
||||
lines = text.strip().split('\n')
|
||||
@@ -366,14 +304,6 @@ class OpenTelemetrySource(object):
|
||||
stderr = result.get('stderr')
|
||||
return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr)
|
||||
|
||||
@staticmethod
|
||||
def enrich_error_message_from_results(results, action):
|
||||
message = ""
|
||||
for result in results:
|
||||
if result.get('failed', False):
|
||||
message = ('{0}({1}) - {2}\n{3}').format(action, result.get('item', 'none'), OpenTelemetrySource.enrich_error_message(result), message)
|
||||
return message
|
||||
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
"""
|
||||
|
||||
138
plugins/doc_fragments/_netapp.py
Normal file
138
plugins/doc_fragments/_netapp.py
Normal file
@@ -0,0 +1,138 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Sumit Kumar <sumit4@netapp.com>, chris Archibald <carchi@netapp.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
class ModuleDocFragment(object):
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
options:
|
||||
- See respective platform section for more details
|
||||
requirements:
|
||||
- See respective platform section for more details
|
||||
notes:
|
||||
- Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire
|
||||
'''
|
||||
|
||||
# Documentation fragment for ONTAP (na_cdot)
|
||||
ONTAP = r'''
|
||||
options:
|
||||
hostname:
|
||||
required: true
|
||||
description:
|
||||
- The hostname or IP address of the ONTAP instance.
|
||||
username:
|
||||
required: true
|
||||
description:
|
||||
- This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
|
||||
For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/).
|
||||
aliases: ['user']
|
||||
password:
|
||||
required: true
|
||||
description:
|
||||
- Password for the specified user.
|
||||
aliases: ['pass']
|
||||
requirements:
|
||||
- A physical or virtual clustered Data ONTAP system. The modules were developed with Clustered Data ONTAP 8.3
|
||||
- Ansible 2.2
|
||||
- netapp-lib (2015.9.25). Install using 'pip install netapp-lib'
|
||||
|
||||
notes:
|
||||
- The modules prefixed with na\\_cdot are built to support the ONTAP storage platform.
|
||||
|
||||
'''
|
||||
|
||||
# Documentation fragment for SolidFire
|
||||
SOLIDFIRE = r'''
|
||||
options:
|
||||
hostname:
|
||||
required: true
|
||||
description:
|
||||
- The hostname or IP address of the SolidFire cluster.
|
||||
username:
|
||||
required: true
|
||||
description:
|
||||
- Please ensure that the user has the adequate permissions. For more information, please read the official documentation
|
||||
U(https://mysupport.netapp.com/documentation/docweb/index.html?productID=62636&language=en-US).
|
||||
aliases: ['user']
|
||||
password:
|
||||
required: true
|
||||
description:
|
||||
- Password for the specified user.
|
||||
aliases: ['pass']
|
||||
|
||||
requirements:
|
||||
- The modules were developed with SolidFire 10.1
|
||||
- solidfire-sdk-python (1.1.0.92) or greater. Install using 'pip install solidfire-sdk-python'
|
||||
|
||||
notes:
|
||||
- The modules prefixed with na\\_elementsw are built to support the SolidFire storage platform.
|
||||
|
||||
'''
|
||||
|
||||
# Documentation fragment for ONTAP (na_ontap)
|
||||
NA_ONTAP = r'''
|
||||
options:
|
||||
hostname:
|
||||
description:
|
||||
- The hostname or IP address of the ONTAP instance.
|
||||
type: str
|
||||
required: true
|
||||
username:
|
||||
description:
|
||||
- This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
|
||||
For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/).
|
||||
type: str
|
||||
required: true
|
||||
aliases: [ user ]
|
||||
password:
|
||||
description:
|
||||
- Password for the specified user.
|
||||
type: str
|
||||
required: true
|
||||
aliases: [ pass ]
|
||||
https:
|
||||
description:
|
||||
- Enable and disable https
|
||||
type: bool
|
||||
default: no
|
||||
validate_certs:
|
||||
description:
|
||||
- If set to C(no), the SSL certificates will not be validated.
|
||||
- This should only set to C(False) used on personally controlled sites using self-signed certificates.
|
||||
type: bool
|
||||
default: yes
|
||||
http_port:
|
||||
description:
|
||||
- Override the default port (80 or 443) with this port
|
||||
type: int
|
||||
ontapi:
|
||||
description:
|
||||
- The ontap api version to use
|
||||
type: int
|
||||
use_rest:
|
||||
description:
|
||||
- REST API if supported by the target system for all the resources and attributes the module requires. Otherwise will revert to ZAPI.
|
||||
- Always -- will always use the REST API
|
||||
- Never -- will always use the ZAPI
|
||||
- Auto -- will try to use the REST Api
|
||||
default: Auto
|
||||
choices: ['Never', 'Always', 'Auto']
|
||||
type: str
|
||||
|
||||
|
||||
requirements:
|
||||
- A physical or virtual clustered Data ONTAP system. The modules support Data ONTAP 9.1 and onward
|
||||
- Ansible 2.6
|
||||
- Python2 netapp-lib (2017.10.30) or later. Install using 'pip install netapp-lib'
|
||||
- Python3 netapp-lib (2018.11.13) or later. Install using 'pip install netapp-lib'
|
||||
- To enable http on the cluster you must run the following commands 'set -privilege advanced;' 'system services web modify -http-enabled true;'
|
||||
|
||||
notes:
|
||||
- The modules prefixed with na\\_ontap are built to support the ONTAP storage platform.
|
||||
|
||||
'''
|
||||
@@ -1,41 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
class ModuleDocFragment(object):
|
||||
|
||||
# Standard documentation fragment
|
||||
DOCUMENTATION = r'''
|
||||
options:
|
||||
client_id:
|
||||
description:
|
||||
- The OAuth consumer key.
|
||||
- If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
|
||||
type: str
|
||||
client_secret:
|
||||
description:
|
||||
- The OAuth consumer secret.
|
||||
- If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
|
||||
type: str
|
||||
user:
|
||||
description:
|
||||
- The username.
|
||||
- If not set the environment variable C(BITBUCKET_USERNAME) will be used.
|
||||
type: str
|
||||
version_added: 4.0.0
|
||||
password:
|
||||
description:
|
||||
- The App password.
|
||||
- If not set the environment variable C(BITBUCKET_PASSWORD) will be used.
|
||||
type: str
|
||||
version_added: 4.0.0
|
||||
notes:
|
||||
- Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
|
||||
- Bitbucket App password can be created from Bitbucket profile -> Personal Settings -> App passwords.
|
||||
- If both OAuth and Basic Auth credentials are passed, OAuth credentials take precedence.
|
||||
'''
|
||||
@@ -1,31 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
class ModuleDocFragment(object):
|
||||
|
||||
# Standard files documentation fragment
|
||||
DOCUMENTATION = r'''
|
||||
requirements:
|
||||
- requests (Python library U(https://pypi.org/project/requests/))
|
||||
|
||||
options:
|
||||
api_token:
|
||||
description:
|
||||
- GitLab access token with API permissions.
|
||||
type: str
|
||||
api_oauth_token:
|
||||
description:
|
||||
- GitLab OAuth token for logging in.
|
||||
type: str
|
||||
version_added: 4.2.0
|
||||
api_job_token:
|
||||
description:
|
||||
- GitLab CI job token for logging in.
|
||||
type: str
|
||||
version_added: 4.2.0
|
||||
'''
|
||||
103
plugins/doc_fragments/nios.py
Normal file
103
plugins/doc_fragments/nios.py
Normal file
@@ -0,0 +1,103 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2015, Peter Sprygada <psprygada@ansible.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
class ModuleDocFragment(object):
|
||||
|
||||
# Standard files documentation fragment
|
||||
DOCUMENTATION = r'''
|
||||
options:
|
||||
provider:
|
||||
description:
|
||||
- A dict object containing connection details.
|
||||
type: dict
|
||||
suboptions:
|
||||
host:
|
||||
description:
|
||||
- Specifies the DNS host name or address for connecting to the remote
|
||||
instance of NIOS WAPI over REST
|
||||
- Value can also be specified using C(INFOBLOX_HOST) environment
|
||||
variable.
|
||||
type: str
|
||||
username:
|
||||
description:
|
||||
- Configures the username to use to authenticate the connection to
|
||||
the remote instance of NIOS.
|
||||
- Value can also be specified using C(INFOBLOX_USERNAME) environment
|
||||
variable.
|
||||
type: str
|
||||
password:
|
||||
description:
|
||||
- Specifies the password to use to authenticate the connection to
|
||||
the remote instance of NIOS.
|
||||
- Value can also be specified using C(INFOBLOX_PASSWORD) environment
|
||||
variable.
|
||||
type: str
|
||||
validate_certs:
|
||||
description:
|
||||
- Boolean value to enable or disable verifying SSL certificates
|
||||
- Value can also be specified using C(INFOBLOX_SSL_VERIFY) environment
|
||||
variable.
|
||||
type: bool
|
||||
default: no
|
||||
aliases: [ ssl_verify ]
|
||||
http_request_timeout:
|
||||
description:
|
||||
- The amount of time before to wait before receiving a response
|
||||
- Value can also be specified using C(INFOBLOX_HTTP_REQUEST_TIMEOUT) environment
|
||||
variable.
|
||||
type: int
|
||||
default: 10
|
||||
max_retries:
|
||||
description:
|
||||
- Configures the number of attempted retries before the connection
|
||||
is declared usable
|
||||
- Value can also be specified using C(INFOBLOX_MAX_RETRIES) environment
|
||||
variable.
|
||||
type: int
|
||||
default: 3
|
||||
wapi_version:
|
||||
description:
|
||||
- Specifies the version of WAPI to use
|
||||
- Value can also be specified using C(INFOBLOX_WAP_VERSION) environment
|
||||
variable.
|
||||
- Until ansible 2.8 the default WAPI was 1.4
|
||||
type: str
|
||||
default: '2.1'
|
||||
max_results:
|
||||
description:
|
||||
- Specifies the maximum number of objects to be returned,
|
||||
if set to a negative number the appliance will return an error when the
|
||||
number of returned objects would exceed the setting.
|
||||
- Value can also be specified using C(INFOBLOX_MAX_RESULTS) environment
|
||||
variable.
|
||||
type: int
|
||||
default: 1000
|
||||
http_pool_connections:
|
||||
description:
|
||||
- Number of pools to be used by the C(infoblox_client.Connector) object.
|
||||
- This is passed as-is to the underlying C(requests.adapters.HTTPAdapter) class.
|
||||
type: int
|
||||
default: 10
|
||||
http_pool_maxsize:
|
||||
description:
|
||||
- Maximum number of connections per pool to be used by the C(infoblox_client.Connector) object.
|
||||
- This is passed as-is to the underlying C(requests.adapters.HTTPAdapter) class.
|
||||
type: int
|
||||
default: 10
|
||||
silent_ssl_warnings:
|
||||
description:
|
||||
- Disable C(urllib3) SSL warnings in the C(infoblox_client.Connector) object.
|
||||
- This is passed as-is to the underlying C(requests.adapters.HTTPAdapter) class.
|
||||
type: bool
|
||||
default: true
|
||||
notes:
|
||||
- "This module must be run locally, which can be achieved by specifying C(connection: local)."
|
||||
- Please read the :ref:`nios_guide` for more detailed information on how to use Infoblox with Ansible.
|
||||
|
||||
'''
|
||||
@@ -1,36 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2021, Remy Keil <remy.keil@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.errors import AnsibleFilterError
|
||||
from ansible.module_utils.common._collections_compat import Sequence
|
||||
from collections import Counter
|
||||
|
||||
|
||||
def counter(sequence):
|
||||
''' Count elements in a sequence. Returns dict with count result. '''
|
||||
if not isinstance(sequence, Sequence):
|
||||
raise AnsibleFilterError('Argument for community.general.counter must be a sequence (string or list). %s is %s' %
|
||||
(sequence, type(sequence)))
|
||||
|
||||
try:
|
||||
result = dict(Counter(sequence))
|
||||
except TypeError as e:
|
||||
raise AnsibleFilterError(
|
||||
"community.general.counter needs a sequence with hashable elements (int, float or str) - %s" % (e)
|
||||
)
|
||||
return result
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
''' Ansible counter jinja2 filters '''
|
||||
|
||||
def filters(self):
|
||||
filters = {
|
||||
'counter': counter,
|
||||
}
|
||||
|
||||
return filters
|
||||
@@ -1,111 +1,41 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2020-2022, Vladimir Botka <vbotka@gmail.com>
|
||||
# Copyright (c) 2020, Vladimir Botka <vbotka@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.errors import AnsibleFilterError
|
||||
from ansible.errors import AnsibleError, AnsibleFilterError
|
||||
from ansible.module_utils.six import string_types
|
||||
from ansible.module_utils.common._collections_compat import Mapping, Sequence
|
||||
from ansible.utils.vars import merge_hash
|
||||
from ansible.release import __version__ as ansible_version
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
from collections import defaultdict
|
||||
from operator import itemgetter
|
||||
|
||||
|
||||
def merge_hash_wrapper(x, y, recursive=False, list_merge='replace'):
|
||||
''' Wrapper of the function merge_hash from ansible.utils.vars. Only 2 paramaters are allowed
|
||||
for Ansible 2.9 and lower.'''
|
||||
def lists_mergeby(l1, l2, index):
|
||||
''' merge lists by attribute index. Example:
|
||||
- debug: msg="{{ l1|community.general.lists_mergeby(l2, 'index')|list }}" '''
|
||||
|
||||
if LooseVersion(ansible_version) < LooseVersion('2.10'):
|
||||
if list_merge != 'replace' or recursive:
|
||||
msg = ("Non default options of list_merge(default=replace) or recursive(default=False) "
|
||||
"are not allowed in Ansible version 2.9 or lower. Ansible version is %s, "
|
||||
"recursive=%s, and list_merge=%s.")
|
||||
raise AnsibleFilterError(msg % (ansible_version, recursive, list_merge))
|
||||
else:
|
||||
return merge_hash(x, y)
|
||||
else:
|
||||
return merge_hash(x, y, recursive, list_merge)
|
||||
if not isinstance(l1, Sequence):
|
||||
raise AnsibleFilterError('First argument for community.general.lists_mergeby must be list. %s is %s' %
|
||||
(l1, type(l1)))
|
||||
|
||||
|
||||
def list_mergeby(x, y, index, recursive=False, list_merge='replace'):
|
||||
''' Merge 2 lists by attribute 'index'. The function merge_hash from ansible.utils.vars is used.
|
||||
This function is used by the function lists_mergeby.
|
||||
'''
|
||||
|
||||
d = defaultdict(dict)
|
||||
for l in (x, y):
|
||||
for elem in l:
|
||||
if not isinstance(elem, Mapping):
|
||||
msg = "Elements of list arguments for lists_mergeby must be dictionaries. %s is %s"
|
||||
raise AnsibleFilterError(msg % (elem, type(elem)))
|
||||
if index in elem.keys():
|
||||
d[elem[index]].update(merge_hash_wrapper(d[elem[index]], elem, recursive, list_merge))
|
||||
return sorted(d.values(), key=itemgetter(index))
|
||||
|
||||
|
||||
def lists_mergeby(*terms, **kwargs):
|
||||
''' Merge 2 or more lists by attribute 'index'. Optional parameters 'recursive' and 'list_merge'
|
||||
control the merging of the lists in values. The function merge_hash from ansible.utils.vars
|
||||
is used. To learn details on how to use the parameters 'recursive' and 'list_merge' see
|
||||
Ansible User's Guide chapter "Using filters to manipulate data" section "Combining
|
||||
hashes/dictionaries".
|
||||
|
||||
Example:
|
||||
- debug:
|
||||
msg: "{{ list1|
|
||||
community.general.lists_mergeby(list2,
|
||||
'index',
|
||||
recursive=True,
|
||||
list_merge='append')|
|
||||
list }}"
|
||||
'''
|
||||
|
||||
recursive = kwargs.pop('recursive', False)
|
||||
list_merge = kwargs.pop('list_merge', 'replace')
|
||||
if kwargs:
|
||||
raise AnsibleFilterError("'recursive' and 'list_merge' are the only valid keyword arguments.")
|
||||
if len(terms) < 2:
|
||||
raise AnsibleFilterError("At least one list and index are needed.")
|
||||
|
||||
# allow the user to do `[list1, list2, ...] | lists_mergeby('index')`
|
||||
flat_list = []
|
||||
for sublist in terms[:-1]:
|
||||
if not isinstance(sublist, Sequence):
|
||||
msg = ("All arguments before the argument index for community.general.lists_mergeby "
|
||||
"must be lists. %s is %s")
|
||||
raise AnsibleFilterError(msg % (sublist, type(sublist)))
|
||||
if len(sublist) > 0:
|
||||
if all(isinstance(l, Sequence) for l in sublist):
|
||||
for item in sublist:
|
||||
flat_list.append(item)
|
||||
else:
|
||||
flat_list.append(sublist)
|
||||
lists = flat_list
|
||||
|
||||
if not lists:
|
||||
return []
|
||||
|
||||
if len(lists) == 1:
|
||||
return lists[0]
|
||||
|
||||
index = terms[-1]
|
||||
if not isinstance(l2, Sequence):
|
||||
raise AnsibleFilterError('Second argument for community.general.lists_mergeby must be list. %s is %s' %
|
||||
(l2, type(l2)))
|
||||
|
||||
if not isinstance(index, string_types):
|
||||
msg = ("First argument after the lists for community.general.lists_mergeby must be string. "
|
||||
"%s is %s")
|
||||
raise AnsibleFilterError(msg % (index, type(index)))
|
||||
raise AnsibleFilterError('Third argument for community.general.lists_mergeby must be string. %s is %s' %
|
||||
(index, type(index)))
|
||||
|
||||
high_to_low_prio_list_iterator = reversed(lists)
|
||||
result = next(high_to_low_prio_list_iterator)
|
||||
for list in high_to_low_prio_list_iterator:
|
||||
result = list_mergeby(list, result, index, recursive, list_merge)
|
||||
|
||||
return result
|
||||
d = defaultdict(dict)
|
||||
for l in (l1, l2):
|
||||
for elem in l:
|
||||
if not isinstance(elem, Mapping):
|
||||
raise AnsibleFilterError('Elements of list arguments for lists_mergeby must be dictionaries. Found {0!r}.'.format(elem))
|
||||
if index in elem.keys():
|
||||
d[elem[index]].update(elem)
|
||||
return sorted(d.values(), key=itemgetter(index))
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
|
||||
@@ -40,21 +40,10 @@ DOCUMENTATION = '''
|
||||
type: boolean
|
||||
default: no
|
||||
exclude_profiles:
|
||||
description:
|
||||
- Profiles to exclude from inventory.
|
||||
- Ignored if I(include_profiles) is specified.
|
||||
description: Profiles to exclude from inventory
|
||||
type: list
|
||||
default: []
|
||||
elements: str
|
||||
include_profiles:
|
||||
description:
|
||||
- Profiles to include from inventory.
|
||||
- If specified, all other profiles will be excluded.
|
||||
- I(exclude_profiles) is ignored if I(include_profiles) is specified.
|
||||
type: list
|
||||
default: []
|
||||
elements: str
|
||||
version_added: 4.4.0
|
||||
group_by:
|
||||
description: Keys to group hosts by
|
||||
type: list
|
||||
@@ -82,7 +71,8 @@ password: secure
|
||||
import socket
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
|
||||
from ansible.module_utils.common._collections_compat import MutableMapping
|
||||
from ansible.module_utils.six import iteritems
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name
|
||||
|
||||
@@ -104,9 +94,18 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
|
||||
NAME = 'community.general.cobbler'
|
||||
|
||||
def __init__(self):
|
||||
|
||||
super(InventoryModule, self).__init__()
|
||||
self.cache_key = None
|
||||
|
||||
# from config
|
||||
self.cobbler_url = None
|
||||
self.exclude_profiles = [] # A list of profiles to exclude
|
||||
|
||||
self.connection = None
|
||||
self.token = None
|
||||
|
||||
self.cache_key = None
|
||||
self.use_cache = None
|
||||
|
||||
def verify_file(self, path):
|
||||
valid = False
|
||||
@@ -178,12 +177,6 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
|
||||
self.inventory.add_child(group_name, child)
|
||||
return group_name
|
||||
|
||||
def _exclude_profile(self, profile):
|
||||
if self.include_profiles:
|
||||
return profile not in self.include_profiles
|
||||
else:
|
||||
return profile in self.exclude_profiles
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
|
||||
|
||||
super(InventoryModule, self).parse(inventory, loader, path)
|
||||
@@ -197,16 +190,15 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
|
||||
self.use_cache = cache and self.get_option('cache')
|
||||
|
||||
self.exclude_profiles = self.get_option('exclude_profiles')
|
||||
self.include_profiles = self.get_option('include_profiles')
|
||||
self.group_by = self.get_option('group_by')
|
||||
|
||||
for profile in self._get_profiles():
|
||||
if profile['parent']:
|
||||
self.display.vvvv('Processing profile %s with parent %s\n' % (profile['name'], profile['parent']))
|
||||
if not self._exclude_profile(profile['parent']):
|
||||
if profile['parent'] not in self.exclude_profiles:
|
||||
parent_group_name = self._add_safe_group_name(profile['parent'])
|
||||
self.display.vvvv('Added profile parent group %s\n' % parent_group_name)
|
||||
if not self._exclude_profile(profile['name']):
|
||||
if profile['name'] not in self.exclude_profiles:
|
||||
group_name = self._add_safe_group_name(profile['name'])
|
||||
self.display.vvvv('Added profile group %s\n' % group_name)
|
||||
self.inventory.add_child(parent_group_name, group_name)
|
||||
@@ -218,7 +210,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
|
||||
while i < len(profile_elements) - 1:
|
||||
profile_group = '-'.join(profile_elements[0:i + 1])
|
||||
profile_group_child = '-'.join(profile_elements[0:i + 2])
|
||||
if self._exclude_profile(profile_group):
|
||||
if profile_group in self.exclude_profiles:
|
||||
self.display.vvvv('Excluding profile %s\n' % profile_group)
|
||||
break
|
||||
group_name = self._add_safe_group_name(profile_group)
|
||||
@@ -239,7 +231,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
|
||||
hostname = host['hostname'] # None
|
||||
interfaces = host['interfaces']
|
||||
|
||||
if self._exclude_profile(host['profile']):
|
||||
if host['profile'] in self.exclude_profiles:
|
||||
self.display.vvvv('Excluding host %s in profile %s\n' % (host['name'], host['profile']))
|
||||
continue
|
||||
|
||||
|
||||
@@ -16,17 +16,7 @@ DOCUMENTATION = '''
|
||||
- Get inventory hosts from the Icinga2 API.
|
||||
- "Uses a configuration file as an inventory source, it must end in
|
||||
C(.icinga2.yml) or C(.icinga2.yaml)."
|
||||
extends_documentation_fragment:
|
||||
- constructed
|
||||
options:
|
||||
strict:
|
||||
version_added: 4.4.0
|
||||
compose:
|
||||
version_added: 4.4.0
|
||||
groups:
|
||||
version_added: 4.4.0
|
||||
keyed_groups:
|
||||
version_added: 4.4.0
|
||||
plugin:
|
||||
description: Name of the plugin.
|
||||
required: true
|
||||
@@ -45,23 +35,13 @@ DOCUMENTATION = '''
|
||||
type: string
|
||||
required: true
|
||||
host_filter:
|
||||
description:
|
||||
- An Icinga2 API valid host filter. Leave blank for no filtering
|
||||
description: An Icinga2 API valid host filter.
|
||||
type: string
|
||||
required: false
|
||||
validate_certs:
|
||||
description: Enables or disables SSL certificate verification.
|
||||
type: boolean
|
||||
default: true
|
||||
inventory_attr:
|
||||
description:
|
||||
- Allows the override of the inventory name based on different attributes.
|
||||
- This allows for changing the way limits are used.
|
||||
- The current default, C(address), is sometimes not unique or present. We recommend to use C(name) instead.
|
||||
type: string
|
||||
default: address
|
||||
choices: ['name', 'display_name', 'address']
|
||||
version_added: 4.2.0
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
@@ -72,21 +52,6 @@ user: ansible
|
||||
password: secure
|
||||
host_filter: \"linux-servers\" in host.groups
|
||||
validate_certs: false
|
||||
inventory_attr: name
|
||||
groups:
|
||||
# simple name matching
|
||||
webservers: inventory_hostname.startswith('web')
|
||||
|
||||
# using icinga2 template
|
||||
databaseservers: "'db-template' in (icinga2_attributes.templates|list)"
|
||||
|
||||
compose:
|
||||
# set all icinga2 attributes to a host variable 'icinga2_attrs'
|
||||
icinga2_attrs: icinga2_attributes
|
||||
|
||||
# set 'ansible_user' and 'ansible_port' from icinga2 host vars
|
||||
ansible_user: icinga2_attributes.vars.ansible_user
|
||||
ansible_port: icinga2_attributes.vars.ansible_port | default(22)
|
||||
'''
|
||||
|
||||
import json
|
||||
@@ -94,7 +59,6 @@ import json
|
||||
from ansible.errors import AnsibleParserError
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||
from ansible.module_utils.urls import open_url
|
||||
from ansible.module_utils.six.moves.urllib.error import HTTPError
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
@@ -112,7 +76,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
self.icinga2_password = None
|
||||
self.ssl_verify = None
|
||||
self.host_filter = None
|
||||
self.inventory_attr = None
|
||||
|
||||
self.cache_key = None
|
||||
self.use_cache = None
|
||||
@@ -151,21 +114,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
if data is not None:
|
||||
request_args['data'] = json.dumps(data)
|
||||
self.display.vvv("Request Args: %s" % request_args)
|
||||
try:
|
||||
response = open_url(request_url, **request_args)
|
||||
except HTTPError as e:
|
||||
try:
|
||||
error_body = json.loads(e.read().decode())
|
||||
self.display.vvv("Error returned: {0}".format(error_body))
|
||||
except Exception:
|
||||
error_body = {"status": None}
|
||||
if e.code == 404 and error_body.get('status') == "No objects found.":
|
||||
raise AnsibleParserError("Host filter returned no data. Please confirm your host_filter value is valid")
|
||||
raise AnsibleParserError("Unexpected data returned: {0} -- {1}".format(e, error_body))
|
||||
|
||||
response = open_url(request_url, **request_args)
|
||||
response_body = response.read()
|
||||
json_data = json.loads(response_body.decode('utf-8'))
|
||||
self.display.vvv("Returned Data: %s" % json.dumps(json_data, indent=4, sort_keys=True))
|
||||
if 200 <= response.status <= 299:
|
||||
return json_data
|
||||
if response.status == 404 and json_data['status'] == "No objects found.":
|
||||
@@ -204,7 +155,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
"""Query for all hosts """
|
||||
self.display.vvv("Querying Icinga2 for inventory")
|
||||
query_args = {
|
||||
"attrs": ["address", "address6", "name", "display_name", "state_type", "state", "templates", "groups", "vars", "zone"],
|
||||
"attrs": ["address", "state_type", "state", "groups"],
|
||||
}
|
||||
if self.host_filter is not None:
|
||||
query_args['host_filter'] = self.host_filter
|
||||
@@ -214,12 +165,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
ansible_inv = self._convert_inv(results_json)
|
||||
return ansible_inv
|
||||
|
||||
def _apply_constructable(self, name, variables):
|
||||
strict = self.get_option('strict')
|
||||
self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=strict)
|
||||
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=strict)
|
||||
self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict)
|
||||
|
||||
def _populate(self):
|
||||
groups = self._to_json(self.get_inventory_from_icinga())
|
||||
return groups
|
||||
@@ -232,40 +177,25 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
"""Convert Icinga2 API data to JSON format for Ansible"""
|
||||
groups_dict = {"_meta": {"hostvars": {}}}
|
||||
for entry in json_data:
|
||||
host_name = entry['name']
|
||||
host_attrs = entry['attrs']
|
||||
if self.inventory_attr == "name":
|
||||
host_name = entry.get('name')
|
||||
if self.inventory_attr == "address":
|
||||
# When looking for address for inventory, if missing fallback to object name
|
||||
if host_attrs.get('address', '') != '':
|
||||
host_name = host_attrs.get('address')
|
||||
else:
|
||||
host_name = entry.get('name')
|
||||
if self.inventory_attr == "display_name":
|
||||
host_name = host_attrs.get('display_name')
|
||||
if host_attrs['state'] == 0:
|
||||
host_attrs['state'] = 'on'
|
||||
else:
|
||||
host_attrs['state'] = 'off'
|
||||
host_groups = host_attrs.get('groups')
|
||||
self.inventory.add_host(host_name)
|
||||
host_groups = host_attrs['groups']
|
||||
host_addr = host_attrs['address']
|
||||
self.inventory.add_host(host_addr)
|
||||
for group in host_groups:
|
||||
if group not in self.inventory.groups.keys():
|
||||
self.inventory.add_group(group)
|
||||
self.inventory.add_child(group, host_name)
|
||||
# If the address attribute is populated, override ansible_host with the value
|
||||
if host_attrs.get('address') != '':
|
||||
self.inventory.set_variable(host_name, 'ansible_host', host_attrs.get('address'))
|
||||
self.inventory.set_variable(host_name, 'hostname', entry.get('name'))
|
||||
self.inventory.set_variable(host_name, 'display_name', host_attrs.get('display_name'))
|
||||
self.inventory.set_variable(host_name, 'state',
|
||||
self.inventory.add_child(group, host_addr)
|
||||
self.inventory.set_variable(host_addr, 'address', host_addr)
|
||||
self.inventory.set_variable(host_addr, 'hostname', host_name)
|
||||
self.inventory.set_variable(host_addr, 'state',
|
||||
host_attrs['state'])
|
||||
self.inventory.set_variable(host_name, 'state_type',
|
||||
self.inventory.set_variable(host_addr, 'state_type',
|
||||
host_attrs['state_type'])
|
||||
# Adds all attributes to a variable 'icinga2_attributes'
|
||||
construct_vars = dict(self.inventory.get_host(host_name).get_vars())
|
||||
construct_vars['icinga2_attributes'] = host_attrs
|
||||
self._apply_constructable(host_name, construct_vars)
|
||||
return groups_dict
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
|
||||
@@ -281,7 +211,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
self.icinga2_password = self.get_option('password')
|
||||
self.ssl_verify = self.get_option('validate_certs')
|
||||
self.host_filter = self.get_option('host_filter')
|
||||
self.inventory_attr = self.get_option('inventory_attr')
|
||||
# Not currently enabled
|
||||
# self.cache_key = self.get_cache_key(path)
|
||||
# self.use_cache = cache and self.get_option('cache')
|
||||
|
||||
@@ -66,12 +66,6 @@ EXAMPLES = r'''
|
||||
# Minimal example. `LINODE_ACCESS_TOKEN` is exposed in environment.
|
||||
plugin: community.general.linode
|
||||
|
||||
# You can use Jinja to template the access token.
|
||||
plugin: community.general.linode
|
||||
access_token: "{{ lookup('ini', 'token', section='your_username', file='~/.config/linode-cli') }}"
|
||||
# For older Ansible versions, you need to write this as:
|
||||
# access_token: "{{ lookup('ini', 'token section=your_username file=~/.config/linode-cli') }}"
|
||||
|
||||
# Example with regions, types, groups and access token
|
||||
plugin: community.general.linode
|
||||
access_token: foobar
|
||||
@@ -111,7 +105,6 @@ import os
|
||||
from ansible.errors import AnsibleError, AnsibleParserError
|
||||
from ansible.module_utils.six import string_types
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||
from ansible.template import Templar
|
||||
|
||||
|
||||
try:
|
||||
@@ -126,14 +119,10 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
|
||||
NAME = 'community.general.linode'
|
||||
|
||||
def _build_client(self, loader):
|
||||
def _build_client(self):
|
||||
"""Build the Linode client."""
|
||||
|
||||
t = Templar(loader=loader)
|
||||
|
||||
access_token = self.get_option('access_token')
|
||||
if t.is_template(access_token):
|
||||
access_token = t.template(variable=access_token, disable_lookups=False)
|
||||
|
||||
if access_token is None:
|
||||
try:
|
||||
@@ -298,7 +287,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
raise AnsibleError('the Linode dynamic inventory plugin requires linode_api4.')
|
||||
|
||||
config_data = self._read_config_data(path)
|
||||
self._build_client(loader)
|
||||
self._build_client()
|
||||
|
||||
self._get_instances_inventory()
|
||||
|
||||
|
||||
@@ -13,9 +13,6 @@ DOCUMENTATION = r'''
|
||||
- Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'.
|
||||
version_added: "3.0.0"
|
||||
author: "Frank Dornheim (@conloos)"
|
||||
requirements:
|
||||
- ipaddress
|
||||
- lxd >= 4.0
|
||||
options:
|
||||
plugin:
|
||||
description: Token that ensures this is a source file for the 'lxd' plugin.
|
||||
@@ -50,38 +47,26 @@ DOCUMENTATION = r'''
|
||||
- If I(trust_password) is set, this module send a request for authentication before sending any requests.
|
||||
type: str
|
||||
state:
|
||||
description: Filter the instance according to the current status.
|
||||
description: Filter the container according to the current status.
|
||||
type: str
|
||||
default: none
|
||||
choices: [ 'STOPPED', 'STARTING', 'RUNNING', 'none' ]
|
||||
type_filter:
|
||||
prefered_container_network_interface:
|
||||
description:
|
||||
- Filter the instances by type C(virtual-machine), C(container) or C(both).
|
||||
- The first version of the inventory only supported containers.
|
||||
type: str
|
||||
default: container
|
||||
choices: [ 'virtual-machine', 'container', 'both' ]
|
||||
version_added: 4.2.0
|
||||
prefered_instance_network_interface:
|
||||
description:
|
||||
- If an instance has multiple network interfaces, select which one is the prefered as pattern.
|
||||
- If a container has multiple network interfaces, select which one is the prefered as pattern.
|
||||
- Combined with the first number that can be found e.g. 'eth' + 0.
|
||||
- The option has been renamed from I(prefered_container_network_interface) to I(prefered_instance_network_interface) in community.general 3.8.0.
|
||||
The old name still works as an alias.
|
||||
type: str
|
||||
default: eth
|
||||
aliases:
|
||||
- prefered_container_network_interface
|
||||
prefered_instance_network_family:
|
||||
prefered_container_network_family:
|
||||
description:
|
||||
- If an instance has multiple network interfaces, which one is the prefered by family.
|
||||
- If a container has multiple network interfaces, which one is the prefered by family.
|
||||
- Specify C(inet) for IPv4 and C(inet6) for IPv6.
|
||||
type: str
|
||||
default: inet
|
||||
choices: [ 'inet', 'inet6' ]
|
||||
groupby:
|
||||
description:
|
||||
- Create groups by the following keywords C(location), C(network_range), C(os), C(pattern), C(profile), C(release), C(type), C(vlanid).
|
||||
- Create groups by the following keywords C(location), C(pattern), C(network_range), C(os), C(release), C(profile), C(vlanid).
|
||||
- See example for syntax.
|
||||
type: dict
|
||||
'''
|
||||
@@ -96,49 +81,38 @@ plugin: community.general.lxd
|
||||
url: unix:/var/snap/lxd/common/lxd/unix.socket
|
||||
state: RUNNING
|
||||
|
||||
# simple lxd.yml including virtual machines and containers
|
||||
plugin: community.general.lxd
|
||||
url: unix:/var/snap/lxd/common/lxd/unix.socket
|
||||
type_filter: both
|
||||
|
||||
# grouping lxd.yml
|
||||
groupby:
|
||||
locationBerlin:
|
||||
type: location
|
||||
attribute: Berlin
|
||||
netRangeIPv4:
|
||||
type: network_range
|
||||
attribute: 10.98.143.0/24
|
||||
netRangeIPv6:
|
||||
type: network_range
|
||||
attribute: fd42:bd00:7b11:2167:216:3eff::/24
|
||||
osUbuntu:
|
||||
type: os
|
||||
attribute: ubuntu
|
||||
testpattern:
|
||||
type: pattern
|
||||
attribute: test
|
||||
profileDefault:
|
||||
type: profile
|
||||
attribute: default
|
||||
profileX11:
|
||||
type: profile
|
||||
attribute: x11
|
||||
vlan666:
|
||||
type: vlanid
|
||||
attribute: 666
|
||||
locationBerlin:
|
||||
type: location
|
||||
attribute: Berlin
|
||||
osUbuntu:
|
||||
type: os
|
||||
attribute: ubuntu
|
||||
releaseFocal:
|
||||
type: release
|
||||
attribute: focal
|
||||
releaseBionic:
|
||||
type: release
|
||||
attribute: bionic
|
||||
typeVM:
|
||||
type: type
|
||||
attribute: virtual-machine
|
||||
typeContainer:
|
||||
type: type
|
||||
attribute: container
|
||||
vlan666:
|
||||
type: vlanid
|
||||
attribute: 666
|
||||
profileDefault:
|
||||
type: profile
|
||||
attribute: default
|
||||
profileX11:
|
||||
type: profile
|
||||
attribute: x11
|
||||
netRangeIPv4:
|
||||
type: network_range
|
||||
attribute: 10.98.143.0/24
|
||||
netRangeIPv6:
|
||||
type: network_range
|
||||
attribute: fd42:bd00:7b11:2167:216:3eff::/24
|
||||
'''
|
||||
|
||||
import binascii
|
||||
@@ -150,17 +124,10 @@ import socket
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin
|
||||
from ansible.module_utils.common.text.converters import to_native, to_text
|
||||
from ansible.module_utils.common.dict_transformations import dict_merge
|
||||
from ansible.module_utils.six import raise_from
|
||||
from ansible.errors import AnsibleError, AnsibleParserError
|
||||
from ansible_collections.community.general.plugins.module_utils.compat import ipaddress
|
||||
from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
|
||||
|
||||
try:
|
||||
import ipaddress
|
||||
except ImportError as exc:
|
||||
IPADDRESS_IMPORT_ERROR = exc
|
||||
else:
|
||||
IPADDRESS_IMPORT_ERROR = None
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin):
|
||||
DEBUG = 4
|
||||
@@ -307,10 +274,10 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
network_configs = self.socket.do('GET', '/1.0/networks')
|
||||
return [m.split('/')[3] for m in network_configs['metadata']]
|
||||
|
||||
def _get_instances(self):
|
||||
"""Get instancenames
|
||||
def _get_containers(self):
|
||||
"""Get Containernames
|
||||
|
||||
Returns all instancenames
|
||||
Returns all containernames
|
||||
|
||||
Args:
|
||||
None
|
||||
@@ -319,27 +286,25 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
list(names): names of all instances"""
|
||||
# e.g. {
|
||||
# "metadata": [
|
||||
# "/1.0/instances/foo",
|
||||
# "/1.0/instances/bar"
|
||||
# ],
|
||||
# "status": "Success",
|
||||
# "status_code": 200,
|
||||
# "type": "sync"
|
||||
# }
|
||||
instances = self.socket.do('GET', '/1.0/instances')
|
||||
return [m.split('/')[3] for m in instances['metadata']]
|
||||
list(names): names of all containers"""
|
||||
# e.g. {'type': 'sync',
|
||||
# 'status': 'Success',
|
||||
# 'status_code': 200,
|
||||
# 'operation': '',
|
||||
# 'error_code': 0,
|
||||
# 'error': '',
|
||||
# 'metadata': ['/1.0/containers/udemy-ansible-ubuntu-2004']}
|
||||
containers = self.socket.do('GET', '/1.0/containers')
|
||||
return [m.split('/')[3] for m in containers['metadata']]
|
||||
|
||||
def _get_config(self, branch, name):
|
||||
"""Get inventory of instance
|
||||
"""Get inventory of container
|
||||
|
||||
Get config of instance
|
||||
Get config of container
|
||||
|
||||
Args:
|
||||
str(branch): Name oft the API-Branch
|
||||
str(name): Name of instance
|
||||
str(name): Name of Container
|
||||
Kwargs:
|
||||
None
|
||||
Source:
|
||||
@@ -347,7 +312,7 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
dict(config): Config of the instance"""
|
||||
dict(config): Config of the container"""
|
||||
config = {}
|
||||
if isinstance(branch, (tuple, list)):
|
||||
config[name] = {branch[1]: self.socket.do('GET', '/1.0/{0}/{1}/{2}'.format(to_native(branch[0]), to_native(name), to_native(branch[1])))}
|
||||
@@ -355,13 +320,13 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
config[name] = {branch: self.socket.do('GET', '/1.0/{0}/{1}'.format(to_native(branch), to_native(name)))}
|
||||
return config
|
||||
|
||||
def get_instance_data(self, names):
|
||||
"""Create Inventory of the instance
|
||||
def get_container_data(self, names):
|
||||
"""Create Inventory of the container
|
||||
|
||||
Iterate through the different branches of the instances and collect Informations.
|
||||
Iterate through the different branches of the containers and collect Informations.
|
||||
|
||||
Args:
|
||||
list(names): List of instance names
|
||||
list(names): List of container names
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
@@ -370,20 +335,20 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
None"""
|
||||
# tuple(('instances','metadata/templates')) to get section in branch
|
||||
# e.g. /1.0/instances/<name>/metadata/templates
|
||||
branches = ['instances', ('instances', 'state')]
|
||||
instance_config = {}
|
||||
branches = ['containers', ('instances', 'state')]
|
||||
container_config = {}
|
||||
for branch in branches:
|
||||
for name in names:
|
||||
instance_config['instances'] = self._get_config(branch, name)
|
||||
self.data = dict_merge(instance_config, self.data)
|
||||
container_config['containers'] = self._get_config(branch, name)
|
||||
self.data = dict_merge(container_config, self.data)
|
||||
|
||||
def get_network_data(self, names):
|
||||
"""Create Inventory of the instance
|
||||
"""Create Inventory of the container
|
||||
|
||||
Iterate through the different branches of the instances and collect Informations.
|
||||
Iterate through the different branches of the containers and collect Informations.
|
||||
|
||||
Args:
|
||||
list(names): List of instance names
|
||||
list(names): List of container names
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
@@ -402,26 +367,26 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
network_config['networks'] = {name: None}
|
||||
self.data = dict_merge(network_config, self.data)
|
||||
|
||||
def extract_network_information_from_instance_config(self, instance_name):
|
||||
def extract_network_information_from_container_config(self, container_name):
|
||||
"""Returns the network interface configuration
|
||||
|
||||
Returns the network ipv4 and ipv6 config of the instance without local-link
|
||||
Returns the network ipv4 and ipv6 config of the container without local-link
|
||||
|
||||
Args:
|
||||
str(instance_name): Name oft he instance
|
||||
str(container_name): Name oft he container
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
dict(network_configuration): network config"""
|
||||
instance_network_interfaces = self._get_data_entry('instances/{0}/state/metadata/network'.format(instance_name))
|
||||
container_network_interfaces = self._get_data_entry('containers/{0}/state/metadata/network'.format(container_name))
|
||||
network_configuration = None
|
||||
if instance_network_interfaces:
|
||||
if container_network_interfaces:
|
||||
network_configuration = {}
|
||||
gen_interface_names = [interface_name for interface_name in instance_network_interfaces if interface_name != 'lo']
|
||||
gen_interface_names = [interface_name for interface_name in container_network_interfaces if interface_name != 'lo']
|
||||
for interface_name in gen_interface_names:
|
||||
gen_address = [address for address in instance_network_interfaces[interface_name]['addresses'] if address.get('scope') != 'link']
|
||||
gen_address = [address for address in container_network_interfaces[interface_name]['addresses'] if address.get('scope') != 'link']
|
||||
network_configuration[interface_name] = []
|
||||
for address in gen_address:
|
||||
address_set = {}
|
||||
@@ -432,24 +397,24 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
network_configuration[interface_name].append(address_set)
|
||||
return network_configuration
|
||||
|
||||
def get_prefered_instance_network_interface(self, instance_name):
|
||||
"""Helper to get the prefered interface of thr instance
|
||||
def get_prefered_container_network_interface(self, container_name):
|
||||
"""Helper to get the prefered interface of thr container
|
||||
|
||||
Helper to get the prefered interface provide by neme pattern from 'prefered_instance_network_interface'.
|
||||
Helper to get the prefered interface provide by neme pattern from 'prefered_container_network_interface'.
|
||||
|
||||
Args:
|
||||
str(containe_name): name of instance
|
||||
str(containe_name): name of container
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
str(prefered_interface): None or interface name"""
|
||||
instance_network_interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name))
|
||||
container_network_interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name))
|
||||
prefered_interface = None # init
|
||||
if instance_network_interfaces: # instance have network interfaces
|
||||
if container_network_interfaces: # container have network interfaces
|
||||
# generator if interfaces which start with the desired pattern
|
||||
net_generator = [interface for interface in instance_network_interfaces if interface.startswith(self.prefered_instance_network_interface)]
|
||||
net_generator = [interface for interface in container_network_interfaces if interface.startswith(self.prefered_container_network_interface)]
|
||||
selected_interfaces = [] # init
|
||||
for interface in net_generator:
|
||||
selected_interfaces.append(interface)
|
||||
@@ -457,13 +422,13 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
prefered_interface = sorted(selected_interfaces)[0]
|
||||
return prefered_interface
|
||||
|
||||
def get_instance_vlans(self, instance_name):
|
||||
"""Get VLAN(s) from instance
|
||||
def get_container_vlans(self, container_name):
|
||||
"""Get VLAN(s) from container
|
||||
|
||||
Helper to get the VLAN_ID from the instance
|
||||
Helper to get the VLAN_ID from the container
|
||||
|
||||
Args:
|
||||
str(containe_name): name of instance
|
||||
str(containe_name): name of container
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
@@ -476,13 +441,13 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
if self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network)):
|
||||
network_vlans[network] = self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network))
|
||||
|
||||
# get networkdevices of instance and return
|
||||
# get networkdevices of container and return
|
||||
# e.g.
|
||||
# "eth0":{ "name":"eth0",
|
||||
# "network":"lxdbr0",
|
||||
# "type":"nic"},
|
||||
vlan_ids = {}
|
||||
devices = self._get_data_entry('instances/{0}/instances/metadata/expanded_devices'.format(to_native(instance_name)))
|
||||
devices = self._get_data_entry('containers/{0}/containers/metadata/expanded_devices'.format(to_native(container_name)))
|
||||
for device in devices:
|
||||
if 'network' in devices[device]:
|
||||
if devices[device]['network'] in network_vlans:
|
||||
@@ -518,14 +483,14 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
except KeyError:
|
||||
return None
|
||||
|
||||
def _set_data_entry(self, instance_name, key, value, path=None):
|
||||
def _set_data_entry(self, container_name, key, value, path=None):
|
||||
"""Helper to save data
|
||||
|
||||
Helper to save the data in self.data
|
||||
Detect if data is allready in branch and use dict_merge() to prevent that branch is overwritten.
|
||||
|
||||
Args:
|
||||
str(instance_name): name of instance
|
||||
str(container_name): name of container
|
||||
str(key): same as dict
|
||||
*(value): same as dict
|
||||
Kwargs:
|
||||
@@ -536,24 +501,24 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
None"""
|
||||
if not path:
|
||||
path = self.data['inventory']
|
||||
if instance_name not in path:
|
||||
path[instance_name] = {}
|
||||
if container_name not in path:
|
||||
path[container_name] = {}
|
||||
|
||||
try:
|
||||
if isinstance(value, dict) and key in path[instance_name]:
|
||||
path[instance_name] = dict_merge(value, path[instance_name][key])
|
||||
if isinstance(value, dict) and key in path[container_name]:
|
||||
path[container_name] = dict_merge(value, path[container_name][key])
|
||||
else:
|
||||
path[instance_name][key] = value
|
||||
path[container_name][key] = value
|
||||
except KeyError as err:
|
||||
raise AnsibleParserError("Unable to store Informations: {0}".format(to_native(err)))
|
||||
|
||||
def extract_information_from_instance_configs(self):
|
||||
def extract_information_from_container_configs(self):
|
||||
"""Process configuration information
|
||||
|
||||
Preparation of the data
|
||||
|
||||
Args:
|
||||
dict(configs): instance configurations
|
||||
dict(configs): Container configurations
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
@@ -564,35 +529,33 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
if 'inventory' not in self.data:
|
||||
self.data['inventory'] = {}
|
||||
|
||||
for instance_name in self.data['instances']:
|
||||
self._set_data_entry(instance_name, 'os', self._get_data_entry(
|
||||
'instances/{0}/instances/metadata/config/image.os'.format(instance_name)))
|
||||
self._set_data_entry(instance_name, 'release', self._get_data_entry(
|
||||
'instances/{0}/instances/metadata/config/image.release'.format(instance_name)))
|
||||
self._set_data_entry(instance_name, 'version', self._get_data_entry(
|
||||
'instances/{0}/instances/metadata/config/image.version'.format(instance_name)))
|
||||
self._set_data_entry(instance_name, 'profile', self._get_data_entry(
|
||||
'instances/{0}/instances/metadata/profiles'.format(instance_name)))
|
||||
self._set_data_entry(instance_name, 'location', self._get_data_entry(
|
||||
'instances/{0}/instances/metadata/location'.format(instance_name)))
|
||||
self._set_data_entry(instance_name, 'state', self._get_data_entry(
|
||||
'instances/{0}/instances/metadata/config/volatile.last_state.power'.format(instance_name)))
|
||||
self._set_data_entry(instance_name, 'type', self._get_data_entry(
|
||||
'instances/{0}/instances/metadata/type'.format(instance_name)))
|
||||
self._set_data_entry(instance_name, 'network_interfaces', self.extract_network_information_from_instance_config(instance_name))
|
||||
self._set_data_entry(instance_name, 'preferred_interface', self.get_prefered_instance_network_interface(instance_name))
|
||||
self._set_data_entry(instance_name, 'vlan_ids', self.get_instance_vlans(instance_name))
|
||||
for container_name in self.data['containers']:
|
||||
self._set_data_entry(container_name, 'os', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/config/image.os'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'release', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/config/image.release'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'version', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/config/image.version'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'profile', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/profiles'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'location', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/location'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'state', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/config/volatile.last_state.power'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'network_interfaces', self.extract_network_information_from_container_config(container_name))
|
||||
self._set_data_entry(container_name, 'preferred_interface', self.get_prefered_container_network_interface(container_name))
|
||||
self._set_data_entry(container_name, 'vlan_ids', self.get_container_vlans(container_name))
|
||||
|
||||
def build_inventory_network(self, instance_name):
|
||||
"""Add the network interfaces of the instance to the inventory
|
||||
def build_inventory_network(self, container_name):
|
||||
"""Add the network interfaces of the container to the inventory
|
||||
|
||||
Logic:
|
||||
- if the instance have no interface -> 'ansible_connection: local'
|
||||
- get preferred_interface & prefered_instance_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
|
||||
- first Interface from: network_interfaces prefered_instance_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
|
||||
- if the container have no interface -> 'ansible_connection: local'
|
||||
- get preferred_interface & prefered_container_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
|
||||
- first Interface from: network_interfaces prefered_container_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
|
||||
|
||||
Args:
|
||||
str(instance_name): name of instance
|
||||
str(container_name): name of container
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
@@ -600,45 +563,45 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
Returns:
|
||||
None"""
|
||||
|
||||
def interface_selection(instance_name):
|
||||
"""Select instance Interface for inventory
|
||||
def interface_selection(container_name):
|
||||
"""Select container Interface for inventory
|
||||
|
||||
Logic:
|
||||
- get preferred_interface & prefered_instance_network_family -> str(IP)
|
||||
- first Interface from: network_interfaces prefered_instance_network_family -> str(IP)
|
||||
- get preferred_interface & prefered_container_network_family -> str(IP)
|
||||
- first Interface from: network_interfaces prefered_container_network_family -> str(IP)
|
||||
|
||||
Args:
|
||||
str(instance_name): name of instance
|
||||
str(container_name): name of container
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
dict(interface_name: ip)"""
|
||||
prefered_interface = self._get_data_entry('inventory/{0}/preferred_interface'.format(instance_name)) # name or None
|
||||
prefered_instance_network_family = self.prefered_instance_network_family
|
||||
prefered_interface = self._get_data_entry('inventory/{0}/preferred_interface'.format(container_name)) # name or None
|
||||
prefered_container_network_family = self.prefered_container_network_family
|
||||
|
||||
ip_address = ''
|
||||
if prefered_interface:
|
||||
interface = self._get_data_entry('inventory/{0}/network_interfaces/{1}'.format(instance_name, prefered_interface))
|
||||
interface = self._get_data_entry('inventory/{0}/network_interfaces/{1}'.format(container_name, prefered_interface))
|
||||
for config in interface:
|
||||
if config['family'] == prefered_instance_network_family:
|
||||
if config['family'] == prefered_container_network_family:
|
||||
ip_address = config['address']
|
||||
break
|
||||
else:
|
||||
interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name))
|
||||
for interface in interfaces.values():
|
||||
for config in interface:
|
||||
if config['family'] == prefered_instance_network_family:
|
||||
ip_address = config['address']
|
||||
break
|
||||
interface = self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name))
|
||||
for config in interface:
|
||||
if config['family'] == prefered_container_network_family:
|
||||
ip_address = config['address']
|
||||
break
|
||||
return ip_address
|
||||
|
||||
if self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)): # instance have network interfaces
|
||||
self.inventory.set_variable(instance_name, 'ansible_connection', 'ssh')
|
||||
self.inventory.set_variable(instance_name, 'ansible_host', interface_selection(instance_name))
|
||||
if self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name)): # container have network interfaces
|
||||
if self._get_data_entry('inventory/{0}/preferred_interface'.format(container_name)): # container have a preferred interface
|
||||
self.inventory.set_variable(container_name, 'ansible_connection', 'ssh')
|
||||
self.inventory.set_variable(container_name, 'ansible_host', interface_selection(container_name))
|
||||
else:
|
||||
self.inventory.set_variable(instance_name, 'ansible_connection', 'local')
|
||||
self.inventory.set_variable(container_name, 'ansible_connection', 'local')
|
||||
|
||||
def build_inventory_hosts(self):
|
||||
"""Build host-part dynamic inventory
|
||||
@@ -654,33 +617,29 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
None
|
||||
Returns:
|
||||
None"""
|
||||
for instance_name in self.data['inventory']:
|
||||
instance_state = str(self._get_data_entry('inventory/{0}/state'.format(instance_name)) or "STOPPED").lower()
|
||||
|
||||
# Only consider instances that match the "state" filter, if self.state is not None
|
||||
for container_name in self.data['inventory']:
|
||||
# Only consider containers that match the "state" filter, if self.state is not None
|
||||
if self.filter:
|
||||
if self.filter.lower() != instance_state:
|
||||
if self.filter.lower() != self._get_data_entry('inventory/{0}/state'.format(container_name)).lower():
|
||||
continue
|
||||
# add instance
|
||||
self.inventory.add_host(instance_name)
|
||||
# add container
|
||||
self.inventory.add_host(container_name)
|
||||
# add network informations
|
||||
self.build_inventory_network(instance_name)
|
||||
self.build_inventory_network(container_name)
|
||||
# add os
|
||||
self.inventory.set_variable(instance_name, 'ansible_lxd_os', self._get_data_entry('inventory/{0}/os'.format(instance_name)).lower())
|
||||
self.inventory.set_variable(container_name, 'ansible_lxd_os', self._get_data_entry('inventory/{0}/os'.format(container_name)).lower())
|
||||
# add release
|
||||
self.inventory.set_variable(instance_name, 'ansible_lxd_release', self._get_data_entry('inventory/{0}/release'.format(instance_name)).lower())
|
||||
self.inventory.set_variable(container_name, 'ansible_lxd_release', self._get_data_entry('inventory/{0}/release'.format(container_name)).lower())
|
||||
# add profile
|
||||
self.inventory.set_variable(instance_name, 'ansible_lxd_profile', self._get_data_entry('inventory/{0}/profile'.format(instance_name)))
|
||||
self.inventory.set_variable(container_name, 'ansible_lxd_profile', self._get_data_entry('inventory/{0}/profile'.format(container_name)))
|
||||
# add state
|
||||
self.inventory.set_variable(instance_name, 'ansible_lxd_state', instance_state)
|
||||
# add type
|
||||
self.inventory.set_variable(instance_name, 'ansible_lxd_type', self._get_data_entry('inventory/{0}/type'.format(instance_name)))
|
||||
self.inventory.set_variable(container_name, 'ansible_lxd_state', self._get_data_entry('inventory/{0}/state'.format(container_name)).lower())
|
||||
# add location information
|
||||
if self._get_data_entry('inventory/{0}/location'.format(instance_name)) != "none": # wrong type by lxd 'none' != 'None'
|
||||
self.inventory.set_variable(instance_name, 'ansible_lxd_location', self._get_data_entry('inventory/{0}/location'.format(instance_name)))
|
||||
if self._get_data_entry('inventory/{0}/location'.format(container_name)) != "none": # wrong type by lxd 'none' != 'None'
|
||||
self.inventory.set_variable(container_name, 'ansible_lxd_location', self._get_data_entry('inventory/{0}/location'.format(container_name)))
|
||||
# add VLAN_ID information
|
||||
if self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)):
|
||||
self.inventory.set_variable(instance_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)))
|
||||
if self._get_data_entry('inventory/{0}/vlan_ids'.format(container_name)):
|
||||
self.inventory.set_variable(container_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(container_name)))
|
||||
|
||||
def build_inventory_groups_location(self, group_name):
|
||||
"""create group by attribute: location
|
||||
@@ -697,9 +656,9 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
if group_name not in self.inventory.groups:
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
for instance_name in self.inventory.hosts:
|
||||
if 'ansible_lxd_location' in self.inventory.get_host(instance_name).get_vars():
|
||||
self.inventory.add_child(group_name, instance_name)
|
||||
for container_name in self.inventory.hosts:
|
||||
if 'ansible_lxd_location' in self.inventory.get_host(container_name).get_vars():
|
||||
self.inventory.add_child(group_name, container_name)
|
||||
|
||||
def build_inventory_groups_pattern(self, group_name):
|
||||
"""create group by name pattern
|
||||
@@ -718,10 +677,10 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
|
||||
regex_pattern = self.groupby[group_name].get('attribute')
|
||||
|
||||
for instance_name in self.inventory.hosts:
|
||||
result = re.search(regex_pattern, instance_name)
|
||||
for container_name in self.inventory.hosts:
|
||||
result = re.search(regex_pattern, container_name)
|
||||
if result:
|
||||
self.inventory.add_child(group_name, instance_name)
|
||||
self.inventory.add_child(group_name, container_name)
|
||||
|
||||
def build_inventory_groups_network_range(self, group_name):
|
||||
"""check if IP is in network-class
|
||||
@@ -744,14 +703,14 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
raise AnsibleParserError(
|
||||
'Error while parsing network range {0}: {1}'.format(self.groupby[group_name].get('attribute'), to_native(err)))
|
||||
|
||||
for instance_name in self.inventory.hosts:
|
||||
if self.data['inventory'][instance_name].get('network_interfaces') is not None:
|
||||
for interface in self.data['inventory'][instance_name].get('network_interfaces'):
|
||||
for interface_family in self.data['inventory'][instance_name].get('network_interfaces')[interface]:
|
||||
for container_name in self.inventory.hosts:
|
||||
if self.data['inventory'][container_name].get('network_interfaces') is not None:
|
||||
for interface in self.data['inventory'][container_name].get('network_interfaces'):
|
||||
for interface_family in self.data['inventory'][container_name].get('network_interfaces')[interface]:
|
||||
try:
|
||||
address = ipaddress.ip_address(to_text(interface_family['address']))
|
||||
if address.version == network.version and address in network:
|
||||
self.inventory.add_child(group_name, instance_name)
|
||||
self.inventory.add_child(group_name, container_name)
|
||||
except ValueError:
|
||||
# Ignore invalid IP addresses returned by lxd
|
||||
pass
|
||||
@@ -762,7 +721,7 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
Args:
|
||||
str(group_name): Group name
|
||||
Kwargs:
|
||||
None
|
||||
Noneself.data['inventory'][container_name][interface]
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
@@ -771,12 +730,12 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
if group_name not in self.inventory.groups:
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
gen_instances = [
|
||||
instance_name for instance_name in self.inventory.hosts
|
||||
if 'ansible_lxd_os' in self.inventory.get_host(instance_name).get_vars()]
|
||||
for instance_name in gen_instances:
|
||||
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_os'):
|
||||
self.inventory.add_child(group_name, instance_name)
|
||||
gen_containers = [
|
||||
container_name for container_name in self.inventory.hosts
|
||||
if 'ansible_lxd_os' in self.inventory.get_host(container_name).get_vars()]
|
||||
for container_name in gen_containers:
|
||||
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(container_name).get_vars().get('ansible_lxd_os'):
|
||||
self.inventory.add_child(group_name, container_name)
|
||||
|
||||
def build_inventory_groups_release(self, group_name):
|
||||
"""create group by attribute: release
|
||||
@@ -793,12 +752,12 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
if group_name not in self.inventory.groups:
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
gen_instances = [
|
||||
instance_name for instance_name in self.inventory.hosts
|
||||
if 'ansible_lxd_release' in self.inventory.get_host(instance_name).get_vars()]
|
||||
for instance_name in gen_instances:
|
||||
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_release'):
|
||||
self.inventory.add_child(group_name, instance_name)
|
||||
gen_containers = [
|
||||
container_name for container_name in self.inventory.hosts
|
||||
if 'ansible_lxd_release' in self.inventory.get_host(container_name).get_vars()]
|
||||
for container_name in gen_containers:
|
||||
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(container_name).get_vars().get('ansible_lxd_release'):
|
||||
self.inventory.add_child(group_name, container_name)
|
||||
|
||||
def build_inventory_groups_profile(self, group_name):
|
||||
"""create group by attribute: profile
|
||||
@@ -815,12 +774,12 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
if group_name not in self.inventory.groups:
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
gen_instances = [
|
||||
instance_name for instance_name in self.inventory.hosts.keys()
|
||||
if 'ansible_lxd_profile' in self.inventory.get_host(instance_name).get_vars().keys()]
|
||||
for instance_name in gen_instances:
|
||||
if self.groupby[group_name].get('attribute').lower() in self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_profile'):
|
||||
self.inventory.add_child(group_name, instance_name)
|
||||
gen_containers = [
|
||||
container_name for container_name in self.inventory.hosts.keys()
|
||||
if 'ansible_lxd_profile' in self.inventory.get_host(container_name).get_vars().keys()]
|
||||
for container_name in gen_containers:
|
||||
if self.groupby[group_name].get('attribute').lower() in self.inventory.get_host(container_name).get_vars().get('ansible_lxd_profile'):
|
||||
self.inventory.add_child(group_name, container_name)
|
||||
|
||||
def build_inventory_groups_vlanid(self, group_name):
|
||||
"""create group by attribute: vlanid
|
||||
@@ -837,34 +796,12 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
if group_name not in self.inventory.groups:
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
gen_instances = [
|
||||
instance_name for instance_name in self.inventory.hosts.keys()
|
||||
if 'ansible_lxd_vlan_ids' in self.inventory.get_host(instance_name).get_vars().keys()]
|
||||
for instance_name in gen_instances:
|
||||
if self.groupby[group_name].get('attribute') in self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_vlan_ids').values():
|
||||
self.inventory.add_child(group_name, instance_name)
|
||||
|
||||
def build_inventory_groups_type(self, group_name):
|
||||
"""create group by attribute: type
|
||||
|
||||
Args:
|
||||
str(group_name): Group name
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
None"""
|
||||
# maybe we just want to expand one group
|
||||
if group_name not in self.inventory.groups:
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
gen_instances = [
|
||||
instance_name for instance_name in self.inventory.hosts
|
||||
if 'ansible_lxd_type' in self.inventory.get_host(instance_name).get_vars()]
|
||||
for instance_name in gen_instances:
|
||||
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_type'):
|
||||
self.inventory.add_child(group_name, instance_name)
|
||||
gen_containers = [
|
||||
container_name for container_name in self.inventory.hosts.keys()
|
||||
if 'ansible_lxd_vlan_ids' in self.inventory.get_host(container_name).get_vars().keys()]
|
||||
for container_name in gen_containers:
|
||||
if self.groupby[group_name].get('attribute') in self.inventory.get_host(container_name).get_vars().get('ansible_lxd_vlan_ids').values():
|
||||
self.inventory.add_child(group_name, container_name)
|
||||
|
||||
def build_inventory_groups(self):
|
||||
"""Build group-part dynamic inventory
|
||||
@@ -893,7 +830,6 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
* 'release'
|
||||
* 'profile'
|
||||
* 'vlanid'
|
||||
* 'type'
|
||||
|
||||
Args:
|
||||
str(group_name): Group name
|
||||
@@ -919,8 +855,6 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
self.build_inventory_groups_profile(group_name)
|
||||
elif self.groupby[group_name].get('type') == 'vlanid':
|
||||
self.build_inventory_groups_vlanid(group_name)
|
||||
elif self.groupby[group_name].get('type') == 'type':
|
||||
self.build_inventory_groups_type(group_name)
|
||||
else:
|
||||
raise AnsibleParserError('Unknown group type: {0}'.format(to_native(group_name)))
|
||||
|
||||
@@ -947,30 +881,10 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
self.build_inventory_hosts()
|
||||
self.build_inventory_groups()
|
||||
|
||||
def cleandata(self):
|
||||
"""Clean the dynamic inventory
|
||||
|
||||
The first version of the inventory only supported container.
|
||||
This will change in the future.
|
||||
The following function cleans up the data and remove the all items with the wrong type.
|
||||
|
||||
Args:
|
||||
None
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
None"""
|
||||
iter_keys = list(self.data['instances'].keys())
|
||||
for instance_name in iter_keys:
|
||||
if self._get_data_entry('instances/{0}/instances/metadata/type'.format(instance_name)) != self.type_filter:
|
||||
del self.data['instances'][instance_name]
|
||||
|
||||
def _populate(self):
|
||||
"""Return the hosts and groups
|
||||
|
||||
Returns the processed instance configurations from the lxd import
|
||||
Returns the processed container configurations from the lxd import
|
||||
|
||||
Args:
|
||||
None
|
||||
@@ -983,16 +897,10 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
|
||||
if len(self.data) == 0: # If no data is injected by unittests open socket
|
||||
self.socket = self._connect_to_socket()
|
||||
self.get_instance_data(self._get_instances())
|
||||
self.get_container_data(self._get_containers())
|
||||
self.get_network_data(self._get_networks())
|
||||
|
||||
# The first version of the inventory only supported containers.
|
||||
# This will change in the future.
|
||||
# The following function cleans up the data.
|
||||
if self.type_filter != 'both':
|
||||
self.cleandata()
|
||||
|
||||
self.extract_information_from_instance_configs()
|
||||
self.extract_information_from_container_configs()
|
||||
|
||||
# self.display.vvv(self.save_json_data([os.path.abspath(__file__)]))
|
||||
|
||||
@@ -1016,10 +924,6 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
AnsibleParserError
|
||||
Returns:
|
||||
None"""
|
||||
if IPADDRESS_IMPORT_ERROR:
|
||||
raise_from(
|
||||
AnsibleError('another_library must be installed to use this plugin'),
|
||||
IPADDRESS_IMPORT_ERROR)
|
||||
|
||||
super(InventoryModule, self).parse(inventory, loader, path, cache=False)
|
||||
# Read the inventory YAML file
|
||||
@@ -1031,9 +935,8 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
self.data = {} # store for inventory-data
|
||||
self.groupby = self.get_option('groupby')
|
||||
self.plugin = self.get_option('plugin')
|
||||
self.prefered_instance_network_family = self.get_option('prefered_instance_network_family')
|
||||
self.prefered_instance_network_interface = self.get_option('prefered_instance_network_interface')
|
||||
self.type_filter = self.get_option('type_filter')
|
||||
self.prefered_container_network_family = self.get_option('prefered_container_network_family')
|
||||
self.prefered_container_network_interface = self.get_option('prefered_container_network_interface')
|
||||
if self.get_option('state').lower() == 'none': # none in config is str()
|
||||
self.filter = None
|
||||
else:
|
||||
|
||||
@@ -31,12 +31,6 @@ DOCUMENTATION = r'''
|
||||
tags:
|
||||
description: Filter results on a specific tag.
|
||||
type: list
|
||||
scw_profile:
|
||||
description:
|
||||
- The config profile to use in config file.
|
||||
- By default uses the one specified as C(active_profile) in the config file, or falls back to C(default) if that is not defined.
|
||||
type: string
|
||||
version_added: 4.4.0
|
||||
oauth_token:
|
||||
description:
|
||||
- Scaleway OAuth token.
|
||||
@@ -309,13 +303,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
if not oauth_token and os.path.exists(scw_config_path):
|
||||
with open(scw_config_path) as fh:
|
||||
scw_config = yaml.safe_load(fh)
|
||||
ansible_profile = self.get_option('scw_profile')
|
||||
|
||||
if ansible_profile:
|
||||
active_profile = ansible_profile
|
||||
else:
|
||||
active_profile = scw_config.get('active_profile', 'default')
|
||||
|
||||
active_profile = scw_config.get('active_profile', 'default')
|
||||
if active_profile == 'default':
|
||||
oauth_token = scw_config.get('secret_key')
|
||||
else:
|
||||
|
||||
@@ -1,327 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2021 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: xen_orchestra
|
||||
short_description: Xen Orchestra inventory source
|
||||
version_added: 4.1.0
|
||||
author:
|
||||
- Dom Del Nano (@ddelnano) <ddelnano@gmail.com>
|
||||
- Samori Gorse (@shinuza) <samorigorse@gmail.com>
|
||||
requirements:
|
||||
- websocket-client >= 1.0.0
|
||||
description:
|
||||
- Get inventory hosts from a Xen Orchestra deployment.
|
||||
- 'Uses a configuration file as an inventory source, it must end in C(.xen_orchestra.yml) or C(.xen_orchestra.yaml).'
|
||||
extends_documentation_fragment:
|
||||
- constructed
|
||||
- inventory_cache
|
||||
options:
|
||||
plugin:
|
||||
description: The name of this plugin, it should always be set to C(community.general.xen_orchestra) for this plugin to recognize it as its own.
|
||||
required: yes
|
||||
choices: ['community.general.xen_orchestra']
|
||||
type: str
|
||||
api_host:
|
||||
description:
|
||||
- API host to XOA API.
|
||||
- If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_HOST) will be used instead.
|
||||
type: str
|
||||
env:
|
||||
- name: ANSIBLE_XO_HOST
|
||||
user:
|
||||
description:
|
||||
- Xen Orchestra user.
|
||||
- If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_USER) will be used instead.
|
||||
required: yes
|
||||
type: str
|
||||
env:
|
||||
- name: ANSIBLE_XO_USER
|
||||
password:
|
||||
description:
|
||||
- Xen Orchestra password.
|
||||
- If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_PASSWORD) will be used instead.
|
||||
required: yes
|
||||
type: str
|
||||
env:
|
||||
- name: ANSIBLE_XO_PASSWORD
|
||||
validate_certs:
|
||||
description: Verify TLS certificate if using HTTPS.
|
||||
type: boolean
|
||||
default: true
|
||||
use_ssl:
|
||||
description: Use wss when connecting to the Xen Orchestra API
|
||||
type: boolean
|
||||
default: true
|
||||
'''
|
||||
|
||||
|
||||
EXAMPLES = '''
|
||||
# file must be named xen_orchestra.yaml or xen_orchestra.yml
|
||||
plugin: community.general.xen_orchestra
|
||||
api_host: 192.168.1.255
|
||||
user: xo
|
||||
password: xo_pwd
|
||||
validate_certs: true
|
||||
use_ssl: true
|
||||
groups:
|
||||
kube_nodes: "'kube_node' in tags"
|
||||
compose:
|
||||
ansible_port: 2222
|
||||
|
||||
'''
|
||||
|
||||
import json
|
||||
import ssl
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
# 3rd party imports
|
||||
try:
|
||||
HAS_WEBSOCKET = True
|
||||
import websocket
|
||||
from websocket import create_connection
|
||||
|
||||
if LooseVersion(websocket.__version__) <= LooseVersion('1.0.0'):
|
||||
raise ImportError
|
||||
except ImportError as e:
|
||||
HAS_WEBSOCKET = False
|
||||
|
||||
|
||||
HALTED = 'Halted'
|
||||
PAUSED = 'Paused'
|
||||
RUNNING = 'Running'
|
||||
SUSPENDED = 'Suspended'
|
||||
POWER_STATES = [RUNNING, HALTED, SUSPENDED, PAUSED]
|
||||
HOST_GROUP = 'xo_hosts'
|
||||
POOL_GROUP = 'xo_pools'
|
||||
|
||||
|
||||
def clean_group_name(label):
|
||||
return label.lower().replace(' ', '-').replace('-', '_')
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
''' Host inventory parser for ansible using XenOrchestra as source. '''
|
||||
|
||||
NAME = 'community.general.xen_orchestra'
|
||||
|
||||
def __init__(self):
|
||||
|
||||
super(InventoryModule, self).__init__()
|
||||
|
||||
# from config
|
||||
self.counter = -1
|
||||
self.session = None
|
||||
self.cache_key = None
|
||||
self.use_cache = None
|
||||
|
||||
@property
|
||||
def pointer(self):
|
||||
self.counter += 1
|
||||
return self.counter
|
||||
|
||||
def create_connection(self, xoa_api_host):
|
||||
validate_certs = self.get_option('validate_certs')
|
||||
use_ssl = self.get_option('use_ssl')
|
||||
proto = 'wss' if use_ssl else 'ws'
|
||||
|
||||
sslopt = None if validate_certs else {'cert_reqs': ssl.CERT_NONE}
|
||||
self.conn = create_connection(
|
||||
'{0}://{1}/api/'.format(proto, xoa_api_host), sslopt=sslopt)
|
||||
|
||||
def login(self, user, password):
|
||||
payload = {'id': self.pointer, 'jsonrpc': '2.0', 'method': 'session.signIn', 'params': {
|
||||
'username': user, 'password': password}}
|
||||
self.conn.send(json.dumps(payload))
|
||||
result = json.loads(self.conn.recv())
|
||||
|
||||
if 'error' in result:
|
||||
raise AnsibleError(
|
||||
'Could not connect: {0}'.format(result['error']))
|
||||
|
||||
def get_object(self, name):
|
||||
payload = {'id': self.pointer, 'jsonrpc': '2.0',
|
||||
'method': 'xo.getAllObjects', 'params': {'filter': {'type': name}}}
|
||||
self.conn.send(json.dumps(payload))
|
||||
answer = json.loads(self.conn.recv())
|
||||
|
||||
if 'error' in answer:
|
||||
raise AnsibleError(
|
||||
'Could not request: {0}'.format(answer['error']))
|
||||
|
||||
return answer['result']
|
||||
|
||||
def _get_objects(self):
|
||||
self.create_connection(self.xoa_api_host)
|
||||
self.login(self.xoa_user, self.xoa_password)
|
||||
|
||||
return {
|
||||
'vms': self.get_object('VM'),
|
||||
'pools': self.get_object('pool'),
|
||||
'hosts': self.get_object('host'),
|
||||
}
|
||||
|
||||
def _apply_constructable(self, name, variables):
|
||||
strict = self.get_option('strict')
|
||||
self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=strict)
|
||||
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=strict)
|
||||
self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict)
|
||||
|
||||
def _add_vms(self, vms, hosts, pools):
|
||||
for uuid, vm in vms.items():
|
||||
group = 'with_ip'
|
||||
ip = vm.get('mainIpAddress')
|
||||
entry_name = uuid
|
||||
power_state = vm['power_state'].lower()
|
||||
pool_name = self._pool_group_name_for_uuid(pools, vm['$poolId'])
|
||||
host_name = self._host_group_name_for_uuid(hosts, vm['$container'])
|
||||
|
||||
self.inventory.add_host(entry_name)
|
||||
|
||||
# Grouping by power state
|
||||
self.inventory.add_child(power_state, entry_name)
|
||||
|
||||
# Grouping by host
|
||||
if host_name:
|
||||
self.inventory.add_child(host_name, entry_name)
|
||||
|
||||
# Grouping by pool
|
||||
if pool_name:
|
||||
self.inventory.add_child(pool_name, entry_name)
|
||||
|
||||
# Grouping VMs with an IP together
|
||||
if ip is None:
|
||||
group = 'without_ip'
|
||||
self.inventory.add_group(group)
|
||||
self.inventory.add_child(group, entry_name)
|
||||
|
||||
# Adding meta
|
||||
self.inventory.set_variable(entry_name, 'uuid', uuid)
|
||||
self.inventory.set_variable(entry_name, 'ip', ip)
|
||||
self.inventory.set_variable(entry_name, 'ansible_host', ip)
|
||||
self.inventory.set_variable(entry_name, 'power_state', power_state)
|
||||
self.inventory.set_variable(
|
||||
entry_name, 'name_label', vm['name_label'])
|
||||
self.inventory.set_variable(entry_name, 'type', vm['type'])
|
||||
self.inventory.set_variable(
|
||||
entry_name, 'cpus', vm['CPUs']['number'])
|
||||
self.inventory.set_variable(entry_name, 'tags', vm['tags'])
|
||||
self.inventory.set_variable(
|
||||
entry_name, 'memory', vm['memory']['size'])
|
||||
self.inventory.set_variable(
|
||||
entry_name, 'has_ip', group == 'with_ip')
|
||||
self.inventory.set_variable(
|
||||
entry_name, 'is_managed', vm.get('managementAgentDetected', False))
|
||||
self.inventory.set_variable(
|
||||
entry_name, 'os_version', vm['os_version'])
|
||||
|
||||
self._apply_constructable(entry_name, self.inventory.get_host(entry_name).get_vars())
|
||||
|
||||
def _add_hosts(self, hosts, pools):
|
||||
for host in hosts.values():
|
||||
entry_name = host['uuid']
|
||||
group_name = 'xo_host_{0}'.format(
|
||||
clean_group_name(host['name_label']))
|
||||
pool_name = self._pool_group_name_for_uuid(pools, host['$poolId'])
|
||||
|
||||
self.inventory.add_group(group_name)
|
||||
self.inventory.add_host(entry_name)
|
||||
self.inventory.add_child(HOST_GROUP, entry_name)
|
||||
self.inventory.add_child(pool_name, entry_name)
|
||||
|
||||
self.inventory.set_variable(entry_name, 'enabled', host['enabled'])
|
||||
self.inventory.set_variable(
|
||||
entry_name, 'hostname', host['hostname'])
|
||||
self.inventory.set_variable(entry_name, 'memory', host['memory'])
|
||||
self.inventory.set_variable(entry_name, 'address', host['address'])
|
||||
self.inventory.set_variable(entry_name, 'cpus', host['cpus'])
|
||||
self.inventory.set_variable(entry_name, 'type', 'host')
|
||||
self.inventory.set_variable(entry_name, 'tags', host['tags'])
|
||||
self.inventory.set_variable(entry_name, 'version', host['version'])
|
||||
self.inventory.set_variable(
|
||||
entry_name, 'power_state', host['power_state'].lower())
|
||||
self.inventory.set_variable(
|
||||
entry_name, 'product_brand', host['productBrand'])
|
||||
|
||||
for pool in pools.values():
|
||||
group_name = 'xo_pool_{0}'.format(
|
||||
clean_group_name(pool['name_label']))
|
||||
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
def _add_pools(self, pools):
|
||||
for pool in pools.values():
|
||||
group_name = 'xo_pool_{0}'.format(
|
||||
clean_group_name(pool['name_label']))
|
||||
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
# TODO: Refactor
|
||||
def _pool_group_name_for_uuid(self, pools, pool_uuid):
|
||||
for pool in pools:
|
||||
if pool == pool_uuid:
|
||||
return 'xo_pool_{0}'.format(
|
||||
clean_group_name(pools[pool_uuid]['name_label']))
|
||||
|
||||
# TODO: Refactor
|
||||
def _host_group_name_for_uuid(self, hosts, host_uuid):
|
||||
for host in hosts:
|
||||
if host == host_uuid:
|
||||
return 'xo_host_{0}'.format(
|
||||
clean_group_name(hosts[host_uuid]['name_label']
|
||||
))
|
||||
|
||||
def _populate(self, objects):
|
||||
# Prepare general groups
|
||||
self.inventory.add_group(HOST_GROUP)
|
||||
self.inventory.add_group(POOL_GROUP)
|
||||
for group in POWER_STATES:
|
||||
self.inventory.add_group(group.lower())
|
||||
|
||||
self._add_pools(objects['pools'])
|
||||
self._add_hosts(objects['hosts'], objects['pools'])
|
||||
self._add_vms(objects['vms'], objects['hosts'], objects['pools'])
|
||||
|
||||
def verify_file(self, path):
|
||||
|
||||
valid = False
|
||||
if super(InventoryModule, self).verify_file(path):
|
||||
if path.endswith(('xen_orchestra.yaml', 'xen_orchestra.yml')):
|
||||
valid = True
|
||||
else:
|
||||
self.display.vvv(
|
||||
'Skipping due to inventory source not ending in "xen_orchestra.yaml" nor "xen_orchestra.yml"')
|
||||
return valid
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
|
||||
if not HAS_WEBSOCKET:
|
||||
raise AnsibleError('This plugin requires websocket-client 1.0.0 or higher: '
|
||||
'https://github.com/websocket-client/websocket-client.')
|
||||
|
||||
super(InventoryModule, self).parse(inventory, loader, path)
|
||||
|
||||
# read config from file, this sets 'options'
|
||||
self._read_config_data(path)
|
||||
self.inventory = inventory
|
||||
|
||||
self.protocol = 'wss'
|
||||
self.xoa_api_host = self.get_option('api_host')
|
||||
self.xoa_user = self.get_option('user')
|
||||
self.xoa_password = self.get_option('password')
|
||||
self.cache_key = self.get_cache_key(path)
|
||||
self.use_cache = cache and self.get_option('cache')
|
||||
|
||||
self.validate_certs = self.get_option('validate_certs')
|
||||
if not self.get_option('use_ssl'):
|
||||
self.protocol = 'ws'
|
||||
|
||||
objects = self._get_objects()
|
||||
self._populate(objects)
|
||||
@@ -1,138 +0,0 @@
|
||||
# (c) 2021, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = """
|
||||
name: collection_version
|
||||
author: Felix Fontein (@felixfontein)
|
||||
version_added: "4.0.0"
|
||||
short_description: Retrieves the version of an installed collection
|
||||
description:
|
||||
- This lookup allows to query the version of an installed collection, and to determine whether a
|
||||
collection is installed at all.
|
||||
- By default it returns C(none) for non-existing collections and C(*) for collections without a
|
||||
version number. The latter should only happen in development environments, or when installing
|
||||
a collection from git which has no version in its C(galaxy.yml). This behavior can be adjusted
|
||||
by providing other values with I(result_not_found) and I(result_no_version).
|
||||
options:
|
||||
_terms:
|
||||
description:
|
||||
- The collections to look for.
|
||||
- For example C(community.general).
|
||||
type: list
|
||||
elements: str
|
||||
required: true
|
||||
result_not_found:
|
||||
description:
|
||||
- The value to return when the collection could not be found.
|
||||
- By default, C(none) is returned.
|
||||
type: string
|
||||
default: ~
|
||||
result_no_version:
|
||||
description:
|
||||
- The value to return when the collection has no version number.
|
||||
- This can happen for collections installed from git which do not have a version number
|
||||
in C(galaxy.yml).
|
||||
- By default, C(*) is returned.
|
||||
type: string
|
||||
default: '*'
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
- name: Check version of community.general
|
||||
ansible.builtin.debug:
|
||||
msg: "community.general version {{ lookup('community.general.collection_version', 'community.general') }}"
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
_raw:
|
||||
description:
|
||||
- The version number of the collections listed as input.
|
||||
- If a collection can not be found, it will return the value provided in I(result_not_found).
|
||||
By default, this is C(none).
|
||||
- If a collection can be found, but the version not identified, it will return the value provided in
|
||||
I(result_no_version). By default, this is C(*). This can happen for collections installed
|
||||
from git which do not have a version number in C(galaxy.yml).
|
||||
type: list
|
||||
elements: str
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
|
||||
import yaml
|
||||
|
||||
from ansible.errors import AnsibleLookupError
|
||||
from ansible.module_utils.compat.importlib import import_module
|
||||
from ansible.plugins.lookup import LookupBase
|
||||
|
||||
|
||||
FQCN_RE = re.compile(r'^[A-Za-z0-9_]+\.[A-Za-z0-9_]+$')
|
||||
|
||||
|
||||
def load_collection_meta_manifest(manifest_path):
|
||||
with open(manifest_path, 'rb') as f:
|
||||
meta = json.load(f)
|
||||
return {
|
||||
'version': meta['collection_info']['version'],
|
||||
}
|
||||
|
||||
|
||||
def load_collection_meta_galaxy(galaxy_path, no_version='*'):
|
||||
with open(galaxy_path, 'rb') as f:
|
||||
meta = yaml.safe_load(f)
|
||||
return {
|
||||
'version': meta.get('version') or no_version,
|
||||
}
|
||||
|
||||
|
||||
def load_collection_meta(collection_pkg, no_version='*'):
|
||||
path = os.path.dirname(collection_pkg.__file__)
|
||||
|
||||
# Try to load MANIFEST.json
|
||||
manifest_path = os.path.join(path, 'MANIFEST.json')
|
||||
if os.path.exists(manifest_path):
|
||||
return load_collection_meta_manifest(manifest_path)
|
||||
|
||||
# Try to load galaxy.y(a)ml
|
||||
galaxy_path = os.path.join(path, 'galaxy.yml')
|
||||
galaxy_alt_path = os.path.join(path, 'galaxy.yaml')
|
||||
# galaxy.yaml was only supported in ansible-base 2.10 and ansible-core 2.11. Support was removed
|
||||
# in https://github.com/ansible/ansible/commit/595413d11346b6f26bb3d9df2d8e05f2747508a3 for
|
||||
# ansible-core 2.12.
|
||||
for path in (galaxy_path, galaxy_alt_path):
|
||||
if os.path.exists(path):
|
||||
return load_collection_meta_galaxy(path, no_version=no_version)
|
||||
|
||||
return {}
|
||||
|
||||
|
||||
class LookupModule(LookupBase):
|
||||
def run(self, terms, variables=None, **kwargs):
|
||||
result = []
|
||||
self.set_options(var_options=variables, direct=kwargs)
|
||||
not_found = self.get_option('result_not_found')
|
||||
no_version = self.get_option('result_no_version')
|
||||
|
||||
for term in terms:
|
||||
if not FQCN_RE.match(term):
|
||||
raise AnsibleLookupError('"{term}" is not a FQCN'.format(term=term))
|
||||
|
||||
try:
|
||||
collection_pkg = import_module('ansible_collections.{fqcn}'.format(fqcn=term))
|
||||
except ImportError:
|
||||
# Collection not found
|
||||
result.append(not_found)
|
||||
continue
|
||||
|
||||
try:
|
||||
data = load_collection_meta(collection_pkg, no_version=no_version)
|
||||
except Exception as exc:
|
||||
raise AnsibleLookupError('Error while loading metadata for {fqcn}: {error}'.format(fqcn=term, error=exc))
|
||||
|
||||
result.append(data.get('version', no_version))
|
||||
|
||||
return result
|
||||
126
plugins/lookup/nios.py
Normal file
126
plugins/lookup/nios.py
Normal file
@@ -0,0 +1,126 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright 2018 Red Hat | Ansible
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
author: Unknown (!UNKNOWN)
|
||||
name: nios
|
||||
short_description: Query Infoblox NIOS objects
|
||||
deprecated:
|
||||
why: Please install the infoblox.nios_modules collection and use the corresponding lookup from it.
|
||||
alternative: infoblox.nios_modules.nios_lookup
|
||||
removed_in: 5.0.0
|
||||
description:
|
||||
- Uses the Infoblox WAPI API to fetch NIOS specified objects. This lookup
|
||||
supports adding additional keywords to filter the return data and specify
|
||||
the desired set of returned fields.
|
||||
requirements:
|
||||
- infoblox-client
|
||||
extends_documentation_fragment:
|
||||
- community.general.nios
|
||||
|
||||
options:
|
||||
_terms:
|
||||
description: The name of the object to return from NIOS
|
||||
required: True
|
||||
return_fields:
|
||||
description: The list of field names to return for the specified object.
|
||||
filter:
|
||||
description: a dict object that is used to filter the return objects
|
||||
extattrs:
|
||||
description: a dict object that is used to filter on extattrs
|
||||
'''
|
||||
|
||||
EXAMPLES = """
|
||||
- name: fetch all networkview objects
|
||||
ansible.builtin.set_fact:
|
||||
networkviews: "{{ lookup('community.general.nios', 'networkview',
|
||||
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
||||
|
||||
- name: fetch the default dns view
|
||||
ansible.builtin.set_fact:
|
||||
dns_views: "{{ lookup('community.general.nios', 'view', filter={'name': 'default'},
|
||||
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
||||
|
||||
# all of the examples below use credentials that are set using env variables
|
||||
# export INFOBLOX_HOST=nios01
|
||||
# export INFOBLOX_USERNAME=admin
|
||||
# export INFOBLOX_PASSWORD=admin
|
||||
|
||||
- name: fetch all host records and include extended attributes
|
||||
ansible.builtin.set_fact:
|
||||
host_records: "{{ lookup('community.general.nios', 'record:host', return_fields=['extattrs', 'name', 'view', 'comment']}) }}"
|
||||
|
||||
|
||||
- name: use env variables to pass credentials
|
||||
ansible.builtin.set_fact:
|
||||
networkviews: "{{ lookup('community.general.nios', 'networkview') }}"
|
||||
|
||||
- name: get a host record
|
||||
ansible.builtin.set_fact:
|
||||
host: "{{ lookup('community.general.nios', 'record:host', filter={'name': 'hostname.ansible.com'}) }}"
|
||||
|
||||
- name: get the authoritative zone from a non default dns view
|
||||
ansible.builtin.set_fact:
|
||||
host: "{{ lookup('community.general.nios', 'zone_auth', filter={'fqdn': 'ansible.com', 'view': 'ansible-dns'}) }}"
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
obj_type:
|
||||
description:
|
||||
- The object type specified in the terms argument
|
||||
type: dictionary
|
||||
contains:
|
||||
obj_field:
|
||||
description:
|
||||
- One or more obj_type fields as specified by return_fields argument or
|
||||
the default set of fields as per the object type
|
||||
"""
|
||||
|
||||
from ansible.plugins.lookup import LookupBase
|
||||
from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiLookup
|
||||
from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs
|
||||
from ansible.errors import AnsibleError
|
||||
|
||||
|
||||
class LookupModule(LookupBase):
|
||||
|
||||
def run(self, terms, variables=None, **kwargs):
|
||||
try:
|
||||
obj_type = terms[0]
|
||||
except IndexError:
|
||||
raise AnsibleError('the object_type must be specified')
|
||||
|
||||
return_fields = kwargs.pop('return_fields', None)
|
||||
filter_data = kwargs.pop('filter', {})
|
||||
extattrs = normalize_extattrs(kwargs.pop('extattrs', {}))
|
||||
provider = kwargs.pop('provider', {})
|
||||
wapi = WapiLookup(provider)
|
||||
res = wapi.get_object(obj_type, filter_data, return_fields=return_fields, extattrs=extattrs)
|
||||
if res is not None:
|
||||
for obj in res:
|
||||
if 'extattrs' in obj:
|
||||
obj['extattrs'] = flatten_extattrs(obj['extattrs'])
|
||||
else:
|
||||
res = []
|
||||
return res
|
||||
105
plugins/lookup/nios_next_ip.py
Normal file
105
plugins/lookup/nios_next_ip.py
Normal file
@@ -0,0 +1,105 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright 2018 Red Hat | Ansible
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
author: Unknown (!UNKNOWN)
|
||||
name: nios_next_ip
|
||||
short_description: Return the next available IP address for a network
|
||||
deprecated:
|
||||
why: Please install the infoblox.nios_modules collection and use the corresponding lookup from it.
|
||||
alternative: infoblox.nios_modules.nios_next_ip
|
||||
removed_in: 5.0.0
|
||||
description:
|
||||
- Uses the Infoblox WAPI API to return the next available IP addresses
|
||||
for a given network CIDR
|
||||
requirements:
|
||||
- infoblox-client
|
||||
extends_documentation_fragment:
|
||||
- community.general.nios
|
||||
|
||||
options:
|
||||
_terms:
|
||||
description: The CIDR network to retrieve the next addresses from
|
||||
required: True
|
||||
num:
|
||||
description: The number of IP addresses to return
|
||||
required: false
|
||||
default: 1
|
||||
exclude:
|
||||
description: List of IP's that need to be excluded from returned IP addresses
|
||||
required: false
|
||||
'''
|
||||
|
||||
EXAMPLES = """
|
||||
- name: return next available IP address for network 192.168.10.0/24
|
||||
ansible.builtin.set_fact:
|
||||
ipaddr: "{{ lookup('community.general.nios_next_ip', '192.168.10.0/24', provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
||||
|
||||
- name: return the next 3 available IP addresses for network 192.168.10.0/24
|
||||
ansible.builtin.set_fact:
|
||||
ipaddr: "{{ lookup('community.general.nios_next_ip', '192.168.10.0/24', num=3, provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
||||
|
||||
- name: return the next 3 available IP addresses for network 192.168.10.0/24 excluding ip addresses - ['192.168.10.1', '192.168.10.2']
|
||||
ansible.builtin.set_fact:
|
||||
ipaddr: "{{ lookup('community.general.nios_next_ip', '192.168.10.0/24', num=3, exclude=['192.168.10.1', '192.168.10.2'],
|
||||
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
_list:
|
||||
description:
|
||||
- The list of next IP addresses available
|
||||
type: list
|
||||
"""
|
||||
|
||||
from ansible.plugins.lookup import LookupBase
|
||||
from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiLookup
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
from ansible.errors import AnsibleError
|
||||
|
||||
|
||||
class LookupModule(LookupBase):
|
||||
|
||||
def run(self, terms, variables=None, **kwargs):
|
||||
try:
|
||||
network = terms[0]
|
||||
except IndexError:
|
||||
raise AnsibleError('missing argument in the form of A.B.C.D/E')
|
||||
|
||||
provider = kwargs.pop('provider', {})
|
||||
wapi = WapiLookup(provider)
|
||||
|
||||
network_obj = wapi.get_object('network', {'network': network})
|
||||
if network_obj is None:
|
||||
raise AnsibleError('unable to find network object %s' % network)
|
||||
|
||||
num = kwargs.get('num', 1)
|
||||
exclude_ip = kwargs.get('exclude', [])
|
||||
|
||||
try:
|
||||
ref = network_obj[0]['_ref']
|
||||
avail_ips = wapi.call_func('next_available_ip', ref, {'num': num, 'exclude': exclude_ip})
|
||||
return [avail_ips['ips']]
|
||||
except Exception as exc:
|
||||
raise AnsibleError(to_text(exc))
|
||||
118
plugins/lookup/nios_next_network.py
Normal file
118
plugins/lookup/nios_next_network.py
Normal file
@@ -0,0 +1,118 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright 2018 Red Hat | Ansible
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
author: Unknown (!UNKNOWN)
|
||||
name: nios_next_network
|
||||
short_description: Return the next available network range for a network-container
|
||||
deprecated:
|
||||
why: Please install the infoblox.nios_modules collection and use the corresponding lookup from it.
|
||||
alternative: infoblox.nios_modules.nios_next_network
|
||||
removed_in: 5.0.0
|
||||
description:
|
||||
- Uses the Infoblox WAPI API to return the next available network addresses for
|
||||
a given network CIDR
|
||||
requirements:
|
||||
- infoblox_client
|
||||
extends_documentation_fragment:
|
||||
- community.general.nios
|
||||
|
||||
options:
|
||||
_terms:
|
||||
description: The CIDR network to retrieve the next network from next available network within the specified
|
||||
container.
|
||||
required: True
|
||||
cidr:
|
||||
description:
|
||||
- The CIDR of the network to retrieve the next network from next available network within the
|
||||
specified container. Also, Requested CIDR must be specified and greater than the parent CIDR.
|
||||
required: True
|
||||
default: 24
|
||||
num:
|
||||
description: The number of network addresses to return from network-container
|
||||
required: false
|
||||
default: 1
|
||||
exclude:
|
||||
description: Network addresses returned from network-container excluding list of user's input network range
|
||||
required: false
|
||||
default: ''
|
||||
'''
|
||||
|
||||
EXAMPLES = """
|
||||
- name: return next available network for network-container 192.168.10.0/24
|
||||
ansible.builtin.set_fact:
|
||||
networkaddr: "{{ lookup('community.general.nios_next_network', '192.168.10.0/24', cidr=25,
|
||||
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
||||
|
||||
- name: return the next 2 available network addresses for network-container 192.168.10.0/24
|
||||
ansible.builtin.set_fact:
|
||||
networkaddr: "{{ lookup('community.general.nios_next_network', '192.168.10.0/24', cidr=25, num=2,
|
||||
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
||||
|
||||
- name: return the available network addresses for network-container 192.168.10.0/24 excluding network range '192.168.10.0/25'
|
||||
ansible.builtin.set_fact:
|
||||
networkaddr: "{{ lookup('community.general.nios_next_network', '192.168.10.0/24', cidr=25, exclude=['192.168.10.0/25'],
|
||||
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
_list:
|
||||
description:
|
||||
- The list of next network addresses available
|
||||
type: list
|
||||
"""
|
||||
|
||||
from ansible.plugins.lookup import LookupBase
|
||||
from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiLookup
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
from ansible.errors import AnsibleError
|
||||
|
||||
|
||||
class LookupModule(LookupBase):
    """Lookup plugin returning the next available network(s) from a NIOS network-container."""

    def run(self, terms, variables=None, **kwargs):
        """Look up the next available network(s) inside the container given in terms[0].

        Options: ``cidr`` (requested prefix length, default 24), ``num``
        (how many networks, default 1), ``exclude`` (networks to skip).
        Raises AnsibleError on missing arguments, unknown container, or
        WAPI failure.
        """
        try:
            network = terms[0]
        except IndexError:
            raise AnsibleError('missing network argument in the form of A.B.C.D/E')

        # BUG FIX: the original wrapped this in ``try/except IndexError``,
        # but dict.get() never raises IndexError, so that handler was dead
        # code; a plain get with a default is equivalent and honest.
        cidr = kwargs.get('cidr', 24)

        # 'provider' carries the NIOS connection settings; pop it so it is
        # not mistaken for a lookup option.
        provider = kwargs.pop('provider', {})
        wapi = WapiLookup(provider)
        network_obj = wapi.get_object('networkcontainer', {'network': network})

        if network_obj is None:
            raise AnsibleError('unable to find network-container object %s' % network)
        num = kwargs.get('num', 1)
        exclude_ip = kwargs.get('exclude', [])

        try:
            ref = network_obj[0]['_ref']
            avail_nets = wapi.call_func('next_available_network', ref, {'cidr': cidr, 'num': num, 'exclude': exclude_ip})
            return [avail_nets['networks']]
        except Exception as exc:
            raise AnsibleError(to_text(exc))
|
||||
@@ -1,119 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
"""The community.general.random_words Ansible lookup plugin."""
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
name: random_words
|
||||
author:
|
||||
- Thomas Sjögren (@konstruktoid)
|
||||
short_description: Return a number of random words
|
||||
version_added: "4.0.0"
|
||||
requirements:
|
||||
- xkcdpass U(https://github.com/redacted/XKCD-password-generator)
|
||||
description:
|
||||
- Returns a number of random words. The output can for example be used for
|
||||
passwords.
|
||||
- See U(https://xkcd.com/936/) for background.
|
||||
options:
|
||||
numwords:
|
||||
description:
|
||||
- The number of words.
|
||||
default: 6
|
||||
type: int
|
||||
min_length:
|
||||
description:
|
||||
- Minimum length of words to make password.
|
||||
default: 5
|
||||
type: int
|
||||
max_length:
|
||||
description:
|
||||
- Maximum length of words to make password.
|
||||
default: 9
|
||||
type: int
|
||||
delimiter:
|
||||
description:
|
||||
- The delimiter character between words.
|
||||
default: " "
|
||||
type: str
|
||||
case:
|
||||
description:
|
||||
- The method for setting the case of each word in the passphrase.
|
||||
choices: ["alternating", "upper", "lower", "random", "capitalize"]
|
||||
default: "lower"
|
||||
type: str
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
- name: Generate password with default settings
|
||||
ansible.builtin.debug:
|
||||
var: lookup('community.general.random_words')
|
||||
# Example result: 'traitor gigabyte cesarean unless aspect clear'
|
||||
|
||||
- name: Generate password with six, five character, words
|
||||
ansible.builtin.debug:
|
||||
var: lookup('community.general.random_words', min_length=5, max_length=5)
|
||||
# Example result: 'brink banjo getup staff trump comfy'
|
||||
|
||||
- name: Generate password with three capitalized words and the '-' delimiter
|
||||
ansible.builtin.debug:
|
||||
var: lookup('community.general.random_words', numwords=3, delimiter='-', case='capitalize')
|
||||
# Example result: 'Overlabor-Faucet-Coastline'
|
||||
|
||||
- name: Generate password with three words without any delimiter
|
||||
ansible.builtin.debug:
|
||||
var: lookup('community.general.random_words', numwords=3, delimiter='')
|
||||
# Example result: 'deskworkmonopolystriking'
|
||||
# https://www.ncsc.gov.uk/blog-post/the-logic-behind-three-random-words
|
||||
"""
|
||||
|
||||
RETURN = r"""
|
||||
_raw:
|
||||
description: A single-element list containing random words.
|
||||
type: list
|
||||
elements: str
|
||||
"""
|
||||
|
||||
from ansible.errors import AnsibleLookupError
|
||||
from ansible.plugins.lookup import LookupBase
|
||||
|
||||
try:
|
||||
from xkcdpass import xkcd_password as xp
|
||||
|
||||
HAS_XKCDPASS = True
|
||||
except ImportError:
|
||||
HAS_XKCDPASS = False
|
||||
|
||||
|
||||
class LookupModule(LookupBase):
    """The random_words Ansible lookup class.

    Generates a passphrase of random words via the third-party xkcdpass
    library, honoring the numwords/min_length/max_length/delimiter/case
    options.
    """

    def run(self, terms, variables=None, **kwargs):
        """Return a one-element list containing the generated passphrase string."""
        if not HAS_XKCDPASS:
            raise AnsibleLookupError(
                "Python xkcdpass library is required. "
                'Please install using "pip install xkcdpass"'
            )

        # Resolve plugin options from task vars and direct keyword args.
        self.set_options(var_options=variables, direct=kwargs)

        wordfile = xp.locate_wordfile()
        candidates = xp.generate_wordlist(
            max_length=self.get_option("max_length"),
            min_length=self.get_option("min_length"),
            wordfile=wordfile,
        )

        passphrase = xp.generate_xkcdpassword(
            candidates,
            case=self.get_option("case"),
            delimiter=self.get_option("delimiter"),
            numwords=self.get_option("numwords"),
        )

        return [passphrase]
|
||||
@@ -1,107 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2021, RevBits <info@revbits.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or
|
||||
# https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
name: revbitspss
|
||||
author: RevBits (@RevBits) <info@revbits.com>
|
||||
short_description: Get secrets from RevBits PAM server
|
||||
version_added: 4.1.0
|
||||
description:
|
||||
- Uses the revbits_ansible Python SDK to get Secrets from RevBits PAM
|
||||
Server using API key authentication with the REST API.
|
||||
requirements:
|
||||
- revbits_ansible - U(https://pypi.org/project/revbits_ansible/)
|
||||
options:
|
||||
_terms:
|
||||
description:
|
||||
- This will be an array of keys for secrets which you want to fetch from RevBits PAM.
|
||||
required: true
|
||||
type: list
|
||||
elements: string
|
||||
base_url:
|
||||
description:
|
||||
- This will be the base URL of the server, for example C(https://server-url-here).
|
||||
required: true
|
||||
type: string
|
||||
api_key:
|
||||
description:
|
||||
- This will be the API key for authentication. You can get it from the RevBits PAM secret manager module.
|
||||
required: true
|
||||
type: string
|
||||
"""
|
||||
|
||||
RETURN = r"""
|
||||
_list:
|
||||
description:
|
||||
- The JSON responses which you can access with defined keys.
|
||||
- If you are fetching secrets named as UUID, PASSWORD it will gives you the dict of all secrets.
|
||||
type: list
|
||||
elements: dict
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
- hosts: localhost
|
||||
vars:
|
||||
secret: >-
|
||||
{{
|
||||
lookup(
|
||||
'community.general.revbitspss',
|
||||
'UUIDPAM', 'DB_PASS',
|
||||
base_url='https://server-url-here',
|
||||
api_key='API_KEY_GOES_HERE'
|
||||
)
|
||||
}}
|
||||
tasks:
|
||||
- ansible.builtin.debug:
|
||||
msg: >
|
||||
UUIDPAM is {{ (secret['UUIDPAM']) }} and DB_PASS is {{ (secret['DB_PASS']) }}
|
||||
"""
|
||||
|
||||
from ansible.plugins.lookup import LookupBase
|
||||
from ansible.utils.display import Display
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils.six import raise_from
|
||||
|
||||
try:
|
||||
from pam.revbits_ansible.server import SecretServer
|
||||
except ImportError as imp_exc:
|
||||
ANOTHER_LIBRARY_IMPORT_ERROR = imp_exc
|
||||
else:
|
||||
ANOTHER_LIBRARY_IMPORT_ERROR = None
|
||||
|
||||
|
||||
display = Display()
|
||||
|
||||
|
||||
class LookupModule(LookupBase):
    """Lookup plugin fetching secrets from a RevBits PAM server."""

    @staticmethod
    def Client(server_parameters):
        """Build a SecretServer client from a dict of connection parameters."""
        return SecretServer(**server_parameters)

    def run(self, terms, variables, **kwargs):
        """Fetch each secret key in ``terms`` and return a list of {key: secret} dicts.

        Raises AnsibleError when the revbits_ansible SDK is missing or when
        any individual secret lookup fails.
        """
        if ANOTHER_LIBRARY_IMPORT_ERROR:
            raise_from(
                AnsibleError('revbits_ansible must be installed to use this plugin'),
                ANOTHER_LIBRARY_IMPORT_ERROR
            )
        self.set_options(var_options=variables, direct=kwargs)
        secret_server = LookupModule.Client(
            {
                "base_url": self.get_option('base_url'),
                "api_key": self.get_option('api_key'),
            }
        )
        result = []
        for term in terms:
            try:
                display.vvv(u"Secret Server lookup of Secret with ID %s" % term)
                result.append({term: secret_server.get_pam_secret(term)})
            except Exception as error:
                # BUG FIX: Python 3 exceptions have no ``.message`` attribute,
                # so the original ``error.message`` raised AttributeError and
                # masked the real failure; format the exception itself instead.
                raise AnsibleError("Secret Server lookup failure: %s" % error)
        return result
|
||||
748
plugins/module_utils/_netapp.py
Normal file
748
plugins/module_utils/_netapp.py
Normal file
@@ -0,0 +1,748 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c) 2017, Sumit Kumar <sumit4@netapp.com>
|
||||
# Copyright (c) 2017, Michael Price <michael.price@netapp.com>
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification,
|
||||
# are permitted provided that the following conditions are met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
import os
|
||||
import random
|
||||
import mimetypes
|
||||
|
||||
from pprint import pformat
|
||||
from ansible.module_utils import six
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
|
||||
from ansible.module_utils.urls import open_url
|
||||
from ansible.module_utils.api import basic_auth_argument_spec
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
try:
|
||||
from ansible.module_utils.ansible_release import __version__ as ansible_version
|
||||
except ImportError:
|
||||
ansible_version = 'unknown'
|
||||
|
||||
try:
|
||||
from netapp_lib.api.zapi import zapi
|
||||
HAS_NETAPP_LIB = True
|
||||
except ImportError:
|
||||
HAS_NETAPP_LIB = False
|
||||
|
||||
try:
|
||||
import requests
|
||||
HAS_REQUESTS = True
|
||||
except ImportError:
|
||||
HAS_REQUESTS = False
|
||||
|
||||
import ssl
|
||||
try:
|
||||
from urlparse import urlparse, urlunparse
|
||||
except ImportError:
|
||||
from urllib.parse import urlparse, urlunparse
|
||||
|
||||
|
||||
HAS_SF_SDK = False
|
||||
SF_BYTE_MAP = dict(
|
||||
# Management GUI displays 1024 ** 3 as 1.1 GB, thus use 1000.
|
||||
bytes=1,
|
||||
b=1,
|
||||
kb=1000,
|
||||
mb=1000 ** 2,
|
||||
gb=1000 ** 3,
|
||||
tb=1000 ** 4,
|
||||
pb=1000 ** 5,
|
||||
eb=1000 ** 6,
|
||||
zb=1000 ** 7,
|
||||
yb=1000 ** 8
|
||||
)
|
||||
|
||||
POW2_BYTE_MAP = dict(
|
||||
# Here, 1 kb = 1024
|
||||
bytes=1,
|
||||
b=1,
|
||||
kb=1024,
|
||||
mb=1024 ** 2,
|
||||
gb=1024 ** 3,
|
||||
tb=1024 ** 4,
|
||||
pb=1024 ** 5,
|
||||
eb=1024 ** 6,
|
||||
zb=1024 ** 7,
|
||||
yb=1024 ** 8
|
||||
)
|
||||
|
||||
try:
|
||||
from solidfire.factory import ElementFactory
|
||||
from solidfire.custom.models import TimeIntervalFrequency
|
||||
from solidfire.models import Schedule, ScheduleInfo
|
||||
|
||||
HAS_SF_SDK = True
|
||||
except Exception:
|
||||
HAS_SF_SDK = False
|
||||
|
||||
|
||||
def has_netapp_lib():
    """Return whether the optional ``netapp_lib`` package was importable at module load."""
    return HAS_NETAPP_LIB
|
||||
|
||||
|
||||
def has_sf_sdk():
    """Return whether the optional SolidFire SDK was importable at module load."""
    return HAS_SF_SDK
|
||||
|
||||
|
||||
def na_ontap_host_argument_spec():
    """Return the common AnsibleModule argument spec for NetApp ONTAP modules."""
    return {
        'hostname': {'required': True, 'type': 'str'},
        'username': {'required': True, 'type': 'str', 'aliases': ['user']},
        'password': {'required': True, 'type': 'str', 'aliases': ['pass'], 'no_log': True},
        'https': {'required': False, 'type': 'bool', 'default': False},
        'validate_certs': {'required': False, 'type': 'bool', 'default': True},
        'http_port': {'required': False, 'type': 'int'},
        'ontapi': {'required': False, 'type': 'int'},
        'use_rest': {'required': False, 'type': 'str', 'default': 'Auto',
                     'choices': ['Never', 'Always', 'Auto']},
    }
|
||||
|
||||
|
||||
def ontap_sf_host_argument_spec():
    """Return the shared host/credential argument spec for ONTAP SolidFire modules."""
    return {
        'hostname': {'required': True, 'type': 'str'},
        'username': {'required': True, 'type': 'str', 'aliases': ['user']},
        'password': {'required': True, 'type': 'str', 'aliases': ['pass'], 'no_log': True},
    }
|
||||
|
||||
|
||||
def aws_cvs_host_argument_spec():
    """Return the shared API-endpoint/credential argument spec for AWS CVS modules."""
    return {
        'api_url': {'required': True, 'type': 'str'},
        'validate_certs': {'required': False, 'type': 'bool', 'default': True},
        'api_key': {'required': True, 'type': 'str', 'no_log': True},
        'secret_key': {'required': True, 'type': 'str', 'no_log': True},
    }
|
||||
|
||||
|
||||
def create_sf_connection(module, port=None):
    """Return a SolidFire Element connection built from the module's credentials.

    Fails the module when the SolidFire SDK is missing or any credential is
    empty; raises a plain Exception when the SDK cannot create the connection.
    """
    params = module.params
    host = params['hostname']
    user = params['username']
    secret = params['password']

    if HAS_SF_SDK and host and user and secret:
        try:
            return ElementFactory.create(host, user, secret, port=port)
        except Exception:
            raise Exception("Unable to create SF connection")
    else:
        module.fail_json(msg="the python SolidFire SDK module is required")
|
||||
|
||||
|
||||
def setup_na_ontap_zapi(module, vserver=None):
    """Build a configured NaServer ZAPI connection from the module parameters.

    Honors https/http_port/validate_certs/ontapi options; fails the module
    when netapp_lib is not installed.
    """
    params = module.params

    if HAS_NETAPP_LIB:
        # Assemble the ZAPI server handle from the supplied credentials.
        server = zapi.NaServer(params['hostname'])
        server.set_username(params['username'])
        server.set_password(params['password'])
        if vserver:
            server.set_vserver(vserver)
        # Default to ONTAPI minor version 110 when none was requested.
        server.set_api_version(major=1, minor=params['ontapi'] or 110)

        port = params['http_port']
        if params['https']:
            if port is None:
                port = 443
            transport = 'HTTPS'
            # HACK to bypass certificate verification: install an unverified
            # SSL context globally when validate_certs is explicitly False.
            if params['validate_certs'] is False:
                if not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None):
                    ssl._create_default_https_context = ssl._create_unverified_context
        else:
            if port is None:
                port = 80
            transport = 'HTTP'

        server.set_transport_type(transport)
        server.set_port(port)
        server.set_server_type('FILER')
        return server
    else:
        module.fail_json(msg="the python NetApp-Lib module is required")
|
||||
|
||||
|
||||
def setup_ontap_zapi(module, vserver=None):
    """Build a NaServer ZAPI connection with fixed HTTP/port-80 transport settings.

    Fails the module when netapp_lib is not installed.
    """
    params = module.params

    if HAS_NETAPP_LIB:
        server = zapi.NaServer(params['hostname'])
        server.set_username(params['username'])
        server.set_password(params['password'])
        if vserver:
            server.set_vserver(vserver)
        # Todo : Replace hard-coded values with configurable parameters.
        server.set_api_version(major=1, minor=110)
        server.set_port(80)
        server.set_server_type('FILER')
        server.set_transport_type('HTTP')
        return server
    else:
        module.fail_json(msg="the python NetApp-Lib module is required")
|
||||
|
||||
|
||||
def eseries_host_argument_spec():
    """Retrieve a base argument specification common to all NetApp E-Series modules."""
    spec = basic_auth_argument_spec()
    spec.update(
        api_username=dict(type='str', required=True),
        api_password=dict(type='str', required=True, no_log=True),
        api_url=dict(type='str', required=True),
        ssid=dict(type='str', required=False, default='1'),
        validate_certs=dict(type='bool', required=False, default=True),
    )
    return spec
|
||||
|
||||
|
||||
class NetAppESeriesModule(object):
    """Base class for all NetApp E-Series modules.

    Provides a set of common methods for NetApp E-Series modules, including version checking, mode (proxy, embedded)
    verification, http requests, secure http redirection for embedded web services, and logging setup.

    Be sure to add the following lines in the module's documentation section:
    extends_documentation_fragment:
        - netapp.eseries

    :param dict(dict) ansible_options: dictionary of ansible option definitions
    :param str web_services_version: minimally required web services rest api version (default value: "02.00.0000.0000")
    :param bool supports_check_mode: whether the module will support the check_mode capabilities (default=False)
    :param list(list) mutually_exclusive: list containing list(s) of mutually exclusive options (optional)
    :param list(list) required_if: list containing list(s) containing the option, the option value, and then
    a list of required options. (optional)
    :param list(list) required_one_of: list containing list(s) of options for which at least one is required. (optional)
    :param list(list) required_together: list containing list(s) of options that are required together. (optional)
    :param bool log_requests: controls whether to log each request (default: True)
    """
    DEFAULT_TIMEOUT = 60
    DEFAULT_SECURE_PORT = "8443"
    DEFAULT_REST_API_PATH = "devmgr/v2/"
    DEFAULT_REST_API_ABOUT_PATH = "devmgr/utils/about"
    DEFAULT_HEADERS = {"Content-Type": "application/json", "Accept": "application/json",
                       "netapp-client-type": "Ansible-%s" % ansible_version}
    HTTP_AGENT = "Ansible / %s" % ansible_version
    SIZE_UNIT_MAP = dict(bytes=1, b=1, kb=1024, mb=1024**2, gb=1024**3, tb=1024**4,
                         pb=1024**5, eb=1024**6, zb=1024**7, yb=1024**8)

    def __init__(self, ansible_options, web_services_version=None, supports_check_mode=False,
                 mutually_exclusive=None, required_if=None, required_one_of=None, required_together=None,
                 log_requests=True):
        # Merge the module-specific options onto the common E-Series host spec.
        argument_spec = eseries_host_argument_spec()
        argument_spec.update(ansible_options)

        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=supports_check_mode,
                                    mutually_exclusive=mutually_exclusive, required_if=required_if,
                                    required_one_of=required_one_of, required_together=required_together)

        args = self.module.params
        self.web_services_version = web_services_version if web_services_version else "02.00.0000.0000"
        self.ssid = args["ssid"]
        self.url = args["api_url"]
        self.log_requests = log_requests
        # Credentials forwarded verbatim as keyword args to request()/open_url().
        self.creds = dict(url_username=args["api_username"],
                          url_password=args["api_password"],
                          validate_certs=args["validate_certs"])

        # Normalize the base URL so path joins below are simple concatenations.
        if not self.url.endswith("/"):
            self.url += "/"

        # Lazily-populated caches: embedded/proxy mode and version-check result.
        self.is_embedded_mode = None
        self.is_web_services_valid_cache = None

    def _check_web_services_version(self):
        """Verify proxy or embedded web services meets minimum version required for module.

        The minimum required web services version is evaluated against version supplied through the web services rest
        api. AnsibleFailJson exception will be raised when the minimum is not met or exceeded.

        This helper function will update the supplied api url if secure http is not used for embedded web services

        :raise AnsibleFailJson: raised when the contacted api service does not meet the minimum required version.
        """
        if not self.is_web_services_valid_cache:

            url_parts = urlparse(self.url)
            if not url_parts.scheme or not url_parts.netloc:
                self.module.fail_json(msg="Failed to provide valid API URL. Example: https://192.168.1.100:8443/devmgr/v2. URL [%s]." % self.url)

            if url_parts.scheme not in ["http", "https"]:
                self.module.fail_json(msg="Protocol must be http or https. URL [%s]." % self.url)

            # Rebuild the base URL from scheme+netloc only, discarding any path.
            self.url = "%s://%s/" % (url_parts.scheme, url_parts.netloc)
            about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
            rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, ignore_errors=True, **self.creds)

            if rc != 200:
                # First attempt failed: retry once over https on the default
                # secure port 8443, using only the hostname part of netloc.
                self.module.warn("Failed to retrieve web services about information! Retrying with secure ports. Array Id [%s]." % self.ssid)
                self.url = "https://%s:8443/" % url_parts.netloc.split(":")[0]
                about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
                try:
                    rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds)
                except Exception as error:
                    self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]."
                                              % (self.ssid, to_native(error)))

            # NOTE(review): these components are strings, so the comparisons
            # below are lexicographic; this only orders correctly while the
            # version fields stay zero-padded to a fixed width (e.g. "02.00").
            major, minor, other, revision = data["version"].split(".")
            minimum_major, minimum_minor, other, minimum_revision = self.web_services_version.split(".")

            if not (major > minimum_major or
                    (major == minimum_major and minor > minimum_minor) or
                    (major == minimum_major and minor == minimum_minor and revision >= minimum_revision)):
                self.module.fail_json(msg="Web services version does not meet minimum version required. Current version: [%s]."
                                          " Version required: [%s]." % (data["version"], self.web_services_version))

            self.module.log("Web services rest api version met the minimum required version.")
            self.is_web_services_valid_cache = True

    def is_embedded(self):
        """Determine whether web services server is the embedded web services.

        If web services about endpoint fails based on an URLError then the request will be attempted again using
        secure http.

        :raise AnsibleFailJson: raised when web services about endpoint failed to be contacted.
        :return bool: whether contacted web services is running from storage array (embedded) or from a proxy.
        """
        self._check_web_services_version()

        if self.is_embedded_mode is None:
            about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
            try:
                rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds)
                # "runningAsProxy" false means the service runs on the array itself.
                self.is_embedded_mode = not data["runningAsProxy"]
            except Exception as error:
                self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]."
                                          % (self.ssid, to_native(error)))

        return self.is_embedded_mode

    def request(self, path, data=None, method='GET', headers=None, ignore_errors=False):
        """Issue an HTTP request to a url, retrieving an optional JSON response.

        :param str path: web services rest api endpoint path (Example: storage-systems/1/graph). Note that when the
        full url path is specified then that will be used without supplying the protocol, hostname, port and rest path.
        :param data: data required for the request (data may be json or any python structured data)
        :param str method: request method such as GET, POST, DELETE.
        :param dict headers: dictionary containing request headers.
        :param bool ignore_errors: forces the request to ignore any raised exceptions.
        """
        self._check_web_services_version()

        if headers is None:
            headers = self.DEFAULT_HEADERS

        # Serialize structured payloads for JSON requests; strings pass through.
        if not isinstance(data, str) and headers["Content-Type"] == "application/json":
            data = json.dumps(data)

        if path.startswith("/"):
            path = path[1:]
        request_url = self.url + self.DEFAULT_REST_API_PATH + path

        # NOTE(review): the log_requests guard is commented out, so every
        # request (including credentials-free url/data/method) is logged.
        # if self.log_requests:
        self.module.log(pformat(dict(url=request_url, data=data, method=method)))

        return request(url=request_url, data=data, method=method, headers=headers, use_proxy=True, force=False, last_mod_time=None,
                       timeout=self.DEFAULT_TIMEOUT, http_agent=self.HTTP_AGENT, force_basic_auth=True, ignore_errors=ignore_errors, **self.creds)
|
||||
|
||||
|
||||
def create_multipart_formdata(files, fields=None, send_8kb=False):
    """Create the data for a multipart/form request.

    :param list(list) files: list of lists each containing (name, filename, path).
    :param list(list) fields: list of lists each containing (key, value).
    :param bool send_8kb: only sends the first 8kb of the files (default: False).
    """
    # Random 27-digit boundary, mirroring the browser-style "-----…" prefix.
    boundary = "---------------------------" + "".join([str(random.randint(0, 9)) for x in range(27)])
    data_parts = list()
    data = None

    if six.PY2:  # Generate payload for Python 2
        # On PY2 everything is assembled as native (byte) strings.
        newline = "\r\n"
        if fields is not None:
            for key, value in fields:
                data_parts.extend(["--%s" % boundary,
                                   'Content-Disposition: form-data; name="%s"' % key,
                                   "",
                                   value])

        for name, filename, path in files:
            with open(path, "rb") as fh:
                # Optionally truncate the upload to the first 8 KiB of the file.
                value = fh.read(8192) if send_8kb else fh.read()

                data_parts.extend(["--%s" % boundary,
                                   'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename),
                                   "Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream"),
                                   "",
                                   value])
        data_parts.extend(["--%s--" % boundary, ""])
        data = newline.join(data_parts)

    else:
        # On PY3 each part is encoded to bytes before joining, since the file
        # contents read above are bytes.
        newline = six.b("\r\n")
        if fields is not None:
            for key, value in fields:
                data_parts.extend([six.b("--%s" % boundary),
                                   six.b('Content-Disposition: form-data; name="%s"' % key),
                                   six.b(""),
                                   six.b(value)])

        for name, filename, path in files:
            with open(path, "rb") as fh:
                value = fh.read(8192) if send_8kb else fh.read()

                data_parts.extend([six.b("--%s" % boundary),
                                   six.b('Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename)),
                                   six.b("Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream")),
                                   six.b(""),
                                   value])
        data_parts.extend([six.b("--%s--" % boundary), b""])
        data = newline.join(data_parts)

    headers = {
        "Content-Type": "multipart/form-data; boundary=%s" % boundary,
        "Content-Length": str(len(data))}

    return headers, data
|
||||
|
||||
|
||||
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    """Issue an HTTP request to a url, retrieving an optional JSON response.

    :param str url: full URL to request.
    :param data: request payload handed to ``open_url``.
    :param dict headers: optional headers; defaults to JSON content/accept.
    :return: tuple ``(status_code, data)`` where ``data`` is the decoded JSON
             body, or ``None`` when the response body is empty.
    :raises Exception: on JSON decode failure or HTTP status >= 400,
                       unless ``ignore_errors`` is true.
    """
    if headers is None:
        headers = {"Content-Type": "application/json", "Accept": "application/json"}
    else:
        # Copy so the update below does not mutate the caller's dict.
        headers = dict(headers)
    headers.update({"netapp-client-type": "Ansible-%s" % ansible_version})

    if not http_agent:
        http_agent = "Ansible / %s" % ansible_version

    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError as err:
        # An HTTP error response still carries a readable body.
        r = err.fp

    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            # Bug fix: an empty body previously left `data` bound to the
            # request payload, echoing it back as the "response data".
            raw_data = None
            data = None
    except Exception:
        if not ignore_errors:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    return resp_code, data
|
||||
|
||||
|
||||
def ems_log_event(source, server, name="Ansible", id="12345", version=ansible_version,
                  category="Information", event="setup", autosupport="false"):
    """Send an ems-autosupport-log event to the given ZAPI server."""
    children = (
        ("computer-name", name),        # host name invoking the API
        ("event-id", id),               # user defined event-id, range [0..2^32-2]
        ("event-source", source),       # name of the application invoking the API
        ("app-version", version),       # version of application invoking the API
        ("category", category),         # application defined category of the event
        ("event-description", event),   # application defined message to log
        ("log-level", "6"),
        ("auto-support", autosupport),
    )
    payload = zapi.NaElement('ems-autosupport-log')
    for tag, value in children:
        payload.add_new_child(tag, value)
    server.invoke_successfully(payload, True)
|
||||
|
||||
|
||||
def get_cserver_zapi(server):
    """Return the admin vserver name for *server* via a vserver-get-iter ZAPI call."""
    request = zapi.NaElement('vserver-get-iter')
    admin_filter = zapi.NaElement.create_node_with_children('vserver-info', **{'vserver-type': 'admin'})
    query = zapi.NaElement('query')
    query.add_child_elem(admin_filter)
    request.add_child_elem(query)
    result = server.invoke_successfully(request, enable_tunneling=False)
    attributes = result.get_child_by_name('attributes-list')
    vserver_info = attributes.get_child_by_name('vserver-info')
    return vserver_info.get_child_content('vserver-name')
|
||||
|
||||
|
||||
def get_cserver(connection, is_rest=False):
    """Return the cluster (or single vserver) admin vserver name.

    :param connection: ZAPI server (when ``is_rest`` is false) or a REST
                       connection exposing ``get(api, params)``.
    :param bool is_rest: select the REST code path.
    :return: vserver name, or None if it cannot be determined.
    """
    if not is_rest:
        return get_cserver_zapi(connection)

    params = {'fields': 'type'}
    api = "private/cli/vserver"
    # Renamed from `json`: the local shadowed the imported json module.
    response, error = connection.get(api, params)
    if response is None or error is not None:
        # exit if there is an error or no data
        return None
    vservers = response.get('records')
    if vservers is not None:
        for vserver in vservers:
            if vserver['type'] == 'admin':  # cluster admin
                return vserver['vserver']
        if len(vservers) == 1:  # assume vserver admin
            return vservers[0]['vserver']

    return None
|
||||
|
||||
|
||||
class OntapRestAPI(object):
    """Thin wrapper over ``requests`` for the NetApp ONTAP REST API.

    Connection details come from the Ansible module parameters. Errors and
    debug records are accumulated in ``self.errors`` / ``self.debug_logs``
    so the calling module can surface them.
    """

    def __init__(self, module, timeout=60):
        self.module = module
        self.username = self.module.params['username']
        self.password = self.module.params['password']
        self.hostname = self.module.params['hostname']
        # One of 'Always' / 'Never' / auto-detect; see _is_rest().
        self.use_rest = self.module.params['use_rest']
        self.verify = self.module.params['validate_certs']
        self.timeout = timeout
        self.url = 'https://' + self.hostname + '/api/'
        self.errors = list()
        self.debug_logs = list()
        self.check_required_library()

    def check_required_library(self):
        """Fail the module early if the `requests` package is missing."""
        if not HAS_REQUESTS:
            self.module.fail_json(msg=missing_required_lib('requests'))

    def send_request(self, method, api, params, json=None, return_status_code=False):
        ''' send http request and process response, including error conditions

        Returns (json_dict, error_details), or (status_code, error_details)
        when return_status_code is true.  error_details is None on success.
        '''
        url = self.url + api
        status_code = None
        content = None
        json_dict = None
        json_error = None
        error_details = None

        def get_json(response):
            ''' extract json, and error message if present '''
            try:
                json = response.json()
            except ValueError:
                return None, None
            error = json.get('error')
            return json, error

        try:
            response = requests.request(method, url, verify=self.verify, auth=(self.username, self.password), params=params, timeout=self.timeout, json=json)
            content = response.content  # for debug purposes
            status_code = response.status_code
            # If the response was successful, no Exception will be raised
            response.raise_for_status()
            json_dict, json_error = get_json(response)
        except requests.exceptions.HTTPError as err:
            # 4xx/5xx: the error body may still contain a structured 'error'.
            __, json_error = get_json(response)
            if json_error is None:
                self.log_error(status_code, 'HTTP error: %s' % err)
                error_details = str(err)
            # If an error was reported in the json payload, it is handled below
        except requests.exceptions.ConnectionError as err:
            self.log_error(status_code, 'Connection error: %s' % err)
            error_details = str(err)
        except Exception as err:
            self.log_error(status_code, 'Other error: %s' % err)
            error_details = str(err)
        if json_error is not None:
            # Structured endpoint error takes precedence over the raw HTTP error.
            self.log_error(status_code, 'Endpoint error: %d: %s' % (status_code, json_error))
            error_details = json_error
        self.log_debug(status_code, content)
        if return_status_code:
            return status_code, error_details
        return json_dict, error_details

    def get(self, api, params):
        """GET *api*; returns (json_dict, error_details)."""
        method = 'GET'
        return self.send_request(method, api, params)

    def post(self, api, data, params=None):
        """POST *data* (as JSON body) to *api*."""
        method = 'POST'
        return self.send_request(method, api, params, json=data)

    def patch(self, api, data, params=None):
        """PATCH *api* with *data* as JSON body."""
        method = 'PATCH'
        return self.send_request(method, api, params, json=data)

    def delete(self, api, data, params=None):
        """DELETE *api*, sending *data* as JSON body."""
        method = 'DELETE'
        return self.send_request(method, api, params, json=data)

    def _is_rest(self, used_unsupported_rest_properties=None):
        # Decide whether to use REST, honoring the 'use_rest' module option.
        # Returns (use_rest, error); error is only set for 'Always' combined
        # with properties REST does not support.
        if self.use_rest == "Always":
            if used_unsupported_rest_properties:
                error = "REST API currently does not support '%s'" % \
                        ', '.join(used_unsupported_rest_properties)
                return True, error
            else:
                return True, None
        if self.use_rest == 'Never' or used_unsupported_rest_properties:
            # force ZAPI if requested or if some parameter requires it
            return False, None
        # Auto mode: probe a REST endpoint to see whether REST is available.
        method = 'HEAD'
        api = 'cluster/software'
        status_code, __ = self.send_request(method, api, params=None, return_status_code=True)
        if status_code == 200:
            return True, None
        return False, None

    def is_rest(self, used_unsupported_rest_properties=None):
        ''' only return error if there is a reason to '''
        use_rest, error = self._is_rest(used_unsupported_rest_properties)
        if used_unsupported_rest_properties is None:
            return use_rest
        return use_rest, error

    def log_error(self, status_code, message):
        """Record *message* in both the error list and the debug log."""
        self.errors.append(message)
        self.debug_logs.append((status_code, message))

    def log_debug(self, status_code, content):
        """Record raw response content for later debugging output."""
        self.debug_logs.append((status_code, content))
|
||||
|
||||
|
||||
class AwsCvsRestAPI(object):
    """Helper for the AWS Cloud Volumes Service REST API.

    Authenticates with api-key/secret-key headers taken from the module
    parameters.
    """

    def __init__(self, module, timeout=60):
        self.module = module
        self.api_key = self.module.params['api_key']
        self.secret_key = self.module.params['secret_key']
        self.api_url = self.module.params['api_url']
        self.verify = self.module.params['validate_certs']
        self.timeout = timeout
        self.url = 'https://' + self.api_url + '/v1/'
        self.check_required_library()

    def check_required_library(self):
        """Fail the module early if the `requests` package is missing."""
        if not HAS_REQUESTS:
            self.module.fail_json(msg=missing_required_lib('requests'))

    def send_request(self, method, api, params, json=None):
        ''' send http request and process response, including error conditions

        Returns (json_dict, error_details); error_details is None on success.
        '''
        # NOTE(review): `params` is accepted but never forwarded to
        # requests.request below, so query parameters are silently dropped —
        # confirm whether that is intended.
        url = self.url + api
        status_code = None
        content = None
        json_dict = None
        json_error = None
        error_details = None
        headers = {
            'Content-type': "application/json",
            'api-key': self.api_key,
            'secret-key': self.secret_key,
            'Cache-Control': "no-cache",
        }

        def get_json(response):
            ''' extract json, and error message if present '''
            try:
                json = response.json()

            except ValueError:
                return None, None
            # Any status outside the success set carries its message as the error.
            success_code = [200, 201, 202]
            if response.status_code not in success_code:
                error = json.get('message')
            else:
                error = None
            return json, error
        try:
            response = requests.request(method, url, headers=headers, timeout=self.timeout, json=json)
            status_code = response.status_code
            # If the response was successful, no Exception will be raised
            json_dict, json_error = get_json(response)
        except requests.exceptions.HTTPError as err:
            __, json_error = get_json(response)
            if json_error is None:
                error_details = str(err)
        except requests.exceptions.ConnectionError as err:
            error_details = str(err)
        except Exception as err:
            error_details = str(err)
        if json_error is not None:
            # Structured API error message takes precedence.
            error_details = json_error

        return json_dict, error_details

    def get(self, api, params=None):
        """GET *api*; returns (json_dict, error_details)."""
        method = 'GET'
        return self.send_request(method, api, params)

    def post(self, api, data, params=None):
        """POST *data* (as JSON body) to *api*."""
        method = 'POST'
        return self.send_request(method, api, params, json=data)

    def patch(self, api, data, params=None):
        """PATCH *api* with *data* as JSON body."""
        method = 'PATCH'
        return self.send_request(method, api, params, json=data)

    def put(self, api, data, params=None):
        """PUT *data* (as JSON body) to *api*."""
        method = 'PUT'
        return self.send_request(method, api, params, json=data)

    def delete(self, api, data, params=None):
        """DELETE *api*, sending *data* as JSON body."""
        method = 'DELETE'
        return self.send_request(method, api, params, json=data)

    def get_state(self, jobId):
        """ Method to get the state of the job """
        # NOTE(review): `not in 'done'` is a substring test, not equality —
        # any state that is a substring of "done" (e.g. "do") also ends the
        # loop; presumably `!= 'done'` was intended. Confirm.
        # NOTE(review): this polls in a tight loop with no sleep and no retry
        # limit, busy-waiting until the job reports done.
        method = 'GET'
        response, status_code = self.get('Jobs/%s' % jobId)
        while str(response['state']) not in 'done':
            response, status_code = self.get('Jobs/%s' % jobId)
        return 'done'
|
||||
[New file: plugins/module_utils/compat/ipaddress.py, 2580 lines — diff suppressed because it is too large.]
@@ -7,41 +7,55 @@
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
|
||||
from ansible.module_utils.basic import missing_required_lib
|
||||
from ansible.module_utils.urls import fetch_url
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
try:
|
||||
from urllib import quote_plus # Python 2.X
|
||||
from urlparse import urljoin
|
||||
except ImportError:
|
||||
from urllib.parse import quote_plus, urljoin # Python 3+
|
||||
from urllib.parse import quote_plus # Python 3+
|
||||
|
||||
import traceback
|
||||
|
||||
GITLAB_IMP_ERR = None
|
||||
try:
|
||||
import gitlab
|
||||
import requests
|
||||
HAS_GITLAB_PACKAGE = True
|
||||
except Exception:
|
||||
GITLAB_IMP_ERR = traceback.format_exc()
|
||||
HAS_GITLAB_PACKAGE = False
|
||||
|
||||
|
||||
def auth_argument_spec(spec=None):
    """Return the common GitLab authentication argument spec.

    :param dict spec: optional extra entries merged into the result.
    :return: dict suitable for AnsibleModule argument_spec.
    """
    token_names = ('api_token', 'api_oauth_token', 'api_job_token')
    arg_spec = dict((name, dict(type='str', no_log=True)) for name in token_names)
    if spec:
        arg_spec.update(spec)
    return arg_spec
||||
def request(module, api_url, project, path, access_token, private_token, rawdata='', method='GET'):
    """Perform a GitLab API v4 request for *project* and return (ok, payload).

    payload is the raw body on 204, the decoded JSON body on 200/201, and
    an error string otherwise.
    """
    url = "%s/v4/projects/%s%s" % (api_url, quote_plus(project), path)
    headers = {}
    if access_token:
        headers['Authorization'] = "Bearer %s" % access_token
    else:
        headers['Private-Token'] = private_token

    headers['Accept'] = "application/json"
    headers['Content-Type'] = "application/json"

    response, info = fetch_url(module=module, url=url, headers=headers, data=rawdata, method=method)
    status = info['status']
    content = ""
    if response:
        content = response.read()
    if status == 204:
        return True, content
    elif status == 200 or status == 201:
        return True, json.loads(content)
    else:
        # Bug fix: `content` is bytes on Python 3; concatenating it with a
        # str raised TypeError and masked the real API error message.
        return False, str(status) + ": " + to_native(content)
|
||||
|
||||
|
||||
def find_project(gitlab_instance, identifier):
|
||||
def findProject(gitlab_instance, identifier):
|
||||
try:
|
||||
project = gitlab_instance.projects.get(identifier)
|
||||
except Exception as e:
|
||||
@@ -54,7 +68,7 @@ def find_project(gitlab_instance, identifier):
|
||||
return project
|
||||
|
||||
|
||||
def find_group(gitlab_instance, identifier):
|
||||
def findGroup(gitlab_instance, identifier):
|
||||
try:
|
||||
project = gitlab_instance.groups.get(identifier)
|
||||
except Exception as e:
|
||||
@@ -63,14 +77,12 @@ def find_group(gitlab_instance, identifier):
|
||||
return project
|
||||
|
||||
|
||||
def gitlab_authentication(module):
|
||||
def gitlabAuthentication(module):
|
||||
gitlab_url = module.params['api_url']
|
||||
validate_certs = module.params['validate_certs']
|
||||
gitlab_user = module.params['api_username']
|
||||
gitlab_password = module.params['api_password']
|
||||
gitlab_token = module.params['api_token']
|
||||
gitlab_oauth_token = module.params['api_oauth_token']
|
||||
gitlab_job_token = module.params['api_job_token']
|
||||
|
||||
if not HAS_GITLAB_PACKAGE:
|
||||
module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
|
||||
@@ -83,16 +95,7 @@ def gitlab_authentication(module):
|
||||
gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password,
|
||||
private_token=gitlab_token, api_version=4)
|
||||
else:
|
||||
# We can create an oauth_token using a username and password
|
||||
# https://docs.gitlab.com/ee/api/oauth2.html#authorization-code-flow
|
||||
if gitlab_user:
|
||||
data = {'grant_type': 'password', 'username': gitlab_user, 'password': gitlab_password}
|
||||
resp = requests.post(urljoin(gitlab_url, "oauth/token"), data=data, verify=validate_certs)
|
||||
resp_data = resp.json()
|
||||
gitlab_oauth_token = resp_data["access_token"]
|
||||
|
||||
gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, private_token=gitlab_token,
|
||||
oauth_token=gitlab_oauth_token, job_token=gitlab_job_token, api_version=4)
|
||||
gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, private_token=gitlab_token, api_version=4)
|
||||
|
||||
gitlab_instance.auth()
|
||||
except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
|
||||
|
||||
@@ -38,7 +38,6 @@ from ansible.module_utils.six.moves.urllib.parse import urlencode, quote
|
||||
from ansible.module_utils.six.moves.urllib.error import HTTPError
|
||||
from ansible.module_utils.common.text.converters import to_native, to_text
|
||||
|
||||
URL_REALM_INFO = "{url}/realms/{realm}"
|
||||
URL_REALMS = "{url}/admin/realms"
|
||||
URL_REALM = "{url}/admin/realms/{realm}"
|
||||
|
||||
@@ -231,31 +230,6 @@ class KeycloakAPI(object):
|
||||
self.validate_certs = self.module.params.get('validate_certs')
|
||||
self.restheaders = connection_header
|
||||
|
||||
def get_realm_info_by_id(self, realm='master'):
|
||||
""" Obtain realm public info by id
|
||||
|
||||
:param realm: realm id
|
||||
:return: dict of real, representation or None if none matching exist
|
||||
"""
|
||||
realm_info_url = URL_REALM_INFO.format(url=self.baseurl, realm=realm)
|
||||
|
||||
try:
|
||||
return json.loads(to_native(open_url(realm_info_url, method='GET', headers=self.restheaders,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
|
||||
except HTTPError as e:
|
||||
if e.code == 404:
|
||||
return None
|
||||
else:
|
||||
self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
|
||||
exception=traceback.format_exc())
|
||||
except ValueError as e:
|
||||
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)),
|
||||
exception=traceback.format_exc())
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
|
||||
exception=traceback.format_exc())
|
||||
|
||||
def get_realm_by_id(self, realm='master'):
|
||||
""" Obtain realm representation by id
|
||||
|
||||
|
||||
@@ -1,232 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved.
|
||||
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
|
||||
|
||||
|
||||
class iLORedfishUtils(RedfishUtils):
|
||||
|
||||
def get_ilo_sessions(self):
|
||||
result = {}
|
||||
# listing all users has always been slower than other operations, why?
|
||||
session_list = []
|
||||
sessions_results = []
|
||||
# Get these entries, but does not fail if not found
|
||||
properties = ['Description', 'Id', 'Name', 'UserName']
|
||||
|
||||
# Changed self.sessions_uri to Hardcoded string.
|
||||
response = self.get_request(
|
||||
self.root_uri + self.service_root + "SessionService/Sessions/")
|
||||
if not response['ret']:
|
||||
return response
|
||||
result['ret'] = True
|
||||
data = response['data']
|
||||
|
||||
if 'Oem' in data:
|
||||
if data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]:
|
||||
current_session = data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]
|
||||
|
||||
for sessions in data[u'Members']:
|
||||
# session_list[] are URIs
|
||||
session_list.append(sessions[u'@odata.id'])
|
||||
# for each session, get details
|
||||
for uri in session_list:
|
||||
session = {}
|
||||
if uri != current_session:
|
||||
response = self.get_request(self.root_uri + uri)
|
||||
if not response['ret']:
|
||||
return response
|
||||
data = response['data']
|
||||
for property in properties:
|
||||
if property in data:
|
||||
session[property] = data[property]
|
||||
sessions_results.append(session)
|
||||
result["msg"] = sessions_results
|
||||
result["ret"] = True
|
||||
return result
|
||||
|
||||
def set_ntp_server(self, mgr_attributes):
|
||||
result = {}
|
||||
setkey = mgr_attributes['mgr_attr_name']
|
||||
|
||||
nic_info = self.get_manager_ethernet_uri()
|
||||
ethuri = nic_info["nic_addr"]
|
||||
|
||||
response = self.get_request(self.root_uri + ethuri)
|
||||
if not response['ret']:
|
||||
return response
|
||||
result['ret'] = True
|
||||
data = response['data']
|
||||
payload = {"DHCPv4": {
|
||||
"UseNTPServers": ""
|
||||
}}
|
||||
|
||||
if data["DHCPv4"]["UseNTPServers"]:
|
||||
payload["DHCPv4"]["UseNTPServers"] = False
|
||||
res_dhv4 = self.patch_request(self.root_uri + ethuri, payload)
|
||||
if not res_dhv4['ret']:
|
||||
return res_dhv4
|
||||
|
||||
payload = {"DHCPv6": {
|
||||
"UseNTPServers": ""
|
||||
}}
|
||||
|
||||
if data["DHCPv6"]["UseNTPServers"]:
|
||||
payload["DHCPv6"]["UseNTPServers"] = False
|
||||
res_dhv6 = self.patch_request(self.root_uri + ethuri, payload)
|
||||
if not res_dhv6['ret']:
|
||||
return res_dhv6
|
||||
|
||||
datetime_uri = self.manager_uri + "DateTime"
|
||||
|
||||
response = self.get_request(self.root_uri + datetime_uri)
|
||||
if not response['ret']:
|
||||
return response
|
||||
|
||||
data = response['data']
|
||||
|
||||
ntp_list = data[setkey]
|
||||
if(len(ntp_list) == 2):
|
||||
ntp_list.pop(0)
|
||||
|
||||
ntp_list.append(mgr_attributes['mgr_attr_value'])
|
||||
|
||||
payload = {setkey: ntp_list}
|
||||
|
||||
response1 = self.patch_request(self.root_uri + datetime_uri, payload)
|
||||
if not response1['ret']:
|
||||
return response1
|
||||
|
||||
return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgr_attributes['mgr_attr_name']}
|
||||
|
||||
def set_time_zone(self, attr):
|
||||
key = attr['mgr_attr_name']
|
||||
|
||||
uri = self.manager_uri + "DateTime/"
|
||||
response = self.get_request(self.root_uri + uri)
|
||||
if not response['ret']:
|
||||
return response
|
||||
|
||||
data = response["data"]
|
||||
|
||||
if key not in data:
|
||||
return {'ret': False, 'changed': False, 'msg': "Key %s not found" % key}
|
||||
|
||||
timezones = data["TimeZoneList"]
|
||||
index = ""
|
||||
for tz in timezones:
|
||||
if attr['mgr_attr_value'] in tz["Name"]:
|
||||
index = tz["Index"]
|
||||
break
|
||||
|
||||
payload = {key: {"Index": index}}
|
||||
response = self.patch_request(self.root_uri + uri, payload)
|
||||
if not response['ret']:
|
||||
return response
|
||||
|
||||
return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']}
|
||||
|
||||
def set_dns_server(self, attr):
|
||||
key = attr['mgr_attr_name']
|
||||
nic_info = self.get_manager_ethernet_uri()
|
||||
uri = nic_info["nic_addr"]
|
||||
|
||||
response = self.get_request(self.root_uri + uri)
|
||||
if not response['ret']:
|
||||
return response
|
||||
|
||||
data = response['data']
|
||||
|
||||
dns_list = data["Oem"]["Hpe"]["IPv4"][key]
|
||||
|
||||
if len(dns_list) == 3:
|
||||
dns_list.pop(0)
|
||||
|
||||
dns_list.append(attr['mgr_attr_value'])
|
||||
|
||||
payload = {
|
||||
"Oem": {
|
||||
"Hpe": {
|
||||
"IPv4": {
|
||||
key: dns_list
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
response = self.patch_request(self.root_uri + uri, payload)
|
||||
if not response['ret']:
|
||||
return response
|
||||
|
||||
return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']}
|
||||
|
||||
def set_domain_name(self, attr):
|
||||
key = attr['mgr_attr_name']
|
||||
|
||||
nic_info = self.get_manager_ethernet_uri()
|
||||
ethuri = nic_info["nic_addr"]
|
||||
|
||||
response = self.get_request(self.root_uri + ethuri)
|
||||
if not response['ret']:
|
||||
return response
|
||||
|
||||
data = response['data']
|
||||
|
||||
payload = {"DHCPv4": {
|
||||
"UseDomainName": ""
|
||||
}}
|
||||
|
||||
if data["DHCPv4"]["UseDomainName"]:
|
||||
payload["DHCPv4"]["UseDomainName"] = False
|
||||
res_dhv4 = self.patch_request(self.root_uri + ethuri, payload)
|
||||
if not res_dhv4['ret']:
|
||||
return res_dhv4
|
||||
|
||||
payload = {"DHCPv6": {
|
||||
"UseDomainName": ""
|
||||
}}
|
||||
|
||||
if data["DHCPv6"]["UseDomainName"]:
|
||||
payload["DHCPv6"]["UseDomainName"] = False
|
||||
res_dhv6 = self.patch_request(self.root_uri + ethuri, payload)
|
||||
if not res_dhv6['ret']:
|
||||
return res_dhv6
|
||||
|
||||
domain_name = attr['mgr_attr_value']
|
||||
|
||||
payload = {"Oem": {
|
||||
"Hpe": {
|
||||
key: domain_name
|
||||
}
|
||||
}}
|
||||
|
||||
response = self.patch_request(self.root_uri + ethuri, payload)
|
||||
if not response['ret']:
|
||||
return response
|
||||
return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']}
|
||||
|
||||
def set_wins_registration(self, mgrattr):
|
||||
Key = mgrattr['mgr_attr_name']
|
||||
|
||||
nic_info = self.get_manager_ethernet_uri()
|
||||
ethuri = nic_info["nic_addr"]
|
||||
|
||||
payload = {
|
||||
"Oem": {
|
||||
"Hpe": {
|
||||
"IPv4": {
|
||||
Key: False
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
response = self.patch_request(self.root_uri + ethuri, payload)
|
||||
if not response['ret']:
|
||||
return response
|
||||
return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgrattr['mgr_attr_name']}
|
||||
@@ -179,10 +179,10 @@ class IPAClient(object):
|
||||
result.append(key)
|
||||
return result
|
||||
|
||||
def modify_if_diff(self, name, ipa_list, module_list, add_method, remove_method, item=None, append=None):
|
||||
def modify_if_diff(self, name, ipa_list, module_list, add_method, remove_method, item=None):
|
||||
changed = False
|
||||
diff = list(set(ipa_list) - set(module_list))
|
||||
if append is not True and len(diff) > 0:
|
||||
if len(diff) > 0:
|
||||
changed = True
|
||||
if not self.module.check_mode:
|
||||
if item:
|
||||
|
||||
@@ -75,14 +75,11 @@ class LXDClient(object):
|
||||
else:
|
||||
raise LXDClientException('URL scheme must be unix: or https:')
|
||||
|
||||
def do(self, method, url, body_json=None, ok_error_codes=None, timeout=None, wait_for_container=None):
|
||||
def do(self, method, url, body_json=None, ok_error_codes=None, timeout=None):
|
||||
resp_json = self._send_request(method, url, body_json=body_json, ok_error_codes=ok_error_codes, timeout=timeout)
|
||||
if resp_json['type'] == 'async':
|
||||
url = '{0}/wait'.format(resp_json['operation'])
|
||||
resp_json = self._send_request('GET', url)
|
||||
if wait_for_container:
|
||||
while resp_json['metadata']['status'] == 'Running':
|
||||
resp_json = self._send_request('GET', url)
|
||||
if resp_json['metadata']['status'] != 'Success':
|
||||
self._raise_err_from_json(resp_json)
|
||||
return resp_json
|
||||
|
||||
@@ -52,36 +52,3 @@ def module_fails_on_exception(func):
|
||||
self.module.fail_json(msg=msg, exception=traceback.format_exc(),
|
||||
output=self.output, vars=self.vars.output(), **self.output)
|
||||
return wrapper
|
||||
|
||||
|
||||
def check_mode_skip(func):
|
||||
@wraps(func)
|
||||
def wrapper(self, *args, **kwargs):
|
||||
if not self.module.check_mode:
|
||||
return func(self, *args, **kwargs)
|
||||
return wrapper
|
||||
|
||||
|
||||
def check_mode_skip_returns(callable=None, value=None):
|
||||
|
||||
def deco(func):
|
||||
if callable is not None:
|
||||
@wraps(func)
|
||||
def wrapper_callable(self, *args, **kwargs):
|
||||
if self.module.check_mode:
|
||||
return callable(self, *args, **kwargs)
|
||||
return func(self, *args, **kwargs)
|
||||
return wrapper_callable
|
||||
|
||||
if value is not None:
|
||||
@wraps(func)
|
||||
def wrapper_value(self, *args, **kwargs):
|
||||
if self.module.check_mode:
|
||||
return value
|
||||
return func(self, *args, **kwargs)
|
||||
return wrapper_value
|
||||
|
||||
if callable is None and value is None:
|
||||
return check_mode_skip
|
||||
|
||||
return deco
|
||||
|
||||
@@ -141,7 +141,11 @@ class CmdMixin(object):
|
||||
fmt = find_format(param)
|
||||
value = extra_params[param]
|
||||
else:
|
||||
raise self.ModuleHelperException('Cannot determine value for parameter: {0}'.format(param))
|
||||
self.module.deprecate("Cannot determine value for parameter: {0}. "
|
||||
"From version 4.0.0 onwards this will generate an exception".format(param),
|
||||
version="4.0.0", collection_name="community.general")
|
||||
continue
|
||||
|
||||
else:
|
||||
raise self.ModuleHelperException("run_command parameter must be either a str or a dict: {0}".format(param))
|
||||
cmd_args = add_arg_formatted_param(cmd_args, fmt, value)
|
||||
@@ -158,9 +162,8 @@ class CmdMixin(object):
|
||||
publish_rc=True,
|
||||
publish_out=True,
|
||||
publish_err=True,
|
||||
publish_cmd=True,
|
||||
*args, **kwargs):
|
||||
cmd_args = self._calculate_args(extra_params, params)
|
||||
self.vars.cmd_args = self._calculate_args(extra_params, params)
|
||||
options = dict(self.run_command_fixed_options)
|
||||
options['check_rc'] = options.get('check_rc', self.check_rc)
|
||||
options.update(kwargs)
|
||||
@@ -172,15 +175,13 @@ class CmdMixin(object):
|
||||
})
|
||||
self.update_output(force_lang=self.force_lang)
|
||||
options['environ_update'] = env_update
|
||||
rc, out, err = self.module.run_command(cmd_args, *args, **options)
|
||||
rc, out, err = self.module.run_command(self.vars.cmd_args, *args, **options)
|
||||
if publish_rc:
|
||||
self.update_output(rc=rc)
|
||||
if publish_out:
|
||||
self.update_output(stdout=out)
|
||||
if publish_err:
|
||||
self.update_output(stderr=err)
|
||||
if publish_cmd:
|
||||
self.update_output(cmd_args=cmd_args)
|
||||
if process_output is None:
|
||||
_process = self.process_command_output
|
||||
else:
|
||||
|
||||
@@ -1,61 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# (c) 2020, Alexei Znamensky <russoz@gmail.com>
|
||||
# Copyright: (c) 2020, Ansible Project
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
class DeprecateAttrsMixin(object):
|
||||
|
||||
def _deprecate_setup(self, attr, target, module):
|
||||
if target is None:
|
||||
target = self
|
||||
if not hasattr(target, attr):
|
||||
raise ValueError("Target {0} has no attribute {1}".format(target, attr))
|
||||
if module is None:
|
||||
if isinstance(target, AnsibleModule):
|
||||
module = target
|
||||
elif hasattr(target, "module") and isinstance(target.module, AnsibleModule):
|
||||
module = target.module
|
||||
else:
|
||||
raise ValueError("Failed to automatically discover the AnsibleModule instance. Pass 'module' parameter explicitly.")
|
||||
|
||||
# setup internal state dicts
|
||||
value_attr = "__deprecated_attr_value"
|
||||
trigger_attr = "__deprecated_attr_trigger"
|
||||
if not hasattr(target, value_attr):
|
||||
setattr(target, value_attr, {})
|
||||
if not hasattr(target, trigger_attr):
|
||||
setattr(target, trigger_attr, {})
|
||||
value_dict = getattr(target, value_attr)
|
||||
trigger_dict = getattr(target, trigger_attr)
|
||||
return target, module, value_dict, trigger_dict
|
||||
|
||||
def _deprecate_attr(self, attr, msg, version=None, date=None, collection_name=None, target=None, value=None, module=None):
|
||||
target, module, value_dict, trigger_dict = self._deprecate_setup(attr, target, module)
|
||||
|
||||
value_dict[attr] = getattr(target, attr, value)
|
||||
trigger_dict[attr] = False
|
||||
|
||||
def _trigger():
|
||||
if not trigger_dict[attr]:
|
||||
module.deprecate(msg, version=version, date=date, collection_name=collection_name)
|
||||
trigger_dict[attr] = True
|
||||
|
||||
def _getter(_self):
|
||||
_trigger()
|
||||
return value_dict[attr]
|
||||
|
||||
def _setter(_self, new_value):
|
||||
_trigger()
|
||||
value_dict[attr] = new_value
|
||||
|
||||
# override attribute
|
||||
prop = property(_getter)
|
||||
setattr(target, attr, prop)
|
||||
setattr(target, "_{0}_setter".format(attr), prop.setter(_setter))
|
||||
@@ -13,10 +13,9 @@ from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd im
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyMixin
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin, VarDict as _VD
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deprecate_attrs import DeprecateAttrsMixin
|
||||
|
||||
|
||||
class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelperBase):
|
||||
class ModuleHelper(VarsMixin, DependencyMixin, ModuleHelperBase):
|
||||
_output_conflict_list = ('msg', 'exception', 'output', 'vars', 'changed')
|
||||
facts_name = None
|
||||
output_params = ()
|
||||
@@ -37,15 +36,6 @@ class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelper
|
||||
fact=name in self.facts_params,
|
||||
)
|
||||
|
||||
self._deprecate_attr(
|
||||
attr="VarDict",
|
||||
msg="ModuleHelper.VarDict attribute is deprecated, use VarDict from "
|
||||
"the ansible_collections.community.general.plugins.module_utils.mh.mixins.vars module instead",
|
||||
version="6.0.0",
|
||||
collection_name="community.general",
|
||||
target=ModuleHelper,
|
||||
module=self.module)
|
||||
|
||||
def update_output(self, **kwargs):
|
||||
self.update_vars(meta={"output": True}, **kwargs)
|
||||
|
||||
|
||||
598
plugins/module_utils/net_tools/nios/api.py
Normal file
598
plugins/module_utils/net_tools/nios/api.py
Normal file
@@ -0,0 +1,598 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# (c) 2018 Red Hat Inc.
|
||||
#
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
import os
|
||||
from functools import partial
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
from ansible.module_utils.six import iteritems
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
from ansible.module_utils.basic import env_fallback
|
||||
from ansible.module_utils.common.validation import check_type_dict
|
||||
|
||||
try:
|
||||
from infoblox_client.connector import Connector
|
||||
from infoblox_client.exceptions import InfobloxException
|
||||
HAS_INFOBLOX_CLIENT = True
|
||||
except ImportError:
|
||||
HAS_INFOBLOX_CLIENT = False
|
||||
|
||||
# defining nios constants
|
||||
NIOS_DNS_VIEW = 'view'
|
||||
NIOS_NETWORK_VIEW = 'networkview'
|
||||
NIOS_HOST_RECORD = 'record:host'
|
||||
NIOS_IPV4_NETWORK = 'network'
|
||||
NIOS_IPV6_NETWORK = 'ipv6network'
|
||||
NIOS_ZONE = 'zone_auth'
|
||||
NIOS_PTR_RECORD = 'record:ptr'
|
||||
NIOS_A_RECORD = 'record:a'
|
||||
NIOS_AAAA_RECORD = 'record:aaaa'
|
||||
NIOS_CNAME_RECORD = 'record:cname'
|
||||
NIOS_MX_RECORD = 'record:mx'
|
||||
NIOS_SRV_RECORD = 'record:srv'
|
||||
NIOS_NAPTR_RECORD = 'record:naptr'
|
||||
NIOS_TXT_RECORD = 'record:txt'
|
||||
NIOS_NSGROUP = 'nsgroup'
|
||||
NIOS_IPV4_FIXED_ADDRESS = 'fixedaddress'
|
||||
NIOS_IPV6_FIXED_ADDRESS = 'ipv6fixedaddress'
|
||||
NIOS_NEXT_AVAILABLE_IP = 'func:nextavailableip'
|
||||
NIOS_IPV4_NETWORK_CONTAINER = 'networkcontainer'
|
||||
NIOS_IPV6_NETWORK_CONTAINER = 'ipv6networkcontainer'
|
||||
NIOS_MEMBER = 'member'
|
||||
|
||||
NIOS_PROVIDER_SPEC = {
|
||||
'host': dict(fallback=(env_fallback, ['INFOBLOX_HOST'])),
|
||||
'username': dict(fallback=(env_fallback, ['INFOBLOX_USERNAME'])),
|
||||
'password': dict(fallback=(env_fallback, ['INFOBLOX_PASSWORD']), no_log=True),
|
||||
'validate_certs': dict(type='bool', default=False, fallback=(env_fallback, ['INFOBLOX_SSL_VERIFY']), aliases=['ssl_verify']),
|
||||
'silent_ssl_warnings': dict(type='bool', default=True),
|
||||
'http_request_timeout': dict(type='int', default=10, fallback=(env_fallback, ['INFOBLOX_HTTP_REQUEST_TIMEOUT'])),
|
||||
'http_pool_connections': dict(type='int', default=10),
|
||||
'http_pool_maxsize': dict(type='int', default=10),
|
||||
'max_retries': dict(type='int', default=3, fallback=(env_fallback, ['INFOBLOX_MAX_RETRIES'])),
|
||||
'wapi_version': dict(default='2.1', fallback=(env_fallback, ['INFOBLOX_WAP_VERSION'])),
|
||||
'max_results': dict(type='int', default=1000, fallback=(env_fallback, ['INFOBLOX_MAX_RETRIES']))
|
||||
}
|
||||
|
||||
|
||||
def get_connector(*args, **kwargs):
|
||||
''' Returns an instance of infoblox_client.connector.Connector
|
||||
:params args: positional arguments are silently ignored
|
||||
:params kwargs: dict that is passed to Connector init
|
||||
:returns: Connector
|
||||
'''
|
||||
if not HAS_INFOBLOX_CLIENT:
|
||||
raise Exception('infoblox-client is required but does not appear '
|
||||
'to be installed. It can be installed using the '
|
||||
'command `pip install infoblox-client`')
|
||||
|
||||
if not set(kwargs.keys()).issubset(list(NIOS_PROVIDER_SPEC.keys()) + ['ssl_verify']):
|
||||
raise Exception('invalid or unsupported keyword argument for connector')
|
||||
for key, value in iteritems(NIOS_PROVIDER_SPEC):
|
||||
if key not in kwargs:
|
||||
# apply default values from NIOS_PROVIDER_SPEC since we cannot just
|
||||
# assume the provider values are coming from AnsibleModule
|
||||
if 'default' in value:
|
||||
kwargs[key] = value['default']
|
||||
|
||||
# override any values with env variables unless they were
|
||||
# explicitly set
|
||||
env = ('INFOBLOX_%s' % key).upper()
|
||||
if env in os.environ:
|
||||
kwargs[key] = os.environ.get(env)
|
||||
|
||||
if 'validate_certs' in kwargs.keys():
|
||||
kwargs['ssl_verify'] = kwargs['validate_certs']
|
||||
kwargs.pop('validate_certs', None)
|
||||
|
||||
return Connector(kwargs)
|
||||
|
||||
|
||||
def normalize_extattrs(value):
|
||||
''' Normalize extattrs field to expected format
|
||||
The module accepts extattrs as key/value pairs. This method will
|
||||
transform the key/value pairs into a structure suitable for
|
||||
sending across WAPI in the format of:
|
||||
extattrs: {
|
||||
key: {
|
||||
value: <value>
|
||||
}
|
||||
}
|
||||
'''
|
||||
return dict([(k, {'value': v}) for k, v in iteritems(value)])
|
||||
|
||||
|
||||
def flatten_extattrs(value):
|
||||
''' Flatten the key/value struct for extattrs
|
||||
WAPI returns extattrs field as a dict in form of:
|
||||
extattrs: {
|
||||
key: {
|
||||
value: <value>
|
||||
}
|
||||
}
|
||||
This method will flatten the structure to:
|
||||
extattrs: {
|
||||
key: value
|
||||
}
|
||||
'''
|
||||
return dict([(k, v['value']) for k, v in iteritems(value)])
|
||||
|
||||
|
||||
def member_normalize(member_spec):
|
||||
''' Transforms the member module arguments into a valid WAPI struct
|
||||
This function will transform the arguments into a structure that
|
||||
is a valid WAPI structure in the format of:
|
||||
{
|
||||
key: <value>,
|
||||
}
|
||||
It will remove any arguments that are set to None since WAPI will error on
|
||||
that condition.
|
||||
The remainder of the value validation is performed by WAPI
|
||||
Some parameters in ib_spec are passed as a list in order to pass the validation for elements.
|
||||
In this function, they are converted to dictionary.
|
||||
'''
|
||||
member_elements = ['vip_setting', 'ipv6_setting', 'lan2_port_setting', 'mgmt_port_setting',
|
||||
'pre_provisioning', 'network_setting', 'v6_network_setting',
|
||||
'ha_port_setting', 'lan_port_setting', 'lan2_physical_setting',
|
||||
'lan_ha_port_setting', 'mgmt_network_setting', 'v6_mgmt_network_setting']
|
||||
for key in list(member_spec.keys()):
|
||||
if key in member_elements and member_spec[key] is not None:
|
||||
member_spec[key] = member_spec[key][0]
|
||||
if isinstance(member_spec[key], dict):
|
||||
member_spec[key] = member_normalize(member_spec[key])
|
||||
elif isinstance(member_spec[key], list):
|
||||
for x in member_spec[key]:
|
||||
if isinstance(x, dict):
|
||||
x = member_normalize(x)
|
||||
elif member_spec[key] is None:
|
||||
del member_spec[key]
|
||||
return member_spec
|
||||
|
||||
|
||||
def normalize_ib_spec(ib_spec):
|
||||
result = {}
|
||||
for arg in ib_spec:
|
||||
result[arg] = dict([(k, v)
|
||||
for k, v in iteritems(ib_spec[arg])
|
||||
if k not in ('ib_req', 'transform', 'update')])
|
||||
return result
|
||||
|
||||
|
||||
class WapiBase(object):
|
||||
''' Base class for implementing Infoblox WAPI API '''
|
||||
provider_spec = {'provider': dict(type='dict', options=NIOS_PROVIDER_SPEC)}
|
||||
|
||||
def __init__(self, provider):
|
||||
self.connector = get_connector(**provider)
|
||||
|
||||
def __getattr__(self, name):
|
||||
try:
|
||||
return self.__dict__[name]
|
||||
except KeyError:
|
||||
if name.startswith('_'):
|
||||
raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
|
||||
return partial(self._invoke_method, name)
|
||||
|
||||
def _invoke_method(self, name, *args, **kwargs):
|
||||
try:
|
||||
method = getattr(self.connector, name)
|
||||
return method(*args, **kwargs)
|
||||
except InfobloxException as exc:
|
||||
if hasattr(self, 'handle_exception'):
|
||||
self.handle_exception(name, exc)
|
||||
else:
|
||||
raise
|
||||
|
||||
|
||||
class WapiLookup(WapiBase):
|
||||
''' Implements WapiBase for lookup plugins '''
|
||||
def handle_exception(self, method_name, exc):
|
||||
if ('text' in exc.response):
|
||||
raise Exception(exc.response['text'])
|
||||
else:
|
||||
raise Exception(exc)
|
||||
|
||||
|
||||
class WapiInventory(WapiBase):
|
||||
''' Implements WapiBase for dynamic inventory script '''
|
||||
pass
|
||||
|
||||
|
||||
class WapiModule(WapiBase):
|
||||
''' Implements WapiBase for executing a NIOS module '''
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
provider = module.params['provider']
|
||||
try:
|
||||
super(WapiModule, self).__init__(provider)
|
||||
except Exception as exc:
|
||||
self.module.fail_json(msg=to_text(exc))
|
||||
|
||||
def handle_exception(self, method_name, exc):
|
||||
''' Handles any exceptions raised
|
||||
This method will be called if an InfobloxException is raised for
|
||||
any call to the instance of Connector and also, in case of generic
|
||||
exception. This method will then gracefully fail the module.
|
||||
:args exc: instance of InfobloxException
|
||||
'''
|
||||
if ('text' in exc.response):
|
||||
self.module.fail_json(
|
||||
msg=exc.response['text'],
|
||||
type=exc.response['Error'].split(':')[0],
|
||||
code=exc.response.get('code'),
|
||||
operation=method_name
|
||||
)
|
||||
else:
|
||||
self.module.fail_json(msg=to_native(exc))
|
||||
|
||||
def run(self, ib_obj_type, ib_spec):
|
||||
''' Runs the module and performans configuration tasks
|
||||
:args ib_obj_type: the WAPI object type to operate against
|
||||
:args ib_spec: the specification for the WAPI object as a dict
|
||||
:returns: a results dict
|
||||
'''
|
||||
|
||||
update = new_name = None
|
||||
state = self.module.params['state']
|
||||
if state not in ('present', 'absent'):
|
||||
self.module.fail_json(msg='state must be one of `present`, `absent`, got `%s`' % state)
|
||||
|
||||
result = {'changed': False}
|
||||
|
||||
obj_filter = dict([(k, self.module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')])
|
||||
|
||||
# get object reference
|
||||
ib_obj_ref, update, new_name = self.get_object_ref(self.module, ib_obj_type, obj_filter, ib_spec)
|
||||
proposed_object = {}
|
||||
for key, value in iteritems(ib_spec):
|
||||
if self.module.params[key] is not None:
|
||||
if 'transform' in value:
|
||||
proposed_object[key] = value['transform'](self.module)
|
||||
else:
|
||||
proposed_object[key] = self.module.params[key]
|
||||
|
||||
# If configure_by_dns is set to False and view is 'default', then delete the default dns
|
||||
if not proposed_object.get('configure_for_dns') and proposed_object.get('view') == 'default'\
|
||||
and ib_obj_type == NIOS_HOST_RECORD:
|
||||
del proposed_object['view']
|
||||
|
||||
if ib_obj_ref:
|
||||
if len(ib_obj_ref) > 1:
|
||||
for each in ib_obj_ref:
|
||||
# To check for existing A_record with same name with input A_record by IP
|
||||
if each.get('ipv4addr') and each.get('ipv4addr') == proposed_object.get('ipv4addr'):
|
||||
current_object = each
|
||||
# To check for existing Host_record with same name with input Host_record by IP
|
||||
elif each.get('ipv4addrs')[0].get('ipv4addr') and each.get('ipv4addrs')[0].get('ipv4addr')\
|
||||
== proposed_object.get('ipv4addrs')[0].get('ipv4addr'):
|
||||
current_object = each
|
||||
# Else set the current_object with input value
|
||||
else:
|
||||
current_object = obj_filter
|
||||
ref = None
|
||||
else:
|
||||
current_object = ib_obj_ref[0]
|
||||
if 'extattrs' in current_object:
|
||||
current_object['extattrs'] = flatten_extattrs(current_object['extattrs'])
|
||||
if current_object.get('_ref'):
|
||||
ref = current_object.pop('_ref')
|
||||
else:
|
||||
current_object = obj_filter
|
||||
ref = None
|
||||
# checks if the object type is member to normalize the attributes being passed
|
||||
if (ib_obj_type == NIOS_MEMBER):
|
||||
proposed_object = member_normalize(proposed_object)
|
||||
|
||||
# checks if the name's field has been updated
|
||||
if update and new_name:
|
||||
proposed_object['name'] = new_name
|
||||
|
||||
check_remove = []
|
||||
if (ib_obj_type == NIOS_HOST_RECORD):
|
||||
# this check is for idempotency, as if the same ip address shall be passed
|
||||
# add param will be removed, and same exists true for remove case as well.
|
||||
if 'ipv4addrs' in [current_object and proposed_object]:
|
||||
for each in current_object['ipv4addrs']:
|
||||
if each['ipv4addr'] == proposed_object['ipv4addrs'][0]['ipv4addr']:
|
||||
if 'add' in proposed_object['ipv4addrs'][0]:
|
||||
del proposed_object['ipv4addrs'][0]['add']
|
||||
break
|
||||
check_remove += each.values()
|
||||
if proposed_object['ipv4addrs'][0]['ipv4addr'] not in check_remove:
|
||||
if 'remove' in proposed_object['ipv4addrs'][0]:
|
||||
del proposed_object['ipv4addrs'][0]['remove']
|
||||
|
||||
res = None
|
||||
modified = not self.compare_objects(current_object, proposed_object)
|
||||
if 'extattrs' in proposed_object:
|
||||
proposed_object['extattrs'] = normalize_extattrs(proposed_object['extattrs'])
|
||||
|
||||
# Checks if nios_next_ip param is passed in ipv4addrs/ipv4addr args
|
||||
proposed_object = self.check_if_nios_next_ip_exists(proposed_object)
|
||||
|
||||
if state == 'present':
|
||||
if ref is None:
|
||||
if not self.module.check_mode:
|
||||
self.create_object(ib_obj_type, proposed_object)
|
||||
result['changed'] = True
|
||||
# Check if NIOS_MEMBER and the flag to call function create_token is set
|
||||
elif (ib_obj_type == NIOS_MEMBER) and (proposed_object['create_token']):
|
||||
proposed_object = None
|
||||
# the function creates a token that can be used by a pre-provisioned member to join the grid
|
||||
result['api_results'] = self.call_func('create_token', ref, proposed_object)
|
||||
result['changed'] = True
|
||||
elif modified:
|
||||
if 'ipv4addrs' in proposed_object:
|
||||
if ('add' not in proposed_object['ipv4addrs'][0]) and ('remove' not in proposed_object['ipv4addrs'][0]):
|
||||
self.check_if_recordname_exists(obj_filter, ib_obj_ref, ib_obj_type, current_object, proposed_object)
|
||||
|
||||
if (ib_obj_type in (NIOS_HOST_RECORD, NIOS_NETWORK_VIEW, NIOS_DNS_VIEW)):
|
||||
run_update = True
|
||||
proposed_object = self.on_update(proposed_object, ib_spec)
|
||||
if 'ipv4addrs' in proposed_object:
|
||||
if ('add' or 'remove') in proposed_object['ipv4addrs'][0]:
|
||||
run_update, proposed_object = self.check_if_add_remove_ip_arg_exists(proposed_object)
|
||||
if run_update:
|
||||
res = self.update_object(ref, proposed_object)
|
||||
result['changed'] = True
|
||||
else:
|
||||
res = ref
|
||||
if (ib_obj_type in (NIOS_A_RECORD, NIOS_AAAA_RECORD, NIOS_PTR_RECORD, NIOS_SRV_RECORD)):
|
||||
# popping 'view' key as update of 'view' is not supported with respect to a:record/aaaa:record/srv:record/ptr:record
|
||||
proposed_object = self.on_update(proposed_object, ib_spec)
|
||||
del proposed_object['view']
|
||||
if not self.module.check_mode:
|
||||
res = self.update_object(ref, proposed_object)
|
||||
result['changed'] = True
|
||||
elif 'network_view' in proposed_object:
|
||||
proposed_object.pop('network_view')
|
||||
result['changed'] = True
|
||||
if not self.module.check_mode and res is None:
|
||||
proposed_object = self.on_update(proposed_object, ib_spec)
|
||||
self.update_object(ref, proposed_object)
|
||||
result['changed'] = True
|
||||
|
||||
elif state == 'absent':
|
||||
if ref is not None:
|
||||
if 'ipv4addrs' in proposed_object:
|
||||
if 'remove' in proposed_object['ipv4addrs'][0]:
|
||||
self.check_if_add_remove_ip_arg_exists(proposed_object)
|
||||
self.update_object(ref, proposed_object)
|
||||
result['changed'] = True
|
||||
elif not self.module.check_mode:
|
||||
self.delete_object(ref)
|
||||
result['changed'] = True
|
||||
|
||||
return result
|
||||
|
||||
def check_if_recordname_exists(self, obj_filter, ib_obj_ref, ib_obj_type, current_object, proposed_object):
|
||||
''' Send POST request if host record input name and retrieved ref name is same,
|
||||
but input IP and retrieved IP is different'''
|
||||
|
||||
if 'name' in (obj_filter and ib_obj_ref[0]) and ib_obj_type == NIOS_HOST_RECORD:
|
||||
obj_host_name = obj_filter['name']
|
||||
ref_host_name = ib_obj_ref[0]['name']
|
||||
if 'ipv4addrs' in (current_object and proposed_object):
|
||||
current_ip_addr = current_object['ipv4addrs'][0]['ipv4addr']
|
||||
proposed_ip_addr = proposed_object['ipv4addrs'][0]['ipv4addr']
|
||||
elif 'ipv6addrs' in (current_object and proposed_object):
|
||||
current_ip_addr = current_object['ipv6addrs'][0]['ipv6addr']
|
||||
proposed_ip_addr = proposed_object['ipv6addrs'][0]['ipv6addr']
|
||||
|
||||
if obj_host_name == ref_host_name and current_ip_addr != proposed_ip_addr:
|
||||
self.create_object(ib_obj_type, proposed_object)
|
||||
|
||||
def check_if_nios_next_ip_exists(self, proposed_object):
|
||||
''' Check if nios_next_ip argument is passed in ipaddr while creating
|
||||
host record, if yes then format proposed object ipv4addrs and pass
|
||||
func:nextavailableip and ipaddr range to create hostrecord with next
|
||||
available ip in one call to avoid any race condition '''
|
||||
|
||||
if 'ipv4addrs' in proposed_object:
|
||||
if 'nios_next_ip' in proposed_object['ipv4addrs'][0]['ipv4addr']:
|
||||
ip_range = check_type_dict(proposed_object['ipv4addrs'][0]['ipv4addr'])['nios_next_ip']
|
||||
proposed_object['ipv4addrs'][0]['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range
|
||||
elif 'ipv4addr' in proposed_object:
|
||||
if 'nios_next_ip' in proposed_object['ipv4addr']:
|
||||
ip_range = check_type_dict(proposed_object['ipv4addr'])['nios_next_ip']
|
||||
proposed_object['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range
|
||||
|
||||
return proposed_object
|
||||
|
||||
def check_if_add_remove_ip_arg_exists(self, proposed_object):
|
||||
'''
|
||||
This function shall check if add/remove param is set to true and
|
||||
is passed in the args, then we will update the proposed dictionary
|
||||
to add/remove IP to existing host_record, if the user passes false
|
||||
param with the argument nothing shall be done.
|
||||
:returns: True if param is changed based on add/remove, and also the
|
||||
changed proposed_object.
|
||||
'''
|
||||
update = False
|
||||
if 'add' in proposed_object['ipv4addrs'][0]:
|
||||
if proposed_object['ipv4addrs'][0]['add']:
|
||||
proposed_object['ipv4addrs+'] = proposed_object['ipv4addrs']
|
||||
del proposed_object['ipv4addrs']
|
||||
del proposed_object['ipv4addrs+'][0]['add']
|
||||
update = True
|
||||
else:
|
||||
del proposed_object['ipv4addrs'][0]['add']
|
||||
elif 'remove' in proposed_object['ipv4addrs'][0]:
|
||||
if proposed_object['ipv4addrs'][0]['remove']:
|
||||
proposed_object['ipv4addrs-'] = proposed_object['ipv4addrs']
|
||||
del proposed_object['ipv4addrs']
|
||||
del proposed_object['ipv4addrs-'][0]['remove']
|
||||
update = True
|
||||
else:
|
||||
del proposed_object['ipv4addrs'][0]['remove']
|
||||
return update, proposed_object
|
||||
|
||||
def issubset(self, item, objects):
|
||||
''' Checks if item is a subset of objects
|
||||
:args item: the subset item to validate
|
||||
:args objects: superset list of objects to validate against
|
||||
:returns: True if item is a subset of one entry in objects otherwise
|
||||
this method will return None
|
||||
'''
|
||||
for obj in objects:
|
||||
if isinstance(item, dict):
|
||||
if all(entry in obj.items() for entry in item.items()):
|
||||
return True
|
||||
else:
|
||||
if item in obj:
|
||||
return True
|
||||
|
||||
def compare_objects(self, current_object, proposed_object):
|
||||
for key, proposed_item in iteritems(proposed_object):
|
||||
current_item = current_object.get(key)
|
||||
|
||||
# if proposed has a key that current doesn't then the objects are
|
||||
# not equal and False will be immediately returned
|
||||
if current_item is None:
|
||||
return False
|
||||
|
||||
elif isinstance(proposed_item, list):
|
||||
if key == 'aliases':
|
||||
if set(current_item) != set(proposed_item):
|
||||
return False
|
||||
for subitem in proposed_item:
|
||||
if not self.issubset(subitem, current_item):
|
||||
return False
|
||||
|
||||
elif isinstance(proposed_item, dict):
|
||||
return self.compare_objects(current_item, proposed_item)
|
||||
|
||||
else:
|
||||
if current_item != proposed_item:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def get_object_ref(self, module, ib_obj_type, obj_filter, ib_spec):
|
||||
''' this function gets the reference object of pre-existing nios objects '''
|
||||
|
||||
update = False
|
||||
old_name = new_name = None
|
||||
if ('name' in obj_filter):
|
||||
# gets and returns the current object based on name/old_name passed
|
||||
try:
|
||||
name_obj = check_type_dict(obj_filter['name'])
|
||||
old_name = name_obj['old_name']
|
||||
new_name = name_obj['new_name']
|
||||
except TypeError:
|
||||
name = obj_filter['name']
|
||||
|
||||
if old_name and new_name:
|
||||
if (ib_obj_type == NIOS_HOST_RECORD):
|
||||
test_obj_filter = dict([('name', old_name), ('view', obj_filter['view'])])
|
||||
elif (ib_obj_type in (NIOS_AAAA_RECORD, NIOS_A_RECORD)):
|
||||
test_obj_filter = obj_filter
|
||||
else:
|
||||
test_obj_filter = dict([('name', old_name)])
|
||||
# get the object reference
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=list(ib_spec.keys()))
|
||||
if ib_obj:
|
||||
obj_filter['name'] = new_name
|
||||
else:
|
||||
test_obj_filter['name'] = new_name
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=list(ib_spec.keys()))
|
||||
update = True
|
||||
return ib_obj, update, new_name
|
||||
if (ib_obj_type == NIOS_HOST_RECORD):
|
||||
# to check only by name if dns bypassing is set
|
||||
if not obj_filter['configure_for_dns']:
|
||||
test_obj_filter = dict([('name', name)])
|
||||
else:
|
||||
test_obj_filter = dict([('name', name), ('view', obj_filter['view'])])
|
||||
elif (ib_obj_type == NIOS_IPV4_FIXED_ADDRESS or ib_obj_type == NIOS_IPV6_FIXED_ADDRESS and 'mac' in obj_filter):
|
||||
test_obj_filter = dict([['mac', obj_filter['mac']]])
|
||||
elif (ib_obj_type == NIOS_A_RECORD):
|
||||
# resolves issue where a_record with uppercase name was returning null and was failing
|
||||
test_obj_filter = obj_filter
|
||||
test_obj_filter['name'] = test_obj_filter['name'].lower()
|
||||
# resolves issue where multiple a_records with same name and different IP address
|
||||
try:
|
||||
ipaddr_obj = check_type_dict(obj_filter['ipv4addr'])
|
||||
ipaddr = ipaddr_obj['old_ipv4addr']
|
||||
except TypeError:
|
||||
ipaddr = obj_filter['ipv4addr']
|
||||
test_obj_filter['ipv4addr'] = ipaddr
|
||||
elif (ib_obj_type == NIOS_TXT_RECORD):
|
||||
# resolves issue where multiple txt_records with same name and different text
|
||||
test_obj_filter = obj_filter
|
||||
try:
|
||||
text_obj = check_type_dict(obj_filter['text'])
|
||||
txt = text_obj['old_text']
|
||||
except TypeError:
|
||||
txt = obj_filter['text']
|
||||
test_obj_filter['text'] = txt
|
||||
# check if test_obj_filter is empty copy passed obj_filter
|
||||
else:
|
||||
test_obj_filter = obj_filter
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=list(ib_spec.keys()))
|
||||
elif (ib_obj_type == NIOS_A_RECORD):
|
||||
# resolves issue where multiple a_records with same name and different IP address
|
||||
test_obj_filter = obj_filter
|
||||
try:
|
||||
ipaddr_obj = check_type_dict(obj_filter['ipv4addr'])
|
||||
ipaddr = ipaddr_obj['old_ipv4addr']
|
||||
except TypeError:
|
||||
ipaddr = obj_filter['ipv4addr']
|
||||
test_obj_filter['ipv4addr'] = ipaddr
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=list(ib_spec.keys()))
|
||||
elif (ib_obj_type == NIOS_TXT_RECORD):
|
||||
# resolves issue where multiple txt_records with same name and different text
|
||||
test_obj_filter = obj_filter
|
||||
try:
|
||||
text_obj = check_type_dict(obj_filter['text'])
|
||||
txt = text_obj['old_text']
|
||||
except TypeError:
|
||||
txt = obj_filter['text']
|
||||
test_obj_filter['text'] = txt
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=list(ib_spec.keys()))
|
||||
elif (ib_obj_type == NIOS_ZONE):
|
||||
# del key 'restart_if_needed' as nios_zone get_object fails with the key present
|
||||
temp = ib_spec['restart_if_needed']
|
||||
del ib_spec['restart_if_needed']
|
||||
ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=list(ib_spec.keys()))
|
||||
# reinstate restart_if_needed if ib_obj is none, meaning there's no existing nios_zone ref
|
||||
if not ib_obj:
|
||||
ib_spec['restart_if_needed'] = temp
|
||||
elif (ib_obj_type == NIOS_MEMBER):
|
||||
# del key 'create_token' as nios_member get_object fails with the key present
|
||||
temp = ib_spec['create_token']
|
||||
del ib_spec['create_token']
|
||||
ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=list(ib_spec.keys()))
|
||||
if temp:
|
||||
# reinstate 'create_token' key
|
||||
ib_spec['create_token'] = temp
|
||||
else:
|
||||
ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=list(ib_spec.keys()))
|
||||
return ib_obj, update, new_name
|
||||
|
||||
def on_update(self, proposed_object, ib_spec):
|
||||
''' Event called before the update is sent to the API endpoing
|
||||
This method will allow the final proposed object to be changed
|
||||
and/or keys filtered before it is sent to the API endpoint to
|
||||
be processed.
|
||||
:args proposed_object: A dict item that will be encoded and sent
|
||||
the API endpoint with the updated data structure
|
||||
:returns: updated object to be sent to API endpoint
|
||||
'''
|
||||
keys = set()
|
||||
for key, value in iteritems(proposed_object):
|
||||
update = ib_spec[key].get('update', True)
|
||||
if not update:
|
||||
keys.add(key)
|
||||
return dict([(k, v) for k, v in iteritems(proposed_object) if k not in keys])
|
||||
@@ -68,9 +68,6 @@ def ansible_to_proxmox_bool(value):
|
||||
class ProxmoxAnsible(object):
|
||||
"""Base class for Proxmox modules"""
|
||||
def __init__(self, module):
|
||||
if not HAS_PROXMOXER:
|
||||
module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
|
||||
|
||||
self.module = module
|
||||
self.proxmox_api = self._connect()
|
||||
# Test token validity
|
||||
|
||||
@@ -1834,16 +1834,12 @@ class RedfishUtils(object):
|
||||
result['ret'] = True
|
||||
data = response['data']
|
||||
|
||||
# Checking if fans are present
|
||||
if u'Fans' in data:
|
||||
for device in data[u'Fans']:
|
||||
fan = {}
|
||||
for property in properties:
|
||||
if property in device:
|
||||
fan[property] = device[property]
|
||||
fan_results.append(fan)
|
||||
else:
|
||||
return {'ret': False, 'msg': "No Fans present"}
|
||||
for device in data[u'Fans']:
|
||||
fan = {}
|
||||
for property in properties:
|
||||
if property in device:
|
||||
fan[property] = device[property]
|
||||
fan_results.append(fan)
|
||||
result["entries"] = fan_results
|
||||
return result
|
||||
|
||||
@@ -2033,28 +2029,15 @@ class RedfishUtils(object):
|
||||
def get_multi_memory_inventory(self):
|
||||
return self.aggregate_systems(self.get_memory_inventory)
|
||||
|
||||
def get_nic(self, resource_uri):
|
||||
result = {}
|
||||
properties = ['Name', 'Id', 'Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses',
|
||||
'NameServers', 'MACAddress', 'PermanentMACAddress',
|
||||
'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status']
|
||||
response = self.get_request(self.root_uri + resource_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
result['ret'] = True
|
||||
data = response['data']
|
||||
nic = {}
|
||||
for property in properties:
|
||||
if property in data:
|
||||
nic[property] = data[property]
|
||||
result['entries'] = nic
|
||||
return(result)
|
||||
|
||||
def get_nic_inventory(self, resource_uri):
|
||||
result = {}
|
||||
nic_list = []
|
||||
nic_results = []
|
||||
key = "EthernetInterfaces"
|
||||
# Get these entries, but does not fail if not found
|
||||
properties = ['Name', 'Id', 'Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses',
|
||||
'NameServers', 'MACAddress', 'PermanentMACAddress',
|
||||
'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status']
|
||||
|
||||
response = self.get_request(self.root_uri + resource_uri)
|
||||
if response['ret'] is False:
|
||||
@@ -2078,9 +2061,18 @@ class RedfishUtils(object):
|
||||
nic_list.append(nic[u'@odata.id'])
|
||||
|
||||
for n in nic_list:
|
||||
nic = self.get_nic(n)
|
||||
if nic['ret']:
|
||||
nic_results.append(nic['entries'])
|
||||
nic = {}
|
||||
uri = self.root_uri + n
|
||||
response = self.get_request(uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
|
||||
for property in properties:
|
||||
if property in data:
|
||||
nic[property] = data[property]
|
||||
|
||||
nic_results.append(nic)
|
||||
result["entries"] = nic_results
|
||||
return result
|
||||
|
||||
@@ -2705,14 +2697,39 @@ class RedfishUtils(object):
|
||||
return self.aggregate_managers(self.get_manager_health_report)
|
||||
|
||||
def set_manager_nic(self, nic_addr, nic_config):
|
||||
# Get the manager ethernet interface uri
|
||||
nic_info = self.get_manager_ethernet_uri(nic_addr)
|
||||
# Get EthernetInterface collection
|
||||
response = self.get_request(self.root_uri + self.manager_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
if 'EthernetInterfaces' not in data:
|
||||
return {'ret': False, 'msg': "EthernetInterfaces resource not found"}
|
||||
ethernetinterfaces_uri = data["EthernetInterfaces"]["@odata.id"]
|
||||
response = self.get_request(self.root_uri + ethernetinterfaces_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
uris = [a.get('@odata.id') for a in data.get('Members', []) if
|
||||
a.get('@odata.id')]
|
||||
|
||||
if nic_info.get('nic_addr') is None:
|
||||
return nic_info
|
||||
else:
|
||||
target_ethernet_uri = nic_info['nic_addr']
|
||||
target_ethernet_current_setting = nic_info['ethernet_setting']
|
||||
# Find target EthernetInterface
|
||||
target_ethernet_uri = None
|
||||
target_ethernet_current_setting = None
|
||||
if nic_addr == 'null':
|
||||
# Find root_uri matched EthernetInterface when nic_addr is not specified
|
||||
nic_addr = (self.root_uri).split('/')[-1]
|
||||
nic_addr = nic_addr.split(':')[0] # split port if existing
|
||||
for uri in uris:
|
||||
response = self.get_request(self.root_uri + uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
if '"' + nic_addr.lower() + '"' in str(data).lower() or "'" + nic_addr.lower() + "'" in str(data).lower():
|
||||
target_ethernet_uri = uri
|
||||
target_ethernet_current_setting = data
|
||||
break
|
||||
if target_ethernet_uri is None:
|
||||
return {'ret': False, 'msg': "No matched EthernetInterface found under Manager"}
|
||||
|
||||
# Convert input to payload and check validity
|
||||
payload = {}
|
||||
@@ -2775,208 +2792,3 @@ class RedfishUtils(object):
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
return {'ret': True, 'changed': True, 'msg': "Modified Manager NIC"}
|
||||
|
||||
# A helper function to get the EthernetInterface URI
|
||||
def get_manager_ethernet_uri(self, nic_addr='null'):
|
||||
# Get EthernetInterface collection
|
||||
response = self.get_request(self.root_uri + self.manager_uri)
|
||||
if not response['ret']:
|
||||
return response
|
||||
data = response['data']
|
||||
if 'EthernetInterfaces' not in data:
|
||||
return {'ret': False, 'msg': "EthernetInterfaces resource not found"}
|
||||
ethernetinterfaces_uri = data["EthernetInterfaces"]["@odata.id"]
|
||||
response = self.get_request(self.root_uri + ethernetinterfaces_uri)
|
||||
if not response['ret']:
|
||||
return response
|
||||
data = response['data']
|
||||
uris = [a.get('@odata.id') for a in data.get('Members', []) if
|
||||
a.get('@odata.id')]
|
||||
|
||||
# Find target EthernetInterface
|
||||
target_ethernet_uri = None
|
||||
target_ethernet_current_setting = None
|
||||
if nic_addr == 'null':
|
||||
# Find root_uri matched EthernetInterface when nic_addr is not specified
|
||||
nic_addr = (self.root_uri).split('/')[-1]
|
||||
nic_addr = nic_addr.split(':')[0] # split port if existing
|
||||
for uri in uris:
|
||||
response = self.get_request(self.root_uri + uri)
|
||||
if not response['ret']:
|
||||
return response
|
||||
data = response['data']
|
||||
data_string = json.dumps(data)
|
||||
if nic_addr.lower() in data_string.lower():
|
||||
target_ethernet_uri = uri
|
||||
target_ethernet_current_setting = data
|
||||
break
|
||||
|
||||
nic_info = {}
|
||||
nic_info['nic_addr'] = target_ethernet_uri
|
||||
nic_info['ethernet_setting'] = target_ethernet_current_setting
|
||||
|
||||
if target_ethernet_uri is None:
|
||||
return {}
|
||||
else:
|
||||
return nic_info
|
||||
|
||||
def set_hostinterface_attributes(self, hostinterface_config, hostinterface_id=None):
|
||||
response = self.get_request(self.root_uri + self.manager_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
if 'HostInterfaces' not in data:
|
||||
return {'ret': False, 'msg': "HostInterfaces resource not found"}
|
||||
|
||||
hostinterfaces_uri = data["HostInterfaces"]["@odata.id"]
|
||||
response = self.get_request(self.root_uri + hostinterfaces_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
uris = [a.get('@odata.id') for a in data.get('Members', []) if a.get('@odata.id')]
|
||||
# Capture list of URIs that match a specified HostInterface resource ID
|
||||
if hostinterface_id:
|
||||
matching_hostinterface_uris = [uri for uri in uris if hostinterface_id in uri.split('/')[-1]]
|
||||
|
||||
if hostinterface_id and matching_hostinterface_uris:
|
||||
hostinterface_uri = list.pop(matching_hostinterface_uris)
|
||||
elif hostinterface_id and not matching_hostinterface_uris:
|
||||
return {'ret': False, 'msg': "HostInterface ID %s not present." % hostinterface_id}
|
||||
elif len(uris) == 1:
|
||||
hostinterface_uri = list.pop(uris)
|
||||
else:
|
||||
return {'ret': False, 'msg': "HostInterface ID not defined and multiple interfaces detected."}
|
||||
|
||||
response = self.get_request(self.root_uri + hostinterface_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
current_hostinterface_config = response['data']
|
||||
payload = {}
|
||||
for property in hostinterface_config.keys():
|
||||
value = hostinterface_config[property]
|
||||
if property not in current_hostinterface_config:
|
||||
return {'ret': False, 'msg': "Property %s in hostinterface_config is invalid" % property}
|
||||
if isinstance(value, dict):
|
||||
if isinstance(current_hostinterface_config[property], dict):
|
||||
payload[property] = value
|
||||
elif isinstance(current_hostinterface_config[property], list):
|
||||
payload[property] = list()
|
||||
payload[property].append(value)
|
||||
else:
|
||||
return {'ret': False, 'msg': "Value of property %s in hostinterface_config is invalid" % property}
|
||||
else:
|
||||
payload[property] = value
|
||||
|
||||
need_change = False
|
||||
for property in payload.keys():
|
||||
set_value = payload[property]
|
||||
cur_value = current_hostinterface_config[property]
|
||||
if not isinstance(set_value, dict) and not isinstance(set_value, list):
|
||||
if set_value != cur_value:
|
||||
need_change = True
|
||||
if isinstance(set_value, dict):
|
||||
for subprop in payload[property].keys():
|
||||
if subprop not in current_hostinterface_config[property]:
|
||||
need_change = True
|
||||
break
|
||||
sub_set_value = payload[property][subprop]
|
||||
sub_cur_value = current_hostinterface_config[property][subprop]
|
||||
if sub_set_value != sub_cur_value:
|
||||
need_change = True
|
||||
if isinstance(set_value, list):
|
||||
if len(set_value) != len(cur_value):
|
||||
need_change = True
|
||||
continue
|
||||
for i in range(len(set_value)):
|
||||
for subprop in payload[property][i].keys():
|
||||
if subprop not in current_hostinterface_config[property][i]:
|
||||
need_change = True
|
||||
break
|
||||
sub_set_value = payload[property][i][subprop]
|
||||
sub_cur_value = current_hostinterface_config[property][i][subprop]
|
||||
if sub_set_value != sub_cur_value:
|
||||
need_change = True
|
||||
if not need_change:
|
||||
return {'ret': True, 'changed': False, 'msg': "Host Interface already configured"}
|
||||
|
||||
response = self.patch_request(self.root_uri + hostinterface_uri, payload)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
return {'ret': True, 'changed': True, 'msg': "Modified Host Interface"}
|
||||
|
||||
def get_hostinterfaces(self):
|
||||
result = {}
|
||||
hostinterface_results = []
|
||||
properties = ['Id', 'Name', 'Description', 'HostInterfaceType', 'Status',
|
||||
'InterfaceEnabled', 'ExternallyAccessible', 'AuthenticationModes',
|
||||
'AuthNoneRoleId', 'CredentialBootstrapping']
|
||||
manager_uri_list = self.manager_uris
|
||||
for manager_uri in manager_uri_list:
|
||||
response = self.get_request(self.root_uri + manager_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
|
||||
result['ret'] = True
|
||||
data = response['data']
|
||||
|
||||
if 'HostInterfaces' in data:
|
||||
hostinterfaces_uri = data[u'HostInterfaces'][u'@odata.id']
|
||||
else:
|
||||
continue
|
||||
|
||||
response = self.get_request(self.root_uri + hostinterfaces_uri)
|
||||
data = response['data']
|
||||
|
||||
if 'Members' in data:
|
||||
for hostinterface in data['Members']:
|
||||
hostinterface_uri = hostinterface['@odata.id']
|
||||
hostinterface_response = self.get_request(self.root_uri + hostinterface_uri)
|
||||
# dictionary for capturing individual HostInterface properties
|
||||
hostinterface_data_temp = {}
|
||||
if hostinterface_response['ret'] is False:
|
||||
return hostinterface_response
|
||||
hostinterface_data = hostinterface_response['data']
|
||||
for property in properties:
|
||||
if property in hostinterface_data:
|
||||
if hostinterface_data[property] is not None:
|
||||
hostinterface_data_temp[property] = hostinterface_data[property]
|
||||
# Check for the presence of a ManagerEthernetInterface
|
||||
# object, a link to a _single_ EthernetInterface that the
|
||||
# BMC uses to communicate with the host.
|
||||
if 'ManagerEthernetInterface' in hostinterface_data:
|
||||
interface_uri = hostinterface_data['ManagerEthernetInterface']['@odata.id']
|
||||
interface_response = self.get_nic(interface_uri)
|
||||
if interface_response['ret'] is False:
|
||||
return interface_response
|
||||
hostinterface_data_temp['ManagerEthernetInterface'] = interface_response['entries']
|
||||
|
||||
# Check for the presence of a HostEthernetInterfaces
|
||||
# object, a link to a _collection_ of EthernetInterfaces
|
||||
# that the host uses to communicate with the BMC.
|
||||
if 'HostEthernetInterfaces' in hostinterface_data:
|
||||
interfaces_uri = hostinterface_data['HostEthernetInterfaces']['@odata.id']
|
||||
interfaces_response = self.get_request(self.root_uri + interfaces_uri)
|
||||
if interfaces_response['ret'] is False:
|
||||
return interfaces_response
|
||||
interfaces_data = interfaces_response['data']
|
||||
if 'Members' in interfaces_data:
|
||||
for interface in interfaces_data['Members']:
|
||||
interface_uri = interface['@odata.id']
|
||||
interface_response = self.get_nic(interface_uri)
|
||||
if interface_response['ret'] is False:
|
||||
return interface_response
|
||||
# Check if this is the first
|
||||
# HostEthernetInterfaces item and create empty
|
||||
# list if so.
|
||||
if 'HostEthernetInterfaces' not in hostinterface_data_temp:
|
||||
hostinterface_data_temp['HostEthernetInterfaces'] = []
|
||||
|
||||
hostinterface_data_temp['HostEthernetInterfaces'].append(interface_response['entries'])
|
||||
|
||||
hostinterface_results.append(hostinterface_data_temp)
|
||||
else:
|
||||
continue
|
||||
result["entries"] = hostinterface_results
|
||||
if not result["entries"]:
|
||||
return {'ret': False, 'msg': "No HostInterface objects found"}
|
||||
return result
|
||||
|
||||
@@ -15,6 +15,13 @@ from ansible.module_utils.urls import fetch_url, basic_auth_header
|
||||
class BitbucketHelper:
|
||||
BITBUCKET_API_URL = 'https://api.bitbucket.org'
|
||||
|
||||
error_messages = {
|
||||
'required_client_id': '`client_id` must be specified as a parameter or '
|
||||
'BITBUCKET_CLIENT_ID environment variable',
|
||||
'required_client_secret': '`client_secret` must be specified as a parameter or '
|
||||
'BITBUCKET_CLIENT_SECRET environment variable',
|
||||
}
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
self.access_token = None
|
||||
@@ -22,40 +29,35 @@ class BitbucketHelper:
|
||||
@staticmethod
|
||||
def bitbucket_argument_spec():
|
||||
return dict(
|
||||
client_id=dict(type='str', fallback=(env_fallback, ['BITBUCKET_CLIENT_ID'])),
|
||||
client_id=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_CLIENT_ID'])),
|
||||
client_secret=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_CLIENT_SECRET'])),
|
||||
# TODO:
|
||||
# - Rename user to username once current usage of username is removed
|
||||
# - Alias user to username and deprecate it
|
||||
user=dict(type='str', fallback=(env_fallback, ['BITBUCKET_USERNAME'])),
|
||||
password=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_PASSWORD'])),
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def bitbucket_required_one_of():
|
||||
return [['client_id', 'client_secret', 'user', 'password']]
|
||||
def check_arguments(self):
|
||||
if self.module.params['client_id'] is None:
|
||||
self.module.fail_json(msg=self.error_messages['required_client_id'])
|
||||
|
||||
@staticmethod
|
||||
def bitbucket_required_together():
|
||||
return [['client_id', 'client_secret'], ['user', 'password']]
|
||||
if self.module.params['client_secret'] is None:
|
||||
self.module.fail_json(msg=self.error_messages['required_client_secret'])
|
||||
|
||||
def fetch_access_token(self):
|
||||
if self.module.params['client_id'] and self.module.params['client_secret']:
|
||||
headers = {
|
||||
'Authorization': basic_auth_header(self.module.params['client_id'], self.module.params['client_secret']),
|
||||
}
|
||||
self.check_arguments()
|
||||
|
||||
info, content = self.request(
|
||||
api_url='https://bitbucket.org/site/oauth2/access_token',
|
||||
method='POST',
|
||||
data='grant_type=client_credentials',
|
||||
headers=headers,
|
||||
)
|
||||
headers = {
|
||||
'Authorization': basic_auth_header(self.module.params['client_id'], self.module.params['client_secret'])
|
||||
}
|
||||
|
||||
if info['status'] == 200:
|
||||
self.access_token = content['access_token']
|
||||
else:
|
||||
self.module.fail_json(msg='Failed to retrieve access token: {0}'.format(info))
|
||||
info, content = self.request(
|
||||
api_url='https://bitbucket.org/site/oauth2/access_token',
|
||||
method='POST',
|
||||
data='grant_type=client_credentials',
|
||||
headers=headers,
|
||||
)
|
||||
|
||||
if info['status'] == 200:
|
||||
self.access_token = content['access_token']
|
||||
else:
|
||||
self.module.fail_json(msg='Failed to retrieve access token: {0}'.format(info))
|
||||
|
||||
def request(self, api_url, method, data=None, headers=None):
|
||||
headers = headers or {}
|
||||
@@ -64,10 +66,6 @@ class BitbucketHelper:
|
||||
headers.update({
|
||||
'Authorization': 'Bearer {0}'.format(self.access_token),
|
||||
})
|
||||
elif self.module.params['user'] and self.module.params['password']:
|
||||
headers.update({
|
||||
'Authorization': basic_auth_header(self.module.params['user'], self.module.params['password']),
|
||||
})
|
||||
|
||||
if isinstance(data, dict):
|
||||
data = self.module.jsonify(data)
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
packaging/language/cargo.py
|
||||
@@ -422,7 +422,6 @@ import shutil
|
||||
import subprocess
|
||||
import tempfile
|
||||
import time
|
||||
import shlex
|
||||
|
||||
try:
|
||||
import lxc
|
||||
@@ -662,8 +661,9 @@ class LxcContainerManagement(object):
|
||||
"""
|
||||
|
||||
for key, value in variables_dict.items():
|
||||
build_command.append(str(key))
|
||||
build_command.append(str(value))
|
||||
build_command.append(
|
||||
'%s %s' % (key, value)
|
||||
)
|
||||
return build_command
|
||||
|
||||
def _get_vars(self, variables):
|
||||
@@ -686,6 +686,24 @@ class LxcContainerManagement(object):
|
||||
return_dict[v] = _var
|
||||
return return_dict
|
||||
|
||||
def _run_command(self, build_command, unsafe_shell=False):
|
||||
"""Return information from running an Ansible Command.
|
||||
|
||||
This will squash the build command list into a string and then
|
||||
execute the command via Ansible. The output is returned to the method.
|
||||
This output is returned as `return_code`, `stdout`, `stderr`.
|
||||
|
||||
:param build_command: Used for the command and all options.
|
||||
:type build_command: ``list``
|
||||
:param unsafe_shell: Enable or Disable unsafe sell commands.
|
||||
:type unsafe_shell: ``bol``
|
||||
"""
|
||||
|
||||
return self.module.run_command(
|
||||
' '.join(build_command),
|
||||
use_unsafe_shell=unsafe_shell
|
||||
)
|
||||
|
||||
def _config(self):
|
||||
"""Configure an LXC container.
|
||||
|
||||
@@ -792,7 +810,7 @@ class LxcContainerManagement(object):
|
||||
elif self.module.params.get('backing_store') == 'overlayfs':
|
||||
build_command.append('--snapshot')
|
||||
|
||||
rc, return_data, err = self.module.run_command(build_command)
|
||||
rc, return_data, err = self._run_command(build_command)
|
||||
if rc != 0:
|
||||
message = "Failed executing %s." % os.path.basename(clone_cmd)
|
||||
self.failure(
|
||||
@@ -825,7 +843,7 @@ class LxcContainerManagement(object):
|
||||
|
||||
build_command = [
|
||||
self.module.get_bin_path('lxc-create', True),
|
||||
'--name', self.container_name,
|
||||
'--name %s' % self.container_name,
|
||||
'--quiet'
|
||||
]
|
||||
|
||||
@@ -851,12 +869,10 @@ class LxcContainerManagement(object):
|
||||
log_path = os.getenv('HOME')
|
||||
|
||||
build_command.extend([
|
||||
'--logfile',
|
||||
os.path.join(
|
||||
'--logfile %s' % os.path.join(
|
||||
log_path, 'lxc-%s.log' % self.container_name
|
||||
),
|
||||
'--logpriority',
|
||||
self.module.params.get(
|
||||
'--logpriority %s' % self.module.params.get(
|
||||
'container_log_level'
|
||||
).upper()
|
||||
])
|
||||
@@ -864,10 +880,9 @@ class LxcContainerManagement(object):
|
||||
# Add the template commands to the end of the command if there are any
|
||||
template_options = self.module.params.get('template_options', None)
|
||||
if template_options:
|
||||
build_command.append('--')
|
||||
build_command += shlex.split(template_options)
|
||||
build_command.append('-- %s' % template_options)
|
||||
|
||||
rc, return_data, err = self.module.run_command(build_command)
|
||||
rc, return_data, err = self._run_command(build_command)
|
||||
if rc != 0:
|
||||
message = "Failed executing lxc-create."
|
||||
self.failure(
|
||||
@@ -1171,7 +1186,7 @@ class LxcContainerManagement(object):
|
||||
self.module.get_bin_path('lxc-config', True),
|
||||
"lxc.bdev.lvm.vg"
|
||||
]
|
||||
rc, vg, err = self.module.run_command(build_command)
|
||||
rc, vg, err = self._run_command(build_command)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
@@ -1189,7 +1204,7 @@ class LxcContainerManagement(object):
|
||||
build_command = [
|
||||
self.module.get_bin_path('lvs', True)
|
||||
]
|
||||
rc, stdout, err = self.module.run_command(build_command)
|
||||
rc, stdout, err = self._run_command(build_command)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
@@ -1216,7 +1231,7 @@ class LxcContainerManagement(object):
|
||||
'--units',
|
||||
'g'
|
||||
]
|
||||
rc, stdout, err = self.module.run_command(build_command)
|
||||
rc, stdout, err = self._run_command(build_command)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
@@ -1247,7 +1262,7 @@ class LxcContainerManagement(object):
|
||||
'--units',
|
||||
'g'
|
||||
]
|
||||
rc, stdout, err = self.module.run_command(build_command)
|
||||
rc, stdout, err = self._run_command(build_command)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
@@ -1296,7 +1311,7 @@ class LxcContainerManagement(object):
|
||||
os.path.join(vg, source_lv),
|
||||
"-L%sg" % snapshot_size_gb
|
||||
]
|
||||
rc, stdout, err = self.module.run_command(build_command)
|
||||
rc, stdout, err = self._run_command(build_command)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
@@ -1321,7 +1336,7 @@ class LxcContainerManagement(object):
|
||||
"/dev/%s/%s" % (vg, lv_name),
|
||||
mount_point,
|
||||
]
|
||||
rc, stdout, err = self.module.run_command(build_command)
|
||||
rc, stdout, err = self._run_command(build_command)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
@@ -1365,8 +1380,9 @@ class LxcContainerManagement(object):
|
||||
'.'
|
||||
]
|
||||
|
||||
rc, stdout, err = self.module.run_command(
|
||||
build_command
|
||||
rc, stdout, err = self._run_command(
|
||||
build_command=build_command,
|
||||
unsafe_shell=True
|
||||
)
|
||||
|
||||
os.umask(old_umask)
|
||||
@@ -1394,7 +1410,7 @@ class LxcContainerManagement(object):
|
||||
"-f",
|
||||
"%s/%s" % (vg, lv_name),
|
||||
]
|
||||
rc, stdout, err = self.module.run_command(build_command)
|
||||
rc, stdout, err = self._run_command(build_command)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
@@ -1426,10 +1442,11 @@ class LxcContainerManagement(object):
|
||||
self.module.get_bin_path('rsync', True),
|
||||
'-aHAX',
|
||||
fs_path,
|
||||
temp_dir,
|
||||
temp_dir
|
||||
]
|
||||
rc, stdout, err = self.module.run_command(
|
||||
rc, stdout, err = self._run_command(
|
||||
build_command,
|
||||
unsafe_shell=True
|
||||
)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
@@ -1450,7 +1467,7 @@ class LxcContainerManagement(object):
|
||||
self.module.get_bin_path('umount', True),
|
||||
mount_point,
|
||||
]
|
||||
rc, stdout, err = self.module.run_command(build_command)
|
||||
rc, stdout, err = self._run_command(build_command)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
@@ -1472,12 +1489,12 @@ class LxcContainerManagement(object):
|
||||
|
||||
build_command = [
|
||||
self.module.get_bin_path('mount', True),
|
||||
'-t', 'overlayfs',
|
||||
'-o', 'lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
|
||||
'-t overlayfs',
|
||||
'-o lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
|
||||
'overlayfs',
|
||||
mount_point,
|
||||
]
|
||||
rc, stdout, err = self.module.run_command(build_command)
|
||||
rc, stdout, err = self._run_command(build_command)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
|
||||
@@ -11,28 +11,29 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: lxd_container
|
||||
short_description: Manage LXD instances
|
||||
short_description: Manage LXD Containers
|
||||
description:
|
||||
- Management of LXD containers and virtual machines.
|
||||
- Management of LXD containers
|
||||
author: "Hiroaki Nakamura (@hnakamur)"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of an instance.
|
||||
- Name of a container.
|
||||
type: str
|
||||
required: true
|
||||
architecture:
|
||||
description:
|
||||
- 'The architecture for the instance (for example C(x86_64) or C(i686)).
|
||||
- 'The architecture for the container (for example C(x86_64) or C(i686)).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
|
||||
type: str
|
||||
required: false
|
||||
config:
|
||||
description:
|
||||
- 'The config for the instance (for example C({"limits.cpu": "2"})).
|
||||
- 'The config for the container (for example C({"limits.cpu": "2"})).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
|
||||
- If the instance already exists and its "config" values in metadata
|
||||
obtained from the LXD API U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#instances-containers-and-virtual-machines)
|
||||
- If the container already exists and its "config" values in metadata
|
||||
obtained from GET /1.0/containers/<name>
|
||||
U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersname)
|
||||
are different, this module tries to apply the configurations.
|
||||
- The keys starting with C(volatile.) are ignored for this comparison when I(ignore_volatile_options=true).
|
||||
type: dict
|
||||
@@ -42,32 +43,33 @@ options:
|
||||
- If set to C(true), options starting with C(volatile.) are ignored. As a result,
|
||||
they are reapplied for each execution.
|
||||
- This default behavior can be changed by setting this option to C(false).
|
||||
- The current default value C(true) is deprecated since community.general 4.0.0,
|
||||
and will change to C(false) in community.general 6.0.0.
|
||||
- The default value C(true) will be deprecated in community.general 4.0.0,
|
||||
and will change to C(false) in community.general 5.0.0.
|
||||
type: bool
|
||||
default: true
|
||||
required: false
|
||||
version_added: 3.7.0
|
||||
profiles:
|
||||
description:
|
||||
- Profile to be used by the instance.
|
||||
- Profile to be used by the container.
|
||||
type: list
|
||||
elements: str
|
||||
devices:
|
||||
description:
|
||||
- 'The devices for the instance
|
||||
- 'The devices for the container
|
||||
(for example C({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
|
||||
type: dict
|
||||
required: false
|
||||
ephemeral:
|
||||
description:
|
||||
- Whether or not the instance is ephemeral (for example C(true) or C(false)).
|
||||
- Whether or not the container is ephemeral (for example C(true) or C(false)).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).
|
||||
required: false
|
||||
type: bool
|
||||
source:
|
||||
description:
|
||||
- 'The source for the instance
|
||||
- 'The source for the container
|
||||
(e.g. { "type": "image",
|
||||
"mode": "pull",
|
||||
"server": "https://images.linuxcontainers.org",
|
||||
@@ -85,56 +87,39 @@ options:
|
||||
- absent
|
||||
- frozen
|
||||
description:
|
||||
- Define the state of an instance.
|
||||
- Define the state of a container.
|
||||
required: false
|
||||
default: started
|
||||
type: str
|
||||
target:
|
||||
description:
|
||||
- For cluster deployments. Will attempt to create an instance on a target node.
|
||||
If the instance exists elsewhere in a cluster, then it will not be replaced or moved.
|
||||
- For cluster deployments. Will attempt to create a container on a target node.
|
||||
If container exists elsewhere in a cluster, then container will not be replaced or moved.
|
||||
The name should respond to same name of the node you see in C(lxc cluster list).
|
||||
type: str
|
||||
required: false
|
||||
version_added: 1.0.0
|
||||
timeout:
|
||||
description:
|
||||
- A timeout for changing the state of the instance.
|
||||
- A timeout for changing the state of the container.
|
||||
- This is also used as a timeout for waiting until IPv4 addresses
|
||||
are set to the all network interfaces in the instance after
|
||||
are set to the all network interfaces in the container after
|
||||
starting or restarting.
|
||||
required: false
|
||||
default: 30
|
||||
type: int
|
||||
type:
|
||||
description:
|
||||
- Instance type can be either C(virtual-machine) or C(container).
|
||||
required: false
|
||||
default: container
|
||||
choices:
|
||||
- container
|
||||
- virtual-machine
|
||||
type: str
|
||||
version_added: 4.1.0
|
||||
wait_for_ipv4_addresses:
|
||||
description:
|
||||
- If this is true, the C(lxd_container) waits until IPv4 addresses
|
||||
are set to the all network interfaces in the instance after
|
||||
are set to the all network interfaces in the container after
|
||||
starting or restarting.
|
||||
required: false
|
||||
default: false
|
||||
type: bool
|
||||
wait_for_container:
|
||||
description:
|
||||
- If set to C(true), the tasks will wait till the task reports a
|
||||
success status when performing container operations.
|
||||
default: false
|
||||
type: bool
|
||||
version_added: 4.4.0
|
||||
force_stop:
|
||||
description:
|
||||
- If this is true, the C(lxd_container) forces to stop the instance
|
||||
when it stops or restarts the instance.
|
||||
- If this is true, the C(lxd_container) forces to stop the container
|
||||
when it stops or restarts the container.
|
||||
required: false
|
||||
default: false
|
||||
type: bool
|
||||
@@ -176,18 +161,18 @@ options:
|
||||
required: false
|
||||
type: str
|
||||
notes:
|
||||
- Instances can be a container or a virtual machine, both of them must have unique name. If you attempt to create an instance
|
||||
- Containers must have a unique name. If you attempt to create a container
|
||||
with a name that already existed in the users namespace the module will
|
||||
simply return as "unchanged".
|
||||
- There are two ways to run commands inside a container or virtual machine, using the command
|
||||
- There are two ways to run commands in containers, using the command
|
||||
module or using the ansible lxd connection plugin bundled in Ansible >=
|
||||
2.1, the later requires python to be installed in the instance which can
|
||||
2.1, the later requires python to be installed in the container which can
|
||||
be done with the command module.
|
||||
- You can copy a file from the host to the instance
|
||||
- You can copy a file from the host to the container
|
||||
with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) module and the `lxd` connection plugin.
|
||||
See the example below.
|
||||
- You can copy a file in the created instance to the localhost
|
||||
with `command=lxc file pull instance_name/dir/filename filename`.
|
||||
- You can copy a file in the created container to the localhost
|
||||
with `command=lxc file pull container_name/dir/filename filename`.
|
||||
See the first example below.
|
||||
'''
|
||||
|
||||
@@ -256,7 +241,6 @@ EXAMPLES = '''
|
||||
community.general.lxd_container:
|
||||
name: mycontainer
|
||||
state: absent
|
||||
type: container
|
||||
|
||||
# An example for restarting a container
|
||||
- hosts: localhost
|
||||
@@ -266,7 +250,6 @@ EXAMPLES = '''
|
||||
community.general.lxd_container:
|
||||
name: mycontainer
|
||||
state: restarted
|
||||
type: container
|
||||
|
||||
# An example for restarting a container using https to connect to the LXD server
|
||||
- hosts: localhost
|
||||
@@ -324,36 +307,16 @@ EXAMPLES = '''
|
||||
mode: pull
|
||||
alias: ubuntu/xenial/amd64
|
||||
target: node02
|
||||
|
||||
# An example for creating a virtual machine
|
||||
- hosts: localhost
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Create container on another node
|
||||
community.general.lxd_container:
|
||||
name: new-vm-1
|
||||
type: virtual-machine
|
||||
state: started
|
||||
ignore_volatile_options: true
|
||||
wait_for_ipv4_addresses: true
|
||||
profiles: ["default"]
|
||||
source:
|
||||
protocol: simplestreams
|
||||
type: image
|
||||
mode: pull
|
||||
server: https://images.linuxcontainers.org
|
||||
alias: debian/11
|
||||
timeout: 600
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
addresses:
|
||||
description: Mapping from the network device name to a list of IPv4 addresses in the instance.
|
||||
description: Mapping from the network device name to a list of IPv4 addresses in the container
|
||||
returned: when state is started or restarted
|
||||
type: dict
|
||||
sample: {"eth0": ["10.155.92.191"]}
|
||||
old_state:
|
||||
description: The old state of the instance.
|
||||
description: The old state of the container
|
||||
returned: when state is started or restarted
|
||||
type: str
|
||||
sample: "stopped"
|
||||
@@ -363,7 +326,7 @@ logs:
|
||||
type: list
|
||||
sample: "(too long to be placed here)"
|
||||
actions:
|
||||
description: List of actions performed for the instance.
|
||||
description: List of actions performed for the container.
|
||||
returned: success
|
||||
type: list
|
||||
sample: '["create", "start"]'
|
||||
@@ -421,16 +384,6 @@ class LXDContainerManagement(object):
|
||||
self.force_stop = self.module.params['force_stop']
|
||||
self.addresses = None
|
||||
self.target = self.module.params['target']
|
||||
self.wait_for_container = self.module.params['wait_for_container']
|
||||
|
||||
self.type = self.module.params['type']
|
||||
|
||||
# LXD Rest API provides additional endpoints for creating containers and virtual-machines.
|
||||
self.api_endpoint = None
|
||||
if self.type == 'container':
|
||||
self.api_endpoint = '/1.0/containers'
|
||||
elif self.type == 'virtual-machine':
|
||||
self.api_endpoint = '/1.0/virtual-machines'
|
||||
|
||||
self.key_file = self.module.params.get('client_key')
|
||||
if self.key_file is None:
|
||||
@@ -467,20 +420,20 @@ class LXDContainerManagement(object):
|
||||
if param_val is not None:
|
||||
self.config[attr] = param_val
|
||||
|
||||
def _get_instance_json(self):
|
||||
def _get_container_json(self):
|
||||
return self.client.do(
|
||||
'GET', '{0}/{1}'.format(self.api_endpoint, self.name),
|
||||
'GET', '/1.0/containers/{0}'.format(self.name),
|
||||
ok_error_codes=[404]
|
||||
)
|
||||
|
||||
def _get_instance_state_json(self):
|
||||
def _get_container_state_json(self):
|
||||
return self.client.do(
|
||||
'GET', '{0}/{1}/state'.format(self.api_endpoint, self.name),
|
||||
'GET', '/1.0/containers/{0}/state'.format(self.name),
|
||||
ok_error_codes=[404]
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _instance_json_to_module_state(resp_json):
|
||||
def _container_json_to_module_state(resp_json):
|
||||
if resp_json['type'] == 'error':
|
||||
return 'absent'
|
||||
return ANSIBLE_LXD_STATES[resp_json['metadata']['status']]
|
||||
@@ -489,45 +442,45 @@ class LXDContainerManagement(object):
|
||||
body_json = {'action': action, 'timeout': self.timeout}
|
||||
if force_stop:
|
||||
body_json['force'] = True
|
||||
return self.client.do('PUT', '{0}/{1}/state'.format(self.api_endpoint, self.name), body_json=body_json)
|
||||
return self.client.do('PUT', '/1.0/containers/{0}/state'.format(self.name), body_json=body_json)
|
||||
|
||||
def _create_instance(self):
|
||||
def _create_container(self):
|
||||
config = self.config.copy()
|
||||
config['name'] = self.name
|
||||
if self.target:
|
||||
self.client.do('POST', '{0}?{1}'.format(self.api_endpoint, urlencode(dict(target=self.target))), config, wait_for_container=self.wait_for_container)
|
||||
self.client.do('POST', '/1.0/containers?' + urlencode(dict(target=self.target)), config)
|
||||
else:
|
||||
self.client.do('POST', self.api_endpoint, config, wait_for_container=self.wait_for_container)
|
||||
self.client.do('POST', '/1.0/containers', config)
|
||||
self.actions.append('create')
|
||||
|
||||
def _start_instance(self):
|
||||
def _start_container(self):
|
||||
self._change_state('start')
|
||||
self.actions.append('start')
|
||||
|
||||
def _stop_instance(self):
|
||||
def _stop_container(self):
|
||||
self._change_state('stop', self.force_stop)
|
||||
self.actions.append('stop')
|
||||
|
||||
def _restart_instance(self):
|
||||
def _restart_container(self):
|
||||
self._change_state('restart', self.force_stop)
|
||||
self.actions.append('restart')
|
||||
|
||||
def _delete_instance(self):
|
||||
self.client.do('DELETE', '{0}/{1}'.format(self.api_endpoint, self.name))
|
||||
def _delete_container(self):
|
||||
self.client.do('DELETE', '/1.0/containers/{0}'.format(self.name))
|
||||
self.actions.append('delete')
|
||||
|
||||
def _freeze_instance(self):
|
||||
def _freeze_container(self):
|
||||
self._change_state('freeze')
|
||||
self.actions.append('freeze')
|
||||
|
||||
def _unfreeze_instance(self):
|
||||
def _unfreeze_container(self):
|
||||
self._change_state('unfreeze')
|
||||
self.actions.append('unfreez')
|
||||
|
||||
def _instance_ipv4_addresses(self, ignore_devices=None):
|
||||
def _container_ipv4_addresses(self, ignore_devices=None):
|
||||
ignore_devices = ['lo'] if ignore_devices is None else ignore_devices
|
||||
|
||||
resp_json = self._get_instance_state_json()
|
||||
resp_json = self._get_container_state_json()
|
||||
network = resp_json['metadata']['network'] or {}
|
||||
network = dict((k, v) for k, v in network.items() if k not in ignore_devices) or {}
|
||||
addresses = dict((k, [a['address'] for a in v['addresses'] if a['family'] == 'inet']) for k, v in network.items()) or {}
|
||||
@@ -542,7 +495,7 @@ class LXDContainerManagement(object):
|
||||
due = datetime.datetime.now() + datetime.timedelta(seconds=self.timeout)
|
||||
while datetime.datetime.now() < due:
|
||||
time.sleep(1)
|
||||
addresses = self._instance_ipv4_addresses()
|
||||
addresses = self._container_ipv4_addresses()
|
||||
if self._has_all_ipv4_addresses(addresses):
|
||||
self.addresses = addresses
|
||||
return
|
||||
@@ -552,72 +505,72 @@ class LXDContainerManagement(object):
|
||||
|
||||
def _started(self):
|
||||
if self.old_state == 'absent':
|
||||
self._create_instance()
|
||||
self._start_instance()
|
||||
self._create_container()
|
||||
self._start_container()
|
||||
else:
|
||||
if self.old_state == 'frozen':
|
||||
self._unfreeze_instance()
|
||||
self._unfreeze_container()
|
||||
elif self.old_state == 'stopped':
|
||||
self._start_instance()
|
||||
if self._needs_to_apply_instance_configs():
|
||||
self._apply_instance_configs()
|
||||
self._start_container()
|
||||
if self._needs_to_apply_container_configs():
|
||||
self._apply_container_configs()
|
||||
if self.wait_for_ipv4_addresses:
|
||||
self._get_addresses()
|
||||
|
||||
def _stopped(self):
|
||||
if self.old_state == 'absent':
|
||||
self._create_instance()
|
||||
self._create_container()
|
||||
else:
|
||||
if self.old_state == 'stopped':
|
||||
if self._needs_to_apply_instance_configs():
|
||||
self._start_instance()
|
||||
self._apply_instance_configs()
|
||||
self._stop_instance()
|
||||
if self._needs_to_apply_container_configs():
|
||||
self._start_container()
|
||||
self._apply_container_configs()
|
||||
self._stop_container()
|
||||
else:
|
||||
if self.old_state == 'frozen':
|
||||
self._unfreeze_instance()
|
||||
if self._needs_to_apply_instance_configs():
|
||||
self._apply_instance_configs()
|
||||
self._stop_instance()
|
||||
self._unfreeze_container()
|
||||
if self._needs_to_apply_container_configs():
|
||||
self._apply_container_configs()
|
||||
self._stop_container()
|
||||
|
||||
def _restarted(self):
|
||||
if self.old_state == 'absent':
|
||||
self._create_instance()
|
||||
self._start_instance()
|
||||
self._create_container()
|
||||
self._start_container()
|
||||
else:
|
||||
if self.old_state == 'frozen':
|
||||
self._unfreeze_instance()
|
||||
if self._needs_to_apply_instance_configs():
|
||||
self._apply_instance_configs()
|
||||
self._restart_instance()
|
||||
self._unfreeze_container()
|
||||
if self._needs_to_apply_container_configs():
|
||||
self._apply_container_configs()
|
||||
self._restart_container()
|
||||
if self.wait_for_ipv4_addresses:
|
||||
self._get_addresses()
|
||||
|
||||
def _destroyed(self):
|
||||
if self.old_state != 'absent':
|
||||
if self.old_state == 'frozen':
|
||||
self._unfreeze_instance()
|
||||
self._unfreeze_container()
|
||||
if self.old_state != 'stopped':
|
||||
self._stop_instance()
|
||||
self._delete_instance()
|
||||
self._stop_container()
|
||||
self._delete_container()
|
||||
|
||||
def _frozen(self):
|
||||
if self.old_state == 'absent':
|
||||
self._create_instance()
|
||||
self._start_instance()
|
||||
self._freeze_instance()
|
||||
self._create_container()
|
||||
self._start_container()
|
||||
self._freeze_container()
|
||||
else:
|
||||
if self.old_state == 'stopped':
|
||||
self._start_instance()
|
||||
if self._needs_to_apply_instance_configs():
|
||||
self._apply_instance_configs()
|
||||
self._freeze_instance()
|
||||
self._start_container()
|
||||
if self._needs_to_apply_container_configs():
|
||||
self._apply_container_configs()
|
||||
self._freeze_container()
|
||||
|
||||
def _needs_to_change_instance_config(self, key):
|
||||
def _needs_to_change_container_config(self, key):
|
||||
if key not in self.config:
|
||||
return False
|
||||
if key == 'config' and self.ignore_volatile_options: # the old behavior is to ignore configurations by keyword "volatile"
|
||||
old_configs = dict((k, v) for k, v in self.old_instance_json['metadata'][key].items() if not k.startswith('volatile.'))
|
||||
old_configs = dict((k, v) for k, v in self.old_container_json['metadata'][key].items() if not k.startswith('volatile.'))
|
||||
for k, v in self.config['config'].items():
|
||||
if k not in old_configs:
|
||||
return True
|
||||
@@ -625,7 +578,7 @@ class LXDContainerManagement(object):
|
||||
return True
|
||||
return False
|
||||
elif key == 'config': # next default behavior
|
||||
old_configs = dict((k, v) for k, v in self.old_instance_json['metadata'][key].items())
|
||||
old_configs = dict((k, v) for k, v in self.old_container_json['metadata'][key].items())
|
||||
for k, v in self.config['config'].items():
|
||||
if k not in old_configs:
|
||||
return True
|
||||
@@ -633,41 +586,39 @@ class LXDContainerManagement(object):
|
||||
return True
|
||||
return False
|
||||
else:
|
||||
old_configs = self.old_instance_json['metadata'][key]
|
||||
old_configs = self.old_container_json['metadata'][key]
|
||||
return self.config[key] != old_configs
|
||||
|
||||
def _needs_to_apply_instance_configs(self):
|
||||
def _needs_to_apply_container_configs(self):
|
||||
return (
|
||||
self._needs_to_change_instance_config('architecture') or
|
||||
self._needs_to_change_instance_config('config') or
|
||||
self._needs_to_change_instance_config('ephemeral') or
|
||||
self._needs_to_change_instance_config('devices') or
|
||||
self._needs_to_change_instance_config('profiles')
|
||||
self._needs_to_change_container_config('architecture') or
|
||||
self._needs_to_change_container_config('config') or
|
||||
self._needs_to_change_container_config('ephemeral') or
|
||||
self._needs_to_change_container_config('devices') or
|
||||
self._needs_to_change_container_config('profiles')
|
||||
)
|
||||
|
||||
def _apply_instance_configs(self):
|
||||
old_metadata = self.old_instance_json['metadata']
|
||||
def _apply_container_configs(self):
|
||||
old_metadata = self.old_container_json['metadata']
|
||||
body_json = {
|
||||
'architecture': old_metadata['architecture'],
|
||||
'config': old_metadata['config'],
|
||||
'devices': old_metadata['devices'],
|
||||
'profiles': old_metadata['profiles']
|
||||
}
|
||||
|
||||
if self._needs_to_change_instance_config('architecture'):
|
||||
if self._needs_to_change_container_config('architecture'):
|
||||
body_json['architecture'] = self.config['architecture']
|
||||
if self._needs_to_change_instance_config('config'):
|
||||
if self._needs_to_change_container_config('config'):
|
||||
for k, v in self.config['config'].items():
|
||||
body_json['config'][k] = v
|
||||
if self._needs_to_change_instance_config('ephemeral'):
|
||||
if self._needs_to_change_container_config('ephemeral'):
|
||||
body_json['ephemeral'] = self.config['ephemeral']
|
||||
if self._needs_to_change_instance_config('devices'):
|
||||
if self._needs_to_change_container_config('devices'):
|
||||
body_json['devices'] = self.config['devices']
|
||||
if self._needs_to_change_instance_config('profiles'):
|
||||
if self._needs_to_change_container_config('profiles'):
|
||||
body_json['profiles'] = self.config['profiles']
|
||||
|
||||
self.client.do('PUT', '{0}/{1}'.format(self.api_endpoint, self.name), body_json=body_json)
|
||||
self.actions.append('apply_instance_configs')
|
||||
self.client.do('PUT', '/1.0/containers/{0}'.format(self.name), body_json=body_json)
|
||||
self.actions.append('apply_container_configs')
|
||||
|
||||
def run(self):
|
||||
"""Run the main method."""
|
||||
@@ -677,8 +628,8 @@ class LXDContainerManagement(object):
|
||||
self.client.authenticate(self.trust_password)
|
||||
self.ignore_volatile_options = self.module.params.get('ignore_volatile_options')
|
||||
|
||||
self.old_instance_json = self._get_instance_json()
|
||||
self.old_state = self._instance_json_to_module_state(self.old_instance_json)
|
||||
self.old_container_json = self._get_container_json()
|
||||
self.old_state = self._container_json_to_module_state(self.old_container_json)
|
||||
action = getattr(self, LXD_ANSIBLE_STATES[self.state])
|
||||
action()
|
||||
|
||||
@@ -723,6 +674,7 @@ def main():
|
||||
),
|
||||
ignore_volatile_options=dict(
|
||||
type='bool',
|
||||
default=True
|
||||
),
|
||||
devices=dict(
|
||||
type='dict',
|
||||
@@ -748,15 +700,6 @@ def main():
|
||||
type='int',
|
||||
default=30
|
||||
),
|
||||
type=dict(
|
||||
type='str',
|
||||
default='container',
|
||||
choices=['container', 'virtual-machine'],
|
||||
),
|
||||
wait_for_container=dict(
|
||||
type='bool',
|
||||
default=False
|
||||
),
|
||||
wait_for_ipv4_addresses=dict(
|
||||
type='bool',
|
||||
default=False
|
||||
@@ -785,17 +728,13 @@ def main():
|
||||
),
|
||||
supports_check_mode=False,
|
||||
)
|
||||
|
||||
if module.params['ignore_volatile_options'] is None:
|
||||
module.params['ignore_volatile_options'] = True
|
||||
module.deprecate(
|
||||
'If the keyword "volatile" is used in a playbook in the config'
|
||||
'section, a "changed" message will appear with every run, even without a change'
|
||||
'to the playbook.'
|
||||
'This will change in the future. Please test your scripts'
|
||||
'by "ignore_volatile_options: false". To keep the old behavior, set that option explicitly to "true"',
|
||||
version='6.0.0', collection_name='community.general')
|
||||
|
||||
# if module.params['ignore_volatile_options'] is None:
|
||||
# module.params['ignore_volatile_options'] = True
|
||||
# module.deprecate(
|
||||
# 'If the keyword "volatile" is used in a playbook in the config section, a
|
||||
# "changed" message will appear with every run, even without a change to the playbook.
|
||||
# This will change in the future.
|
||||
# Please test your scripts by "ignore_volatile_options: false"', version='5.0.0', collection_name='community.general')
|
||||
lxd_manage = LXDContainerManagement(module=module)
|
||||
lxd_manage.run()
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@ short_description: management of instances in Proxmox VE cluster
|
||||
description:
|
||||
- allows you to create/delete/stop instances in Proxmox VE cluster
|
||||
- Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older)
|
||||
- Since community.general 4.0.0 on, there are no more default values, see I(proxmox_default_behavior).
|
||||
- From community.general 4.0.0 on, there will be no default values, see I(proxmox_default_behavior).
|
||||
options:
|
||||
password:
|
||||
description:
|
||||
@@ -40,27 +40,37 @@ options:
|
||||
comma-delimited list C([volume=]<volume> [,acl=<1|0>] [,mountoptions=<opt[;opt...]>] [,quota=<1|0>]
|
||||
[,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=<DiskSize>])."
|
||||
- See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description.
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(3).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(3). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: str
|
||||
cores:
|
||||
description:
|
||||
- Specify number of cores per socket.
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: int
|
||||
cpus:
|
||||
description:
|
||||
- numbers of allocated cpus for instance
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: int
|
||||
memory:
|
||||
description:
|
||||
- memory size in MB for instance
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(512).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(512). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: int
|
||||
swap:
|
||||
description:
|
||||
- swap memory size in MB for instance
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(0).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(0). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: int
|
||||
netif:
|
||||
description:
|
||||
@@ -84,7 +94,9 @@ options:
|
||||
onboot:
|
||||
description:
|
||||
- specifies whether a VM will be started during system bootup
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(no).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: bool
|
||||
storage:
|
||||
description:
|
||||
@@ -94,7 +106,9 @@ options:
|
||||
cpuunits:
|
||||
description:
|
||||
- CPU weight for a VM
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1000).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(1000). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: int
|
||||
nameserver:
|
||||
description:
|
||||
@@ -154,38 +168,20 @@ options:
|
||||
version_added: '0.2.0'
|
||||
proxmox_default_behavior:
|
||||
description:
|
||||
- As of community.general 4.0.0, various options no longer have default values.
|
||||
These default values caused problems when users expected different behavior from Proxmox
|
||||
by default or filled options which caused problems when set.
|
||||
- The value C(compatibility) (default before community.general 4.0.0) will ensure that the default values
|
||||
are used when the values are not explicitly specified by the user. The new default is C(no_defaults),
|
||||
which makes sure these options have no defaults.
|
||||
- Various module options used to have default values. This cause problems when
|
||||
user expects different behavior from proxmox by default or fill options which cause
|
||||
problems when they have been set.
|
||||
- The default value is C(compatibility), which will ensure that the default values
|
||||
are used when the values are not explicitly specified by the user.
|
||||
- From community.general 4.0.0 on, the default value will switch to C(no_defaults). To avoid
|
||||
deprecation warnings, please set I(proxmox_default_behavior) to an explicit
|
||||
value.
|
||||
- This affects the I(disk), I(cores), I(cpus), I(memory), I(onboot), I(swap), I(cpuunits) options.
|
||||
type: str
|
||||
default: no_defaults
|
||||
choices:
|
||||
- compatibility
|
||||
- no_defaults
|
||||
version_added: "1.3.0"
|
||||
clone:
|
||||
description:
|
||||
- ID of the container to be cloned.
|
||||
- I(description), I(hostname), and I(pool) will be copied from the cloned container if not specified.
|
||||
- The type of clone created is defined by the I(clone_type) parameter.
|
||||
- This operator is only supported for Proxmox clusters that use LXC containerization (PVE version >= 4).
|
||||
type: int
|
||||
version_added: 4.3.0
|
||||
clone_type:
|
||||
description:
|
||||
- Type of the clone created.
|
||||
- C(full) creates a full clone, and I(storage) must be specified.
|
||||
- C(linked) creates a linked clone, and the cloned container must be a template container.
|
||||
- C(opportunistic) creates a linked clone if the cloned container is a template container, and a full clone if not.
|
||||
I(storage) may be specified, if not it will fall back to the default.
|
||||
type: str
|
||||
choices: ['full', 'linked', 'opportunistic']
|
||||
default: opportunistic
|
||||
version_added: 4.3.0
|
||||
author: Sergei Antipov (@UnderGreen)
|
||||
extends_documentation_fragment:
|
||||
- community.general.proxmox.documentation
|
||||
@@ -311,28 +307,6 @@ EXAMPLES = r'''
|
||||
- nesting=1
|
||||
- mount=cifs,nfs
|
||||
|
||||
- name: >
|
||||
Create a linked clone of the template container with id 100. The newly created container with be a
|
||||
linked clone, because no storage parameter is defined
|
||||
community.general.proxmox:
|
||||
vmid: 201
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
clone: 100
|
||||
hostname: clone.example.org
|
||||
|
||||
- name: Create a full clone of the container with id 100
|
||||
community.general.proxmox:
|
||||
vmid: 201
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
clone: 100
|
||||
hostname: clone.example.org
|
||||
storage: local
|
||||
|
||||
- name: Start container
|
||||
community.general.proxmox:
|
||||
@@ -430,13 +404,6 @@ def content_check(proxmox, node, ostemplate, template_store):
|
||||
return [True for cnt in proxmox.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate]
|
||||
|
||||
|
||||
def is_template_container(proxmox, node, vmid):
|
||||
"""Check if the specified container is a template."""
|
||||
proxmox_node = proxmox.nodes(node)
|
||||
config = getattr(proxmox_node, VZ_TYPE)(vmid).config.get()
|
||||
return config['template']
|
||||
|
||||
|
||||
def node_check(proxmox, node):
|
||||
return [True for nd in proxmox.nodes.get() if nd['node'] == node]
|
||||
|
||||
@@ -446,10 +413,8 @@ def proxmox_version(proxmox):
|
||||
return LooseVersion(apireturn['version'])
|
||||
|
||||
|
||||
def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, clone, **kwargs):
|
||||
def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs):
|
||||
proxmox_node = proxmox.nodes(node)
|
||||
|
||||
# Remove all empty kwarg entries
|
||||
kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
|
||||
|
||||
if VZ_TYPE == 'lxc':
|
||||
@@ -469,49 +434,7 @@ def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, sw
|
||||
kwargs['cpus'] = cpus
|
||||
kwargs['disk'] = disk
|
||||
|
||||
if clone is not None:
|
||||
if VZ_TYPE != 'lxc':
|
||||
module.fail_json(changed=False, msg="Clone operator is only supported for LXC enabled proxmox clusters.")
|
||||
|
||||
clone_is_template = is_template_container(proxmox, node, clone)
|
||||
|
||||
# By default, create a full copy only when the cloned container is not a template.
|
||||
create_full_copy = not clone_is_template
|
||||
|
||||
# Only accept parameters that are compatible with the clone endpoint.
|
||||
valid_clone_parameters = ['hostname', 'pool', 'description']
|
||||
if module.params['storage'] is not None and clone_is_template:
|
||||
# Cloning a template, so create a full copy instead of a linked copy
|
||||
create_full_copy = True
|
||||
elif module.params['storage'] is None and not clone_is_template:
|
||||
# Not cloning a template, but also no defined storage. This isn't possible.
|
||||
module.fail_json(changed=False, msg="Cloned container is not a template, storage needs to be specified.")
|
||||
|
||||
if module.params['clone_type'] == 'linked':
|
||||
if not clone_is_template:
|
||||
module.fail_json(changed=False, msg="'linked' clone type is specified, but cloned container is not a template container.")
|
||||
# Don't need to do more, by default create_full_copy is set to false already
|
||||
elif module.params['clone_type'] == 'opportunistic':
|
||||
if not clone_is_template:
|
||||
# Cloned container is not a template, so we need our 'storage' parameter
|
||||
valid_clone_parameters.append('storage')
|
||||
elif module.params['clone_type'] == 'full':
|
||||
create_full_copy = True
|
||||
valid_clone_parameters.append('storage')
|
||||
|
||||
clone_parameters = {}
|
||||
|
||||
if create_full_copy:
|
||||
clone_parameters['full'] = '1'
|
||||
else:
|
||||
clone_parameters['full'] = '0'
|
||||
for param in valid_clone_parameters:
|
||||
if module.params[param] is not None:
|
||||
clone_parameters[param] = module.params[param]
|
||||
|
||||
taskid = getattr(proxmox_node, VZ_TYPE)(clone).clone.post(newid=vmid, **clone_parameters)
|
||||
else:
|
||||
taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
|
||||
taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
|
||||
|
||||
while timeout:
|
||||
if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
@@ -611,20 +534,11 @@ def main():
|
||||
unprivileged=dict(type='bool', default=False),
|
||||
description=dict(type='str'),
|
||||
hookscript=dict(type='str'),
|
||||
proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']),
|
||||
clone=dict(type='int'),
|
||||
clone_type=dict(default='opportunistic', choices=['full', 'linked', 'opportunistic']),
|
||||
proxmox_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
|
||||
),
|
||||
required_if=[
|
||||
('state', 'present', ['node', 'hostname']),
|
||||
('state', 'present', ('clone', 'ostemplate'), True), # Require one of clone and ostemplate. Together with mutually_exclusive this ensures that we
|
||||
# either clone a container or create a new one from a template file.
|
||||
],
|
||||
required_together=[
|
||||
('api_token_id', 'api_token_secret')
|
||||
],
|
||||
required_if=[('state', 'present', ['node', 'hostname', 'ostemplate'])],
|
||||
required_together=[('api_token_id', 'api_token_secret')],
|
||||
required_one_of=[('api_password', 'api_token_id')],
|
||||
mutually_exclusive=[('clone', 'ostemplate')], # Creating a new container is done either by cloning an existing one, or based on a template.
|
||||
)
|
||||
|
||||
if not HAS_PROXMOXER:
|
||||
@@ -648,8 +562,14 @@ def main():
|
||||
if module.params['ostemplate'] is not None:
|
||||
template_store = module.params['ostemplate'].split(":")[0]
|
||||
timeout = module.params['timeout']
|
||||
clone = module.params['clone']
|
||||
|
||||
if module.params['proxmox_default_behavior'] is None:
|
||||
module.params['proxmox_default_behavior'] = 'compatibility'
|
||||
module.deprecate(
|
||||
'The proxmox_default_behavior option will change its default value from "compatibility" to '
|
||||
'"no_defaults" in community.general 4.0.0. To remove this warning, please specify an explicit value for it now',
|
||||
version='4.0.0', collection_name='community.general'
|
||||
)
|
||||
if module.params['proxmox_default_behavior'] == 'compatibility':
|
||||
old_default_values = dict(
|
||||
disk="3",
|
||||
@@ -690,8 +610,7 @@ def main():
|
||||
elif not vmid:
|
||||
module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
|
||||
|
||||
# Create a new container
|
||||
if state == 'present' and clone is None:
|
||||
if state == 'present':
|
||||
try:
|
||||
if get_instance(proxmox, vmid) and not module.params['force']:
|
||||
module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
|
||||
@@ -703,11 +622,8 @@ def main():
|
||||
elif not content_check(proxmox, node, module.params['ostemplate'], template_store):
|
||||
module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s"
|
||||
% (module.params['ostemplate'], node, template_store))
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Pre-creation checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
|
||||
|
||||
try:
|
||||
create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, clone,
|
||||
create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout,
|
||||
cores=module.params['cores'],
|
||||
pool=module.params['pool'],
|
||||
password=module.params['password'],
|
||||
@@ -727,29 +643,9 @@ def main():
|
||||
description=module.params['description'],
|
||||
hookscript=module.params['hookscript'])
|
||||
|
||||
module.exit_json(changed=True, msg="Deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
|
||||
module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
|
||||
|
||||
# Clone a container
|
||||
elif state == 'present' and clone is not None:
|
||||
try:
|
||||
if get_instance(proxmox, vmid) and not module.params['force']:
|
||||
module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
|
||||
# If no vmid was passed, there cannot be another VM named 'hostname'
|
||||
if not module.params['vmid'] and get_vmid(proxmox, hostname) and not module.params['force']:
|
||||
module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, get_vmid(proxmox, hostname)[0]))
|
||||
if not get_instance(proxmox, clone):
|
||||
module.exit_json(changed=False, msg="Container to be cloned does not exist")
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Pre-clone checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
|
||||
|
||||
try:
|
||||
create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, clone)
|
||||
|
||||
module.exit_json(changed=True, msg="Cloned VM %s from %s" % (vmid, clone))
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Cloning %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
|
||||
module.fail_json(msg="creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
|
||||
|
||||
elif state == 'started':
|
||||
try:
|
||||
|
||||
@@ -76,7 +76,7 @@ proxmox_domains:
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible_collections.community.general.plugins.module_utils.proxmox import (
|
||||
proxmox_auth_argument_spec, ProxmoxAnsible)
|
||||
proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
|
||||
|
||||
|
||||
class ProxmoxDomainInfoAnsible(ProxmoxAnsible):
|
||||
@@ -114,6 +114,9 @@ def main():
|
||||
changed=False
|
||||
)
|
||||
|
||||
if not HAS_PROXMOXER:
|
||||
module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
|
||||
|
||||
proxmox = ProxmoxDomainInfoAnsible(module)
|
||||
domain = module.params['domain']
|
||||
|
||||
|
||||
@@ -73,7 +73,7 @@ proxmox_groups:
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible_collections.community.general.plugins.module_utils.proxmox import (
|
||||
proxmox_auth_argument_spec, ProxmoxAnsible)
|
||||
proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
|
||||
|
||||
|
||||
class ProxmoxGroupInfoAnsible(ProxmoxAnsible):
|
||||
@@ -124,6 +124,9 @@ def main():
|
||||
changed=False
|
||||
)
|
||||
|
||||
if not HAS_PROXMOXER:
|
||||
module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
|
||||
|
||||
proxmox = ProxmoxGroupInfoAnsible(module)
|
||||
group = module.params['group']
|
||||
|
||||
|
||||
@@ -13,13 +13,15 @@ module: proxmox_kvm
|
||||
short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster.
|
||||
description:
|
||||
- Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster.
|
||||
- Since community.general 4.0.0 on, there are no more default values, see I(proxmox_default_behavior).
|
||||
- From community.general 4.0.0 on, there will be no default values, see I(proxmox_default_behavior).
|
||||
author: "Abdoul Bah (@helldorado) <bahabdoul at gmail.com>"
|
||||
options:
|
||||
acpi:
|
||||
description:
|
||||
- Specify if ACPI should be enabled/disabled.
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(yes).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(yes). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: bool
|
||||
agent:
|
||||
description:
|
||||
@@ -29,19 +31,24 @@ options:
|
||||
description:
|
||||
- Pass arbitrary arguments to kvm.
|
||||
- This option is for experts only!
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity), this option has a default of
|
||||
C(-serial unix:/var/run/qemu-server/<vmid>.serial,server,nowait).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(-serial unix:/var/run/qemu-server/<vmid>.serial,server,nowait).
|
||||
Note that the default value of I(proxmox_default_behavior) changes in community.general 4.0.0.
|
||||
type: str
|
||||
autostart:
|
||||
description:
|
||||
- Specify if the VM should be automatically restarted after crash (currently ignored in PVE API).
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(no).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: bool
|
||||
balloon:
|
||||
description:
|
||||
- Specify the amount of RAM for the VM in MB.
|
||||
- Using zero disables the balloon driver.
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(0).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(0). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: int
|
||||
bios:
|
||||
description:
|
||||
@@ -52,7 +59,9 @@ options:
|
||||
description:
|
||||
- Specify the boot order -> boot on floppy C(a), hard disk C(c), CD-ROM C(d), or network C(n).
|
||||
- You can combine to set order.
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(cnd).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(cnd). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: str
|
||||
bootdisk:
|
||||
description:
|
||||
@@ -88,12 +97,16 @@ options:
|
||||
cores:
|
||||
description:
|
||||
- Specify number of cores per socket.
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: int
|
||||
cpu:
|
||||
description:
|
||||
- Specify emulated CPU type.
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(kvm64).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(kvm64). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: str
|
||||
cpulimit:
|
||||
description:
|
||||
@@ -104,7 +117,9 @@ options:
|
||||
description:
|
||||
- Specify CPU weight for a VM.
|
||||
- You can disable fair-scheduler configuration by setting this to 0
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1000).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(1000). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: int
|
||||
delete:
|
||||
description:
|
||||
@@ -124,15 +139,19 @@ options:
|
||||
description:
|
||||
- Allow to force stop VM.
|
||||
- Can be used with states C(stopped), C(restarted) and C(absent).
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(no).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: bool
|
||||
format:
|
||||
description:
|
||||
- Target drive's backing file's data format.
|
||||
- Used only with clone
|
||||
- Use I(format=unspecified) and I(full=false) for a linked clone.
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(qcow2).
|
||||
If I(proxmox_default_behavior) is set to C(no_defaults), not specifying this option is equivalent to setting it to C(unspecified).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(qcow2). If I(proxmox_default_behavior) is set to C(no_defaults),
|
||||
not specifying this option is equivalent to setting it to C(unspecified).
|
||||
Note that the default value of I(proxmox_default_behavior) changes in community.general 4.0.0.
|
||||
type: str
|
||||
choices: [ "cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk", "unspecified" ]
|
||||
freeze:
|
||||
@@ -197,7 +216,9 @@ options:
|
||||
kvm:
|
||||
description:
|
||||
- Enable/disable KVM hardware virtualization.
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(yes).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(yes). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: bool
|
||||
localtime:
|
||||
description:
|
||||
@@ -217,7 +238,9 @@ options:
|
||||
memory:
|
||||
description:
|
||||
- Memory size in MB for instance.
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(512).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(512). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: int
|
||||
migrate_downtime:
|
||||
description:
|
||||
@@ -273,13 +296,17 @@ options:
|
||||
onboot:
|
||||
description:
|
||||
- Specifies whether a VM will be started during system bootup.
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(yes).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(yes). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: bool
|
||||
ostype:
|
||||
description:
|
||||
- Specifies guest operating system. This is used to enable special optimization/features for specific operating systems.
|
||||
- The l26 is Linux 2.6/3.X Kernel.
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(l26).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(l26). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: str
|
||||
choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'l24', 'l26', 'solaris']
|
||||
parallel:
|
||||
@@ -360,7 +387,9 @@ options:
|
||||
sockets:
|
||||
description:
|
||||
- Sets the number of CPU sockets. (1 - N).
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: int
|
||||
sshkeys:
|
||||
description:
|
||||
@@ -392,7 +421,9 @@ options:
|
||||
tablet:
|
||||
description:
|
||||
- Enables/disables the USB tablet device.
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(no).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: bool
|
||||
tags:
|
||||
description:
|
||||
@@ -414,7 +445,9 @@ options:
|
||||
template:
|
||||
description:
|
||||
- Enables/disables the template.
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(no).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: bool
|
||||
timeout:
|
||||
description:
|
||||
@@ -436,7 +469,9 @@ options:
|
||||
vga:
|
||||
description:
|
||||
- Select VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use option 'std' or 'vmware'.
|
||||
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(std).
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(std). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: str
|
||||
choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']
|
||||
virtio:
|
||||
@@ -454,17 +489,18 @@ options:
|
||||
type: str
|
||||
proxmox_default_behavior:
|
||||
description:
|
||||
- As of community.general 4.0.0, various options no longer have default values.
|
||||
These default values caused problems when users expected different behavior from Proxmox
|
||||
by default or filled options which caused problems when set.
|
||||
- The value C(compatibility) (default before community.general 4.0.0) will ensure that the default values
|
||||
are used when the values are not explicitly specified by the user. The new default is C(no_defaults),
|
||||
which makes sure these options have no defaults.
|
||||
- Various module options used to have default values. This cause problems when
|
||||
user expects different behavior from proxmox by default or fill options which cause
|
||||
problems when they have been set.
|
||||
- The default value is C(compatibility), which will ensure that the default values
|
||||
are used when the values are not explicitly specified by the user.
|
||||
- From community.general 4.0.0 on, the default value will switch to C(no_defaults). To avoid
|
||||
deprecation warnings, please set I(proxmox_default_behavior) to an explicit
|
||||
value.
|
||||
- This affects the I(acpi), I(autostart), I(balloon), I(boot), I(cores), I(cpu),
|
||||
I(cpuunits), I(force), I(format), I(kvm), I(memory), I(onboot), I(ostype), I(sockets),
|
||||
I(tablet), I(template), I(vga), options.
|
||||
type: str
|
||||
default: no_defaults
|
||||
choices:
|
||||
- compatibility
|
||||
- no_defaults
|
||||
@@ -1056,7 +1092,7 @@ def main():
|
||||
virtio=dict(type='dict'),
|
||||
vmid=dict(type='int'),
|
||||
watchdog=dict(),
|
||||
proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']),
|
||||
proxmox_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
|
||||
),
|
||||
mutually_exclusive=[('delete', 'revert'), ('delete', 'update'), ('revert', 'update'), ('clone', 'update'), ('clone', 'delete'), ('clone', 'revert')],
|
||||
required_together=[('api_token_id', 'api_token_secret')],
|
||||
@@ -1087,6 +1123,13 @@ def main():
|
||||
vmid = module.params['vmid']
|
||||
validate_certs = module.params['validate_certs']
|
||||
|
||||
if module.params['proxmox_default_behavior'] is None:
|
||||
module.params['proxmox_default_behavior'] = 'compatibility'
|
||||
module.deprecate(
|
||||
'The proxmox_default_behavior option will change its default value from "compatibility" to '
|
||||
'"no_defaults" in community.general 4.0.0. To remove this warning, please specify an explicit value for it now',
|
||||
version='4.0.0', collection_name='community.general'
|
||||
)
|
||||
if module.params['proxmox_default_behavior'] == 'compatibility':
|
||||
old_default_values = dict(
|
||||
acpi=True,
|
||||
|
||||
@@ -197,7 +197,7 @@ def main():
|
||||
)
|
||||
|
||||
if not HAS_PROXMOXER:
|
||||
module.fail_json(msg=missing_required_lib('proxmoxer'),
|
||||
module.fail_json(msg=missing_required_lib('python-proxmoxer'),
|
||||
exception=PROXMOXER_IMP_ERR)
|
||||
|
||||
state = module.params['state']
|
||||
|
||||
@@ -111,7 +111,7 @@ proxmox_storages:
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible_collections.community.general.plugins.module_utils.proxmox import (
|
||||
proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool)
|
||||
proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR, proxmox_to_ansible_bool)
|
||||
|
||||
|
||||
class ProxmoxStorageInfoAnsible(ProxmoxAnsible):
|
||||
@@ -170,6 +170,9 @@ def main():
|
||||
changed=False
|
||||
)
|
||||
|
||||
if not HAS_PROXMOXER:
|
||||
module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
|
||||
|
||||
proxmox = ProxmoxStorageInfoAnsible(module)
|
||||
storage = module.params['storage']
|
||||
storagetype = module.params['type']
|
||||
|
||||
@@ -116,7 +116,7 @@ msg:
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible_collections.community.general.plugins.module_utils.proxmox import (
|
||||
proxmox_auth_argument_spec, ProxmoxAnsible)
|
||||
proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
|
||||
|
||||
|
||||
class ProxmoxTaskInfoAnsible(ProxmoxAnsible):
|
||||
@@ -163,6 +163,9 @@ def main():
|
||||
supports_check_mode=True)
|
||||
result = dict(changed=False)
|
||||
|
||||
if not HAS_PROXMOXER:
|
||||
module.fail_json(msg=missing_required_lib(
|
||||
'proxmoxer'), exception=PROXMOXER_IMP_ERR)
|
||||
proxmox = ProxmoxTaskInfoAnsible(module)
|
||||
upid = module.params['task']
|
||||
node = module.params['node']
|
||||
|
||||
@@ -156,7 +156,7 @@ proxmox_users:
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible_collections.community.general.plugins.module_utils.proxmox import (
|
||||
proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool)
|
||||
proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
|
||||
|
||||
|
||||
class ProxmoxUserInfoAnsible(ProxmoxAnsible):
|
||||
@@ -232,6 +232,9 @@ def main():
|
||||
changed=False
|
||||
)
|
||||
|
||||
if not HAS_PROXMOXER:
|
||||
module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
|
||||
|
||||
proxmox = ProxmoxUserInfoAnsible(module)
|
||||
domain = module.params['domain']
|
||||
user = module.params['user']
|
||||
|
||||
@@ -1260,11 +1260,6 @@ def resume_vm(module, client, vm):
|
||||
vm = client.vm.info(vm.ID)
|
||||
changed = False
|
||||
|
||||
state = vm.STATE
|
||||
if state in [VM_STATES.index('HOLD')]:
|
||||
changed = release_vm(module, client, vm)
|
||||
return changed
|
||||
|
||||
lcm_state = vm.LCM_STATE
|
||||
if lcm_state == LCM_STATES.index('SHUTDOWN_POWEROFF'):
|
||||
module.fail_json(msg="Cannot perform action 'resume' because this action is not available " +
|
||||
@@ -1287,23 +1282,6 @@ def resume_vms(module, client, vms):
|
||||
return changed
|
||||
|
||||
|
||||
def release_vm(module, client, vm):
|
||||
vm = client.vm.info(vm.ID)
|
||||
changed = False
|
||||
|
||||
state = vm.STATE
|
||||
if state != VM_STATES.index('HOLD'):
|
||||
module.fail_json(msg="Cannot perform action 'release' because this action is not available " +
|
||||
"because VM is not in state 'HOLD'.")
|
||||
else:
|
||||
changed = True
|
||||
|
||||
if changed and not module.check_mode:
|
||||
client.vm.action('release', vm.ID)
|
||||
|
||||
return changed
|
||||
|
||||
|
||||
def check_name_attribute(module, attributes):
|
||||
if attributes.get("NAME"):
|
||||
import re
|
||||
|
||||
@@ -54,15 +54,8 @@ options:
|
||||
organization:
|
||||
type: str
|
||||
description:
|
||||
- Organization identifier.
|
||||
- Exactly one of I(project) and I(organization) must be specified.
|
||||
|
||||
project:
|
||||
type: str
|
||||
description:
|
||||
- Project identifier.
|
||||
- Exactly one of I(project) and I(organization) must be specified.
|
||||
version_added: 4.3.0
|
||||
- Organization identifier
|
||||
required: true
|
||||
|
||||
state:
|
||||
type: str
|
||||
@@ -139,7 +132,7 @@ EXAMPLES = '''
|
||||
name: foobar
|
||||
state: present
|
||||
image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
|
||||
project: 951df375-e094-4d26-97c1-ba548eeb9c42
|
||||
organization: 951df375-e094-4d26-97c1-ba548eeb9c42
|
||||
region: ams1
|
||||
commercial_type: VC1S
|
||||
tags:
|
||||
@@ -151,7 +144,7 @@ EXAMPLES = '''
|
||||
name: foobar
|
||||
state: present
|
||||
image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
|
||||
project: 951df375-e094-4d26-97c1-ba548eeb9c42
|
||||
organization: 951df375-e094-4d26-97c1-ba548eeb9c42
|
||||
region: ams1
|
||||
commercial_type: VC1S
|
||||
security_group: 4a31b633-118e-4900-bd52-facf1085fc8d
|
||||
@@ -164,7 +157,7 @@ EXAMPLES = '''
|
||||
name: foobar
|
||||
state: absent
|
||||
image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
|
||||
project: 951df375-e094-4d26-97c1-ba548eeb9c42
|
||||
organization: 951df375-e094-4d26-97c1-ba548eeb9c42
|
||||
region: ams1
|
||||
commercial_type: VC1S
|
||||
'''
|
||||
@@ -276,15 +269,10 @@ def create_server(compute_api, server):
|
||||
"commercial_type": server["commercial_type"],
|
||||
"image": server["image"],
|
||||
"dynamic_ip_required": server["dynamic_ip_required"],
|
||||
"name": server["name"]
|
||||
"name": server["name"],
|
||||
"organization": server["organization"]
|
||||
}
|
||||
|
||||
if server["project"]:
|
||||
data["project"] = server["project"]
|
||||
|
||||
if server["organization"]:
|
||||
data["organization"] = server["organization"]
|
||||
|
||||
if server["security_group"]:
|
||||
data["security_group"] = server["security_group"]
|
||||
|
||||
@@ -640,7 +628,6 @@ def core(module):
|
||||
"enable_ipv6": module.params["enable_ipv6"],
|
||||
"tags": module.params["tags"],
|
||||
"organization": module.params["organization"],
|
||||
"project": module.params["project"],
|
||||
"security_group": module.params["security_group"]
|
||||
}
|
||||
module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
|
||||
@@ -668,8 +655,7 @@ def main():
|
||||
public_ip=dict(default="absent"),
|
||||
state=dict(choices=list(state_strategy.keys()), default='present'),
|
||||
tags=dict(type="list", elements="str", default=[]),
|
||||
organization=dict(),
|
||||
project=dict(),
|
||||
organization=dict(required=True),
|
||||
wait=dict(type="bool", default=False),
|
||||
wait_timeout=dict(type="int", default=300),
|
||||
wait_sleep_time=dict(type="int", default=3),
|
||||
@@ -678,12 +664,6 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
mutually_exclusive=[
|
||||
('organization', 'project'),
|
||||
],
|
||||
required_one_of=[
|
||||
('organization', 'project'),
|
||||
],
|
||||
)
|
||||
|
||||
core(module)
|
||||
|
||||
@@ -18,12 +18,11 @@ module: scaleway_security_group_rule
|
||||
short_description: Scaleway Security Group Rule management module
|
||||
author: Antoine Barbare (@abarbare)
|
||||
description:
|
||||
- This module manages Security Group Rule on Scaleway account
|
||||
U(https://developer.scaleway.com)
|
||||
- This module manages Security Group Rule on Scaleway account
|
||||
U(https://developer.scaleway.com)
|
||||
extends_documentation_fragment:
|
||||
- community.general.scaleway
|
||||
requirements:
|
||||
- ipaddress
|
||||
- community.general.scaleway
|
||||
|
||||
|
||||
options:
|
||||
state:
|
||||
@@ -131,19 +130,10 @@ data:
|
||||
}
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object
|
||||
from ansible_collections.community.general.plugins.module_utils.compat.ipaddress import ip_network
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
|
||||
try:
|
||||
from ipaddress import ip_network
|
||||
except ImportError:
|
||||
IPADDRESS_IMP_ERR = traceback.format_exc()
|
||||
HAS_IPADDRESS = False
|
||||
else:
|
||||
HAS_IPADDRESS = True
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
def get_sgr_from_api(security_group_rules, security_group_rule):
|
||||
@@ -266,8 +256,6 @@ def main():
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
if not HAS_IPADDRESS:
|
||||
module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR)
|
||||
|
||||
core(module)
|
||||
|
||||
|
||||
@@ -51,11 +51,6 @@ options:
|
||||
description:
|
||||
- Name used to identify the volume.
|
||||
required: true
|
||||
project:
|
||||
type: str
|
||||
description:
|
||||
- Scaleway project ID to which volume belongs.
|
||||
version_added: 4.3.0
|
||||
organization:
|
||||
type: str
|
||||
description:
|
||||
@@ -76,7 +71,7 @@ EXAMPLES = '''
|
||||
name: my-volume
|
||||
state: present
|
||||
region: par1
|
||||
project: "{{ scw_org }}"
|
||||
organization: "{{ scw_org }}"
|
||||
"size": 10000000000
|
||||
volume_type: l_ssd
|
||||
register: server_creation_check_task
|
||||
@@ -98,7 +93,7 @@ data:
|
||||
"export_uri": null,
|
||||
"id": "c675f420-cfeb-48ff-ba2a-9d2a4dbe3fcd",
|
||||
"name": "volume-0-3",
|
||||
"project": "000a115d-2852-4b0a-9ce8-47f1134ba95a",
|
||||
"organization": "000a115d-2852-4b0a-9ce8-47f1134ba95a",
|
||||
"server": null,
|
||||
"size": 10000000000,
|
||||
"volume_type": "l_ssd"
|
||||
@@ -111,37 +106,31 @@ from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
def core(module):
|
||||
region = module.params["region"]
|
||||
state = module.params['state']
|
||||
name = module.params['name']
|
||||
organization = module.params['organization']
|
||||
project = module.params['project']
|
||||
size = module.params['size']
|
||||
volume_type = module.params['volume_type']
|
||||
module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
|
||||
|
||||
account_api = Scaleway(module)
|
||||
response = account_api.get('volumes')
|
||||
status_code = response.status_code
|
||||
volumes_json = response.json
|
||||
|
||||
if project is None:
|
||||
project = organization
|
||||
|
||||
if not response.ok:
|
||||
module.fail_json(msg='Error getting volume [{0}: {1}]'.format(
|
||||
status_code, response.json['message']))
|
||||
|
||||
volumeByName = None
|
||||
for volume in volumes_json['volumes']:
|
||||
if volume['project'] == project and volume['name'] == name:
|
||||
if volume['organization'] == organization and volume['name'] == name:
|
||||
volumeByName = volume
|
||||
|
||||
if state in ('present',):
|
||||
if volumeByName is not None:
|
||||
module.exit_json(changed=False)
|
||||
|
||||
payload = {'name': name, 'project': project, 'size': size, 'volume_type': volume_type}
|
||||
payload = {'name': name, 'organization': organization, 'size': size, 'volume_type': volume_type}
|
||||
|
||||
response = account_api.post('/volumes', payload)
|
||||
|
||||
@@ -172,7 +161,6 @@ def main():
|
||||
state=dict(default='present', choices=['absent', 'present']),
|
||||
name=dict(required=True),
|
||||
size=dict(type='int'),
|
||||
project=dict(),
|
||||
organization=dict(),
|
||||
volume_type=dict(),
|
||||
region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
|
||||
@@ -180,12 +168,6 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
mutually_exclusive=[
|
||||
('organization', 'project'),
|
||||
],
|
||||
required_one_of=[
|
||||
('organization', 'project'),
|
||||
],
|
||||
)
|
||||
|
||||
core(module)
|
||||
|
||||
@@ -60,7 +60,6 @@ extends_documentation_fragment:
|
||||
- community.general.redis.documentation
|
||||
|
||||
seealso:
|
||||
- module: community.general.redis_data_incr
|
||||
- module: community.general.redis_data_info
|
||||
- module: community.general.redis
|
||||
'''
|
||||
|
||||
@@ -1,187 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Andreas Botzner <andreas at botzner dot com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: redis_data_incr
|
||||
short_description: Increment keys in Redis
|
||||
version_added: 4.0.0
|
||||
description:
|
||||
- Increment integers or float keys in Redis database and get new value.
|
||||
- Default increment for all keys is 1. For specific increments use the
|
||||
I(increment_int) and I(increment_float) options.
|
||||
- When using I(check_mode) the module will try to calculate the value that
|
||||
Redis would return. If the key is not present, 0.0 is used as value.
|
||||
author: "Andreas Botzner (@paginabianca)"
|
||||
options:
|
||||
key:
|
||||
description:
|
||||
- Database key.
|
||||
type: str
|
||||
required: true
|
||||
increment_int:
|
||||
description:
|
||||
- Integer amount to increment the key by.
|
||||
required: false
|
||||
type: int
|
||||
increment_float:
|
||||
description:
|
||||
- Float amount to increment the key by.
|
||||
- This only works with keys that contain float values
|
||||
in their string representation.
|
||||
type: float
|
||||
required: false
|
||||
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.redis.documentation
|
||||
|
||||
notes:
|
||||
- For C(check_mode) to work, the specified I(redis_user) needs permission to
|
||||
run the C(GET) command on the key, otherwise the module will fail.
|
||||
|
||||
seealso:
|
||||
- module: community.general.redis_data
|
||||
- module: community.general.redis_data_info
|
||||
- module: community.general.redis
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Increment integer key foo on localhost with no username and print new value
|
||||
community.general.redis_data_incr:
|
||||
login_host: localhost
|
||||
login_password: supersecret
|
||||
key: foo
|
||||
increment_int: 1
|
||||
register: result
|
||||
- name: Print new value
|
||||
debug:
|
||||
var: result.value
|
||||
|
||||
- name: Increment float key foo by 20.4
|
||||
community.general.redis_data_incr:
|
||||
login_host: redishost
|
||||
login_user: redisuser
|
||||
login_password: somepass
|
||||
key: foo
|
||||
increment_float: '20.4'
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
value:
|
||||
description: Incremented value of key
|
||||
returned: on success
|
||||
type: float
|
||||
sample: '4039.4'
|
||||
msg:
|
||||
description: A short message.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'Incremented key: foo by 20.4 to 65.9'
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.redis import (
|
||||
fail_imports, redis_auth_argument_spec, RedisAnsible)
|
||||
|
||||
|
||||
def main():
|
||||
redis_auth_args = redis_auth_argument_spec()
|
||||
module_args = dict(
|
||||
key=dict(type='str', required=True, no_log=False),
|
||||
increment_int=dict(type='int', required=False),
|
||||
increment_float=dict(type='float', required=False),
|
||||
)
|
||||
module_args.update(redis_auth_args)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=module_args,
|
||||
supports_check_mode=True,
|
||||
mutually_exclusive=[['increment_int', 'increment_float']],
|
||||
)
|
||||
fail_imports(module)
|
||||
|
||||
redis = RedisAnsible(module)
|
||||
key = module.params['key']
|
||||
increment_float = module.params['increment_float']
|
||||
increment_int = module.params['increment_int']
|
||||
increment = 1
|
||||
if increment_float is not None:
|
||||
increment = increment_float
|
||||
elif increment_int is not None:
|
||||
increment = increment_int
|
||||
|
||||
result = {'changed': False}
|
||||
if module.check_mode:
|
||||
value = 0.0
|
||||
try:
|
||||
res = redis.connection.get(key)
|
||||
if res is not None:
|
||||
value = float(res)
|
||||
except ValueError as e:
|
||||
msg = 'Value: {0} of key: {1} is not incrementable(int or float)'.format(
|
||||
res, key)
|
||||
result['msg'] = msg
|
||||
module.fail_json(**result)
|
||||
except Exception as e:
|
||||
msg = 'Failed to get value of key: {0} with exception: {1}'.format(
|
||||
key, str(e))
|
||||
result['msg'] = msg
|
||||
module.fail_json(**result)
|
||||
msg = 'Incremented key: {0} by {1} to {2}'.format(
|
||||
key, increment, value + increment)
|
||||
result['msg'] = msg
|
||||
result['value'] = float(value + increment)
|
||||
module.exit_json(**result)
|
||||
|
||||
if increment_float is not None:
|
||||
try:
|
||||
value = redis.connection.incrbyfloat(key, increment)
|
||||
msg = 'Incremented key: {0} by {1} to {2}'.format(
|
||||
key, increment, value)
|
||||
result['msg'] = msg
|
||||
result['value'] = float(value)
|
||||
result['changed'] = True
|
||||
module.exit_json(**result)
|
||||
except Exception as e:
|
||||
msg = 'Failed to increment key: {0} by {1} with exception: {2}'.format(
|
||||
key, increment, str(e))
|
||||
result['msg'] = msg
|
||||
module.fail_json(**result)
|
||||
elif increment_int is not None:
|
||||
try:
|
||||
value = redis.connection.incrby(key, increment)
|
||||
msg = 'Incremented key: {0} by {1} to {2}'.format(
|
||||
key, increment, value)
|
||||
result['msg'] = msg
|
||||
result['value'] = float(value)
|
||||
result['changed'] = True
|
||||
module.exit_json(**result)
|
||||
except Exception as e:
|
||||
msg = 'Failed to increment key: {0} by {1} with exception: {2}'.format(
|
||||
key, increment, str(e))
|
||||
result['msg'] = msg
|
||||
module.fail_json(**result)
|
||||
else:
|
||||
try:
|
||||
value = redis.connection.incr(key)
|
||||
msg = 'Incremented key: {0} to {1}'.format(key, value)
|
||||
result['msg'] = msg
|
||||
result['value'] = float(value)
|
||||
result['changed'] = True
|
||||
module.exit_json(**result)
|
||||
except Exception as e:
|
||||
msg = 'Failed to increment key: {0} with exception: {1}'.format(
|
||||
key, str(e))
|
||||
result['msg'] = msg
|
||||
module.fail_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user