mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-04-30 10:26:52 +00:00
Compare commits
550 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c85bb8713e | ||
|
|
5cdc8f4b07 | ||
|
|
50131f5dfa | ||
|
|
c734e7c2e5 | ||
|
|
7e6e8f7749 | ||
|
|
687acdc961 | ||
|
|
16092feaab | ||
|
|
6676fb8fb4 | ||
|
|
a860f537dd | ||
|
|
f1a9c2f00a | ||
|
|
f8de068e32 | ||
|
|
70b4bacf0f | ||
|
|
41f5d1741c | ||
|
|
54ede7dd7f | ||
|
|
7f0702b786 | ||
|
|
89a3abe64a | ||
|
|
59eff2e3e0 | ||
|
|
1115b463fe | ||
|
|
77bf1fedf5 | ||
|
|
89560ea2e7 | ||
|
|
f9919d28d4 | ||
|
|
7b4660d28a | ||
|
|
29496be80e | ||
|
|
991c96615c | ||
|
|
fe5ad997c1 | ||
|
|
468b28bbb8 | ||
|
|
9b57221d9a | ||
|
|
cd1a92d417 | ||
|
|
7486e3a074 | ||
|
|
6661917370 | ||
|
|
ec0bd3143a | ||
|
|
cce68def8b | ||
|
|
6f5ad22d28 | ||
|
|
6c53a09eef | ||
|
|
9b6e75f7f4 | ||
|
|
e09650140d | ||
|
|
67388be1a9 | ||
|
|
130d07948a | ||
|
|
5d6fcaef53 | ||
|
|
f044a83c49 | ||
|
|
e3f7e8dadf | ||
|
|
8d1a028dbd | ||
|
|
8823e5c061 | ||
|
|
102456d033 | ||
|
|
aad4c55d3d | ||
|
|
e31c98f17f | ||
|
|
6a5dfc5579 | ||
|
|
ab7efef9df | ||
|
|
ca9c763b57 | ||
|
|
cfeb40ed23 | ||
|
|
c495d136fa | ||
|
|
d9e2d6682b | ||
|
|
d7fe288ffd | ||
|
|
7de89699f7 | ||
|
|
b0a9cceeb5 | ||
|
|
b08f0b2f82 | ||
|
|
f23f409bd6 | ||
|
|
cfea62793f | ||
|
|
62bda91466 | ||
|
|
473d5fa2af | ||
|
|
cc76d684d5 | ||
|
|
7a6770c731 | ||
|
|
d2214af6e8 | ||
|
|
fad1220869 | ||
|
|
fe09516235 | ||
|
|
78cd8886f4 | ||
|
|
6b99d48f06 | ||
|
|
6e0e17a7e3 | ||
|
|
90de95c7b2 | ||
|
|
07c6b8b24e | ||
|
|
d106de6d51 | ||
|
|
e96101fb3f | ||
|
|
a60d55f03c | ||
|
|
d6a09ada98 | ||
|
|
9ddb75a3a2 | ||
|
|
b85ff2a997 | ||
|
|
3d1ca5638b | ||
|
|
35fd4700bf | ||
|
|
9add9df7d6 | ||
|
|
cdb747b41d | ||
|
|
7a70fda784 | ||
|
|
13d1b9569e | ||
|
|
c8c5021773 | ||
|
|
206ac72bd8 | ||
|
|
1ee123bb1e | ||
|
|
aa0bcad9df | ||
|
|
6e51690a95 | ||
|
|
3eab4faf0b | ||
|
|
c6589a772b | ||
|
|
cb06c4ff77 | ||
|
|
78316fbb75 | ||
|
|
c7df82652f | ||
|
|
342a1a7faa | ||
|
|
17431bd42c | ||
|
|
eacf663999 | ||
|
|
84a806be08 | ||
|
|
7db5c86dc8 | ||
|
|
1e82b5c580 | ||
|
|
7f6be665f9 | ||
|
|
174b00cd29 | ||
|
|
a7c92f491d | ||
|
|
592cd3747b | ||
|
|
4295ee0bb4 | ||
|
|
7e1ff300f8 | ||
|
|
b3ddec2b29 | ||
|
|
227f6e333e | ||
|
|
94711ca506 | ||
|
|
d53052f27a | ||
|
|
95d57c338a | ||
|
|
4fd7a65a52 | ||
|
|
0e3c1f867d | ||
|
|
fa24edf89c | ||
|
|
ac3e803a36 | ||
|
|
8168ddca4f | ||
|
|
cc264be644 | ||
|
|
9bd2d1ec90 | ||
|
|
01b2c48161 | ||
|
|
a26792418e | ||
|
|
485834526f | ||
|
|
5e68ea41e9 | ||
|
|
29af59822d | ||
|
|
5b10f8234c | ||
|
|
c69810bf04 | ||
|
|
9eb638f651 | ||
|
|
01887bf359 | ||
|
|
927356dad3 | ||
|
|
0df41241dd | ||
|
|
aaa0f39f72 | ||
|
|
2324f350bc | ||
|
|
bd96616e6f | ||
|
|
b429c520f5 | ||
|
|
d6e14276c8 | ||
|
|
96de25fc94 | ||
|
|
753df78877 | ||
|
|
3b82a6ff99 | ||
|
|
ca5a2b291a | ||
|
|
38e0d97c8b | ||
|
|
cdfc4dcf49 | ||
|
|
9fb76efde0 | ||
|
|
40ccd1501b | ||
|
|
8ba7fd5d61 | ||
|
|
3a2f52c1db | ||
|
|
c40db6789a | ||
|
|
73acdaa489 | ||
|
|
a362879ff6 | ||
|
|
7e1412e5e1 | ||
|
|
996dc617ed | ||
|
|
9b4b175ca3 | ||
|
|
86a22b5ed2 | ||
|
|
45d3661ccf | ||
|
|
481fc48e51 | ||
|
|
e7e2ab94da | ||
|
|
a68445486e | ||
|
|
6580e7559b | ||
|
|
d9e8724b1d | ||
|
|
ef0b83fdf1 | ||
|
|
e8c37ca605 | ||
|
|
02c534bb8e | ||
|
|
3731064368 | ||
|
|
c3813d4533 | ||
|
|
191d2e08bb | ||
|
|
249d490f10 | ||
|
|
71ea99d10f | ||
|
|
18f8195983 | ||
|
|
d25554df9d | ||
|
|
9546bbb55e | ||
|
|
7038812465 | ||
|
|
316adebd68 | ||
|
|
ed2c1e4ac9 | ||
|
|
d44c85aa90 | ||
|
|
9772485d3c | ||
|
|
8ece0d3609 | ||
|
|
905f4dcfa2 | ||
|
|
9de01e04f2 | ||
|
|
d1f820ed06 | ||
|
|
293c7a9fb3 | ||
|
|
f0fcb221cd | ||
|
|
80bb42325b | ||
|
|
3a460751a4 | ||
|
|
e22fff2b12 | ||
|
|
57e5f8c7be | ||
|
|
f1807d3323 | ||
|
|
0bc4518f3b | ||
|
|
e95f63b067 | ||
|
|
106856ed86 | ||
|
|
5895e50185 | ||
|
|
774b2f642b | ||
|
|
316e1d6bf2 | ||
|
|
0d3c4de6a2 | ||
|
|
a14392fab0 | ||
|
|
3fee872d58 | ||
|
|
d0f82e2c27 | ||
|
|
7cfdc2ce8c | ||
|
|
53fc2c477b | ||
|
|
71a655193c | ||
|
|
845c406419 | ||
|
|
d6a1df3706 | ||
|
|
806f1ea3c9 | ||
|
|
d96b85af9f | ||
|
|
ef07f9b9ff | ||
|
|
1bb2ff5128 | ||
|
|
ba2917a7dc | ||
|
|
b6b7601615 | ||
|
|
43a9f09a17 | ||
|
|
147ca2fe66 | ||
|
|
3715b6ef46 | ||
|
|
d0563e34a6 | ||
|
|
935348ae78 | ||
|
|
0d2bcf545e | ||
|
|
ae6cbc2d82 | ||
|
|
e1cdad3537 | ||
|
|
98d071f61e | ||
|
|
8e7d49c1c6 | ||
|
|
424af85929 | ||
|
|
2ad7ed4f83 | ||
|
|
2589e9a030 | ||
|
|
02d0e3d286 | ||
|
|
9ce1009643 | ||
|
|
e48f9fdf74 | ||
|
|
7aae8d5386 | ||
|
|
8ab96d9533 | ||
|
|
7a2efb4775 | ||
|
|
331f5bdf24 | ||
|
|
06345839c6 | ||
|
|
b20fc7a7c3 | ||
|
|
517570a64f | ||
|
|
dc8d076a25 | ||
|
|
bd63da680d | ||
|
|
4e39a4b825 | ||
|
|
118c040879 | ||
|
|
0a5db85ad5 | ||
|
|
29e4066944 | ||
|
|
612543919e | ||
|
|
58d018ebbd | ||
|
|
05fe587a3e | ||
|
|
7c43cc3faa | ||
|
|
6b207bce4c | ||
|
|
dd25c0d3bf | ||
|
|
a20862797e | ||
|
|
0f9311c3d9 | ||
|
|
76317d1f64 | ||
|
|
3502f3b486 | ||
|
|
7c493eb4e5 | ||
|
|
c121e8685f | ||
|
|
bf8df21d27 | ||
|
|
135faf4421 | ||
|
|
fce562ad6d | ||
|
|
1f5345881d | ||
|
|
edd7b84285 | ||
|
|
2d6816e11e | ||
|
|
b2bb7e3f9c | ||
|
|
baa721ac22 | ||
|
|
58c6f6c95a | ||
|
|
97e2c3dec9 | ||
|
|
d9dcdcbbe4 | ||
|
|
1ce79db763 | ||
|
|
df8fdcda79 | ||
|
|
cf43356753 | ||
|
|
a91eb6ae4f | ||
|
|
69641d36e1 | ||
|
|
4e2d4e3c68 | ||
|
|
e77adff0b7 | ||
|
|
825e17c1cf | ||
|
|
cc458f7c37 | ||
|
|
b8a081b9b2 | ||
|
|
e40aa69e77 | ||
|
|
cbcb942b0e | ||
|
|
f2fa56b485 | ||
|
|
1ca9c35010 | ||
|
|
23e7ef0255 | ||
|
|
8a62b79ef2 | ||
|
|
bcccf4e388 | ||
|
|
c7fccb2c01 | ||
|
|
6ac410b3f6 | ||
|
|
41101e55a0 | ||
|
|
f19e191467 | ||
|
|
fccae19177 | ||
|
|
8a4cdd2b8a | ||
|
|
16945d3847 | ||
|
|
432c891487 | ||
|
|
25267b8094 | ||
|
|
f7dba23e50 | ||
|
|
e123623f5c | ||
|
|
1fec1d0c81 | ||
|
|
5855ef558a | ||
|
|
1e466df863 | ||
|
|
6033ce695b | ||
|
|
b5d6457611 | ||
|
|
1705335ba7 | ||
|
|
56b5be0630 | ||
|
|
429359e977 | ||
|
|
7f96b7df60 | ||
|
|
2831bc45f5 | ||
|
|
85bcef3f5a | ||
|
|
87ba15fa45 | ||
|
|
771e9de010 | ||
|
|
6bfa6e40f4 | ||
|
|
da11a98cb7 | ||
|
|
ff586f1105 | ||
|
|
16476f5cb9 | ||
|
|
e9494c12f2 | ||
|
|
a73720c103 | ||
|
|
3dba697e33 | ||
|
|
75688cb632 | ||
|
|
f2df1a7581 | ||
|
|
857d2eee50 | ||
|
|
afe2842b1b | ||
|
|
047b7ada3c | ||
|
|
73c27d6a0e | ||
|
|
789f06dffe | ||
|
|
5f8d6a73d3 | ||
|
|
9ccce82113 | ||
|
|
43fe26d83c | ||
|
|
d9533c44aa | ||
|
|
d974ca32ae | ||
|
|
2935b011ed | ||
|
|
cde9564163 | ||
|
|
549dfaae64 | ||
|
|
0b70b3baff | ||
|
|
5be4adc434 | ||
|
|
87baa5860a | ||
|
|
7da2c16b4a | ||
|
|
024e7419da | ||
|
|
4982eaf935 | ||
|
|
21d5668c97 | ||
|
|
ac03881002 | ||
|
|
c8b2d7c1e5 | ||
|
|
95ceb53676 | ||
|
|
20db4fc560 | ||
|
|
d54d2fa4a6 | ||
|
|
dc3e16113d | ||
|
|
20f46f7669 | ||
|
|
31189e9645 | ||
|
|
d057b2e3b2 | ||
|
|
99c28313e4 | ||
|
|
9631de4939 | ||
|
|
35e0a61217 | ||
|
|
32e9a0c250 | ||
|
|
38e70ae0e4 | ||
|
|
11cdb1b661 | ||
|
|
4a392372a8 | ||
|
|
d7c6ba89f8 | ||
|
|
a3607a745e | ||
|
|
9fd2ba60df | ||
|
|
7b9687f758 | ||
|
|
7734430f23 | ||
|
|
27ba98a68e | ||
|
|
9b1c6f0743 | ||
|
|
ea822c7bdd | ||
|
|
a3a40f6de3 | ||
|
|
28193b699b | ||
|
|
9ffc1ef393 | ||
|
|
3fc97bf80a | ||
|
|
5079ef0e82 | ||
|
|
d56d34bce6 | ||
|
|
c5cbe2943b | ||
|
|
7a41833e59 | ||
|
|
111c5de550 | ||
|
|
9023d4dba1 | ||
|
|
4ae392e5de | ||
|
|
0e90ff48b5 | ||
|
|
1990f79d8a | ||
|
|
ad8c4e4de6 | ||
|
|
288fe1cfc6 | ||
|
|
1b80a9c587 | ||
|
|
d97a9b5961 | ||
|
|
518ace2562 | ||
|
|
56acd4356f | ||
|
|
c0740ca398 | ||
|
|
b2b4877532 | ||
|
|
9b02230477 | ||
|
|
a0915036f9 | ||
|
|
ffe505a798 | ||
|
|
00aa1250ee | ||
|
|
c63dc624b7 | ||
|
|
a97d82be88 | ||
|
|
0e829e6a23 | ||
|
|
677e88b257 | ||
|
|
9c7b539ef6 | ||
|
|
2d1527a564 | ||
|
|
debb15efbe | ||
|
|
0a9cf38118 | ||
|
|
c7cf6f2eb7 | ||
|
|
199ead85d0 | ||
|
|
2fb0877577 | ||
|
|
9dd91f949a | ||
|
|
147425ef93 | ||
|
|
1b94d09209 | ||
|
|
acf7b106c9 | ||
|
|
fafabed9e6 | ||
|
|
d180390dbc | ||
|
|
d2a984ded1 | ||
|
|
2d1f5408d3 | ||
|
|
24c5d4320f | ||
|
|
24dabda95b | ||
|
|
860b2b89a3 | ||
|
|
07085785a3 | ||
|
|
d6d0b6f0c1 | ||
|
|
ce35d88094 | ||
|
|
519411e026 | ||
|
|
2768eda895 | ||
|
|
db713bd0f5 | ||
|
|
08f7ad06be | ||
|
|
67cabcb2aa | ||
|
|
1ed4394c5e | ||
|
|
ee23c26150 | ||
|
|
c9cf641188 | ||
|
|
e9f3455b62 | ||
|
|
13ab8f412d | ||
|
|
3997d5fcc8 | ||
|
|
4a47d121aa | ||
|
|
3ca98c2edd | ||
|
|
2f2f384b4e | ||
|
|
adf50b106a | ||
|
|
b1b34ee12e | ||
|
|
bccf317814 | ||
|
|
0bd345bfb0 | ||
|
|
a55c96d5c1 | ||
|
|
d4c4d00ad1 | ||
|
|
343339655d | ||
|
|
4b37b1bca6 | ||
|
|
19549058ce | ||
|
|
c2ce7a0752 | ||
|
|
f44300cec5 | ||
|
|
1e968bce27 | ||
|
|
dab5d941e6 | ||
|
|
eef645c3f7 | ||
|
|
9f344d7165 | ||
|
|
bb37b67166 | ||
|
|
94a53adff1 | ||
|
|
4c50f1add7 | ||
|
|
7c3f2ae4af | ||
|
|
1e34df7ca0 | ||
|
|
6a41fba2f8 | ||
|
|
f74b83663b | ||
|
|
463c576a67 | ||
|
|
9d8bea9d36 | ||
|
|
0e6d70697c | ||
|
|
1a4af9bfc3 | ||
|
|
c49a384a65 | ||
|
|
a343756e6f | ||
|
|
4396ec9631 | ||
|
|
d49783280e | ||
|
|
2e8746a8aa | ||
|
|
a4f46b881a | ||
|
|
5ddf0041ec | ||
|
|
d93bc039b2 | ||
|
|
efbda2389d | ||
|
|
fe5717c1aa | ||
|
|
ca1506fb26 | ||
|
|
1ad85849af | ||
|
|
3516acf8d4 | ||
|
|
b6c0cc0b61 | ||
|
|
bef3c04d1c | ||
|
|
f09c39b71e | ||
|
|
b281d3d699 | ||
|
|
c3cab7c68c | ||
|
|
14813a6287 | ||
|
|
14f13904d6 | ||
|
|
3afcf7e75d | ||
|
|
43c12b82fa | ||
|
|
95794f31e3 | ||
|
|
795125fec4 | ||
|
|
285639a4f9 | ||
|
|
7cd96d963e | ||
|
|
dc793ea32b | ||
|
|
b79969da68 | ||
|
|
909e9fe950 | ||
|
|
b45298bc43 | ||
|
|
4aa50962cb | ||
|
|
26757edfb2 | ||
|
|
0b4a2bea01 | ||
|
|
d0f8eac7fd | ||
|
|
4764a5deba | ||
|
|
aa74cf4d61 | ||
|
|
6df3685d42 | ||
|
|
d871399220 | ||
|
|
63012eef82 | ||
|
|
593d622438 | ||
|
|
cc293f90a2 | ||
|
|
d7e55db99b | ||
|
|
3100c32a00 | ||
|
|
8f083d5d85 | ||
|
|
852e240525 | ||
|
|
7a169af053 | ||
|
|
1403f5edcc | ||
|
|
452a185a23 | ||
|
|
c4624d3ad8 | ||
|
|
31687a524e | ||
|
|
2c1ab2d384 | ||
|
|
f6db0745fc | ||
|
|
d24fc92466 | ||
|
|
b89eb87ad6 | ||
|
|
2a376642dd | ||
|
|
2b1eff2783 | ||
|
|
dc0a56141f | ||
|
|
345d5f2dfa | ||
|
|
350380ba8c | ||
|
|
da7e4e1dc2 | ||
|
|
2cc848fe1a | ||
|
|
448b8cbcda | ||
|
|
ea200c9d8c | ||
|
|
5b77515308 | ||
|
|
c8f402806f | ||
|
|
a385cbb11d | ||
|
|
5d0a7f40f2 | ||
|
|
e2dfd42dd4 | ||
|
|
054eb90ae5 | ||
|
|
ee9770cff7 | ||
|
|
384655e15c | ||
|
|
23dda56913 | ||
|
|
265d034e31 | ||
|
|
83a0c32269 | ||
|
|
b9fa9116c1 | ||
|
|
0912e8cc7a | ||
|
|
d22dd5056e | ||
|
|
9d46ccf1b2 | ||
|
|
eea4f45965 | ||
|
|
624eb7171e | ||
|
|
8e7aff00b5 | ||
|
|
2e58dfe52a | ||
|
|
4cdff8654a | ||
|
|
7386326258 | ||
|
|
9906b9dbc7 | ||
|
|
188a4eeb0c | ||
|
|
aaa561163b | ||
|
|
1f41e66f09 | ||
|
|
06bdabcad9 | ||
|
|
7007c68ab7 | ||
|
|
5064aa8ec6 | ||
|
|
6a72c3b338 | ||
|
|
4b0d2dcfe0 | ||
|
|
7359b1fbe5 | ||
|
|
b5f8ae4320 | ||
|
|
26aba8e766 | ||
|
|
cd957fae4c | ||
|
|
c0221b75af | ||
|
|
4e90ee752e | ||
|
|
eb455c69a2 | ||
|
|
26c3bd25f6 | ||
|
|
276880aac1 | ||
|
|
ae21af8820 | ||
|
|
5fbe946c3a | ||
|
|
b3f436aa63 | ||
|
|
77d4bc2942 | ||
|
|
48ef05def3 | ||
|
|
9d13acd68e | ||
|
|
2ad004b97b | ||
|
|
cd116120ad | ||
|
|
9931cdc1e7 |
@@ -24,14 +24,15 @@ schedules:
|
|||||||
always: true
|
always: true
|
||||||
branches:
|
branches:
|
||||||
include:
|
include:
|
||||||
- stable-2
|
|
||||||
- stable-3
|
- stable-3
|
||||||
|
- stable-4
|
||||||
- cron: 0 11 * * 0
|
- cron: 0 11 * * 0
|
||||||
displayName: Weekly (old stable branches)
|
displayName: Weekly (old stable branches)
|
||||||
always: true
|
always: true
|
||||||
branches:
|
branches:
|
||||||
include:
|
include:
|
||||||
- stable-1
|
- stable-1
|
||||||
|
- stable-2
|
||||||
|
|
||||||
variables:
|
variables:
|
||||||
- name: checkoutPath
|
- name: checkoutPath
|
||||||
|
|||||||
86
.github/BOTMETA.yml
vendored
86
.github/BOTMETA.yml
vendored
@@ -118,6 +118,8 @@ files:
|
|||||||
$doc_fragments/xenserver.py:
|
$doc_fragments/xenserver.py:
|
||||||
maintainers: bvitnik
|
maintainers: bvitnik
|
||||||
labels: xenserver
|
labels: xenserver
|
||||||
|
$filters/counter.py:
|
||||||
|
maintainers: keilr
|
||||||
$filters/dict.py:
|
$filters/dict.py:
|
||||||
maintainers: felixfontein
|
maintainers: felixfontein
|
||||||
$filters/dict_kv.py:
|
$filters/dict_kv.py:
|
||||||
@@ -156,15 +158,17 @@ files:
|
|||||||
maintainers: conloos
|
maintainers: conloos
|
||||||
$inventories/nmap.py: {}
|
$inventories/nmap.py: {}
|
||||||
$inventories/online.py:
|
$inventories/online.py:
|
||||||
maintainers: sieben
|
maintainers: remyleone
|
||||||
$inventories/opennebula.py:
|
$inventories/opennebula.py:
|
||||||
maintainers: feldsam
|
maintainers: feldsam
|
||||||
labels: cloud opennebula
|
labels: cloud opennebula
|
||||||
keywords: opennebula dynamic inventory script
|
keywords: opennebula dynamic inventory script
|
||||||
$inventories/proxmox.py:
|
$inventories/proxmox.py:
|
||||||
maintainers: $team_virt ilijamt
|
maintainers: $team_virt ilijamt
|
||||||
|
$inventories/xen_orchestra.py:
|
||||||
|
maintainers: ddelnano shinuza
|
||||||
$inventories/icinga2.py:
|
$inventories/icinga2.py:
|
||||||
maintainers: bongoeadgc6
|
maintainers: BongoEADGC6
|
||||||
$inventories/scaleway.py:
|
$inventories/scaleway.py:
|
||||||
maintainers: $team_scaleway
|
maintainers: $team_scaleway
|
||||||
labels: cloud scaleway
|
labels: cloud scaleway
|
||||||
@@ -175,6 +179,8 @@ files:
|
|||||||
labels: lookups
|
labels: lookups
|
||||||
$lookups/cartesian.py: {}
|
$lookups/cartesian.py: {}
|
||||||
$lookups/chef_databag.py: {}
|
$lookups/chef_databag.py: {}
|
||||||
|
$lookups/collection_version.py:
|
||||||
|
maintainers: felixfontein
|
||||||
$lookups/consul_kv.py: {}
|
$lookups/consul_kv.py: {}
|
||||||
$lookups/credstash.py: {}
|
$lookups/credstash.py: {}
|
||||||
$lookups/cyberarkpassword.py:
|
$lookups/cyberarkpassword.py:
|
||||||
@@ -205,9 +211,6 @@ files:
|
|||||||
$lookups/manifold.py:
|
$lookups/manifold.py:
|
||||||
maintainers: galanoff
|
maintainers: galanoff
|
||||||
labels: manifold
|
labels: manifold
|
||||||
$lookups/nios:
|
|
||||||
maintainers: $team_networking sganesh-infoblox
|
|
||||||
labels: infoblox networking
|
|
||||||
$lookups/onepass:
|
$lookups/onepass:
|
||||||
maintainers: samdoran
|
maintainers: samdoran
|
||||||
labels: onepassword
|
labels: onepassword
|
||||||
@@ -220,8 +223,12 @@ files:
|
|||||||
maintainers: Akasurde
|
maintainers: Akasurde
|
||||||
$lookups/random_string.py:
|
$lookups/random_string.py:
|
||||||
maintainers: Akasurde
|
maintainers: Akasurde
|
||||||
|
$lookups/random_words.py:
|
||||||
|
maintainers: konstruktoid
|
||||||
$lookups/redis.py:
|
$lookups/redis.py:
|
||||||
maintainers: $team_ansible_core jpmens
|
maintainers: $team_ansible_core jpmens
|
||||||
|
$lookups/revbitspss.py:
|
||||||
|
maintainers: RevBits
|
||||||
$lookups/shelvefile.py: {}
|
$lookups/shelvefile.py: {}
|
||||||
$lookups/tss.py:
|
$lookups/tss.py:
|
||||||
maintainers: amigus endlesstrax
|
maintainers: amigus endlesstrax
|
||||||
@@ -253,9 +260,6 @@ files:
|
|||||||
$module_utils/module_helper.py:
|
$module_utils/module_helper.py:
|
||||||
maintainers: russoz
|
maintainers: russoz
|
||||||
labels: module_helper
|
labels: module_helper
|
||||||
$module_utils/net_tools/nios/api.py:
|
|
||||||
maintainers: $team_networking sganesh-infoblox
|
|
||||||
labels: infoblox networking
|
|
||||||
$module_utils/oracle/oci_utils.py:
|
$module_utils/oracle/oci_utils.py:
|
||||||
maintainers: $team_oracle
|
maintainers: $team_oracle
|
||||||
labels: cloud
|
labels: cloud
|
||||||
@@ -320,6 +324,10 @@ files:
|
|||||||
$modules/cloud/misc/proxmox_kvm.py:
|
$modules/cloud/misc/proxmox_kvm.py:
|
||||||
maintainers: helldorado
|
maintainers: helldorado
|
||||||
ignore: skvidal
|
ignore: skvidal
|
||||||
|
$modules/cloud/misc/proxmox_nic.py:
|
||||||
|
maintainers: Kogelvis
|
||||||
|
$modules/cloud/misc/proxmox_tasks_info:
|
||||||
|
maintainers: paginabianca
|
||||||
$modules/cloud/misc/proxmox_template.py:
|
$modules/cloud/misc/proxmox_template.py:
|
||||||
maintainers: UnderGreen
|
maintainers: UnderGreen
|
||||||
ignore: skvidal
|
ignore: skvidal
|
||||||
@@ -339,7 +347,7 @@ files:
|
|||||||
$modules/cloud/oneandone/:
|
$modules/cloud/oneandone/:
|
||||||
maintainers: aajdinov edevenport
|
maintainers: aajdinov edevenport
|
||||||
$modules/cloud/online/:
|
$modules/cloud/online/:
|
||||||
maintainers: sieben
|
maintainers: remyleone
|
||||||
$modules/cloud/opennebula/:
|
$modules/cloud/opennebula/:
|
||||||
maintainers: $team_opennebula
|
maintainers: $team_opennebula
|
||||||
$modules/cloud/opennebula/one_host.py:
|
$modules/cloud/opennebula/one_host.py:
|
||||||
@@ -409,11 +417,11 @@ files:
|
|||||||
$modules/cloud/scaleway/scaleway_ip_info.py:
|
$modules/cloud/scaleway/scaleway_ip_info.py:
|
||||||
maintainers: Spredzy
|
maintainers: Spredzy
|
||||||
$modules/cloud/scaleway/scaleway_organization_info.py:
|
$modules/cloud/scaleway/scaleway_organization_info.py:
|
||||||
maintainers: sieben Spredzy
|
maintainers: Spredzy
|
||||||
$modules/cloud/scaleway/scaleway_security_group.py:
|
$modules/cloud/scaleway/scaleway_security_group.py:
|
||||||
maintainers: DenBeke
|
maintainers: DenBeke
|
||||||
$modules/cloud/scaleway/scaleway_security_group_info.py:
|
$modules/cloud/scaleway/scaleway_security_group_info.py:
|
||||||
maintainers: sieben Spredzy
|
maintainers: Spredzy
|
||||||
$modules/cloud/scaleway/scaleway_security_group_rule.py:
|
$modules/cloud/scaleway/scaleway_security_group_rule.py:
|
||||||
maintainers: DenBeke
|
maintainers: DenBeke
|
||||||
$modules/cloud/scaleway/scaleway_server_info.py:
|
$modules/cloud/scaleway/scaleway_server_info.py:
|
||||||
@@ -477,11 +485,16 @@ files:
|
|||||||
maintainers: paginabianca
|
maintainers: paginabianca
|
||||||
$modules/database/misc/redis_data.py:
|
$modules/database/misc/redis_data.py:
|
||||||
maintainers: paginabianca
|
maintainers: paginabianca
|
||||||
|
$modules/database/misc/redis_data_incr.py:
|
||||||
|
maintainers: paginabianca
|
||||||
$modules/database/misc/riak.py:
|
$modules/database/misc/riak.py:
|
||||||
maintainers: drewkerrigan jsmartin
|
maintainers: drewkerrigan jsmartin
|
||||||
$modules/database/mssql/mssql_db.py:
|
$modules/database/mssql/mssql_db.py:
|
||||||
maintainers: vedit Jmainguy kenichi-ogawa-1988
|
maintainers: vedit Jmainguy kenichi-ogawa-1988
|
||||||
labels: mssql_db
|
labels: mssql_db
|
||||||
|
$modules/database/mssql/mssql_script.py:
|
||||||
|
maintainers: kbudde
|
||||||
|
labels: mssql_script
|
||||||
$modules/database/saphana/hana_query.py:
|
$modules/database/saphana/hana_query.py:
|
||||||
maintainers: rainerleber
|
maintainers: rainerleber
|
||||||
$modules/database/vertica/:
|
$modules/database/vertica/:
|
||||||
@@ -527,6 +540,8 @@ files:
|
|||||||
maintainers: adamgoossens
|
maintainers: adamgoossens
|
||||||
$modules/identity/keycloak/keycloak_identity_provider.py:
|
$modules/identity/keycloak/keycloak_identity_provider.py:
|
||||||
maintainers: laurpaum
|
maintainers: laurpaum
|
||||||
|
$modules/identity/keycloak/keycloak_realm_info.py:
|
||||||
|
maintainers: fynncfchen
|
||||||
$modules/identity/keycloak/keycloak_realm.py:
|
$modules/identity/keycloak/keycloak_realm.py:
|
||||||
maintainers: kris2kris
|
maintainers: kris2kris
|
||||||
$modules/identity/keycloak/keycloak_role.py:
|
$modules/identity/keycloak/keycloak_role.py:
|
||||||
@@ -612,6 +627,8 @@ files:
|
|||||||
labels: cloudflare_dns
|
labels: cloudflare_dns
|
||||||
$modules/net_tools/dnsimple.py:
|
$modules/net_tools/dnsimple.py:
|
||||||
maintainers: drcapulet
|
maintainers: drcapulet
|
||||||
|
$modules/net_tools/dnsimple_info.py:
|
||||||
|
maintainers: edhilgendorf
|
||||||
$modules/net_tools/dnsmadeeasy.py:
|
$modules/net_tools/dnsmadeeasy.py:
|
||||||
maintainers: briceburg
|
maintainers: briceburg
|
||||||
$modules/net_tools/gandi_livedns.py:
|
$modules/net_tools/gandi_livedns.py:
|
||||||
@@ -647,31 +664,6 @@ files:
|
|||||||
maintainers: amasolov nerzhul
|
maintainers: amasolov nerzhul
|
||||||
$modules/net_tools/pritunl/:
|
$modules/net_tools/pritunl/:
|
||||||
maintainers: Lowess
|
maintainers: Lowess
|
||||||
$modules/net_tools/nios/:
|
|
||||||
maintainers: $team_networking
|
|
||||||
labels: infoblox networking
|
|
||||||
$modules/net_tools/nios/nios_a_record.py:
|
|
||||||
maintainers: brampling
|
|
||||||
$modules/net_tools/nios/nios_aaaa_record.py:
|
|
||||||
maintainers: brampling
|
|
||||||
$modules/net_tools/nios/nios_cname_record.py:
|
|
||||||
maintainers: brampling
|
|
||||||
$modules/net_tools/nios/nios_fixed_address.py:
|
|
||||||
maintainers: sjaiswal
|
|
||||||
$modules/net_tools/nios/nios_member.py:
|
|
||||||
maintainers: krisvasudevan
|
|
||||||
$modules/net_tools/nios/nios_mx_record.py:
|
|
||||||
maintainers: brampling
|
|
||||||
$modules/net_tools/nios/nios_naptr_record.py:
|
|
||||||
maintainers: brampling
|
|
||||||
$modules/net_tools/nios/nios_nsgroup.py:
|
|
||||||
maintainers: ebirn sjaiswal
|
|
||||||
$modules/net_tools/nios/nios_ptr_record.py:
|
|
||||||
maintainers: clementtrebuchet
|
|
||||||
$modules/net_tools/nios/nios_srv_record.py:
|
|
||||||
maintainers: brampling
|
|
||||||
$modules/net_tools/nios/nios_txt_record.py:
|
|
||||||
maintainers: coreywan
|
|
||||||
$modules/net_tools/nmcli.py:
|
$modules/net_tools/nmcli.py:
|
||||||
maintainers: alcamie101
|
maintainers: alcamie101
|
||||||
$modules/net_tools/snmp_facts.py:
|
$modules/net_tools/snmp_facts.py:
|
||||||
@@ -738,6 +730,8 @@ files:
|
|||||||
maintainers: mwarkentin
|
maintainers: mwarkentin
|
||||||
$modules/packaging/language/bundler.py:
|
$modules/packaging/language/bundler.py:
|
||||||
maintainers: thoiberg
|
maintainers: thoiberg
|
||||||
|
$modules/packaging/language/cargo.py:
|
||||||
|
maintainers: radek-sprta
|
||||||
$modules/packaging/language/composer.py:
|
$modules/packaging/language/composer.py:
|
||||||
maintainers: dmtrs
|
maintainers: dmtrs
|
||||||
ignore: resmo
|
ignore: resmo
|
||||||
@@ -775,6 +769,8 @@ files:
|
|||||||
maintainers: evgkrsk
|
maintainers: evgkrsk
|
||||||
$modules/packaging/os/copr.py:
|
$modules/packaging/os/copr.py:
|
||||||
maintainers: schlupov
|
maintainers: schlupov
|
||||||
|
$modules/packaging/os/dnf_versionlock.py:
|
||||||
|
maintainers: moreda
|
||||||
$modules/packaging/os/flatpak.py:
|
$modules/packaging/os/flatpak.py:
|
||||||
maintainers: $team_flatpak
|
maintainers: $team_flatpak
|
||||||
$modules/packaging/os/flatpak_remote.py:
|
$modules/packaging/os/flatpak_remote.py:
|
||||||
@@ -871,6 +867,9 @@ files:
|
|||||||
$modules/packaging/os/snap.py:
|
$modules/packaging/os/snap.py:
|
||||||
maintainers: angristan vcarceler
|
maintainers: angristan vcarceler
|
||||||
labels: snap
|
labels: snap
|
||||||
|
$modules/packaging/os/snap_alias.py:
|
||||||
|
maintainers: russoz
|
||||||
|
labels: snap
|
||||||
$modules/packaging/os/sorcery.py:
|
$modules/packaging/os/sorcery.py:
|
||||||
maintainers: vaygr
|
maintainers: vaygr
|
||||||
$modules/packaging/os/svr4pkg.py:
|
$modules/packaging/os/svr4pkg.py:
|
||||||
@@ -914,6 +913,10 @@ files:
|
|||||||
$modules/remote_management/manageiq/:
|
$modules/remote_management/manageiq/:
|
||||||
labels: manageiq
|
labels: manageiq
|
||||||
maintainers: $team_manageiq
|
maintainers: $team_manageiq
|
||||||
|
$modules/remote_management/manageiq/manageiq_alert_profiles.py:
|
||||||
|
maintainers: elad661
|
||||||
|
$modules/remote_management/manageiq/manageiq_alerts.py:
|
||||||
|
maintainers: elad661
|
||||||
$modules/remote_management/manageiq/manageiq_group.py:
|
$modules/remote_management/manageiq/manageiq_group.py:
|
||||||
maintainers: evertmulder
|
maintainers: evertmulder
|
||||||
$modules/remote_management/manageiq/manageiq_tenant.py:
|
$modules/remote_management/manageiq/manageiq_tenant.py:
|
||||||
@@ -964,6 +967,8 @@ files:
|
|||||||
maintainers: SamyCoenen
|
maintainers: SamyCoenen
|
||||||
$modules/source_control/gitlab/gitlab_user.py:
|
$modules/source_control/gitlab/gitlab_user.py:
|
||||||
maintainers: LennertMertens stgrace
|
maintainers: LennertMertens stgrace
|
||||||
|
$modules/source_control/gitlab/gitlab_branch.py:
|
||||||
|
maintainers: paytroff
|
||||||
$modules/source_control/hg.py:
|
$modules/source_control/hg.py:
|
||||||
maintainers: yeukhon
|
maintainers: yeukhon
|
||||||
$modules/storage/emc/emc_vnx_sg_member.py:
|
$modules/storage/emc/emc_vnx_sg_member.py:
|
||||||
@@ -1099,6 +1104,8 @@ files:
|
|||||||
keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
|
keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
|
||||||
$modules/system/ssh_config.py:
|
$modules/system/ssh_config.py:
|
||||||
maintainers: gaqzi Akasurde
|
maintainers: gaqzi Akasurde
|
||||||
|
$modules/system/sudoers.py:
|
||||||
|
maintainers: JonEllis
|
||||||
$modules/system/svc.py:
|
$modules/system/svc.py:
|
||||||
maintainers: bcoca
|
maintainers: bcoca
|
||||||
$modules/system/syspatch.py:
|
$modules/system/syspatch.py:
|
||||||
@@ -1188,6 +1195,8 @@ files:
|
|||||||
maintainers: inetfuture mattupstate
|
maintainers: inetfuture mattupstate
|
||||||
$modules/web_infrastructure/taiga_issue.py:
|
$modules/web_infrastructure/taiga_issue.py:
|
||||||
maintainers: lekum
|
maintainers: lekum
|
||||||
|
$tests/a_module.py:
|
||||||
|
maintainers: felixfontein
|
||||||
#########################
|
#########################
|
||||||
tests/:
|
tests/:
|
||||||
labels: tests
|
labels: tests
|
||||||
@@ -1214,6 +1223,7 @@ macros:
|
|||||||
module_utils: plugins/module_utils
|
module_utils: plugins/module_utils
|
||||||
modules: plugins/modules
|
modules: plugins/modules
|
||||||
terminals: plugins/terminal
|
terminals: plugins/terminal
|
||||||
|
tests: plugins/test
|
||||||
team_ansible_core:
|
team_ansible_core:
|
||||||
team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross
|
team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross
|
||||||
team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo
|
team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo
|
||||||
@@ -1234,9 +1244,9 @@ macros:
|
|||||||
team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding
|
team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding
|
||||||
team_oracle: manojmeda mross22 nalsaber
|
team_oracle: manojmeda mross22 nalsaber
|
||||||
team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
|
team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
|
||||||
team_redfish: mraineri tomasg2012 xmadsen renxulei
|
team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06
|
||||||
team_rhn: FlossWare alikins barnabycourt vritant
|
team_rhn: FlossWare alikins barnabycourt vritant
|
||||||
team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone sieben
|
team_scaleway: remyleone abarbare
|
||||||
team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
|
team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
|
||||||
team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom sealor
|
team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom sealor
|
||||||
team_virt: joshainglis karmab tleguern Thulium-Drake Ajpantuso
|
team_virt: joshainglis karmab tleguern Thulium-Drake Ajpantuso
|
||||||
|
|||||||
14
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
14
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
@@ -62,6 +62,20 @@ body:
|
|||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Community.general Version
|
||||||
|
description: >-
|
||||||
|
Paste verbatim output from "ansible-galaxy collection list community.general"
|
||||||
|
between tripple backticks.
|
||||||
|
value: |
|
||||||
|
```console (paste below)
|
||||||
|
$ ansible-galaxy collection list community.general
|
||||||
|
|
||||||
|
```
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
attributes:
|
attributes:
|
||||||
label: Configuration
|
label: Configuration
|
||||||
|
|||||||
14
.github/ISSUE_TEMPLATE/documentation_report.yml
vendored
14
.github/ISSUE_TEMPLATE/documentation_report.yml
vendored
@@ -62,6 +62,20 @@ body:
|
|||||||
validations:
|
validations:
|
||||||
required: false
|
required: false
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Community.general Version
|
||||||
|
description: >-
|
||||||
|
Paste verbatim output from "ansible-galaxy collection list community.general"
|
||||||
|
between tripple backticks.
|
||||||
|
value: |
|
||||||
|
```console (paste below)
|
||||||
|
$ ansible-galaxy collection list community.general
|
||||||
|
|
||||||
|
```
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
attributes:
|
attributes:
|
||||||
label: Configuration
|
label: Configuration
|
||||||
|
|||||||
2
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
2
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
@@ -21,7 +21,7 @@ body:
|
|||||||
placeholder: >-
|
placeholder: >-
|
||||||
I am trying to do X with the collection from the main branch on GitHub and
|
I am trying to do X with the collection from the main branch on GitHub and
|
||||||
I think that implementing a feature Y would be very helpful for me and
|
I think that implementing a feature Y would be very helpful for me and
|
||||||
every other user of ansible-core because of Z.
|
every other user of community.general because of Z.
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
|
|
||||||
|
|||||||
6
.github/dependabot.yml
vendored
Normal file
6
.github/dependabot.yml
vendored
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
version: 2
|
||||||
|
updates:
|
||||||
|
- package-ecosystem: "github-actions"
|
||||||
|
directory: "/"
|
||||||
|
interval:
|
||||||
|
schedule: "weekly"
|
||||||
1690
CHANGELOG.rst
1690
CHANGELOG.rst
File diff suppressed because it is too large
Load Diff
12
README.md
12
README.md
@@ -1,6 +1,6 @@
|
|||||||
# Community General Collection
|
# Community General Collection
|
||||||
|
|
||||||
[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
|
[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
|
||||||
[](https://codecov.io/gh/ansible-collections/community.general)
|
[](https://codecov.io/gh/ansible-collections/community.general)
|
||||||
|
|
||||||
This repository contains the `community.general` Ansible Collection. The collection is a part of the Ansible package and includes many modules and plugins supported by Ansible community which are not part of more specialized community collections.
|
This repository contains the `community.general` Ansible Collection. The collection is a part of the Ansible package and includes many modules and plugins supported by Ansible community which are not part of more specialized community collections.
|
||||||
@@ -64,13 +64,13 @@ We are actively accepting new contributors.
|
|||||||
|
|
||||||
All types of contributions are very welcome.
|
All types of contributions are very welcome.
|
||||||
|
|
||||||
You don't know how to start? Refer to our [contribution guide](https://github.com/ansible-collections/community.general/blob/main/CONTRIBUTING.md)!
|
You don't know how to start? Refer to our [contribution guide](https://github.com/ansible-collections/community.general/blob/stable-4/CONTRIBUTING.md)!
|
||||||
|
|
||||||
The current maintainers are listed in the [commit-rights.md](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md#people) file. If you have questions or need help, feel free to mention them in the proposals.
|
The current maintainers are listed in the [commit-rights.md](https://github.com/ansible-collections/community.general/blob/stable-4/commit-rights.md#people) file. If you have questions or need help, feel free to mention them in the proposals.
|
||||||
|
|
||||||
You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).
|
You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).
|
||||||
|
|
||||||
Also for some notes specific to this collection see [our CONTRIBUTING documentation](https://github.com/ansible-collections/community.general/blob/main/CONTRIBUTING.md).
|
Also for some notes specific to this collection see [our CONTRIBUTING documentation](https://github.com/ansible-collections/community.general/blob/stable-4/CONTRIBUTING.md).
|
||||||
|
|
||||||
### Running tests
|
### Running tests
|
||||||
|
|
||||||
@@ -80,7 +80,7 @@ See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collectio
|
|||||||
|
|
||||||
To learn how to maintain / become a maintainer of this collection, refer to:
|
To learn how to maintain / become a maintainer of this collection, refer to:
|
||||||
|
|
||||||
* [Committer guidelines](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md).
|
* [Committer guidelines](https://github.com/ansible-collections/community.general/blob/stable-4/commit-rights.md).
|
||||||
* [Maintainer guidelines](https://github.com/ansible/community-docs/blob/main/maintaining.rst).
|
* [Maintainer guidelines](https://github.com/ansible/community-docs/blob/main/maintaining.rst).
|
||||||
|
|
||||||
It is necessary for maintainers of this collection to be subscribed to:
|
It is necessary for maintainers of this collection to be subscribed to:
|
||||||
@@ -108,7 +108,7 @@ See the [Releasing guidelines](https://github.com/ansible/community-docs/blob/ma
|
|||||||
|
|
||||||
## Release notes
|
## Release notes
|
||||||
|
|
||||||
See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-3/CHANGELOG.rst).
|
See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-4/CHANGELOG.rst).
|
||||||
|
|
||||||
## Roadmap
|
## Roadmap
|
||||||
|
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -3,3 +3,4 @@ sections:
|
|||||||
- title: Guides
|
- title: Guides
|
||||||
toctree:
|
toctree:
|
||||||
- filter_guide
|
- filter_guide
|
||||||
|
- test_guide
|
||||||
|
|||||||
@@ -297,6 +297,84 @@ This produces:
|
|||||||
|
|
||||||
.. versionadded: 2.0.0
|
.. versionadded: 2.0.0
|
||||||
|
|
||||||
|
Counting elements in a sequence
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
The ``community.general.counter`` filter plugin allows you to count (hashable) elements in a sequence. Elements are returned as dictionary keys and their counts are stored as dictionary values.
|
||||||
|
|
||||||
|
.. code-block:: yaml+jinja
|
||||||
|
|
||||||
|
- name: Count character occurrences in a string
|
||||||
|
debug:
|
||||||
|
msg: "{{ 'abccbaabca' | community.general.counter }}"
|
||||||
|
|
||||||
|
- name: Count items in a list
|
||||||
|
debug:
|
||||||
|
msg: "{{ ['car', 'car', 'bike', 'plane', 'bike'] | community.general.counter }}"
|
||||||
|
|
||||||
|
This produces:
|
||||||
|
|
||||||
|
.. code-block:: ansible-output
|
||||||
|
|
||||||
|
TASK [Count character occurrences in a string] ********************************************
|
||||||
|
ok: [localhost] => {
|
||||||
|
"msg": {
|
||||||
|
"a": 4,
|
||||||
|
"b": 3,
|
||||||
|
"c": 3
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
TASK [Count items in a list] **************************************************************
|
||||||
|
ok: [localhost] => {
|
||||||
|
"msg": {
|
||||||
|
"bike": 2,
|
||||||
|
"car": 2,
|
||||||
|
"plane": 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
This plugin is useful for selecting resources based on current allocation:
|
||||||
|
|
||||||
|
.. code-block:: yaml+jinja
|
||||||
|
|
||||||
|
- name: Get ID of SCSI controller(s) with less than 4 disks attached and choose the one with the least disks
|
||||||
|
debug:
|
||||||
|
msg: >-
|
||||||
|
{{
|
||||||
|
( disks | dict2items | map(attribute='value.adapter') | list
|
||||||
|
| community.general.counter | dict2items
|
||||||
|
| rejectattr('value', '>=', 4) | sort(attribute='value') | first
|
||||||
|
).key
|
||||||
|
}}
|
||||||
|
vars:
|
||||||
|
disks:
|
||||||
|
sda:
|
||||||
|
adapter: scsi_1
|
||||||
|
sdb:
|
||||||
|
adapter: scsi_1
|
||||||
|
sdc:
|
||||||
|
adapter: scsi_1
|
||||||
|
sdd:
|
||||||
|
adapter: scsi_1
|
||||||
|
sde:
|
||||||
|
adapter: scsi_2
|
||||||
|
sdf:
|
||||||
|
adapter: scsi_3
|
||||||
|
sdg:
|
||||||
|
adapter: scsi_3
|
||||||
|
|
||||||
|
This produces:
|
||||||
|
|
||||||
|
.. code-block:: ansible-output
|
||||||
|
|
||||||
|
TASK [Get ID of SCSI controller(s) with less than 4 disks attached and choose the one with the least disks]
|
||||||
|
ok: [localhost] => {
|
||||||
|
"msg": "scsi_2"
|
||||||
|
}
|
||||||
|
|
||||||
|
.. versionadded:: 4.3.0
|
||||||
|
|
||||||
Working with times
|
Working with times
|
||||||
------------------
|
------------------
|
||||||
|
|
||||||
|
|||||||
28
docs/docsite/rst/test_guide.rst
Normal file
28
docs/docsite/rst/test_guide.rst
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
.. _ansible_collections.community.general.docsite.test_guide:
|
||||||
|
|
||||||
|
community.general Test (Plugin) Guide
|
||||||
|
=====================================
|
||||||
|
|
||||||
|
The :ref:`community.general collection <plugins_in_community.general>` offers currently one test plugin.
|
||||||
|
|
||||||
|
.. contents:: Topics
|
||||||
|
|
||||||
|
Feature Tests
|
||||||
|
-------------
|
||||||
|
|
||||||
|
The ``a_module`` test allows to check whether a given string refers to an existing module or action plugin. This can be useful in roles, which can use this to ensure that required modules are present ahead of time.
|
||||||
|
|
||||||
|
.. code-block:: yaml+jinja
|
||||||
|
|
||||||
|
- name: Make sure that community.aws.route53 is available
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- >
|
||||||
|
'community.aws.route53' is community.general.a_module
|
||||||
|
|
||||||
|
- name: Make sure that community.general.does_not_exist is not a module or action plugin
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- "'community.general.does_not_exist' is not community.general.a_module"
|
||||||
|
|
||||||
|
.. versionadded:: 4.0.0
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
namespace: community
|
namespace: community
|
||||||
name: general
|
name: general
|
||||||
version: 3.8.2
|
version: 4.3.0
|
||||||
readme: README.md
|
readme: README.md
|
||||||
authors:
|
authors:
|
||||||
- Ansible (https://github.com/ansible)
|
- Ansible (https://github.com/ansible)
|
||||||
|
|||||||
105
meta/runtime.yml
105
meta/runtime.yml
@@ -12,20 +12,11 @@ plugin_routing:
|
|||||||
hashi_vault:
|
hashi_vault:
|
||||||
redirect: community.hashi_vault.hashi_vault
|
redirect: community.hashi_vault.hashi_vault
|
||||||
nios:
|
nios:
|
||||||
deprecation:
|
redirect: infoblox.nios_modules.nios_lookup
|
||||||
removal_version: 5.0.0
|
|
||||||
warning_text: The community.general.nios lookup plugin has been deprecated.
|
|
||||||
Please use infoblox.nios_modules.nios_lookup instead.
|
|
||||||
nios_next_ip:
|
nios_next_ip:
|
||||||
deprecation:
|
redirect: infoblox.nios_modules.nios_next_ip
|
||||||
removal_version: 5.0.0
|
|
||||||
warning_text: The community.general.nios_next_ip lookup plugin has been deprecated.
|
|
||||||
Please use infoblox.nios_modules.nios_next_ip instead.
|
|
||||||
nios_next_network:
|
nios_next_network:
|
||||||
deprecation:
|
redirect: infoblox.nios_modules.nios_next_network
|
||||||
removal_version: 5.0.0
|
|
||||||
warning_text: The community.general.nios_next_network lookup plugin has been
|
|
||||||
deprecated. Please use infoblox.nios_modules.nios_next_network instead.
|
|
||||||
modules:
|
modules:
|
||||||
ali_instance_facts:
|
ali_instance_facts:
|
||||||
tombstone:
|
tombstone:
|
||||||
@@ -266,85 +257,37 @@ plugin_routing:
|
|||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
warning_text: Use community.general.nginx_status_info instead.
|
warning_text: Use community.general.nginx_status_info instead.
|
||||||
nios_a_record:
|
nios_a_record:
|
||||||
deprecation:
|
redirect: infoblox.nios_modules.nios_a_record
|
||||||
removal_version: 5.0.0
|
|
||||||
warning_text: The community.general.nios_a_record module has been deprecated.
|
|
||||||
Please use infoblox.nios_modules.nios_a_record instead.
|
|
||||||
nios_aaaa_record:
|
nios_aaaa_record:
|
||||||
deprecation:
|
redirect: infoblox.nios_modules.nios_aaaa_record
|
||||||
removal_version: 5.0.0
|
|
||||||
warning_text: The community.general.nios_aaaa_record module has been deprecated.
|
|
||||||
Please use infoblox.nios_modules.nios_aaaa_record instead.
|
|
||||||
nios_cname_record:
|
nios_cname_record:
|
||||||
deprecation:
|
redirect: infoblox.nios_modules.nios_cname_record
|
||||||
removal_version: 5.0.0
|
|
||||||
warning_text: The community.general.nios_cname_record module has been deprecated.
|
|
||||||
Please use infoblox.nios_modules.nios_cname_record instead.
|
|
||||||
nios_dns_view:
|
nios_dns_view:
|
||||||
deprecation:
|
redirect: infoblox.nios_modules.nios_dns_view
|
||||||
removal_version: 5.0.0
|
|
||||||
warning_text: The community.general.nios_dns_view module has been deprecated.
|
|
||||||
Please use infoblox.nios_modules.nios_dns_view instead.
|
|
||||||
nios_fixed_address:
|
nios_fixed_address:
|
||||||
deprecation:
|
redirect: infoblox.nios_modules.nios_fixed_address
|
||||||
removal_version: 5.0.0
|
|
||||||
warning_text: The community.general.nios_fixed_address module has been deprecated.
|
|
||||||
Please use infoblox.nios_modules.nios_fixed_address instead.
|
|
||||||
nios_host_record:
|
nios_host_record:
|
||||||
deprecation:
|
redirect: infoblox.nios_modules.nios_host_record
|
||||||
removal_version: 5.0.0
|
|
||||||
warning_text: The community.general.nios_host_record module has been deprecated.
|
|
||||||
Please use infoblox.nios_modules.nios_host_record instead.
|
|
||||||
nios_member:
|
nios_member:
|
||||||
deprecation:
|
redirect: infoblox.nios_modules.nios_member
|
||||||
removal_version: 5.0.0
|
|
||||||
warning_text: The community.general.nios_member module has been deprecated.
|
|
||||||
Please use infoblox.nios_modules.nios_member instead.
|
|
||||||
nios_mx_record:
|
nios_mx_record:
|
||||||
deprecation:
|
redirect: infoblox.nios_modules.nios_mx_record
|
||||||
removal_version: 5.0.0
|
|
||||||
warning_text: The community.general.nios_mx_record module has been deprecated.
|
|
||||||
Please use infoblox.nios_modules.nios_mx_record instead.
|
|
||||||
nios_naptr_record:
|
nios_naptr_record:
|
||||||
deprecation:
|
redirect: infoblox.nios_modules.nios_naptr_record
|
||||||
removal_version: 5.0.0
|
|
||||||
warning_text: The community.general.nios_naptr_record module has been deprecated.
|
|
||||||
Please use infoblox.nios_modules.nios_naptr_record instead.
|
|
||||||
nios_network:
|
nios_network:
|
||||||
deprecation:
|
redirect: infoblox.nios_modules.nios_network
|
||||||
removal_version: 5.0.0
|
|
||||||
warning_text: The community.general.nios_network module has been deprecated.
|
|
||||||
Please use infoblox.nios_modules.nios_network instead.
|
|
||||||
nios_network_view:
|
nios_network_view:
|
||||||
deprecation:
|
redirect: infoblox.nios_modules.nios_network_view
|
||||||
removal_version: 5.0.0
|
|
||||||
warning_text: The community.general.nios_network_view module has been deprecated.
|
|
||||||
Please use infoblox.nios_modules.nios_network_view instead.
|
|
||||||
nios_nsgroup:
|
nios_nsgroup:
|
||||||
deprecation:
|
redirect: infoblox.nios_modules.nios_nsgroup
|
||||||
removal_version: 5.0.0
|
|
||||||
warning_text: The community.general.nios_nsgroup module has been deprecated.
|
|
||||||
Please use infoblox.nios_modules.nios_nsgroup instead.
|
|
||||||
nios_ptr_record:
|
nios_ptr_record:
|
||||||
deprecation:
|
redirect: infoblox.nios_modules.nios_ptr_record
|
||||||
removal_version: 5.0.0
|
|
||||||
warning_text: The community.general.nios_ptr_record module has been deprecated.
|
|
||||||
Please use infoblox.nios_modules.nios_ptr_record instead.
|
|
||||||
nios_srv_record:
|
nios_srv_record:
|
||||||
deprecation:
|
redirect: infoblox.nios_modules.nios_srv_record
|
||||||
removal_version: 5.0.0
|
|
||||||
warning_text: The community.general.nios_srv_record module has been deprecated.
|
|
||||||
Please use infoblox.nios_modules.nios_srv_record instead.
|
|
||||||
nios_txt_record:
|
nios_txt_record:
|
||||||
deprecation:
|
redirect: infoblox.nios_modules.nios_txt_record
|
||||||
removal_version: 5.0.0
|
|
||||||
warning_text: The community.general.nios_txt_record module has been deprecated.
|
|
||||||
Please use infoblox.nios_modules.nios_txt_record instead.
|
|
||||||
nios_zone:
|
nios_zone:
|
||||||
deprecation:
|
redirect: infoblox.nios_modules.nios_zone
|
||||||
removal_version: 5.0.0
|
|
||||||
warning_text: The community.general.nios_zone module has been deprecated.
|
|
||||||
Please use infoblox.nios_modules.nios_zone instead.
|
|
||||||
ome_device_info:
|
ome_device_info:
|
||||||
redirect: dellemc.openmanage.ome_device_info
|
redirect: dellemc.openmanage.ome_device_info
|
||||||
one_image_facts:
|
one_image_facts:
|
||||||
@@ -628,10 +571,7 @@ plugin_routing:
|
|||||||
kubevirt_vm_options:
|
kubevirt_vm_options:
|
||||||
redirect: community.kubevirt.kubevirt_vm_options
|
redirect: community.kubevirt.kubevirt_vm_options
|
||||||
nios:
|
nios:
|
||||||
deprecation:
|
redirect: infoblox.nios_modules.nios
|
||||||
removal_version: 5.0.0
|
|
||||||
warning_text: The community.general.nios document fragment has been deprecated.
|
|
||||||
Please use infoblox.nios_modules.nios instead.
|
|
||||||
postgresql:
|
postgresql:
|
||||||
redirect: community.postgresql.postgresql
|
redirect: community.postgresql.postgresql
|
||||||
module_utils:
|
module_utils:
|
||||||
@@ -650,10 +590,7 @@ plugin_routing:
|
|||||||
kubevirt:
|
kubevirt:
|
||||||
redirect: community.kubevirt.kubevirt
|
redirect: community.kubevirt.kubevirt
|
||||||
net_tools.nios.api:
|
net_tools.nios.api:
|
||||||
deprecation:
|
redirect: infoblox.nios_modules.api
|
||||||
removal_version: 5.0.0
|
|
||||||
warning_text: The community.general.net_tools.nios.api module_utils has been
|
|
||||||
deprecated. Please use infoblox.nios_modules.api instead.
|
|
||||||
postgresql:
|
postgresql:
|
||||||
redirect: community.postgresql.postgresql
|
redirect: community.postgresql.postgresql
|
||||||
remote_management.dellemc.dellemc_idrac:
|
remote_management.dellemc.dellemc_idrac:
|
||||||
|
|||||||
@@ -226,18 +226,15 @@ class ElasticSource(object):
|
|||||||
|
|
||||||
message = "success"
|
message = "success"
|
||||||
status = "success"
|
status = "success"
|
||||||
|
enriched_error_message = None
|
||||||
if host_data.status == 'included':
|
if host_data.status == 'included':
|
||||||
rc = 0
|
rc = 0
|
||||||
else:
|
else:
|
||||||
res = host_data.result._result
|
res = host_data.result._result
|
||||||
rc = res.get('rc', 0)
|
rc = res.get('rc', 0)
|
||||||
if host_data.status == 'failed':
|
if host_data.status == 'failed':
|
||||||
if res.get('exception') is not None:
|
message = self.get_error_message(res)
|
||||||
message = res['exception'].strip().split('\n')[-1]
|
enriched_error_message = self.enrich_error_message(res)
|
||||||
elif 'msg' in res:
|
|
||||||
message = res['msg']
|
|
||||||
else:
|
|
||||||
message = 'failed'
|
|
||||||
status = "failure"
|
status = "failure"
|
||||||
elif host_data.status == 'skipped':
|
elif host_data.status == 'skipped':
|
||||||
if 'skip_reason' in res:
|
if 'skip_reason' in res:
|
||||||
@@ -259,7 +256,7 @@ class ElasticSource(object):
|
|||||||
"ansible.task.host.status": host_data.status}) as span:
|
"ansible.task.host.status": host_data.status}) as span:
|
||||||
span.outcome = status
|
span.outcome = status
|
||||||
if 'failure' in status:
|
if 'failure' in status:
|
||||||
exception = AnsibleRuntimeError(message="{0}: {1} failed with error message {2}".format(task_data.action, name, message))
|
exception = AnsibleRuntimeError(message="{0}: {1} failed with error message {2}".format(task_data.action, name, enriched_error_message))
|
||||||
apm_cli.capture_exception(exc_info=(type(exception), exception, exception.__traceback__), handled=True)
|
apm_cli.capture_exception(exc_info=(type(exception), exception, exception.__traceback__), handled=True)
|
||||||
|
|
||||||
def init_apm_client(self, apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key):
|
def init_apm_client(self, apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key):
|
||||||
@@ -272,6 +269,24 @@ class ElasticSource(object):
|
|||||||
use_elastic_traceparent_header=True,
|
use_elastic_traceparent_header=True,
|
||||||
debug=True)
|
debug=True)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def get_error_message(result):
|
||||||
|
if result.get('exception') is not None:
|
||||||
|
return ElasticSource._last_line(result['exception'])
|
||||||
|
return result.get('msg', 'failed')
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _last_line(text):
|
||||||
|
lines = text.strip().split('\n')
|
||||||
|
return lines[-1]
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def enrich_error_message(result):
|
||||||
|
message = result.get('msg', 'failed')
|
||||||
|
exception = result.get('exception')
|
||||||
|
stderr = result.get('stderr')
|
||||||
|
return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr)
|
||||||
|
|
||||||
|
|
||||||
class CallbackModule(CallbackBase):
|
class CallbackModule(CallbackBase):
|
||||||
"""
|
"""
|
||||||
|
|||||||
@@ -70,6 +70,7 @@ import os
|
|||||||
import json
|
import json
|
||||||
|
|
||||||
from ansible.module_utils.six.moves.urllib.parse import urlencode
|
from ansible.module_utils.six.moves.urllib.parse import urlencode
|
||||||
|
from ansible.module_utils.common.text.converters import to_bytes
|
||||||
from ansible.module_utils.urls import open_url
|
from ansible.module_utils.urls import open_url
|
||||||
from ansible.plugins.callback import CallbackBase
|
from ansible.plugins.callback import CallbackBase
|
||||||
|
|
||||||
@@ -143,7 +144,7 @@ class CallbackModule(CallbackBase):
|
|||||||
body = {
|
body = {
|
||||||
'cmd': 'submitcheck',
|
'cmd': 'submitcheck',
|
||||||
'token': self.token,
|
'token': self.token,
|
||||||
'XMLDATA': bytes(xmldata)
|
'XMLDATA': to_bytes(xmldata)
|
||||||
}
|
}
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
# (C) 2021, Victor Martinez <VictorMartinezRubio@gmail.com>
|
# (C) 2021, Victor Martinez <VictorMartinezRubio@gmail.com>
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
@@ -79,6 +80,7 @@ from os.path import basename
|
|||||||
|
|
||||||
from ansible.errors import AnsibleError
|
from ansible.errors import AnsibleError
|
||||||
from ansible.module_utils.six import raise_from
|
from ansible.module_utils.six import raise_from
|
||||||
|
from ansible.module_utils.six.moves.urllib.parse import urlparse
|
||||||
from ansible.plugins.callback import CallbackBase
|
from ansible.plugins.callback import CallbackBase
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -90,8 +92,6 @@ try:
|
|||||||
from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
|
from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
|
||||||
from opentelemetry.sdk.trace import TracerProvider
|
from opentelemetry.sdk.trace import TracerProvider
|
||||||
from opentelemetry.sdk.trace.export import (
|
from opentelemetry.sdk.trace.export import (
|
||||||
ConsoleSpanExporter,
|
|
||||||
SimpleSpanProcessor,
|
|
||||||
BatchSpanProcessor
|
BatchSpanProcessor
|
||||||
)
|
)
|
||||||
from opentelemetry.util._time import _time_ns
|
from opentelemetry.util._time import _time_ns
|
||||||
@@ -179,7 +179,7 @@ class OpenTelemetrySource(object):
|
|||||||
args = None
|
args = None
|
||||||
|
|
||||||
if not task.no_log and not hide_task_arguments:
|
if not task.no_log and not hide_task_arguments:
|
||||||
args = ', '.join(('%s=%s' % a for a in task.args.items()))
|
args = task.args
|
||||||
|
|
||||||
tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args)
|
tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args)
|
||||||
|
|
||||||
@@ -246,32 +246,45 @@ class OpenTelemetrySource(object):
|
|||||||
name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
|
name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
|
||||||
|
|
||||||
message = 'success'
|
message = 'success'
|
||||||
|
res = {}
|
||||||
|
rc = 0
|
||||||
status = Status(status_code=StatusCode.OK)
|
status = Status(status_code=StatusCode.OK)
|
||||||
if host_data.status == 'included':
|
if host_data.status != 'included':
|
||||||
rc = 0
|
# Support loops
|
||||||
else:
|
if 'results' in host_data.result._result:
|
||||||
res = host_data.result._result
|
if host_data.status == 'failed':
|
||||||
rc = res.get('rc', 0)
|
message = self.get_error_message_from_results(host_data.result._result['results'], task_data.action)
|
||||||
if host_data.status == 'failed':
|
enriched_error_message = self.enrich_error_message_from_results(host_data.result._result['results'], task_data.action)
|
||||||
|
else:
|
||||||
|
res = host_data.result._result
|
||||||
|
rc = res.get('rc', 0)
|
||||||
message = self.get_error_message(res)
|
message = self.get_error_message(res)
|
||||||
|
enriched_error_message = self.enrich_error_message(res)
|
||||||
|
|
||||||
|
if host_data.status == 'failed':
|
||||||
status = Status(status_code=StatusCode.ERROR, description=message)
|
status = Status(status_code=StatusCode.ERROR, description=message)
|
||||||
# Record an exception with the task message
|
# Record an exception with the task message
|
||||||
span.record_exception(BaseException(self.enrich_error_message(res)))
|
span.record_exception(BaseException(enriched_error_message))
|
||||||
elif host_data.status == 'skipped':
|
elif host_data.status == 'skipped':
|
||||||
if 'skip_reason' in res:
|
message = res['skip_reason'] if 'skip_reason' in res else 'skipped'
|
||||||
message = res['skip_reason']
|
status = Status(status_code=StatusCode.UNSET)
|
||||||
else:
|
elif host_data.status == 'ignored':
|
||||||
message = 'skipped'
|
|
||||||
status = Status(status_code=StatusCode.UNSET)
|
status = Status(status_code=StatusCode.UNSET)
|
||||||
|
|
||||||
span.set_status(status)
|
span.set_status(status)
|
||||||
self.set_span_attribute(span, "ansible.task.args", task_data.args)
|
if isinstance(task_data.args, dict) and "gather_facts" not in task_data.action:
|
||||||
|
names = tuple(self.transform_ansible_unicode_to_str(k) for k in task_data.args.keys())
|
||||||
|
values = tuple(self.transform_ansible_unicode_to_str(k) for k in task_data.args.values())
|
||||||
|
self.set_span_attribute(span, ("ansible.task.args.name"), names)
|
||||||
|
self.set_span_attribute(span, ("ansible.task.args.value"), values)
|
||||||
self.set_span_attribute(span, "ansible.task.module", task_data.action)
|
self.set_span_attribute(span, "ansible.task.module", task_data.action)
|
||||||
self.set_span_attribute(span, "ansible.task.message", message)
|
self.set_span_attribute(span, "ansible.task.message", message)
|
||||||
self.set_span_attribute(span, "ansible.task.name", name)
|
self.set_span_attribute(span, "ansible.task.name", name)
|
||||||
self.set_span_attribute(span, "ansible.task.result", rc)
|
self.set_span_attribute(span, "ansible.task.result", rc)
|
||||||
self.set_span_attribute(span, "ansible.task.host.name", host_data.name)
|
self.set_span_attribute(span, "ansible.task.host.name", host_data.name)
|
||||||
self.set_span_attribute(span, "ansible.task.host.status", host_data.status)
|
self.set_span_attribute(span, "ansible.task.host.status", host_data.status)
|
||||||
|
# This will allow to enrich the service map
|
||||||
|
self.add_attributes_for_service_map_if_possible(span, task_data)
|
||||||
span.end(end_time=host_data.finish)
|
span.end(end_time=host_data.finish)
|
||||||
|
|
||||||
def set_span_attribute(self, span, attributeName, attributeValue):
|
def set_span_attribute(self, span, attributeName, attributeValue):
|
||||||
@@ -283,12 +296,64 @@ class OpenTelemetrySource(object):
|
|||||||
if attributeValue is not None:
|
if attributeValue is not None:
|
||||||
span.set_attribute(attributeName, attributeValue)
|
span.set_attribute(attributeName, attributeValue)
|
||||||
|
|
||||||
|
def add_attributes_for_service_map_if_possible(self, span, task_data):
|
||||||
|
"""Update the span attributes with the service that the task interacted with, if possible."""
|
||||||
|
|
||||||
|
redacted_url = self.parse_and_redact_url_if_possible(task_data.args)
|
||||||
|
if redacted_url:
|
||||||
|
self.set_span_attribute(span, "http.url", redacted_url.geturl())
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def parse_and_redact_url_if_possible(args):
|
||||||
|
"""Parse and redact the url, if possible."""
|
||||||
|
|
||||||
|
try:
|
||||||
|
parsed_url = urlparse(OpenTelemetrySource.url_from_args(args))
|
||||||
|
except ValueError:
|
||||||
|
return None
|
||||||
|
|
||||||
|
if OpenTelemetrySource.is_valid_url(parsed_url):
|
||||||
|
return OpenTelemetrySource.redact_user_password(parsed_url)
|
||||||
|
return None
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def url_from_args(args):
|
||||||
|
# the order matters
|
||||||
|
url_args = ("url", "api_url", "baseurl", "repo", "server_url", "chart_repo_url")
|
||||||
|
for arg in url_args:
|
||||||
|
if args.get(arg):
|
||||||
|
return args.get(arg)
|
||||||
|
return ""
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def redact_user_password(url):
|
||||||
|
return url._replace(netloc=url.hostname) if url.password else url
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def is_valid_url(url):
|
||||||
|
if all([url.scheme, url.netloc, url.hostname]):
|
||||||
|
return "{{" not in url.hostname
|
||||||
|
return False
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def transform_ansible_unicode_to_str(value):
|
||||||
|
parsed_url = urlparse(str(value))
|
||||||
|
if OpenTelemetrySource.is_valid_url(parsed_url):
|
||||||
|
return OpenTelemetrySource.redact_user_password(parsed_url).geturl()
|
||||||
|
return str(value)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def get_error_message(result):
|
def get_error_message(result):
|
||||||
if result.get('exception') is not None:
|
if result.get('exception') is not None:
|
||||||
return OpenTelemetrySource._last_line(result['exception'])
|
return OpenTelemetrySource._last_line(result['exception'])
|
||||||
return result.get('msg', 'failed')
|
return result.get('msg', 'failed')
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def get_error_message_from_results(results, action):
|
||||||
|
for result in results:
|
||||||
|
if result.get('failed', False):
|
||||||
|
return ('{0}({1}) - {2}').format(action, result.get('item', 'none'), OpenTelemetrySource.get_error_message(result))
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _last_line(text):
|
def _last_line(text):
|
||||||
lines = text.strip().split('\n')
|
lines = text.strip().split('\n')
|
||||||
@@ -301,6 +366,14 @@ class OpenTelemetrySource(object):
|
|||||||
stderr = result.get('stderr')
|
stderr = result.get('stderr')
|
||||||
return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr)
|
return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def enrich_error_message_from_results(results, action):
|
||||||
|
message = ""
|
||||||
|
for result in results:
|
||||||
|
if result.get('failed', False):
|
||||||
|
message = ('{0}({1}) - {2}\n{3}').format(action, result.get('item', 'none'), OpenTelemetrySource.enrich_error_message(result), message)
|
||||||
|
return message
|
||||||
|
|
||||||
|
|
||||||
class CallbackModule(CallbackBase):
|
class CallbackModule(CallbackBase):
|
||||||
"""
|
"""
|
||||||
@@ -392,10 +465,15 @@ class CallbackModule(CallbackBase):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def v2_runner_on_failed(self, result, ignore_errors=False):
|
def v2_runner_on_failed(self, result, ignore_errors=False):
|
||||||
self.errors += 1
|
if ignore_errors:
|
||||||
|
status = 'ignored'
|
||||||
|
else:
|
||||||
|
status = 'failed'
|
||||||
|
self.errors += 1
|
||||||
|
|
||||||
self.opentelemetry.finish_task(
|
self.opentelemetry.finish_task(
|
||||||
self.tasks_data,
|
self.tasks_data,
|
||||||
'failed',
|
status,
|
||||||
result
|
result
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -21,11 +21,11 @@ DOCUMENTATION = '''
|
|||||||
- In 2.8, this callback has been renamed from C(osx_say) into M(community.general.say).
|
- In 2.8, this callback has been renamed from C(osx_say) into M(community.general.say).
|
||||||
'''
|
'''
|
||||||
|
|
||||||
import distutils.spawn
|
|
||||||
import platform
|
import platform
|
||||||
import subprocess
|
import subprocess
|
||||||
import os
|
import os
|
||||||
|
|
||||||
|
from ansible.module_utils.common.process import get_bin_path
|
||||||
from ansible.plugins.callback import CallbackBase
|
from ansible.plugins.callback import CallbackBase
|
||||||
|
|
||||||
|
|
||||||
@@ -47,21 +47,24 @@ class CallbackModule(CallbackBase):
|
|||||||
self.HAPPY_VOICE = None
|
self.HAPPY_VOICE = None
|
||||||
self.LASER_VOICE = None
|
self.LASER_VOICE = None
|
||||||
|
|
||||||
self.synthesizer = distutils.spawn.find_executable('say')
|
try:
|
||||||
if not self.synthesizer:
|
self.synthesizer = get_bin_path('say')
|
||||||
self.synthesizer = distutils.spawn.find_executable('espeak')
|
if platform.system() != 'Darwin':
|
||||||
if self.synthesizer:
|
# 'say' binary available, it might be GNUstep tool which doesn't support 'voice' parameter
|
||||||
|
self._display.warning("'say' executable found but system is '%s': ignoring voice parameter" % platform.system())
|
||||||
|
else:
|
||||||
|
self.FAILED_VOICE = 'Zarvox'
|
||||||
|
self.REGULAR_VOICE = 'Trinoids'
|
||||||
|
self.HAPPY_VOICE = 'Cellos'
|
||||||
|
self.LASER_VOICE = 'Princess'
|
||||||
|
except ValueError:
|
||||||
|
try:
|
||||||
|
self.synthesizer = get_bin_path('espeak')
|
||||||
self.FAILED_VOICE = 'klatt'
|
self.FAILED_VOICE = 'klatt'
|
||||||
self.HAPPY_VOICE = 'f5'
|
self.HAPPY_VOICE = 'f5'
|
||||||
self.LASER_VOICE = 'whisper'
|
self.LASER_VOICE = 'whisper'
|
||||||
elif platform.system() != 'Darwin':
|
except ValueError:
|
||||||
# 'say' binary available, it might be GNUstep tool which doesn't support 'voice' parameter
|
self.synthesizer = None
|
||||||
self._display.warning("'say' executable found but system is '%s': ignoring voice parameter" % platform.system())
|
|
||||||
else:
|
|
||||||
self.FAILED_VOICE = 'Zarvox'
|
|
||||||
self.REGULAR_VOICE = 'Trinoids'
|
|
||||||
self.HAPPY_VOICE = 'Cellos'
|
|
||||||
self.LASER_VOICE = 'Princess'
|
|
||||||
|
|
||||||
# plugin disable itself if say is not present
|
# plugin disable itself if say is not present
|
||||||
# ansible will not call any callback if disabled is set to True
|
# ansible will not call any callback if disabled is set to True
|
||||||
|
|||||||
@@ -31,7 +31,6 @@ DOCUMENTATION = '''
|
|||||||
- name: ansible_jail_user
|
- name: ansible_jail_user
|
||||||
'''
|
'''
|
||||||
|
|
||||||
import distutils.spawn
|
|
||||||
import os
|
import os
|
||||||
import os.path
|
import os.path
|
||||||
import subprocess
|
import subprocess
|
||||||
@@ -39,6 +38,7 @@ import traceback
|
|||||||
|
|
||||||
from ansible.errors import AnsibleError
|
from ansible.errors import AnsibleError
|
||||||
from ansible.module_utils.six.moves import shlex_quote
|
from ansible.module_utils.six.moves import shlex_quote
|
||||||
|
from ansible.module_utils.common.process import get_bin_path
|
||||||
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
|
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
|
||||||
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
||||||
from ansible.utils.display import Display
|
from ansible.utils.display import Display
|
||||||
@@ -75,10 +75,10 @@ class Connection(ConnectionBase):
|
|||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _search_executable(executable):
|
def _search_executable(executable):
|
||||||
cmd = distutils.spawn.find_executable(executable)
|
try:
|
||||||
if not cmd:
|
return get_bin_path(executable)
|
||||||
|
except ValueError:
|
||||||
raise AnsibleError("%s command not found in PATH" % executable)
|
raise AnsibleError("%s command not found in PATH" % executable)
|
||||||
return cmd
|
|
||||||
|
|
||||||
def list_jails(self):
|
def list_jails(self):
|
||||||
p = subprocess.Popen([self.jls_cmd, '-q', 'name'],
|
p = subprocess.Popen([self.jls_cmd, '-q', 'name'],
|
||||||
|
|||||||
@@ -43,10 +43,10 @@ DOCUMENTATION = '''
|
|||||||
'''
|
'''
|
||||||
|
|
||||||
import os
|
import os
|
||||||
from distutils.spawn import find_executable
|
|
||||||
from subprocess import Popen, PIPE
|
from subprocess import Popen, PIPE
|
||||||
|
|
||||||
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
|
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
|
||||||
|
from ansible.module_utils.common.process import get_bin_path
|
||||||
from ansible.module_utils.common.text.converters import to_bytes, to_text
|
from ansible.module_utils.common.text.converters import to_bytes, to_text
|
||||||
from ansible.plugins.connection import ConnectionBase
|
from ansible.plugins.connection import ConnectionBase
|
||||||
|
|
||||||
@@ -62,9 +62,9 @@ class Connection(ConnectionBase):
|
|||||||
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
|
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
|
||||||
|
|
||||||
self._host = self._play_context.remote_addr
|
self._host = self._play_context.remote_addr
|
||||||
self._lxc_cmd = find_executable("lxc")
|
try:
|
||||||
|
self._lxc_cmd = get_bin_path("lxc")
|
||||||
if not self._lxc_cmd:
|
except ValueError:
|
||||||
raise AnsibleError("lxc command not found in PATH")
|
raise AnsibleError("lxc command not found in PATH")
|
||||||
|
|
||||||
if self._play_context.remote_user is not None and self._play_context.remote_user != 'root':
|
if self._play_context.remote_user is not None and self._play_context.remote_user != 'root':
|
||||||
@@ -89,9 +89,9 @@ class Connection(ConnectionBase):
|
|||||||
local_cmd.extend(["--project", self.get_option("project")])
|
local_cmd.extend(["--project", self.get_option("project")])
|
||||||
local_cmd.extend([
|
local_cmd.extend([
|
||||||
"exec",
|
"exec",
|
||||||
"%s:%s" % (self.get_option("remote"), self._host),
|
"%s:%s" % (self.get_option("remote"), self.get_option("remote_addr")),
|
||||||
"--",
|
"--",
|
||||||
self._play_context.executable, "-c", cmd
|
self.get_option("executable"), "-c", cmd
|
||||||
])
|
])
|
||||||
|
|
||||||
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
||||||
@@ -126,7 +126,7 @@ class Connection(ConnectionBase):
|
|||||||
local_cmd.extend([
|
local_cmd.extend([
|
||||||
"file", "push",
|
"file", "push",
|
||||||
in_path,
|
in_path,
|
||||||
"%s:%s/%s" % (self.get_option("remote"), self._host, out_path)
|
"%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), out_path)
|
||||||
])
|
])
|
||||||
|
|
||||||
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
||||||
@@ -145,7 +145,7 @@ class Connection(ConnectionBase):
|
|||||||
local_cmd.extend(["--project", self.get_option("project")])
|
local_cmd.extend(["--project", self.get_option("project")])
|
||||||
local_cmd.extend([
|
local_cmd.extend([
|
||||||
"file", "pull",
|
"file", "pull",
|
||||||
"%s:%s/%s" % (self.get_option("remote"), self._host, in_path),
|
"%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), in_path),
|
||||||
out_path
|
out_path
|
||||||
])
|
])
|
||||||
|
|
||||||
|
|||||||
@@ -26,7 +26,6 @@ DOCUMENTATION = '''
|
|||||||
- name: ansible_zone_host
|
- name: ansible_zone_host
|
||||||
'''
|
'''
|
||||||
|
|
||||||
import distutils.spawn
|
|
||||||
import os
|
import os
|
||||||
import os.path
|
import os.path
|
||||||
import subprocess
|
import subprocess
|
||||||
@@ -34,6 +33,7 @@ import traceback
|
|||||||
|
|
||||||
from ansible.errors import AnsibleError
|
from ansible.errors import AnsibleError
|
||||||
from ansible.module_utils.six.moves import shlex_quote
|
from ansible.module_utils.six.moves import shlex_quote
|
||||||
|
from ansible.module_utils.common.process import get_bin_path
|
||||||
from ansible.module_utils.common.text.converters import to_bytes
|
from ansible.module_utils.common.text.converters import to_bytes
|
||||||
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
||||||
from ansible.utils.display import Display
|
from ansible.utils.display import Display
|
||||||
@@ -64,10 +64,10 @@ class Connection(ConnectionBase):
|
|||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _search_executable(executable):
|
def _search_executable(executable):
|
||||||
cmd = distutils.spawn.find_executable(executable)
|
try:
|
||||||
if not cmd:
|
return get_bin_path(executable)
|
||||||
|
except ValueError:
|
||||||
raise AnsibleError("%s command not found in PATH" % executable)
|
raise AnsibleError("%s command not found in PATH" % executable)
|
||||||
return cmd
|
|
||||||
|
|
||||||
def list_zones(self):
|
def list_zones(self):
|
||||||
process = subprocess.Popen([self.zoneadm_cmd, 'list', '-ip'],
|
process = subprocess.Popen([self.zoneadm_cmd, 'list', '-ip'],
|
||||||
|
|||||||
@@ -1,138 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright: (c) 2018, Sumit Kumar <sumit4@netapp.com>, chris Archibald <carchi@netapp.com>
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
- See respective platform section for more details
|
|
||||||
requirements:
|
|
||||||
- See respective platform section for more details
|
|
||||||
notes:
|
|
||||||
- Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire
|
|
||||||
'''
|
|
||||||
|
|
||||||
# Documentation fragment for ONTAP (na_cdot)
|
|
||||||
ONTAP = r'''
|
|
||||||
options:
|
|
||||||
hostname:
|
|
||||||
required: true
|
|
||||||
description:
|
|
||||||
- The hostname or IP address of the ONTAP instance.
|
|
||||||
username:
|
|
||||||
required: true
|
|
||||||
description:
|
|
||||||
- This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
|
|
||||||
For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/).
|
|
||||||
aliases: ['user']
|
|
||||||
password:
|
|
||||||
required: true
|
|
||||||
description:
|
|
||||||
- Password for the specified user.
|
|
||||||
aliases: ['pass']
|
|
||||||
requirements:
|
|
||||||
- A physical or virtual clustered Data ONTAP system. The modules were developed with Clustered Data ONTAP 8.3
|
|
||||||
- Ansible 2.2
|
|
||||||
- netapp-lib (2015.9.25). Install using 'pip install netapp-lib'
|
|
||||||
|
|
||||||
notes:
|
|
||||||
- The modules prefixed with na\\_cdot are built to support the ONTAP storage platform.
|
|
||||||
|
|
||||||
'''
|
|
||||||
|
|
||||||
# Documentation fragment for SolidFire
|
|
||||||
SOLIDFIRE = r'''
|
|
||||||
options:
|
|
||||||
hostname:
|
|
||||||
required: true
|
|
||||||
description:
|
|
||||||
- The hostname or IP address of the SolidFire cluster.
|
|
||||||
username:
|
|
||||||
required: true
|
|
||||||
description:
|
|
||||||
- Please ensure that the user has the adequate permissions. For more information, please read the official documentation
|
|
||||||
U(https://mysupport.netapp.com/documentation/docweb/index.html?productID=62636&language=en-US).
|
|
||||||
aliases: ['user']
|
|
||||||
password:
|
|
||||||
required: true
|
|
||||||
description:
|
|
||||||
- Password for the specified user.
|
|
||||||
aliases: ['pass']
|
|
||||||
|
|
||||||
requirements:
|
|
||||||
- The modules were developed with SolidFire 10.1
|
|
||||||
- solidfire-sdk-python (1.1.0.92) or greater. Install using 'pip install solidfire-sdk-python'
|
|
||||||
|
|
||||||
notes:
|
|
||||||
- The modules prefixed with na\\_elementsw are built to support the SolidFire storage platform.
|
|
||||||
|
|
||||||
'''
|
|
||||||
|
|
||||||
# Documentation fragment for ONTAP (na_ontap)
|
|
||||||
NA_ONTAP = r'''
|
|
||||||
options:
|
|
||||||
hostname:
|
|
||||||
description:
|
|
||||||
- The hostname or IP address of the ONTAP instance.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
username:
|
|
||||||
description:
|
|
||||||
- This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
|
|
||||||
For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/).
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
aliases: [ user ]
|
|
||||||
password:
|
|
||||||
description:
|
|
||||||
- Password for the specified user.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
aliases: [ pass ]
|
|
||||||
https:
|
|
||||||
description:
|
|
||||||
- Enable and disable https
|
|
||||||
type: bool
|
|
||||||
default: no
|
|
||||||
validate_certs:
|
|
||||||
description:
|
|
||||||
- If set to C(no), the SSL certificates will not be validated.
|
|
||||||
- This should only set to C(False) used on personally controlled sites using self-signed certificates.
|
|
||||||
type: bool
|
|
||||||
default: yes
|
|
||||||
http_port:
|
|
||||||
description:
|
|
||||||
- Override the default port (80 or 443) with this port
|
|
||||||
type: int
|
|
||||||
ontapi:
|
|
||||||
description:
|
|
||||||
- The ontap api version to use
|
|
||||||
type: int
|
|
||||||
use_rest:
|
|
||||||
description:
|
|
||||||
- REST API if supported by the target system for all the resources and attributes the module requires. Otherwise will revert to ZAPI.
|
|
||||||
- Always -- will always use the REST API
|
|
||||||
- Never -- will always use the ZAPI
|
|
||||||
- Auto -- will try to use the REST Api
|
|
||||||
default: Auto
|
|
||||||
choices: ['Never', 'Always', 'Auto']
|
|
||||||
type: str
|
|
||||||
|
|
||||||
|
|
||||||
requirements:
|
|
||||||
- A physical or virtual clustered Data ONTAP system. The modules support Data ONTAP 9.1 and onward
|
|
||||||
- Ansible 2.6
|
|
||||||
- Python2 netapp-lib (2017.10.30) or later. Install using 'pip install netapp-lib'
|
|
||||||
- Python3 netapp-lib (2018.11.13) or later. Install using 'pip install netapp-lib'
|
|
||||||
- To enable http on the cluster you must run the following commands 'set -privilege advanced;' 'system services web modify -http-enabled true;'
|
|
||||||
|
|
||||||
notes:
|
|
||||||
- The modules prefixed with na\\_ontap are built to support the ONTAP storage platform.
|
|
||||||
|
|
||||||
'''
|
|
||||||
41
plugins/doc_fragments/bitbucket.py
Normal file
41
plugins/doc_fragments/bitbucket.py
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright: (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
|
||||||
|
class ModuleDocFragment(object):
|
||||||
|
|
||||||
|
# Standard documentation fragment
|
||||||
|
DOCUMENTATION = r'''
|
||||||
|
options:
|
||||||
|
client_id:
|
||||||
|
description:
|
||||||
|
- The OAuth consumer key.
|
||||||
|
- If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
|
||||||
|
type: str
|
||||||
|
client_secret:
|
||||||
|
description:
|
||||||
|
- The OAuth consumer secret.
|
||||||
|
- If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
|
||||||
|
type: str
|
||||||
|
user:
|
||||||
|
description:
|
||||||
|
- The username.
|
||||||
|
- If not set the environment variable C(BITBUCKET_USERNAME) will be used.
|
||||||
|
type: str
|
||||||
|
version_added: 4.0.0
|
||||||
|
password:
|
||||||
|
description:
|
||||||
|
- The App password.
|
||||||
|
- If not set the environment variable C(BITBUCKET_PASSWORD) will be used.
|
||||||
|
type: str
|
||||||
|
version_added: 4.0.0
|
||||||
|
notes:
|
||||||
|
- Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
|
||||||
|
- Bitbucket App password can be created from Bitbucket profile -> Personal Settings -> App passwords.
|
||||||
|
- If both OAuth and Basic Auth credentials are passed, OAuth credentials take precedence.
|
||||||
|
'''
|
||||||
31
plugins/doc_fragments/gitlab.py
Normal file
31
plugins/doc_fragments/gitlab.py
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
|
||||||
|
class ModuleDocFragment(object):
|
||||||
|
|
||||||
|
# Standard files documentation fragment
|
||||||
|
DOCUMENTATION = r'''
|
||||||
|
requirements:
|
||||||
|
- requests (Python library U(https://pypi.org/project/requests/))
|
||||||
|
|
||||||
|
options:
|
||||||
|
api_token:
|
||||||
|
description:
|
||||||
|
- GitLab access token with API permissions.
|
||||||
|
type: str
|
||||||
|
api_oauth_token:
|
||||||
|
description:
|
||||||
|
- GitLab OAuth token for logging in.
|
||||||
|
type: str
|
||||||
|
version_added: 4.2.0
|
||||||
|
api_job_token:
|
||||||
|
description:
|
||||||
|
- GitLab CI job token for logging in.
|
||||||
|
type: str
|
||||||
|
version_added: 4.2.0
|
||||||
|
'''
|
||||||
@@ -1,103 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright: (c) 2015, Peter Sprygada <psprygada@ansible.com>
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
|
|
||||||
# Standard files documentation fragment
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
provider:
|
|
||||||
description:
|
|
||||||
- A dict object containing connection details.
|
|
||||||
type: dict
|
|
||||||
suboptions:
|
|
||||||
host:
|
|
||||||
description:
|
|
||||||
- Specifies the DNS host name or address for connecting to the remote
|
|
||||||
instance of NIOS WAPI over REST
|
|
||||||
- Value can also be specified using C(INFOBLOX_HOST) environment
|
|
||||||
variable.
|
|
||||||
type: str
|
|
||||||
username:
|
|
||||||
description:
|
|
||||||
- Configures the username to use to authenticate the connection to
|
|
||||||
the remote instance of NIOS.
|
|
||||||
- Value can also be specified using C(INFOBLOX_USERNAME) environment
|
|
||||||
variable.
|
|
||||||
type: str
|
|
||||||
password:
|
|
||||||
description:
|
|
||||||
- Specifies the password to use to authenticate the connection to
|
|
||||||
the remote instance of NIOS.
|
|
||||||
- Value can also be specified using C(INFOBLOX_PASSWORD) environment
|
|
||||||
variable.
|
|
||||||
type: str
|
|
||||||
validate_certs:
|
|
||||||
description:
|
|
||||||
- Boolean value to enable or disable verifying SSL certificates
|
|
||||||
- Value can also be specified using C(INFOBLOX_SSL_VERIFY) environment
|
|
||||||
variable.
|
|
||||||
type: bool
|
|
||||||
default: no
|
|
||||||
aliases: [ ssl_verify ]
|
|
||||||
http_request_timeout:
|
|
||||||
description:
|
|
||||||
- The amount of time before to wait before receiving a response
|
|
||||||
- Value can also be specified using C(INFOBLOX_HTTP_REQUEST_TIMEOUT) environment
|
|
||||||
variable.
|
|
||||||
type: int
|
|
||||||
default: 10
|
|
||||||
max_retries:
|
|
||||||
description:
|
|
||||||
- Configures the number of attempted retries before the connection
|
|
||||||
is declared usable
|
|
||||||
- Value can also be specified using C(INFOBLOX_MAX_RETRIES) environment
|
|
||||||
variable.
|
|
||||||
type: int
|
|
||||||
default: 3
|
|
||||||
wapi_version:
|
|
||||||
description:
|
|
||||||
- Specifies the version of WAPI to use
|
|
||||||
- Value can also be specified using C(INFOBLOX_WAP_VERSION) environment
|
|
||||||
variable.
|
|
||||||
- Until ansible 2.8 the default WAPI was 1.4
|
|
||||||
type: str
|
|
||||||
default: '2.1'
|
|
||||||
max_results:
|
|
||||||
description:
|
|
||||||
- Specifies the maximum number of objects to be returned,
|
|
||||||
if set to a negative number the appliance will return an error when the
|
|
||||||
number of returned objects would exceed the setting.
|
|
||||||
- Value can also be specified using C(INFOBLOX_MAX_RESULTS) environment
|
|
||||||
variable.
|
|
||||||
type: int
|
|
||||||
default: 1000
|
|
||||||
http_pool_connections:
|
|
||||||
description:
|
|
||||||
- Number of pools to be used by the C(infoblox_client.Connector) object.
|
|
||||||
- This is passed as-is to the underlying C(requests.adapters.HTTPAdapter) class.
|
|
||||||
type: int
|
|
||||||
default: 10
|
|
||||||
http_pool_maxsize:
|
|
||||||
description:
|
|
||||||
- Maximum number of connections per pool to be used by the C(infoblox_client.Connector) object.
|
|
||||||
- This is passed as-is to the underlying C(requests.adapters.HTTPAdapter) class.
|
|
||||||
type: int
|
|
||||||
default: 10
|
|
||||||
silent_ssl_warnings:
|
|
||||||
description:
|
|
||||||
- Disable C(urllib3) SSL warnings in the C(infoblox_client.Connector) object.
|
|
||||||
- This is passed as-is to the underlying C(requests.adapters.HTTPAdapter) class.
|
|
||||||
type: bool
|
|
||||||
default: true
|
|
||||||
notes:
|
|
||||||
- "This module must be run locally, which can be achieved by specifying C(connection: local)."
|
|
||||||
- Please read the :ref:`nios_guide` for more detailed information on how to use Infoblox with Ansible.
|
|
||||||
|
|
||||||
'''
|
|
||||||
36
plugins/filter/counter.py
Normal file
36
plugins/filter/counter.py
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# Copyright (c) 2021, Remy Keil <remy.keil@gmail.com>
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
from ansible.errors import AnsibleFilterError
|
||||||
|
from ansible.module_utils.common._collections_compat import Sequence
|
||||||
|
from collections import Counter
|
||||||
|
|
||||||
|
|
||||||
|
def counter(sequence):
|
||||||
|
''' Count elements in a sequence. Returns dict with count result. '''
|
||||||
|
if not isinstance(sequence, Sequence):
|
||||||
|
raise AnsibleFilterError('Argument for community.general.counter must be a sequence (string or list). %s is %s' %
|
||||||
|
(sequence, type(sequence)))
|
||||||
|
|
||||||
|
try:
|
||||||
|
result = dict(Counter(sequence))
|
||||||
|
except TypeError as e:
|
||||||
|
raise AnsibleFilterError(
|
||||||
|
"community.general.counter needs a sequence with hashable elements (int, float or str) - %s" % (e)
|
||||||
|
)
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
class FilterModule(object):
|
||||||
|
''' Ansible counter jinja2 filters '''
|
||||||
|
|
||||||
|
def filters(self):
|
||||||
|
filters = {
|
||||||
|
'counter': counter,
|
||||||
|
}
|
||||||
|
|
||||||
|
return filters
|
||||||
@@ -5,7 +5,7 @@
|
|||||||
from __future__ import (absolute_import, division, print_function)
|
from __future__ import (absolute_import, division, print_function)
|
||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
from distutils.version import LooseVersion
|
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||||
|
|
||||||
|
|
||||||
def version_sort(value, reverse=False):
|
def version_sort(value, reverse=False):
|
||||||
|
|||||||
@@ -68,7 +68,6 @@ user: ansible-tester
|
|||||||
password: secure
|
password: secure
|
||||||
'''
|
'''
|
||||||
|
|
||||||
from distutils.version import LooseVersion
|
|
||||||
import socket
|
import socket
|
||||||
|
|
||||||
from ansible.errors import AnsibleError
|
from ansible.errors import AnsibleError
|
||||||
|
|||||||
@@ -35,13 +35,23 @@ DOCUMENTATION = '''
|
|||||||
type: string
|
type: string
|
||||||
required: true
|
required: true
|
||||||
host_filter:
|
host_filter:
|
||||||
description: An Icinga2 API valid host filter.
|
description:
|
||||||
|
- An Icinga2 API valid host filter. Leave blank for no filtering
|
||||||
type: string
|
type: string
|
||||||
required: false
|
required: false
|
||||||
validate_certs:
|
validate_certs:
|
||||||
description: Enables or disables SSL certificate verification.
|
description: Enables or disables SSL certificate verification.
|
||||||
type: boolean
|
type: boolean
|
||||||
default: true
|
default: true
|
||||||
|
inventory_attr:
|
||||||
|
description:
|
||||||
|
- Allows the override of the inventory name based on different attributes.
|
||||||
|
- This allows for changing the way limits are used.
|
||||||
|
- The current default, C(address), is sometimes not unique or present. We recommend to use C(name) instead.
|
||||||
|
type: string
|
||||||
|
default: address
|
||||||
|
choices: ['name', 'display_name', 'address']
|
||||||
|
version_added: 4.2.0
|
||||||
'''
|
'''
|
||||||
|
|
||||||
EXAMPLES = r'''
|
EXAMPLES = r'''
|
||||||
@@ -52,6 +62,7 @@ user: ansible
|
|||||||
password: secure
|
password: secure
|
||||||
host_filter: \"linux-servers\" in host.groups
|
host_filter: \"linux-servers\" in host.groups
|
||||||
validate_certs: false
|
validate_certs: false
|
||||||
|
inventory_attr: name
|
||||||
'''
|
'''
|
||||||
|
|
||||||
import json
|
import json
|
||||||
@@ -59,6 +70,7 @@ import json
|
|||||||
from ansible.errors import AnsibleParserError
|
from ansible.errors import AnsibleParserError
|
||||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||||
from ansible.module_utils.urls import open_url
|
from ansible.module_utils.urls import open_url
|
||||||
|
from ansible.module_utils.six.moves.urllib.error import HTTPError
|
||||||
|
|
||||||
|
|
||||||
class InventoryModule(BaseInventoryPlugin, Constructable):
|
class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||||
@@ -76,6 +88,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
|||||||
self.icinga2_password = None
|
self.icinga2_password = None
|
||||||
self.ssl_verify = None
|
self.ssl_verify = None
|
||||||
self.host_filter = None
|
self.host_filter = None
|
||||||
|
self.inventory_attr = None
|
||||||
|
|
||||||
self.cache_key = None
|
self.cache_key = None
|
||||||
self.use_cache = None
|
self.use_cache = None
|
||||||
@@ -114,9 +127,21 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
|||||||
if data is not None:
|
if data is not None:
|
||||||
request_args['data'] = json.dumps(data)
|
request_args['data'] = json.dumps(data)
|
||||||
self.display.vvv("Request Args: %s" % request_args)
|
self.display.vvv("Request Args: %s" % request_args)
|
||||||
response = open_url(request_url, **request_args)
|
try:
|
||||||
|
response = open_url(request_url, **request_args)
|
||||||
|
except HTTPError as e:
|
||||||
|
try:
|
||||||
|
error_body = json.loads(e.read().decode())
|
||||||
|
self.display.vvv("Error returned: {0}".format(error_body))
|
||||||
|
except Exception:
|
||||||
|
error_body = {"status": None}
|
||||||
|
if e.code == 404 and error_body.get('status') == "No objects found.":
|
||||||
|
raise AnsibleParserError("Host filter returned no data. Please confirm your host_filter value is valid")
|
||||||
|
raise AnsibleParserError("Unexpected data returned: {0} -- {1}".format(e, error_body))
|
||||||
|
|
||||||
response_body = response.read()
|
response_body = response.read()
|
||||||
json_data = json.loads(response_body.decode('utf-8'))
|
json_data = json.loads(response_body.decode('utf-8'))
|
||||||
|
self.display.vvv("Returned Data: %s" % json.dumps(json_data, indent=4, sort_keys=True))
|
||||||
if 200 <= response.status <= 299:
|
if 200 <= response.status <= 299:
|
||||||
return json_data
|
return json_data
|
||||||
if response.status == 404 and json_data['status'] == "No objects found.":
|
if response.status == 404 and json_data['status'] == "No objects found.":
|
||||||
@@ -155,7 +180,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
|||||||
"""Query for all hosts """
|
"""Query for all hosts """
|
||||||
self.display.vvv("Querying Icinga2 for inventory")
|
self.display.vvv("Querying Icinga2 for inventory")
|
||||||
query_args = {
|
query_args = {
|
||||||
"attrs": ["address", "state_type", "state", "groups"],
|
"attrs": ["address", "display_name", "state_type", "state", "groups"],
|
||||||
}
|
}
|
||||||
if self.host_filter is not None:
|
if self.host_filter is not None:
|
||||||
query_args['host_filter'] = self.host_filter
|
query_args['host_filter'] = self.host_filter
|
||||||
@@ -177,24 +202,35 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
|||||||
"""Convert Icinga2 API data to JSON format for Ansible"""
|
"""Convert Icinga2 API data to JSON format for Ansible"""
|
||||||
groups_dict = {"_meta": {"hostvars": {}}}
|
groups_dict = {"_meta": {"hostvars": {}}}
|
||||||
for entry in json_data:
|
for entry in json_data:
|
||||||
host_name = entry['name']
|
|
||||||
host_attrs = entry['attrs']
|
host_attrs = entry['attrs']
|
||||||
|
if self.inventory_attr == "name":
|
||||||
|
host_name = entry.get('name')
|
||||||
|
if self.inventory_attr == "address":
|
||||||
|
# When looking for address for inventory, if missing fallback to object name
|
||||||
|
if host_attrs.get('address', '') != '':
|
||||||
|
host_name = host_attrs.get('address')
|
||||||
|
else:
|
||||||
|
host_name = entry.get('name')
|
||||||
|
if self.inventory_attr == "display_name":
|
||||||
|
host_name = host_attrs.get('display_name')
|
||||||
if host_attrs['state'] == 0:
|
if host_attrs['state'] == 0:
|
||||||
host_attrs['state'] = 'on'
|
host_attrs['state'] = 'on'
|
||||||
else:
|
else:
|
||||||
host_attrs['state'] = 'off'
|
host_attrs['state'] = 'off'
|
||||||
host_groups = host_attrs['groups']
|
host_groups = host_attrs.get('groups')
|
||||||
host_addr = host_attrs['address']
|
self.inventory.add_host(host_name)
|
||||||
self.inventory.add_host(host_addr)
|
|
||||||
for group in host_groups:
|
for group in host_groups:
|
||||||
if group not in self.inventory.groups.keys():
|
if group not in self.inventory.groups.keys():
|
||||||
self.inventory.add_group(group)
|
self.inventory.add_group(group)
|
||||||
self.inventory.add_child(group, host_addr)
|
self.inventory.add_child(group, host_name)
|
||||||
self.inventory.set_variable(host_addr, 'address', host_addr)
|
# If the address attribute is populated, override ansible_host with the value
|
||||||
self.inventory.set_variable(host_addr, 'hostname', host_name)
|
if host_attrs.get('address') != '':
|
||||||
self.inventory.set_variable(host_addr, 'state',
|
self.inventory.set_variable(host_name, 'ansible_host', host_attrs.get('address'))
|
||||||
|
self.inventory.set_variable(host_name, 'hostname', entry.get('name'))
|
||||||
|
self.inventory.set_variable(host_name, 'display_name', host_attrs.get('display_name'))
|
||||||
|
self.inventory.set_variable(host_name, 'state',
|
||||||
host_attrs['state'])
|
host_attrs['state'])
|
||||||
self.inventory.set_variable(host_addr, 'state_type',
|
self.inventory.set_variable(host_name, 'state_type',
|
||||||
host_attrs['state_type'])
|
host_attrs['state_type'])
|
||||||
return groups_dict
|
return groups_dict
|
||||||
|
|
||||||
@@ -211,6 +247,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
|||||||
self.icinga2_password = self.get_option('password')
|
self.icinga2_password = self.get_option('password')
|
||||||
self.ssl_verify = self.get_option('validate_certs')
|
self.ssl_verify = self.get_option('validate_certs')
|
||||||
self.host_filter = self.get_option('host_filter')
|
self.host_filter = self.get_option('host_filter')
|
||||||
|
self.inventory_attr = self.get_option('inventory_attr')
|
||||||
# Not currently enabled
|
# Not currently enabled
|
||||||
# self.cache_key = self.get_cache_key(path)
|
# self.cache_key = self.get_cache_key(path)
|
||||||
# self.use_cache = cache and self.get_option('cache')
|
# self.use_cache = cache and self.get_option('cache')
|
||||||
|
|||||||
@@ -13,6 +13,9 @@ DOCUMENTATION = r'''
|
|||||||
- Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'.
|
- Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'.
|
||||||
version_added: "3.0.0"
|
version_added: "3.0.0"
|
||||||
author: "Frank Dornheim (@conloos)"
|
author: "Frank Dornheim (@conloos)"
|
||||||
|
requirements:
|
||||||
|
- ipaddress
|
||||||
|
- lxd >= 4.0
|
||||||
options:
|
options:
|
||||||
plugin:
|
plugin:
|
||||||
description: Token that ensures this is a source file for the 'lxd' plugin.
|
description: Token that ensures this is a source file for the 'lxd' plugin.
|
||||||
@@ -47,26 +50,38 @@ DOCUMENTATION = r'''
|
|||||||
- If I(trust_password) is set, this module send a request for authentication before sending any requests.
|
- If I(trust_password) is set, this module send a request for authentication before sending any requests.
|
||||||
type: str
|
type: str
|
||||||
state:
|
state:
|
||||||
description: Filter the container according to the current status.
|
description: Filter the instance according to the current status.
|
||||||
type: str
|
type: str
|
||||||
default: none
|
default: none
|
||||||
choices: [ 'STOPPED', 'STARTING', 'RUNNING', 'none' ]
|
choices: [ 'STOPPED', 'STARTING', 'RUNNING', 'none' ]
|
||||||
prefered_container_network_interface:
|
type_filter:
|
||||||
description:
|
description:
|
||||||
- If a container has multiple network interfaces, select which one is the prefered as pattern.
|
- Filter the instances by type C(virtual-machine), C(container) or C(both).
|
||||||
|
- The first version of the inventory only supported containers.
|
||||||
|
type: str
|
||||||
|
default: container
|
||||||
|
choices: [ 'virtual-machine', 'container', 'both' ]
|
||||||
|
version_added: 4.2.0
|
||||||
|
prefered_instance_network_interface:
|
||||||
|
description:
|
||||||
|
- If an instance has multiple network interfaces, select which one is the prefered as pattern.
|
||||||
- Combined with the first number that can be found e.g. 'eth' + 0.
|
- Combined with the first number that can be found e.g. 'eth' + 0.
|
||||||
|
- The option has been renamed from I(prefered_container_network_interface) to I(prefered_instance_network_interface) in community.general 3.8.0.
|
||||||
|
The old name still works as an alias.
|
||||||
type: str
|
type: str
|
||||||
default: eth
|
default: eth
|
||||||
prefered_container_network_family:
|
aliases:
|
||||||
|
- prefered_container_network_interface
|
||||||
|
prefered_instance_network_family:
|
||||||
description:
|
description:
|
||||||
- If a container has multiple network interfaces, which one is the prefered by family.
|
- If an instance has multiple network interfaces, which one is the prefered by family.
|
||||||
- Specify C(inet) for IPv4 and C(inet6) for IPv6.
|
- Specify C(inet) for IPv4 and C(inet6) for IPv6.
|
||||||
type: str
|
type: str
|
||||||
default: inet
|
default: inet
|
||||||
choices: [ 'inet', 'inet6' ]
|
choices: [ 'inet', 'inet6' ]
|
||||||
groupby:
|
groupby:
|
||||||
description:
|
description:
|
||||||
- Create groups by the following keywords C(location), C(pattern), C(network_range), C(os), C(release), C(profile), C(vlanid).
|
- Create groups by the following keywords C(location), C(network_range), C(os), C(pattern), C(profile), C(release), C(type), C(vlanid).
|
||||||
- See example for syntax.
|
- See example for syntax.
|
||||||
type: dict
|
type: dict
|
||||||
'''
|
'''
|
||||||
@@ -81,38 +96,49 @@ plugin: community.general.lxd
|
|||||||
url: unix:/var/snap/lxd/common/lxd/unix.socket
|
url: unix:/var/snap/lxd/common/lxd/unix.socket
|
||||||
state: RUNNING
|
state: RUNNING
|
||||||
|
|
||||||
|
# simple lxd.yml including virtual machines and containers
|
||||||
|
plugin: community.general.lxd
|
||||||
|
url: unix:/var/snap/lxd/common/lxd/unix.socket
|
||||||
|
type_filter: both
|
||||||
|
|
||||||
# grouping lxd.yml
|
# grouping lxd.yml
|
||||||
groupby:
|
groupby:
|
||||||
testpattern:
|
|
||||||
type: pattern
|
|
||||||
attribute: test
|
|
||||||
vlan666:
|
|
||||||
type: vlanid
|
|
||||||
attribute: 666
|
|
||||||
locationBerlin:
|
locationBerlin:
|
||||||
type: location
|
type: location
|
||||||
attribute: Berlin
|
attribute: Berlin
|
||||||
osUbuntu:
|
|
||||||
type: os
|
|
||||||
attribute: ubuntu
|
|
||||||
releaseFocal:
|
|
||||||
type: release
|
|
||||||
attribute: focal
|
|
||||||
releaseBionic:
|
|
||||||
type: release
|
|
||||||
attribute: bionic
|
|
||||||
profileDefault:
|
|
||||||
type: profile
|
|
||||||
attribute: default
|
|
||||||
profileX11:
|
|
||||||
type: profile
|
|
||||||
attribute: x11
|
|
||||||
netRangeIPv4:
|
netRangeIPv4:
|
||||||
type: network_range
|
type: network_range
|
||||||
attribute: 10.98.143.0/24
|
attribute: 10.98.143.0/24
|
||||||
netRangeIPv6:
|
netRangeIPv6:
|
||||||
type: network_range
|
type: network_range
|
||||||
attribute: fd42:bd00:7b11:2167:216:3eff::/24
|
attribute: fd42:bd00:7b11:2167:216:3eff::/24
|
||||||
|
osUbuntu:
|
||||||
|
type: os
|
||||||
|
attribute: ubuntu
|
||||||
|
testpattern:
|
||||||
|
type: pattern
|
||||||
|
attribute: test
|
||||||
|
profileDefault:
|
||||||
|
type: profile
|
||||||
|
attribute: default
|
||||||
|
profileX11:
|
||||||
|
type: profile
|
||||||
|
attribute: x11
|
||||||
|
releaseFocal:
|
||||||
|
type: release
|
||||||
|
attribute: focal
|
||||||
|
releaseBionic:
|
||||||
|
type: release
|
||||||
|
attribute: bionic
|
||||||
|
typeVM:
|
||||||
|
type: type
|
||||||
|
attribute: virtual-machine
|
||||||
|
typeContainer:
|
||||||
|
type: type
|
||||||
|
attribute: container
|
||||||
|
vlan666:
|
||||||
|
type: vlanid
|
||||||
|
attribute: 666
|
||||||
'''
|
'''
|
||||||
|
|
||||||
import binascii
|
import binascii
|
||||||
@@ -124,10 +150,17 @@ import socket
|
|||||||
from ansible.plugins.inventory import BaseInventoryPlugin
|
from ansible.plugins.inventory import BaseInventoryPlugin
|
||||||
from ansible.module_utils.common.text.converters import to_native, to_text
|
from ansible.module_utils.common.text.converters import to_native, to_text
|
||||||
from ansible.module_utils.common.dict_transformations import dict_merge
|
from ansible.module_utils.common.dict_transformations import dict_merge
|
||||||
|
from ansible.module_utils.six import raise_from
|
||||||
from ansible.errors import AnsibleError, AnsibleParserError
|
from ansible.errors import AnsibleError, AnsibleParserError
|
||||||
from ansible_collections.community.general.plugins.module_utils.compat import ipaddress
|
|
||||||
from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
|
from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
|
||||||
|
|
||||||
|
try:
|
||||||
|
import ipaddress
|
||||||
|
except ImportError as exc:
|
||||||
|
IPADDRESS_IMPORT_ERROR = exc
|
||||||
|
else:
|
||||||
|
IPADDRESS_IMPORT_ERROR = None
|
||||||
|
|
||||||
|
|
||||||
class InventoryModule(BaseInventoryPlugin):
|
class InventoryModule(BaseInventoryPlugin):
|
||||||
DEBUG = 4
|
DEBUG = 4
|
||||||
@@ -274,10 +307,10 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
network_configs = self.socket.do('GET', '/1.0/networks')
|
network_configs = self.socket.do('GET', '/1.0/networks')
|
||||||
return [m.split('/')[3] for m in network_configs['metadata']]
|
return [m.split('/')[3] for m in network_configs['metadata']]
|
||||||
|
|
||||||
def _get_containers(self):
|
def _get_instances(self):
|
||||||
"""Get Containernames
|
"""Get instancenames
|
||||||
|
|
||||||
Returns all containernames
|
Returns all instancenames
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
None
|
None
|
||||||
@@ -286,25 +319,27 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
Raises:
|
Raises:
|
||||||
None
|
None
|
||||||
Returns:
|
Returns:
|
||||||
list(names): names of all containers"""
|
list(names): names of all instances"""
|
||||||
# e.g. {'type': 'sync',
|
# e.g. {
|
||||||
# 'status': 'Success',
|
# "metadata": [
|
||||||
# 'status_code': 200,
|
# "/1.0/instances/foo",
|
||||||
# 'operation': '',
|
# "/1.0/instances/bar"
|
||||||
# 'error_code': 0,
|
# ],
|
||||||
# 'error': '',
|
# "status": "Success",
|
||||||
# 'metadata': ['/1.0/containers/udemy-ansible-ubuntu-2004']}
|
# "status_code": 200,
|
||||||
containers = self.socket.do('GET', '/1.0/containers')
|
# "type": "sync"
|
||||||
return [m.split('/')[3] for m in containers['metadata']]
|
# }
|
||||||
|
instances = self.socket.do('GET', '/1.0/instances')
|
||||||
|
return [m.split('/')[3] for m in instances['metadata']]
|
||||||
|
|
||||||
def _get_config(self, branch, name):
|
def _get_config(self, branch, name):
|
||||||
"""Get inventory of container
|
"""Get inventory of instance
|
||||||
|
|
||||||
Get config of container
|
Get config of instance
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
str(branch): Name oft the API-Branch
|
str(branch): Name oft the API-Branch
|
||||||
str(name): Name of Container
|
str(name): Name of instance
|
||||||
Kwargs:
|
Kwargs:
|
||||||
None
|
None
|
||||||
Source:
|
Source:
|
||||||
@@ -312,7 +347,7 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
Raises:
|
Raises:
|
||||||
None
|
None
|
||||||
Returns:
|
Returns:
|
||||||
dict(config): Config of the container"""
|
dict(config): Config of the instance"""
|
||||||
config = {}
|
config = {}
|
||||||
if isinstance(branch, (tuple, list)):
|
if isinstance(branch, (tuple, list)):
|
||||||
config[name] = {branch[1]: self.socket.do('GET', '/1.0/{0}/{1}/{2}'.format(to_native(branch[0]), to_native(name), to_native(branch[1])))}
|
config[name] = {branch[1]: self.socket.do('GET', '/1.0/{0}/{1}/{2}'.format(to_native(branch[0]), to_native(name), to_native(branch[1])))}
|
||||||
@@ -320,13 +355,13 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
config[name] = {branch: self.socket.do('GET', '/1.0/{0}/{1}'.format(to_native(branch), to_native(name)))}
|
config[name] = {branch: self.socket.do('GET', '/1.0/{0}/{1}'.format(to_native(branch), to_native(name)))}
|
||||||
return config
|
return config
|
||||||
|
|
||||||
def get_container_data(self, names):
|
def get_instance_data(self, names):
|
||||||
"""Create Inventory of the container
|
"""Create Inventory of the instance
|
||||||
|
|
||||||
Iterate through the different branches of the containers and collect Informations.
|
Iterate through the different branches of the instances and collect Informations.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
list(names): List of container names
|
list(names): List of instance names
|
||||||
Kwargs:
|
Kwargs:
|
||||||
None
|
None
|
||||||
Raises:
|
Raises:
|
||||||
@@ -335,20 +370,20 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
None"""
|
None"""
|
||||||
# tuple(('instances','metadata/templates')) to get section in branch
|
# tuple(('instances','metadata/templates')) to get section in branch
|
||||||
# e.g. /1.0/instances/<name>/metadata/templates
|
# e.g. /1.0/instances/<name>/metadata/templates
|
||||||
branches = ['containers', ('instances', 'state')]
|
branches = ['instances', ('instances', 'state')]
|
||||||
container_config = {}
|
instance_config = {}
|
||||||
for branch in branches:
|
for branch in branches:
|
||||||
for name in names:
|
for name in names:
|
||||||
container_config['containers'] = self._get_config(branch, name)
|
instance_config['instances'] = self._get_config(branch, name)
|
||||||
self.data = dict_merge(container_config, self.data)
|
self.data = dict_merge(instance_config, self.data)
|
||||||
|
|
||||||
def get_network_data(self, names):
|
def get_network_data(self, names):
|
||||||
"""Create Inventory of the container
|
"""Create Inventory of the instance
|
||||||
|
|
||||||
Iterate through the different branches of the containers and collect Informations.
|
Iterate through the different branches of the instances and collect Informations.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
list(names): List of container names
|
list(names): List of instance names
|
||||||
Kwargs:
|
Kwargs:
|
||||||
None
|
None
|
||||||
Raises:
|
Raises:
|
||||||
@@ -367,26 +402,26 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
network_config['networks'] = {name: None}
|
network_config['networks'] = {name: None}
|
||||||
self.data = dict_merge(network_config, self.data)
|
self.data = dict_merge(network_config, self.data)
|
||||||
|
|
||||||
def extract_network_information_from_container_config(self, container_name):
|
def extract_network_information_from_instance_config(self, instance_name):
|
||||||
"""Returns the network interface configuration
|
"""Returns the network interface configuration
|
||||||
|
|
||||||
Returns the network ipv4 and ipv6 config of the container without local-link
|
Returns the network ipv4 and ipv6 config of the instance without local-link
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
str(container_name): Name oft he container
|
str(instance_name): Name oft he instance
|
||||||
Kwargs:
|
Kwargs:
|
||||||
None
|
None
|
||||||
Raises:
|
Raises:
|
||||||
None
|
None
|
||||||
Returns:
|
Returns:
|
||||||
dict(network_configuration): network config"""
|
dict(network_configuration): network config"""
|
||||||
container_network_interfaces = self._get_data_entry('containers/{0}/state/metadata/network'.format(container_name))
|
instance_network_interfaces = self._get_data_entry('instances/{0}/state/metadata/network'.format(instance_name))
|
||||||
network_configuration = None
|
network_configuration = None
|
||||||
if container_network_interfaces:
|
if instance_network_interfaces:
|
||||||
network_configuration = {}
|
network_configuration = {}
|
||||||
gen_interface_names = [interface_name for interface_name in container_network_interfaces if interface_name != 'lo']
|
gen_interface_names = [interface_name for interface_name in instance_network_interfaces if interface_name != 'lo']
|
||||||
for interface_name in gen_interface_names:
|
for interface_name in gen_interface_names:
|
||||||
gen_address = [address for address in container_network_interfaces[interface_name]['addresses'] if address.get('scope') != 'link']
|
gen_address = [address for address in instance_network_interfaces[interface_name]['addresses'] if address.get('scope') != 'link']
|
||||||
network_configuration[interface_name] = []
|
network_configuration[interface_name] = []
|
||||||
for address in gen_address:
|
for address in gen_address:
|
||||||
address_set = {}
|
address_set = {}
|
||||||
@@ -397,24 +432,24 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
network_configuration[interface_name].append(address_set)
|
network_configuration[interface_name].append(address_set)
|
||||||
return network_configuration
|
return network_configuration
|
||||||
|
|
||||||
def get_prefered_container_network_interface(self, container_name):
|
def get_prefered_instance_network_interface(self, instance_name):
|
||||||
"""Helper to get the prefered interface of thr container
|
"""Helper to get the prefered interface of thr instance
|
||||||
|
|
||||||
Helper to get the prefered interface provide by neme pattern from 'prefered_container_network_interface'.
|
Helper to get the prefered interface provide by neme pattern from 'prefered_instance_network_interface'.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
str(containe_name): name of container
|
str(containe_name): name of instance
|
||||||
Kwargs:
|
Kwargs:
|
||||||
None
|
None
|
||||||
Raises:
|
Raises:
|
||||||
None
|
None
|
||||||
Returns:
|
Returns:
|
||||||
str(prefered_interface): None or interface name"""
|
str(prefered_interface): None or interface name"""
|
||||||
container_network_interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name))
|
instance_network_interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name))
|
||||||
prefered_interface = None # init
|
prefered_interface = None # init
|
||||||
if container_network_interfaces: # container have network interfaces
|
if instance_network_interfaces: # instance have network interfaces
|
||||||
# generator if interfaces which start with the desired pattern
|
# generator if interfaces which start with the desired pattern
|
||||||
net_generator = [interface for interface in container_network_interfaces if interface.startswith(self.prefered_container_network_interface)]
|
net_generator = [interface for interface in instance_network_interfaces if interface.startswith(self.prefered_instance_network_interface)]
|
||||||
selected_interfaces = [] # init
|
selected_interfaces = [] # init
|
||||||
for interface in net_generator:
|
for interface in net_generator:
|
||||||
selected_interfaces.append(interface)
|
selected_interfaces.append(interface)
|
||||||
@@ -422,13 +457,13 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
prefered_interface = sorted(selected_interfaces)[0]
|
prefered_interface = sorted(selected_interfaces)[0]
|
||||||
return prefered_interface
|
return prefered_interface
|
||||||
|
|
||||||
def get_container_vlans(self, container_name):
|
def get_instance_vlans(self, instance_name):
|
||||||
"""Get VLAN(s) from container
|
"""Get VLAN(s) from instance
|
||||||
|
|
||||||
Helper to get the VLAN_ID from the container
|
Helper to get the VLAN_ID from the instance
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
str(containe_name): name of container
|
str(containe_name): name of instance
|
||||||
Kwargs:
|
Kwargs:
|
||||||
None
|
None
|
||||||
Raises:
|
Raises:
|
||||||
@@ -441,13 +476,13 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
if self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network)):
|
if self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network)):
|
||||||
network_vlans[network] = self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network))
|
network_vlans[network] = self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network))
|
||||||
|
|
||||||
# get networkdevices of container and return
|
# get networkdevices of instance and return
|
||||||
# e.g.
|
# e.g.
|
||||||
# "eth0":{ "name":"eth0",
|
# "eth0":{ "name":"eth0",
|
||||||
# "network":"lxdbr0",
|
# "network":"lxdbr0",
|
||||||
# "type":"nic"},
|
# "type":"nic"},
|
||||||
vlan_ids = {}
|
vlan_ids = {}
|
||||||
devices = self._get_data_entry('containers/{0}/containers/metadata/expanded_devices'.format(to_native(container_name)))
|
devices = self._get_data_entry('instances/{0}/instances/metadata/expanded_devices'.format(to_native(instance_name)))
|
||||||
for device in devices:
|
for device in devices:
|
||||||
if 'network' in devices[device]:
|
if 'network' in devices[device]:
|
||||||
if devices[device]['network'] in network_vlans:
|
if devices[device]['network'] in network_vlans:
|
||||||
@@ -483,14 +518,14 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
except KeyError:
|
except KeyError:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def _set_data_entry(self, container_name, key, value, path=None):
|
def _set_data_entry(self, instance_name, key, value, path=None):
|
||||||
"""Helper to save data
|
"""Helper to save data
|
||||||
|
|
||||||
Helper to save the data in self.data
|
Helper to save the data in self.data
|
||||||
Detect if data is allready in branch and use dict_merge() to prevent that branch is overwritten.
|
Detect if data is allready in branch and use dict_merge() to prevent that branch is overwritten.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
str(container_name): name of container
|
str(instance_name): name of instance
|
||||||
str(key): same as dict
|
str(key): same as dict
|
||||||
*(value): same as dict
|
*(value): same as dict
|
||||||
Kwargs:
|
Kwargs:
|
||||||
@@ -501,24 +536,24 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
None"""
|
None"""
|
||||||
if not path:
|
if not path:
|
||||||
path = self.data['inventory']
|
path = self.data['inventory']
|
||||||
if container_name not in path:
|
if instance_name not in path:
|
||||||
path[container_name] = {}
|
path[instance_name] = {}
|
||||||
|
|
||||||
try:
|
try:
|
||||||
if isinstance(value, dict) and key in path[container_name]:
|
if isinstance(value, dict) and key in path[instance_name]:
|
||||||
path[container_name] = dict_merge(value, path[container_name][key])
|
path[instance_name] = dict_merge(value, path[instance_name][key])
|
||||||
else:
|
else:
|
||||||
path[container_name][key] = value
|
path[instance_name][key] = value
|
||||||
except KeyError as err:
|
except KeyError as err:
|
||||||
raise AnsibleParserError("Unable to store Informations: {0}".format(to_native(err)))
|
raise AnsibleParserError("Unable to store Informations: {0}".format(to_native(err)))
|
||||||
|
|
||||||
def extract_information_from_container_configs(self):
|
def extract_information_from_instance_configs(self):
|
||||||
"""Process configuration information
|
"""Process configuration information
|
||||||
|
|
||||||
Preparation of the data
|
Preparation of the data
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
dict(configs): Container configurations
|
dict(configs): instance configurations
|
||||||
Kwargs:
|
Kwargs:
|
||||||
None
|
None
|
||||||
Raises:
|
Raises:
|
||||||
@@ -529,33 +564,35 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
if 'inventory' not in self.data:
|
if 'inventory' not in self.data:
|
||||||
self.data['inventory'] = {}
|
self.data['inventory'] = {}
|
||||||
|
|
||||||
for container_name in self.data['containers']:
|
for instance_name in self.data['instances']:
|
||||||
self._set_data_entry(container_name, 'os', self._get_data_entry(
|
self._set_data_entry(instance_name, 'os', self._get_data_entry(
|
||||||
'containers/{0}/containers/metadata/config/image.os'.format(container_name)))
|
'instances/{0}/instances/metadata/config/image.os'.format(instance_name)))
|
||||||
self._set_data_entry(container_name, 'release', self._get_data_entry(
|
self._set_data_entry(instance_name, 'release', self._get_data_entry(
|
||||||
'containers/{0}/containers/metadata/config/image.release'.format(container_name)))
|
'instances/{0}/instances/metadata/config/image.release'.format(instance_name)))
|
||||||
self._set_data_entry(container_name, 'version', self._get_data_entry(
|
self._set_data_entry(instance_name, 'version', self._get_data_entry(
|
||||||
'containers/{0}/containers/metadata/config/image.version'.format(container_name)))
|
'instances/{0}/instances/metadata/config/image.version'.format(instance_name)))
|
||||||
self._set_data_entry(container_name, 'profile', self._get_data_entry(
|
self._set_data_entry(instance_name, 'profile', self._get_data_entry(
|
||||||
'containers/{0}/containers/metadata/profiles'.format(container_name)))
|
'instances/{0}/instances/metadata/profiles'.format(instance_name)))
|
||||||
self._set_data_entry(container_name, 'location', self._get_data_entry(
|
self._set_data_entry(instance_name, 'location', self._get_data_entry(
|
||||||
'containers/{0}/containers/metadata/location'.format(container_name)))
|
'instances/{0}/instances/metadata/location'.format(instance_name)))
|
||||||
self._set_data_entry(container_name, 'state', self._get_data_entry(
|
self._set_data_entry(instance_name, 'state', self._get_data_entry(
|
||||||
'containers/{0}/containers/metadata/config/volatile.last_state.power'.format(container_name)))
|
'instances/{0}/instances/metadata/config/volatile.last_state.power'.format(instance_name)))
|
||||||
self._set_data_entry(container_name, 'network_interfaces', self.extract_network_information_from_container_config(container_name))
|
self._set_data_entry(instance_name, 'type', self._get_data_entry(
|
||||||
self._set_data_entry(container_name, 'preferred_interface', self.get_prefered_container_network_interface(container_name))
|
'instances/{0}/instances/metadata/type'.format(instance_name)))
|
||||||
self._set_data_entry(container_name, 'vlan_ids', self.get_container_vlans(container_name))
|
self._set_data_entry(instance_name, 'network_interfaces', self.extract_network_information_from_instance_config(instance_name))
|
||||||
|
self._set_data_entry(instance_name, 'preferred_interface', self.get_prefered_instance_network_interface(instance_name))
|
||||||
|
self._set_data_entry(instance_name, 'vlan_ids', self.get_instance_vlans(instance_name))
|
||||||
|
|
||||||
def build_inventory_network(self, container_name):
|
def build_inventory_network(self, instance_name):
|
||||||
"""Add the network interfaces of the container to the inventory
|
"""Add the network interfaces of the instance to the inventory
|
||||||
|
|
||||||
Logic:
|
Logic:
|
||||||
- if the container have no interface -> 'ansible_connection: local'
|
- if the instance have no interface -> 'ansible_connection: local'
|
||||||
- get preferred_interface & prefered_container_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
|
- get preferred_interface & prefered_instance_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
|
||||||
- first Interface from: network_interfaces prefered_container_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
|
- first Interface from: network_interfaces prefered_instance_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
str(container_name): name of container
|
str(instance_name): name of instance
|
||||||
Kwargs:
|
Kwargs:
|
||||||
None
|
None
|
||||||
Raises:
|
Raises:
|
||||||
@@ -563,45 +600,45 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
Returns:
|
Returns:
|
||||||
None"""
|
None"""
|
||||||
|
|
||||||
def interface_selection(container_name):
|
def interface_selection(instance_name):
|
||||||
"""Select container Interface for inventory
|
"""Select instance Interface for inventory
|
||||||
|
|
||||||
Logic:
|
Logic:
|
||||||
- get preferred_interface & prefered_container_network_family -> str(IP)
|
- get preferred_interface & prefered_instance_network_family -> str(IP)
|
||||||
- first Interface from: network_interfaces prefered_container_network_family -> str(IP)
|
- first Interface from: network_interfaces prefered_instance_network_family -> str(IP)
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
str(container_name): name of container
|
str(instance_name): name of instance
|
||||||
Kwargs:
|
Kwargs:
|
||||||
None
|
None
|
||||||
Raises:
|
Raises:
|
||||||
None
|
None
|
||||||
Returns:
|
Returns:
|
||||||
dict(interface_name: ip)"""
|
dict(interface_name: ip)"""
|
||||||
prefered_interface = self._get_data_entry('inventory/{0}/preferred_interface'.format(container_name)) # name or None
|
prefered_interface = self._get_data_entry('inventory/{0}/preferred_interface'.format(instance_name)) # name or None
|
||||||
prefered_container_network_family = self.prefered_container_network_family
|
prefered_instance_network_family = self.prefered_instance_network_family
|
||||||
|
|
||||||
ip_address = ''
|
ip_address = ''
|
||||||
if prefered_interface:
|
if prefered_interface:
|
||||||
interface = self._get_data_entry('inventory/{0}/network_interfaces/{1}'.format(container_name, prefered_interface))
|
interface = self._get_data_entry('inventory/{0}/network_interfaces/{1}'.format(instance_name, prefered_interface))
|
||||||
for config in interface:
|
for config in interface:
|
||||||
if config['family'] == prefered_container_network_family:
|
if config['family'] == prefered_instance_network_family:
|
||||||
ip_address = config['address']
|
ip_address = config['address']
|
||||||
break
|
break
|
||||||
else:
|
else:
|
||||||
interface = self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name))
|
interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name))
|
||||||
for config in interface:
|
for interface in interfaces.values():
|
||||||
if config['family'] == prefered_container_network_family:
|
for config in interface:
|
||||||
ip_address = config['address']
|
if config['family'] == prefered_instance_network_family:
|
||||||
break
|
ip_address = config['address']
|
||||||
|
break
|
||||||
return ip_address
|
return ip_address
|
||||||
|
|
||||||
if self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name)): # container have network interfaces
|
if self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)): # instance have network interfaces
|
||||||
if self._get_data_entry('inventory/{0}/preferred_interface'.format(container_name)): # container have a preferred interface
|
self.inventory.set_variable(instance_name, 'ansible_connection', 'ssh')
|
||||||
self.inventory.set_variable(container_name, 'ansible_connection', 'ssh')
|
self.inventory.set_variable(instance_name, 'ansible_host', interface_selection(instance_name))
|
||||||
self.inventory.set_variable(container_name, 'ansible_host', interface_selection(container_name))
|
|
||||||
else:
|
else:
|
||||||
self.inventory.set_variable(container_name, 'ansible_connection', 'local')
|
self.inventory.set_variable(instance_name, 'ansible_connection', 'local')
|
||||||
|
|
||||||
def build_inventory_hosts(self):
|
def build_inventory_hosts(self):
|
||||||
"""Build host-part dynamic inventory
|
"""Build host-part dynamic inventory
|
||||||
@@ -617,29 +654,33 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
None
|
None
|
||||||
Returns:
|
Returns:
|
||||||
None"""
|
None"""
|
||||||
for container_name in self.data['inventory']:
|
for instance_name in self.data['inventory']:
|
||||||
# Only consider containers that match the "state" filter, if self.state is not None
|
instance_state = str(self._get_data_entry('inventory/{0}/state'.format(instance_name)) or "STOPPED").lower()
|
||||||
|
|
||||||
|
# Only consider instances that match the "state" filter, if self.state is not None
|
||||||
if self.filter:
|
if self.filter:
|
||||||
if self.filter.lower() != self._get_data_entry('inventory/{0}/state'.format(container_name)).lower():
|
if self.filter.lower() != instance_state:
|
||||||
continue
|
continue
|
||||||
# add container
|
# add instance
|
||||||
self.inventory.add_host(container_name)
|
self.inventory.add_host(instance_name)
|
||||||
# add network informations
|
# add network informations
|
||||||
self.build_inventory_network(container_name)
|
self.build_inventory_network(instance_name)
|
||||||
# add os
|
# add os
|
||||||
self.inventory.set_variable(container_name, 'ansible_lxd_os', self._get_data_entry('inventory/{0}/os'.format(container_name)).lower())
|
self.inventory.set_variable(instance_name, 'ansible_lxd_os', self._get_data_entry('inventory/{0}/os'.format(instance_name)).lower())
|
||||||
# add release
|
# add release
|
||||||
self.inventory.set_variable(container_name, 'ansible_lxd_release', self._get_data_entry('inventory/{0}/release'.format(container_name)).lower())
|
self.inventory.set_variable(instance_name, 'ansible_lxd_release', self._get_data_entry('inventory/{0}/release'.format(instance_name)).lower())
|
||||||
# add profile
|
# add profile
|
||||||
self.inventory.set_variable(container_name, 'ansible_lxd_profile', self._get_data_entry('inventory/{0}/profile'.format(container_name)))
|
self.inventory.set_variable(instance_name, 'ansible_lxd_profile', self._get_data_entry('inventory/{0}/profile'.format(instance_name)))
|
||||||
# add state
|
# add state
|
||||||
self.inventory.set_variable(container_name, 'ansible_lxd_state', self._get_data_entry('inventory/{0}/state'.format(container_name)).lower())
|
self.inventory.set_variable(instance_name, 'ansible_lxd_state', instance_state)
|
||||||
|
# add type
|
||||||
|
self.inventory.set_variable(instance_name, 'ansible_lxd_type', self._get_data_entry('inventory/{0}/type'.format(instance_name)))
|
||||||
# add location information
|
# add location information
|
||||||
if self._get_data_entry('inventory/{0}/location'.format(container_name)) != "none": # wrong type by lxd 'none' != 'None'
|
if self._get_data_entry('inventory/{0}/location'.format(instance_name)) != "none": # wrong type by lxd 'none' != 'None'
|
||||||
self.inventory.set_variable(container_name, 'ansible_lxd_location', self._get_data_entry('inventory/{0}/location'.format(container_name)))
|
self.inventory.set_variable(instance_name, 'ansible_lxd_location', self._get_data_entry('inventory/{0}/location'.format(instance_name)))
|
||||||
# add VLAN_ID information
|
# add VLAN_ID information
|
||||||
if self._get_data_entry('inventory/{0}/vlan_ids'.format(container_name)):
|
if self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)):
|
||||||
self.inventory.set_variable(container_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(container_name)))
|
self.inventory.set_variable(instance_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)))
|
||||||
|
|
||||||
def build_inventory_groups_location(self, group_name):
|
def build_inventory_groups_location(self, group_name):
|
||||||
"""create group by attribute: location
|
"""create group by attribute: location
|
||||||
@@ -656,9 +697,9 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
if group_name not in self.inventory.groups:
|
if group_name not in self.inventory.groups:
|
||||||
self.inventory.add_group(group_name)
|
self.inventory.add_group(group_name)
|
||||||
|
|
||||||
for container_name in self.inventory.hosts:
|
for instance_name in self.inventory.hosts:
|
||||||
if 'ansible_lxd_location' in self.inventory.get_host(container_name).get_vars():
|
if 'ansible_lxd_location' in self.inventory.get_host(instance_name).get_vars():
|
||||||
self.inventory.add_child(group_name, container_name)
|
self.inventory.add_child(group_name, instance_name)
|
||||||
|
|
||||||
def build_inventory_groups_pattern(self, group_name):
|
def build_inventory_groups_pattern(self, group_name):
|
||||||
"""create group by name pattern
|
"""create group by name pattern
|
||||||
@@ -677,10 +718,10 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
|
|
||||||
regex_pattern = self.groupby[group_name].get('attribute')
|
regex_pattern = self.groupby[group_name].get('attribute')
|
||||||
|
|
||||||
for container_name in self.inventory.hosts:
|
for instance_name in self.inventory.hosts:
|
||||||
result = re.search(regex_pattern, container_name)
|
result = re.search(regex_pattern, instance_name)
|
||||||
if result:
|
if result:
|
||||||
self.inventory.add_child(group_name, container_name)
|
self.inventory.add_child(group_name, instance_name)
|
||||||
|
|
||||||
def build_inventory_groups_network_range(self, group_name):
|
def build_inventory_groups_network_range(self, group_name):
|
||||||
"""check if IP is in network-class
|
"""check if IP is in network-class
|
||||||
@@ -703,14 +744,14 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
raise AnsibleParserError(
|
raise AnsibleParserError(
|
||||||
'Error while parsing network range {0}: {1}'.format(self.groupby[group_name].get('attribute'), to_native(err)))
|
'Error while parsing network range {0}: {1}'.format(self.groupby[group_name].get('attribute'), to_native(err)))
|
||||||
|
|
||||||
for container_name in self.inventory.hosts:
|
for instance_name in self.inventory.hosts:
|
||||||
if self.data['inventory'][container_name].get('network_interfaces') is not None:
|
if self.data['inventory'][instance_name].get('network_interfaces') is not None:
|
||||||
for interface in self.data['inventory'][container_name].get('network_interfaces'):
|
for interface in self.data['inventory'][instance_name].get('network_interfaces'):
|
||||||
for interface_family in self.data['inventory'][container_name].get('network_interfaces')[interface]:
|
for interface_family in self.data['inventory'][instance_name].get('network_interfaces')[interface]:
|
||||||
try:
|
try:
|
||||||
address = ipaddress.ip_address(to_text(interface_family['address']))
|
address = ipaddress.ip_address(to_text(interface_family['address']))
|
||||||
if address.version == network.version and address in network:
|
if address.version == network.version and address in network:
|
||||||
self.inventory.add_child(group_name, container_name)
|
self.inventory.add_child(group_name, instance_name)
|
||||||
except ValueError:
|
except ValueError:
|
||||||
# Ignore invalid IP addresses returned by lxd
|
# Ignore invalid IP addresses returned by lxd
|
||||||
pass
|
pass
|
||||||
@@ -721,7 +762,7 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
Args:
|
Args:
|
||||||
str(group_name): Group name
|
str(group_name): Group name
|
||||||
Kwargs:
|
Kwargs:
|
||||||
Noneself.data['inventory'][container_name][interface]
|
None
|
||||||
Raises:
|
Raises:
|
||||||
None
|
None
|
||||||
Returns:
|
Returns:
|
||||||
@@ -730,12 +771,12 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
if group_name not in self.inventory.groups:
|
if group_name not in self.inventory.groups:
|
||||||
self.inventory.add_group(group_name)
|
self.inventory.add_group(group_name)
|
||||||
|
|
||||||
gen_containers = [
|
gen_instances = [
|
||||||
container_name for container_name in self.inventory.hosts
|
instance_name for instance_name in self.inventory.hosts
|
||||||
if 'ansible_lxd_os' in self.inventory.get_host(container_name).get_vars()]
|
if 'ansible_lxd_os' in self.inventory.get_host(instance_name).get_vars()]
|
||||||
for container_name in gen_containers:
|
for instance_name in gen_instances:
|
||||||
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(container_name).get_vars().get('ansible_lxd_os'):
|
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_os'):
|
||||||
self.inventory.add_child(group_name, container_name)
|
self.inventory.add_child(group_name, instance_name)
|
||||||
|
|
||||||
def build_inventory_groups_release(self, group_name):
|
def build_inventory_groups_release(self, group_name):
|
||||||
"""create group by attribute: release
|
"""create group by attribute: release
|
||||||
@@ -752,12 +793,12 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
if group_name not in self.inventory.groups:
|
if group_name not in self.inventory.groups:
|
||||||
self.inventory.add_group(group_name)
|
self.inventory.add_group(group_name)
|
||||||
|
|
||||||
gen_containers = [
|
gen_instances = [
|
||||||
container_name for container_name in self.inventory.hosts
|
instance_name for instance_name in self.inventory.hosts
|
||||||
if 'ansible_lxd_release' in self.inventory.get_host(container_name).get_vars()]
|
if 'ansible_lxd_release' in self.inventory.get_host(instance_name).get_vars()]
|
||||||
for container_name in gen_containers:
|
for instance_name in gen_instances:
|
||||||
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(container_name).get_vars().get('ansible_lxd_release'):
|
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_release'):
|
||||||
self.inventory.add_child(group_name, container_name)
|
self.inventory.add_child(group_name, instance_name)
|
||||||
|
|
||||||
def build_inventory_groups_profile(self, group_name):
|
def build_inventory_groups_profile(self, group_name):
|
||||||
"""create group by attribute: profile
|
"""create group by attribute: profile
|
||||||
@@ -774,12 +815,12 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
if group_name not in self.inventory.groups:
|
if group_name not in self.inventory.groups:
|
||||||
self.inventory.add_group(group_name)
|
self.inventory.add_group(group_name)
|
||||||
|
|
||||||
gen_containers = [
|
gen_instances = [
|
||||||
container_name for container_name in self.inventory.hosts.keys()
|
instance_name for instance_name in self.inventory.hosts.keys()
|
||||||
if 'ansible_lxd_profile' in self.inventory.get_host(container_name).get_vars().keys()]
|
if 'ansible_lxd_profile' in self.inventory.get_host(instance_name).get_vars().keys()]
|
||||||
for container_name in gen_containers:
|
for instance_name in gen_instances:
|
||||||
if self.groupby[group_name].get('attribute').lower() in self.inventory.get_host(container_name).get_vars().get('ansible_lxd_profile'):
|
if self.groupby[group_name].get('attribute').lower() in self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_profile'):
|
||||||
self.inventory.add_child(group_name, container_name)
|
self.inventory.add_child(group_name, instance_name)
|
||||||
|
|
||||||
def build_inventory_groups_vlanid(self, group_name):
|
def build_inventory_groups_vlanid(self, group_name):
|
||||||
"""create group by attribute: vlanid
|
"""create group by attribute: vlanid
|
||||||
@@ -796,12 +837,34 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
if group_name not in self.inventory.groups:
|
if group_name not in self.inventory.groups:
|
||||||
self.inventory.add_group(group_name)
|
self.inventory.add_group(group_name)
|
||||||
|
|
||||||
gen_containers = [
|
gen_instances = [
|
||||||
container_name for container_name in self.inventory.hosts.keys()
|
instance_name for instance_name in self.inventory.hosts.keys()
|
||||||
if 'ansible_lxd_vlan_ids' in self.inventory.get_host(container_name).get_vars().keys()]
|
if 'ansible_lxd_vlan_ids' in self.inventory.get_host(instance_name).get_vars().keys()]
|
||||||
for container_name in gen_containers:
|
for instance_name in gen_instances:
|
||||||
if self.groupby[group_name].get('attribute') in self.inventory.get_host(container_name).get_vars().get('ansible_lxd_vlan_ids').values():
|
if self.groupby[group_name].get('attribute') in self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_vlan_ids').values():
|
||||||
self.inventory.add_child(group_name, container_name)
|
self.inventory.add_child(group_name, instance_name)
|
||||||
|
|
||||||
|
def build_inventory_groups_type(self, group_name):
|
||||||
|
"""create group by attribute: type
|
||||||
|
|
||||||
|
Args:
|
||||||
|
str(group_name): Group name
|
||||||
|
Kwargs:
|
||||||
|
None
|
||||||
|
Raises:
|
||||||
|
None
|
||||||
|
Returns:
|
||||||
|
None"""
|
||||||
|
# maybe we just want to expand one group
|
||||||
|
if group_name not in self.inventory.groups:
|
||||||
|
self.inventory.add_group(group_name)
|
||||||
|
|
||||||
|
gen_instances = [
|
||||||
|
instance_name for instance_name in self.inventory.hosts
|
||||||
|
if 'ansible_lxd_type' in self.inventory.get_host(instance_name).get_vars()]
|
||||||
|
for instance_name in gen_instances:
|
||||||
|
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_type'):
|
||||||
|
self.inventory.add_child(group_name, instance_name)
|
||||||
|
|
||||||
def build_inventory_groups(self):
|
def build_inventory_groups(self):
|
||||||
"""Build group-part dynamic inventory
|
"""Build group-part dynamic inventory
|
||||||
@@ -830,6 +893,7 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
* 'release'
|
* 'release'
|
||||||
* 'profile'
|
* 'profile'
|
||||||
* 'vlanid'
|
* 'vlanid'
|
||||||
|
* 'type'
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
str(group_name): Group name
|
str(group_name): Group name
|
||||||
@@ -855,6 +919,8 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
self.build_inventory_groups_profile(group_name)
|
self.build_inventory_groups_profile(group_name)
|
||||||
elif self.groupby[group_name].get('type') == 'vlanid':
|
elif self.groupby[group_name].get('type') == 'vlanid':
|
||||||
self.build_inventory_groups_vlanid(group_name)
|
self.build_inventory_groups_vlanid(group_name)
|
||||||
|
elif self.groupby[group_name].get('type') == 'type':
|
||||||
|
self.build_inventory_groups_type(group_name)
|
||||||
else:
|
else:
|
||||||
raise AnsibleParserError('Unknown group type: {0}'.format(to_native(group_name)))
|
raise AnsibleParserError('Unknown group type: {0}'.format(to_native(group_name)))
|
||||||
|
|
||||||
@@ -881,10 +947,30 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
self.build_inventory_hosts()
|
self.build_inventory_hosts()
|
||||||
self.build_inventory_groups()
|
self.build_inventory_groups()
|
||||||
|
|
||||||
|
def cleandata(self):
|
||||||
|
"""Clean the dynamic inventory
|
||||||
|
|
||||||
|
The first version of the inventory only supported container.
|
||||||
|
This will change in the future.
|
||||||
|
The following function cleans up the data and remove the all items with the wrong type.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
None
|
||||||
|
Kwargs:
|
||||||
|
None
|
||||||
|
Raises:
|
||||||
|
None
|
||||||
|
Returns:
|
||||||
|
None"""
|
||||||
|
iter_keys = list(self.data['instances'].keys())
|
||||||
|
for instance_name in iter_keys:
|
||||||
|
if self._get_data_entry('instances/{0}/instances/metadata/type'.format(instance_name)) != self.type_filter:
|
||||||
|
del self.data['instances'][instance_name]
|
||||||
|
|
||||||
def _populate(self):
|
def _populate(self):
|
||||||
"""Return the hosts and groups
|
"""Return the hosts and groups
|
||||||
|
|
||||||
Returns the processed container configurations from the lxd import
|
Returns the processed instance configurations from the lxd import
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
None
|
None
|
||||||
@@ -897,10 +983,16 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
|
|
||||||
if len(self.data) == 0: # If no data is injected by unittests open socket
|
if len(self.data) == 0: # If no data is injected by unittests open socket
|
||||||
self.socket = self._connect_to_socket()
|
self.socket = self._connect_to_socket()
|
||||||
self.get_container_data(self._get_containers())
|
self.get_instance_data(self._get_instances())
|
||||||
self.get_network_data(self._get_networks())
|
self.get_network_data(self._get_networks())
|
||||||
|
|
||||||
self.extract_information_from_container_configs()
|
# The first version of the inventory only supported containers.
|
||||||
|
# This will change in the future.
|
||||||
|
# The following function cleans up the data.
|
||||||
|
if self.type_filter != 'both':
|
||||||
|
self.cleandata()
|
||||||
|
|
||||||
|
self.extract_information_from_instance_configs()
|
||||||
|
|
||||||
# self.display.vvv(self.save_json_data([os.path.abspath(__file__)]))
|
# self.display.vvv(self.save_json_data([os.path.abspath(__file__)]))
|
||||||
|
|
||||||
@@ -924,6 +1016,10 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
AnsibleParserError
|
AnsibleParserError
|
||||||
Returns:
|
Returns:
|
||||||
None"""
|
None"""
|
||||||
|
if IPADDRESS_IMPORT_ERROR:
|
||||||
|
raise_from(
|
||||||
|
AnsibleError('another_library must be installed to use this plugin'),
|
||||||
|
IPADDRESS_IMPORT_ERROR)
|
||||||
|
|
||||||
super(InventoryModule, self).parse(inventory, loader, path, cache=False)
|
super(InventoryModule, self).parse(inventory, loader, path, cache=False)
|
||||||
# Read the inventory YAML file
|
# Read the inventory YAML file
|
||||||
@@ -935,8 +1031,9 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
self.data = {} # store for inventory-data
|
self.data = {} # store for inventory-data
|
||||||
self.groupby = self.get_option('groupby')
|
self.groupby = self.get_option('groupby')
|
||||||
self.plugin = self.get_option('plugin')
|
self.plugin = self.get_option('plugin')
|
||||||
self.prefered_container_network_family = self.get_option('prefered_container_network_family')
|
self.prefered_instance_network_family = self.get_option('prefered_instance_network_family')
|
||||||
self.prefered_container_network_interface = self.get_option('prefered_container_network_interface')
|
self.prefered_instance_network_interface = self.get_option('prefered_instance_network_interface')
|
||||||
|
self.type_filter = self.get_option('type_filter')
|
||||||
if self.get_option('state').lower() == 'none': # none in config is str()
|
if self.get_option('state').lower() == 'none': # none in config is str()
|
||||||
self.filter = None
|
self.filter = None
|
||||||
else:
|
else:
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ __metaclass__ = type
|
|||||||
DOCUMENTATION = r'''
|
DOCUMENTATION = r'''
|
||||||
name: online
|
name: online
|
||||||
author:
|
author:
|
||||||
- Remy Leone (@sieben)
|
- Remy Leone (@remyleone)
|
||||||
short_description: Scaleway (previously Online SAS or Online.net) inventory source
|
short_description: Scaleway (previously Online SAS or Online.net) inventory source
|
||||||
description:
|
description:
|
||||||
- Get inventory hosts from Scaleway (previously Online SAS or Online.net).
|
- Get inventory hosts from Scaleway (previously Online SAS or Online.net).
|
||||||
|
|||||||
@@ -119,12 +119,13 @@ compose:
|
|||||||
import re
|
import re
|
||||||
|
|
||||||
from ansible.module_utils.common._collections_compat import MutableMapping
|
from ansible.module_utils.common._collections_compat import MutableMapping
|
||||||
from distutils.version import LooseVersion
|
|
||||||
|
|
||||||
from ansible.errors import AnsibleError
|
from ansible.errors import AnsibleError
|
||||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
|
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
|
||||||
from ansible.module_utils.six.moves.urllib.parse import urlencode
|
from ansible.module_utils.six.moves.urllib.parse import urlencode
|
||||||
|
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||||
|
|
||||||
# 3rd party imports
|
# 3rd party imports
|
||||||
try:
|
try:
|
||||||
import requests
|
import requests
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ __metaclass__ = type
|
|||||||
DOCUMENTATION = r'''
|
DOCUMENTATION = r'''
|
||||||
name: scaleway
|
name: scaleway
|
||||||
author:
|
author:
|
||||||
- Remy Leone (@sieben)
|
- Remy Leone (@remyleone)
|
||||||
short_description: Scaleway inventory source
|
short_description: Scaleway inventory source
|
||||||
description:
|
description:
|
||||||
- Get inventory hosts from Scaleway.
|
- Get inventory hosts from Scaleway.
|
||||||
|
|||||||
328
plugins/inventory/xen_orchestra.py
Normal file
328
plugins/inventory/xen_orchestra.py
Normal file
@@ -0,0 +1,328 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# Copyright (c) 2021 Ansible Project
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
name: xen_orchestra
|
||||||
|
short_description: Xen Orchestra inventory source
|
||||||
|
version_added: 4.1.0
|
||||||
|
author:
|
||||||
|
- Dom Del Nano (@ddelnano) <ddelnano@gmail.com>
|
||||||
|
- Samori Gorse (@shinuza) <samorigorse@gmail.com>
|
||||||
|
requirements:
|
||||||
|
- websocket-client >= 1.0.0
|
||||||
|
description:
|
||||||
|
- Get inventory hosts from a Xen Orchestra deployment.
|
||||||
|
- 'Uses a configuration file as an inventory source, it must end in C(.xen_orchestra.yml) or C(.xen_orchestra.yaml).'
|
||||||
|
extends_documentation_fragment:
|
||||||
|
- constructed
|
||||||
|
- inventory_cache
|
||||||
|
options:
|
||||||
|
plugin:
|
||||||
|
description: The name of this plugin, it should always be set to C(community.general.xen_orchestra) for this plugin to recognize it as its own.
|
||||||
|
required: yes
|
||||||
|
choices: ['community.general.xen_orchestra']
|
||||||
|
type: str
|
||||||
|
api_host:
|
||||||
|
description:
|
||||||
|
- API host to XOA API.
|
||||||
|
- If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_HOST) will be used instead.
|
||||||
|
type: str
|
||||||
|
env:
|
||||||
|
- name: ANSIBLE_XO_HOST
|
||||||
|
user:
|
||||||
|
description:
|
||||||
|
- Xen Orchestra user.
|
||||||
|
- If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_USER) will be used instead.
|
||||||
|
required: yes
|
||||||
|
type: str
|
||||||
|
env:
|
||||||
|
- name: ANSIBLE_XO_USER
|
||||||
|
password:
|
||||||
|
description:
|
||||||
|
- Xen Orchestra password.
|
||||||
|
- If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_PASSWORD) will be used instead.
|
||||||
|
required: yes
|
||||||
|
type: str
|
||||||
|
env:
|
||||||
|
- name: ANSIBLE_XO_PASSWORD
|
||||||
|
validate_certs:
|
||||||
|
description: Verify TLS certificate if using HTTPS.
|
||||||
|
type: boolean
|
||||||
|
default: true
|
||||||
|
use_ssl:
|
||||||
|
description: Use wss when connecting to the Xen Orchestra API
|
||||||
|
type: boolean
|
||||||
|
default: true
|
||||||
|
'''
|
||||||
|
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
# file must be named xen_orchestra.yaml or xen_orchestra.yml
|
||||||
|
simple_config_file:
|
||||||
|
plugin: community.general.xen_orchestra
|
||||||
|
api_host: 192.168.1.255
|
||||||
|
user: xo
|
||||||
|
password: xo_pwd
|
||||||
|
validate_certs: true
|
||||||
|
use_ssl: true
|
||||||
|
groups:
|
||||||
|
kube_nodes: "'kube_node' in tags"
|
||||||
|
compose:
|
||||||
|
ansible_port: 2222
|
||||||
|
|
||||||
|
'''
|
||||||
|
|
||||||
|
import json
|
||||||
|
import ssl
|
||||||
|
|
||||||
|
from ansible.errors import AnsibleError
|
||||||
|
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
|
||||||
|
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||||
|
|
||||||
|
# 3rd party imports
|
||||||
|
try:
|
||||||
|
HAS_WEBSOCKET = True
|
||||||
|
import websocket
|
||||||
|
from websocket import create_connection
|
||||||
|
|
||||||
|
if LooseVersion(websocket.__version__) <= LooseVersion('1.0.0'):
|
||||||
|
raise ImportError
|
||||||
|
except ImportError as e:
|
||||||
|
HAS_WEBSOCKET = False
|
||||||
|
|
||||||
|
|
||||||
|
HALTED = 'Halted'
|
||||||
|
PAUSED = 'Paused'
|
||||||
|
RUNNING = 'Running'
|
||||||
|
SUSPENDED = 'Suspended'
|
||||||
|
POWER_STATES = [RUNNING, HALTED, SUSPENDED, PAUSED]
|
||||||
|
HOST_GROUP = 'xo_hosts'
|
||||||
|
POOL_GROUP = 'xo_pools'
|
||||||
|
|
||||||
|
|
||||||
|
def clean_group_name(label):
|
||||||
|
return label.lower().replace(' ', '-').replace('-', '_')
|
||||||
|
|
||||||
|
|
||||||
|
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||||
|
''' Host inventory parser for ansible using XenOrchestra as source. '''
|
||||||
|
|
||||||
|
NAME = 'community.general.xen_orchestra'
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
|
||||||
|
super(InventoryModule, self).__init__()
|
||||||
|
|
||||||
|
# from config
|
||||||
|
self.counter = -1
|
||||||
|
self.session = None
|
||||||
|
self.cache_key = None
|
||||||
|
self.use_cache = None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def pointer(self):
|
||||||
|
self.counter += 1
|
||||||
|
return self.counter
|
||||||
|
|
||||||
|
def create_connection(self, xoa_api_host):
|
||||||
|
validate_certs = self.get_option('validate_certs')
|
||||||
|
use_ssl = self.get_option('use_ssl')
|
||||||
|
proto = 'wss' if use_ssl else 'ws'
|
||||||
|
|
||||||
|
sslopt = None if validate_certs else {'cert_reqs': ssl.CERT_NONE}
|
||||||
|
self.conn = create_connection(
|
||||||
|
'{0}://{1}/api/'.format(proto, xoa_api_host), sslopt=sslopt)
|
||||||
|
|
||||||
|
def login(self, user, password):
|
||||||
|
payload = {'id': self.pointer, 'jsonrpc': '2.0', 'method': 'session.signIn', 'params': {
|
||||||
|
'username': user, 'password': password}}
|
||||||
|
self.conn.send(json.dumps(payload))
|
||||||
|
result = json.loads(self.conn.recv())
|
||||||
|
|
||||||
|
if 'error' in result:
|
||||||
|
raise AnsibleError(
|
||||||
|
'Could not connect: {0}'.format(result['error']))
|
||||||
|
|
||||||
|
def get_object(self, name):
|
||||||
|
payload = {'id': self.pointer, 'jsonrpc': '2.0',
|
||||||
|
'method': 'xo.getAllObjects', 'params': {'filter': {'type': name}}}
|
||||||
|
self.conn.send(json.dumps(payload))
|
||||||
|
answer = json.loads(self.conn.recv())
|
||||||
|
|
||||||
|
if 'error' in answer:
|
||||||
|
raise AnsibleError(
|
||||||
|
'Could not request: {0}'.format(answer['error']))
|
||||||
|
|
||||||
|
return answer['result']
|
||||||
|
|
||||||
|
def _get_objects(self):
|
||||||
|
self.create_connection(self.xoa_api_host)
|
||||||
|
self.login(self.xoa_user, self.xoa_password)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'vms': self.get_object('VM'),
|
||||||
|
'pools': self.get_object('pool'),
|
||||||
|
'hosts': self.get_object('host'),
|
||||||
|
}
|
||||||
|
|
||||||
|
def _apply_constructable(self, name, variables):
|
||||||
|
strict = self.get_option('strict')
|
||||||
|
self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=strict)
|
||||||
|
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=strict)
|
||||||
|
self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict)
|
||||||
|
|
||||||
|
def _add_vms(self, vms, hosts, pools):
|
||||||
|
for uuid, vm in vms.items():
|
||||||
|
group = 'with_ip'
|
||||||
|
ip = vm.get('mainIpAddress')
|
||||||
|
entry_name = uuid
|
||||||
|
power_state = vm['power_state'].lower()
|
||||||
|
pool_name = self._pool_group_name_for_uuid(pools, vm['$poolId'])
|
||||||
|
host_name = self._host_group_name_for_uuid(hosts, vm['$container'])
|
||||||
|
|
||||||
|
self.inventory.add_host(entry_name)
|
||||||
|
|
||||||
|
# Grouping by power state
|
||||||
|
self.inventory.add_child(power_state, entry_name)
|
||||||
|
|
||||||
|
# Grouping by host
|
||||||
|
if host_name:
|
||||||
|
self.inventory.add_child(host_name, entry_name)
|
||||||
|
|
||||||
|
# Grouping by pool
|
||||||
|
if pool_name:
|
||||||
|
self.inventory.add_child(pool_name, entry_name)
|
||||||
|
|
||||||
|
# Grouping VMs with an IP together
|
||||||
|
if ip is None:
|
||||||
|
group = 'without_ip'
|
||||||
|
self.inventory.add_group(group)
|
||||||
|
self.inventory.add_child(group, entry_name)
|
||||||
|
|
||||||
|
# Adding meta
|
||||||
|
self.inventory.set_variable(entry_name, 'uuid', uuid)
|
||||||
|
self.inventory.set_variable(entry_name, 'ip', ip)
|
||||||
|
self.inventory.set_variable(entry_name, 'ansible_host', ip)
|
||||||
|
self.inventory.set_variable(entry_name, 'power_state', power_state)
|
||||||
|
self.inventory.set_variable(
|
||||||
|
entry_name, 'name_label', vm['name_label'])
|
||||||
|
self.inventory.set_variable(entry_name, 'type', vm['type'])
|
||||||
|
self.inventory.set_variable(
|
||||||
|
entry_name, 'cpus', vm['CPUs']['number'])
|
||||||
|
self.inventory.set_variable(entry_name, 'tags', vm['tags'])
|
||||||
|
self.inventory.set_variable(
|
||||||
|
entry_name, 'memory', vm['memory']['size'])
|
||||||
|
self.inventory.set_variable(
|
||||||
|
entry_name, 'has_ip', group == 'with_ip')
|
||||||
|
self.inventory.set_variable(
|
||||||
|
entry_name, 'is_managed', vm.get('managementAgentDetected', False))
|
||||||
|
self.inventory.set_variable(
|
||||||
|
entry_name, 'os_version', vm['os_version'])
|
||||||
|
|
||||||
|
self._apply_constructable(entry_name, self.inventory.get_host(entry_name).get_vars())
|
||||||
|
|
||||||
|
def _add_hosts(self, hosts, pools):
|
||||||
|
for host in hosts.values():
|
||||||
|
entry_name = host['uuid']
|
||||||
|
group_name = 'xo_host_{0}'.format(
|
||||||
|
clean_group_name(host['name_label']))
|
||||||
|
pool_name = self._pool_group_name_for_uuid(pools, host['$poolId'])
|
||||||
|
|
||||||
|
self.inventory.add_group(group_name)
|
||||||
|
self.inventory.add_host(entry_name)
|
||||||
|
self.inventory.add_child(HOST_GROUP, entry_name)
|
||||||
|
self.inventory.add_child(pool_name, entry_name)
|
||||||
|
|
||||||
|
self.inventory.set_variable(entry_name, 'enabled', host['enabled'])
|
||||||
|
self.inventory.set_variable(
|
||||||
|
entry_name, 'hostname', host['hostname'])
|
||||||
|
self.inventory.set_variable(entry_name, 'memory', host['memory'])
|
||||||
|
self.inventory.set_variable(entry_name, 'address', host['address'])
|
||||||
|
self.inventory.set_variable(entry_name, 'cpus', host['cpus'])
|
||||||
|
self.inventory.set_variable(entry_name, 'type', 'host')
|
||||||
|
self.inventory.set_variable(entry_name, 'tags', host['tags'])
|
||||||
|
self.inventory.set_variable(entry_name, 'version', host['version'])
|
||||||
|
self.inventory.set_variable(
|
||||||
|
entry_name, 'power_state', host['power_state'].lower())
|
||||||
|
self.inventory.set_variable(
|
||||||
|
entry_name, 'product_brand', host['productBrand'])
|
||||||
|
|
||||||
|
for pool in pools.values():
|
||||||
|
group_name = 'xo_pool_{0}'.format(
|
||||||
|
clean_group_name(pool['name_label']))
|
||||||
|
|
||||||
|
self.inventory.add_group(group_name)
|
||||||
|
|
||||||
|
def _add_pools(self, pools):
|
||||||
|
for pool in pools.values():
|
||||||
|
group_name = 'xo_pool_{0}'.format(
|
||||||
|
clean_group_name(pool['name_label']))
|
||||||
|
|
||||||
|
self.inventory.add_group(group_name)
|
||||||
|
|
||||||
|
# TODO: Refactor
|
||||||
|
def _pool_group_name_for_uuid(self, pools, pool_uuid):
|
||||||
|
for pool in pools:
|
||||||
|
if pool == pool_uuid:
|
||||||
|
return 'xo_pool_{0}'.format(
|
||||||
|
clean_group_name(pools[pool_uuid]['name_label']))
|
||||||
|
|
||||||
|
# TODO: Refactor
|
||||||
|
def _host_group_name_for_uuid(self, hosts, host_uuid):
|
||||||
|
for host in hosts:
|
||||||
|
if host == host_uuid:
|
||||||
|
return 'xo_host_{0}'.format(
|
||||||
|
clean_group_name(hosts[host_uuid]['name_label']
|
||||||
|
))
|
||||||
|
|
||||||
|
def _populate(self, objects):
|
||||||
|
# Prepare general groups
|
||||||
|
self.inventory.add_group(HOST_GROUP)
|
||||||
|
self.inventory.add_group(POOL_GROUP)
|
||||||
|
for group in POWER_STATES:
|
||||||
|
self.inventory.add_group(group.lower())
|
||||||
|
|
||||||
|
self._add_pools(objects['pools'])
|
||||||
|
self._add_hosts(objects['hosts'], objects['pools'])
|
||||||
|
self._add_vms(objects['vms'], objects['hosts'], objects['pools'])
|
||||||
|
|
||||||
|
def verify_file(self, path):
|
||||||
|
|
||||||
|
valid = False
|
||||||
|
if super(InventoryModule, self).verify_file(path):
|
||||||
|
if path.endswith(('xen_orchestra.yaml', 'xen_orchestra.yml')):
|
||||||
|
valid = True
|
||||||
|
else:
|
||||||
|
self.display.vvv(
|
||||||
|
'Skipping due to inventory source not ending in "xen_orchestra.yaml" nor "xen_orchestra.yml"')
|
||||||
|
return valid
|
||||||
|
|
||||||
|
def parse(self, inventory, loader, path, cache=True):
|
||||||
|
if not HAS_WEBSOCKET:
|
||||||
|
raise AnsibleError('This plugin requires websocket-client 1.0.0 or higher: '
|
||||||
|
'https://github.com/websocket-client/websocket-client.')
|
||||||
|
|
||||||
|
super(InventoryModule, self).parse(inventory, loader, path)
|
||||||
|
|
||||||
|
# read config from file, this sets 'options'
|
||||||
|
self._read_config_data(path)
|
||||||
|
self.inventory = inventory
|
||||||
|
|
||||||
|
self.protocol = 'wss'
|
||||||
|
self.xoa_api_host = self.get_option('api_host')
|
||||||
|
self.xoa_user = self.get_option('user')
|
||||||
|
self.xoa_password = self.get_option('password')
|
||||||
|
self.cache_key = self.get_cache_key(path)
|
||||||
|
self.use_cache = cache and self.get_option('cache')
|
||||||
|
|
||||||
|
self.validate_certs = self.get_option('validate_certs')
|
||||||
|
if not self.get_option('use_ssl'):
|
||||||
|
self.protocol = 'ws'
|
||||||
|
|
||||||
|
objects = self._get_objects()
|
||||||
|
self._populate(objects)
|
||||||
138
plugins/lookup/collection_version.py
Normal file
138
plugins/lookup/collection_version.py
Normal file
@@ -0,0 +1,138 @@
|
|||||||
|
# (c) 2021, Felix Fontein <felix@fontein.de>
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
DOCUMENTATION = """
|
||||||
|
name: collection_version
|
||||||
|
author: Felix Fontein (@felixfontein)
|
||||||
|
version_added: "4.0.0"
|
||||||
|
short_description: Retrieves the version of an installed collection
|
||||||
|
description:
|
||||||
|
- This lookup allows to query the version of an installed collection, and to determine whether a
|
||||||
|
collection is installed at all.
|
||||||
|
- By default it returns C(none) for non-existing collections and C(*) for collections without a
|
||||||
|
version number. The latter should only happen in development environments, or when installing
|
||||||
|
a collection from git which has no version in its C(galaxy.yml). This behavior can be adjusted
|
||||||
|
by providing other values with I(result_not_found) and I(result_no_version).
|
||||||
|
options:
|
||||||
|
_terms:
|
||||||
|
description:
|
||||||
|
- The collections to look for.
|
||||||
|
- For example C(community.general).
|
||||||
|
type: list
|
||||||
|
elements: str
|
||||||
|
required: true
|
||||||
|
result_not_found:
|
||||||
|
description:
|
||||||
|
- The value to return when the collection could not be found.
|
||||||
|
- By default, C(none) is returned.
|
||||||
|
type: string
|
||||||
|
default: ~
|
||||||
|
result_no_version:
|
||||||
|
description:
|
||||||
|
- The value to return when the collection has no version number.
|
||||||
|
- This can happen for collections installed from git which do not have a version number
|
||||||
|
in C(galaxy.yml).
|
||||||
|
- By default, C(*) is returned.
|
||||||
|
type: string
|
||||||
|
default: '*'
|
||||||
|
"""
|
||||||
|
|
||||||
|
EXAMPLES = """
|
||||||
|
- name: Check version of community.general
|
||||||
|
ansible.builtin.debug:
|
||||||
|
msg: "community.general version {{ lookup('community.general.collection_version', 'community.general') }}"
|
||||||
|
"""
|
||||||
|
|
||||||
|
RETURN = """
|
||||||
|
_raw:
|
||||||
|
description:
|
||||||
|
- The version number of the collections listed as input.
|
||||||
|
- If a collection can not be found, it will return the value provided in I(result_not_found).
|
||||||
|
By default, this is C(none).
|
||||||
|
- If a collection can be found, but the version not identified, it will return the value provided in
|
||||||
|
I(result_no_version). By default, this is C(*). This can happen for collections installed
|
||||||
|
from git which do not have a version number in C(galaxy.yml).
|
||||||
|
type: list
|
||||||
|
elements: str
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
from ansible.errors import AnsibleLookupError
|
||||||
|
from ansible.module_utils.compat.importlib import import_module
|
||||||
|
from ansible.plugins.lookup import LookupBase
|
||||||
|
|
||||||
|
|
||||||
|
FQCN_RE = re.compile(r'^[A-Za-z0-9_]+\.[A-Za-z0-9_]+$')
|
||||||
|
|
||||||
|
|
||||||
|
def load_collection_meta_manifest(manifest_path):
|
||||||
|
with open(manifest_path, 'rb') as f:
|
||||||
|
meta = json.load(f)
|
||||||
|
return {
|
||||||
|
'version': meta['collection_info']['version'],
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def load_collection_meta_galaxy(galaxy_path, no_version='*'):
|
||||||
|
with open(galaxy_path, 'rb') as f:
|
||||||
|
meta = yaml.safe_load(f)
|
||||||
|
return {
|
||||||
|
'version': meta.get('version') or no_version,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def load_collection_meta(collection_pkg, no_version='*'):
|
||||||
|
path = os.path.dirname(collection_pkg.__file__)
|
||||||
|
|
||||||
|
# Try to load MANIFEST.json
|
||||||
|
manifest_path = os.path.join(path, 'MANIFEST.json')
|
||||||
|
if os.path.exists(manifest_path):
|
||||||
|
return load_collection_meta_manifest(manifest_path)
|
||||||
|
|
||||||
|
# Try to load galaxy.y(a)ml
|
||||||
|
galaxy_path = os.path.join(path, 'galaxy.yml')
|
||||||
|
galaxy_alt_path = os.path.join(path, 'galaxy.yaml')
|
||||||
|
# galaxy.yaml was only supported in ansible-base 2.10 and ansible-core 2.11. Support was removed
|
||||||
|
# in https://github.com/ansible/ansible/commit/595413d11346b6f26bb3d9df2d8e05f2747508a3 for
|
||||||
|
# ansible-core 2.12.
|
||||||
|
for path in (galaxy_path, galaxy_alt_path):
|
||||||
|
if os.path.exists(path):
|
||||||
|
return load_collection_meta_galaxy(path, no_version=no_version)
|
||||||
|
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
|
class LookupModule(LookupBase):
|
||||||
|
def run(self, terms, variables=None, **kwargs):
|
||||||
|
result = []
|
||||||
|
self.set_options(var_options=variables, direct=kwargs)
|
||||||
|
not_found = self.get_option('result_not_found')
|
||||||
|
no_version = self.get_option('result_no_version')
|
||||||
|
|
||||||
|
for term in terms:
|
||||||
|
if not FQCN_RE.match(term):
|
||||||
|
raise AnsibleLookupError('"{term}" is not a FQCN'.format(term=term))
|
||||||
|
|
||||||
|
try:
|
||||||
|
collection_pkg = import_module('ansible_collections.{fqcn}'.format(fqcn=term))
|
||||||
|
except ImportError:
|
||||||
|
# Collection not found
|
||||||
|
result.append(not_found)
|
||||||
|
continue
|
||||||
|
|
||||||
|
try:
|
||||||
|
data = load_collection_meta(collection_pkg, no_version=no_version)
|
||||||
|
except Exception as exc:
|
||||||
|
raise AnsibleLookupError('Error while loading metadata for {fqcn}: {error}'.format(fqcn=term, error=exc))
|
||||||
|
|
||||||
|
result.append(data.get('version', no_version))
|
||||||
|
|
||||||
|
return result
|
||||||
@@ -93,7 +93,7 @@ DOCUMENTATION = '''
|
|||||||
environment variable and keep I(endpoints), I(host), and I(port) unused.
|
environment variable and keep I(endpoints), I(host), and I(port) unused.
|
||||||
seealso:
|
seealso:
|
||||||
- module: community.general.etcd3
|
- module: community.general.etcd3
|
||||||
- ref: etcd_lookup
|
- ref: ansible_collections.community.general.etcd_lookup
|
||||||
description: The etcd v2 lookup.
|
description: The etcd v2 lookup.
|
||||||
|
|
||||||
requirements:
|
requirements:
|
||||||
|
|||||||
@@ -23,7 +23,7 @@ DOCUMENTATION = '''
|
|||||||
EXAMPLES = """
|
EXAMPLES = """
|
||||||
- name: "'unnest' all elements into single list"
|
- name: "'unnest' all elements into single list"
|
||||||
ansible.builtin.debug:
|
ansible.builtin.debug:
|
||||||
msg: "all in one list {{lookup('community.general.flattened', [1,2,3,[5,6]], [a,b,c], [[5,6,1,3], [34,a,b,c]])}}"
|
msg: "all in one list {{lookup('community.general.flattened', [1,2,3,[5,6]], ['a','b','c'], [[5,6,1,3], [34,'a','b','c']])}}"
|
||||||
"""
|
"""
|
||||||
|
|
||||||
RETURN = """
|
RETURN = """
|
||||||
|
|||||||
@@ -1,126 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
|
||||||
# Copyright 2018 Red Hat | Ansible
|
|
||||||
#
|
|
||||||
# This file is part of Ansible
|
|
||||||
#
|
|
||||||
# Ansible is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# Ansible is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
---
|
|
||||||
author: Unknown (!UNKNOWN)
|
|
||||||
name: nios
|
|
||||||
short_description: Query Infoblox NIOS objects
|
|
||||||
deprecated:
|
|
||||||
why: Please install the infoblox.nios_modules collection and use the corresponding lookup from it.
|
|
||||||
alternative: infoblox.nios_modules.nios_lookup
|
|
||||||
removed_in: 5.0.0
|
|
||||||
description:
|
|
||||||
- Uses the Infoblox WAPI API to fetch NIOS specified objects. This lookup
|
|
||||||
supports adding additional keywords to filter the return data and specify
|
|
||||||
the desired set of returned fields.
|
|
||||||
requirements:
|
|
||||||
- infoblox-client
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- community.general.nios
|
|
||||||
|
|
||||||
options:
|
|
||||||
_terms:
|
|
||||||
description: The name of the object to return from NIOS
|
|
||||||
required: True
|
|
||||||
return_fields:
|
|
||||||
description: The list of field names to return for the specified object.
|
|
||||||
filter:
|
|
||||||
description: a dict object that is used to filter the return objects
|
|
||||||
extattrs:
|
|
||||||
description: a dict object that is used to filter on extattrs
|
|
||||||
'''
|
|
||||||
|
|
||||||
EXAMPLES = """
|
|
||||||
- name: fetch all networkview objects
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
networkviews: "{{ lookup('community.general.nios', 'networkview',
|
|
||||||
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
|
||||||
|
|
||||||
- name: fetch the default dns view
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
dns_views: "{{ lookup('community.general.nios', 'view', filter={'name': 'default'},
|
|
||||||
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
|
||||||
|
|
||||||
# all of the examples below use credentials that are set using env variables
|
|
||||||
# export INFOBLOX_HOST=nios01
|
|
||||||
# export INFOBLOX_USERNAME=admin
|
|
||||||
# export INFOBLOX_PASSWORD=admin
|
|
||||||
|
|
||||||
- name: fetch all host records and include extended attributes
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
host_records: "{{ lookup('community.general.nios', 'record:host', return_fields=['extattrs', 'name', 'view', 'comment']}) }}"
|
|
||||||
|
|
||||||
|
|
||||||
- name: use env variables to pass credentials
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
networkviews: "{{ lookup('community.general.nios', 'networkview') }}"
|
|
||||||
|
|
||||||
- name: get a host record
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
host: "{{ lookup('community.general.nios', 'record:host', filter={'name': 'hostname.ansible.com'}) }}"
|
|
||||||
|
|
||||||
- name: get the authoritative zone from a non default dns view
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
host: "{{ lookup('community.general.nios', 'zone_auth', filter={'fqdn': 'ansible.com', 'view': 'ansible-dns'}) }}"
|
|
||||||
"""
|
|
||||||
|
|
||||||
RETURN = """
|
|
||||||
obj_type:
|
|
||||||
description:
|
|
||||||
- The object type specified in the terms argument
|
|
||||||
type: dictionary
|
|
||||||
contains:
|
|
||||||
obj_field:
|
|
||||||
description:
|
|
||||||
- One or more obj_type fields as specified by return_fields argument or
|
|
||||||
the default set of fields as per the object type
|
|
||||||
"""
|
|
||||||
|
|
||||||
from ansible.plugins.lookup import LookupBase
|
|
||||||
from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiLookup
|
|
||||||
from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs
|
|
||||||
from ansible.errors import AnsibleError
|
|
||||||
|
|
||||||
|
|
||||||
class LookupModule(LookupBase):
|
|
||||||
|
|
||||||
def run(self, terms, variables=None, **kwargs):
|
|
||||||
try:
|
|
||||||
obj_type = terms[0]
|
|
||||||
except IndexError:
|
|
||||||
raise AnsibleError('the object_type must be specified')
|
|
||||||
|
|
||||||
return_fields = kwargs.pop('return_fields', None)
|
|
||||||
filter_data = kwargs.pop('filter', {})
|
|
||||||
extattrs = normalize_extattrs(kwargs.pop('extattrs', {}))
|
|
||||||
provider = kwargs.pop('provider', {})
|
|
||||||
wapi = WapiLookup(provider)
|
|
||||||
res = wapi.get_object(obj_type, filter_data, return_fields=return_fields, extattrs=extattrs)
|
|
||||||
if res is not None:
|
|
||||||
for obj in res:
|
|
||||||
if 'extattrs' in obj:
|
|
||||||
obj['extattrs'] = flatten_extattrs(obj['extattrs'])
|
|
||||||
else:
|
|
||||||
res = []
|
|
||||||
return res
|
|
||||||
@@ -1,105 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
|
||||||
# Copyright 2018 Red Hat | Ansible
|
|
||||||
#
|
|
||||||
# This file is part of Ansible
|
|
||||||
#
|
|
||||||
# Ansible is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# Ansible is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
---
|
|
||||||
author: Unknown (!UNKNOWN)
|
|
||||||
name: nios_next_ip
|
|
||||||
short_description: Return the next available IP address for a network
|
|
||||||
deprecated:
|
|
||||||
why: Please install the infoblox.nios_modules collection and use the corresponding lookup from it.
|
|
||||||
alternative: infoblox.nios_modules.nios_next_ip
|
|
||||||
removed_in: 5.0.0
|
|
||||||
description:
|
|
||||||
- Uses the Infoblox WAPI API to return the next available IP addresses
|
|
||||||
for a given network CIDR
|
|
||||||
requirements:
|
|
||||||
- infoblox-client
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- community.general.nios
|
|
||||||
|
|
||||||
options:
|
|
||||||
_terms:
|
|
||||||
description: The CIDR network to retrieve the next addresses from
|
|
||||||
required: True
|
|
||||||
num:
|
|
||||||
description: The number of IP addresses to return
|
|
||||||
required: false
|
|
||||||
default: 1
|
|
||||||
exclude:
|
|
||||||
description: List of IP's that need to be excluded from returned IP addresses
|
|
||||||
required: false
|
|
||||||
'''
|
|
||||||
|
|
||||||
EXAMPLES = """
|
|
||||||
- name: return next available IP address for network 192.168.10.0/24
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
ipaddr: "{{ lookup('community.general.nios_next_ip', '192.168.10.0/24', provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
|
||||||
|
|
||||||
- name: return the next 3 available IP addresses for network 192.168.10.0/24
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
ipaddr: "{{ lookup('community.general.nios_next_ip', '192.168.10.0/24', num=3, provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
|
||||||
|
|
||||||
- name: return the next 3 available IP addresses for network 192.168.10.0/24 excluding ip addresses - ['192.168.10.1', '192.168.10.2']
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
ipaddr: "{{ lookup('community.general.nios_next_ip', '192.168.10.0/24', num=3, exclude=['192.168.10.1', '192.168.10.2'],
|
|
||||||
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
|
||||||
"""
|
|
||||||
|
|
||||||
RETURN = """
|
|
||||||
_list:
|
|
||||||
description:
|
|
||||||
- The list of next IP addresses available
|
|
||||||
type: list
|
|
||||||
"""
|
|
||||||
|
|
||||||
from ansible.plugins.lookup import LookupBase
|
|
||||||
from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiLookup
|
|
||||||
from ansible.module_utils.common.text.converters import to_text
|
|
||||||
from ansible.errors import AnsibleError
|
|
||||||
|
|
||||||
|
|
||||||
class LookupModule(LookupBase):
|
|
||||||
|
|
||||||
def run(self, terms, variables=None, **kwargs):
|
|
||||||
try:
|
|
||||||
network = terms[0]
|
|
||||||
except IndexError:
|
|
||||||
raise AnsibleError('missing argument in the form of A.B.C.D/E')
|
|
||||||
|
|
||||||
provider = kwargs.pop('provider', {})
|
|
||||||
wapi = WapiLookup(provider)
|
|
||||||
|
|
||||||
network_obj = wapi.get_object('network', {'network': network})
|
|
||||||
if network_obj is None:
|
|
||||||
raise AnsibleError('unable to find network object %s' % network)
|
|
||||||
|
|
||||||
num = kwargs.get('num', 1)
|
|
||||||
exclude_ip = kwargs.get('exclude', [])
|
|
||||||
|
|
||||||
try:
|
|
||||||
ref = network_obj[0]['_ref']
|
|
||||||
avail_ips = wapi.call_func('next_available_ip', ref, {'num': num, 'exclude': exclude_ip})
|
|
||||||
return [avail_ips['ips']]
|
|
||||||
except Exception as exc:
|
|
||||||
raise AnsibleError(to_text(exc))
|
|
||||||
@@ -1,118 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
|
||||||
# Copyright 2018 Red Hat | Ansible
|
|
||||||
#
|
|
||||||
# This file is part of Ansible
|
|
||||||
#
|
|
||||||
# Ansible is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# Ansible is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
---
|
|
||||||
author: Unknown (!UNKNOWN)
|
|
||||||
name: nios_next_network
|
|
||||||
short_description: Return the next available network range for a network-container
|
|
||||||
deprecated:
|
|
||||||
why: Please install the infoblox.nios_modules collection and use the corresponding lookup from it.
|
|
||||||
alternative: infoblox.nios_modules.nios_next_network
|
|
||||||
removed_in: 5.0.0
|
|
||||||
description:
|
|
||||||
- Uses the Infoblox WAPI API to return the next available network addresses for
|
|
||||||
a given network CIDR
|
|
||||||
requirements:
|
|
||||||
- infoblox_client
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- community.general.nios
|
|
||||||
|
|
||||||
options:
|
|
||||||
_terms:
|
|
||||||
description: The CIDR network to retrieve the next network from next available network within the specified
|
|
||||||
container.
|
|
||||||
required: True
|
|
||||||
cidr:
|
|
||||||
description:
|
|
||||||
- The CIDR of the network to retrieve the next network from next available network within the
|
|
||||||
specified container. Also, Requested CIDR must be specified and greater than the parent CIDR.
|
|
||||||
required: True
|
|
||||||
default: 24
|
|
||||||
num:
|
|
||||||
description: The number of network addresses to return from network-container
|
|
||||||
required: false
|
|
||||||
default: 1
|
|
||||||
exclude:
|
|
||||||
description: Network addresses returned from network-container excluding list of user's input network range
|
|
||||||
required: false
|
|
||||||
default: ''
|
|
||||||
'''
|
|
||||||
|
|
||||||
EXAMPLES = """
|
|
||||||
- name: return next available network for network-container 192.168.10.0/24
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
networkaddr: "{{ lookup('community.general.nios_next_network', '192.168.10.0/24', cidr=25,
|
|
||||||
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
|
||||||
|
|
||||||
- name: return the next 2 available network addresses for network-container 192.168.10.0/24
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
networkaddr: "{{ lookup('community.general.nios_next_network', '192.168.10.0/24', cidr=25, num=2,
|
|
||||||
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
|
||||||
|
|
||||||
- name: return the available network addresses for network-container 192.168.10.0/24 excluding network range '192.168.10.0/25'
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
networkaddr: "{{ lookup('community.general.nios_next_network', '192.168.10.0/24', cidr=25, exclude=['192.168.10.0/25'],
|
|
||||||
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
|
||||||
"""
|
|
||||||
|
|
||||||
RETURN = """
|
|
||||||
_list:
|
|
||||||
description:
|
|
||||||
- The list of next network addresses available
|
|
||||||
type: list
|
|
||||||
"""
|
|
||||||
|
|
||||||
from ansible.plugins.lookup import LookupBase
|
|
||||||
from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiLookup
|
|
||||||
from ansible.module_utils.common.text.converters import to_text
|
|
||||||
from ansible.errors import AnsibleError
|
|
||||||
|
|
||||||
|
|
||||||
class LookupModule(LookupBase):
|
|
||||||
|
|
||||||
def run(self, terms, variables=None, **kwargs):
|
|
||||||
try:
|
|
||||||
network = terms[0]
|
|
||||||
except IndexError:
|
|
||||||
raise AnsibleError('missing network argument in the form of A.B.C.D/E')
|
|
||||||
try:
|
|
||||||
cidr = kwargs.get('cidr', 24)
|
|
||||||
except IndexError:
|
|
||||||
raise AnsibleError('missing CIDR argument in the form of xx')
|
|
||||||
|
|
||||||
provider = kwargs.pop('provider', {})
|
|
||||||
wapi = WapiLookup(provider)
|
|
||||||
network_obj = wapi.get_object('networkcontainer', {'network': network})
|
|
||||||
|
|
||||||
if network_obj is None:
|
|
||||||
raise AnsibleError('unable to find network-container object %s' % network)
|
|
||||||
num = kwargs.get('num', 1)
|
|
||||||
exclude_ip = kwargs.get('exclude', [])
|
|
||||||
|
|
||||||
try:
|
|
||||||
ref = network_obj[0]['_ref']
|
|
||||||
avail_nets = wapi.call_func('next_available_network', ref, {'cidr': cidr, 'num': num, 'exclude': exclude_ip})
|
|
||||||
return [avail_nets['networks']]
|
|
||||||
except Exception as exc:
|
|
||||||
raise AnsibleError(to_text(exc))
|
|
||||||
@@ -141,9 +141,9 @@ import time
|
|||||||
import yaml
|
import yaml
|
||||||
|
|
||||||
|
|
||||||
from distutils import util
|
|
||||||
from ansible.errors import AnsibleError, AnsibleAssertionError
|
from ansible.errors import AnsibleError, AnsibleAssertionError
|
||||||
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
|
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
|
||||||
|
from ansible.module_utils.parsing.convert_bool import boolean
|
||||||
from ansible.utils.display import Display
|
from ansible.utils.display import Display
|
||||||
from ansible.utils.encrypt import random_password
|
from ansible.utils.encrypt import random_password
|
||||||
from ansible.plugins.lookup import LookupBase
|
from ansible.plugins.lookup import LookupBase
|
||||||
@@ -211,7 +211,7 @@ class LookupModule(LookupBase):
|
|||||||
try:
|
try:
|
||||||
for key in ['create', 'returnall', 'overwrite', 'backup', 'nosymbols']:
|
for key in ['create', 'returnall', 'overwrite', 'backup', 'nosymbols']:
|
||||||
if not isinstance(self.paramvals[key], bool):
|
if not isinstance(self.paramvals[key], bool):
|
||||||
self.paramvals[key] = util.strtobool(self.paramvals[key])
|
self.paramvals[key] = boolean(self.paramvals[key])
|
||||||
except (ValueError, AssertionError) as e:
|
except (ValueError, AssertionError) as e:
|
||||||
raise AnsibleError(e)
|
raise AnsibleError(e)
|
||||||
if self.paramvals['missing'] not in ['error', 'warn', 'create', 'empty']:
|
if self.paramvals['missing'] not in ['error', 'warn', 'create', 'empty']:
|
||||||
|
|||||||
119
plugins/lookup/random_words.py
Normal file
119
plugins/lookup/random_words.py
Normal file
@@ -0,0 +1,119 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
"""The community.general.random_words Ansible lookup plugin."""
|
||||||
|
|
||||||
|
from __future__ import absolute_import, division, print_function
|
||||||
|
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
DOCUMENTATION = r"""
|
||||||
|
name: random_words
|
||||||
|
author:
|
||||||
|
- Thomas Sjögren (@konstruktoid)
|
||||||
|
short_description: Return a number of random words
|
||||||
|
version_added: "4.0.0"
|
||||||
|
requirements:
|
||||||
|
- xkcdpass U(https://github.com/redacted/XKCD-password-generator)
|
||||||
|
description:
|
||||||
|
- Returns a number of random words. The output can for example be used for
|
||||||
|
passwords.
|
||||||
|
- See U(https://xkcd.com/936/) for background.
|
||||||
|
options:
|
||||||
|
numwords:
|
||||||
|
description:
|
||||||
|
- The number of words.
|
||||||
|
default: 6
|
||||||
|
type: int
|
||||||
|
min_length:
|
||||||
|
description:
|
||||||
|
- Minimum length of words to make password.
|
||||||
|
default: 5
|
||||||
|
type: int
|
||||||
|
max_length:
|
||||||
|
description:
|
||||||
|
- Maximum length of words to make password.
|
||||||
|
default: 9
|
||||||
|
type: int
|
||||||
|
delimiter:
|
||||||
|
description:
|
||||||
|
- The delimiter character between words.
|
||||||
|
default: " "
|
||||||
|
type: str
|
||||||
|
case:
|
||||||
|
description:
|
||||||
|
- The method for setting the case of each word in the passphrase.
|
||||||
|
choices: ["alternating", "upper", "lower", "random", "capitalize"]
|
||||||
|
default: "lower"
|
||||||
|
type: str
|
||||||
|
"""
|
||||||
|
|
||||||
|
EXAMPLES = r"""
|
||||||
|
- name: Generate password with default settings
|
||||||
|
ansible.builtin.debug:
|
||||||
|
var: lookup('community.general.random_words')
|
||||||
|
# Example result: 'traitor gigabyte cesarean unless aspect clear'
|
||||||
|
|
||||||
|
- name: Generate password with six, five character, words
|
||||||
|
ansible.builtin.debug:
|
||||||
|
var: lookup('community.general.random_words', min_length=5, max_length=5)
|
||||||
|
# Example result: 'brink banjo getup staff trump comfy'
|
||||||
|
|
||||||
|
- name: Generate password with three capitalized words and the '-' delimiter
|
||||||
|
ansible.builtin.debug:
|
||||||
|
var: lookup('community.general.random_words', numwords=3, delimiter='-', case='capitalize')
|
||||||
|
# Example result: 'Overlabor-Faucet-Coastline'
|
||||||
|
|
||||||
|
- name: Generate password with three words without any delimiter
|
||||||
|
ansible.builtin.debug:
|
||||||
|
var: lookup('community.general.random_words', numwords=3, delimiter='')
|
||||||
|
# Example result: 'deskworkmonopolystriking'
|
||||||
|
# https://www.ncsc.gov.uk/blog-post/the-logic-behind-three-random-words
|
||||||
|
"""
|
||||||
|
|
||||||
|
RETURN = r"""
|
||||||
|
_raw:
|
||||||
|
description: A single-element list containing random words.
|
||||||
|
type: list
|
||||||
|
elements: str
|
||||||
|
"""
|
||||||
|
|
||||||
|
from ansible.errors import AnsibleLookupError
|
||||||
|
from ansible.plugins.lookup import LookupBase
|
||||||
|
|
||||||
|
try:
|
||||||
|
from xkcdpass import xkcd_password as xp
|
||||||
|
|
||||||
|
HAS_XKCDPASS = True
|
||||||
|
except ImportError:
|
||||||
|
HAS_XKCDPASS = False
|
||||||
|
|
||||||
|
|
||||||
|
class LookupModule(LookupBase):
|
||||||
|
"""The random_words Ansible lookup class."""
|
||||||
|
|
||||||
|
def run(self, terms, variables=None, **kwargs):
|
||||||
|
|
||||||
|
if not HAS_XKCDPASS:
|
||||||
|
raise AnsibleLookupError(
|
||||||
|
"Python xkcdpass library is required. "
|
||||||
|
'Please install using "pip install xkcdpass"'
|
||||||
|
)
|
||||||
|
|
||||||
|
self.set_options(var_options=variables, direct=kwargs)
|
||||||
|
method = self.get_option("case")
|
||||||
|
delimiter = self.get_option("delimiter")
|
||||||
|
max_length = self.get_option("max_length")
|
||||||
|
min_length = self.get_option("min_length")
|
||||||
|
numwords = self.get_option("numwords")
|
||||||
|
|
||||||
|
words = xp.locate_wordfile()
|
||||||
|
wordlist = xp.generate_wordlist(
|
||||||
|
max_length=max_length, min_length=min_length, wordfile=words
|
||||||
|
)
|
||||||
|
|
||||||
|
values = xp.generate_xkcdpassword(
|
||||||
|
wordlist, case=method, delimiter=delimiter, numwords=numwords
|
||||||
|
)
|
||||||
|
|
||||||
|
return [values]
|
||||||
107
plugins/lookup/revbitspss.py
Normal file
107
plugins/lookup/revbitspss.py
Normal file
@@ -0,0 +1,107 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# Copyright: (c) 2021, RevBits <info@revbits.com>
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or
|
||||||
|
# https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
from __future__ import absolute_import, division, print_function
|
||||||
|
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
DOCUMENTATION = r"""
|
||||||
|
name: revbitspss
|
||||||
|
author: RevBits (@RevBits) <info@revbits.com>
|
||||||
|
short_description: Get secrets from RevBits PAM server
|
||||||
|
version_added: 4.1.0
|
||||||
|
description:
|
||||||
|
- Uses the revbits_ansible Python SDK to get Secrets from RevBits PAM
|
||||||
|
Server using API key authentication with the REST API.
|
||||||
|
requirements:
|
||||||
|
- revbits_ansible - U(https://pypi.org/project/revbits_ansible/)
|
||||||
|
options:
|
||||||
|
_terms:
|
||||||
|
description:
|
||||||
|
- This will be an array of keys for secrets which you want to fetch from RevBits PAM.
|
||||||
|
required: true
|
||||||
|
type: list
|
||||||
|
elements: string
|
||||||
|
base_url:
|
||||||
|
description:
|
||||||
|
- This will be the base URL of the server, for example C(https://server-url-here).
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
|
api_key:
|
||||||
|
description:
|
||||||
|
- This will be the API key for authentication. You can get it from the RevBits PAM secret manager module.
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
|
"""
|
||||||
|
|
||||||
|
RETURN = r"""
|
||||||
|
_list:
|
||||||
|
description:
|
||||||
|
- The JSON responses which you can access with defined keys.
|
||||||
|
- If you are fetching secrets named as UUID, PASSWORD it will gives you the dict of all secrets.
|
||||||
|
type: list
|
||||||
|
elements: dict
|
||||||
|
"""
|
||||||
|
|
||||||
|
EXAMPLES = r"""
|
||||||
|
- hosts: localhost
|
||||||
|
vars:
|
||||||
|
secret: >-
|
||||||
|
{{
|
||||||
|
lookup(
|
||||||
|
'community.general.revbitspss',
|
||||||
|
'UUIDPAM', 'DB_PASS',
|
||||||
|
base_url='https://server-url-here',
|
||||||
|
api_key='API_KEY_GOES_HERE'
|
||||||
|
)
|
||||||
|
}}
|
||||||
|
tasks:
|
||||||
|
- ansible.builtin.debug:
|
||||||
|
msg: >
|
||||||
|
UUIDPAM is {{ (secret['UUIDPAM']) }} and DB_PASS is {{ (secret['DB_PASS']) }}
|
||||||
|
"""
|
||||||
|
|
||||||
|
from ansible.plugins.lookup import LookupBase
|
||||||
|
from ansible.utils.display import Display
|
||||||
|
from ansible.errors import AnsibleError
|
||||||
|
from ansible.module_utils.six import raise_from
|
||||||
|
|
||||||
|
try:
|
||||||
|
from pam.revbits_ansible.server import SecretServer
|
||||||
|
except ImportError as imp_exc:
|
||||||
|
ANOTHER_LIBRARY_IMPORT_ERROR = imp_exc
|
||||||
|
else:
|
||||||
|
ANOTHER_LIBRARY_IMPORT_ERROR = None
|
||||||
|
|
||||||
|
|
||||||
|
display = Display()
|
||||||
|
|
||||||
|
|
||||||
|
class LookupModule(LookupBase):
    """Lookup plugin fetching secrets from a RevBits PAM secret server."""

    @staticmethod
    def Client(server_parameters):
        """Build a RevBits SecretServer client from a dict of connection parameters."""
        return SecretServer(**server_parameters)

    def run(self, terms, variables, **kwargs):
        """Fetch each secret named in ``terms``.

        :param terms: secret identifiers to look up.
        :param variables: ansible variables used to resolve plugin options.
        :return: list with one dict per term mapping the term to its secret value.
        :raises AnsibleError: when the client library is missing or a lookup fails.
        """
        if ANOTHER_LIBRARY_IMPORT_ERROR:
            raise_from(
                AnsibleError('revbits_ansible must be installed to use this plugin'),
                ANOTHER_LIBRARY_IMPORT_ERROR
            )
        self.set_options(var_options=variables, direct=kwargs)
        secret_server = LookupModule.Client(
            {
                "base_url": self.get_option('base_url'),
                "api_key": self.get_option('api_key'),
            }
        )
        result = []
        for term in terms:
            try:
                display.vvv(u"Secret Server lookup of Secret with ID %s" % term)
                result.append({term: secret_server.get_pam_secret(term)})
            except Exception as error:
                # BUG FIX: exceptions have no ``.message`` attribute on Python 3;
                # formatting the exception object itself is portable and keeps
                # the real error text instead of raising AttributeError.
                raise AnsibleError("Secret Server lookup failure: %s" % error)
        return result
|
||||||
@@ -1,748 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# This code is part of Ansible, but is an independent component.
|
|
||||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
|
||||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
|
||||||
# still belong to the author of the module, and may assign their own license
|
|
||||||
# to the complete work.
|
|
||||||
#
|
|
||||||
# Copyright (c) 2017, Sumit Kumar <sumit4@netapp.com>
|
|
||||||
# Copyright (c) 2017, Michael Price <michael.price@netapp.com>
|
|
||||||
# All rights reserved.
|
|
||||||
#
|
|
||||||
# Redistribution and use in source and binary forms, with or without modification,
|
|
||||||
# are permitted provided that the following conditions are met:
|
|
||||||
#
|
|
||||||
# * Redistributions of source code must retain the above copyright
|
|
||||||
# notice, this list of conditions and the following disclaimer.
|
|
||||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
|
||||||
# this list of conditions and the following disclaimer in the documentation
|
|
||||||
# and/or other materials provided with the distribution.
|
|
||||||
#
|
|
||||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
|
||||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
|
||||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
||||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
||||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
||||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
||||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
||||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
|
||||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
import json
|
|
||||||
import os
|
|
||||||
import random
|
|
||||||
import mimetypes
|
|
||||||
|
|
||||||
from pprint import pformat
|
|
||||||
from ansible.module_utils import six
|
|
||||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
|
||||||
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
|
|
||||||
from ansible.module_utils.urls import open_url
|
|
||||||
from ansible.module_utils.api import basic_auth_argument_spec
|
|
||||||
from ansible.module_utils.common.text.converters import to_native
|
|
||||||
|
|
||||||
try:
|
|
||||||
from ansible.module_utils.ansible_release import __version__ as ansible_version
|
|
||||||
except ImportError:
|
|
||||||
ansible_version = 'unknown'
|
|
||||||
|
|
||||||
try:
|
|
||||||
from netapp_lib.api.zapi import zapi
|
|
||||||
HAS_NETAPP_LIB = True
|
|
||||||
except ImportError:
|
|
||||||
HAS_NETAPP_LIB = False
|
|
||||||
|
|
||||||
try:
|
|
||||||
import requests
|
|
||||||
HAS_REQUESTS = True
|
|
||||||
except ImportError:
|
|
||||||
HAS_REQUESTS = False
|
|
||||||
|
|
||||||
import ssl
|
|
||||||
try:
|
|
||||||
from urlparse import urlparse, urlunparse
|
|
||||||
except ImportError:
|
|
||||||
from urllib.parse import urlparse, urlunparse
|
|
||||||
|
|
||||||
|
|
||||||
# Whether the SolidFire SDK could be imported; set True by the try-block below.
HAS_SF_SDK = False
# Decimal (SI) size-unit multipliers used by SolidFire.
SF_BYTE_MAP = dict(
    # Management GUI displays 1024 ** 3 as 1.1 GB, thus use 1000.
    bytes=1,
    b=1,
    kb=1000,
    mb=1000 ** 2,
    gb=1000 ** 3,
    tb=1000 ** 4,
    pb=1000 ** 5,
    eb=1000 ** 6,
    zb=1000 ** 7,
    yb=1000 ** 8
)

# Binary (power-of-two) size-unit multipliers.
POW2_BYTE_MAP = dict(
    # Here, 1 kb = 1024
    bytes=1,
    b=1,
    kb=1024,
    mb=1024 ** 2,
    gb=1024 ** 3,
    tb=1024 ** 4,
    pb=1024 ** 5,
    eb=1024 ** 6,
    zb=1024 ** 7,
    yb=1024 ** 8
)
|
|
||||||
|
|
||||||
try:
|
|
||||||
from solidfire.factory import ElementFactory
|
|
||||||
from solidfire.custom.models import TimeIntervalFrequency
|
|
||||||
from solidfire.models import Schedule, ScheduleInfo
|
|
||||||
|
|
||||||
HAS_SF_SDK = True
|
|
||||||
except Exception:
|
|
||||||
HAS_SF_SDK = False
|
|
||||||
|
|
||||||
|
|
||||||
def has_netapp_lib():
    """Return True when the optional netapp_lib package was importable at load time."""
    return HAS_NETAPP_LIB
|
|
||||||
|
|
||||||
|
|
||||||
def has_sf_sdk():
    """Return True when the optional SolidFire SDK was importable at load time."""
    return HAS_SF_SDK
|
|
||||||
|
|
||||||
|
|
||||||
def na_ontap_host_argument_spec():
    """Return the connection argument spec shared by NetApp ONTAP modules."""
    spec = {}
    spec['hostname'] = {'required': True, 'type': 'str'}
    spec['username'] = {'required': True, 'type': 'str', 'aliases': ['user']}
    spec['password'] = {'required': True, 'type': 'str', 'aliases': ['pass'], 'no_log': True}
    spec['https'] = {'required': False, 'type': 'bool', 'default': False}
    spec['validate_certs'] = {'required': False, 'type': 'bool', 'default': True}
    spec['http_port'] = {'required': False, 'type': 'int'}
    spec['ontapi'] = {'required': False, 'type': 'int'}
    spec['use_rest'] = {'required': False, 'type': 'str', 'default': 'Auto',
                        'choices': ['Never', 'Always', 'Auto']}
    return spec
|
|
||||||
|
|
||||||
|
|
||||||
def ontap_sf_host_argument_spec():
    """Return the minimal credential argument spec shared by ONTAP/SolidFire modules."""
    return {
        'hostname': {'required': True, 'type': 'str'},
        'username': {'required': True, 'type': 'str', 'aliases': ['user']},
        'password': {'required': True, 'type': 'str', 'aliases': ['pass'], 'no_log': True},
    }
|
|
||||||
|
|
||||||
|
|
||||||
def aws_cvs_host_argument_spec():
    """Return the connection argument spec shared by AWS Cloud Volumes Service modules."""
    return {
        'api_url': {'required': True, 'type': 'str'},
        'validate_certs': {'required': False, 'type': 'bool', 'default': True},
        'api_key': {'required': True, 'type': 'str', 'no_log': True},
        'secret_key': {'required': True, 'type': 'str', 'no_log': True},
    }
|
|
||||||
|
|
||||||
|
|
||||||
def create_sf_connection(module, port=None):
    """Create a SolidFire Element connection from the module's credentials.

    Fails the module when the SDK is missing or any credential is empty;
    raises a generic Exception when the connection itself cannot be created.
    """
    hostname = module.params['hostname']
    username = module.params['username']
    password = module.params['password']

    if not (HAS_SF_SDK and hostname and username and password):
        module.fail_json(msg="the python SolidFire SDK module is required")
    else:
        try:
            connection = ElementFactory.create(hostname, username, password, port=port)
        except Exception:
            raise Exception("Unable to create SF connection")
        return connection
|
|
||||||
|
|
||||||
|
|
||||||
def setup_na_ontap_zapi(module, vserver=None):
    """Build a configured ZAPI NaServer connection from the module's parameters.

    Honours the ``https``, ``validate_certs``, ``http_port`` and ``ontapi``
    options; defaults to HTTP on port 80, HTTPS on port 443 and ONTAPI
    version 1.110. Calls ``module.fail_json`` when netapp_lib is missing.

    :param module: AnsibleModule supplying connection parameters.
    :param vserver: optional vserver name to tunnel requests to.
    :return: configured ``zapi.NaServer`` instance.
    """
    hostname = module.params['hostname']
    username = module.params['username']
    password = module.params['password']
    https = module.params['https']
    validate_certs = module.params['validate_certs']
    port = module.params['http_port']
    version = module.params['ontapi']

    if HAS_NETAPP_LIB:
        # set up zapi
        server = zapi.NaServer(hostname)
        server.set_username(username)
        server.set_password(password)
        if vserver:
            server.set_vserver(vserver)
        # ONTAPI minor version: user-supplied, else 110 (i.e. version 1.110).
        if version:
            minor = version
        else:
            minor = 110
        server.set_api_version(major=1, minor=minor)
        # default is HTTP
        if https:
            if port is None:
                port = 443
            transport_type = 'HTTPS'
            # HACK to bypass certificate verification
            # NOTE: this swaps the *process-wide* default ssl context, affecting
            # every subsequent HTTPS connection in the interpreter, not just this
            # server; only done when the user explicitly set validate_certs=False.
            if validate_certs is False:
                if not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None):
                    ssl._create_default_https_context = ssl._create_unverified_context
        else:
            if port is None:
                port = 80
            transport_type = 'HTTP'
        server.set_transport_type(transport_type)
        server.set_port(port)
        server.set_server_type('FILER')
        return server
    else:
        module.fail_json(msg="the python NetApp-Lib module is required")
|
|
||||||
|
|
||||||
|
|
||||||
def setup_ontap_zapi(module, vserver=None):
    """Build a ZAPI NaServer connection using fixed HTTP transport settings.

    Unlike :func:`setup_na_ontap_zapi`, this uses hard-coded API version
    1.110, port 80 and plain HTTP. Calls ``module.fail_json`` when
    netapp_lib is missing.

    :param module: AnsibleModule supplying hostname/username/password.
    :param vserver: optional vserver name to tunnel requests to.
    :return: configured ``zapi.NaServer`` instance.
    """
    hostname = module.params['hostname']
    username = module.params['username']
    password = module.params['password']

    if HAS_NETAPP_LIB:
        # set up zapi
        server = zapi.NaServer(hostname)
        server.set_username(username)
        server.set_password(password)
        if vserver:
            server.set_vserver(vserver)
        # Todo : Replace hard-coded values with configurable parameters.
        server.set_api_version(major=1, minor=110)
        server.set_port(80)
        server.set_server_type('FILER')
        server.set_transport_type('HTTP')
        return server
    else:
        module.fail_json(msg="the python NetApp-Lib module is required")
|
|
||||||
|
|
||||||
|
|
||||||
def eseries_host_argument_spec():
    """Retrieve a base argument specification common to all NetApp E-Series modules."""
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(
        api_username=dict(type='str', required=True),
        api_password=dict(type='str', required=True, no_log=True),
        api_url=dict(type='str', required=True),
        ssid=dict(type='str', required=False, default='1'),
        validate_certs=dict(type='bool', required=False, default=True),
    )
    return argument_spec
|
|
||||||
|
|
||||||
|
|
||||||
class NetAppESeriesModule(object):
    """Base class for all NetApp E-Series modules.

    Provides a set of common methods for NetApp E-Series modules, including version checking, mode (proxy, embedded)
    verification, http requests, secure http redirection for embedded web services, and logging setup.

    Be sure to add the following lines in the module's documentation section:
    extends_documentation_fragment:
        - netapp.eseries

    :param dict(dict) ansible_options: dictionary of ansible option definitions
    :param str web_services_version: minimally required web services rest api version (default value: "02.00.0000.0000")
    :param bool supports_check_mode: whether the module will support the check_mode capabilities (default=False)
    :param list(list) mutually_exclusive: list containing list(s) of mutually exclusive options (optional)
    :param list(list) required_if: list containing list(s) containing the option, the option value, and then
    a list of required options. (optional)
    :param list(list) required_one_of: list containing list(s) of options for which at least one is required. (optional)
    :param list(list) required_together: list containing list(s) of options that are required together. (optional)
    :param bool log_requests: controls whether to log each request (default: True)
    """
    DEFAULT_TIMEOUT = 60
    DEFAULT_SECURE_PORT = "8443"
    DEFAULT_REST_API_PATH = "devmgr/v2/"
    DEFAULT_REST_API_ABOUT_PATH = "devmgr/utils/about"
    DEFAULT_HEADERS = {"Content-Type": "application/json", "Accept": "application/json",
                       "netapp-client-type": "Ansible-%s" % ansible_version}
    HTTP_AGENT = "Ansible / %s" % ansible_version
    SIZE_UNIT_MAP = dict(bytes=1, b=1, kb=1024, mb=1024**2, gb=1024**3, tb=1024**4,
                         pb=1024**5, eb=1024**6, zb=1024**7, yb=1024**8)

    def __init__(self, ansible_options, web_services_version=None, supports_check_mode=False,
                 mutually_exclusive=None, required_if=None, required_one_of=None, required_together=None,
                 log_requests=True):
        argument_spec = eseries_host_argument_spec()
        argument_spec.update(ansible_options)

        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=supports_check_mode,
                                    mutually_exclusive=mutually_exclusive, required_if=required_if,
                                    required_one_of=required_one_of, required_together=required_together)

        args = self.module.params
        self.web_services_version = web_services_version if web_services_version else "02.00.0000.0000"
        self.ssid = args["ssid"]
        self.url = args["api_url"]
        self.log_requests = log_requests
        self.creds = dict(url_username=args["api_username"],
                          url_password=args["api_password"],
                          validate_certs=args["validate_certs"])

        if not self.url.endswith("/"):
            self.url += "/"

        # Lazily populated by is_embedded() / _check_web_services_version().
        self.is_embedded_mode = None
        self.is_web_services_valid_cache = None

    def _check_web_services_version(self):
        """Verify proxy or embedded web services meets minimum version required for module.

        The minimum required web services version is evaluated against version supplied through the web services rest
        api. AnsibleFailJson exception will be raised when the minimum is not met or exceeded.

        This helper function will update the supplied api url if secure http is not used for embedded web services

        :raise AnsibleFailJson: raised when the contacted api service does not meet the minimum required version.
        """
        if not self.is_web_services_valid_cache:

            url_parts = urlparse(self.url)
            if not url_parts.scheme or not url_parts.netloc:
                self.module.fail_json(msg="Failed to provide valid API URL. Example: https://192.168.1.100:8443/devmgr/v2. URL [%s]." % self.url)

            if url_parts.scheme not in ["http", "https"]:
                self.module.fail_json(msg="Protocol must be http or https. URL [%s]." % self.url)

            self.url = "%s://%s/" % (url_parts.scheme, url_parts.netloc)
            about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
            rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, ignore_errors=True, **self.creds)

            if rc != 200:
                # Fall back to the default secure port on the bare host.
                self.module.warn("Failed to retrieve web services about information! Retrying with secure ports. Array Id [%s]." % self.ssid)
                self.url = "https://%s:8443/" % url_parts.netloc.split(":")[0]
                about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
                try:
                    rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds)
                except Exception as error:
                    self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]."
                                              % (self.ssid, to_native(error)))

            major, minor, other, revision = data["version"].split(".")
            minimum_major, minimum_minor, other, minimum_revision = self.web_services_version.split(".")

            # BUG FIX: compare version components numerically. The original code
            # compared the string parts lexicographically, which misorders
            # multi-digit components (e.g. "10" < "9"). The third component is
            # intentionally ignored, as before.
            current = (int(major), int(minor), int(revision))
            minimum = (int(minimum_major), int(minimum_minor), int(minimum_revision))
            if current < minimum:
                self.module.fail_json(msg="Web services version does not meet minimum version required. Current version: [%s]."
                                          " Version required: [%s]." % (data["version"], self.web_services_version))

            self.module.log("Web services rest api version met the minimum required version.")
            self.is_web_services_valid_cache = True

    def is_embedded(self):
        """Determine whether web services server is the embedded web services.

        If web services about endpoint fails based on an URLError then the request will be attempted again using
        secure http.

        :raise AnsibleFailJson: raised when web services about endpoint failed to be contacted.
        :return bool: whether contacted web services is running from storage array (embedded) or from a proxy.
        """
        self._check_web_services_version()

        if self.is_embedded_mode is None:
            about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
            try:
                rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds)
                self.is_embedded_mode = not data["runningAsProxy"]
            except Exception as error:
                self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]."
                                          % (self.ssid, to_native(error)))

        return self.is_embedded_mode

    def request(self, path, data=None, method='GET', headers=None, ignore_errors=False):
        """Issue an HTTP request to a url, retrieving an optional JSON response.

        :param str path: web services rest api endpoint path (Example: storage-systems/1/graph). Note that when the
        full url path is specified then that will be used without supplying the protocol, hostname, port and rest path.
        :param data: data required for the request (data may be json or any python structured data)
        :param str method: request method such as GET, POST, DELETE.
        :param dict headers: dictionary containing request headers.
        :param bool ignore_errors: forces the request to ignore any raised exceptions.
        """
        self._check_web_services_version()

        if headers is None:
            headers = self.DEFAULT_HEADERS

        if not isinstance(data, str) and headers["Content-Type"] == "application/json":
            data = json.dumps(data)

        if path.startswith("/"):
            path = path[1:]
        request_url = self.url + self.DEFAULT_REST_API_PATH + path

        # BUG FIX: honour the log_requests flag; the guard had been commented
        # out, which made every request log unconditionally.
        if self.log_requests:
            self.module.log(pformat(dict(url=request_url, data=data, method=method)))

        return request(url=request_url, data=data, method=method, headers=headers, use_proxy=True, force=False, last_mod_time=None,
                       timeout=self.DEFAULT_TIMEOUT, http_agent=self.HTTP_AGENT, force_basic_auth=True, ignore_errors=ignore_errors, **self.creds)
|
|
||||||
|
|
||||||
|
|
||||||
def create_multipart_formdata(files, fields=None, send_8kb=False):
    """Create the data for a multipart/form request.

    :param list(list) files: list of lists each containing (name, filename, path).
    :param list(list) fields: list of lists each containing (key, value).
    :param bool send_8kb: only sends the first 8kb of the files (default: False).
    :return: tuple of (headers dict with Content-Type/Content-Length, encoded body).
    """
    # Random 27-digit suffix makes boundary collisions with payload data unlikely.
    boundary = "---------------------------" + "".join([str(random.randint(0, 9)) for x in range(27)])
    data_parts = list()
    data = None

    if six.PY2:  # Generate payload for Python 2
        # On py2 the whole payload is assembled as str.
        newline = "\r\n"
        if fields is not None:
            for key, value in fields:
                data_parts.extend(["--%s" % boundary,
                                   'Content-Disposition: form-data; name="%s"' % key,
                                   "",
                                   value])

        for name, filename, path in files:
            with open(path, "rb") as fh:
                value = fh.read(8192) if send_8kb else fh.read()

                data_parts.extend(["--%s" % boundary,
                                   'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename),
                                   "Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream"),
                                   "",
                                   value])
        data_parts.extend(["--%s--" % boundary, ""])
        data = newline.join(data_parts)

    else:
        # On py3 every part is encoded to bytes before joining.
        newline = six.b("\r\n")
        if fields is not None:
            for key, value in fields:
                data_parts.extend([six.b("--%s" % boundary),
                                   six.b('Content-Disposition: form-data; name="%s"' % key),
                                   six.b(""),
                                   six.b(value)])

        for name, filename, path in files:
            with open(path, "rb") as fh:
                value = fh.read(8192) if send_8kb else fh.read()

                data_parts.extend([six.b("--%s" % boundary),
                                   six.b('Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename)),
                                   six.b("Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream")),
                                   six.b(""),
                                   value])
        data_parts.extend([six.b("--%s--" % boundary), b""])
        data = newline.join(data_parts)

    headers = {
        "Content-Type": "multipart/form-data; boundary=%s" % boundary,
        "Content-Length": str(len(data))}

    return headers, data
|
|
||||||
|
|
||||||
|
|
||||||
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    """Issue an HTTP request to a url, retrieving an optional JSON response.

    :return: tuple (status_code, decoded JSON body or the original ``data``
        argument when the response body was empty).
    :raises Exception: on undecodable bodies or status >= 400 unless
        ``ignore_errors`` is set.
    """

    if headers is None:
        headers = {"Content-Type": "application/json", "Accept": "application/json"}
    # NOTE: mutates a caller-supplied headers dict in place.
    headers.update({"netapp-client-type": "Ansible-%s" % ansible_version})

    if not http_agent:
        http_agent = "Ansible / %s" % ansible_version

    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError as err:
        # HTTP error responses still carry a body worth parsing below.
        r = err.fp

    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            raw_data = None
    except Exception:
        if ignore_errors:
            pass
        else:
            # NOTE(review): if r.read() itself raised, raw_data is unbound here
            # and this raise becomes a NameError - confirm intended behavior.
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data
|
|
||||||
|
|
||||||
|
|
||||||
def ems_log_event(source, server, name="Ansible", id="12345", version=ansible_version,
                  category="Information", event="setup", autosupport="false"):
    """Send an ems-autosupport-log event to an ONTAP server over ZAPI.

    NOTE: the ``id`` parameter shadows the builtin but is part of the public
    signature and must stay named as-is for callers.
    """
    ems_log = zapi.NaElement('ems-autosupport-log')
    # Host name invoking the API.
    ems_log.add_new_child("computer-name", name)
    # ID of event. A user defined event-id, range [0..2^32-2].
    ems_log.add_new_child("event-id", id)
    # Name of the application invoking the API.
    ems_log.add_new_child("event-source", source)
    # Version of application invoking the API.
    ems_log.add_new_child("app-version", version)
    # Application defined category of the event.
    ems_log.add_new_child("category", category)
    # Description of event to log. An application defined message to log.
    ems_log.add_new_child("event-description", event)
    ems_log.add_new_child("log-level", "6")
    ems_log.add_new_child("auto-support", autosupport)
    server.invoke_successfully(ems_log, True)
|
|
||||||
|
|
||||||
|
|
||||||
def get_cserver_zapi(server):
    """Return the admin (cluster) vserver name via a ZAPI vserver-get-iter query."""
    vserver_info = zapi.NaElement('vserver-get-iter')
    # Filter the iterator to vservers of type 'admin' only.
    query_details = zapi.NaElement.create_node_with_children('vserver-info', **{'vserver-type': 'admin'})
    query = zapi.NaElement('query')
    query.add_child_elem(query_details)
    vserver_info.add_child_elem(query)
    # enable_tunneling=False - presumably so the query runs against the cluster
    # itself rather than being tunneled to a vserver; confirm against callers.
    result = server.invoke_successfully(vserver_info,
                                        enable_tunneling=False)
    attribute_list = result.get_child_by_name('attributes-list')
    vserver_list = attribute_list.get_child_by_name('vserver-info')
    return vserver_list.get_child_content('vserver-name')
|
|
||||||
|
|
||||||
|
|
||||||
def get_cserver(connection, is_rest=False):
    """Return the administrative vserver name, via ZAPI or the REST private CLI."""
    if not is_rest:
        return get_cserver_zapi(connection)

    records, error = connection.get("private/cli/vserver", {'fields': 'type'})
    if records is None or error is not None:
        # exit if there is an error or no data
        return None

    vservers = records.get('records')
    if vservers is None:
        return None

    for vserver in vservers:
        if vserver['type'] == 'admin':  # cluster admin
            return vserver['vserver']
    if len(vservers) == 1:  # assume vserver admin
        return vservers[0]['vserver']

    return None
|
|
||||||
|
|
||||||
|
|
||||||
class OntapRestAPI(object):
    """Thin wrapper around the ONTAP REST API using the requests library.

    Collects error and debug information in ``self.errors`` / ``self.debug_logs``
    and decides between REST and ZAPI based on the ``use_rest`` module option.
    """

    def __init__(self, module, timeout=60):
        self.module = module
        self.username = self.module.params['username']
        self.password = self.module.params['password']
        self.hostname = self.module.params['hostname']
        self.use_rest = self.module.params['use_rest']
        self.verify = self.module.params['validate_certs']
        self.timeout = timeout
        self.url = 'https://' + self.hostname + '/api/'
        # Accumulated diagnostics; appended to by log_error/log_debug.
        self.errors = list()
        self.debug_logs = list()
        self.check_required_library()

    def check_required_library(self):
        """Fail the module when the requests package is not importable."""
        if not HAS_REQUESTS:
            self.module.fail_json(msg=missing_required_lib('requests'))

    def send_request(self, method, api, params, json=None, return_status_code=False):
        ''' send http request and process response, including error conditions '''
        url = self.url + api
        status_code = None
        content = None
        json_dict = None
        json_error = None
        error_details = None

        def get_json(response):
            ''' extract json, and error message if present '''
            try:
                json = response.json()
            except ValueError:
                return None, None
            error = json.get('error')
            return json, error

        try:
            response = requests.request(method, url, verify=self.verify, auth=(self.username, self.password), params=params, timeout=self.timeout, json=json)
            content = response.content  # for debug purposes
            status_code = response.status_code
            # If the response was successful, no Exception will be raised
            response.raise_for_status()
            json_dict, json_error = get_json(response)
        except requests.exceptions.HTTPError as err:
            # Prefer the API's own error payload over the HTTP error text.
            __, json_error = get_json(response)
            if json_error is None:
                self.log_error(status_code, 'HTTP error: %s' % err)
                error_details = str(err)
            # If an error was reported in the json payload, it is handled below
        except requests.exceptions.ConnectionError as err:
            self.log_error(status_code, 'Connection error: %s' % err)
            error_details = str(err)
        except Exception as err:
            self.log_error(status_code, 'Other error: %s' % err)
            error_details = str(err)
        if json_error is not None:
            # NOTE(review): %d assumes status_code is set whenever json_error is;
            # a None status_code here would raise a TypeError - confirm.
            self.log_error(status_code, 'Endpoint error: %d: %s' % (status_code, json_error))
            error_details = json_error
        self.log_debug(status_code, content)
        if return_status_code:
            return status_code, error_details
        return json_dict, error_details

    def get(self, api, params):
        # Convenience wrapper around send_request for GET.
        method = 'GET'
        return self.send_request(method, api, params)

    def post(self, api, data, params=None):
        # Convenience wrapper around send_request for POST with a JSON body.
        method = 'POST'
        return self.send_request(method, api, params, json=data)

    def patch(self, api, data, params=None):
        # Convenience wrapper around send_request for PATCH with a JSON body.
        method = 'PATCH'
        return self.send_request(method, api, params, json=data)

    def delete(self, api, data, params=None):
        # Convenience wrapper around send_request for DELETE with a JSON body.
        method = 'DELETE'
        return self.send_request(method, api, params, json=data)

    def _is_rest(self, used_unsupported_rest_properties=None):
        """Decide whether to use REST; returns (use_rest, error_or_None)."""
        if self.use_rest == "Always":
            if used_unsupported_rest_properties:
                error = "REST API currently does not support '%s'" % \
                        ', '.join(used_unsupported_rest_properties)
                return True, error
            else:
                return True, None
        if self.use_rest == 'Never' or used_unsupported_rest_properties:
            # force ZAPI if requested or if some parameter requires it
            return False, None
        # 'Auto': probe a REST endpoint to see whether REST is available.
        method = 'HEAD'
        api = 'cluster/software'
        status_code, __ = self.send_request(method, api, params=None, return_status_code=True)
        if status_code == 200:
            return True, None
        return False, None

    def is_rest(self, used_unsupported_rest_properties=None):
        ''' only return error if there is a reason to '''
        use_rest, error = self._is_rest(used_unsupported_rest_properties)
        if used_unsupported_rest_properties is None:
            return use_rest
        return use_rest, error

    def log_error(self, status_code, message):
        # Errors are recorded both in the error list and the debug trail.
        self.errors.append(message)
        self.debug_logs.append((status_code, message))

    def log_debug(self, status_code, content):
        self.debug_logs.append((status_code, content))
|
|
||||||
|
|
||||||
|
|
||||||
class AwsCvsRestAPI(object):
|
|
||||||
def __init__(self, module, timeout=60):
|
|
||||||
self.module = module
|
|
||||||
self.api_key = self.module.params['api_key']
|
|
||||||
self.secret_key = self.module.params['secret_key']
|
|
||||||
self.api_url = self.module.params['api_url']
|
|
||||||
self.verify = self.module.params['validate_certs']
|
|
||||||
self.timeout = timeout
|
|
||||||
self.url = 'https://' + self.api_url + '/v1/'
|
|
||||||
self.check_required_library()
|
|
||||||
|
|
||||||
def check_required_library(self):
|
|
||||||
if not HAS_REQUESTS:
|
|
||||||
self.module.fail_json(msg=missing_required_lib('requests'))
|
|
||||||
|
|
||||||
def send_request(self, method, api, params, json=None):
|
|
||||||
''' send http request and process reponse, including error conditions '''
|
|
||||||
url = self.url + api
|
|
||||||
status_code = None
|
|
||||||
content = None
|
|
||||||
json_dict = None
|
|
||||||
json_error = None
|
|
||||||
error_details = None
|
|
||||||
headers = {
|
|
||||||
'Content-type': "application/json",
|
|
||||||
'api-key': self.api_key,
|
|
||||||
'secret-key': self.secret_key,
|
|
||||||
'Cache-Control': "no-cache",
|
|
||||||
}
|
|
||||||
|
|
||||||
def get_json(response):
|
|
||||||
''' extract json, and error message if present '''
|
|
||||||
try:
|
|
||||||
json = response.json()
|
|
||||||
|
|
||||||
except ValueError:
|
|
||||||
return None, None
|
|
||||||
success_code = [200, 201, 202]
|
|
||||||
if response.status_code not in success_code:
|
|
||||||
error = json.get('message')
|
|
||||||
else:
|
|
||||||
error = None
|
|
||||||
return json, error
|
|
||||||
try:
|
|
||||||
response = requests.request(method, url, headers=headers, timeout=self.timeout, json=json)
|
|
||||||
status_code = response.status_code
|
|
||||||
# If the response was successful, no Exception will be raised
|
|
||||||
json_dict, json_error = get_json(response)
|
|
||||||
except requests.exceptions.HTTPError as err:
|
|
||||||
__, json_error = get_json(response)
|
|
||||||
if json_error is None:
|
|
||||||
error_details = str(err)
|
|
||||||
except requests.exceptions.ConnectionError as err:
|
|
||||||
error_details = str(err)
|
|
||||||
except Exception as err:
|
|
||||||
error_details = str(err)
|
|
||||||
if json_error is not None:
|
|
||||||
error_details = json_error
|
|
||||||
|
|
||||||
return json_dict, error_details
|
|
||||||
|
|
||||||
# If an error was reported in the json payload, it is handled below
|
|
||||||
def get(self, api, params=None):
|
|
||||||
method = 'GET'
|
|
||||||
return self.send_request(method, api, params)
|
|
||||||
|
|
||||||
def post(self, api, data, params=None):
|
|
||||||
method = 'POST'
|
|
||||||
return self.send_request(method, api, params, json=data)
|
|
||||||
|
|
||||||
def patch(self, api, data, params=None):
|
|
||||||
method = 'PATCH'
|
|
||||||
return self.send_request(method, api, params, json=data)
|
|
||||||
|
|
||||||
def put(self, api, data, params=None):
|
|
||||||
method = 'PUT'
|
|
||||||
return self.send_request(method, api, params, json=data)
|
|
||||||
|
|
||||||
def delete(self, api, data, params=None):
|
|
||||||
method = 'DELETE'
|
|
||||||
return self.send_request(method, api, params, json=data)
|
|
||||||
|
|
||||||
def get_state(self, jobId):
|
|
||||||
""" Method to get the state of the job """
|
|
||||||
method = 'GET'
|
|
||||||
response, status_code = self.get('Jobs/%s' % jobId)
|
|
||||||
while str(response['state']) not in 'done':
|
|
||||||
response, status_code = self.get('Jobs/%s' % jobId)
|
|
||||||
return 'done'
|
|
||||||
343
plugins/module_utils/_version.py
Normal file
343
plugins/module_utils/_version.py
Normal file
@@ -0,0 +1,343 @@
|
|||||||
|
# Vendored copy of distutils/version.py from CPython 3.9.5
|
||||||
|
#
|
||||||
|
# Implements multiple version numbering conventions for the
|
||||||
|
# Python Module Distribution Utilities.
|
||||||
|
#
|
||||||
|
# PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0)
|
||||||
|
#
|
||||||
|
|
||||||
|
"""Provides classes to represent module version numbers (one class for
|
||||||
|
each style of version numbering). There are currently two such classes
|
||||||
|
implemented: StrictVersion and LooseVersion.
|
||||||
|
|
||||||
|
Every version number class implements the following interface:
|
||||||
|
* the 'parse' method takes a string and parses it to some internal
|
||||||
|
representation; if the string is an invalid version number,
|
||||||
|
'parse' raises a ValueError exception
|
||||||
|
* the class constructor takes an optional string argument which,
|
||||||
|
if supplied, is passed to 'parse'
|
||||||
|
* __str__ reconstructs the string that was passed to 'parse' (or
|
||||||
|
an equivalent string -- ie. one that will generate an equivalent
|
||||||
|
version number instance)
|
||||||
|
* __repr__ generates Python code to recreate the version number instance
|
||||||
|
* _cmp compares the current instance with either another instance
|
||||||
|
of the same class or a string (which will be parsed to an instance
|
||||||
|
of the same class, thus must follow the same rules)
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
|
try:
|
||||||
|
RE_FLAGS = re.VERBOSE | re.ASCII
|
||||||
|
except AttributeError:
|
||||||
|
RE_FLAGS = re.VERBOSE
|
||||||
|
|
||||||
|
|
||||||
|
class Version:
|
||||||
|
"""Abstract base class for version numbering classes. Just provides
|
||||||
|
constructor (__init__) and reproducer (__repr__), because those
|
||||||
|
seem to be the same for all version numbering classes; and route
|
||||||
|
rich comparisons to _cmp.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, vstring=None):
|
||||||
|
if vstring:
|
||||||
|
self.parse(vstring)
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return "%s ('%s')" % (self.__class__.__name__, str(self))
|
||||||
|
|
||||||
|
def __eq__(self, other):
|
||||||
|
c = self._cmp(other)
|
||||||
|
if c is NotImplemented:
|
||||||
|
return c
|
||||||
|
return c == 0
|
||||||
|
|
||||||
|
def __lt__(self, other):
|
||||||
|
c = self._cmp(other)
|
||||||
|
if c is NotImplemented:
|
||||||
|
return c
|
||||||
|
return c < 0
|
||||||
|
|
||||||
|
def __le__(self, other):
|
||||||
|
c = self._cmp(other)
|
||||||
|
if c is NotImplemented:
|
||||||
|
return c
|
||||||
|
return c <= 0
|
||||||
|
|
||||||
|
def __gt__(self, other):
|
||||||
|
c = self._cmp(other)
|
||||||
|
if c is NotImplemented:
|
||||||
|
return c
|
||||||
|
return c > 0
|
||||||
|
|
||||||
|
def __ge__(self, other):
|
||||||
|
c = self._cmp(other)
|
||||||
|
if c is NotImplemented:
|
||||||
|
return c
|
||||||
|
return c >= 0
|
||||||
|
|
||||||
|
|
||||||
|
# Interface for version-number classes -- must be implemented
|
||||||
|
# by the following classes (the concrete ones -- Version should
|
||||||
|
# be treated as an abstract class).
|
||||||
|
# __init__ (string) - create and take same action as 'parse'
|
||||||
|
# (string parameter is optional)
|
||||||
|
# parse (string) - convert a string representation to whatever
|
||||||
|
# internal representation is appropriate for
|
||||||
|
# this style of version numbering
|
||||||
|
# __str__ (self) - convert back to a string; should be very similar
|
||||||
|
# (if not identical to) the string supplied to parse
|
||||||
|
# __repr__ (self) - generate Python code to recreate
|
||||||
|
# the instance
|
||||||
|
# _cmp (self, other) - compare two version numbers ('other' may
|
||||||
|
# be an unparsed version string, or another
|
||||||
|
# instance of your version class)
|
||||||
|
|
||||||
|
|
||||||
|
class StrictVersion(Version):
|
||||||
|
"""Version numbering for anal retentives and software idealists.
|
||||||
|
Implements the standard interface for version number classes as
|
||||||
|
described above. A version number consists of two or three
|
||||||
|
dot-separated numeric components, with an optional "pre-release" tag
|
||||||
|
on the end. The pre-release tag consists of the letter 'a' or 'b'
|
||||||
|
followed by a number. If the numeric components of two version
|
||||||
|
numbers are equal, then one with a pre-release tag will always
|
||||||
|
be deemed earlier (lesser) than one without.
|
||||||
|
|
||||||
|
The following are valid version numbers (shown in the order that
|
||||||
|
would be obtained by sorting according to the supplied cmp function):
|
||||||
|
|
||||||
|
0.4 0.4.0 (these two are equivalent)
|
||||||
|
0.4.1
|
||||||
|
0.5a1
|
||||||
|
0.5b3
|
||||||
|
0.5
|
||||||
|
0.9.6
|
||||||
|
1.0
|
||||||
|
1.0.4a3
|
||||||
|
1.0.4b1
|
||||||
|
1.0.4
|
||||||
|
|
||||||
|
The following are examples of invalid version numbers:
|
||||||
|
|
||||||
|
1
|
||||||
|
2.7.2.2
|
||||||
|
1.3.a4
|
||||||
|
1.3pl1
|
||||||
|
1.3c4
|
||||||
|
|
||||||
|
The rationale for this version numbering system will be explained
|
||||||
|
in the distutils documentation.
|
||||||
|
"""
|
||||||
|
|
||||||
|
version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
|
||||||
|
RE_FLAGS)
|
||||||
|
|
||||||
|
def parse(self, vstring):
|
||||||
|
match = self.version_re.match(vstring)
|
||||||
|
if not match:
|
||||||
|
raise ValueError("invalid version number '%s'" % vstring)
|
||||||
|
|
||||||
|
(major, minor, patch, prerelease, prerelease_num) = \
|
||||||
|
match.group(1, 2, 4, 5, 6)
|
||||||
|
|
||||||
|
if patch:
|
||||||
|
self.version = tuple(map(int, [major, minor, patch]))
|
||||||
|
else:
|
||||||
|
self.version = tuple(map(int, [major, minor])) + (0,)
|
||||||
|
|
||||||
|
if prerelease:
|
||||||
|
self.prerelease = (prerelease[0], int(prerelease_num))
|
||||||
|
else:
|
||||||
|
self.prerelease = None
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
if self.version[2] == 0:
|
||||||
|
vstring = '.'.join(map(str, self.version[0:2]))
|
||||||
|
else:
|
||||||
|
vstring = '.'.join(map(str, self.version))
|
||||||
|
|
||||||
|
if self.prerelease:
|
||||||
|
vstring = vstring + self.prerelease[0] + str(self.prerelease[1])
|
||||||
|
|
||||||
|
return vstring
|
||||||
|
|
||||||
|
def _cmp(self, other):
|
||||||
|
if isinstance(other, str):
|
||||||
|
other = StrictVersion(other)
|
||||||
|
elif not isinstance(other, StrictVersion):
|
||||||
|
return NotImplemented
|
||||||
|
|
||||||
|
if self.version != other.version:
|
||||||
|
# numeric versions don't match
|
||||||
|
# prerelease stuff doesn't matter
|
||||||
|
if self.version < other.version:
|
||||||
|
return -1
|
||||||
|
else:
|
||||||
|
return 1
|
||||||
|
|
||||||
|
# have to compare prerelease
|
||||||
|
# case 1: neither has prerelease; they're equal
|
||||||
|
# case 2: self has prerelease, other doesn't; other is greater
|
||||||
|
# case 3: self doesn't have prerelease, other does: self is greater
|
||||||
|
# case 4: both have prerelease: must compare them!
|
||||||
|
|
||||||
|
if (not self.prerelease and not other.prerelease):
|
||||||
|
return 0
|
||||||
|
elif (self.prerelease and not other.prerelease):
|
||||||
|
return -1
|
||||||
|
elif (not self.prerelease and other.prerelease):
|
||||||
|
return 1
|
||||||
|
elif (self.prerelease and other.prerelease):
|
||||||
|
if self.prerelease == other.prerelease:
|
||||||
|
return 0
|
||||||
|
elif self.prerelease < other.prerelease:
|
||||||
|
return -1
|
||||||
|
else:
|
||||||
|
return 1
|
||||||
|
else:
|
||||||
|
raise AssertionError("never get here")
|
||||||
|
|
||||||
|
# end class StrictVersion
|
||||||
|
|
||||||
|
# The rules according to Greg Stein:
|
||||||
|
# 1) a version number has 1 or more numbers separated by a period or by
|
||||||
|
# sequences of letters. If only periods, then these are compared
|
||||||
|
# left-to-right to determine an ordering.
|
||||||
|
# 2) sequences of letters are part of the tuple for comparison and are
|
||||||
|
# compared lexicographically
|
||||||
|
# 3) recognize the numeric components may have leading zeroes
|
||||||
|
#
|
||||||
|
# The LooseVersion class below implements these rules: a version number
|
||||||
|
# string is split up into a tuple of integer and string components, and
|
||||||
|
# comparison is a simple tuple comparison. This means that version
|
||||||
|
# numbers behave in a predictable and obvious way, but a way that might
|
||||||
|
# not necessarily be how people *want* version numbers to behave. There
|
||||||
|
# wouldn't be a problem if people could stick to purely numeric version
|
||||||
|
# numbers: just split on period and compare the numbers as tuples.
|
||||||
|
# However, people insist on putting letters into their version numbers;
|
||||||
|
# the most common purpose seems to be:
|
||||||
|
# - indicating a "pre-release" version
|
||||||
|
# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
|
||||||
|
# - indicating a post-release patch ('p', 'pl', 'patch')
|
||||||
|
# but of course this can't cover all version number schemes, and there's
|
||||||
|
# no way to know what a programmer means without asking him.
|
||||||
|
#
|
||||||
|
# The problem is what to do with letters (and other non-numeric
|
||||||
|
# characters) in a version number. The current implementation does the
|
||||||
|
# obvious and predictable thing: keep them as strings and compare
|
||||||
|
# lexically within a tuple comparison. This has the desired effect if
|
||||||
|
# an appended letter sequence implies something "post-release":
|
||||||
|
# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
|
||||||
|
#
|
||||||
|
# However, if letters in a version number imply a pre-release version,
|
||||||
|
# the "obvious" thing isn't correct. Eg. you would expect that
|
||||||
|
# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
|
||||||
|
# implemented here, this just isn't so.
|
||||||
|
#
|
||||||
|
# Two possible solutions come to mind. The first is to tie the
|
||||||
|
# comparison algorithm to a particular set of semantic rules, as has
|
||||||
|
# been done in the StrictVersion class above. This works great as long
|
||||||
|
# as everyone can go along with bondage and discipline. Hopefully a
|
||||||
|
# (large) subset of Python module programmers will agree that the
|
||||||
|
# particular flavour of bondage and discipline provided by StrictVersion
|
||||||
|
# provides enough benefit to be worth using, and will submit their
|
||||||
|
# version numbering scheme to its domination. The free-thinking
|
||||||
|
# anarchists in the lot will never give in, though, and something needs
|
||||||
|
# to be done to accommodate them.
|
||||||
|
#
|
||||||
|
# Perhaps a "moderately strict" version class could be implemented that
|
||||||
|
# lets almost anything slide (syntactically), and makes some heuristic
|
||||||
|
# assumptions about non-digits in version number strings. This could
|
||||||
|
# sink into special-case-hell, though; if I was as talented and
|
||||||
|
# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
|
||||||
|
# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
|
||||||
|
# just as happy dealing with things like "2g6" and "1.13++". I don't
|
||||||
|
# think I'm smart enough to do it right though.
|
||||||
|
#
|
||||||
|
# In any case, I've coded the test suite for this module (see
|
||||||
|
# ../test/test_version.py) specifically to fail on things like comparing
|
||||||
|
# "1.2a2" and "1.2". That's not because the *code* is doing anything
|
||||||
|
# wrong, it's because the simple, obvious design doesn't match my
|
||||||
|
# complicated, hairy expectations for real-world version numbers. It
|
||||||
|
# would be a snap to fix the test suite to say, "Yep, LooseVersion does
|
||||||
|
# the Right Thing" (ie. the code matches the conception). But I'd rather
|
||||||
|
# have a conception that matches common notions about version numbers.
|
||||||
|
|
||||||
|
|
||||||
|
class LooseVersion(Version):
|
||||||
|
"""Version numbering for anarchists and software realists.
|
||||||
|
Implements the standard interface for version number classes as
|
||||||
|
described above. A version number consists of a series of numbers,
|
||||||
|
separated by either periods or strings of letters. When comparing
|
||||||
|
version numbers, the numeric components will be compared
|
||||||
|
numerically, and the alphabetic components lexically. The following
|
||||||
|
are all valid version numbers, in no particular order:
|
||||||
|
|
||||||
|
1.5.1
|
||||||
|
1.5.2b2
|
||||||
|
161
|
||||||
|
3.10a
|
||||||
|
8.02
|
||||||
|
3.4j
|
||||||
|
1996.07.12
|
||||||
|
3.2.pl0
|
||||||
|
3.1.1.6
|
||||||
|
2g6
|
||||||
|
11g
|
||||||
|
0.960923
|
||||||
|
2.2beta29
|
||||||
|
1.13++
|
||||||
|
5.5.kw
|
||||||
|
2.0b1pl0
|
||||||
|
|
||||||
|
In fact, there is no such thing as an invalid version number under
|
||||||
|
this scheme; the rules for comparison are simple and predictable,
|
||||||
|
but may not always give the results you want (for some definition
|
||||||
|
of "want").
|
||||||
|
"""
|
||||||
|
|
||||||
|
component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
|
||||||
|
|
||||||
|
def __init__(self, vstring=None):
|
||||||
|
if vstring:
|
||||||
|
self.parse(vstring)
|
||||||
|
|
||||||
|
def parse(self, vstring):
|
||||||
|
# I've given up on thinking I can reconstruct the version string
|
||||||
|
# from the parsed tuple -- so I just store the string here for
|
||||||
|
# use by __str__
|
||||||
|
self.vstring = vstring
|
||||||
|
components = [x for x in self.component_re.split(vstring) if x and x != '.']
|
||||||
|
for i, obj in enumerate(components):
|
||||||
|
try:
|
||||||
|
components[i] = int(obj)
|
||||||
|
except ValueError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
self.version = components
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
return self.vstring
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return "LooseVersion ('%s')" % str(self)
|
||||||
|
|
||||||
|
def _cmp(self, other):
|
||||||
|
if isinstance(other, str):
|
||||||
|
other = LooseVersion(other)
|
||||||
|
elif not isinstance(other, LooseVersion):
|
||||||
|
return NotImplemented
|
||||||
|
|
||||||
|
if self.version == other.version:
|
||||||
|
return 0
|
||||||
|
if self.version < other.version:
|
||||||
|
return -1
|
||||||
|
if self.version > other.version:
|
||||||
|
return 1
|
||||||
|
|
||||||
|
# end class LooseVersion
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -7,54 +7,41 @@
|
|||||||
from __future__ import (absolute_import, division, print_function)
|
from __future__ import (absolute_import, division, print_function)
|
||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
import json
|
|
||||||
from distutils.version import StrictVersion
|
|
||||||
|
|
||||||
from ansible.module_utils.basic import missing_required_lib
|
from ansible.module_utils.basic import missing_required_lib
|
||||||
from ansible.module_utils.urls import fetch_url
|
|
||||||
from ansible.module_utils.common.text.converters import to_native
|
from ansible.module_utils.common.text.converters import to_native
|
||||||
|
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from urllib import quote_plus # Python 2.X
|
from urllib import quote_plus # Python 2.X
|
||||||
|
from urlparse import urljoin
|
||||||
except ImportError:
|
except ImportError:
|
||||||
from urllib.parse import quote_plus # Python 3+
|
from urllib.parse import quote_plus, urljoin # Python 3+
|
||||||
|
|
||||||
import traceback
|
import traceback
|
||||||
|
|
||||||
GITLAB_IMP_ERR = None
|
GITLAB_IMP_ERR = None
|
||||||
try:
|
try:
|
||||||
import gitlab
|
import gitlab
|
||||||
|
import requests
|
||||||
HAS_GITLAB_PACKAGE = True
|
HAS_GITLAB_PACKAGE = True
|
||||||
except Exception:
|
except Exception:
|
||||||
GITLAB_IMP_ERR = traceback.format_exc()
|
GITLAB_IMP_ERR = traceback.format_exc()
|
||||||
HAS_GITLAB_PACKAGE = False
|
HAS_GITLAB_PACKAGE = False
|
||||||
|
|
||||||
|
|
||||||
def request(module, api_url, project, path, access_token, private_token, rawdata='', method='GET'):
|
def auth_argument_spec(spec=None):
|
||||||
url = "%s/v4/projects/%s%s" % (api_url, quote_plus(project), path)
|
arg_spec = (dict(
|
||||||
headers = {}
|
api_token=dict(type='str', no_log=True),
|
||||||
if access_token:
|
api_oauth_token=dict(type='str', no_log=True),
|
||||||
headers['Authorization'] = "Bearer %s" % access_token
|
api_job_token=dict(type='str', no_log=True),
|
||||||
else:
|
))
|
||||||
headers['Private-Token'] = private_token
|
if spec:
|
||||||
|
arg_spec.update(spec)
|
||||||
headers['Accept'] = "application/json"
|
return arg_spec
|
||||||
headers['Content-Type'] = "application/json"
|
|
||||||
|
|
||||||
response, info = fetch_url(module=module, url=url, headers=headers, data=rawdata, method=method)
|
|
||||||
status = info['status']
|
|
||||||
content = ""
|
|
||||||
if response:
|
|
||||||
content = response.read()
|
|
||||||
if status == 204:
|
|
||||||
return True, content
|
|
||||||
elif status == 200 or status == 201:
|
|
||||||
return True, json.loads(content)
|
|
||||||
else:
|
|
||||||
return False, str(status) + ": " + content
|
|
||||||
|
|
||||||
|
|
||||||
def findProject(gitlab_instance, identifier):
|
def find_project(gitlab_instance, identifier):
|
||||||
try:
|
try:
|
||||||
project = gitlab_instance.projects.get(identifier)
|
project = gitlab_instance.projects.get(identifier)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
@@ -67,7 +54,7 @@ def findProject(gitlab_instance, identifier):
|
|||||||
return project
|
return project
|
||||||
|
|
||||||
|
|
||||||
def findGroup(gitlab_instance, identifier):
|
def find_group(gitlab_instance, identifier):
|
||||||
try:
|
try:
|
||||||
project = gitlab_instance.groups.get(identifier)
|
project = gitlab_instance.groups.get(identifier)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
@@ -76,12 +63,14 @@ def findGroup(gitlab_instance, identifier):
|
|||||||
return project
|
return project
|
||||||
|
|
||||||
|
|
||||||
def gitlabAuthentication(module):
|
def gitlab_authentication(module):
|
||||||
gitlab_url = module.params['api_url']
|
gitlab_url = module.params['api_url']
|
||||||
validate_certs = module.params['validate_certs']
|
validate_certs = module.params['validate_certs']
|
||||||
gitlab_user = module.params['api_username']
|
gitlab_user = module.params['api_username']
|
||||||
gitlab_password = module.params['api_password']
|
gitlab_password = module.params['api_password']
|
||||||
gitlab_token = module.params['api_token']
|
gitlab_token = module.params['api_token']
|
||||||
|
gitlab_oauth_token = module.params['api_oauth_token']
|
||||||
|
gitlab_job_token = module.params['api_job_token']
|
||||||
|
|
||||||
if not HAS_GITLAB_PACKAGE:
|
if not HAS_GITLAB_PACKAGE:
|
||||||
module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
|
module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
|
||||||
@@ -90,11 +79,20 @@ def gitlabAuthentication(module):
|
|||||||
# python-gitlab library remove support for username/password authentication since 1.13.0
|
# python-gitlab library remove support for username/password authentication since 1.13.0
|
||||||
# Changelog : https://github.com/python-gitlab/python-gitlab/releases/tag/v1.13.0
|
# Changelog : https://github.com/python-gitlab/python-gitlab/releases/tag/v1.13.0
|
||||||
# This condition allow to still support older version of the python-gitlab library
|
# This condition allow to still support older version of the python-gitlab library
|
||||||
if StrictVersion(gitlab.__version__) < StrictVersion("1.13.0"):
|
if LooseVersion(gitlab.__version__) < LooseVersion("1.13.0"):
|
||||||
gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password,
|
gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password,
|
||||||
private_token=gitlab_token, api_version=4)
|
private_token=gitlab_token, api_version=4)
|
||||||
else:
|
else:
|
||||||
gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, private_token=gitlab_token, api_version=4)
|
# We can create an oauth_token using a username and password
|
||||||
|
# https://docs.gitlab.com/ee/api/oauth2.html#authorization-code-flow
|
||||||
|
if gitlab_user:
|
||||||
|
data = {'grant_type': 'password', 'username': gitlab_user, 'password': gitlab_password}
|
||||||
|
resp = requests.post(urljoin(gitlab_url, "oauth/token"), data=data, verify=validate_certs)
|
||||||
|
resp_data = resp.json()
|
||||||
|
gitlab_oauth_token = resp_data["access_token"]
|
||||||
|
|
||||||
|
gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, private_token=gitlab_token,
|
||||||
|
oauth_token=gitlab_oauth_token, job_token=gitlab_job_token, api_version=4)
|
||||||
|
|
||||||
gitlab_instance.auth()
|
gitlab_instance.auth()
|
||||||
except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
|
except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
|
||||||
|
|||||||
@@ -38,6 +38,7 @@ from ansible.module_utils.six.moves.urllib.parse import urlencode, quote
|
|||||||
from ansible.module_utils.six.moves.urllib.error import HTTPError
|
from ansible.module_utils.six.moves.urllib.error import HTTPError
|
||||||
from ansible.module_utils.common.text.converters import to_native, to_text
|
from ansible.module_utils.common.text.converters import to_native, to_text
|
||||||
|
|
||||||
|
URL_REALM_INFO = "{url}/realms/{realm}"
|
||||||
URL_REALMS = "{url}/admin/realms"
|
URL_REALMS = "{url}/admin/realms"
|
||||||
URL_REALM = "{url}/admin/realms/{realm}"
|
URL_REALM = "{url}/admin/realms/{realm}"
|
||||||
|
|
||||||
@@ -230,6 +231,31 @@ class KeycloakAPI(object):
|
|||||||
self.validate_certs = self.module.params.get('validate_certs')
|
self.validate_certs = self.module.params.get('validate_certs')
|
||||||
self.restheaders = connection_header
|
self.restheaders = connection_header
|
||||||
|
|
||||||
|
def get_realm_info_by_id(self, realm='master'):
|
||||||
|
""" Obtain realm public info by id
|
||||||
|
|
||||||
|
:param realm: realm id
|
||||||
|
:return: dict of real, representation or None if none matching exist
|
||||||
|
"""
|
||||||
|
realm_info_url = URL_REALM_INFO.format(url=self.baseurl, realm=realm)
|
||||||
|
|
||||||
|
try:
|
||||||
|
return json.loads(to_native(open_url(realm_info_url, method='GET', headers=self.restheaders,
|
||||||
|
validate_certs=self.validate_certs).read()))
|
||||||
|
|
||||||
|
except HTTPError as e:
|
||||||
|
if e.code == 404:
|
||||||
|
return None
|
||||||
|
else:
|
||||||
|
self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
|
||||||
|
exception=traceback.format_exc())
|
||||||
|
except ValueError as e:
|
||||||
|
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)),
|
||||||
|
exception=traceback.format_exc())
|
||||||
|
except Exception as e:
|
||||||
|
self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
|
||||||
|
exception=traceback.format_exc())
|
||||||
|
|
||||||
def get_realm_by_id(self, realm='master'):
|
def get_realm_by_id(self, realm='master'):
|
||||||
""" Obtain realm representation by id
|
""" Obtain realm representation by id
|
||||||
|
|
||||||
|
|||||||
232
plugins/module_utils/ilo_redfish_utils.py
Normal file
232
plugins/module_utils/ilo_redfish_utils.py
Normal file
@@ -0,0 +1,232 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved.
|
||||||
|
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import absolute_import, division, print_function
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
|
||||||
|
|
||||||
|
|
||||||
|
class iLORedfishUtils(RedfishUtils):
|
||||||
|
|
||||||
|
def get_ilo_sessions(self):
|
||||||
|
result = {}
|
||||||
|
# listing all users has always been slower than other operations, why?
|
||||||
|
session_list = []
|
||||||
|
sessions_results = []
|
||||||
|
# Get these entries, but does not fail if not found
|
||||||
|
properties = ['Description', 'Id', 'Name', 'UserName']
|
||||||
|
|
||||||
|
# Changed self.sessions_uri to Hardcoded string.
|
||||||
|
response = self.get_request(
|
||||||
|
self.root_uri + self.service_root + "SessionService/Sessions/")
|
||||||
|
if not response['ret']:
|
||||||
|
return response
|
||||||
|
result['ret'] = True
|
||||||
|
data = response['data']
|
||||||
|
|
||||||
|
if 'Oem' in data:
|
||||||
|
if data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]:
|
||||||
|
current_session = data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]
|
||||||
|
|
||||||
|
for sessions in data[u'Members']:
|
||||||
|
# session_list[] are URIs
|
||||||
|
session_list.append(sessions[u'@odata.id'])
|
||||||
|
# for each session, get details
|
||||||
|
for uri in session_list:
|
||||||
|
session = {}
|
||||||
|
if uri != current_session:
|
||||||
|
response = self.get_request(self.root_uri + uri)
|
||||||
|
if not response['ret']:
|
||||||
|
return response
|
||||||
|
data = response['data']
|
||||||
|
for property in properties:
|
||||||
|
if property in data:
|
||||||
|
session[property] = data[property]
|
||||||
|
sessions_results.append(session)
|
||||||
|
result["msg"] = sessions_results
|
||||||
|
result["ret"] = True
|
||||||
|
return result
|
||||||
|
|
||||||
|
def set_ntp_server(self, mgr_attributes):
|
||||||
|
result = {}
|
||||||
|
setkey = mgr_attributes['mgr_attr_name']
|
||||||
|
|
||||||
|
nic_info = self.get_manager_ethernet_uri()
|
||||||
|
ethuri = nic_info["nic_addr"]
|
||||||
|
|
||||||
|
response = self.get_request(self.root_uri + ethuri)
|
||||||
|
if not response['ret']:
|
||||||
|
return response
|
||||||
|
result['ret'] = True
|
||||||
|
data = response['data']
|
||||||
|
payload = {"DHCPv4": {
|
||||||
|
"UseNTPServers": ""
|
||||||
|
}}
|
||||||
|
|
||||||
|
if data["DHCPv4"]["UseNTPServers"]:
|
||||||
|
payload["DHCPv4"]["UseNTPServers"] = False
|
||||||
|
res_dhv4 = self.patch_request(self.root_uri + ethuri, payload)
|
||||||
|
if not res_dhv4['ret']:
|
||||||
|
return res_dhv4
|
||||||
|
|
||||||
|
payload = {"DHCPv6": {
|
||||||
|
"UseNTPServers": ""
|
||||||
|
}}
|
||||||
|
|
||||||
|
if data["DHCPv6"]["UseNTPServers"]:
|
||||||
|
payload["DHCPv6"]["UseNTPServers"] = False
|
||||||
|
res_dhv6 = self.patch_request(self.root_uri + ethuri, payload)
|
||||||
|
if not res_dhv6['ret']:
|
||||||
|
return res_dhv6
|
||||||
|
|
||||||
|
datetime_uri = self.manager_uri + "DateTime"
|
||||||
|
|
||||||
|
response = self.get_request(self.root_uri + datetime_uri)
|
||||||
|
if not response['ret']:
|
||||||
|
return response
|
||||||
|
|
||||||
|
data = response['data']
|
||||||
|
|
||||||
|
ntp_list = data[setkey]
|
||||||
|
if(len(ntp_list) == 2):
|
||||||
|
ntp_list.pop(0)
|
||||||
|
|
||||||
|
ntp_list.append(mgr_attributes['mgr_attr_value'])
|
||||||
|
|
||||||
|
payload = {setkey: ntp_list}
|
||||||
|
|
||||||
|
response1 = self.patch_request(self.root_uri + datetime_uri, payload)
|
||||||
|
if not response1['ret']:
|
||||||
|
return response1
|
||||||
|
|
||||||
|
return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgr_attributes['mgr_attr_name']}
|
||||||
|
|
||||||
|
def set_time_zone(self, attr):
|
||||||
|
key = attr['mgr_attr_name']
|
||||||
|
|
||||||
|
uri = self.manager_uri + "DateTime/"
|
||||||
|
response = self.get_request(self.root_uri + uri)
|
||||||
|
if not response['ret']:
|
||||||
|
return response
|
||||||
|
|
||||||
|
data = response["data"]
|
||||||
|
|
||||||
|
if key not in data:
|
||||||
|
return {'ret': False, 'changed': False, 'msg': "Key %s not found" % key}
|
||||||
|
|
||||||
|
timezones = data["TimeZoneList"]
|
||||||
|
index = ""
|
||||||
|
for tz in timezones:
|
||||||
|
if attr['mgr_attr_value'] in tz["Name"]:
|
||||||
|
index = tz["Index"]
|
||||||
|
break
|
||||||
|
|
||||||
|
payload = {key: {"Index": index}}
|
||||||
|
response = self.patch_request(self.root_uri + uri, payload)
|
||||||
|
if not response['ret']:
|
||||||
|
return response
|
||||||
|
|
||||||
|
return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']}
|
||||||
|
|
||||||
|
def set_dns_server(self, attr):
|
||||||
|
key = attr['mgr_attr_name']
|
||||||
|
nic_info = self.get_manager_ethernet_uri()
|
||||||
|
uri = nic_info["nic_addr"]
|
||||||
|
|
||||||
|
response = self.get_request(self.root_uri + uri)
|
||||||
|
if not response['ret']:
|
||||||
|
return response
|
||||||
|
|
||||||
|
data = response['data']
|
||||||
|
|
||||||
|
dns_list = data["Oem"]["Hpe"]["IPv4"][key]
|
||||||
|
|
||||||
|
if len(dns_list) == 3:
|
||||||
|
dns_list.pop(0)
|
||||||
|
|
||||||
|
dns_list.append(attr['mgr_attr_value'])
|
||||||
|
|
||||||
|
payload = {
|
||||||
|
"Oem": {
|
||||||
|
"Hpe": {
|
||||||
|
"IPv4": {
|
||||||
|
key: dns_list
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
response = self.patch_request(self.root_uri + uri, payload)
|
||||||
|
if not response['ret']:
|
||||||
|
return response
|
||||||
|
|
||||||
|
return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']}
|
||||||
|
|
||||||
|
def set_domain_name(self, attr):
|
||||||
|
key = attr['mgr_attr_name']
|
||||||
|
|
||||||
|
nic_info = self.get_manager_ethernet_uri()
|
||||||
|
ethuri = nic_info["nic_addr"]
|
||||||
|
|
||||||
|
response = self.get_request(self.root_uri + ethuri)
|
||||||
|
if not response['ret']:
|
||||||
|
return response
|
||||||
|
|
||||||
|
data = response['data']
|
||||||
|
|
||||||
|
payload = {"DHCPv4": {
|
||||||
|
"UseDomainName": ""
|
||||||
|
}}
|
||||||
|
|
||||||
|
if data["DHCPv4"]["UseDomainName"]:
|
||||||
|
payload["DHCPv4"]["UseDomainName"] = False
|
||||||
|
res_dhv4 = self.patch_request(self.root_uri + ethuri, payload)
|
||||||
|
if not res_dhv4['ret']:
|
||||||
|
return res_dhv4
|
||||||
|
|
||||||
|
payload = {"DHCPv6": {
|
||||||
|
"UseDomainName": ""
|
||||||
|
}}
|
||||||
|
|
||||||
|
if data["DHCPv6"]["UseDomainName"]:
|
||||||
|
payload["DHCPv6"]["UseDomainName"] = False
|
||||||
|
res_dhv6 = self.patch_request(self.root_uri + ethuri, payload)
|
||||||
|
if not res_dhv6['ret']:
|
||||||
|
return res_dhv6
|
||||||
|
|
||||||
|
domain_name = attr['mgr_attr_value']
|
||||||
|
|
||||||
|
payload = {"Oem": {
|
||||||
|
"Hpe": {
|
||||||
|
key: domain_name
|
||||||
|
}
|
||||||
|
}}
|
||||||
|
|
||||||
|
response = self.patch_request(self.root_uri + ethuri, payload)
|
||||||
|
if not response['ret']:
|
||||||
|
return response
|
||||||
|
return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']}
|
||||||
|
|
||||||
|
def set_wins_registration(self, mgrattr):
|
||||||
|
Key = mgrattr['mgr_attr_name']
|
||||||
|
|
||||||
|
nic_info = self.get_manager_ethernet_uri()
|
||||||
|
ethuri = nic_info["nic_addr"]
|
||||||
|
|
||||||
|
payload = {
|
||||||
|
"Oem": {
|
||||||
|
"Hpe": {
|
||||||
|
"IPv4": {
|
||||||
|
Key: False
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
response = self.patch_request(self.root_uri + ethuri, payload)
|
||||||
|
if not response['ret']:
|
||||||
|
return response
|
||||||
|
return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgrattr['mgr_attr_name']}
|
||||||
@@ -9,7 +9,8 @@ __metaclass__ = type
|
|||||||
import traceback
|
import traceback
|
||||||
|
|
||||||
from ansible.module_utils.basic import missing_required_lib
|
from ansible.module_utils.basic import missing_required_lib
|
||||||
from distutils.version import LooseVersion
|
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||||
|
|
||||||
REQUESTS_IMP_ERR = None
|
REQUESTS_IMP_ERR = None
|
||||||
try:
|
try:
|
||||||
|
|||||||
@@ -179,10 +179,10 @@ class IPAClient(object):
|
|||||||
result.append(key)
|
result.append(key)
|
||||||
return result
|
return result
|
||||||
|
|
||||||
def modify_if_diff(self, name, ipa_list, module_list, add_method, remove_method, item=None):
|
def modify_if_diff(self, name, ipa_list, module_list, add_method, remove_method, item=None, append=None):
|
||||||
changed = False
|
changed = False
|
||||||
diff = list(set(ipa_list) - set(module_list))
|
diff = list(set(ipa_list) - set(module_list))
|
||||||
if len(diff) > 0:
|
if append is not True and len(diff) > 0:
|
||||||
changed = True
|
changed = True
|
||||||
if not self.module.check_mode:
|
if not self.module.check_mode:
|
||||||
if item:
|
if item:
|
||||||
|
|||||||
@@ -52,3 +52,36 @@ def module_fails_on_exception(func):
|
|||||||
self.module.fail_json(msg=msg, exception=traceback.format_exc(),
|
self.module.fail_json(msg=msg, exception=traceback.format_exc(),
|
||||||
output=self.output, vars=self.vars.output(), **self.output)
|
output=self.output, vars=self.vars.output(), **self.output)
|
||||||
return wrapper
|
return wrapper
|
||||||
|
|
||||||
|
|
||||||
|
def check_mode_skip(func):
|
||||||
|
@wraps(func)
|
||||||
|
def wrapper(self, *args, **kwargs):
|
||||||
|
if not self.module.check_mode:
|
||||||
|
return func(self, *args, **kwargs)
|
||||||
|
return wrapper
|
||||||
|
|
||||||
|
|
||||||
|
def check_mode_skip_returns(callable=None, value=None):
|
||||||
|
|
||||||
|
def deco(func):
|
||||||
|
if callable is not None:
|
||||||
|
@wraps(func)
|
||||||
|
def wrapper_callable(self, *args, **kwargs):
|
||||||
|
if self.module.check_mode:
|
||||||
|
return callable(self, *args, **kwargs)
|
||||||
|
return func(self, *args, **kwargs)
|
||||||
|
return wrapper_callable
|
||||||
|
|
||||||
|
if value is not None:
|
||||||
|
@wraps(func)
|
||||||
|
def wrapper_value(self, *args, **kwargs):
|
||||||
|
if self.module.check_mode:
|
||||||
|
return value
|
||||||
|
return func(self, *args, **kwargs)
|
||||||
|
return wrapper_value
|
||||||
|
|
||||||
|
if callable is None and value is None:
|
||||||
|
return check_mode_skip
|
||||||
|
|
||||||
|
return deco
|
||||||
|
|||||||
@@ -141,11 +141,7 @@ class CmdMixin(object):
|
|||||||
fmt = find_format(param)
|
fmt = find_format(param)
|
||||||
value = extra_params[param]
|
value = extra_params[param]
|
||||||
else:
|
else:
|
||||||
self.module.deprecate("Cannot determine value for parameter: {0}. "
|
raise self.ModuleHelperException('Cannot determine value for parameter: {0}'.format(param))
|
||||||
"From version 4.0.0 onwards this will generate an exception".format(param),
|
|
||||||
version="4.0.0", collection_name="community.general")
|
|
||||||
continue
|
|
||||||
|
|
||||||
else:
|
else:
|
||||||
raise self.ModuleHelperException("run_command parameter must be either a str or a dict: {0}".format(param))
|
raise self.ModuleHelperException("run_command parameter must be either a str or a dict: {0}".format(param))
|
||||||
cmd_args = add_arg_formatted_param(cmd_args, fmt, value)
|
cmd_args = add_arg_formatted_param(cmd_args, fmt, value)
|
||||||
@@ -162,8 +158,9 @@ class CmdMixin(object):
|
|||||||
publish_rc=True,
|
publish_rc=True,
|
||||||
publish_out=True,
|
publish_out=True,
|
||||||
publish_err=True,
|
publish_err=True,
|
||||||
|
publish_cmd=True,
|
||||||
*args, **kwargs):
|
*args, **kwargs):
|
||||||
self.vars.cmd_args = self._calculate_args(extra_params, params)
|
cmd_args = self._calculate_args(extra_params, params)
|
||||||
options = dict(self.run_command_fixed_options)
|
options = dict(self.run_command_fixed_options)
|
||||||
options['check_rc'] = options.get('check_rc', self.check_rc)
|
options['check_rc'] = options.get('check_rc', self.check_rc)
|
||||||
options.update(kwargs)
|
options.update(kwargs)
|
||||||
@@ -175,13 +172,15 @@ class CmdMixin(object):
|
|||||||
})
|
})
|
||||||
self.update_output(force_lang=self.force_lang)
|
self.update_output(force_lang=self.force_lang)
|
||||||
options['environ_update'] = env_update
|
options['environ_update'] = env_update
|
||||||
rc, out, err = self.module.run_command(self.vars.cmd_args, *args, **options)
|
rc, out, err = self.module.run_command(cmd_args, *args, **options)
|
||||||
if publish_rc:
|
if publish_rc:
|
||||||
self.update_output(rc=rc)
|
self.update_output(rc=rc)
|
||||||
if publish_out:
|
if publish_out:
|
||||||
self.update_output(stdout=out)
|
self.update_output(stdout=out)
|
||||||
if publish_err:
|
if publish_err:
|
||||||
self.update_output(stderr=err)
|
self.update_output(stderr=err)
|
||||||
|
if publish_cmd:
|
||||||
|
self.update_output(cmd_args=cmd_args)
|
||||||
if process_output is None:
|
if process_output is None:
|
||||||
_process = self.process_command_output
|
_process = self.process_command_output
|
||||||
else:
|
else:
|
||||||
|
|||||||
61
plugins/module_utils/mh/mixins/deprecate_attrs.py
Normal file
61
plugins/module_utils/mh/mixins/deprecate_attrs.py
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# (c) 2020, Alexei Znamensky <russoz@gmail.com>
|
||||||
|
# Copyright: (c) 2020, Ansible Project
|
||||||
|
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||||
|
|
||||||
|
from __future__ import absolute_import, division, print_function
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
|
||||||
|
from ansible.module_utils.basic import AnsibleModule
|
||||||
|
|
||||||
|
|
||||||
|
class DeprecateAttrsMixin(object):
|
||||||
|
|
||||||
|
def _deprecate_setup(self, attr, target, module):
|
||||||
|
if target is None:
|
||||||
|
target = self
|
||||||
|
if not hasattr(target, attr):
|
||||||
|
raise ValueError("Target {0} has no attribute {1}".format(target, attr))
|
||||||
|
if module is None:
|
||||||
|
if isinstance(target, AnsibleModule):
|
||||||
|
module = target
|
||||||
|
elif hasattr(target, "module") and isinstance(target.module, AnsibleModule):
|
||||||
|
module = target.module
|
||||||
|
else:
|
||||||
|
raise ValueError("Failed to automatically discover the AnsibleModule instance. Pass 'module' parameter explicitly.")
|
||||||
|
|
||||||
|
# setup internal state dicts
|
||||||
|
value_attr = "__deprecated_attr_value"
|
||||||
|
trigger_attr = "__deprecated_attr_trigger"
|
||||||
|
if not hasattr(target, value_attr):
|
||||||
|
setattr(target, value_attr, {})
|
||||||
|
if not hasattr(target, trigger_attr):
|
||||||
|
setattr(target, trigger_attr, {})
|
||||||
|
value_dict = getattr(target, value_attr)
|
||||||
|
trigger_dict = getattr(target, trigger_attr)
|
||||||
|
return target, module, value_dict, trigger_dict
|
||||||
|
|
||||||
|
def _deprecate_attr(self, attr, msg, version=None, date=None, collection_name=None, target=None, value=None, module=None):
|
||||||
|
target, module, value_dict, trigger_dict = self._deprecate_setup(attr, target, module)
|
||||||
|
|
||||||
|
value_dict[attr] = getattr(target, attr, value)
|
||||||
|
trigger_dict[attr] = False
|
||||||
|
|
||||||
|
def _trigger():
|
||||||
|
if not trigger_dict[attr]:
|
||||||
|
module.deprecate(msg, version=version, date=date, collection_name=collection_name)
|
||||||
|
trigger_dict[attr] = True
|
||||||
|
|
||||||
|
def _getter(_self):
|
||||||
|
_trigger()
|
||||||
|
return value_dict[attr]
|
||||||
|
|
||||||
|
def _setter(_self, new_value):
|
||||||
|
_trigger()
|
||||||
|
value_dict[attr] = new_value
|
||||||
|
|
||||||
|
# override attribute
|
||||||
|
prop = property(_getter)
|
||||||
|
setattr(target, attr, prop)
|
||||||
|
setattr(target, "_{0}_setter".format(attr), prop.setter(_setter))
|
||||||
@@ -13,9 +13,10 @@ from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd im
|
|||||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin
|
from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin
|
||||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyMixin
|
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyMixin
|
||||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin, VarDict as _VD
|
from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin, VarDict as _VD
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deprecate_attrs import DeprecateAttrsMixin
|
||||||
|
|
||||||
|
|
||||||
class ModuleHelper(VarsMixin, DependencyMixin, ModuleHelperBase):
|
class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelperBase):
|
||||||
_output_conflict_list = ('msg', 'exception', 'output', 'vars', 'changed')
|
_output_conflict_list = ('msg', 'exception', 'output', 'vars', 'changed')
|
||||||
facts_name = None
|
facts_name = None
|
||||||
output_params = ()
|
output_params = ()
|
||||||
@@ -36,6 +37,15 @@ class ModuleHelper(VarsMixin, DependencyMixin, ModuleHelperBase):
|
|||||||
fact=name in self.facts_params,
|
fact=name in self.facts_params,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
self._deprecate_attr(
|
||||||
|
attr="VarDict",
|
||||||
|
msg="ModuleHelper.VarDict attribute is deprecated, use VarDict from "
|
||||||
|
"the ansible_collections.community.general.plugins.module_utils.mh.mixins.vars module instead",
|
||||||
|
version="6.0.0",
|
||||||
|
collection_name="community.general",
|
||||||
|
target=ModuleHelper,
|
||||||
|
module=self.module)
|
||||||
|
|
||||||
def update_output(self, **kwargs):
|
def update_output(self, **kwargs):
|
||||||
self.update_vars(meta={"output": True}, **kwargs)
|
self.update_vars(meta={"output": True}, **kwargs)
|
||||||
|
|
||||||
|
|||||||
@@ -1,598 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# This code is part of Ansible, but is an independent component.
|
|
||||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
|
||||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
|
||||||
# still belong to the author of the module, and may assign their own license
|
|
||||||
# to the complete work.
|
|
||||||
#
|
|
||||||
# (c) 2018 Red Hat Inc.
|
|
||||||
#
|
|
||||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
|
|
||||||
import os
|
|
||||||
from functools import partial
|
|
||||||
from ansible.module_utils.common.text.converters import to_native
|
|
||||||
from ansible.module_utils.six import iteritems
|
|
||||||
from ansible.module_utils.common.text.converters import to_text
|
|
||||||
from ansible.module_utils.basic import env_fallback
|
|
||||||
from ansible.module_utils.common.validation import check_type_dict
|
|
||||||
|
|
||||||
try:
|
|
||||||
from infoblox_client.connector import Connector
|
|
||||||
from infoblox_client.exceptions import InfobloxException
|
|
||||||
HAS_INFOBLOX_CLIENT = True
|
|
||||||
except ImportError:
|
|
||||||
HAS_INFOBLOX_CLIENT = False
|
|
||||||
|
|
||||||
# defining nios constants
|
|
||||||
NIOS_DNS_VIEW = 'view'
|
|
||||||
NIOS_NETWORK_VIEW = 'networkview'
|
|
||||||
NIOS_HOST_RECORD = 'record:host'
|
|
||||||
NIOS_IPV4_NETWORK = 'network'
|
|
||||||
NIOS_IPV6_NETWORK = 'ipv6network'
|
|
||||||
NIOS_ZONE = 'zone_auth'
|
|
||||||
NIOS_PTR_RECORD = 'record:ptr'
|
|
||||||
NIOS_A_RECORD = 'record:a'
|
|
||||||
NIOS_AAAA_RECORD = 'record:aaaa'
|
|
||||||
NIOS_CNAME_RECORD = 'record:cname'
|
|
||||||
NIOS_MX_RECORD = 'record:mx'
|
|
||||||
NIOS_SRV_RECORD = 'record:srv'
|
|
||||||
NIOS_NAPTR_RECORD = 'record:naptr'
|
|
||||||
NIOS_TXT_RECORD = 'record:txt'
|
|
||||||
NIOS_NSGROUP = 'nsgroup'
|
|
||||||
NIOS_IPV4_FIXED_ADDRESS = 'fixedaddress'
|
|
||||||
NIOS_IPV6_FIXED_ADDRESS = 'ipv6fixedaddress'
|
|
||||||
NIOS_NEXT_AVAILABLE_IP = 'func:nextavailableip'
|
|
||||||
NIOS_IPV4_NETWORK_CONTAINER = 'networkcontainer'
|
|
||||||
NIOS_IPV6_NETWORK_CONTAINER = 'ipv6networkcontainer'
|
|
||||||
NIOS_MEMBER = 'member'
|
|
||||||
|
|
||||||
NIOS_PROVIDER_SPEC = {
|
|
||||||
'host': dict(fallback=(env_fallback, ['INFOBLOX_HOST'])),
|
|
||||||
'username': dict(fallback=(env_fallback, ['INFOBLOX_USERNAME'])),
|
|
||||||
'password': dict(fallback=(env_fallback, ['INFOBLOX_PASSWORD']), no_log=True),
|
|
||||||
'validate_certs': dict(type='bool', default=False, fallback=(env_fallback, ['INFOBLOX_SSL_VERIFY']), aliases=['ssl_verify']),
|
|
||||||
'silent_ssl_warnings': dict(type='bool', default=True),
|
|
||||||
'http_request_timeout': dict(type='int', default=10, fallback=(env_fallback, ['INFOBLOX_HTTP_REQUEST_TIMEOUT'])),
|
|
||||||
'http_pool_connections': dict(type='int', default=10),
|
|
||||||
'http_pool_maxsize': dict(type='int', default=10),
|
|
||||||
'max_retries': dict(type='int', default=3, fallback=(env_fallback, ['INFOBLOX_MAX_RETRIES'])),
|
|
||||||
'wapi_version': dict(default='2.1', fallback=(env_fallback, ['INFOBLOX_WAP_VERSION'])),
|
|
||||||
'max_results': dict(type='int', default=1000, fallback=(env_fallback, ['INFOBLOX_MAX_RETRIES']))
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def get_connector(*args, **kwargs):
|
|
||||||
''' Returns an instance of infoblox_client.connector.Connector
|
|
||||||
:params args: positional arguments are silently ignored
|
|
||||||
:params kwargs: dict that is passed to Connector init
|
|
||||||
:returns: Connector
|
|
||||||
'''
|
|
||||||
if not HAS_INFOBLOX_CLIENT:
|
|
||||||
raise Exception('infoblox-client is required but does not appear '
|
|
||||||
'to be installed. It can be installed using the '
|
|
||||||
'command `pip install infoblox-client`')
|
|
||||||
|
|
||||||
if not set(kwargs.keys()).issubset(list(NIOS_PROVIDER_SPEC.keys()) + ['ssl_verify']):
|
|
||||||
raise Exception('invalid or unsupported keyword argument for connector')
|
|
||||||
for key, value in iteritems(NIOS_PROVIDER_SPEC):
|
|
||||||
if key not in kwargs:
|
|
||||||
# apply default values from NIOS_PROVIDER_SPEC since we cannot just
|
|
||||||
# assume the provider values are coming from AnsibleModule
|
|
||||||
if 'default' in value:
|
|
||||||
kwargs[key] = value['default']
|
|
||||||
|
|
||||||
# override any values with env variables unless they were
|
|
||||||
# explicitly set
|
|
||||||
env = ('INFOBLOX_%s' % key).upper()
|
|
||||||
if env in os.environ:
|
|
||||||
kwargs[key] = os.environ.get(env)
|
|
||||||
|
|
||||||
if 'validate_certs' in kwargs.keys():
|
|
||||||
kwargs['ssl_verify'] = kwargs['validate_certs']
|
|
||||||
kwargs.pop('validate_certs', None)
|
|
||||||
|
|
||||||
return Connector(kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
def normalize_extattrs(value):
|
|
||||||
''' Normalize extattrs field to expected format
|
|
||||||
The module accepts extattrs as key/value pairs. This method will
|
|
||||||
transform the key/value pairs into a structure suitable for
|
|
||||||
sending across WAPI in the format of:
|
|
||||||
extattrs: {
|
|
||||||
key: {
|
|
||||||
value: <value>
|
|
||||||
}
|
|
||||||
}
|
|
||||||
'''
|
|
||||||
return dict([(k, {'value': v}) for k, v in iteritems(value)])
|
|
||||||
|
|
||||||
|
|
||||||
def flatten_extattrs(value):
|
|
||||||
''' Flatten the key/value struct for extattrs
|
|
||||||
WAPI returns extattrs field as a dict in form of:
|
|
||||||
extattrs: {
|
|
||||||
key: {
|
|
||||||
value: <value>
|
|
||||||
}
|
|
||||||
}
|
|
||||||
This method will flatten the structure to:
|
|
||||||
extattrs: {
|
|
||||||
key: value
|
|
||||||
}
|
|
||||||
'''
|
|
||||||
return dict([(k, v['value']) for k, v in iteritems(value)])
|
|
||||||
|
|
||||||
|
|
||||||
def member_normalize(member_spec):
|
|
||||||
''' Transforms the member module arguments into a valid WAPI struct
|
|
||||||
This function will transform the arguments into a structure that
|
|
||||||
is a valid WAPI structure in the format of:
|
|
||||||
{
|
|
||||||
key: <value>,
|
|
||||||
}
|
|
||||||
It will remove any arguments that are set to None since WAPI will error on
|
|
||||||
that condition.
|
|
||||||
The remainder of the value validation is performed by WAPI
|
|
||||||
Some parameters in ib_spec are passed as a list in order to pass the validation for elements.
|
|
||||||
In this function, they are converted to dictionary.
|
|
||||||
'''
|
|
||||||
member_elements = ['vip_setting', 'ipv6_setting', 'lan2_port_setting', 'mgmt_port_setting',
|
|
||||||
'pre_provisioning', 'network_setting', 'v6_network_setting',
|
|
||||||
'ha_port_setting', 'lan_port_setting', 'lan2_physical_setting',
|
|
||||||
'lan_ha_port_setting', 'mgmt_network_setting', 'v6_mgmt_network_setting']
|
|
||||||
for key in list(member_spec.keys()):
|
|
||||||
if key in member_elements and member_spec[key] is not None:
|
|
||||||
member_spec[key] = member_spec[key][0]
|
|
||||||
if isinstance(member_spec[key], dict):
|
|
||||||
member_spec[key] = member_normalize(member_spec[key])
|
|
||||||
elif isinstance(member_spec[key], list):
|
|
||||||
for x in member_spec[key]:
|
|
||||||
if isinstance(x, dict):
|
|
||||||
x = member_normalize(x)
|
|
||||||
elif member_spec[key] is None:
|
|
||||||
del member_spec[key]
|
|
||||||
return member_spec
|
|
||||||
|
|
||||||
|
|
||||||
def normalize_ib_spec(ib_spec):
|
|
||||||
result = {}
|
|
||||||
for arg in ib_spec:
|
|
||||||
result[arg] = dict([(k, v)
|
|
||||||
for k, v in iteritems(ib_spec[arg])
|
|
||||||
if k not in ('ib_req', 'transform', 'update')])
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
class WapiBase(object):
|
|
||||||
''' Base class for implementing Infoblox WAPI API '''
|
|
||||||
provider_spec = {'provider': dict(type='dict', options=NIOS_PROVIDER_SPEC)}
|
|
||||||
|
|
||||||
def __init__(self, provider):
|
|
||||||
self.connector = get_connector(**provider)
|
|
||||||
|
|
||||||
def __getattr__(self, name):
|
|
||||||
try:
|
|
||||||
return self.__dict__[name]
|
|
||||||
except KeyError:
|
|
||||||
if name.startswith('_'):
|
|
||||||
raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
|
|
||||||
return partial(self._invoke_method, name)
|
|
||||||
|
|
||||||
def _invoke_method(self, name, *args, **kwargs):
|
|
||||||
try:
|
|
||||||
method = getattr(self.connector, name)
|
|
||||||
return method(*args, **kwargs)
|
|
||||||
except InfobloxException as exc:
|
|
||||||
if hasattr(self, 'handle_exception'):
|
|
||||||
self.handle_exception(name, exc)
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
|
|
||||||
|
|
||||||
class WapiLookup(WapiBase):
|
|
||||||
''' Implements WapiBase for lookup plugins '''
|
|
||||||
def handle_exception(self, method_name, exc):
|
|
||||||
if ('text' in exc.response):
|
|
||||||
raise Exception(exc.response['text'])
|
|
||||||
else:
|
|
||||||
raise Exception(exc)
|
|
||||||
|
|
||||||
|
|
||||||
class WapiInventory(WapiBase):
|
|
||||||
''' Implements WapiBase for dynamic inventory script '''
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class WapiModule(WapiBase):
|
|
||||||
''' Implements WapiBase for executing a NIOS module '''
|
|
||||||
def __init__(self, module):
|
|
||||||
self.module = module
|
|
||||||
provider = module.params['provider']
|
|
||||||
try:
|
|
||||||
super(WapiModule, self).__init__(provider)
|
|
||||||
except Exception as exc:
|
|
||||||
self.module.fail_json(msg=to_text(exc))
|
|
||||||
|
|
||||||
def handle_exception(self, method_name, exc):
|
|
||||||
''' Handles any exceptions raised
|
|
||||||
This method will be called if an InfobloxException is raised for
|
|
||||||
any call to the instance of Connector and also, in case of generic
|
|
||||||
exception. This method will then gracefully fail the module.
|
|
||||||
:args exc: instance of InfobloxException
|
|
||||||
'''
|
|
||||||
if ('text' in exc.response):
|
|
||||||
self.module.fail_json(
|
|
||||||
msg=exc.response['text'],
|
|
||||||
type=exc.response['Error'].split(':')[0],
|
|
||||||
code=exc.response.get('code'),
|
|
||||||
operation=method_name
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
self.module.fail_json(msg=to_native(exc))
|
|
||||||
|
|
||||||
def run(self, ib_obj_type, ib_spec):
|
|
||||||
''' Runs the module and performans configuration tasks
|
|
||||||
:args ib_obj_type: the WAPI object type to operate against
|
|
||||||
:args ib_spec: the specification for the WAPI object as a dict
|
|
||||||
:returns: a results dict
|
|
||||||
'''
|
|
||||||
|
|
||||||
update = new_name = None
|
|
||||||
state = self.module.params['state']
|
|
||||||
if state not in ('present', 'absent'):
|
|
||||||
self.module.fail_json(msg='state must be one of `present`, `absent`, got `%s`' % state)
|
|
||||||
|
|
||||||
result = {'changed': False}
|
|
||||||
|
|
||||||
obj_filter = dict([(k, self.module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')])
|
|
||||||
|
|
||||||
# get object reference
|
|
||||||
ib_obj_ref, update, new_name = self.get_object_ref(self.module, ib_obj_type, obj_filter, ib_spec)
|
|
||||||
proposed_object = {}
|
|
||||||
for key, value in iteritems(ib_spec):
|
|
||||||
if self.module.params[key] is not None:
|
|
||||||
if 'transform' in value:
|
|
||||||
proposed_object[key] = value['transform'](self.module)
|
|
||||||
else:
|
|
||||||
proposed_object[key] = self.module.params[key]
|
|
||||||
|
|
||||||
# If configure_by_dns is set to False and view is 'default', then delete the default dns
|
|
||||||
if not proposed_object.get('configure_for_dns') and proposed_object.get('view') == 'default'\
|
|
||||||
and ib_obj_type == NIOS_HOST_RECORD:
|
|
||||||
del proposed_object['view']
|
|
||||||
|
|
||||||
if ib_obj_ref:
|
|
||||||
if len(ib_obj_ref) > 1:
|
|
||||||
for each in ib_obj_ref:
|
|
||||||
# To check for existing A_record with same name with input A_record by IP
|
|
||||||
if each.get('ipv4addr') and each.get('ipv4addr') == proposed_object.get('ipv4addr'):
|
|
||||||
current_object = each
|
|
||||||
# To check for existing Host_record with same name with input Host_record by IP
|
|
||||||
elif each.get('ipv4addrs')[0].get('ipv4addr') and each.get('ipv4addrs')[0].get('ipv4addr')\
|
|
||||||
== proposed_object.get('ipv4addrs')[0].get('ipv4addr'):
|
|
||||||
current_object = each
|
|
||||||
# Else set the current_object with input value
|
|
||||||
else:
|
|
||||||
current_object = obj_filter
|
|
||||||
ref = None
|
|
||||||
else:
|
|
||||||
current_object = ib_obj_ref[0]
|
|
||||||
if 'extattrs' in current_object:
|
|
||||||
current_object['extattrs'] = flatten_extattrs(current_object['extattrs'])
|
|
||||||
if current_object.get('_ref'):
|
|
||||||
ref = current_object.pop('_ref')
|
|
||||||
else:
|
|
||||||
current_object = obj_filter
|
|
||||||
ref = None
|
|
||||||
# checks if the object type is member to normalize the attributes being passed
|
|
||||||
if (ib_obj_type == NIOS_MEMBER):
|
|
||||||
proposed_object = member_normalize(proposed_object)
|
|
||||||
|
|
||||||
# checks if the name's field has been updated
|
|
||||||
if update and new_name:
|
|
||||||
proposed_object['name'] = new_name
|
|
||||||
|
|
||||||
check_remove = []
|
|
||||||
if (ib_obj_type == NIOS_HOST_RECORD):
|
|
||||||
# this check is for idempotency, as if the same ip address shall be passed
|
|
||||||
# add param will be removed, and same exists true for remove case as well.
|
|
||||||
if 'ipv4addrs' in [current_object and proposed_object]:
|
|
||||||
for each in current_object['ipv4addrs']:
|
|
||||||
if each['ipv4addr'] == proposed_object['ipv4addrs'][0]['ipv4addr']:
|
|
||||||
if 'add' in proposed_object['ipv4addrs'][0]:
|
|
||||||
del proposed_object['ipv4addrs'][0]['add']
|
|
||||||
break
|
|
||||||
check_remove += each.values()
|
|
||||||
if proposed_object['ipv4addrs'][0]['ipv4addr'] not in check_remove:
|
|
||||||
if 'remove' in proposed_object['ipv4addrs'][0]:
|
|
||||||
del proposed_object['ipv4addrs'][0]['remove']
|
|
||||||
|
|
||||||
res = None
|
|
||||||
modified = not self.compare_objects(current_object, proposed_object)
|
|
||||||
if 'extattrs' in proposed_object:
|
|
||||||
proposed_object['extattrs'] = normalize_extattrs(proposed_object['extattrs'])
|
|
||||||
|
|
||||||
# Checks if nios_next_ip param is passed in ipv4addrs/ipv4addr args
|
|
||||||
proposed_object = self.check_if_nios_next_ip_exists(proposed_object)
|
|
||||||
|
|
||||||
if state == 'present':
|
|
||||||
if ref is None:
|
|
||||||
if not self.module.check_mode:
|
|
||||||
self.create_object(ib_obj_type, proposed_object)
|
|
||||||
result['changed'] = True
|
|
||||||
# Check if NIOS_MEMBER and the flag to call function create_token is set
|
|
||||||
elif (ib_obj_type == NIOS_MEMBER) and (proposed_object['create_token']):
|
|
||||||
proposed_object = None
|
|
||||||
# the function creates a token that can be used by a pre-provisioned member to join the grid
|
|
||||||
result['api_results'] = self.call_func('create_token', ref, proposed_object)
|
|
||||||
result['changed'] = True
|
|
||||||
elif modified:
|
|
||||||
if 'ipv4addrs' in proposed_object:
|
|
||||||
if ('add' not in proposed_object['ipv4addrs'][0]) and ('remove' not in proposed_object['ipv4addrs'][0]):
|
|
||||||
self.check_if_recordname_exists(obj_filter, ib_obj_ref, ib_obj_type, current_object, proposed_object)
|
|
||||||
|
|
||||||
if (ib_obj_type in (NIOS_HOST_RECORD, NIOS_NETWORK_VIEW, NIOS_DNS_VIEW)):
|
|
||||||
run_update = True
|
|
||||||
proposed_object = self.on_update(proposed_object, ib_spec)
|
|
||||||
if 'ipv4addrs' in proposed_object:
|
|
||||||
if ('add' or 'remove') in proposed_object['ipv4addrs'][0]:
|
|
||||||
run_update, proposed_object = self.check_if_add_remove_ip_arg_exists(proposed_object)
|
|
||||||
if run_update:
|
|
||||||
res = self.update_object(ref, proposed_object)
|
|
||||||
result['changed'] = True
|
|
||||||
else:
|
|
||||||
res = ref
|
|
||||||
if (ib_obj_type in (NIOS_A_RECORD, NIOS_AAAA_RECORD, NIOS_PTR_RECORD, NIOS_SRV_RECORD)):
|
|
||||||
# popping 'view' key as update of 'view' is not supported with respect to a:record/aaaa:record/srv:record/ptr:record
|
|
||||||
proposed_object = self.on_update(proposed_object, ib_spec)
|
|
||||||
del proposed_object['view']
|
|
||||||
if not self.module.check_mode:
|
|
||||||
res = self.update_object(ref, proposed_object)
|
|
||||||
result['changed'] = True
|
|
||||||
elif 'network_view' in proposed_object:
|
|
||||||
proposed_object.pop('network_view')
|
|
||||||
result['changed'] = True
|
|
||||||
if not self.module.check_mode and res is None:
|
|
||||||
proposed_object = self.on_update(proposed_object, ib_spec)
|
|
||||||
self.update_object(ref, proposed_object)
|
|
||||||
result['changed'] = True
|
|
||||||
|
|
||||||
elif state == 'absent':
|
|
||||||
if ref is not None:
|
|
||||||
if 'ipv4addrs' in proposed_object:
|
|
||||||
if 'remove' in proposed_object['ipv4addrs'][0]:
|
|
||||||
self.check_if_add_remove_ip_arg_exists(proposed_object)
|
|
||||||
self.update_object(ref, proposed_object)
|
|
||||||
result['changed'] = True
|
|
||||||
elif not self.module.check_mode:
|
|
||||||
self.delete_object(ref)
|
|
||||||
result['changed'] = True
|
|
||||||
|
|
||||||
return result
|
|
||||||
|
|
||||||
def check_if_recordname_exists(self, obj_filter, ib_obj_ref, ib_obj_type, current_object, proposed_object):
|
|
||||||
''' Send POST request if host record input name and retrieved ref name is same,
|
|
||||||
but input IP and retrieved IP is different'''
|
|
||||||
|
|
||||||
if 'name' in (obj_filter and ib_obj_ref[0]) and ib_obj_type == NIOS_HOST_RECORD:
|
|
||||||
obj_host_name = obj_filter['name']
|
|
||||||
ref_host_name = ib_obj_ref[0]['name']
|
|
||||||
if 'ipv4addrs' in (current_object and proposed_object):
|
|
||||||
current_ip_addr = current_object['ipv4addrs'][0]['ipv4addr']
|
|
||||||
proposed_ip_addr = proposed_object['ipv4addrs'][0]['ipv4addr']
|
|
||||||
elif 'ipv6addrs' in (current_object and proposed_object):
|
|
||||||
current_ip_addr = current_object['ipv6addrs'][0]['ipv6addr']
|
|
||||||
proposed_ip_addr = proposed_object['ipv6addrs'][0]['ipv6addr']
|
|
||||||
|
|
||||||
if obj_host_name == ref_host_name and current_ip_addr != proposed_ip_addr:
|
|
||||||
self.create_object(ib_obj_type, proposed_object)
|
|
||||||
|
|
||||||
def check_if_nios_next_ip_exists(self, proposed_object):
    ''' Check if nios_next_ip argument is passed in ipaddr while creating
    host record, if yes then format proposed object ipv4addrs and pass
    func:nextavailableip and ipaddr range to create hostrecord with next
    available ip in one call to avoid any race condition '''

    if 'ipv4addrs' in proposed_object:
        # Host records carry their addresses in a list of dicts; only the
        # first entry is inspected, mirroring how the module builds it.
        addr_entry = proposed_object['ipv4addrs'][0]
        if 'nios_next_ip' in addr_entry['ipv4addr']:
            ip_range = check_type_dict(addr_entry['ipv4addr'])['nios_next_ip']
            addr_entry['ipv4addr'] = '%s:%s' % (NIOS_NEXT_AVAILABLE_IP, ip_range)
    elif 'ipv4addr' in proposed_object:
        # Flat records (e.g. A records) keep the address directly on the
        # proposed object.
        if 'nios_next_ip' in proposed_object['ipv4addr']:
            ip_range = check_type_dict(proposed_object['ipv4addr'])['nios_next_ip']
            proposed_object['ipv4addr'] = '%s:%s' % (NIOS_NEXT_AVAILABLE_IP, ip_range)

    return proposed_object
|
|
||||||
def check_if_add_remove_ip_arg_exists(self, proposed_object):
    '''
    This function shall check if add/remove param is set to true and
    is passed in the args, then we will update the proposed dictionary
    to add/remove IP to existing host_record, if the user passes false
    param with the argument nothing shall be done.
    :returns: True if param is changed based on add/remove, and also the
        changed proposed_object.
    '''
    update = False
    # Map each request flag to the WAPI key suffix it translates to:
    # 'ipv4addrs+' appends addresses to an existing record, 'ipv4addrs-'
    # removes them. Only the first matching flag is honoured ('add' wins
    # over 'remove'), matching the original if/elif chain.
    for flag, suffix in (('add', '+'), ('remove', '-')):
        first_entry = proposed_object['ipv4addrs'][0]
        if flag in first_entry:
            # The flag itself must never reach the WAPI payload.
            requested = first_entry.pop(flag)
            if requested:
                proposed_object['ipv4addrs' + suffix] = proposed_object.pop('ipv4addrs')
                update = True
            break
    return update, proposed_object
|
||||||
def issubset(self, item, objects):
    ''' Checks if item is a subset of objects
    :args item: the subset item to validate
    :args objects: superset list of objects to validate against
    :returns: True if item is a subset of one entry in objects otherwise
        this method will return None
    '''
    for candidate in objects:
        if isinstance(item, dict):
            # Every key/value pair of item must appear in the candidate.
            matches = True
            for key, value in item.items():
                if key not in candidate or candidate[key] != value:
                    matches = False
                    break
            if matches:
                return True
        elif item in candidate:
            # Scalar case: plain containment check against the candidate.
            return True
    # Implicit None when no candidate matched (kept for compatibility).
|
|
||||||
def compare_objects(self, current_object, proposed_object):
    '''Return True when every proposed key/value is satisfied by the
    current object, False on the first mismatch.'''
    for key, proposed_item in iteritems(proposed_object):
        current_item = current_object.get(key)

        # A key present in proposed but missing from current means the
        # objects cannot be equal; bail out immediately.
        if current_item is None:
            return False

        if isinstance(proposed_item, list):
            # Aliases are order-insensitive, so compare them as sets first.
            if key == 'aliases' and set(current_item) != set(proposed_item):
                return False
            # Each proposed element must be a subset of some current entry.
            if not all(self.issubset(element, current_item) for element in proposed_item):
                return False
        elif isinstance(proposed_item, dict):
            # NOTE(review): the recursion result is returned directly, so
            # keys after the first nested dict are never compared — kept
            # as-is to preserve the original behaviour.
            return self.compare_objects(current_item, proposed_item)
        else:
            if proposed_item != current_item:
                return False

    return True
|
|
||||||
def get_object_ref(self, module, ib_obj_type, obj_filter, ib_spec):
    ''' this function gets the reference object of pre-existing nios objects

    :args module: the AnsibleModule instance (unused here but part of the
        established call signature)
    :args ib_obj_type: WAPI object type string (e.g. NIOS_HOST_RECORD)
    :args obj_filter: filter dict built from the module parameters
    :args ib_spec: argument spec describing the object's fields
    :returns: tuple of (ib_obj, update, new_name)
    '''

    update = False
    old_name = new_name = None
    if 'name' in obj_filter:
        # gets and returns the current object based on name/old_name passed
        # BUGFIX: default 'name' up front so it can never be unbound below
        # (originally it was only assigned inside the except branch).
        name = obj_filter['name']
        try:
            name_obj = check_type_dict(obj_filter['name'])
            old_name = name_obj['old_name']
            new_name = name_obj['new_name']
        except TypeError:
            name = obj_filter['name']

        if old_name and new_name:
            if ib_obj_type == NIOS_HOST_RECORD:
                test_obj_filter = dict([('name', old_name), ('view', obj_filter['view'])])
            elif ib_obj_type in (NIOS_AAAA_RECORD, NIOS_A_RECORD):
                test_obj_filter = obj_filter
            else:
                test_obj_filter = dict([('name', old_name)])
            # get the object reference for the old name first
            ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=list(ib_spec.keys()))
            if ib_obj:
                obj_filter['name'] = new_name
            else:
                # old name not found: look the object up under the new name
                test_obj_filter['name'] = new_name
                ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=list(ib_spec.keys()))
            update = True
            return ib_obj, update, new_name

        if ib_obj_type == NIOS_HOST_RECORD:
            # to check only by name if dns bypassing is set
            if not obj_filter['configure_for_dns']:
                test_obj_filter = dict([('name', name)])
            else:
                test_obj_filter = dict([('name', name), ('view', obj_filter['view'])])
        # BUGFIX: the original condition relied on operator precedence
        # ('A or B and C' parses as 'A or (B and C)'), so an ipv4 fixed
        # address without a 'mac' key still entered this branch and raised
        # KeyError. Group the type checks explicitly so 'mac' is required
        # for both address families.
        elif (ib_obj_type == NIOS_IPV4_FIXED_ADDRESS or ib_obj_type == NIOS_IPV6_FIXED_ADDRESS) and 'mac' in obj_filter:
            test_obj_filter = dict([['mac', obj_filter['mac']]])
        elif ib_obj_type == NIOS_A_RECORD:
            # resolves issue where a_record with uppercase name was returning null and was failing
            test_obj_filter = obj_filter
            test_obj_filter['name'] = test_obj_filter['name'].lower()
            # resolves issue where multiple a_records with same name and different IP address
            try:
                ipaddr_obj = check_type_dict(obj_filter['ipv4addr'])
                ipaddr = ipaddr_obj['old_ipv4addr']
            except TypeError:
                ipaddr = obj_filter['ipv4addr']
            test_obj_filter['ipv4addr'] = ipaddr
        elif ib_obj_type == NIOS_TXT_RECORD:
            # resolves issue where multiple txt_records with same name and different text
            test_obj_filter = obj_filter
            try:
                text_obj = check_type_dict(obj_filter['text'])
                txt = text_obj['old_text']
            except TypeError:
                txt = obj_filter['text']
            test_obj_filter['text'] = txt
        # check if test_obj_filter is empty copy passed obj_filter
        else:
            test_obj_filter = obj_filter
        ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=list(ib_spec.keys()))
    elif ib_obj_type == NIOS_A_RECORD:
        # resolves issue where multiple a_records with same name and different IP address
        test_obj_filter = obj_filter
        try:
            ipaddr_obj = check_type_dict(obj_filter['ipv4addr'])
            ipaddr = ipaddr_obj['old_ipv4addr']
        except TypeError:
            ipaddr = obj_filter['ipv4addr']
        test_obj_filter['ipv4addr'] = ipaddr
        ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=list(ib_spec.keys()))
    elif ib_obj_type == NIOS_TXT_RECORD:
        # resolves issue where multiple txt_records with same name and different text
        test_obj_filter = obj_filter
        try:
            text_obj = check_type_dict(obj_filter['text'])
            txt = text_obj['old_text']
        except TypeError:
            txt = obj_filter['text']
        test_obj_filter['text'] = txt
        ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=list(ib_spec.keys()))
    elif ib_obj_type == NIOS_ZONE:
        # del key 'restart_if_needed' as nios_zone get_object fails with the key present
        temp = ib_spec['restart_if_needed']
        del ib_spec['restart_if_needed']
        ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=list(ib_spec.keys()))
        # reinstate restart_if_needed if ib_obj is none, meaning there's no existing nios_zone ref
        # NOTE(review): when the zone DOES exist the key stays removed from
        # ib_spec — preserved as-is since later update logic may depend on it.
        if not ib_obj:
            ib_spec['restart_if_needed'] = temp
    elif ib_obj_type == NIOS_MEMBER:
        # del key 'create_token' as nios_member get_object fails with the key present
        temp = ib_spec['create_token']
        del ib_spec['create_token']
        ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=list(ib_spec.keys()))
        if temp:
            # reinstate 'create_token' key
            ib_spec['create_token'] = temp
    else:
        ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=list(ib_spec.keys()))
    return ib_obj, update, new_name
||||||
|
|
||||||
def on_update(self, proposed_object, ib_spec):
    ''' Event called before the update is sent to the API endpoint

    This method will allow the final proposed object to be changed
    and/or keys filtered before it is sent to the API endpoint to
    be processed.
    :args proposed_object: A dict item that will be encoded and sent
        the API endpoint with the updated data structure
    :returns: updated object to be sent to API endpoint
    '''
    # Fields whose spec sets update=False must not be part of the
    # update payload; everything else passes through unchanged.
    excluded = set(
        key for key, value in iteritems(proposed_object)
        if not ib_spec[key].get('update', True)
    )
    return dict((k, v) for k, v in iteritems(proposed_object) if k not in excluded)
|
|
||||||
@@ -54,6 +54,17 @@ def proxmox_to_ansible_bool(value):
|
|||||||
return True if value == 1 else False
|
return True if value == 1 else False
|
||||||
|
|
||||||
|
|
||||||
|
def ansible_to_proxmox_bool(value):
|
||||||
|
'''Convert Ansible representation of a boolean to be proxmox-friendly'''
|
||||||
|
if value is None:
|
||||||
|
return None
|
||||||
|
|
||||||
|
if not isinstance(value, bool):
|
||||||
|
raise ValueError("%s must be of type bool not %s" % (value, type(value)))
|
||||||
|
|
||||||
|
return 1 if value else 0
|
||||||
|
|
||||||
|
|
||||||
class ProxmoxAnsible(object):
|
class ProxmoxAnsible(object):
|
||||||
"""Base class for Proxmox modules"""
|
"""Base class for Proxmox modules"""
|
||||||
def __init__(self, module):
|
def __init__(self, module):
|
||||||
|
|||||||
@@ -1834,12 +1834,16 @@ class RedfishUtils(object):
|
|||||||
result['ret'] = True
|
result['ret'] = True
|
||||||
data = response['data']
|
data = response['data']
|
||||||
|
|
||||||
for device in data[u'Fans']:
|
# Checking if fans are present
|
||||||
fan = {}
|
if u'Fans' in data:
|
||||||
for property in properties:
|
for device in data[u'Fans']:
|
||||||
if property in device:
|
fan = {}
|
||||||
fan[property] = device[property]
|
for property in properties:
|
||||||
fan_results.append(fan)
|
if property in device:
|
||||||
|
fan[property] = device[property]
|
||||||
|
fan_results.append(fan)
|
||||||
|
else:
|
||||||
|
return {'ret': False, 'msg': "No Fans present"}
|
||||||
result["entries"] = fan_results
|
result["entries"] = fan_results
|
||||||
return result
|
return result
|
||||||
|
|
||||||
@@ -2029,15 +2033,28 @@ class RedfishUtils(object):
|
|||||||
def get_multi_memory_inventory(self):
|
def get_multi_memory_inventory(self):
|
||||||
return self.aggregate_systems(self.get_memory_inventory)
|
return self.aggregate_systems(self.get_memory_inventory)
|
||||||
|
|
||||||
|
def get_nic(self, resource_uri):
|
||||||
|
result = {}
|
||||||
|
properties = ['Name', 'Id', 'Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses',
|
||||||
|
'NameServers', 'MACAddress', 'PermanentMACAddress',
|
||||||
|
'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status']
|
||||||
|
response = self.get_request(self.root_uri + resource_uri)
|
||||||
|
if response['ret'] is False:
|
||||||
|
return response
|
||||||
|
result['ret'] = True
|
||||||
|
data = response['data']
|
||||||
|
nic = {}
|
||||||
|
for property in properties:
|
||||||
|
if property in data:
|
||||||
|
nic[property] = data[property]
|
||||||
|
result['entries'] = nic
|
||||||
|
return(result)
|
||||||
|
|
||||||
def get_nic_inventory(self, resource_uri):
|
def get_nic_inventory(self, resource_uri):
|
||||||
result = {}
|
result = {}
|
||||||
nic_list = []
|
nic_list = []
|
||||||
nic_results = []
|
nic_results = []
|
||||||
key = "EthernetInterfaces"
|
key = "EthernetInterfaces"
|
||||||
# Get these entries, but does not fail if not found
|
|
||||||
properties = ['Name', 'Id', 'Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses',
|
|
||||||
'NameServers', 'MACAddress', 'PermanentMACAddress',
|
|
||||||
'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status']
|
|
||||||
|
|
||||||
response = self.get_request(self.root_uri + resource_uri)
|
response = self.get_request(self.root_uri + resource_uri)
|
||||||
if response['ret'] is False:
|
if response['ret'] is False:
|
||||||
@@ -2061,18 +2078,9 @@ class RedfishUtils(object):
|
|||||||
nic_list.append(nic[u'@odata.id'])
|
nic_list.append(nic[u'@odata.id'])
|
||||||
|
|
||||||
for n in nic_list:
|
for n in nic_list:
|
||||||
nic = {}
|
nic = self.get_nic(n)
|
||||||
uri = self.root_uri + n
|
if nic['ret']:
|
||||||
response = self.get_request(uri)
|
nic_results.append(nic['entries'])
|
||||||
if response['ret'] is False:
|
|
||||||
return response
|
|
||||||
data = response['data']
|
|
||||||
|
|
||||||
for property in properties:
|
|
||||||
if property in data:
|
|
||||||
nic[property] = data[property]
|
|
||||||
|
|
||||||
nic_results.append(nic)
|
|
||||||
result["entries"] = nic_results
|
result["entries"] = nic_results
|
||||||
return result
|
return result
|
||||||
|
|
||||||
@@ -2697,39 +2705,14 @@ class RedfishUtils(object):
|
|||||||
return self.aggregate_managers(self.get_manager_health_report)
|
return self.aggregate_managers(self.get_manager_health_report)
|
||||||
|
|
||||||
def set_manager_nic(self, nic_addr, nic_config):
|
def set_manager_nic(self, nic_addr, nic_config):
|
||||||
# Get EthernetInterface collection
|
# Get the manager ethernet interface uri
|
||||||
response = self.get_request(self.root_uri + self.manager_uri)
|
nic_info = self.get_manager_ethernet_uri(nic_addr)
|
||||||
if response['ret'] is False:
|
|
||||||
return response
|
|
||||||
data = response['data']
|
|
||||||
if 'EthernetInterfaces' not in data:
|
|
||||||
return {'ret': False, 'msg': "EthernetInterfaces resource not found"}
|
|
||||||
ethernetinterfaces_uri = data["EthernetInterfaces"]["@odata.id"]
|
|
||||||
response = self.get_request(self.root_uri + ethernetinterfaces_uri)
|
|
||||||
if response['ret'] is False:
|
|
||||||
return response
|
|
||||||
data = response['data']
|
|
||||||
uris = [a.get('@odata.id') for a in data.get('Members', []) if
|
|
||||||
a.get('@odata.id')]
|
|
||||||
|
|
||||||
# Find target EthernetInterface
|
if nic_info.get('nic_addr') is None:
|
||||||
target_ethernet_uri = None
|
return nic_info
|
||||||
target_ethernet_current_setting = None
|
else:
|
||||||
if nic_addr == 'null':
|
target_ethernet_uri = nic_info['nic_addr']
|
||||||
# Find root_uri matched EthernetInterface when nic_addr is not specified
|
target_ethernet_current_setting = nic_info['ethernet_setting']
|
||||||
nic_addr = (self.root_uri).split('/')[-1]
|
|
||||||
nic_addr = nic_addr.split(':')[0] # split port if existing
|
|
||||||
for uri in uris:
|
|
||||||
response = self.get_request(self.root_uri + uri)
|
|
||||||
if response['ret'] is False:
|
|
||||||
return response
|
|
||||||
data = response['data']
|
|
||||||
if '"' + nic_addr.lower() + '"' in str(data).lower() or "'" + nic_addr.lower() + "'" in str(data).lower():
|
|
||||||
target_ethernet_uri = uri
|
|
||||||
target_ethernet_current_setting = data
|
|
||||||
break
|
|
||||||
if target_ethernet_uri is None:
|
|
||||||
return {'ret': False, 'msg': "No matched EthernetInterface found under Manager"}
|
|
||||||
|
|
||||||
# Convert input to payload and check validity
|
# Convert input to payload and check validity
|
||||||
payload = {}
|
payload = {}
|
||||||
@@ -2792,3 +2775,208 @@ class RedfishUtils(object):
|
|||||||
if response['ret'] is False:
|
if response['ret'] is False:
|
||||||
return response
|
return response
|
||||||
return {'ret': True, 'changed': True, 'msg': "Modified Manager NIC"}
|
return {'ret': True, 'changed': True, 'msg': "Modified Manager NIC"}
|
||||||
|
|
||||||
|
# A helper function to get the EthernetInterface URI
|
||||||
|
def get_manager_ethernet_uri(self, nic_addr='null'):
|
||||||
|
# Get EthernetInterface collection
|
||||||
|
response = self.get_request(self.root_uri + self.manager_uri)
|
||||||
|
if not response['ret']:
|
||||||
|
return response
|
||||||
|
data = response['data']
|
||||||
|
if 'EthernetInterfaces' not in data:
|
||||||
|
return {'ret': False, 'msg': "EthernetInterfaces resource not found"}
|
||||||
|
ethernetinterfaces_uri = data["EthernetInterfaces"]["@odata.id"]
|
||||||
|
response = self.get_request(self.root_uri + ethernetinterfaces_uri)
|
||||||
|
if not response['ret']:
|
||||||
|
return response
|
||||||
|
data = response['data']
|
||||||
|
uris = [a.get('@odata.id') for a in data.get('Members', []) if
|
||||||
|
a.get('@odata.id')]
|
||||||
|
|
||||||
|
# Find target EthernetInterface
|
||||||
|
target_ethernet_uri = None
|
||||||
|
target_ethernet_current_setting = None
|
||||||
|
if nic_addr == 'null':
|
||||||
|
# Find root_uri matched EthernetInterface when nic_addr is not specified
|
||||||
|
nic_addr = (self.root_uri).split('/')[-1]
|
||||||
|
nic_addr = nic_addr.split(':')[0] # split port if existing
|
||||||
|
for uri in uris:
|
||||||
|
response = self.get_request(self.root_uri + uri)
|
||||||
|
if not response['ret']:
|
||||||
|
return response
|
||||||
|
data = response['data']
|
||||||
|
data_string = json.dumps(data)
|
||||||
|
if nic_addr.lower() in data_string.lower():
|
||||||
|
target_ethernet_uri = uri
|
||||||
|
target_ethernet_current_setting = data
|
||||||
|
break
|
||||||
|
|
||||||
|
nic_info = {}
|
||||||
|
nic_info['nic_addr'] = target_ethernet_uri
|
||||||
|
nic_info['ethernet_setting'] = target_ethernet_current_setting
|
||||||
|
|
||||||
|
if target_ethernet_uri is None:
|
||||||
|
return {}
|
||||||
|
else:
|
||||||
|
return nic_info
|
||||||
|
|
||||||
|
def set_hostinterface_attributes(self, hostinterface_config, hostinterface_id=None):
|
||||||
|
response = self.get_request(self.root_uri + self.manager_uri)
|
||||||
|
if response['ret'] is False:
|
||||||
|
return response
|
||||||
|
data = response['data']
|
||||||
|
if 'HostInterfaces' not in data:
|
||||||
|
return {'ret': False, 'msg': "HostInterfaces resource not found"}
|
||||||
|
|
||||||
|
hostinterfaces_uri = data["HostInterfaces"]["@odata.id"]
|
||||||
|
response = self.get_request(self.root_uri + hostinterfaces_uri)
|
||||||
|
if response['ret'] is False:
|
||||||
|
return response
|
||||||
|
data = response['data']
|
||||||
|
uris = [a.get('@odata.id') for a in data.get('Members', []) if a.get('@odata.id')]
|
||||||
|
# Capture list of URIs that match a specified HostInterface resource ID
|
||||||
|
if hostinterface_id:
|
||||||
|
matching_hostinterface_uris = [uri for uri in uris if hostinterface_id in uri.split('/')[-1]]
|
||||||
|
|
||||||
|
if hostinterface_id and matching_hostinterface_uris:
|
||||||
|
hostinterface_uri = list.pop(matching_hostinterface_uris)
|
||||||
|
elif hostinterface_id and not matching_hostinterface_uris:
|
||||||
|
return {'ret': False, 'msg': "HostInterface ID %s not present." % hostinterface_id}
|
||||||
|
elif len(uris) == 1:
|
||||||
|
hostinterface_uri = list.pop(uris)
|
||||||
|
else:
|
||||||
|
return {'ret': False, 'msg': "HostInterface ID not defined and multiple interfaces detected."}
|
||||||
|
|
||||||
|
response = self.get_request(self.root_uri + hostinterface_uri)
|
||||||
|
if response['ret'] is False:
|
||||||
|
return response
|
||||||
|
current_hostinterface_config = response['data']
|
||||||
|
payload = {}
|
||||||
|
for property in hostinterface_config.keys():
|
||||||
|
value = hostinterface_config[property]
|
||||||
|
if property not in current_hostinterface_config:
|
||||||
|
return {'ret': False, 'msg': "Property %s in hostinterface_config is invalid" % property}
|
||||||
|
if isinstance(value, dict):
|
||||||
|
if isinstance(current_hostinterface_config[property], dict):
|
||||||
|
payload[property] = value
|
||||||
|
elif isinstance(current_hostinterface_config[property], list):
|
||||||
|
payload[property] = list()
|
||||||
|
payload[property].append(value)
|
||||||
|
else:
|
||||||
|
return {'ret': False, 'msg': "Value of property %s in hostinterface_config is invalid" % property}
|
||||||
|
else:
|
||||||
|
payload[property] = value
|
||||||
|
|
||||||
|
need_change = False
|
||||||
|
for property in payload.keys():
|
||||||
|
set_value = payload[property]
|
||||||
|
cur_value = current_hostinterface_config[property]
|
||||||
|
if not isinstance(set_value, dict) and not isinstance(set_value, list):
|
||||||
|
if set_value != cur_value:
|
||||||
|
need_change = True
|
||||||
|
if isinstance(set_value, dict):
|
||||||
|
for subprop in payload[property].keys():
|
||||||
|
if subprop not in current_hostinterface_config[property]:
|
||||||
|
need_change = True
|
||||||
|
break
|
||||||
|
sub_set_value = payload[property][subprop]
|
||||||
|
sub_cur_value = current_hostinterface_config[property][subprop]
|
||||||
|
if sub_set_value != sub_cur_value:
|
||||||
|
need_change = True
|
||||||
|
if isinstance(set_value, list):
|
||||||
|
if len(set_value) != len(cur_value):
|
||||||
|
need_change = True
|
||||||
|
continue
|
||||||
|
for i in range(len(set_value)):
|
||||||
|
for subprop in payload[property][i].keys():
|
||||||
|
if subprop not in current_hostinterface_config[property][i]:
|
||||||
|
need_change = True
|
||||||
|
break
|
||||||
|
sub_set_value = payload[property][i][subprop]
|
||||||
|
sub_cur_value = current_hostinterface_config[property][i][subprop]
|
||||||
|
if sub_set_value != sub_cur_value:
|
||||||
|
need_change = True
|
||||||
|
if not need_change:
|
||||||
|
return {'ret': True, 'changed': False, 'msg': "Host Interface already configured"}
|
||||||
|
|
||||||
|
response = self.patch_request(self.root_uri + hostinterface_uri, payload)
|
||||||
|
if response['ret'] is False:
|
||||||
|
return response
|
||||||
|
return {'ret': True, 'changed': True, 'msg': "Modified Host Interface"}
|
||||||
|
|
||||||
|
def get_hostinterfaces(self):
|
||||||
|
result = {}
|
||||||
|
hostinterface_results = []
|
||||||
|
properties = ['Id', 'Name', 'Description', 'HostInterfaceType', 'Status',
|
||||||
|
'InterfaceEnabled', 'ExternallyAccessible', 'AuthenticationModes',
|
||||||
|
'AuthNoneRoleId', 'CredentialBootstrapping']
|
||||||
|
manager_uri_list = self.manager_uris
|
||||||
|
for manager_uri in manager_uri_list:
|
||||||
|
response = self.get_request(self.root_uri + manager_uri)
|
||||||
|
if response['ret'] is False:
|
||||||
|
return response
|
||||||
|
|
||||||
|
result['ret'] = True
|
||||||
|
data = response['data']
|
||||||
|
|
||||||
|
if 'HostInterfaces' in data:
|
||||||
|
hostinterfaces_uri = data[u'HostInterfaces'][u'@odata.id']
|
||||||
|
else:
|
||||||
|
continue
|
||||||
|
|
||||||
|
response = self.get_request(self.root_uri + hostinterfaces_uri)
|
||||||
|
data = response['data']
|
||||||
|
|
||||||
|
if 'Members' in data:
|
||||||
|
for hostinterface in data['Members']:
|
||||||
|
hostinterface_uri = hostinterface['@odata.id']
|
||||||
|
hostinterface_response = self.get_request(self.root_uri + hostinterface_uri)
|
||||||
|
# dictionary for capturing individual HostInterface properties
|
||||||
|
hostinterface_data_temp = {}
|
||||||
|
if hostinterface_response['ret'] is False:
|
||||||
|
return hostinterface_response
|
||||||
|
hostinterface_data = hostinterface_response['data']
|
||||||
|
for property in properties:
|
||||||
|
if property in hostinterface_data:
|
||||||
|
if hostinterface_data[property] is not None:
|
||||||
|
hostinterface_data_temp[property] = hostinterface_data[property]
|
||||||
|
# Check for the presence of a ManagerEthernetInterface
|
||||||
|
# object, a link to a _single_ EthernetInterface that the
|
||||||
|
# BMC uses to communicate with the host.
|
||||||
|
if 'ManagerEthernetInterface' in hostinterface_data:
|
||||||
|
interface_uri = hostinterface_data['ManagerEthernetInterface']['@odata.id']
|
||||||
|
interface_response = self.get_nic(interface_uri)
|
||||||
|
if interface_response['ret'] is False:
|
||||||
|
return interface_response
|
||||||
|
hostinterface_data_temp['ManagerEthernetInterface'] = interface_response['entries']
|
||||||
|
|
||||||
|
# Check for the presence of a HostEthernetInterfaces
|
||||||
|
# object, a link to a _collection_ of EthernetInterfaces
|
||||||
|
# that the host uses to communicate with the BMC.
|
||||||
|
if 'HostEthernetInterfaces' in hostinterface_data:
|
||||||
|
interfaces_uri = hostinterface_data['HostEthernetInterfaces']['@odata.id']
|
||||||
|
interfaces_response = self.get_request(self.root_uri + interfaces_uri)
|
||||||
|
if interfaces_response['ret'] is False:
|
||||||
|
return interfaces_response
|
||||||
|
interfaces_data = interfaces_response['data']
|
||||||
|
if 'Members' in interfaces_data:
|
||||||
|
for interface in interfaces_data['Members']:
|
||||||
|
interface_uri = interface['@odata.id']
|
||||||
|
interface_response = self.get_nic(interface_uri)
|
||||||
|
if interface_response['ret'] is False:
|
||||||
|
return interface_response
|
||||||
|
# Check if this is the first
|
||||||
|
# HostEthernetInterfaces item and create empty
|
||||||
|
# list if so.
|
||||||
|
if 'HostEthernetInterfaces' not in hostinterface_data_temp:
|
||||||
|
hostinterface_data_temp['HostEthernetInterfaces'] = []
|
||||||
|
|
||||||
|
hostinterface_data_temp['HostEthernetInterfaces'].append(interface_response['entries'])
|
||||||
|
|
||||||
|
hostinterface_results.append(hostinterface_data_temp)
|
||||||
|
else:
|
||||||
|
continue
|
||||||
|
result["entries"] = hostinterface_results
|
||||||
|
if not result["entries"]:
|
||||||
|
return {'ret': False, 'msg': "No HostInterface objects found"}
|
||||||
|
return result
|
||||||
|
|||||||
@@ -15,13 +15,6 @@ from ansible.module_utils.urls import fetch_url, basic_auth_header
|
|||||||
class BitbucketHelper:
|
class BitbucketHelper:
|
||||||
BITBUCKET_API_URL = 'https://api.bitbucket.org'
|
BITBUCKET_API_URL = 'https://api.bitbucket.org'
|
||||||
|
|
||||||
error_messages = {
|
|
||||||
'required_client_id': '`client_id` must be specified as a parameter or '
|
|
||||||
'BITBUCKET_CLIENT_ID environment variable',
|
|
||||||
'required_client_secret': '`client_secret` must be specified as a parameter or '
|
|
||||||
'BITBUCKET_CLIENT_SECRET environment variable',
|
|
||||||
}
|
|
||||||
|
|
||||||
def __init__(self, module):
|
def __init__(self, module):
|
||||||
self.module = module
|
self.module = module
|
||||||
self.access_token = None
|
self.access_token = None
|
||||||
@@ -29,35 +22,40 @@ class BitbucketHelper:
|
|||||||
@staticmethod
|
@staticmethod
|
||||||
def bitbucket_argument_spec():
|
def bitbucket_argument_spec():
|
||||||
return dict(
|
return dict(
|
||||||
client_id=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_CLIENT_ID'])),
|
client_id=dict(type='str', fallback=(env_fallback, ['BITBUCKET_CLIENT_ID'])),
|
||||||
client_secret=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_CLIENT_SECRET'])),
|
client_secret=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_CLIENT_SECRET'])),
|
||||||
|
# TODO:
|
||||||
|
# - Rename user to username once current usage of username is removed
|
||||||
|
# - Alias user to username and deprecate it
|
||||||
|
user=dict(type='str', fallback=(env_fallback, ['BITBUCKET_USERNAME'])),
|
||||||
|
password=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_PASSWORD'])),
|
||||||
)
|
)
|
||||||
|
|
||||||
def check_arguments(self):
|
@staticmethod
|
||||||
if self.module.params['client_id'] is None:
|
def bitbucket_required_one_of():
|
||||||
self.module.fail_json(msg=self.error_messages['required_client_id'])
|
return [['client_id', 'client_secret', 'user', 'password']]
|
||||||
|
|
||||||
if self.module.params['client_secret'] is None:
|
@staticmethod
|
||||||
self.module.fail_json(msg=self.error_messages['required_client_secret'])
|
def bitbucket_required_together():
|
||||||
|
return [['client_id', 'client_secret'], ['user', 'password']]
|
||||||
|
|
||||||
def fetch_access_token(self):
|
def fetch_access_token(self):
|
||||||
self.check_arguments()
|
if self.module.params['client_id'] and self.module.params['client_secret']:
|
||||||
|
headers = {
|
||||||
|
'Authorization': basic_auth_header(self.module.params['client_id'], self.module.params['client_secret']),
|
||||||
|
}
|
||||||
|
|
||||||
headers = {
|
info, content = self.request(
|
||||||
'Authorization': basic_auth_header(self.module.params['client_id'], self.module.params['client_secret'])
|
api_url='https://bitbucket.org/site/oauth2/access_token',
|
||||||
}
|
method='POST',
|
||||||
|
data='grant_type=client_credentials',
|
||||||
|
headers=headers,
|
||||||
|
)
|
||||||
|
|
||||||
info, content = self.request(
|
if info['status'] == 200:
|
||||||
api_url='https://bitbucket.org/site/oauth2/access_token',
|
self.access_token = content['access_token']
|
||||||
method='POST',
|
else:
|
||||||
data='grant_type=client_credentials',
|
self.module.fail_json(msg='Failed to retrieve access token: {0}'.format(info))
|
||||||
headers=headers,
|
|
||||||
)
|
|
||||||
|
|
||||||
if info['status'] == 200:
|
|
||||||
self.access_token = content['access_token']
|
|
||||||
else:
|
|
||||||
self.module.fail_json(msg='Failed to retrieve access token: {0}'.format(info))
|
|
||||||
|
|
||||||
def request(self, api_url, method, data=None, headers=None):
|
def request(self, api_url, method, data=None, headers=None):
|
||||||
headers = headers or {}
|
headers = headers or {}
|
||||||
@@ -66,6 +64,10 @@ class BitbucketHelper:
|
|||||||
headers.update({
|
headers.update({
|
||||||
'Authorization': 'Bearer {0}'.format(self.access_token),
|
'Authorization': 'Bearer {0}'.format(self.access_token),
|
||||||
})
|
})
|
||||||
|
elif self.module.params['user'] and self.module.params['password']:
|
||||||
|
headers.update({
|
||||||
|
'Authorization': basic_auth_header(self.module.params['user'], self.module.params['password']),
|
||||||
|
})
|
||||||
|
|
||||||
if isinstance(data, dict):
|
if isinstance(data, dict):
|
||||||
data = self.module.jsonify(data)
|
data = self.module.jsonify(data)
|
||||||
|
|||||||
17
plugins/module_utils/version.py
Normal file
17
plugins/module_utils/version.py
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright: (c) 2021, Felix Fontein <felix@fontein.de>
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
"""Provide version object to compare version numbers."""
|
||||||
|
|
||||||
|
from __future__ import absolute_import, division, print_function
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
|
||||||
|
# Once we drop support for Ansible 2.9, ansible-base 2.10, and ansible-core 2.11, we can
|
||||||
|
# remove the _version.py file, and replace the following import by
|
||||||
|
#
|
||||||
|
# from ansible.module_utils.compat.version import LooseVersion
|
||||||
|
|
||||||
|
from ._version import LooseVersion
|
||||||
1
plugins/modules/cargo.py
Symbolic link
1
plugins/modules/cargo.py
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
packaging/language/cargo.py
|
||||||
@@ -120,7 +120,7 @@ __version__ = '${version}'
|
|||||||
import os
|
import os
|
||||||
import traceback
|
import traceback
|
||||||
|
|
||||||
from distutils.version import LooseVersion
|
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||||
|
|
||||||
REQUESTS_IMP_ERR = None
|
REQUESTS_IMP_ERR = None
|
||||||
try:
|
try:
|
||||||
|
|||||||
@@ -161,7 +161,8 @@ __version__ = '${version}'
|
|||||||
import json
|
import json
|
||||||
import os
|
import os
|
||||||
import traceback
|
import traceback
|
||||||
from distutils.version import LooseVersion
|
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||||
|
|
||||||
REQUESTS_IMP_ERR = None
|
REQUESTS_IMP_ERR = None
|
||||||
try:
|
try:
|
||||||
|
|||||||
@@ -89,7 +89,8 @@ __version__ = '${version}'
|
|||||||
|
|
||||||
import os
|
import os
|
||||||
import traceback
|
import traceback
|
||||||
from distutils.version import LooseVersion
|
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||||
|
|
||||||
REQUESTS_IMP_ERR = None
|
REQUESTS_IMP_ERR = None
|
||||||
try:
|
try:
|
||||||
@@ -132,8 +133,7 @@ class ClcBlueprintPackage:
|
|||||||
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
||||||
if not REQUESTS_FOUND:
|
if not REQUESTS_FOUND:
|
||||||
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
||||||
if requests.__version__ and LooseVersion(
|
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
|
||||||
requests.__version__) < LooseVersion('2.5.0'):
|
|
||||||
self.module.fail_json(
|
self.module.fail_json(
|
||||||
msg='requests library version should be >= 2.5.0')
|
msg='requests library version should be >= 2.5.0')
|
||||||
|
|
||||||
|
|||||||
@@ -162,7 +162,8 @@ import os
|
|||||||
import traceback
|
import traceback
|
||||||
from ansible.module_utils.six.moves.urllib.parse import urlparse
|
from ansible.module_utils.six.moves.urllib.parse import urlparse
|
||||||
from time import sleep
|
from time import sleep
|
||||||
from distutils.version import LooseVersion
|
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||||
|
|
||||||
REQUESTS_IMP_ERR = None
|
REQUESTS_IMP_ERR = None
|
||||||
try:
|
try:
|
||||||
@@ -203,8 +204,7 @@ class ClcFirewallPolicy:
|
|||||||
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
||||||
if not REQUESTS_FOUND:
|
if not REQUESTS_FOUND:
|
||||||
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
||||||
if requests.__version__ and LooseVersion(
|
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
|
||||||
requests.__version__) < LooseVersion('2.5.0'):
|
|
||||||
self.module.fail_json(
|
self.module.fail_json(
|
||||||
msg='requests library version should be >= 2.5.0')
|
msg='requests library version should be >= 2.5.0')
|
||||||
|
|
||||||
|
|||||||
@@ -207,7 +207,8 @@ __version__ = '${version}'
|
|||||||
|
|
||||||
import os
|
import os
|
||||||
import traceback
|
import traceback
|
||||||
from distutils.version import LooseVersion
|
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||||
|
|
||||||
REQUESTS_IMP_ERR = None
|
REQUESTS_IMP_ERR = None
|
||||||
try:
|
try:
|
||||||
|
|||||||
@@ -210,7 +210,8 @@ import json
|
|||||||
import os
|
import os
|
||||||
import traceback
|
import traceback
|
||||||
from time import sleep
|
from time import sleep
|
||||||
from distutils.version import LooseVersion
|
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||||
|
|
||||||
REQUESTS_IMP_ERR = None
|
REQUESTS_IMP_ERR = None
|
||||||
try:
|
try:
|
||||||
@@ -255,8 +256,7 @@ class ClcLoadBalancer:
|
|||||||
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
||||||
if not REQUESTS_FOUND:
|
if not REQUESTS_FOUND:
|
||||||
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
||||||
if requests.__version__ and LooseVersion(
|
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
|
||||||
requests.__version__) < LooseVersion('2.5.0'):
|
|
||||||
self.module.fail_json(
|
self.module.fail_json(
|
||||||
msg='requests library version should be >= 2.5.0')
|
msg='requests library version should be >= 2.5.0')
|
||||||
|
|
||||||
|
|||||||
@@ -311,7 +311,8 @@ __version__ = '${version}'
|
|||||||
import json
|
import json
|
||||||
import os
|
import os
|
||||||
import traceback
|
import traceback
|
||||||
from distutils.version import LooseVersion
|
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||||
|
|
||||||
REQUESTS_IMP_ERR = None
|
REQUESTS_IMP_ERR = None
|
||||||
try:
|
try:
|
||||||
@@ -355,8 +356,7 @@ class ClcModifyServer:
|
|||||||
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
||||||
if not REQUESTS_FOUND:
|
if not REQUESTS_FOUND:
|
||||||
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
||||||
if requests.__version__ and LooseVersion(
|
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
|
||||||
requests.__version__) < LooseVersion('2.5.0'):
|
|
||||||
self.module.fail_json(
|
self.module.fail_json(
|
||||||
msg='requests library version should be >= 2.5.0')
|
msg='requests library version should be >= 2.5.0')
|
||||||
|
|
||||||
|
|||||||
@@ -117,7 +117,8 @@ __version__ = '${version}'
|
|||||||
|
|
||||||
import os
|
import os
|
||||||
import traceback
|
import traceback
|
||||||
from distutils.version import LooseVersion
|
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||||
|
|
||||||
REQUESTS_IMP_ERR = None
|
REQUESTS_IMP_ERR = None
|
||||||
try:
|
try:
|
||||||
|
|||||||
@@ -433,7 +433,8 @@ import json
|
|||||||
import os
|
import os
|
||||||
import time
|
import time
|
||||||
import traceback
|
import traceback
|
||||||
from distutils.version import LooseVersion
|
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||||
|
|
||||||
REQUESTS_IMP_ERR = None
|
REQUESTS_IMP_ERR = None
|
||||||
try:
|
try:
|
||||||
@@ -478,8 +479,7 @@ class ClcServer:
|
|||||||
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
||||||
if not REQUESTS_FOUND:
|
if not REQUESTS_FOUND:
|
||||||
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
||||||
if requests.__version__ and LooseVersion(
|
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
|
||||||
requests.__version__) < LooseVersion('2.5.0'):
|
|
||||||
self.module.fail_json(
|
self.module.fail_json(
|
||||||
msg='requests library version should be >= 2.5.0')
|
msg='requests library version should be >= 2.5.0')
|
||||||
|
|
||||||
|
|||||||
@@ -101,7 +101,8 @@ __version__ = '${version}'
|
|||||||
|
|
||||||
import os
|
import os
|
||||||
import traceback
|
import traceback
|
||||||
from distutils.version import LooseVersion
|
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||||
|
|
||||||
REQUESTS_IMP_ERR = None
|
REQUESTS_IMP_ERR = None
|
||||||
try:
|
try:
|
||||||
@@ -145,8 +146,7 @@ class ClcSnapshot:
|
|||||||
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
||||||
if not REQUESTS_FOUND:
|
if not REQUESTS_FOUND:
|
||||||
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
||||||
if requests.__version__ and LooseVersion(
|
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
|
||||||
requests.__version__) < LooseVersion('2.5.0'):
|
|
||||||
self.module.fail_json(
|
self.module.fail_json(
|
||||||
msg='requests library version should be >= 2.5.0')
|
msg='requests library version should be >= 2.5.0')
|
||||||
|
|
||||||
|
|||||||
@@ -422,6 +422,7 @@ import shutil
|
|||||||
import subprocess
|
import subprocess
|
||||||
import tempfile
|
import tempfile
|
||||||
import time
|
import time
|
||||||
|
import shlex
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import lxc
|
import lxc
|
||||||
@@ -661,9 +662,8 @@ class LxcContainerManagement(object):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
for key, value in variables_dict.items():
|
for key, value in variables_dict.items():
|
||||||
build_command.append(
|
build_command.append(str(key))
|
||||||
'%s %s' % (key, value)
|
build_command.append(str(value))
|
||||||
)
|
|
||||||
return build_command
|
return build_command
|
||||||
|
|
||||||
def _get_vars(self, variables):
|
def _get_vars(self, variables):
|
||||||
@@ -686,24 +686,6 @@ class LxcContainerManagement(object):
|
|||||||
return_dict[v] = _var
|
return_dict[v] = _var
|
||||||
return return_dict
|
return return_dict
|
||||||
|
|
||||||
def _run_command(self, build_command, unsafe_shell=False):
|
|
||||||
"""Return information from running an Ansible Command.
|
|
||||||
|
|
||||||
This will squash the build command list into a string and then
|
|
||||||
execute the command via Ansible. The output is returned to the method.
|
|
||||||
This output is returned as `return_code`, `stdout`, `stderr`.
|
|
||||||
|
|
||||||
:param build_command: Used for the command and all options.
|
|
||||||
:type build_command: ``list``
|
|
||||||
:param unsafe_shell: Enable or Disable unsafe sell commands.
|
|
||||||
:type unsafe_shell: ``bol``
|
|
||||||
"""
|
|
||||||
|
|
||||||
return self.module.run_command(
|
|
||||||
' '.join(build_command),
|
|
||||||
use_unsafe_shell=unsafe_shell
|
|
||||||
)
|
|
||||||
|
|
||||||
def _config(self):
|
def _config(self):
|
||||||
"""Configure an LXC container.
|
"""Configure an LXC container.
|
||||||
|
|
||||||
@@ -810,7 +792,7 @@ class LxcContainerManagement(object):
|
|||||||
elif self.module.params.get('backing_store') == 'overlayfs':
|
elif self.module.params.get('backing_store') == 'overlayfs':
|
||||||
build_command.append('--snapshot')
|
build_command.append('--snapshot')
|
||||||
|
|
||||||
rc, return_data, err = self._run_command(build_command)
|
rc, return_data, err = self.module.run_command(build_command)
|
||||||
if rc != 0:
|
if rc != 0:
|
||||||
message = "Failed executing %s." % os.path.basename(clone_cmd)
|
message = "Failed executing %s." % os.path.basename(clone_cmd)
|
||||||
self.failure(
|
self.failure(
|
||||||
@@ -843,7 +825,7 @@ class LxcContainerManagement(object):
|
|||||||
|
|
||||||
build_command = [
|
build_command = [
|
||||||
self.module.get_bin_path('lxc-create', True),
|
self.module.get_bin_path('lxc-create', True),
|
||||||
'--name %s' % self.container_name,
|
'--name', self.container_name,
|
||||||
'--quiet'
|
'--quiet'
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -869,10 +851,12 @@ class LxcContainerManagement(object):
|
|||||||
log_path = os.getenv('HOME')
|
log_path = os.getenv('HOME')
|
||||||
|
|
||||||
build_command.extend([
|
build_command.extend([
|
||||||
'--logfile %s' % os.path.join(
|
'--logfile',
|
||||||
|
os.path.join(
|
||||||
log_path, 'lxc-%s.log' % self.container_name
|
log_path, 'lxc-%s.log' % self.container_name
|
||||||
),
|
),
|
||||||
'--logpriority %s' % self.module.params.get(
|
'--logpriority',
|
||||||
|
self.module.params.get(
|
||||||
'container_log_level'
|
'container_log_level'
|
||||||
).upper()
|
).upper()
|
||||||
])
|
])
|
||||||
@@ -880,9 +864,10 @@ class LxcContainerManagement(object):
|
|||||||
# Add the template commands to the end of the command if there are any
|
# Add the template commands to the end of the command if there are any
|
||||||
template_options = self.module.params.get('template_options', None)
|
template_options = self.module.params.get('template_options', None)
|
||||||
if template_options:
|
if template_options:
|
||||||
build_command.append('-- %s' % template_options)
|
build_command.append('--')
|
||||||
|
build_command += shlex.split(template_options)
|
||||||
|
|
||||||
rc, return_data, err = self._run_command(build_command)
|
rc, return_data, err = self.module.run_command(build_command)
|
||||||
if rc != 0:
|
if rc != 0:
|
||||||
message = "Failed executing lxc-create."
|
message = "Failed executing lxc-create."
|
||||||
self.failure(
|
self.failure(
|
||||||
@@ -1186,7 +1171,7 @@ class LxcContainerManagement(object):
|
|||||||
self.module.get_bin_path('lxc-config', True),
|
self.module.get_bin_path('lxc-config', True),
|
||||||
"lxc.bdev.lvm.vg"
|
"lxc.bdev.lvm.vg"
|
||||||
]
|
]
|
||||||
rc, vg, err = self._run_command(build_command)
|
rc, vg, err = self.module.run_command(build_command)
|
||||||
if rc != 0:
|
if rc != 0:
|
||||||
self.failure(
|
self.failure(
|
||||||
err=err,
|
err=err,
|
||||||
@@ -1204,7 +1189,7 @@ class LxcContainerManagement(object):
|
|||||||
build_command = [
|
build_command = [
|
||||||
self.module.get_bin_path('lvs', True)
|
self.module.get_bin_path('lvs', True)
|
||||||
]
|
]
|
||||||
rc, stdout, err = self._run_command(build_command)
|
rc, stdout, err = self.module.run_command(build_command)
|
||||||
if rc != 0:
|
if rc != 0:
|
||||||
self.failure(
|
self.failure(
|
||||||
err=err,
|
err=err,
|
||||||
@@ -1231,7 +1216,7 @@ class LxcContainerManagement(object):
|
|||||||
'--units',
|
'--units',
|
||||||
'g'
|
'g'
|
||||||
]
|
]
|
||||||
rc, stdout, err = self._run_command(build_command)
|
rc, stdout, err = self.module.run_command(build_command)
|
||||||
if rc != 0:
|
if rc != 0:
|
||||||
self.failure(
|
self.failure(
|
||||||
err=err,
|
err=err,
|
||||||
@@ -1262,7 +1247,7 @@ class LxcContainerManagement(object):
|
|||||||
'--units',
|
'--units',
|
||||||
'g'
|
'g'
|
||||||
]
|
]
|
||||||
rc, stdout, err = self._run_command(build_command)
|
rc, stdout, err = self.module.run_command(build_command)
|
||||||
if rc != 0:
|
if rc != 0:
|
||||||
self.failure(
|
self.failure(
|
||||||
err=err,
|
err=err,
|
||||||
@@ -1311,7 +1296,7 @@ class LxcContainerManagement(object):
|
|||||||
os.path.join(vg, source_lv),
|
os.path.join(vg, source_lv),
|
||||||
"-L%sg" % snapshot_size_gb
|
"-L%sg" % snapshot_size_gb
|
||||||
]
|
]
|
||||||
rc, stdout, err = self._run_command(build_command)
|
rc, stdout, err = self.module.run_command(build_command)
|
||||||
if rc != 0:
|
if rc != 0:
|
||||||
self.failure(
|
self.failure(
|
||||||
err=err,
|
err=err,
|
||||||
@@ -1336,7 +1321,7 @@ class LxcContainerManagement(object):
|
|||||||
"/dev/%s/%s" % (vg, lv_name),
|
"/dev/%s/%s" % (vg, lv_name),
|
||||||
mount_point,
|
mount_point,
|
||||||
]
|
]
|
||||||
rc, stdout, err = self._run_command(build_command)
|
rc, stdout, err = self.module.run_command(build_command)
|
||||||
if rc != 0:
|
if rc != 0:
|
||||||
self.failure(
|
self.failure(
|
||||||
err=err,
|
err=err,
|
||||||
@@ -1380,9 +1365,8 @@ class LxcContainerManagement(object):
|
|||||||
'.'
|
'.'
|
||||||
]
|
]
|
||||||
|
|
||||||
rc, stdout, err = self._run_command(
|
rc, stdout, err = self.module.run_command(
|
||||||
build_command=build_command,
|
build_command
|
||||||
unsafe_shell=True
|
|
||||||
)
|
)
|
||||||
|
|
||||||
os.umask(old_umask)
|
os.umask(old_umask)
|
||||||
@@ -1410,7 +1394,7 @@ class LxcContainerManagement(object):
|
|||||||
"-f",
|
"-f",
|
||||||
"%s/%s" % (vg, lv_name),
|
"%s/%s" % (vg, lv_name),
|
||||||
]
|
]
|
||||||
rc, stdout, err = self._run_command(build_command)
|
rc, stdout, err = self.module.run_command(build_command)
|
||||||
if rc != 0:
|
if rc != 0:
|
||||||
self.failure(
|
self.failure(
|
||||||
err=err,
|
err=err,
|
||||||
@@ -1442,11 +1426,10 @@ class LxcContainerManagement(object):
|
|||||||
self.module.get_bin_path('rsync', True),
|
self.module.get_bin_path('rsync', True),
|
||||||
'-aHAX',
|
'-aHAX',
|
||||||
fs_path,
|
fs_path,
|
||||||
temp_dir
|
temp_dir,
|
||||||
]
|
]
|
||||||
rc, stdout, err = self._run_command(
|
rc, stdout, err = self.module.run_command(
|
||||||
build_command,
|
build_command,
|
||||||
unsafe_shell=True
|
|
||||||
)
|
)
|
||||||
if rc != 0:
|
if rc != 0:
|
||||||
self.failure(
|
self.failure(
|
||||||
@@ -1467,7 +1450,7 @@ class LxcContainerManagement(object):
|
|||||||
self.module.get_bin_path('umount', True),
|
self.module.get_bin_path('umount', True),
|
||||||
mount_point,
|
mount_point,
|
||||||
]
|
]
|
||||||
rc, stdout, err = self._run_command(build_command)
|
rc, stdout, err = self.module.run_command(build_command)
|
||||||
if rc != 0:
|
if rc != 0:
|
||||||
self.failure(
|
self.failure(
|
||||||
err=err,
|
err=err,
|
||||||
@@ -1489,12 +1472,12 @@ class LxcContainerManagement(object):
|
|||||||
|
|
||||||
build_command = [
|
build_command = [
|
||||||
self.module.get_bin_path('mount', True),
|
self.module.get_bin_path('mount', True),
|
||||||
'-t overlayfs',
|
'-t', 'overlayfs',
|
||||||
'-o lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
|
'-o', 'lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
|
||||||
'overlayfs',
|
'overlayfs',
|
||||||
mount_point,
|
mount_point,
|
||||||
]
|
]
|
||||||
rc, stdout, err = self._run_command(build_command)
|
rc, stdout, err = self.module.run_command(build_command)
|
||||||
if rc != 0:
|
if rc != 0:
|
||||||
self.failure(
|
self.failure(
|
||||||
err=err,
|
err=err,
|
||||||
|
|||||||
@@ -11,29 +11,28 @@ __metaclass__ = type
|
|||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
---
|
---
|
||||||
module: lxd_container
|
module: lxd_container
|
||||||
short_description: Manage LXD Containers
|
short_description: Manage LXD instances
|
||||||
description:
|
description:
|
||||||
- Management of LXD containers
|
- Management of LXD containers and virtual machines.
|
||||||
author: "Hiroaki Nakamura (@hnakamur)"
|
author: "Hiroaki Nakamura (@hnakamur)"
|
||||||
options:
|
options:
|
||||||
name:
|
name:
|
||||||
description:
|
description:
|
||||||
- Name of a container.
|
- Name of an instance.
|
||||||
type: str
|
type: str
|
||||||
required: true
|
required: true
|
||||||
architecture:
|
architecture:
|
||||||
description:
|
description:
|
||||||
- 'The architecture for the container (for example C(x86_64) or C(i686)).
|
- 'The architecture for the instance (for example C(x86_64) or C(i686)).
|
||||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
|
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
|
||||||
type: str
|
type: str
|
||||||
required: false
|
required: false
|
||||||
config:
|
config:
|
||||||
description:
|
description:
|
||||||
- 'The config for the container (for example C({"limits.cpu": "2"})).
|
- 'The config for the instance (for example C({"limits.cpu": "2"})).
|
||||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
|
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
|
||||||
- If the container already exists and its "config" values in metadata
|
- If the instance already exists and its "config" values in metadata
|
||||||
obtained from GET /1.0/containers/<name>
|
obtained from the LXD API U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#instances-containers-and-virtual-machines)
|
||||||
U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersname)
|
|
||||||
are different, this module tries to apply the configurations.
|
are different, this module tries to apply the configurations.
|
||||||
- The keys starting with C(volatile.) are ignored for this comparison when I(ignore_volatile_options=true).
|
- The keys starting with C(volatile.) are ignored for this comparison when I(ignore_volatile_options=true).
|
||||||
type: dict
|
type: dict
|
||||||
@@ -43,33 +42,32 @@ options:
|
|||||||
- If set to C(true), options starting with C(volatile.) are ignored. As a result,
|
- If set to C(true), options starting with C(volatile.) are ignored. As a result,
|
||||||
they are reapplied for each execution.
|
they are reapplied for each execution.
|
||||||
- This default behavior can be changed by setting this option to C(false).
|
- This default behavior can be changed by setting this option to C(false).
|
||||||
- The default value C(true) will be deprecated in community.general 4.0.0,
|
- The current default value C(true) is deprecated since community.general 4.0.0,
|
||||||
and will change to C(false) in community.general 5.0.0.
|
and will change to C(false) in community.general 6.0.0.
|
||||||
type: bool
|
type: bool
|
||||||
default: true
|
|
||||||
required: false
|
required: false
|
||||||
version_added: 3.7.0
|
version_added: 3.7.0
|
||||||
profiles:
|
profiles:
|
||||||
description:
|
description:
|
||||||
- Profile to be used by the container.
|
- Profile to be used by the instance.
|
||||||
type: list
|
type: list
|
||||||
elements: str
|
elements: str
|
||||||
devices:
|
devices:
|
||||||
description:
|
description:
|
||||||
- 'The devices for the container
|
- 'The devices for the instance
|
||||||
(for example C({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})).
|
(for example C({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})).
|
||||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
|
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
|
||||||
type: dict
|
type: dict
|
||||||
required: false
|
required: false
|
||||||
ephemeral:
|
ephemeral:
|
||||||
description:
|
description:
|
||||||
- Whether or not the container is ephemeral (for example C(true) or C(false)).
|
- Whether or not the instance is ephemeral (for example C(true) or C(false)).
|
||||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).
|
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).
|
||||||
required: false
|
required: false
|
||||||
type: bool
|
type: bool
|
||||||
source:
|
source:
|
||||||
description:
|
description:
|
||||||
- 'The source for the container
|
- 'The source for the instance
|
||||||
(e.g. { "type": "image",
|
(e.g. { "type": "image",
|
||||||
"mode": "pull",
|
"mode": "pull",
|
||||||
"server": "https://images.linuxcontainers.org",
|
"server": "https://images.linuxcontainers.org",
|
||||||
@@ -87,39 +85,49 @@ options:
|
|||||||
- absent
|
- absent
|
||||||
- frozen
|
- frozen
|
||||||
description:
|
description:
|
||||||
- Define the state of a container.
|
- Define the state of an instance.
|
||||||
required: false
|
required: false
|
||||||
default: started
|
default: started
|
||||||
type: str
|
type: str
|
||||||
target:
|
target:
|
||||||
description:
|
description:
|
||||||
- For cluster deployments. Will attempt to create a container on a target node.
|
- For cluster deployments. Will attempt to create an instance on a target node.
|
||||||
If container exists elsewhere in a cluster, then container will not be replaced or moved.
|
If the instance exists elsewhere in a cluster, then it will not be replaced or moved.
|
||||||
The name should respond to same name of the node you see in C(lxc cluster list).
|
The name should respond to same name of the node you see in C(lxc cluster list).
|
||||||
type: str
|
type: str
|
||||||
required: false
|
required: false
|
||||||
version_added: 1.0.0
|
version_added: 1.0.0
|
||||||
timeout:
|
timeout:
|
||||||
description:
|
description:
|
||||||
- A timeout for changing the state of the container.
|
- A timeout for changing the state of the instance.
|
||||||
- This is also used as a timeout for waiting until IPv4 addresses
|
- This is also used as a timeout for waiting until IPv4 addresses
|
||||||
are set to the all network interfaces in the container after
|
are set to the all network interfaces in the instance after
|
||||||
starting or restarting.
|
starting or restarting.
|
||||||
required: false
|
required: false
|
||||||
default: 30
|
default: 30
|
||||||
type: int
|
type: int
|
||||||
|
type:
|
||||||
|
description:
|
||||||
|
- Instance type can be either C(virtual-machine) or C(container).
|
||||||
|
required: false
|
||||||
|
default: container
|
||||||
|
choices:
|
||||||
|
- container
|
||||||
|
- virtual-machine
|
||||||
|
type: str
|
||||||
|
version_added: 4.1.0
|
||||||
wait_for_ipv4_addresses:
|
wait_for_ipv4_addresses:
|
||||||
description:
|
description:
|
||||||
- If this is true, the C(lxd_container) waits until IPv4 addresses
|
- If this is true, the C(lxd_container) waits until IPv4 addresses
|
||||||
are set to the all network interfaces in the container after
|
are set to the all network interfaces in the instance after
|
||||||
starting or restarting.
|
starting or restarting.
|
||||||
required: false
|
required: false
|
||||||
default: false
|
default: false
|
||||||
type: bool
|
type: bool
|
||||||
force_stop:
|
force_stop:
|
||||||
description:
|
description:
|
||||||
- If this is true, the C(lxd_container) forces to stop the container
|
- If this is true, the C(lxd_container) forces to stop the instance
|
||||||
when it stops or restarts the container.
|
when it stops or restarts the instance.
|
||||||
required: false
|
required: false
|
||||||
default: false
|
default: false
|
||||||
type: bool
|
type: bool
|
||||||
@@ -161,18 +169,18 @@ options:
|
|||||||
required: false
|
required: false
|
||||||
type: str
|
type: str
|
||||||
notes:
|
notes:
|
||||||
- Containers must have a unique name. If you attempt to create a container
|
- Instances can be a container or a virtual machine, both of them must have unique name. If you attempt to create an instance
|
||||||
with a name that already existed in the users namespace the module will
|
with a name that already existed in the users namespace the module will
|
||||||
simply return as "unchanged".
|
simply return as "unchanged".
|
||||||
- There are two ways to run commands in containers, using the command
|
- There are two ways to run commands inside a container or virtual machine, using the command
|
||||||
module or using the ansible lxd connection plugin bundled in Ansible >=
|
module or using the ansible lxd connection plugin bundled in Ansible >=
|
||||||
2.1, the later requires python to be installed in the container which can
|
2.1, the later requires python to be installed in the instance which can
|
||||||
be done with the command module.
|
be done with the command module.
|
||||||
- You can copy a file from the host to the container
|
- You can copy a file from the host to the instance
|
||||||
with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) module and the `lxd` connection plugin.
|
with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) module and the `lxd` connection plugin.
|
||||||
See the example below.
|
See the example below.
|
||||||
- You can copy a file in the created container to the localhost
|
- You can copy a file in the created instance to the localhost
|
||||||
with `command=lxc file pull container_name/dir/filename filename`.
|
with `command=lxc file pull instance_name/dir/filename filename`.
|
||||||
See the first example below.
|
See the first example below.
|
||||||
'''
|
'''
|
||||||
|
|
||||||
@@ -241,6 +249,7 @@ EXAMPLES = '''
|
|||||||
community.general.lxd_container:
|
community.general.lxd_container:
|
||||||
name: mycontainer
|
name: mycontainer
|
||||||
state: absent
|
state: absent
|
||||||
|
type: container
|
||||||
|
|
||||||
# An example for restarting a container
|
# An example for restarting a container
|
||||||
- hosts: localhost
|
- hosts: localhost
|
||||||
@@ -250,6 +259,7 @@ EXAMPLES = '''
|
|||||||
community.general.lxd_container:
|
community.general.lxd_container:
|
||||||
name: mycontainer
|
name: mycontainer
|
||||||
state: restarted
|
state: restarted
|
||||||
|
type: container
|
||||||
|
|
||||||
# An example for restarting a container using https to connect to the LXD server
|
# An example for restarting a container using https to connect to the LXD server
|
||||||
- hosts: localhost
|
- hosts: localhost
|
||||||
@@ -307,16 +317,36 @@ EXAMPLES = '''
|
|||||||
mode: pull
|
mode: pull
|
||||||
alias: ubuntu/xenial/amd64
|
alias: ubuntu/xenial/amd64
|
||||||
target: node02
|
target: node02
|
||||||
|
|
||||||
|
# An example for creating a virtual machine
|
||||||
|
- hosts: localhost
|
||||||
|
connection: local
|
||||||
|
tasks:
|
||||||
|
- name: Create container on another node
|
||||||
|
community.general.lxd_container:
|
||||||
|
name: new-vm-1
|
||||||
|
type: virtual-machine
|
||||||
|
state: started
|
||||||
|
ignore_volatile_options: true
|
||||||
|
wait_for_ipv4_addresses: true
|
||||||
|
profiles: ["default"]
|
||||||
|
source:
|
||||||
|
protocol: simplestreams
|
||||||
|
type: image
|
||||||
|
mode: pull
|
||||||
|
server: https://images.linuxcontainers.org
|
||||||
|
alias: debian/11
|
||||||
|
timeout: 600
|
||||||
'''
|
'''
|
||||||
|
|
||||||
RETURN = '''
|
RETURN = '''
|
||||||
addresses:
|
addresses:
|
||||||
description: Mapping from the network device name to a list of IPv4 addresses in the container
|
description: Mapping from the network device name to a list of IPv4 addresses in the instance.
|
||||||
returned: when state is started or restarted
|
returned: when state is started or restarted
|
||||||
type: dict
|
type: dict
|
||||||
sample: {"eth0": ["10.155.92.191"]}
|
sample: {"eth0": ["10.155.92.191"]}
|
||||||
old_state:
|
old_state:
|
||||||
description: The old state of the container
|
description: The old state of the instance.
|
||||||
returned: when state is started or restarted
|
returned: when state is started or restarted
|
||||||
type: str
|
type: str
|
||||||
sample: "stopped"
|
sample: "stopped"
|
||||||
@@ -326,7 +356,7 @@ logs:
|
|||||||
type: list
|
type: list
|
||||||
sample: "(too long to be placed here)"
|
sample: "(too long to be placed here)"
|
||||||
actions:
|
actions:
|
||||||
description: List of actions performed for the container.
|
description: List of actions performed for the instance.
|
||||||
returned: success
|
returned: success
|
||||||
type: list
|
type: list
|
||||||
sample: '["create", "start"]'
|
sample: '["create", "start"]'
|
||||||
@@ -385,6 +415,15 @@ class LXDContainerManagement(object):
|
|||||||
self.addresses = None
|
self.addresses = None
|
||||||
self.target = self.module.params['target']
|
self.target = self.module.params['target']
|
||||||
|
|
||||||
|
self.type = self.module.params['type']
|
||||||
|
|
||||||
|
# LXD Rest API provides additional endpoints for creating containers and virtual-machines.
|
||||||
|
self.api_endpoint = None
|
||||||
|
if self.type == 'container':
|
||||||
|
self.api_endpoint = '/1.0/containers'
|
||||||
|
elif self.type == 'virtual-machine':
|
||||||
|
self.api_endpoint = '/1.0/virtual-machines'
|
||||||
|
|
||||||
self.key_file = self.module.params.get('client_key')
|
self.key_file = self.module.params.get('client_key')
|
||||||
if self.key_file is None:
|
if self.key_file is None:
|
||||||
self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME'])
|
self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME'])
|
||||||
@@ -420,20 +459,20 @@ class LXDContainerManagement(object):
|
|||||||
if param_val is not None:
|
if param_val is not None:
|
||||||
self.config[attr] = param_val
|
self.config[attr] = param_val
|
||||||
|
|
||||||
def _get_container_json(self):
|
def _get_instance_json(self):
|
||||||
return self.client.do(
|
return self.client.do(
|
||||||
'GET', '/1.0/containers/{0}'.format(self.name),
|
'GET', '{0}/{1}'.format(self.api_endpoint, self.name),
|
||||||
ok_error_codes=[404]
|
ok_error_codes=[404]
|
||||||
)
|
)
|
||||||
|
|
||||||
def _get_container_state_json(self):
|
def _get_instance_state_json(self):
|
||||||
return self.client.do(
|
return self.client.do(
|
||||||
'GET', '/1.0/containers/{0}/state'.format(self.name),
|
'GET', '{0}/{1}/state'.format(self.api_endpoint, self.name),
|
||||||
ok_error_codes=[404]
|
ok_error_codes=[404]
|
||||||
)
|
)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _container_json_to_module_state(resp_json):
|
def _instance_json_to_module_state(resp_json):
|
||||||
if resp_json['type'] == 'error':
|
if resp_json['type'] == 'error':
|
||||||
return 'absent'
|
return 'absent'
|
||||||
return ANSIBLE_LXD_STATES[resp_json['metadata']['status']]
|
return ANSIBLE_LXD_STATES[resp_json['metadata']['status']]
|
||||||
@@ -442,45 +481,45 @@ class LXDContainerManagement(object):
|
|||||||
body_json = {'action': action, 'timeout': self.timeout}
|
body_json = {'action': action, 'timeout': self.timeout}
|
||||||
if force_stop:
|
if force_stop:
|
||||||
body_json['force'] = True
|
body_json['force'] = True
|
||||||
return self.client.do('PUT', '/1.0/containers/{0}/state'.format(self.name), body_json=body_json)
|
return self.client.do('PUT', '{0}/{1}/state'.format(self.api_endpoint, self.name), body_json=body_json)
|
||||||
|
|
||||||
def _create_container(self):
|
def _create_instance(self):
|
||||||
config = self.config.copy()
|
config = self.config.copy()
|
||||||
config['name'] = self.name
|
config['name'] = self.name
|
||||||
if self.target:
|
if self.target:
|
||||||
self.client.do('POST', '/1.0/containers?' + urlencode(dict(target=self.target)), config)
|
self.client.do('POST', '{0}?{1}'.format(self.api_endpoint, urlencode(dict(target=self.target))), config)
|
||||||
else:
|
else:
|
||||||
self.client.do('POST', '/1.0/containers', config)
|
self.client.do('POST', self.api_endpoint, config)
|
||||||
self.actions.append('create')
|
self.actions.append('create')
|
||||||
|
|
||||||
def _start_container(self):
|
def _start_instance(self):
|
||||||
self._change_state('start')
|
self._change_state('start')
|
||||||
self.actions.append('start')
|
self.actions.append('start')
|
||||||
|
|
||||||
def _stop_container(self):
|
def _stop_instance(self):
|
||||||
self._change_state('stop', self.force_stop)
|
self._change_state('stop', self.force_stop)
|
||||||
self.actions.append('stop')
|
self.actions.append('stop')
|
||||||
|
|
||||||
def _restart_container(self):
|
def _restart_instance(self):
|
||||||
self._change_state('restart', self.force_stop)
|
self._change_state('restart', self.force_stop)
|
||||||
self.actions.append('restart')
|
self.actions.append('restart')
|
||||||
|
|
||||||
def _delete_container(self):
|
def _delete_instance(self):
|
||||||
self.client.do('DELETE', '/1.0/containers/{0}'.format(self.name))
|
self.client.do('DELETE', '{0}/{1}'.format(self.api_endpoint, self.name))
|
||||||
self.actions.append('delete')
|
self.actions.append('delete')
|
||||||
|
|
||||||
def _freeze_container(self):
|
def _freeze_instance(self):
|
||||||
self._change_state('freeze')
|
self._change_state('freeze')
|
||||||
self.actions.append('freeze')
|
self.actions.append('freeze')
|
||||||
|
|
||||||
def _unfreeze_container(self):
|
def _unfreeze_instance(self):
|
||||||
self._change_state('unfreeze')
|
self._change_state('unfreeze')
|
||||||
self.actions.append('unfreez')
|
self.actions.append('unfreez')
|
||||||
|
|
||||||
def _container_ipv4_addresses(self, ignore_devices=None):
|
def _instance_ipv4_addresses(self, ignore_devices=None):
|
||||||
ignore_devices = ['lo'] if ignore_devices is None else ignore_devices
|
ignore_devices = ['lo'] if ignore_devices is None else ignore_devices
|
||||||
|
|
||||||
resp_json = self._get_container_state_json()
|
resp_json = self._get_instance_state_json()
|
||||||
network = resp_json['metadata']['network'] or {}
|
network = resp_json['metadata']['network'] or {}
|
||||||
network = dict((k, v) for k, v in network.items() if k not in ignore_devices) or {}
|
network = dict((k, v) for k, v in network.items() if k not in ignore_devices) or {}
|
||||||
addresses = dict((k, [a['address'] for a in v['addresses'] if a['family'] == 'inet']) for k, v in network.items()) or {}
|
addresses = dict((k, [a['address'] for a in v['addresses'] if a['family'] == 'inet']) for k, v in network.items()) or {}
|
||||||
@@ -495,7 +534,7 @@ class LXDContainerManagement(object):
|
|||||||
due = datetime.datetime.now() + datetime.timedelta(seconds=self.timeout)
|
due = datetime.datetime.now() + datetime.timedelta(seconds=self.timeout)
|
||||||
while datetime.datetime.now() < due:
|
while datetime.datetime.now() < due:
|
||||||
time.sleep(1)
|
time.sleep(1)
|
||||||
addresses = self._container_ipv4_addresses()
|
addresses = self._instance_ipv4_addresses()
|
||||||
if self._has_all_ipv4_addresses(addresses):
|
if self._has_all_ipv4_addresses(addresses):
|
||||||
self.addresses = addresses
|
self.addresses = addresses
|
||||||
return
|
return
|
||||||
@@ -505,72 +544,72 @@ class LXDContainerManagement(object):
|
|||||||
|
|
||||||
def _started(self):
|
def _started(self):
|
||||||
if self.old_state == 'absent':
|
if self.old_state == 'absent':
|
||||||
self._create_container()
|
self._create_instance()
|
||||||
self._start_container()
|
self._start_instance()
|
||||||
else:
|
else:
|
||||||
if self.old_state == 'frozen':
|
if self.old_state == 'frozen':
|
||||||
self._unfreeze_container()
|
self._unfreeze_instance()
|
||||||
elif self.old_state == 'stopped':
|
elif self.old_state == 'stopped':
|
||||||
self._start_container()
|
self._start_instance()
|
||||||
if self._needs_to_apply_container_configs():
|
if self._needs_to_apply_instance_configs():
|
||||||
self._apply_container_configs()
|
self._apply_instance_configs()
|
||||||
if self.wait_for_ipv4_addresses:
|
if self.wait_for_ipv4_addresses:
|
||||||
self._get_addresses()
|
self._get_addresses()
|
||||||
|
|
||||||
def _stopped(self):
|
def _stopped(self):
|
||||||
if self.old_state == 'absent':
|
if self.old_state == 'absent':
|
||||||
self._create_container()
|
self._create_instance()
|
||||||
else:
|
else:
|
||||||
if self.old_state == 'stopped':
|
if self.old_state == 'stopped':
|
||||||
if self._needs_to_apply_container_configs():
|
if self._needs_to_apply_instance_configs():
|
||||||
self._start_container()
|
self._start_instance()
|
||||||
self._apply_container_configs()
|
self._apply_instance_configs()
|
||||||
self._stop_container()
|
self._stop_instance()
|
||||||
else:
|
else:
|
||||||
if self.old_state == 'frozen':
|
if self.old_state == 'frozen':
|
||||||
self._unfreeze_container()
|
self._unfreeze_instance()
|
||||||
if self._needs_to_apply_container_configs():
|
if self._needs_to_apply_instance_configs():
|
||||||
self._apply_container_configs()
|
self._apply_instance_configs()
|
||||||
self._stop_container()
|
self._stop_instance()
|
||||||
|
|
||||||
def _restarted(self):
|
def _restarted(self):
|
||||||
if self.old_state == 'absent':
|
if self.old_state == 'absent':
|
||||||
self._create_container()
|
self._create_instance()
|
||||||
self._start_container()
|
self._start_instance()
|
||||||
else:
|
else:
|
||||||
if self.old_state == 'frozen':
|
if self.old_state == 'frozen':
|
||||||
self._unfreeze_container()
|
self._unfreeze_instance()
|
||||||
if self._needs_to_apply_container_configs():
|
if self._needs_to_apply_instance_configs():
|
||||||
self._apply_container_configs()
|
self._apply_instance_configs()
|
||||||
self._restart_container()
|
self._restart_instance()
|
||||||
if self.wait_for_ipv4_addresses:
|
if self.wait_for_ipv4_addresses:
|
||||||
self._get_addresses()
|
self._get_addresses()
|
||||||
|
|
||||||
def _destroyed(self):
|
def _destroyed(self):
|
||||||
if self.old_state != 'absent':
|
if self.old_state != 'absent':
|
||||||
if self.old_state == 'frozen':
|
if self.old_state == 'frozen':
|
||||||
self._unfreeze_container()
|
self._unfreeze_instance()
|
||||||
if self.old_state != 'stopped':
|
if self.old_state != 'stopped':
|
||||||
self._stop_container()
|
self._stop_instance()
|
||||||
self._delete_container()
|
self._delete_instance()
|
||||||
|
|
||||||
def _frozen(self):
|
def _frozen(self):
|
||||||
if self.old_state == 'absent':
|
if self.old_state == 'absent':
|
||||||
self._create_container()
|
self._create_instance()
|
||||||
self._start_container()
|
self._start_instance()
|
||||||
self._freeze_container()
|
self._freeze_instance()
|
||||||
else:
|
else:
|
||||||
if self.old_state == 'stopped':
|
if self.old_state == 'stopped':
|
||||||
self._start_container()
|
self._start_instance()
|
||||||
if self._needs_to_apply_container_configs():
|
if self._needs_to_apply_instance_configs():
|
||||||
self._apply_container_configs()
|
self._apply_instance_configs()
|
||||||
self._freeze_container()
|
self._freeze_instance()
|
||||||
|
|
||||||
def _needs_to_change_container_config(self, key):
|
def _needs_to_change_instance_config(self, key):
|
||||||
if key not in self.config:
|
if key not in self.config:
|
||||||
return False
|
return False
|
||||||
if key == 'config' and self.ignore_volatile_options: # the old behavior is to ignore configurations by keyword "volatile"
|
if key == 'config' and self.ignore_volatile_options: # the old behavior is to ignore configurations by keyword "volatile"
|
||||||
old_configs = dict((k, v) for k, v in self.old_container_json['metadata'][key].items() if not k.startswith('volatile.'))
|
old_configs = dict((k, v) for k, v in self.old_instance_json['metadata'][key].items() if not k.startswith('volatile.'))
|
||||||
for k, v in self.config['config'].items():
|
for k, v in self.config['config'].items():
|
||||||
if k not in old_configs:
|
if k not in old_configs:
|
||||||
return True
|
return True
|
||||||
@@ -578,7 +617,7 @@ class LXDContainerManagement(object):
|
|||||||
return True
|
return True
|
||||||
return False
|
return False
|
||||||
elif key == 'config': # next default behavior
|
elif key == 'config': # next default behavior
|
||||||
old_configs = dict((k, v) for k, v in self.old_container_json['metadata'][key].items())
|
old_configs = dict((k, v) for k, v in self.old_instance_json['metadata'][key].items())
|
||||||
for k, v in self.config['config'].items():
|
for k, v in self.config['config'].items():
|
||||||
if k not in old_configs:
|
if k not in old_configs:
|
||||||
return True
|
return True
|
||||||
@@ -586,39 +625,41 @@ class LXDContainerManagement(object):
|
|||||||
return True
|
return True
|
||||||
return False
|
return False
|
||||||
else:
|
else:
|
||||||
old_configs = self.old_container_json['metadata'][key]
|
old_configs = self.old_instance_json['metadata'][key]
|
||||||
return self.config[key] != old_configs
|
return self.config[key] != old_configs
|
||||||
|
|
||||||
def _needs_to_apply_container_configs(self):
|
def _needs_to_apply_instance_configs(self):
|
||||||
return (
|
return (
|
||||||
self._needs_to_change_container_config('architecture') or
|
self._needs_to_change_instance_config('architecture') or
|
||||||
self._needs_to_change_container_config('config') or
|
self._needs_to_change_instance_config('config') or
|
||||||
self._needs_to_change_container_config('ephemeral') or
|
self._needs_to_change_instance_config('ephemeral') or
|
||||||
self._needs_to_change_container_config('devices') or
|
self._needs_to_change_instance_config('devices') or
|
||||||
self._needs_to_change_container_config('profiles')
|
self._needs_to_change_instance_config('profiles')
|
||||||
)
|
)
|
||||||
|
|
||||||
def _apply_container_configs(self):
|
def _apply_instance_configs(self):
|
||||||
old_metadata = self.old_container_json['metadata']
|
old_metadata = self.old_instance_json['metadata']
|
||||||
body_json = {
|
body_json = {
|
||||||
'architecture': old_metadata['architecture'],
|
'architecture': old_metadata['architecture'],
|
||||||
'config': old_metadata['config'],
|
'config': old_metadata['config'],
|
||||||
'devices': old_metadata['devices'],
|
'devices': old_metadata['devices'],
|
||||||
'profiles': old_metadata['profiles']
|
'profiles': old_metadata['profiles']
|
||||||
}
|
}
|
||||||
if self._needs_to_change_container_config('architecture'):
|
|
||||||
|
if self._needs_to_change_instance_config('architecture'):
|
||||||
body_json['architecture'] = self.config['architecture']
|
body_json['architecture'] = self.config['architecture']
|
||||||
if self._needs_to_change_container_config('config'):
|
if self._needs_to_change_instance_config('config'):
|
||||||
for k, v in self.config['config'].items():
|
for k, v in self.config['config'].items():
|
||||||
body_json['config'][k] = v
|
body_json['config'][k] = v
|
||||||
if self._needs_to_change_container_config('ephemeral'):
|
if self._needs_to_change_instance_config('ephemeral'):
|
||||||
body_json['ephemeral'] = self.config['ephemeral']
|
body_json['ephemeral'] = self.config['ephemeral']
|
||||||
if self._needs_to_change_container_config('devices'):
|
if self._needs_to_change_instance_config('devices'):
|
||||||
body_json['devices'] = self.config['devices']
|
body_json['devices'] = self.config['devices']
|
||||||
if self._needs_to_change_container_config('profiles'):
|
if self._needs_to_change_instance_config('profiles'):
|
||||||
body_json['profiles'] = self.config['profiles']
|
body_json['profiles'] = self.config['profiles']
|
||||||
self.client.do('PUT', '/1.0/containers/{0}'.format(self.name), body_json=body_json)
|
|
||||||
self.actions.append('apply_container_configs')
|
self.client.do('PUT', '{0}/{1}'.format(self.api_endpoint, self.name), body_json=body_json)
|
||||||
|
self.actions.append('apply_instance_configs')
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
"""Run the main method."""
|
"""Run the main method."""
|
||||||
@@ -628,8 +669,8 @@ class LXDContainerManagement(object):
|
|||||||
self.client.authenticate(self.trust_password)
|
self.client.authenticate(self.trust_password)
|
||||||
self.ignore_volatile_options = self.module.params.get('ignore_volatile_options')
|
self.ignore_volatile_options = self.module.params.get('ignore_volatile_options')
|
||||||
|
|
||||||
self.old_container_json = self._get_container_json()
|
self.old_instance_json = self._get_instance_json()
|
||||||
self.old_state = self._container_json_to_module_state(self.old_container_json)
|
self.old_state = self._instance_json_to_module_state(self.old_instance_json)
|
||||||
action = getattr(self, LXD_ANSIBLE_STATES[self.state])
|
action = getattr(self, LXD_ANSIBLE_STATES[self.state])
|
||||||
action()
|
action()
|
||||||
|
|
||||||
@@ -674,7 +715,6 @@ def main():
|
|||||||
),
|
),
|
||||||
ignore_volatile_options=dict(
|
ignore_volatile_options=dict(
|
||||||
type='bool',
|
type='bool',
|
||||||
default=True
|
|
||||||
),
|
),
|
||||||
devices=dict(
|
devices=dict(
|
||||||
type='dict',
|
type='dict',
|
||||||
@@ -700,6 +740,11 @@ def main():
|
|||||||
type='int',
|
type='int',
|
||||||
default=30
|
default=30
|
||||||
),
|
),
|
||||||
|
type=dict(
|
||||||
|
type='str',
|
||||||
|
default='container',
|
||||||
|
choices=['container', 'virtual-machine'],
|
||||||
|
),
|
||||||
wait_for_ipv4_addresses=dict(
|
wait_for_ipv4_addresses=dict(
|
||||||
type='bool',
|
type='bool',
|
||||||
default=False
|
default=False
|
||||||
@@ -728,13 +773,17 @@ def main():
|
|||||||
),
|
),
|
||||||
supports_check_mode=False,
|
supports_check_mode=False,
|
||||||
)
|
)
|
||||||
# if module.params['ignore_volatile_options'] is None:
|
|
||||||
# module.params['ignore_volatile_options'] = True
|
if module.params['ignore_volatile_options'] is None:
|
||||||
# module.deprecate(
|
module.params['ignore_volatile_options'] = True
|
||||||
# 'If the keyword "volatile" is used in a playbook in the config section, a
|
module.deprecate(
|
||||||
# "changed" message will appear with every run, even without a change to the playbook.
|
'If the keyword "volatile" is used in a playbook in the config'
|
||||||
# This will change in the future.
|
'section, a "changed" message will appear with every run, even without a change'
|
||||||
# Please test your scripts by "ignore_volatile_options: false"', version='5.0.0', collection_name='community.general')
|
'to the playbook.'
|
||||||
|
'This will change in the future. Please test your scripts'
|
||||||
|
'by "ignore_volatile_options: false". To keep the old behavior, set that option explicitly to "true"',
|
||||||
|
version='6.0.0', collection_name='community.general')
|
||||||
|
|
||||||
lxd_manage = LXDContainerManagement(module=module)
|
lxd_manage = LXDContainerManagement(module=module)
|
||||||
lxd_manage.run()
|
lxd_manage.run()
|
||||||
|
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ short_description: management of instances in Proxmox VE cluster
|
|||||||
description:
|
description:
|
||||||
- allows you to create/delete/stop instances in Proxmox VE cluster
|
- allows you to create/delete/stop instances in Proxmox VE cluster
|
||||||
- Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older)
|
- Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older)
|
||||||
- From community.general 4.0.0 on, there will be no default values, see I(proxmox_default_behavior).
|
- Since community.general 4.0.0 on, there are no more default values, see I(proxmox_default_behavior).
|
||||||
options:
|
options:
|
||||||
password:
|
password:
|
||||||
description:
|
description:
|
||||||
@@ -40,37 +40,27 @@ options:
|
|||||||
comma-delimited list C([volume=]<volume> [,acl=<1|0>] [,mountoptions=<opt[;opt...]>] [,quota=<1|0>]
|
comma-delimited list C([volume=]<volume> [,acl=<1|0>] [,mountoptions=<opt[;opt...]>] [,quota=<1|0>]
|
||||||
[,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=<DiskSize>])."
|
[,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=<DiskSize>])."
|
||||||
- See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description.
|
- See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description.
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(3).
|
||||||
option has a default of C(3). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: str
|
type: str
|
||||||
cores:
|
cores:
|
||||||
description:
|
description:
|
||||||
- Specify number of cores per socket.
|
- Specify number of cores per socket.
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1).
|
||||||
option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: int
|
type: int
|
||||||
cpus:
|
cpus:
|
||||||
description:
|
description:
|
||||||
- numbers of allocated cpus for instance
|
- numbers of allocated cpus for instance
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1).
|
||||||
option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: int
|
type: int
|
||||||
memory:
|
memory:
|
||||||
description:
|
description:
|
||||||
- memory size in MB for instance
|
- memory size in MB for instance
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(512).
|
||||||
option has a default of C(512). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: int
|
type: int
|
||||||
swap:
|
swap:
|
||||||
description:
|
description:
|
||||||
- swap memory size in MB for instance
|
- swap memory size in MB for instance
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(0).
|
||||||
option has a default of C(0). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: int
|
type: int
|
||||||
netif:
|
netif:
|
||||||
description:
|
description:
|
||||||
@@ -94,9 +84,7 @@ options:
|
|||||||
onboot:
|
onboot:
|
||||||
description:
|
description:
|
||||||
- specifies whether a VM will be started during system bootup
|
- specifies whether a VM will be started during system bootup
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(no).
|
||||||
option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: bool
|
type: bool
|
||||||
storage:
|
storage:
|
||||||
description:
|
description:
|
||||||
@@ -106,9 +94,7 @@ options:
|
|||||||
cpuunits:
|
cpuunits:
|
||||||
description:
|
description:
|
||||||
- CPU weight for a VM
|
- CPU weight for a VM
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1000).
|
||||||
option has a default of C(1000). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: int
|
type: int
|
||||||
nameserver:
|
nameserver:
|
||||||
description:
|
description:
|
||||||
@@ -168,20 +154,38 @@ options:
|
|||||||
version_added: '0.2.0'
|
version_added: '0.2.0'
|
||||||
proxmox_default_behavior:
|
proxmox_default_behavior:
|
||||||
description:
|
description:
|
||||||
- Various module options used to have default values. This cause problems when
|
- As of community.general 4.0.0, various options no longer have default values.
|
||||||
user expects different behavior from proxmox by default or fill options which cause
|
These default values caused problems when users expected different behavior from Proxmox
|
||||||
problems when they have been set.
|
by default or filled options which caused problems when set.
|
||||||
- The default value is C(compatibility), which will ensure that the default values
|
- The value C(compatibility) (default before community.general 4.0.0) will ensure that the default values
|
||||||
are used when the values are not explicitly specified by the user.
|
are used when the values are not explicitly specified by the user. The new default is C(no_defaults),
|
||||||
- From community.general 4.0.0 on, the default value will switch to C(no_defaults). To avoid
|
which makes sure these options have no defaults.
|
||||||
deprecation warnings, please set I(proxmox_default_behavior) to an explicit
|
|
||||||
value.
|
|
||||||
- This affects the I(disk), I(cores), I(cpus), I(memory), I(onboot), I(swap), I(cpuunits) options.
|
- This affects the I(disk), I(cores), I(cpus), I(memory), I(onboot), I(swap), I(cpuunits) options.
|
||||||
type: str
|
type: str
|
||||||
|
default: no_defaults
|
||||||
choices:
|
choices:
|
||||||
- compatibility
|
- compatibility
|
||||||
- no_defaults
|
- no_defaults
|
||||||
version_added: "1.3.0"
|
version_added: "1.3.0"
|
||||||
|
clone:
|
||||||
|
description:
|
||||||
|
- ID of the container to be cloned.
|
||||||
|
- I(description), I(hostname), and I(pool) will be copied from the cloned container if not specified.
|
||||||
|
- The type of clone created is defined by the I(clone_type) parameter.
|
||||||
|
- This operator is only supported for Proxmox clusters that use LXC containerization (PVE version >= 4).
|
||||||
|
type: int
|
||||||
|
version_added: 4.3.0
|
||||||
|
clone_type:
|
||||||
|
description:
|
||||||
|
- Type of the clone created.
|
||||||
|
- C(full) creates a full clone, and I(storage) must be specified.
|
||||||
|
- C(linked) creates a linked clone, and the cloned container must be a template container.
|
||||||
|
- C(opportunistic) creates a linked clone if the cloned container is a template container, and a full clone if not.
|
||||||
|
I(storage) may be specified, if not it will fall back to the default.
|
||||||
|
type: str
|
||||||
|
choices: ['full', 'linked', 'opportunistic']
|
||||||
|
default: opportunistic
|
||||||
|
version_added: 4.3.0
|
||||||
author: Sergei Antipov (@UnderGreen)
|
author: Sergei Antipov (@UnderGreen)
|
||||||
extends_documentation_fragment:
|
extends_documentation_fragment:
|
||||||
- community.general.proxmox.documentation
|
- community.general.proxmox.documentation
|
||||||
@@ -307,6 +311,28 @@ EXAMPLES = r'''
|
|||||||
- nesting=1
|
- nesting=1
|
||||||
- mount=cifs,nfs
|
- mount=cifs,nfs
|
||||||
|
|
||||||
|
- name: >
|
||||||
|
Create a linked clone of the template container with id 100. The newly created container with be a
|
||||||
|
linked clone, because no storage parameter is defined
|
||||||
|
community.general.proxmox:
|
||||||
|
vmid: 201
|
||||||
|
node: uk-mc02
|
||||||
|
api_user: root@pam
|
||||||
|
api_password: 1q2w3e
|
||||||
|
api_host: node1
|
||||||
|
clone: 100
|
||||||
|
hostname: clone.example.org
|
||||||
|
|
||||||
|
- name: Create a full clone of the container with id 100
|
||||||
|
community.general.proxmox:
|
||||||
|
vmid: 201
|
||||||
|
node: uk-mc02
|
||||||
|
api_user: root@pam
|
||||||
|
api_password: 1q2w3e
|
||||||
|
api_host: node1
|
||||||
|
clone: 100
|
||||||
|
hostname: clone.example.org
|
||||||
|
storage: local
|
||||||
|
|
||||||
- name: Start container
|
- name: Start container
|
||||||
community.general.proxmox:
|
community.general.proxmox:
|
||||||
@@ -363,7 +389,8 @@ EXAMPLES = r'''
|
|||||||
|
|
||||||
import time
|
import time
|
||||||
import traceback
|
import traceback
|
||||||
from distutils.version import LooseVersion
|
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from proxmoxer import ProxmoxAPI
|
from proxmoxer import ProxmoxAPI
|
||||||
@@ -374,6 +401,10 @@ except ImportError:
|
|||||||
from ansible.module_utils.basic import AnsibleModule, env_fallback
|
from ansible.module_utils.basic import AnsibleModule, env_fallback
|
||||||
from ansible.module_utils.common.text.converters import to_native
|
from ansible.module_utils.common.text.converters import to_native
|
||||||
|
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.proxmox import (
|
||||||
|
ansible_to_proxmox_bool
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
VZ_TYPE = None
|
VZ_TYPE = None
|
||||||
|
|
||||||
@@ -399,6 +430,13 @@ def content_check(proxmox, node, ostemplate, template_store):
|
|||||||
return [True for cnt in proxmox.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate]
|
return [True for cnt in proxmox.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate]
|
||||||
|
|
||||||
|
|
||||||
|
def is_template_container(proxmox, node, vmid):
|
||||||
|
"""Check if the specified container is a template."""
|
||||||
|
proxmox_node = proxmox.nodes(node)
|
||||||
|
config = getattr(proxmox_node, VZ_TYPE)(vmid).config.get()
|
||||||
|
return config['template']
|
||||||
|
|
||||||
|
|
||||||
def node_check(proxmox, node):
|
def node_check(proxmox, node):
|
||||||
return [True for nd in proxmox.nodes.get() if nd['node'] == node]
|
return [True for nd in proxmox.nodes.get() if nd['node'] == node]
|
||||||
|
|
||||||
@@ -408,8 +446,10 @@ def proxmox_version(proxmox):
|
|||||||
return LooseVersion(apireturn['version'])
|
return LooseVersion(apireturn['version'])
|
||||||
|
|
||||||
|
|
||||||
def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs):
|
def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, clone, **kwargs):
|
||||||
proxmox_node = proxmox.nodes(node)
|
proxmox_node = proxmox.nodes(node)
|
||||||
|
|
||||||
|
# Remove all empty kwarg entries
|
||||||
kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
|
kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
|
||||||
|
|
||||||
if VZ_TYPE == 'lxc':
|
if VZ_TYPE == 'lxc':
|
||||||
@@ -429,7 +469,49 @@ def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, sw
|
|||||||
kwargs['cpus'] = cpus
|
kwargs['cpus'] = cpus
|
||||||
kwargs['disk'] = disk
|
kwargs['disk'] = disk
|
||||||
|
|
||||||
taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
|
if clone is not None:
|
||||||
|
if VZ_TYPE != 'lxc':
|
||||||
|
module.fail_json(changed=False, msg="Clone operator is only supported for LXC enabled proxmox clusters.")
|
||||||
|
|
||||||
|
clone_is_template = is_template_container(proxmox, node, clone)
|
||||||
|
|
||||||
|
# By default, create a full copy only when the cloned container is not a template.
|
||||||
|
create_full_copy = not clone_is_template
|
||||||
|
|
||||||
|
# Only accept parameters that are compatible with the clone endpoint.
|
||||||
|
valid_clone_parameters = ['hostname', 'pool', 'description']
|
||||||
|
if module.params['storage'] is not None and clone_is_template:
|
||||||
|
# Cloning a template, so create a full copy instead of a linked copy
|
||||||
|
create_full_copy = True
|
||||||
|
elif module.params['storage'] is None and not clone_is_template:
|
||||||
|
# Not cloning a template, but also no defined storage. This isn't possible.
|
||||||
|
module.fail_json(changed=False, msg="Cloned container is not a template, storage needs to be specified.")
|
||||||
|
|
||||||
|
if module.params['clone_type'] == 'linked':
|
||||||
|
if not clone_is_template:
|
||||||
|
module.fail_json(changed=False, msg="'linked' clone type is specified, but cloned container is not a template container.")
|
||||||
|
# Don't need to do more, by default create_full_copy is set to false already
|
||||||
|
elif module.params['clone_type'] == 'opportunistic':
|
||||||
|
if not clone_is_template:
|
||||||
|
# Cloned container is not a template, so we need our 'storage' parameter
|
||||||
|
valid_clone_parameters.append('storage')
|
||||||
|
elif module.params['clone_type'] == 'full':
|
||||||
|
create_full_copy = True
|
||||||
|
valid_clone_parameters.append('storage')
|
||||||
|
|
||||||
|
clone_parameters = {}
|
||||||
|
|
||||||
|
if create_full_copy:
|
||||||
|
clone_parameters['full'] = '1'
|
||||||
|
else:
|
||||||
|
clone_parameters['full'] = '0'
|
||||||
|
for param in valid_clone_parameters:
|
||||||
|
if module.params[param] is not None:
|
||||||
|
clone_parameters[param] = module.params[param]
|
||||||
|
|
||||||
|
taskid = getattr(proxmox_node, VZ_TYPE)(clone).clone.post(newid=vmid, **clone_parameters)
|
||||||
|
else:
|
||||||
|
taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
|
||||||
|
|
||||||
while timeout:
|
while timeout:
|
||||||
if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' and
|
if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' and
|
||||||
@@ -529,11 +611,20 @@ def main():
|
|||||||
unprivileged=dict(type='bool', default=False),
|
unprivileged=dict(type='bool', default=False),
|
||||||
description=dict(type='str'),
|
description=dict(type='str'),
|
||||||
hookscript=dict(type='str'),
|
hookscript=dict(type='str'),
|
||||||
proxmox_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
|
proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']),
|
||||||
|
clone=dict(type='int'),
|
||||||
|
clone_type=dict(default='opportunistic', choices=['full', 'linked', 'opportunistic']),
|
||||||
),
|
),
|
||||||
required_if=[('state', 'present', ['node', 'hostname', 'ostemplate'])],
|
required_if=[
|
||||||
required_together=[('api_token_id', 'api_token_secret')],
|
('state', 'present', ['node', 'hostname']),
|
||||||
|
('state', 'present', ('clone', 'ostemplate'), True), # Require one of clone and ostemplate. Together with mutually_exclusive this ensures that we
|
||||||
|
# either clone a container or create a new one from a template file.
|
||||||
|
],
|
||||||
|
required_together=[
|
||||||
|
('api_token_id', 'api_token_secret')
|
||||||
|
],
|
||||||
required_one_of=[('api_password', 'api_token_id')],
|
required_one_of=[('api_password', 'api_token_id')],
|
||||||
|
mutually_exclusive=[('clone', 'ostemplate')], # Creating a new container is done either by cloning an existing one, or based on a template.
|
||||||
)
|
)
|
||||||
|
|
||||||
if not HAS_PROXMOXER:
|
if not HAS_PROXMOXER:
|
||||||
@@ -557,14 +648,8 @@ def main():
|
|||||||
if module.params['ostemplate'] is not None:
|
if module.params['ostemplate'] is not None:
|
||||||
template_store = module.params['ostemplate'].split(":")[0]
|
template_store = module.params['ostemplate'].split(":")[0]
|
||||||
timeout = module.params['timeout']
|
timeout = module.params['timeout']
|
||||||
|
clone = module.params['clone']
|
||||||
|
|
||||||
if module.params['proxmox_default_behavior'] is None:
|
|
||||||
module.params['proxmox_default_behavior'] = 'compatibility'
|
|
||||||
module.deprecate(
|
|
||||||
'The proxmox_default_behavior option will change its default value from "compatibility" to '
|
|
||||||
'"no_defaults" in community.general 4.0.0. To remove this warning, please specify an explicit value for it now',
|
|
||||||
version='4.0.0', collection_name='community.general'
|
|
||||||
)
|
|
||||||
if module.params['proxmox_default_behavior'] == 'compatibility':
|
if module.params['proxmox_default_behavior'] == 'compatibility':
|
||||||
old_default_values = dict(
|
old_default_values = dict(
|
||||||
disk="3",
|
disk="3",
|
||||||
@@ -605,7 +690,8 @@ def main():
|
|||||||
elif not vmid:
|
elif not vmid:
|
||||||
module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
|
module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
|
||||||
|
|
||||||
if state == 'present':
|
# Create a new container
|
||||||
|
if state == 'present' and clone is None:
|
||||||
try:
|
try:
|
||||||
if get_instance(proxmox, vmid) and not module.params['force']:
|
if get_instance(proxmox, vmid) and not module.params['force']:
|
||||||
module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
|
module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
|
||||||
@@ -617,8 +703,11 @@ def main():
|
|||||||
elif not content_check(proxmox, node, module.params['ostemplate'], template_store):
|
elif not content_check(proxmox, node, module.params['ostemplate'], template_store):
|
||||||
module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s"
|
module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s"
|
||||||
% (module.params['ostemplate'], node, template_store))
|
% (module.params['ostemplate'], node, template_store))
|
||||||
|
except Exception as e:
|
||||||
|
module.fail_json(msg="Pre-creation checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
|
||||||
|
|
||||||
create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout,
|
try:
|
||||||
|
create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, clone,
|
||||||
cores=module.params['cores'],
|
cores=module.params['cores'],
|
||||||
pool=module.params['pool'],
|
pool=module.params['pool'],
|
||||||
password=module.params['password'],
|
password=module.params['password'],
|
||||||
@@ -627,20 +716,40 @@ def main():
|
|||||||
netif=module.params['netif'],
|
netif=module.params['netif'],
|
||||||
mounts=module.params['mounts'],
|
mounts=module.params['mounts'],
|
||||||
ip_address=module.params['ip_address'],
|
ip_address=module.params['ip_address'],
|
||||||
onboot=int(module.params['onboot']),
|
onboot=ansible_to_proxmox_bool(module.params['onboot']),
|
||||||
cpuunits=module.params['cpuunits'],
|
cpuunits=module.params['cpuunits'],
|
||||||
nameserver=module.params['nameserver'],
|
nameserver=module.params['nameserver'],
|
||||||
searchdomain=module.params['searchdomain'],
|
searchdomain=module.params['searchdomain'],
|
||||||
force=int(module.params['force']),
|
force=ansible_to_proxmox_bool(module.params['force']),
|
||||||
pubkey=module.params['pubkey'],
|
pubkey=module.params['pubkey'],
|
||||||
features=",".join(module.params['features']) if module.params['features'] is not None else None,
|
features=",".join(module.params['features']) if module.params['features'] is not None else None,
|
||||||
unprivileged=int(module.params['unprivileged']),
|
unprivileged=ansible_to_proxmox_bool(module.params['unprivileged']),
|
||||||
description=module.params['description'],
|
description=module.params['description'],
|
||||||
hookscript=module.params['hookscript'])
|
hookscript=module.params['hookscript'])
|
||||||
|
|
||||||
module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
|
module.exit_json(changed=True, msg="Deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
module.fail_json(msg="creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
|
module.fail_json(msg="Creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
|
||||||
|
|
||||||
|
# Clone a container
|
||||||
|
elif state == 'present' and clone is not None:
|
||||||
|
try:
|
||||||
|
if get_instance(proxmox, vmid) and not module.params['force']:
|
||||||
|
module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
|
||||||
|
# If no vmid was passed, there cannot be another VM named 'hostname'
|
||||||
|
if not module.params['vmid'] and get_vmid(proxmox, hostname) and not module.params['force']:
|
||||||
|
module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, get_vmid(proxmox, hostname)[0]))
|
||||||
|
if not get_instance(proxmox, clone):
|
||||||
|
module.exit_json(changed=False, msg="Container to be cloned does not exist")
|
||||||
|
except Exception as e:
|
||||||
|
module.fail_json(msg="Pre-clone checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
|
||||||
|
|
||||||
|
try:
|
||||||
|
create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, clone)
|
||||||
|
|
||||||
|
module.exit_json(changed=True, msg="Cloned VM %s from %s" % (vmid, clone))
|
||||||
|
except Exception as e:
|
||||||
|
module.fail_json(msg="Cloning %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
|
||||||
|
|
||||||
elif state == 'started':
|
elif state == 'started':
|
||||||
try:
|
try:
|
||||||
|
|||||||
@@ -13,15 +13,13 @@ module: proxmox_kvm
|
|||||||
short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster.
|
short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster.
|
||||||
description:
|
description:
|
||||||
- Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster.
|
- Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster.
|
||||||
- From community.general 4.0.0 on, there will be no default values, see I(proxmox_default_behavior).
|
- Since community.general 4.0.0 on, there are no more default values, see I(proxmox_default_behavior).
|
||||||
author: "Abdoul Bah (@helldorado) <bahabdoul at gmail.com>"
|
author: "Abdoul Bah (@helldorado) <bahabdoul at gmail.com>"
|
||||||
options:
|
options:
|
||||||
acpi:
|
acpi:
|
||||||
description:
|
description:
|
||||||
- Specify if ACPI should be enabled/disabled.
|
- Specify if ACPI should be enabled/disabled.
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(yes).
|
||||||
option has a default of C(yes). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: bool
|
type: bool
|
||||||
agent:
|
agent:
|
||||||
description:
|
description:
|
||||||
@@ -31,24 +29,19 @@ options:
|
|||||||
description:
|
description:
|
||||||
- Pass arbitrary arguments to kvm.
|
- Pass arbitrary arguments to kvm.
|
||||||
- This option is for experts only!
|
- This option is for experts only!
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- If I(proxmox_default_behavior) is set to C(compatiblity), this option has a default of
|
||||||
option has a default of C(-serial unix:/var/run/qemu-server/<vmid>.serial,server,nowait).
|
C(-serial unix:/var/run/qemu-server/<vmid>.serial,server,nowait).
|
||||||
Note that the default value of I(proxmox_default_behavior) changes in community.general 4.0.0.
|
|
||||||
type: str
|
type: str
|
||||||
autostart:
|
autostart:
|
||||||
description:
|
description:
|
||||||
- Specify if the VM should be automatically restarted after crash (currently ignored in PVE API).
|
- Specify if the VM should be automatically restarted after crash (currently ignored in PVE API).
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(no).
|
||||||
option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: bool
|
type: bool
|
||||||
balloon:
|
balloon:
|
||||||
description:
|
description:
|
||||||
- Specify the amount of RAM for the VM in MB.
|
- Specify the amount of RAM for the VM in MB.
|
||||||
- Using zero disables the balloon driver.
|
- Using zero disables the balloon driver.
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(0).
|
||||||
option has a default of C(0). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: int
|
type: int
|
||||||
bios:
|
bios:
|
||||||
description:
|
description:
|
||||||
@@ -59,9 +52,7 @@ options:
|
|||||||
description:
|
description:
|
||||||
- Specify the boot order -> boot on floppy C(a), hard disk C(c), CD-ROM C(d), or network C(n).
|
- Specify the boot order -> boot on floppy C(a), hard disk C(c), CD-ROM C(d), or network C(n).
|
||||||
- You can combine to set order.
|
- You can combine to set order.
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(cnd).
|
||||||
option has a default of C(cnd). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: str
|
type: str
|
||||||
bootdisk:
|
bootdisk:
|
||||||
description:
|
description:
|
||||||
@@ -97,16 +88,12 @@ options:
|
|||||||
cores:
|
cores:
|
||||||
description:
|
description:
|
||||||
- Specify number of cores per socket.
|
- Specify number of cores per socket.
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1).
|
||||||
option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: int
|
type: int
|
||||||
cpu:
|
cpu:
|
||||||
description:
|
description:
|
||||||
- Specify emulated CPU type.
|
- Specify emulated CPU type.
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(kvm64).
|
||||||
option has a default of C(kvm64). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: str
|
type: str
|
||||||
cpulimit:
|
cpulimit:
|
||||||
description:
|
description:
|
||||||
@@ -117,9 +104,7 @@ options:
|
|||||||
description:
|
description:
|
||||||
- Specify CPU weight for a VM.
|
- Specify CPU weight for a VM.
|
||||||
- You can disable fair-scheduler configuration by setting this to 0
|
- You can disable fair-scheduler configuration by setting this to 0
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1000).
|
||||||
option has a default of C(1000). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: int
|
type: int
|
||||||
delete:
|
delete:
|
||||||
description:
|
description:
|
||||||
@@ -139,19 +124,15 @@ options:
|
|||||||
description:
|
description:
|
||||||
- Allow to force stop VM.
|
- Allow to force stop VM.
|
||||||
- Can be used with states C(stopped), C(restarted) and C(absent).
|
- Can be used with states C(stopped), C(restarted) and C(absent).
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(no).
|
||||||
option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: bool
|
type: bool
|
||||||
format:
|
format:
|
||||||
description:
|
description:
|
||||||
- Target drive's backing file's data format.
|
- Target drive's backing file's data format.
|
||||||
- Used only with clone
|
- Used only with clone
|
||||||
- Use I(format=unspecified) and I(full=false) for a linked clone.
|
- Use I(format=unspecified) and I(full=false) for a linked clone.
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(qcow2).
|
||||||
option has a default of C(qcow2). If I(proxmox_default_behavior) is set to C(no_defaults),
|
If I(proxmox_default_behavior) is set to C(no_defaults), not specifying this option is equivalent to setting it to C(unspecified).
|
||||||
not specifying this option is equivalent to setting it to C(unspecified).
|
|
||||||
Note that the default value of I(proxmox_default_behavior) changes in community.general 4.0.0.
|
|
||||||
type: str
|
type: str
|
||||||
choices: [ "cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk", "unspecified" ]
|
choices: [ "cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk", "unspecified" ]
|
||||||
freeze:
|
freeze:
|
||||||
@@ -216,9 +197,7 @@ options:
|
|||||||
kvm:
|
kvm:
|
||||||
description:
|
description:
|
||||||
- Enable/disable KVM hardware virtualization.
|
- Enable/disable KVM hardware virtualization.
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(yes).
|
||||||
option has a default of C(yes). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: bool
|
type: bool
|
||||||
localtime:
|
localtime:
|
||||||
description:
|
description:
|
||||||
@@ -238,9 +217,7 @@ options:
|
|||||||
memory:
|
memory:
|
||||||
description:
|
description:
|
||||||
- Memory size in MB for instance.
|
- Memory size in MB for instance.
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(512).
|
||||||
option has a default of C(512). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: int
|
type: int
|
||||||
migrate_downtime:
|
migrate_downtime:
|
||||||
description:
|
description:
|
||||||
@@ -296,17 +273,13 @@ options:
|
|||||||
onboot:
|
onboot:
|
||||||
description:
|
description:
|
||||||
- Specifies whether a VM will be started during system bootup.
|
- Specifies whether a VM will be started during system bootup.
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(yes).
|
||||||
option has a default of C(yes). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: bool
|
type: bool
|
||||||
ostype:
|
ostype:
|
||||||
description:
|
description:
|
||||||
- Specifies guest operating system. This is used to enable special optimization/features for specific operating systems.
|
- Specifies guest operating system. This is used to enable special optimization/features for specific operating systems.
|
||||||
- The l26 is Linux 2.6/3.X Kernel.
|
- The l26 is Linux 2.6/3.X Kernel.
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(l26).
|
||||||
option has a default of C(l26). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: str
|
type: str
|
||||||
choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'l24', 'l26', 'solaris']
|
choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'l24', 'l26', 'solaris']
|
||||||
parallel:
|
parallel:
|
||||||
@@ -387,9 +360,7 @@ options:
|
|||||||
sockets:
|
sockets:
|
||||||
description:
|
description:
|
||||||
- Sets the number of CPU sockets. (1 - N).
|
- Sets the number of CPU sockets. (1 - N).
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1).
|
||||||
option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: int
|
type: int
|
||||||
sshkeys:
|
sshkeys:
|
||||||
description:
|
description:
|
||||||
@@ -421,9 +392,7 @@ options:
|
|||||||
tablet:
|
tablet:
|
||||||
description:
|
description:
|
||||||
- Enables/disables the USB tablet device.
|
- Enables/disables the USB tablet device.
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(no).
|
||||||
option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: bool
|
type: bool
|
||||||
tags:
|
tags:
|
||||||
description:
|
description:
|
||||||
@@ -445,9 +414,7 @@ options:
|
|||||||
template:
|
template:
|
||||||
description:
|
description:
|
||||||
- Enables/disables the template.
|
- Enables/disables the template.
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(no).
|
||||||
option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: bool
|
type: bool
|
||||||
timeout:
|
timeout:
|
||||||
description:
|
description:
|
||||||
@@ -469,9 +436,7 @@ options:
|
|||||||
vga:
|
vga:
|
||||||
description:
|
description:
|
||||||
- Select VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use option 'std' or 'vmware'.
|
- Select VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use option 'std' or 'vmware'.
|
||||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
- This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(std).
|
||||||
option has a default of C(std). Note that the default value of I(proxmox_default_behavior)
|
|
||||||
changes in community.general 4.0.0.
|
|
||||||
type: str
|
type: str
|
||||||
choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']
|
choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']
|
||||||
virtio:
|
virtio:
|
||||||
@@ -489,18 +454,17 @@ options:
|
|||||||
type: str
|
type: str
|
||||||
proxmox_default_behavior:
|
proxmox_default_behavior:
|
||||||
description:
|
description:
|
||||||
- Various module options used to have default values. This cause problems when
|
- As of community.general 4.0.0, various options no longer have default values.
|
||||||
user expects different behavior from proxmox by default or fill options which cause
|
These default values caused problems when users expected different behavior from Proxmox
|
||||||
problems when they have been set.
|
by default or filled options which caused problems when set.
|
||||||
- The default value is C(compatibility), which will ensure that the default values
|
- The value C(compatibility) (default before community.general 4.0.0) will ensure that the default values
|
||||||
are used when the values are not explicitly specified by the user.
|
are used when the values are not explicitly specified by the user. The new default is C(no_defaults),
|
||||||
- From community.general 4.0.0 on, the default value will switch to C(no_defaults). To avoid
|
which makes sure these options have no defaults.
|
||||||
deprecation warnings, please set I(proxmox_default_behavior) to an explicit
|
|
||||||
value.
|
|
||||||
- This affects the I(acpi), I(autostart), I(balloon), I(boot), I(cores), I(cpu),
|
- This affects the I(acpi), I(autostart), I(balloon), I(boot), I(cores), I(cpu),
|
||||||
I(cpuunits), I(force), I(format), I(kvm), I(memory), I(onboot), I(ostype), I(sockets),
|
I(cpuunits), I(force), I(format), I(kvm), I(memory), I(onboot), I(ostype), I(sockets),
|
||||||
I(tablet), I(template), I(vga), options.
|
I(tablet), I(template), I(vga), options.
|
||||||
type: str
|
type: str
|
||||||
|
default: no_defaults
|
||||||
choices:
|
choices:
|
||||||
- compatibility
|
- compatibility
|
||||||
- no_defaults
|
- no_defaults
|
||||||
@@ -761,9 +725,10 @@ msg:
|
|||||||
import re
|
import re
|
||||||
import time
|
import time
|
||||||
import traceback
|
import traceback
|
||||||
from distutils.version import LooseVersion
|
|
||||||
from ansible.module_utils.six.moves.urllib.parse import quote
|
from ansible.module_utils.six.moves.urllib.parse import quote
|
||||||
|
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from proxmoxer import ProxmoxAPI
|
from proxmoxer import ProxmoxAPI
|
||||||
HAS_PROXMOXER = True
|
HAS_PROXMOXER = True
|
||||||
@@ -1091,7 +1056,7 @@ def main():
|
|||||||
virtio=dict(type='dict'),
|
virtio=dict(type='dict'),
|
||||||
vmid=dict(type='int'),
|
vmid=dict(type='int'),
|
||||||
watchdog=dict(),
|
watchdog=dict(),
|
||||||
proxmox_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
|
proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']),
|
||||||
),
|
),
|
||||||
mutually_exclusive=[('delete', 'revert'), ('delete', 'update'), ('revert', 'update'), ('clone', 'update'), ('clone', 'delete'), ('clone', 'revert')],
|
mutually_exclusive=[('delete', 'revert'), ('delete', 'update'), ('revert', 'update'), ('clone', 'update'), ('clone', 'delete'), ('clone', 'revert')],
|
||||||
required_together=[('api_token_id', 'api_token_secret')],
|
required_together=[('api_token_id', 'api_token_secret')],
|
||||||
@@ -1122,13 +1087,6 @@ def main():
|
|||||||
vmid = module.params['vmid']
|
vmid = module.params['vmid']
|
||||||
validate_certs = module.params['validate_certs']
|
validate_certs = module.params['validate_certs']
|
||||||
|
|
||||||
if module.params['proxmox_default_behavior'] is None:
|
|
||||||
module.params['proxmox_default_behavior'] = 'compatibility'
|
|
||||||
module.deprecate(
|
|
||||||
'The proxmox_default_behavior option will change its default value from "compatibility" to '
|
|
||||||
'"no_defaults" in community.general 4.0.0. To remove this warning, please specify an explicit value for it now',
|
|
||||||
version='4.0.0', collection_name='community.general'
|
|
||||||
)
|
|
||||||
if module.params['proxmox_default_behavior'] == 'compatibility':
|
if module.params['proxmox_default_behavior'] == 'compatibility':
|
||||||
old_default_values = dict(
|
old_default_values = dict(
|
||||||
acpi=True,
|
acpi=True,
|
||||||
|
|||||||
@@ -230,11 +230,12 @@ command:
|
|||||||
import os
|
import os
|
||||||
import json
|
import json
|
||||||
import tempfile
|
import tempfile
|
||||||
from distutils.version import LooseVersion
|
|
||||||
from ansible.module_utils.six.moves import shlex_quote
|
from ansible.module_utils.six.moves import shlex_quote
|
||||||
|
|
||||||
from ansible.module_utils.basic import AnsibleModule
|
from ansible.module_utils.basic import AnsibleModule
|
||||||
|
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||||
|
|
||||||
module = None
|
module = None
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ description:
|
|||||||
- Gather information about the servers.
|
- Gather information about the servers.
|
||||||
- U(https://www.online.net/en/dedicated-server)
|
- U(https://www.online.net/en/dedicated-server)
|
||||||
author:
|
author:
|
||||||
- "Remy Leone (@sieben)"
|
- "Remy Leone (@remyleone)"
|
||||||
extends_documentation_fragment:
|
extends_documentation_fragment:
|
||||||
- community.general.online
|
- community.general.online
|
||||||
|
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ short_description: Gather information about Online user.
|
|||||||
description:
|
description:
|
||||||
- Gather information about the user.
|
- Gather information about the user.
|
||||||
author:
|
author:
|
||||||
- "Remy Leone (@sieben)"
|
- "Remy Leone (@remyleone)"
|
||||||
extends_documentation_fragment:
|
extends_documentation_fragment:
|
||||||
- community.general.online
|
- community.general.online
|
||||||
'''
|
'''
|
||||||
|
|||||||
@@ -97,7 +97,7 @@ EXAMPLES = '''
|
|||||||
register: my_volume
|
register: my_volume
|
||||||
'''
|
'''
|
||||||
|
|
||||||
from distutils.version import LooseVersion
|
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import pyrax
|
import pyrax
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ DOCUMENTATION = '''
|
|||||||
---
|
---
|
||||||
module: scaleway_compute
|
module: scaleway_compute
|
||||||
short_description: Scaleway compute management module
|
short_description: Scaleway compute management module
|
||||||
author: Remy Leone (@sieben)
|
author: Remy Leone (@remyleone)
|
||||||
description:
|
description:
|
||||||
- "This module manages compute instances on Scaleway."
|
- "This module manages compute instances on Scaleway."
|
||||||
extends_documentation_fragment:
|
extends_documentation_fragment:
|
||||||
@@ -54,8 +54,15 @@ options:
|
|||||||
organization:
|
organization:
|
||||||
type: str
|
type: str
|
||||||
description:
|
description:
|
||||||
- Organization identifier
|
- Organization identifier.
|
||||||
required: true
|
- Exactly one of I(project) and I(organization) must be specified.
|
||||||
|
|
||||||
|
project:
|
||||||
|
type: str
|
||||||
|
description:
|
||||||
|
- Project identifier.
|
||||||
|
- Exactly one of I(project) and I(organization) must be specified.
|
||||||
|
version_added: 4.3.0
|
||||||
|
|
||||||
state:
|
state:
|
||||||
type: str
|
type: str
|
||||||
@@ -132,7 +139,7 @@ EXAMPLES = '''
|
|||||||
name: foobar
|
name: foobar
|
||||||
state: present
|
state: present
|
||||||
image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
|
image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
|
||||||
organization: 951df375-e094-4d26-97c1-ba548eeb9c42
|
project: 951df375-e094-4d26-97c1-ba548eeb9c42
|
||||||
region: ams1
|
region: ams1
|
||||||
commercial_type: VC1S
|
commercial_type: VC1S
|
||||||
tags:
|
tags:
|
||||||
@@ -144,7 +151,7 @@ EXAMPLES = '''
|
|||||||
name: foobar
|
name: foobar
|
||||||
state: present
|
state: present
|
||||||
image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
|
image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
|
||||||
organization: 951df375-e094-4d26-97c1-ba548eeb9c42
|
project: 951df375-e094-4d26-97c1-ba548eeb9c42
|
||||||
region: ams1
|
region: ams1
|
||||||
commercial_type: VC1S
|
commercial_type: VC1S
|
||||||
security_group: 4a31b633-118e-4900-bd52-facf1085fc8d
|
security_group: 4a31b633-118e-4900-bd52-facf1085fc8d
|
||||||
@@ -157,7 +164,7 @@ EXAMPLES = '''
|
|||||||
name: foobar
|
name: foobar
|
||||||
state: absent
|
state: absent
|
||||||
image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
|
image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
|
||||||
organization: 951df375-e094-4d26-97c1-ba548eeb9c42
|
project: 951df375-e094-4d26-97c1-ba548eeb9c42
|
||||||
region: ams1
|
region: ams1
|
||||||
commercial_type: VC1S
|
commercial_type: VC1S
|
||||||
'''
|
'''
|
||||||
@@ -269,10 +276,15 @@ def create_server(compute_api, server):
|
|||||||
"commercial_type": server["commercial_type"],
|
"commercial_type": server["commercial_type"],
|
||||||
"image": server["image"],
|
"image": server["image"],
|
||||||
"dynamic_ip_required": server["dynamic_ip_required"],
|
"dynamic_ip_required": server["dynamic_ip_required"],
|
||||||
"name": server["name"],
|
"name": server["name"]
|
||||||
"organization": server["organization"]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if server["project"]:
|
||||||
|
data["project"] = server["project"]
|
||||||
|
|
||||||
|
if server["organization"]:
|
||||||
|
data["organization"] = server["organization"]
|
||||||
|
|
||||||
if server["security_group"]:
|
if server["security_group"]:
|
||||||
data["security_group"] = server["security_group"]
|
data["security_group"] = server["security_group"]
|
||||||
|
|
||||||
@@ -628,6 +640,7 @@ def core(module):
|
|||||||
"enable_ipv6": module.params["enable_ipv6"],
|
"enable_ipv6": module.params["enable_ipv6"],
|
||||||
"tags": module.params["tags"],
|
"tags": module.params["tags"],
|
||||||
"organization": module.params["organization"],
|
"organization": module.params["organization"],
|
||||||
|
"project": module.params["project"],
|
||||||
"security_group": module.params["security_group"]
|
"security_group": module.params["security_group"]
|
||||||
}
|
}
|
||||||
module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
|
module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
|
||||||
@@ -655,7 +668,8 @@ def main():
|
|||||||
public_ip=dict(default="absent"),
|
public_ip=dict(default="absent"),
|
||||||
state=dict(choices=list(state_strategy.keys()), default='present'),
|
state=dict(choices=list(state_strategy.keys()), default='present'),
|
||||||
tags=dict(type="list", elements="str", default=[]),
|
tags=dict(type="list", elements="str", default=[]),
|
||||||
organization=dict(required=True),
|
organization=dict(),
|
||||||
|
project=dict(),
|
||||||
wait=dict(type="bool", default=False),
|
wait=dict(type="bool", default=False),
|
||||||
wait_timeout=dict(type="int", default=300),
|
wait_timeout=dict(type="int", default=300),
|
||||||
wait_sleep_time=dict(type="int", default=3),
|
wait_sleep_time=dict(type="int", default=3),
|
||||||
@@ -664,6 +678,12 @@ def main():
|
|||||||
module = AnsibleModule(
|
module = AnsibleModule(
|
||||||
argument_spec=argument_spec,
|
argument_spec=argument_spec,
|
||||||
supports_check_mode=True,
|
supports_check_mode=True,
|
||||||
|
mutually_exclusive=[
|
||||||
|
('organization', 'project'),
|
||||||
|
],
|
||||||
|
required_one_of=[
|
||||||
|
('organization', 'project'),
|
||||||
|
],
|
||||||
)
|
)
|
||||||
|
|
||||||
core(module)
|
core(module)
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ description:
|
|||||||
- Gather information about the Scaleway images available.
|
- Gather information about the Scaleway images available.
|
||||||
author:
|
author:
|
||||||
- "Yanis Guenane (@Spredzy)"
|
- "Yanis Guenane (@Spredzy)"
|
||||||
- "Remy Leone (@sieben)"
|
- "Remy Leone (@remyleone)"
|
||||||
extends_documentation_fragment:
|
extends_documentation_fragment:
|
||||||
- community.general.scaleway
|
- community.general.scaleway
|
||||||
|
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ DOCUMENTATION = '''
|
|||||||
---
|
---
|
||||||
module: scaleway_ip
|
module: scaleway_ip
|
||||||
short_description: Scaleway IP management module
|
short_description: Scaleway IP management module
|
||||||
author: Remy Leone (@sieben)
|
author: Remy Leone (@remyleone)
|
||||||
description:
|
description:
|
||||||
- This module manages IP on Scaleway account
|
- This module manages IP on Scaleway account
|
||||||
U(https://developer.scaleway.com)
|
U(https://developer.scaleway.com)
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ description:
|
|||||||
- Gather information about the Scaleway ips available.
|
- Gather information about the Scaleway ips available.
|
||||||
author:
|
author:
|
||||||
- "Yanis Guenane (@Spredzy)"
|
- "Yanis Guenane (@Spredzy)"
|
||||||
- "Remy Leone (@sieben)"
|
- "Remy Leone (@remyleone)"
|
||||||
extends_documentation_fragment:
|
extends_documentation_fragment:
|
||||||
- community.general.scaleway
|
- community.general.scaleway
|
||||||
|
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ DOCUMENTATION = '''
|
|||||||
---
|
---
|
||||||
module: scaleway_lb
|
module: scaleway_lb
|
||||||
short_description: Scaleway load-balancer management module
|
short_description: Scaleway load-balancer management module
|
||||||
author: Remy Leone (@sieben)
|
author: Remy Leone (@remyleone)
|
||||||
description:
|
description:
|
||||||
- "This module manages load-balancers on Scaleway."
|
- "This module manages load-balancers on Scaleway."
|
||||||
extends_documentation_fragment:
|
extends_documentation_fragment:
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ description:
|
|||||||
- Gather information about the Scaleway organizations available.
|
- Gather information about the Scaleway organizations available.
|
||||||
author:
|
author:
|
||||||
- "Yanis Guenane (@Spredzy)"
|
- "Yanis Guenane (@Spredzy)"
|
||||||
- "Remy Leone (@sieben)"
|
- "Remy Leone (@remyleone)"
|
||||||
options:
|
options:
|
||||||
api_url:
|
api_url:
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ description:
|
|||||||
- Gather information about the Scaleway security groups available.
|
- Gather information about the Scaleway security groups available.
|
||||||
author:
|
author:
|
||||||
- "Yanis Guenane (@Spredzy)"
|
- "Yanis Guenane (@Spredzy)"
|
||||||
- "Remy Leone (@sieben)"
|
- "Remy Leone (@remyleone)"
|
||||||
options:
|
options:
|
||||||
region:
|
region:
|
||||||
type: str
|
type: str
|
||||||
|
|||||||
@@ -18,11 +18,12 @@ module: scaleway_security_group_rule
|
|||||||
short_description: Scaleway Security Group Rule management module
|
short_description: Scaleway Security Group Rule management module
|
||||||
author: Antoine Barbare (@abarbare)
|
author: Antoine Barbare (@abarbare)
|
||||||
description:
|
description:
|
||||||
- This module manages Security Group Rule on Scaleway account
|
- This module manages Security Group Rule on Scaleway account
|
||||||
U(https://developer.scaleway.com)
|
U(https://developer.scaleway.com)
|
||||||
extends_documentation_fragment:
|
extends_documentation_fragment:
|
||||||
- community.general.scaleway
|
- community.general.scaleway
|
||||||
|
requirements:
|
||||||
|
- ipaddress
|
||||||
|
|
||||||
options:
|
options:
|
||||||
state:
|
state:
|
||||||
@@ -130,10 +131,19 @@ data:
|
|||||||
}
|
}
|
||||||
'''
|
'''
|
||||||
|
|
||||||
|
import traceback
|
||||||
|
|
||||||
from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object
|
from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object
|
||||||
from ansible_collections.community.general.plugins.module_utils.compat.ipaddress import ip_network
|
|
||||||
from ansible.module_utils.common.text.converters import to_text
|
from ansible.module_utils.common.text.converters import to_text
|
||||||
from ansible.module_utils.basic import AnsibleModule
|
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||||
|
|
||||||
|
try:
|
||||||
|
from ipaddress import ip_network
|
||||||
|
except ImportError:
|
||||||
|
IPADDRESS_IMP_ERR = traceback.format_exc()
|
||||||
|
HAS_IPADDRESS = False
|
||||||
|
else:
|
||||||
|
HAS_IPADDRESS = True
|
||||||
|
|
||||||
|
|
||||||
def get_sgr_from_api(security_group_rules, security_group_rule):
|
def get_sgr_from_api(security_group_rules, security_group_rule):
|
||||||
@@ -256,6 +266,8 @@ def main():
|
|||||||
argument_spec=argument_spec,
|
argument_spec=argument_spec,
|
||||||
supports_check_mode=True,
|
supports_check_mode=True,
|
||||||
)
|
)
|
||||||
|
if not HAS_IPADDRESS:
|
||||||
|
module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR)
|
||||||
|
|
||||||
core(module)
|
core(module)
|
||||||
|
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ description:
|
|||||||
- Gather information about the Scaleway servers available.
|
- Gather information about the Scaleway servers available.
|
||||||
author:
|
author:
|
||||||
- "Yanis Guenane (@Spredzy)"
|
- "Yanis Guenane (@Spredzy)"
|
||||||
- "Remy Leone (@sieben)"
|
- "Remy Leone (@remyleone)"
|
||||||
extends_documentation_fragment:
|
extends_documentation_fragment:
|
||||||
- community.general.scaleway
|
- community.general.scaleway
|
||||||
|
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ description:
|
|||||||
- Gather information about the Scaleway snapshot available.
|
- Gather information about the Scaleway snapshot available.
|
||||||
author:
|
author:
|
||||||
- "Yanis Guenane (@Spredzy)"
|
- "Yanis Guenane (@Spredzy)"
|
||||||
- "Remy Leone (@sieben)"
|
- "Remy Leone (@remyleone)"
|
||||||
extends_documentation_fragment:
|
extends_documentation_fragment:
|
||||||
- community.general.scaleway
|
- community.general.scaleway
|
||||||
|
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ DOCUMENTATION = '''
|
|||||||
---
|
---
|
||||||
module: scaleway_sshkey
|
module: scaleway_sshkey
|
||||||
short_description: Scaleway SSH keys management module
|
short_description: Scaleway SSH keys management module
|
||||||
author: Remy Leone (@sieben)
|
author: Remy Leone (@remyleone)
|
||||||
description:
|
description:
|
||||||
- This module manages SSH keys on Scaleway account
|
- This module manages SSH keys on Scaleway account
|
||||||
U(https://developer.scaleway.com)
|
U(https://developer.scaleway.com)
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ DOCUMENTATION = '''
|
|||||||
---
|
---
|
||||||
module: scaleway_user_data
|
module: scaleway_user_data
|
||||||
short_description: Scaleway user_data management module
|
short_description: Scaleway user_data management module
|
||||||
author: Remy Leone (@sieben)
|
author: Remy Leone (@remyleone)
|
||||||
description:
|
description:
|
||||||
- "This module manages user_data on compute instances on Scaleway."
|
- "This module manages user_data on compute instances on Scaleway."
|
||||||
- "It can be used to configure cloud-init for instance"
|
- "It can be used to configure cloud-init for instance"
|
||||||
@@ -75,7 +75,7 @@ def patch_user_data(compute_api, server_id, key, value):
|
|||||||
compute_api.module.debug("Starting patching user_data attributes")
|
compute_api.module.debug("Starting patching user_data attributes")
|
||||||
|
|
||||||
path = "servers/%s/user_data/%s" % (server_id, key)
|
path = "servers/%s/user_data/%s" % (server_id, key)
|
||||||
response = compute_api.patch(path=path, data=value, headers={"Content-type": "text/plain"})
|
response = compute_api.patch(path=path, data=value, headers={"Content-Type": "text/plain"})
|
||||||
if not response.ok:
|
if not response.ok:
|
||||||
msg = 'Error during user_data patching: %s %s' % (response.status_code, response.body)
|
msg = 'Error during user_data patching: %s %s' % (response.status_code, response.body)
|
||||||
compute_api.module.fail_json(msg=msg)
|
compute_api.module.fail_json(msg=msg)
|
||||||
|
|||||||
@@ -51,6 +51,11 @@ options:
|
|||||||
description:
|
description:
|
||||||
- Name used to identify the volume.
|
- Name used to identify the volume.
|
||||||
required: true
|
required: true
|
||||||
|
project:
|
||||||
|
type: str
|
||||||
|
description:
|
||||||
|
- Scaleway project ID to which volume belongs.
|
||||||
|
version_added: 4.3.0
|
||||||
organization:
|
organization:
|
||||||
type: str
|
type: str
|
||||||
description:
|
description:
|
||||||
@@ -71,7 +76,7 @@ EXAMPLES = '''
|
|||||||
name: my-volume
|
name: my-volume
|
||||||
state: present
|
state: present
|
||||||
region: par1
|
region: par1
|
||||||
organization: "{{ scw_org }}"
|
project: "{{ scw_org }}"
|
||||||
"size": 10000000000
|
"size": 10000000000
|
||||||
volume_type: l_ssd
|
volume_type: l_ssd
|
||||||
register: server_creation_check_task
|
register: server_creation_check_task
|
||||||
@@ -93,7 +98,7 @@ data:
|
|||||||
"export_uri": null,
|
"export_uri": null,
|
||||||
"id": "c675f420-cfeb-48ff-ba2a-9d2a4dbe3fcd",
|
"id": "c675f420-cfeb-48ff-ba2a-9d2a4dbe3fcd",
|
||||||
"name": "volume-0-3",
|
"name": "volume-0-3",
|
||||||
"organization": "000a115d-2852-4b0a-9ce8-47f1134ba95a",
|
"project": "000a115d-2852-4b0a-9ce8-47f1134ba95a",
|
||||||
"server": null,
|
"server": null,
|
||||||
"size": 10000000000,
|
"size": 10000000000,
|
||||||
"volume_type": "l_ssd"
|
"volume_type": "l_ssd"
|
||||||
@@ -106,31 +111,37 @@ from ansible.module_utils.basic import AnsibleModule
|
|||||||
|
|
||||||
|
|
||||||
def core(module):
|
def core(module):
|
||||||
|
region = module.params["region"]
|
||||||
state = module.params['state']
|
state = module.params['state']
|
||||||
name = module.params['name']
|
name = module.params['name']
|
||||||
organization = module.params['organization']
|
organization = module.params['organization']
|
||||||
|
project = module.params['project']
|
||||||
size = module.params['size']
|
size = module.params['size']
|
||||||
volume_type = module.params['volume_type']
|
volume_type = module.params['volume_type']
|
||||||
|
module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
|
||||||
|
|
||||||
account_api = Scaleway(module)
|
account_api = Scaleway(module)
|
||||||
response = account_api.get('volumes')
|
response = account_api.get('volumes')
|
||||||
status_code = response.status_code
|
status_code = response.status_code
|
||||||
volumes_json = response.json
|
volumes_json = response.json
|
||||||
|
|
||||||
|
if project is None:
|
||||||
|
project = organization
|
||||||
|
|
||||||
if not response.ok:
|
if not response.ok:
|
||||||
module.fail_json(msg='Error getting volume [{0}: {1}]'.format(
|
module.fail_json(msg='Error getting volume [{0}: {1}]'.format(
|
||||||
status_code, response.json['message']))
|
status_code, response.json['message']))
|
||||||
|
|
||||||
volumeByName = None
|
volumeByName = None
|
||||||
for volume in volumes_json['volumes']:
|
for volume in volumes_json['volumes']:
|
||||||
if volume['organization'] == organization and volume['name'] == name:
|
if volume['project'] == project and volume['name'] == name:
|
||||||
volumeByName = volume
|
volumeByName = volume
|
||||||
|
|
||||||
if state in ('present',):
|
if state in ('present',):
|
||||||
if volumeByName is not None:
|
if volumeByName is not None:
|
||||||
module.exit_json(changed=False)
|
module.exit_json(changed=False)
|
||||||
|
|
||||||
payload = {'name': name, 'organization': organization, 'size': size, 'volume_type': volume_type}
|
payload = {'name': name, 'project': project, 'size': size, 'volume_type': volume_type}
|
||||||
|
|
||||||
response = account_api.post('/volumes', payload)
|
response = account_api.post('/volumes', payload)
|
||||||
|
|
||||||
@@ -161,6 +172,7 @@ def main():
|
|||||||
state=dict(default='present', choices=['absent', 'present']),
|
state=dict(default='present', choices=['absent', 'present']),
|
||||||
name=dict(required=True),
|
name=dict(required=True),
|
||||||
size=dict(type='int'),
|
size=dict(type='int'),
|
||||||
|
project=dict(),
|
||||||
organization=dict(),
|
organization=dict(),
|
||||||
volume_type=dict(),
|
volume_type=dict(),
|
||||||
region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
|
region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
|
||||||
@@ -168,6 +180,12 @@ def main():
|
|||||||
module = AnsibleModule(
|
module = AnsibleModule(
|
||||||
argument_spec=argument_spec,
|
argument_spec=argument_spec,
|
||||||
supports_check_mode=True,
|
supports_check_mode=True,
|
||||||
|
mutually_exclusive=[
|
||||||
|
('organization', 'project'),
|
||||||
|
],
|
||||||
|
required_one_of=[
|
||||||
|
('organization', 'project'),
|
||||||
|
],
|
||||||
)
|
)
|
||||||
|
|
||||||
core(module)
|
core(module)
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ description:
|
|||||||
- Gather information about the Scaleway volumes available.
|
- Gather information about the Scaleway volumes available.
|
||||||
author:
|
author:
|
||||||
- "Yanis Guenane (@Spredzy)"
|
- "Yanis Guenane (@Spredzy)"
|
||||||
- "Remy Leone (@sieben)"
|
- "Remy Leone (@remyleone)"
|
||||||
extends_documentation_fragment:
|
extends_documentation_fragment:
|
||||||
- community.general.scaleway
|
- community.general.scaleway
|
||||||
|
|
||||||
|
|||||||
@@ -117,9 +117,10 @@ state:
|
|||||||
'''
|
'''
|
||||||
|
|
||||||
import os
|
import os
|
||||||
from distutils.version import LooseVersion
|
|
||||||
from ansible.module_utils.basic import AnsibleModule
|
from ansible.module_utils.basic import AnsibleModule
|
||||||
|
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||||
|
|
||||||
|
|
||||||
PACKAGE_STATE_MAP = dict(
|
PACKAGE_STATE_MAP = dict(
|
||||||
present="--install",
|
present="--install",
|
||||||
|
|||||||
@@ -60,6 +60,7 @@ extends_documentation_fragment:
|
|||||||
- community.general.redis.documentation
|
- community.general.redis.documentation
|
||||||
|
|
||||||
seealso:
|
seealso:
|
||||||
|
- module: community.general.redis_data_incr
|
||||||
- module: community.general.redis_data_info
|
- module: community.general.redis_data_info
|
||||||
- module: community.general.redis
|
- module: community.general.redis
|
||||||
'''
|
'''
|
||||||
|
|||||||
187
plugins/modules/database/misc/redis_data_incr.py
Normal file
187
plugins/modules/database/misc/redis_data_incr.py
Normal file
@@ -0,0 +1,187 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright: (c) 2021, Andreas Botzner <andreas at botzner dot com>
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import absolute_import, division, print_function
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: redis_data_incr
|
||||||
|
short_description: Increment keys in Redis
|
||||||
|
version_added: 4.0.0
|
||||||
|
description:
|
||||||
|
- Increment integers or float keys in Redis database and get new value.
|
||||||
|
- Default increment for all keys is 1. For specific increments use the
|
||||||
|
I(increment_int) and I(increment_float) options.
|
||||||
|
- When using I(check_mode) the module will try to calculate the value that
|
||||||
|
Redis would return. If the key is not present, 0.0 is used as value.
|
||||||
|
author: "Andreas Botzner (@paginabianca)"
|
||||||
|
options:
|
||||||
|
key:
|
||||||
|
description:
|
||||||
|
- Database key.
|
||||||
|
type: str
|
||||||
|
required: true
|
||||||
|
increment_int:
|
||||||
|
description:
|
||||||
|
- Integer amount to increment the key by.
|
||||||
|
required: false
|
||||||
|
type: int
|
||||||
|
increment_float:
|
||||||
|
description:
|
||||||
|
- Float amount to increment the key by.
|
||||||
|
- This only works with keys that contain float values
|
||||||
|
in their string representation.
|
||||||
|
type: float
|
||||||
|
required: false
|
||||||
|
|
||||||
|
|
||||||
|
extends_documentation_fragment:
|
||||||
|
- community.general.redis.documentation
|
||||||
|
|
||||||
|
notes:
|
||||||
|
- For C(check_mode) to work, the specified I(redis_user) needs permission to
|
||||||
|
run the C(GET) command on the key, otherwise the module will fail.
|
||||||
|
|
||||||
|
seealso:
|
||||||
|
- module: community.general.redis_data
|
||||||
|
- module: community.general.redis_data_info
|
||||||
|
- module: community.general.redis
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
- name: Increment integer key foo on localhost with no username and print new value
|
||||||
|
community.general.redis_data_incr:
|
||||||
|
login_host: localhost
|
||||||
|
login_password: supersecret
|
||||||
|
key: foo
|
||||||
|
increment_int: 1
|
||||||
|
register: result
|
||||||
|
- name: Print new value
|
||||||
|
debug:
|
||||||
|
var: result.value
|
||||||
|
|
||||||
|
- name: Increment float key foo by 20.4
|
||||||
|
community.general.redis_data_incr:
|
||||||
|
login_host: redishost
|
||||||
|
login_user: redisuser
|
||||||
|
login_password: somepass
|
||||||
|
key: foo
|
||||||
|
increment_float: '20.4'
|
||||||
|
'''
|
||||||
|
|
||||||
|
RETURN = '''
|
||||||
|
value:
|
||||||
|
description: Incremented value of key
|
||||||
|
returned: on success
|
||||||
|
type: float
|
||||||
|
sample: '4039.4'
|
||||||
|
msg:
|
||||||
|
description: A short message.
|
||||||
|
returned: always
|
||||||
|
type: str
|
||||||
|
sample: 'Incremented key: foo by 20.4 to 65.9'
|
||||||
|
'''
|
||||||
|
|
||||||
|
from ansible.module_utils.basic import AnsibleModule
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.redis import (
|
||||||
|
fail_imports, redis_auth_argument_spec, RedisAnsible)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
redis_auth_args = redis_auth_argument_spec()
|
||||||
|
module_args = dict(
|
||||||
|
key=dict(type='str', required=True, no_log=False),
|
||||||
|
increment_int=dict(type='int', required=False),
|
||||||
|
increment_float=dict(type='float', required=False),
|
||||||
|
)
|
||||||
|
module_args.update(redis_auth_args)
|
||||||
|
|
||||||
|
module = AnsibleModule(
|
||||||
|
argument_spec=module_args,
|
||||||
|
supports_check_mode=True,
|
||||||
|
mutually_exclusive=[['increment_int', 'increment_float']],
|
||||||
|
)
|
||||||
|
fail_imports(module)
|
||||||
|
|
||||||
|
redis = RedisAnsible(module)
|
||||||
|
key = module.params['key']
|
||||||
|
increment_float = module.params['increment_float']
|
||||||
|
increment_int = module.params['increment_int']
|
||||||
|
increment = 1
|
||||||
|
if increment_float is not None:
|
||||||
|
increment = increment_float
|
||||||
|
elif increment_int is not None:
|
||||||
|
increment = increment_int
|
||||||
|
|
||||||
|
result = {'changed': False}
|
||||||
|
if module.check_mode:
|
||||||
|
value = 0.0
|
||||||
|
try:
|
||||||
|
res = redis.connection.get(key)
|
||||||
|
if res is not None:
|
||||||
|
value = float(res)
|
||||||
|
except ValueError as e:
|
||||||
|
msg = 'Value: {0} of key: {1} is not incrementable(int or float)'.format(
|
||||||
|
res, key)
|
||||||
|
result['msg'] = msg
|
||||||
|
module.fail_json(**result)
|
||||||
|
except Exception as e:
|
||||||
|
msg = 'Failed to get value of key: {0} with exception: {1}'.format(
|
||||||
|
key, str(e))
|
||||||
|
result['msg'] = msg
|
||||||
|
module.fail_json(**result)
|
||||||
|
msg = 'Incremented key: {0} by {1} to {2}'.format(
|
||||||
|
key, increment, value + increment)
|
||||||
|
result['msg'] = msg
|
||||||
|
result['value'] = float(value + increment)
|
||||||
|
module.exit_json(**result)
|
||||||
|
|
||||||
|
if increment_float is not None:
|
||||||
|
try:
|
||||||
|
value = redis.connection.incrbyfloat(key, increment)
|
||||||
|
msg = 'Incremented key: {0} by {1} to {2}'.format(
|
||||||
|
key, increment, value)
|
||||||
|
result['msg'] = msg
|
||||||
|
result['value'] = float(value)
|
||||||
|
result['changed'] = True
|
||||||
|
module.exit_json(**result)
|
||||||
|
except Exception as e:
|
||||||
|
msg = 'Failed to increment key: {0} by {1} with exception: {2}'.format(
|
||||||
|
key, increment, str(e))
|
||||||
|
result['msg'] = msg
|
||||||
|
module.fail_json(**result)
|
||||||
|
elif increment_int is not None:
|
||||||
|
try:
|
||||||
|
value = redis.connection.incrby(key, increment)
|
||||||
|
msg = 'Incremented key: {0} by {1} to {2}'.format(
|
||||||
|
key, increment, value)
|
||||||
|
result['msg'] = msg
|
||||||
|
result['value'] = float(value)
|
||||||
|
result['changed'] = True
|
||||||
|
module.exit_json(**result)
|
||||||
|
except Exception as e:
|
||||||
|
msg = 'Failed to increment key: {0} by {1} with exception: {2}'.format(
|
||||||
|
key, increment, str(e))
|
||||||
|
result['msg'] = msg
|
||||||
|
module.fail_json(**result)
|
||||||
|
else:
|
||||||
|
try:
|
||||||
|
value = redis.connection.incr(key)
|
||||||
|
msg = 'Incremented key: {0} to {1}'.format(key, value)
|
||||||
|
result['msg'] = msg
|
||||||
|
result['value'] = float(value)
|
||||||
|
result['changed'] = True
|
||||||
|
module.exit_json(**result)
|
||||||
|
except Exception as e:
|
||||||
|
msg = 'Failed to increment key: {0} with exception: {1}'.format(
|
||||||
|
key, str(e))
|
||||||
|
result['msg'] = msg
|
||||||
|
module.fail_json(**result)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
||||||
@@ -26,6 +26,8 @@ extends_documentation_fragment:
|
|||||||
- community.general.redis
|
- community.general.redis
|
||||||
|
|
||||||
seealso:
|
seealso:
|
||||||
|
- module: community.general.redis_data
|
||||||
|
- module: community.general.redis_data_incr
|
||||||
- module: community.general.redis_info
|
- module: community.general.redis_info
|
||||||
- module: community.general.redis
|
- module: community.general.redis
|
||||||
'''
|
'''
|
||||||
|
|||||||
301
plugins/modules/database/mssql/mssql_script.py
Normal file
301
plugins/modules/database/mssql/mssql_script.py
Normal file
@@ -0,0 +1,301 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
|
||||||
|
# Copyright: (c) 2021, Kris Budde <kris@budd.ee
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
DOCUMENTATION = r'''
|
||||||
|
---
|
||||||
|
module: mssql_script
|
||||||
|
|
||||||
|
short_description: Execute SQL scripts on a MSSQL database
|
||||||
|
|
||||||
|
version_added: "4.0.0"
|
||||||
|
|
||||||
|
description:
|
||||||
|
- Execute SQL scripts on a MSSQL database.
|
||||||
|
|
||||||
|
options:
|
||||||
|
name:
|
||||||
|
description: Database to run script against.
|
||||||
|
aliases: [ db ]
|
||||||
|
default: ''
|
||||||
|
type: str
|
||||||
|
login_user:
|
||||||
|
description: The username used to authenticate with.
|
||||||
|
type: str
|
||||||
|
login_password:
|
||||||
|
description: The password used to authenticate with.
|
||||||
|
type: str
|
||||||
|
login_host:
|
||||||
|
description: Host running the database.
|
||||||
|
type: str
|
||||||
|
required: true
|
||||||
|
login_port:
|
||||||
|
description: Port of the MSSQL server. Requires I(login_host) be defined as well.
|
||||||
|
default: 1433
|
||||||
|
type: int
|
||||||
|
script:
|
||||||
|
description:
|
||||||
|
- The SQL script to be executed.
|
||||||
|
- Script can contain multiple SQL statements. Multiple Batches can be separated by C(GO) command.
|
||||||
|
- Each batch must return at least one result set.
|
||||||
|
required: true
|
||||||
|
type: str
|
||||||
|
output:
|
||||||
|
description:
|
||||||
|
- With C(default) each row will be returned as a list of values. See C(query_results).
|
||||||
|
- Output format C(dict) will return dictionary with the column names as keys. See C(query_results_dict).
|
||||||
|
- C(dict) requires named columns to be returned by each query otherwise an error is thrown.
|
||||||
|
choices: [ "dict", "default" ]
|
||||||
|
default: 'default'
|
||||||
|
type: str
|
||||||
|
params:
|
||||||
|
description: |
|
||||||
|
Parameters passed to the script as SQL parameters. ('SELECT %(name)s"' with C(example: '{"name": "John Doe"}).)'
|
||||||
|
type: dict
|
||||||
|
notes:
|
||||||
|
- Requires the pymssql Python package on the remote host. For Ubuntu, this
|
||||||
|
is as easy as C(pip install pymssql) (See M(ansible.builtin.pip).)
|
||||||
|
requirements:
|
||||||
|
- python >= 2.7
|
||||||
|
- pymssql
|
||||||
|
|
||||||
|
author:
|
||||||
|
- Kris Budde (@kbudde)
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = r'''
|
||||||
|
- name: Check DB connection
|
||||||
|
community.general.mssql_script:
|
||||||
|
login_user: "{{ mssql_login_user }}"
|
||||||
|
login_password: "{{ mssql_login_password }}"
|
||||||
|
login_host: "{{ mssql_host }}"
|
||||||
|
login_port: "{{ mssql_port }}"
|
||||||
|
db: master
|
||||||
|
script: "SELECT 1"
|
||||||
|
|
||||||
|
- name: Query with parameter
|
||||||
|
community.general.mssql_script:
|
||||||
|
login_user: "{{ mssql_login_user }}"
|
||||||
|
login_password: "{{ mssql_login_password }}"
|
||||||
|
login_host: "{{ mssql_host }}"
|
||||||
|
login_port: "{{ mssql_port }}"
|
||||||
|
script: |
|
||||||
|
SELECT name, state_desc FROM sys.databases WHERE name = %(dbname)s
|
||||||
|
params:
|
||||||
|
dbname: msdb
|
||||||
|
register: result_params
|
||||||
|
- assert:
|
||||||
|
that:
|
||||||
|
- result_params.query_results[0][0][0][0] == 'msdb'
|
||||||
|
- result_params.query_results[0][0][0][1] == 'ONLINE'
|
||||||
|
|
||||||
|
- name: two batches with default output
|
||||||
|
community.general.mssql_script:
|
||||||
|
login_user: "{{ mssql_login_user }}"
|
||||||
|
login_password: "{{ mssql_login_password }}"
|
||||||
|
login_host: "{{ mssql_host }}"
|
||||||
|
login_port: "{{ mssql_port }}"
|
||||||
|
script: |
|
||||||
|
SELECT 'Batch 0 - Select 0'
|
||||||
|
SELECT 'Batch 0 - Select 1'
|
||||||
|
GO
|
||||||
|
SELECT 'Batch 1 - Select 0'
|
||||||
|
register: result_batches
|
||||||
|
- assert:
|
||||||
|
that:
|
||||||
|
- result_batches.query_results | length == 2 # two batch results
|
||||||
|
- result_batches.query_results[0] | length == 2 # two selects in first batch
|
||||||
|
- result_batches.query_results[0][0] | length == 1 # one row in first select
|
||||||
|
- result_batches.query_results[0][0][0] | length == 1 # one column in first row
|
||||||
|
- result_batches.query_results[0][0][0][0] == 'Batch 0 - Select 0' # each row contains a list of values.
|
||||||
|
|
||||||
|
- name: two batches with dict output
|
||||||
|
community.general.mssql_script:
|
||||||
|
login_user: "{{ mssql_login_user }}"
|
||||||
|
login_password: "{{ mssql_login_password }}"
|
||||||
|
login_host: "{{ mssql_host }}"
|
||||||
|
login_port: "{{ mssql_port }}"
|
||||||
|
output: dict
|
||||||
|
script: |
|
||||||
|
SELECT 'Batch 0 - Select 0' as b0s0
|
||||||
|
SELECT 'Batch 0 - Select 1' as b0s1
|
||||||
|
GO
|
||||||
|
SELECT 'Batch 1 - Select 0' as b1s0
|
||||||
|
register: result_batches_dict
|
||||||
|
- assert:
|
||||||
|
that:
|
||||||
|
- result_batches_dict.query_results_dict | length == 2 # two batch results
|
||||||
|
- result_batches_dict.query_results_dict[0] | length == 2 # two selects in first batch
|
||||||
|
- result_batches_dict.query_results_dict[0][0] | length == 1 # one row in first select
|
||||||
|
- result_batches_dict.query_results_dict[0][0][0]['b0s0'] == 'Batch 0 - Select 0' # column 'b0s0' of first row
|
||||||
|
'''
|
||||||
|
|
||||||
|
RETURN = r'''
|
||||||
|
query_results:
|
||||||
|
description: List of batches (queries separated by C(GO) keyword).
|
||||||
|
type: list
|
||||||
|
elements: list
|
||||||
|
returned: success and I(output=default)
|
||||||
|
sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]]
|
||||||
|
contains:
|
||||||
|
queries:
|
||||||
|
description:
|
||||||
|
- List of result sets of each query.
|
||||||
|
- If a query returns no results, the results of this and all the following queries will not be included in the output.
|
||||||
|
- Use the C(GO) keyword in I(script) to separate queries.
|
||||||
|
type: list
|
||||||
|
elements: list
|
||||||
|
contains:
|
||||||
|
rows:
|
||||||
|
description: List of rows returned by query.
|
||||||
|
type: list
|
||||||
|
elements: list
|
||||||
|
contains:
|
||||||
|
column_value:
|
||||||
|
description:
|
||||||
|
- List of column values.
|
||||||
|
- Any non-standard JSON type is converted to string.
|
||||||
|
type: list
|
||||||
|
example: ["Batch 0 - Select 0"]
|
||||||
|
returned: success, if output is default
|
||||||
|
query_results_dict:
|
||||||
|
description: List of batches (queries separated by C(GO) keyword).
|
||||||
|
type: list
|
||||||
|
elements: list
|
||||||
|
returned: success and I(output=dict)
|
||||||
|
sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]]
|
||||||
|
contains:
|
||||||
|
queries:
|
||||||
|
description:
|
||||||
|
- List of result sets of each query.
|
||||||
|
- If a query returns no results, the results of this and all the following queries will not be included in the output.
|
||||||
|
Use 'GO' keyword to separate queries.
|
||||||
|
type: list
|
||||||
|
elements: list
|
||||||
|
contains:
|
||||||
|
rows:
|
||||||
|
description: List of rows returned by query.
|
||||||
|
type: list
|
||||||
|
elements: list
|
||||||
|
contains:
|
||||||
|
column_dict:
|
||||||
|
description:
|
||||||
|
- Dictionary of column names and values.
|
||||||
|
- Any non-standard JSON type is converted to string.
|
||||||
|
type: dict
|
||||||
|
example: {"col_name": "Batch 0 - Select 0"}
|
||||||
|
returned: success, if output is dict
|
||||||
|
'''
|
||||||
|
|
||||||
|
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||||
|
import traceback
|
||||||
|
import json
|
||||||
|
# Optional-dependency guard for pymssql.
# PYMSSQL_IMP_ERR holds the formatted import traceback (or None on success)
# so run_module() can report it via missing_required_lib(); MSSQL_FOUND
# records whether the driver is usable.
PYMSSQL_IMP_ERR = None
try:
    import pymssql
except ImportError:
    PYMSSQL_IMP_ERR = traceback.format_exc()
    MSSQL_FOUND = False
else:
    MSSQL_FOUND = True
|
||||||
|
|
||||||
|
|
||||||
|
def clean_output(value):
    """Coerce *value* to ``str``.

    Used as the ``default=`` hook for ``json.dumps`` in run_module() so
    that values with no native JSON representation (dates, decimals,
    binary, ...) are serialized as their string form.
    """
    return str(value)
|
||||||
|
|
||||||
|
|
||||||
|
def run_module():
    """Execute a T-SQL script on an MSSQL host via pymssql.

    Reads module parameters, connects, runs each batch of ``script``
    (batches separated by a ``GO`` line), and exits through
    AnsibleModule with the collected result sets under
    ``query_results`` or ``query_results_dict`` depending on ``output``.
    Never returns normally: every path ends in exit_json()/fail_json().
    """
    module_args = dict(
        name=dict(required=False, aliases=['db'], default=''),
        login_user=dict(),
        login_password=dict(no_log=True),
        login_host=dict(required=True),
        login_port=dict(type='int', default=1433),
        script=dict(required=True),
        output=dict(default='default', choices=['dict', 'default']),
        params=dict(type='dict'),
    )

    result = dict(
        changed=False,
    )

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )
    # Fail early with the captured import traceback if pymssql is missing.
    if not MSSQL_FOUND:
        module.fail_json(msg=missing_required_lib(
            'pymssql'), exception=PYMSSQL_IMP_ERR)

    db = module.params['name']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    login_host = module.params['login_host']
    login_port = module.params['login_port']
    script = module.params['script']
    output = module.params['output']
    sql_params = module.params['params']

    # pymssql accepts a "host:port" string; only append the port when it
    # differs from the MSSQL default of 1433.
    login_querystring = login_host
    if login_port != 1433:
        login_querystring = "%s:%s" % (login_host, login_port)

    # A user without a password is rejected up front rather than letting
    # the connection attempt fail with a less helpful error.
    if login_user is not None and login_password is None:
        module.fail_json(
            msg="when supplying login_user argument, login_password must also be provided")

    try:
        conn = pymssql.connect(
            user=login_user, password=login_password, host=login_querystring, database=db)
        cursor = conn.cursor()
    except Exception as e:
        if "Unknown database" in str(e):
            # NOTE(review): assumes pymssql raises with args == (errno, errstr)
            # for this case — confirm against the pymssql exception classes.
            errno, errstr = e.args
            module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
        else:
            module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your "
                                 "@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf")

    # Autocommit so DDL/DML statements in the script take effect without
    # an explicit COMMIT in the user's script.
    conn.autocommit(True)

    query_results_key = 'query_results'
    if output == 'dict':
        # Replace the plain-tuple cursor with a dict cursor so each row is
        # returned as {column_name: value}.
        cursor = conn.cursor(as_dict=True)
        query_results_key = 'query_results_dict'

    # Split into batches on a GO keyword alone on its own line.
    # NOTE(review): this only matches '\nGO\n' exactly — a leading/trailing
    # GO, CRLF line endings, or surrounding whitespace are not recognized.
    queries = script.split('\nGO\n')
    # Running an arbitrary script is always reported as a change,
    # including in check mode (where nothing is actually executed).
    result['changed'] = True
    if module.check_mode:
        module.exit_json(**result)

    query_results = []
    try:
        for query in queries:
            cursor.execute(query, sql_params)
            qry_result = []
            rows = cursor.fetchall()
            # Drain every result set of this batch; presumably fetchall()
            # returns a falsy value once exhausted — verify against pymssql
            # cursor semantics for multi-result-set batches.
            while rows:
                qry_result.append(rows)
                rows = cursor.fetchall()
            query_results.append(qry_result)
    except Exception as e:
        # 'query' is the batch that failed; results of earlier batches are
        # dropped from the failure output (only **result is included).
        return module.fail_json(msg="query failed", query=query, error=str(e), **result)

    # ensure that the result is json serializable
    qry_results = json.loads(json.dumps(query_results, default=clean_output))

    result[query_results_key] = qry_results
    module.exit_json(**result)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Ansible entry point; delegates to run_module()."""
    run_module()
|
||||||
|
|
||||||
|
|
||||||
|
# Ansible executes modules as standalone scripts.
if __name__ == '__main__':
    main()
|
||||||
1
plugins/modules/dnf_versionlock.py
Symbolic link
1
plugins/modules/dnf_versionlock.py
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
./packaging/os/dnf_versionlock.py
|
||||||
1
plugins/modules/dnsimple_info.py
Symbolic link
1
plugins/modules/dnsimple_info.py
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
./net_tools/dnsimple_info.py
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user