Mirror of https://github.com/ansible-collections/community.general.git
(synced 2026-04-29 09:56:53 +00:00)

Compare commits

434 commits (SHA1 prefixes; author and date columns were empty in this mirror listing):
026a54a634, 6b8698a402, 225818b1d4, 02cd2e9252, f6edbe1f98, 973da9670f, e5a5e5a3ff, 7279ea33dd, 7b302f1d06, 46b60dfdca,
9df16f405b, af5920a4f2, 2295837edf, 099e17d408, 9943c564d5, 8dd0a14666, 35442f9e28, 9101fb5046, 5f968f609b, 3ed0a9c141,
7ef0d9371b, d053b6a1dc, 44e6f3bd97, fcf9e08dfa, 9b973ce633, 0bc29c344d, cc52e194be, 9e112272be, 8361aecf15, ebe214074e,
8db1ffeb49, 4ea6ad08fa, 5fb79182e9, 01f50d249a, 5d3ea0977a, 0c213d76c5, 98e0f0b6ff, 421b5253f8, 5e1ff39c8d, a87aa9e360,
744c92ae0d, 5392e0f1cc, d64e1e8e64, 7d3f6be2a2, 624c5f7b68, 3db3608335, af4ac4813e, 5dc60cbc9e, 8f6ea3de8e, e4765f2e2b,
a91255824e, e60daa8509, fd2528dc3c, 72b282fe85, cb49b96b4d, 2aca1a910c, c44f07a405, e113c8cf11, 552e1e826c, 516107aaf6,
fb7797702a, 21478e3be0, 3e6974815f, 737789b1e3, 3176c08b5b, 40e0379112, e3a12f1e54, 3e740112a0, 8ce81d24f2, ba7f680486,
637eb8a275, 7e608db3e5, afb16dc0a2, 6d03887c55, f2d980f75a, d0b27d68e2, 70747b0db8, c9728c2869, adced191b0, adda78b162,
5e010994d9, 51bf8ed298, c3b1a3a873, 5da6dc7d72, d96600d4de, 1f5703d0af, 170a099101, 04df03f365, 76f9bc5b69, f9fdf816eb,
7ff20a5e98, 865a8e9961, f61a6bd991, f4bae2d717, 66e3c19a57, 24210b32cf, 6932a937c5, a467da2f39, 7c76d92ed0, 74c7cee446,
7b532be10d, 38616e43f9, dfac632d25, 270754dc10, fef77b3c9c, b022b87362, 2c92db98d5, b66df6932e, 5060f19a05, 99652cb06d,
6aeeab18c2, 2a2bfb6c5b, 773ab9ba25, 4fde0617c8, 8cc343110f, 37feac8f68, 7392a8e52c, bfebc93f15, b6e822aad2, 2a6f91b4e4,
8d94d16eec, f2c08bebd6, e7f66d9bc1, 21e402e2bb, 6d4c5b76a4, a1fd642008, 8298b2c7c1, 25ff8d4179, 145ceb693b, 40d094e63a,
6988ea052d, f26883f45f, 03b312c0ae, a634cc2928, 14f23fbebe, 77aabcd8f5, 3a1f23323c, 6ccb9a9813, e6e4260926, 7111edd631,
a84a004308, ac5bbe666e, 0273420e70, ce8d9d56ca, 7f6623657f, ccdec10c67, 09abc09b52, c87ba3a626, cda6248cea, 2cbd8ba71e,
eb2b8f4409, a27b1a135d, db6cb07028, 6ccffc3de5, 17cc574b04, 1f2c352b83, b90f87f3d8, b23fdc3be3, bc83586c15, b765938b79,
4ed5177d60, 88ac419c0e, cc63dd884c, d817fc7215, 64897d762c, 64cbf1900b, 7e23ef3801, 27fc80895c, 0754449d60, 460cd523fe,
ad2d899713, 3182be1a2f, b726110f1f, 24a4d6e685, 1badcffe1c, b87196348a, 21423ca6a0, 4926f15d86, 9466103a4a, b26df2a008,
cba4fa2fe8, 833530ab47, 1f0361a1c6, 4df53dbacf, f25519e308, 1bb47ad73e, fd3e84fcd6, f75471e7d2, aadf1d4f6a, f5ff54979a,
957a74b525, 29afed337a, e748acdd51, eed45fe6aa, cce52e1812, 0bcce340ed, da8a11b8d8, 9ed0603072, e2fa11b381, 36f7ff15e9,
8eac491057, 607f3d83a0, d6cd90838f, 22e0a6dac7, 3751e188ca, cd4f3ca445, 1d05f81e53, 4ef80ecd46, 68e184eba8, 5dcd2c7df5,
134a0dc7e2, 13e3e176fb, eb98be580d, 49ef8b1900, 19d22d605a, f17b10bfa2, 258eb68022, 264c98189c, 7aec01190a, 00fd2847e4,
94ea18f1cb, 0b42aca72f, 2658bf31cd, 869e1a1eab, d25b6e7681, 8beb5d70c5, f9fecf12e7, b165337bbe, 6572f46998, b4ae2ce44d,
baec510c40, 96cda3a48a, 9dc2e2d032, 86c0af6cbb, f5a2d32caa, c11b7b4c86, cdbcf8abc7, 2e781bf8c4, 8f588ac745, 7cd9b839c5,
e4651c1bf3, 0342cf1730, 269df75421, bc32914424, 21ad7c8bfa, ae3fe51929, 5d6047a21a, c0d11f631f, 173c8b1dfa, 005db8177e,
8595601708, 7229ef4ac4, 5170c26ffd, 64049a35de, 3427e02fce, f2e1abfe5e, fc0d813d5f, b51e008358, b32b69742b, 73ee9702db,
1cddae2265, ea2df93116, e646d21935, f522802977, 3710f28efb, 2f24bcef8f, 6223b755e1, 813c6e232d, 3e4aae56bd, b099a465c5,
d2b0a0d2bf, fe0a5fd85f, f3a2750205, 5657889b28, 1b2fbd72de, 5a2e7f9b1b, c5ff49db56, 43dc6ba533, eb1f0c28a9, 41993d44e2,
9345db2001, 527417dce9, 7bca37a098, 4f18ecacfe, cf17033278, 1c07537dd5, a886633758, 2e11c2dfa4, 43e599abb1, ef11714708,
6cd2b4f93b, a509c08b3a, 4f036cc7f8, bc2403042a, f67708f91c, 2587a2588d, 5a93168d88, e962da8b9d, 0d598c3542, 7b73a83b91,
82cdc354fa, a125458748, df59034d75, 6a029bcba3, 1790dbd5e1, 7933fe7ea3, 45990b464e, 22a400d626, 203747027e, ffd51d1e22,
4e13c8b969, bccf77a461, 6b3c797bf6, a81e94ddc7, e56dafde94, 767a296b60, 963bbaccb7, 9358640ed9, 2846242e95, ce934aa49b,
083bd49976, 2cc72c2213, 1f4a98c8cc, 53b2d69bd7, 981c7849ce, 258471b267, 3020b305bb, 66cbd926f2, 37fb2137b3, f083a0f4e7,
9dc82793c4, aab93949e1, c8d6181f64, c286758248, 6e685e740e, 695599e7d5, 29e7fae303, 303000c1a1, 371ffaeabe, 42854887eb,
5386f7d8cd, e86fcf76fc, 4d2895676f, 97b3ad6843, d7ecd40118, fbf3b85d6b, 1bb1e882df, a7dbefcaf1, 9d5490e510, 2245742255,
6058a5e5b1, 224567e604, 1eb872ccea, ba18d6232a, 6115f18837, 3f4e3dd2b6, a573661458, 0977152b39, 91fe881226, b78c0cf64f,
c79ccbbf7e, a80761a8a4, 213419fb0c, 39a1f280e3, a13d407247, ba3903e6e0, 4b6b00d249, 0a0b0cb42d, d0b39271b3, f07cb76b09,
09031fc9e6, 4481d0a4a9, 5861388f11, c581daa48a, 75e2de3581, 6c7bee1225, eafcdfbceb, 82a764446b, a0032f3513, 8444367cd0,
de5fbe457f, 40b35acee2, 9835deb17f, 6fe9cf11f1, d3ebdd2874, 4275bfe87b, 2f87b8c63f, 100fffb4c1, 1206900488, c28ae26636,
e1e626cdcb, f8d35eeb14, c44298c437, 1b580476a8, 44d2d62d38, 82b2d294b7, 812fbef786, 9d795c334b, 512d412eb4, 8f0ee6966f,
3af9e39043, 7b78512c59, 9f0913bf73, aea851018b, 69c79f618e, 6a51ba5169, 52e8e7e928, d71b90be07, 1293b0ac91, d4330a3e54,
7c3bf118ca, 742b438b32, c59ebc82cd, 36a4fbfe60, ad3e7bc21d, 66a7fdfeba, 77e25a7503, 05ac79cee6, 22b4fc8d8b, 6252f3bded,
15c4e08295, a77bb090b3, 99958cea49, b2802dc8eb, 8617568146, 7569d6315c, ab5143b15d, a71ff614f0, bfd3e15074, 90b815de50,
056e16baa5, 890cf636a0, 8c2b2aeb83, 3c597d8b04
@@ -1,9 +0,0 @@

```markdown
<!--
Copyright (c) Ansible Project
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
SPDX-License-Identifier: GPL-3.0-or-later
-->

## Azure Pipelines Configuration

Please see the [Documentation](https://github.com/ansible/community/wiki/Testing:-Azure-Pipelines) for more information.
```
@@ -1,488 +0,0 @@

```yaml
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

trigger:
  batch: true
  branches:
    include:
      - main
      - stable-*

pr:
  autoCancel: true
  branches:
    include:
      - main
      - stable-*

schedules:
  - cron: 0 8 * * *
    displayName: Nightly (main)
    always: true
    branches:
      include:
        - main
  - cron: 0 10 * * *
    displayName: Nightly (active stable branches)
    always: true
    branches:
      include:
        - stable-7
        - stable-6
  - cron: 0 11 * * 0
    displayName: Weekly (old stable branches)
    always: true
    branches:
      include:
        - stable-5

variables:
  - name: checkoutPath
    value: ansible_collections/community/general
  - name: coverageBranches
    value: main
  - name: pipelinesCoverage
    value: coverage
  - name: entryPoint
    value: tests/utils/shippable/shippable.sh
  - name: fetchDepth
    value: 0

resources:
  containers:
    - container: default
      image: quay.io/ansible/azure-pipelines-test-container:3.0.0

pool: Standard

stages:
### Sanity
  - stage: Sanity_devel
    displayName: Sanity devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: devel/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
            - test: extra
  - stage: Sanity_2_15
    displayName: Sanity 2.15
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: 2.15/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
  - stage: Sanity_2_14
    displayName: Sanity 2.14
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: 2.14/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
  - stage: Sanity_2_13
    displayName: Sanity 2.13
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: 2.13/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
  - stage: Sanity_2_12
    displayName: Sanity 2.12
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: 2.12/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
### Units
  - stage: Units_devel
    displayName: Units devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: devel/units/{0}/1
          targets:
            - test: 2.7
            - test: 3.5
            - test: 3.6
            - test: 3.7
            - test: 3.8
            - test: 3.9
            - test: '3.10'
            - test: '3.11'
  - stage: Units_2_15
    displayName: Units 2.15
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.15/units/{0}/1
          targets:
            - test: "3.10"
  - stage: Units_2_14
    displayName: Units 2.14
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.14/units/{0}/1
          targets:
            - test: 3.9
  - stage: Units_2_13
    displayName: Units 2.13
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.13/units/{0}/1
          targets:
            - test: 2.7
            - test: 3.8
  - stage: Units_2_12
    displayName: Units 2.12
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.12/units/{0}/1
          targets:
            - test: 2.6
            - test: 3.8

## Remote
  - stage: Remote_devel_extra_vms
    displayName: Remote devel extra VMs
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: devel/{0}
          targets:
            - name: Alpine 3.17
              test: alpine/3.17
            # - name: Fedora 37
            #   test: fedora/37
            # - name: Ubuntu 20.04
            #   test: ubuntu/20.04
            - name: Ubuntu 22.04
              test: ubuntu/22.04
          groups:
            - vm
  - stage: Remote_devel
    displayName: Remote devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: devel/{0}
          targets:
            - name: macOS 13.2
              test: macos/13.2
            - name: RHEL 9.1
              test: rhel/9.1
            - name: FreeBSD 13.2
              test: freebsd/13.2
            - name: FreeBSD 12.4
              test: freebsd/12.4
          groups:
            - 1
            - 2
            - 3
  - stage: Remote_2_15
    displayName: Remote 2.15
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.15/{0}
          targets:
            - name: RHEL 7.9
              test: rhel/7.9
            - name: FreeBSD 13.1
              test: freebsd/13.1
          groups:
            - 1
            - 2
            - 3
  - stage: Remote_2_14
    displayName: Remote 2.14
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.14/{0}
          targets:
            - name: RHEL 9.0
              test: rhel/9.0
            - name: FreeBSD 12.3
              test: freebsd/12.3
          groups:
            - 1
            - 2
            - 3
  - stage: Remote_2_13
    displayName: Remote 2.13
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.13/{0}
          targets:
            - name: macOS 12.0
              test: macos/12.0
            - name: RHEL 8.5
              test: rhel/8.5
          groups:
            - 1
            - 2
            - 3
  - stage: Remote_2_12
    displayName: Remote 2.12
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.12/{0}
          targets:
            - name: macOS 11.1
              test: macos/11.1
            - name: RHEL 8.4
              test: rhel/8.4
            - name: FreeBSD 13.0
              test: freebsd/13.0
          groups:
            - 1
            - 2
            - 3

### Docker
  - stage: Docker_devel
    displayName: Docker devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: devel/linux/{0}
          targets:
            - name: Fedora 37
              test: fedora37
            - name: openSUSE 15
              test: opensuse15
            - name: Ubuntu 20.04
              test: ubuntu2004
            - name: Ubuntu 22.04
              test: ubuntu2204
            - name: Alpine 3
              test: alpine3
          groups:
            - 1
            - 2
            - 3
  - stage: Docker_2_15
    displayName: Docker 2.15
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.15/linux/{0}
          targets:
            - name: CentOS 7
              test: centos7
          groups:
            - 1
            - 2
            - 3
  - stage: Docker_2_14
    displayName: Docker 2.14
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.14/linux/{0}
          targets:
            - name: Fedora 36
              test: fedora36
          groups:
            - 1
            - 2
            - 3
  - stage: Docker_2_13
    displayName: Docker 2.13
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.13/linux/{0}
          targets:
            - name: Fedora 35
              test: fedora35
            - name: openSUSE 15 py2
              test: opensuse15py2
            - name: Alpine 3
              test: alpine3
          groups:
            - 1
            - 2
            - 3
  - stage: Docker_2_12
    displayName: Docker 2.12
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.12/linux/{0}
          targets:
            - name: CentOS 6
              test: centos6
            - name: Fedora 34
              test: fedora34
            - name: Ubuntu 18.04
              test: ubuntu1804
          groups:
            - 1
            - 2
            - 3

### Community Docker
  - stage: Docker_community_devel
    displayName: Docker (community images) devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: devel/linux-community/{0}
          targets:
            - name: Debian Bullseye
              test: debian-bullseye/3.9
            - name: ArchLinux
              test: archlinux/3.11
            - name: CentOS Stream 8
              test: centos-stream8/3.9
          groups:
            - 1
            - 2
            - 3

### Generic
  - stage: Generic_devel
    displayName: Generic devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: devel/generic/{0}/1
          targets:
            - test: 2.7
            - test: '3.11'
  - stage: Generic_2_15
    displayName: Generic 2.15
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.15/generic/{0}/1
          targets:
            - test: 3.9
  - stage: Generic_2_14
    displayName: Generic 2.14
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.14/generic/{0}/1
          targets:
            - test: '3.10'
  - stage: Generic_2_13
    displayName: Generic 2.13
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.13/generic/{0}/1
          targets:
            - test: 3.9
  - stage: Generic_2_12
    displayName: Generic 2.12
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.12/generic/{0}/1
          targets:
            - test: 3.8

  - stage: Summary
    condition: succeededOrFailed()
    dependsOn:
      - Sanity_devel
      - Sanity_2_12
      - Sanity_2_13
      - Sanity_2_14
      - Sanity_2_15
      - Units_devel
      - Units_2_12
      - Units_2_13
      - Units_2_14
      - Units_2_15
      - Remote_devel_extra_vms
      - Remote_devel
      - Remote_2_12
      - Remote_2_13
      - Remote_2_14
      - Remote_2_15
      - Docker_devel
      - Docker_2_12
      - Docker_2_13
      - Docker_2_14
      - Docker_2_15
      - Docker_community_devel
      # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
      # - Generic_devel
      # - Generic_2_12
      # - Generic_2_13
      # - Generic_2_14
      # - Generic_2_15
    jobs:
      - template: templates/coverage.yml
```
@@ -1,24 +0,0 @@

```bash
#!/usr/bin/env bash
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Aggregate code coverage results for later processing.

set -o pipefail -eu

agent_temp_directory="$1"

PATH="${PWD}/bin:${PATH}"

mkdir "${agent_temp_directory}/coverage/"

options=(--venv --venv-system-site-packages --color -v)

ansible-test coverage combine --group-by command --export "${agent_temp_directory}/coverage/" "${options[@]}"

if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then
    # Only analyze coverage if the installed version of ansible-test supports it.
    # Doing so allows this script to work unmodified for multiple Ansible versions.
    ansible-test coverage analyze targets generate "${agent_temp_directory}/coverage/coverage-analyze-targets.json" "${options[@]}"
fi
```
@@ -1,64 +0,0 @@

```python
#!/usr/bin/env python
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

"""
Combine coverage data from multiple jobs, keeping the data only from the most recent attempt from each job.
Coverage artifacts must be named using the format: "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}"
The recommended coverage artifact name format is: Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)
Keep in mind that Azure Pipelines does not enforce unique job display names (only names).
It is up to pipeline authors to avoid name collisions when deviating from the recommended format.
"""

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import re
import shutil
import sys


def main():
    """Main program entry point."""
    source_directory = sys.argv[1]

    if '/ansible_collections/' in os.getcwd():
        output_path = "tests/output"
    else:
        output_path = "test/results"

    destination_directory = os.path.join(output_path, 'coverage')

    if not os.path.exists(destination_directory):
        os.makedirs(destination_directory)

    jobs = {}
    count = 0

    for name in os.listdir(source_directory):
        match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)
        label = match.group('label')
        attempt = int(match.group('attempt'))
        jobs[label] = max(attempt, jobs.get(label, 0))

    for label, attempt in jobs.items():
        name = 'Coverage {attempt} {label}'.format(label=label, attempt=attempt)
        source = os.path.join(source_directory, name)
        source_files = os.listdir(source)

        for source_file in source_files:
            source_path = os.path.join(source, source_file)
            destination_path = os.path.join(destination_directory, source_file + '.' + label)
            print('"%s" -> "%s"' % (source_path, destination_path))
            shutil.copyfile(source_path, destination_path)
            count += 1

    print('Coverage file count: %d' % count)
    print('##vso[task.setVariable variable=coverageFileCount]%d' % count)
    print('##vso[task.setVariable variable=outputPath]%s' % output_path)


if __name__ == '__main__':
    main()
```
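The attempt-selection loop above is easy to verify in isolation; a minimal sketch with hypothetical artifact names (not taken from a real pipeline run):

```python
import re

# Hypothetical artifact directory names following the documented format
# "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}".
names = ['Coverage 1 Units devel Python 3.9', 'Coverage 2 Units devel Python 3.9']

jobs = {}
for name in names:
    match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)
    label = match.group('label')
    jobs[label] = max(int(match.group('attempt')), jobs.get(label, 0))

print(jobs)  # {'Units devel Python 3.9': 2} -- only the most recent attempt survives
```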
@@ -1,28 +0,0 @@

```bash
#!/usr/bin/env bash
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Check the test results and set variables for use in later steps.

set -o pipefail -eu

if [[ "$PWD" =~ /ansible_collections/ ]]; then
    output_path="tests/output"
else
    output_path="test/results"
fi

echo "##vso[task.setVariable variable=outputPath]${output_path}"

if compgen -G "${output_path}"'/junit/*.xml' > /dev/null; then
    echo "##vso[task.setVariable variable=haveTestResults]true"
fi

if compgen -G "${output_path}"'/bot/ansible-test-*' > /dev/null; then
    echo "##vso[task.setVariable variable=haveBotResults]true"
fi

if compgen -G "${output_path}"'/coverage/*' > /dev/null; then
    echo "##vso[task.setVariable variable=haveCoverageData]true"
fi
```
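The `##vso[task.setVariable ...]` lines are Azure Pipelines logging commands: a variable set this way can drive conditions on later steps in the same job. A sketch of a consuming step, following the same pattern the test template further down uses:

```yaml
steps:
  - bash: .azure-pipelines/scripts/process-results.sh
    condition: succeededOrFailed()
    displayName: Process Results
  - task: PublishTestResults@2
    # Runs only if process-results.sh found junit XML files and set the variable.
    condition: eq(variables.haveTestResults, 'true')
    inputs:
      testResultsFiles: "$(outputPath)/junit/*.xml"
    displayName: Publish Test Results
```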
@@ -1,105 +0,0 @@

```python
#!/usr/bin/env python
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

"""
Upload code coverage reports to codecov.io.
Multiple coverage files from multiple languages are accepted and aggregated after upload.
Python coverage, as well as PowerShell and Python stubs can all be uploaded.
"""

import argparse
import dataclasses
import pathlib
import shutil
import subprocess
import tempfile
import typing as t
import urllib.request


@dataclasses.dataclass(frozen=True)
class CoverageFile:
    name: str
    path: pathlib.Path
    flags: t.List[str]


@dataclasses.dataclass(frozen=True)
class Args:
    dry_run: bool
    path: pathlib.Path


def parse_args() -> Args:
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--dry-run', action='store_true')
    parser.add_argument('path', type=pathlib.Path)

    args = parser.parse_args()

    # Store arguments in a typed dataclass
    fields = dataclasses.fields(Args)
    kwargs = {field.name: getattr(args, field.name) for field in fields}

    return Args(**kwargs)


def process_files(directory: pathlib.Path) -> t.Tuple[CoverageFile, ...]:
    processed = []
    for file in directory.joinpath('reports').glob('coverage*.xml'):
        name = file.stem.replace('coverage=', '')

        # Get flags from name
        flags = name.replace('-powershell', '').split('=')  # Drop '-powershell' suffix
        flags = [flag if not flag.startswith('stub') else flag.split('-')[0] for flag in flags]  # Remove "-01" from stub files

        processed.append(CoverageFile(name, file, flags))

    return tuple(processed)


def upload_files(codecov_bin: pathlib.Path, files: t.Tuple[CoverageFile, ...], dry_run: bool = False) -> None:
    for file in files:
        cmd = [
            str(codecov_bin),
            '--name', file.name,
            '--file', str(file.path),
        ]
        for flag in file.flags:
            cmd.extend(['--flags', flag])

        if dry_run:
            print(f'DRY-RUN: Would run command: {cmd}')
            continue

        subprocess.run(cmd, check=True)


def download_file(url: str, dest: pathlib.Path, flags: int, dry_run: bool = False) -> None:
    if dry_run:
        print(f'DRY-RUN: Would download {url} to {dest} and set mode to {flags:o}')
        return

    with urllib.request.urlopen(url) as resp:
        with dest.open('w+b') as f:
            # Read data in chunks rather than all at once
            shutil.copyfileobj(resp, f, 64 * 1024)

    dest.chmod(flags)


def main():
    args = parse_args()
    url = 'https://ansible-ci-files.s3.amazonaws.com/codecov/linux/codecov'
    with tempfile.TemporaryDirectory(prefix='codecov-') as tmpdir:
        codecov_bin = pathlib.Path(tmpdir) / 'codecov'
        download_file(url, codecov_bin, 0o755, args.dry_run)

        files = process_files(args.path)
        upload_files(codecov_bin, files, args.dry_run)


if __name__ == '__main__':
    main()
```
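A dry run exercises the download and flag-parsing logic without uploading anything; a minimal sketch, assuming the pipeline's usual `tests/output` directory:

```bash
# Print the codecov commands that would run, without downloading or uploading anything.
./publish-codecov.py --dry-run tests/output
```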
@@ -1,19 +0,0 @@

```bash
#!/usr/bin/env bash
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Generate code coverage reports for uploading to Azure Pipelines and codecov.io.

set -o pipefail -eu

PATH="${PWD}/bin:${PATH}"

if ! ansible-test --help >/dev/null 2>&1; then
    # Install the devel version of ansible-test for generating code coverage reports.
    # This is only used by Ansible Collections, which are typically tested against multiple Ansible versions (in separate jobs).
    # Since a version of ansible-test is required that can work with the output from multiple older releases, the devel version is used.
    pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
fi

ansible-test coverage xml --group-by command --stub --venv --venv-system-site-packages --color -v
```
@@ -1,38 +0,0 @@

```bash
#!/usr/bin/env bash
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Configure the test environment and run the tests.

set -o pipefail -eu

entry_point="$1"
test="$2"
read -r -a coverage_branches <<< "$3"  # space separated list of branches to run code coverage on for scheduled builds

export COMMIT_MESSAGE
export COMPLETE
export COVERAGE
export IS_PULL_REQUEST

if [ "${SYSTEM_PULLREQUEST_TARGETBRANCH:-}" ]; then
    IS_PULL_REQUEST=true
    COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD^2)
else
    IS_PULL_REQUEST=
    COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD)
fi

COMPLETE=
COVERAGE=

if [ "${BUILD_REASON}" = "Schedule" ]; then
    COMPLETE=yes

    if printf '%s\n' "${coverage_branches[@]}" | grep -q "^${BUILD_SOURCEBRANCHNAME}$"; then
        COVERAGE=yes
    fi
fi

"${entry_point}" "${test}" 2>&1 | "$(dirname "$0")/time-command.py"
```
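For reference, the test template further down in this comparison invokes the script with the pipeline variables as its three positional arguments:

```bash
.azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)"
```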
@@ -1,29 +0,0 @@

```python
#!/usr/bin/env python
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

"""Prepends a relative timestamp to each input line from stdin and writes it to stdout."""

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import sys
import time


def main():
    """Main program entry point."""
    start = time.time()

    sys.stdin.reconfigure(errors='surrogateescape')
    sys.stdout.reconfigure(errors='surrogateescape')

    for line in sys.stdin:
        seconds = time.time() - start
        sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line))
        sys.stdout.flush()


if __name__ == '__main__':
    main()
```
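Any long-running command can be piped through this filter; a minimal sketch (the `sleep` is only for illustration):

```bash
# Each output line gains a MM:SS prefix relative to when the filter started.
{ echo starting; sleep 2; echo done; } 2>&1 | ./time-command.py
# 00:00 starting
# 00:02 done
```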
@@ -1,44 +0,0 @@

```yaml
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# This template adds a job for processing code coverage data.
# It will upload results to Azure Pipelines and codecov.io.
# Use it from a job stage that completes after all other jobs have completed.
# This can be done by placing it in a separate summary stage that runs after the test stage(s) have completed.

jobs:
  - job: Coverage
    displayName: Code Coverage
    container: default
    workspace:
      clean: all
    steps:
      - checkout: self
        fetchDepth: $(fetchDepth)
        path: $(checkoutPath)
      - task: DownloadPipelineArtifact@2
        displayName: Download Coverage Data
        inputs:
          path: coverage/
          patterns: "Coverage */*=coverage.combined"
      - bash: .azure-pipelines/scripts/combine-coverage.py coverage/
        displayName: Combine Coverage Data
      - bash: .azure-pipelines/scripts/report-coverage.sh
        displayName: Generate Coverage Report
        condition: gt(variables.coverageFileCount, 0)
      - task: PublishCodeCoverageResults@1
        inputs:
          codeCoverageTool: Cobertura
          # Azure Pipelines only accepts a single coverage data file.
          # That means only Python or PowerShell coverage can be uploaded, but not both.
          # Set the "pipelinesCoverage" variable to determine which type is uploaded.
          # Use "coverage" for Python and "coverage-powershell" for PowerShell.
          summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml"
        displayName: Publish to Azure Pipelines
        condition: gt(variables.coverageFileCount, 0)
      - bash: .azure-pipelines/scripts/publish-codecov.py "$(outputPath)"
        displayName: Publish to codecov.io
        condition: gt(variables.coverageFileCount, 0)
        continueOnError: true
```
@@ -1,60 +0,0 @@

```yaml
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# This template uses the provided targets and optional groups to generate a matrix which is then passed to the test template.
# If this matrix template does not provide the required functionality, consider using the test template directly instead.

parameters:
  # A required list of dictionaries, one per test target.
  # Each item in the list must contain a "test" or "name" key.
  # Both may be provided. If one is omitted, the other will be used.
  - name: targets
    type: object

  # An optional list of values which will be used to multiply the targets list into a matrix.
  # Values can be strings or numbers.
  - name: groups
    type: object
    default: []

  # An optional format string used to generate the job name.
  # - {0} is the name of an item in the targets list.
  - name: nameFormat
    type: string
    default: "{0}"

  # An optional format string used to generate the test name.
  # - {0} is the name of an item in the targets list.
  - name: testFormat
    type: string
    default: "{0}"

  # An optional format string used to add the group to the job name.
  # {0} is the formatted name of an item in the targets list.
  # {{1}} is the group -- be sure to include the double "{{" and "}}".
  - name: nameGroupFormat
    type: string
    default: "{0} - {{1}}"

  # An optional format string used to add the group to the test name.
  # {0} is the formatted test of an item in the targets list.
  # {{1}} is the group -- be sure to include the double "{{" and "}}".
  - name: testGroupFormat
    type: string
    default: "{0}/{{1}}"

jobs:
  - template: test.yml
    parameters:
      jobs:
        - ${{ if eq(length(parameters.groups), 0) }}:
          - ${{ each target in parameters.targets }}:
            - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }}
              test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }}
        - ${{ if not(eq(length(parameters.groups), 0)) }}:
          - ${{ each group in parameters.groups }}:
            - ${{ each target in parameters.targets }}:
              - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }}
                test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }}
```
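To make the expansion concrete, here is a hypothetical invocation in the style of the pipeline stages earlier in this comparison, with the names it would generate:

```yaml
- template: templates/matrix.yml
  parameters:
    testFormat: devel/linux/{0}
    targets:
      - name: Ubuntu 22.04
        test: ubuntu2204
    groups:
      - 1
      - 2
# With the default nameGroupFormat "{0} - {1}" and testGroupFormat "{0}/{1}",
# this expands to two jobs: "Ubuntu 22.04 - 1" running "devel/linux/ubuntu2204/1"
# and "Ubuntu 22.04 - 2" running "devel/linux/ubuntu2204/2".
```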
@@ -1,50 +0,0 @@

```yaml
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# This template uses the provided list of jobs to create one or more test jobs.
# It can be used directly if needed, or through the matrix template.

parameters:
  # A required list of dictionaries, one per test job.
  # Each item in the list must contain a "job" and "name" key.
  - name: jobs
    type: object

jobs:
  - ${{ each job in parameters.jobs }}:
    - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }}
      displayName: ${{ job.name }}
      container: default
      workspace:
        clean: all
      steps:
        - checkout: self
          fetchDepth: $(fetchDepth)
          path: $(checkoutPath)
        - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)"
          displayName: Run Tests
        - bash: .azure-pipelines/scripts/process-results.sh
          condition: succeededOrFailed()
          displayName: Process Results
        - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)"
          condition: eq(variables.haveCoverageData, 'true')
          displayName: Aggregate Coverage Data
        - task: PublishTestResults@2
          condition: eq(variables.haveTestResults, 'true')
          inputs:
            testResultsFiles: "$(outputPath)/junit/*.xml"
          displayName: Publish Test Results
        - task: PublishPipelineArtifact@1
          condition: eq(variables.haveBotResults, 'true')
          displayName: Publish Bot Results
          inputs:
            targetPath: "$(outputPath)/bot/"
            artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
        - task: PublishPipelineArtifact@1
          condition: eq(variables.haveCoverageData, 'true')
          displayName: Publish Coverage Data
          inputs:
            targetPath: "$(Agent.TempDirectory)/coverage/"
            artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
```
.github/BOTMETA.yml (vendored, 2013 lines changed): file diff suppressed because it is too large.
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 2 lines changed)

```diff
@@ -47,7 +47,7 @@ body:
       label: Component Name
       description: >-
         Write the short name of the module, plugin, task or feature below,
-        *use your best guess if unsure*. Do not include `community.general.`!
+        *use your best guess if unsure*.
       placeholder: dnf, apt, yum, pip, user etc.
     validations:
       required: true
```

```diff
@@ -46,8 +46,8 @@ body:
     attributes:
       label: Component Name
       description: >-
-        Write the short name of the file, module, plugin, task or feature below,
-        *use your best guess if unsure*. Do not include `community.general.`!
+        Write the short name of the rst file, module, plugin, task or
+        feature below, *use your best guess if unsure*.
       placeholder: mysql_user
     validations:
       required: true
```
.github/ISSUE_TEMPLATE/feature_request.yml (vendored, 4 lines changed)

```diff
@@ -42,8 +42,8 @@ body:
     attributes:
       label: Component Name
       description: >-
-        Write the short name of the module or plugin, or which other part(s) of the collection this feature affects.
-        *use your best guess if unsure*. Do not include `community.general.`!
+        Write the short name of the module, plugin, task or feature below,
+        *use your best guess if unsure*.
       placeholder: dnf, apt, yum, pip, user etc.
     validations:
       required: true
```
.github/pull_request_template.md (vendored, 32 lines removed)

@@ -1,32 +0,0 @@

````markdown
##### SUMMARY
<!--- Describe the change below, including rationale and design decisions -->

<!--- HINT: Include "Fixes #nnn" if you are fixing an existing issue -->

<!--- Please do not forget to include a changelog fragment:
https://docs.ansible.com/ansible/devel/community/collection_development_process.html#creating-changelog-fragments
No need to include one for docs-only or test-only PR, and for new plugin/module PRs.
Read about more details in CONTRIBUTING.md.
-->

##### ISSUE TYPE
<!--- Pick one or more below and delete the rest.
'Test Pull Request' is for PRs that add/extend tests without code changes. -->
- Bugfix Pull Request
- Docs Pull Request
- Feature Pull Request
- New Module/Plugin Pull Request
- Refactoring Pull Request
- Test Pull Request

##### COMPONENT NAME
<!--- Write the SHORT NAME of the module, plugin, task or feature below. -->

##### ADDITIONAL INFORMATION
<!--- Include additional information to help people understand the change here -->
<!--- A step-by-step reproduction of the problem is helpful if there is no related issue -->

<!--- Paste verbatim command output below, e.g. before and after your change -->
```paste below

```
````
.github/pull_request_template.md.license (vendored, 3 lines removed)

@@ -1,3 +0,0 @@

```text
Copyright (c) Ansible Project
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
SPDX-License-Identifier: GPL-3.0-or-later
```
.github/workflows/ansible-test.yml (vendored, 193 lines removed)

@@ -1,193 +0,0 @@

```yaml
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# For the comprehensive list of the inputs supported by the ansible-community/ansible-test-gh-action GitHub Action, see
# https://github.com/marketplace/actions/ansible-test

name: EOL CI
on:
  # Run EOL CI against all pushes (direct commits, also merged PRs), Pull Requests
  push:
    branches:
      - main
      - stable-*
  pull_request:
  # Run EOL CI once per day (at 08:00 UTC)
  schedule:
    - cron: '0 8 * * *'

concurrency:
  # Make sure there is at most one active run per PR, but do not cancel any non-PR runs
  group: ${{ github.workflow }}-${{ (github.head_ref && github.event.number) || github.run_id }}
  cancel-in-progress: true

jobs:
  sanity:
    name: EOL Sanity (Ⓐ${{ matrix.ansible }})
    strategy:
      matrix:
        ansible:
          - '2.11'
    # Ansible-test on various stable branches does not yet work well with cgroups v2.
    # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
    # image for these stable branches. The list of branches where this is necessary will
    # shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28
    # for the latest list.
    runs-on: >-
      ${{ contains(fromJson(
          '["2.9", "2.10", "2.11"]'
      ), matrix.ansible) && 'ubuntu-20.04' || 'ubuntu-latest' }}
    steps:
      - name: Perform sanity testing
        uses: felixfontein/ansible-test-gh-action@main
        with:
          ansible-core-github-repository-slug: felixfontein/ansible
          ansible-core-version: stable-${{ matrix.ansible }}
          coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
          pull-request-change-detection: 'true'
          testing-type: sanity

  units:
    # Ansible-test on various stable branches does not yet work well with cgroups v2.
    # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
    # image for these stable branches. The list of branches where this is necessary will
    # shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28
    # for the latest list.
    runs-on: >-
      ${{ contains(fromJson(
          '["2.9", "2.10", "2.11"]'
      ), matrix.ansible) && 'ubuntu-20.04' || 'ubuntu-latest' }}
    name: EOL Units (Ⓐ${{ matrix.ansible }}+py${{ matrix.python }})
    strategy:
      # As soon as the first unit test fails, cancel the others to free up the CI queue
      fail-fast: true
      matrix:
        ansible:
          - ''
        python:
          - ''
        exclude:
          - ansible: ''
        include:
          - ansible: '2.11'
            python: '2.7'
          - ansible: '2.11'
            python: '3.5'

    steps:
      - name: >-
          Perform unit testing against
          Ansible version ${{ matrix.ansible }}
        uses: felixfontein/ansible-test-gh-action@main
        with:
          ansible-core-github-repository-slug: felixfontein/ansible
          ansible-core-version: stable-${{ matrix.ansible }}
          coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
          pre-test-cmd: >-
            mkdir -p ../../ansible
            ;
            git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools
          pull-request-change-detection: 'true'
          target-python-version: ${{ matrix.python }}
          testing-type: units

  integration:
    # Ansible-test on various stable branches does not yet work well with cgroups v2.
    # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
    # image for these stable branches. The list of branches where this is necessary will
    # shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28
    # for the latest list.
    runs-on: >-
      ${{ contains(fromJson(
          '["2.9", "2.10", "2.11"]'
      ), matrix.ansible) && 'ubuntu-20.04' || 'ubuntu-latest' }}
    name: EOL I (Ⓐ${{ matrix.ansible }}+${{ matrix.docker }}+py${{ matrix.python }}:${{ matrix.target }})
    strategy:
      fail-fast: false
      matrix:
        ansible:
          - ''
        docker:
          - ''
        python:
          - ''
        target:
          - ''
        exclude:
          - ansible: ''
        include:
          # 2.11
          - ansible: '2.11'
            docker: fedora32
            python: ''
            target: azp/posix/1/
          - ansible: '2.11'
            docker: fedora32
            python: ''
            target: azp/posix/2/
          - ansible: '2.11'
            docker: fedora32
            python: ''
            target: azp/posix/3/
          - ansible: '2.11'
            docker: fedora33
            python: ''
            target: azp/posix/1/
          - ansible: '2.11'
            docker: fedora33
            python: ''
            target: azp/posix/2/
          - ansible: '2.11'
            docker: fedora33
            python: ''
            target: azp/posix/3/
          - ansible: '2.11'
            docker: alpine3
            python: ''
            target: azp/posix/1/
          - ansible: '2.11'
            docker: alpine3
            python: ''
            target: azp/posix/2/
          - ansible: '2.11'
            docker: alpine3
            python: ''
            target: azp/posix/3/
          # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
          # - ansible: '2.11'
          #   docker: default
          #   python: '2.7'
          #   target: azp/generic/1/
          # - ansible: '2.11'
          #   docker: default
          #   python: '3.5'
          #   target: azp/generic/2/

    steps:
      - name: >-
          Perform integration testing against
          Ansible version ${{ matrix.ansible }}
          under Python ${{ matrix.python }}
        uses: felixfontein/ansible-test-gh-action@main
        with:
          ansible-core-github-repository-slug: felixfontein/ansible
          ansible-core-version: stable-${{ matrix.ansible }}
          coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
          docker-image: ${{ matrix.docker }}
          integration-continue-on-error: 'false'
          integration-diff: 'false'
          integration-retry-on-error: 'true'
          pre-test-cmd: >-
            mkdir -p ../../ansible
            ;
            git clone --depth=1 --single-branch https://github.com/ansible-collections/ansible.posix.git ../../ansible/posix
            ;
            git clone --depth=1 --single-branch https://github.com/ansible-collections/community.crypto.git ../../community/crypto
            ;
            git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools
          pull-request-change-detection: 'true'
          target: ${{ matrix.target }}
          target-python-version: ${{ matrix.python }}
          testing-type: integration
```
.github/workflows/codeql-analysis.yml (vendored, 36 lines removed)

@@ -1,36 +0,0 @@

```yaml
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

name: "Code scanning - action"

on:
  schedule:
    - cron: '26 19 * * 1'
  workflow_dispatch:

permissions:
  contents: read

jobs:
  CodeQL-Build:

    permissions:
      actions: read  # for github/codeql-action/init to get workflow details
      contents: read  # for actions/checkout to fetch code
      security-events: write  # for github/codeql-action/autobuild to send a status report
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v2
        with:
          languages: python

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v2
```
.github/workflows/reuse.yml (vendored, 35 lines removed)

@@ -1,35 +0,0 @@

```yaml
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

name: Verify REUSE

on:
  push:
    branches: [main]
  pull_request_target:
    types: [opened, synchronize, reopened]
    branches: [main]
  # Run CI once per day (at 07:30 UTC)
  schedule:
    - cron: '30 7 * * *'

jobs:
  check:
    permissions:
      contents: read
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          ref: ${{ github.event.pull_request.head.sha || '' }}

      - name: Install dependencies
        run: |
          pip install reuse

      - name: Check REUSE compliance
        run: |
          reuse lint
```
@@ -1,23 +0,0 @@

```yaml
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.0.1
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: mixed-line-ending
        args: [--fix=lf]
      - id: fix-encoding-pragma
      - id: check-ast
      - id: check-merge-conflict
      - id: check-symlinks
  - repo: https://github.com/pre-commit/pygrep-hooks
    rev: v1.9.0
    hooks:
      - id: rst-backticks
        types: [file]
        files: changelogs/fragments/.*\.(yml|yaml)$
```
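A typical local workflow for this configuration (standard pre-commit commands, the same ones CONTRIBUTING.md mentions further down):

```bash
pip install pre-commit
pre-commit install            # run the hooks automatically on each commit
pre-commit run --all-files    # or trigger them once across the whole tree
```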
CHANGELOG.rst (1191 lines changed): file diff suppressed because it is too large.
````diff
@@ -31,7 +31,7 @@ Also, consider taking up a valuable, reviewed, but abandoned pull request which
 * Try committing your changes with an informative but short commit message.
 * Do not squash your commits and force-push to your branch if not needed. Reviews of your pull request are much easier with individual commits to comprehend the pull request history. All commits of your pull request branch will be squashed into one commit by GitHub upon merge.
 * Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the repository checkout.
-* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#creating-changelog-fragments). (You must not include a fragment for new modules or new plugins. Also you shouldn't include one for docs-only changes. If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) )
+* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#creating-changelog-fragments). (You must not include a fragment for new modules or new plugins, except for test and filter plugins. Also you shouldn't include one for docs-only changes. If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) )
 * Avoid reformatting unrelated parts of the codebase in your PR. These types of changes will likely be requested for reversion, create additional work for reviewers, and may cause approval to be delayed.
 
 You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst).
@@ -112,28 +112,22 @@ Creating new modules and plugins requires a bit more work than other Pull Reques
   - Make sure that new plugins and modules have tests (unit tests, integration tests, or both); it is preferable to have some tests
     which run in CI.
 
+4. Action plugins need to be accompanied by a module, even if the module file only contains documentation
+   (`DOCUMENTATION`, `EXAMPLES` and `RETURN`). The module must have the same name and directory path in `plugins/modules/`
+   than the action plugin has in `plugins/action/`.
-4. For modules and action plugins, make sure to create your module/plugin in the correct subdirectory, and add a redirect entry
-   in `meta/runtime.yml`. For example, for the `aerospike_migrations` module located in
-   `plugins/modules/database/aerospike/aerospike_migrations.py`, you need to create the following entry:
-   ```.yaml
-   aerospike_migrations:
-     redirect: community.general.database.aerospike.aerospike_migrations
-   ```
-   Here, the relative path `database/aerospike/` is inserted into the module's FQCN (Fully Qualified Collection Name) after the
-   collection's name and before the module's name. This must not be done for other plugin types but modules and action plugins!
-
-- Action plugins need to be accompanied by a module, even if the module file only contains documentation
-  (`DOCUMENTATION`, `EXAMPLES` and `RETURN`). The module must have the same name and directory path in `plugins/modules/`
-  than the action plugin has in `plugins/action/`.
 
 5. Make sure to add a BOTMETA entry for your new module/plugin in `.github/BOTMETA.yml`. Search for other plugins/modules in the
    same directory to see how entries could look. You should list all authors either as `maintainers` or under `ignore`. People
    listed as `maintainers` will be pinged for new issues and PRs that modify the module/plugin or its tests.
 
 When you add a new plugin/module, we expect that you perform maintainer duty for at least some time after contributing it.
 
 ## pre-commit
 
 To help ensure high-quality contributions this repository includes a [pre-commit](https://pre-commit.com) configuration which
 corrects and tests against common issues that would otherwise cause CI to fail. To begin using these pre-commit hooks see
 the [Installation](#installation) section below.
 
 This is optional and not required to contribute to this repository.
 
 ### Installation
 
 Follow the [instructions](https://pre-commit.com/#install) provided with pre-commit and run `pre-commit install` under the repository base. If for any reason you would like to disable the pre-commit hooks run `pre-commit uninstall`.
 
+This is optional to run it locally.
+
+You can trigger it locally with `pre-commit run --all-files` or even to run only for a given file `pre-commit run --files YOUR_FILE`.
````
28
README.md
28
README.md
@@ -6,7 +6,7 @@ SPDX-License-Identifier: GPL-3.0-or-later

# Community General Collection

[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
[](https://github.com/ansible-collections/community.general/actions)
[](https://codecov.io/gh/ansible-collections/community.general)

@@ -24,7 +24,7 @@ If you encounter abusive behavior violating the [Ansible Code of Conduct](https:

## Tested with Ansible

Tested with the current ansible-core 2.11, ansible-core 2.12, ansible-core 2.13, ansible-core 2.14 releases and the current development version of ansible-core. Ansible-core versions before 2.11.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases.
Tested with the current ansible-core 2.11, ansible-core 2.12, ansible-core 2.13, ansible-core 2.14, and ansible-core 2.15 releases. Ansible-core versions before 2.11.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases.

Parts of this collection will not work with ansible-core 2.11 on Python 3.12+.

@@ -34,13 +34,13 @@ Some modules and plugins require external libraries. Please check the requiremen

## Included content

Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/community/general) or the [documentation on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/ui/repo/published/community/general/) or the [documentation on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).

## Using this collection

This collection is shipped with the Ansible package. So if you have it installed, no more action is required.

If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/community/general) manually with the `ansible-galaxy` command-line tool:
If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/ui/repo/published/community/general/) manually with the `ansible-galaxy` command-line tool:

    ansible-galaxy collection install community.general

@@ -57,7 +57,7 @@ Note that if you install the collection manually, it will not be upgraded automa

ansible-galaxy collection install community.general --upgrade
```

You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/community/general):
You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/ui/repo/published/community/general/):

```bash
ansible-galaxy collection install community.general:==X.Y.Z
@@ -65,6 +65,10 @@ ansible-galaxy collection install community.general:==X.Y.Z

See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details.

### FQCNs for modules and actions

⚠️ The collection uses a similar directory structure for modules as the Ansible repository used for Ansible 2.9 and before. This directory structure was never exposed to the user. Due to changes in community.general 5.0.0 (using `meta/runtime.yml` redirects instead of symbolic links) some tooling started exposing the internal module names to end-users. These **internal names**, like `community.general.system.ufw` for the UFW firewall managing module, do work, but should be avoided since they are treated as an implementation detail that can change at any time, even in bugfix releases. Always use the three-component FQCN form, for example `community.general.ufw` for the UFW module. ⚠️

## Contributing to this collection

The content of this collection is made by good people just like you, a community of individuals collaborating on making the world better through developing automation software.

@@ -73,13 +77,13 @@ We are actively accepting new contributors.

All types of contributions are very welcome.

You don't know how to start? Refer to our [contribution guide](https://github.com/ansible-collections/community.general/blob/stable-7/CONTRIBUTING.md)!
You don't know how to start? Refer to our [contribution guide](https://github.com/ansible-collections/community.general/blob/main/CONTRIBUTING.md)!

The current maintainers are listed in the [commit-rights.md](https://github.com/ansible-collections/community.general/blob/stable-7/commit-rights.md#people) file. If you have questions or need help, feel free to mention them in the proposals.
The current maintainers are listed in the [commit-rights.md](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md#people) file. If you have questions or need help, feel free to mention them in the proposals.

You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).

Also for some notes specific to this collection see [our CONTRIBUTING documentation](https://github.com/ansible-collections/community.general/blob/stable-7/CONTRIBUTING.md).
Also for some notes specific to this collection see [our CONTRIBUTING documentation](https://github.com/ansible-collections/community.general/blob/main/CONTRIBUTING.md).

### Running tests

@@ -89,7 +93,7 @@ See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collectio

To learn how to maintain / become a maintainer of this collection, refer to:

* [Committer guidelines](https://github.com/ansible-collections/community.general/blob/stable-7/commit-rights.md).
* [Committer guidelines](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md).
* [Maintainer guidelines](https://github.com/ansible/community-docs/blob/main/maintaining.rst).

It is necessary for maintainers of this collection to be subscribed to:

@@ -117,7 +121,7 @@ See the [Releasing guidelines](https://github.com/ansible/community-docs/blob/ma

## Release notes

See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-7/CHANGELOG.rst).
See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-5/CHANGELOG.rst).

## Roadmap

@@ -136,8 +140,8 @@ See [this issue](https://github.com/ansible-collections/community.general/issues

This collection is primarily licensed and distributed as a whole under the GNU General Public License v3.0 or later.

See [LICENSES/GPL-3.0-or-later.txt](https://github.com/ansible-collections/community.general/blob/stable-7/COPYING) for the full text.
See [LICENSES/GPL-3.0-or-later.txt](https://github.com/ansible-collections/community.general/blob/main/COPYING) for the full text.

Parts of the collection are licensed under the [BSD 2-Clause license](https://github.com/ansible-collections/community.general/blob/stable-7/LICENSES/BSD-2-Clause.txt), the [MIT license](https://github.com/ansible-collections/community.general/blob/stable-7/LICENSES/MIT.txt), and the [PSF 2.0 license](https://github.com/ansible-collections/community.general/blob/stable-7/LICENSES/PSF-2.0.txt).
Parts of the collection are licensed under the [BSD 2-Clause license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/BSD-2-Clause.txt), the [MIT license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/MIT.txt), and the [PSF 2.0 license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/PSF-2.0.txt).

All files have a machine readable `SPDX-License-Identifier:` comment denoting their respective license(s) or an equivalent entry in an accompanying `.license` file. Only changelog fragments (which will not be part of a release) are covered by a blanket statement in `.reuse/dep5`. This conforms to the [REUSE specification](https://reuse.software/spec/).

(File diff suppressed because it is too large.)
galaxy.yml

@@ -5,7 +5,7 @@

namespace: community
name: general
version: 7.0.0
version: 5.8.10
readme: README.md
authors:
  - Ansible (https://github.com/ansible)

meta/runtime.yml (5189 changed lines): file diff suppressed because it is too large.
plugins/action/shutdown.py

@@ -6,7 +6,6 @@
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)

__metaclass__ = type

from ansible.errors import AnsibleError, AnsibleConnectionFailure
@@ -81,6 +80,13 @@ class ActionModule(ActionBase):
                    getattr(self, default_value))))
        return value

    def get_shutdown_command_args(self, distribution):
        args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')
        # Convert seconds to minutes. If less than 60, set it to 0.
        delay_sec = self.delay
        shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE)
        return args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message)
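To make the `args.format(...)` call above concrete, here is a minimal sketch; the template string is made up for the example and is not the plugin's real `DEFAULT_SHUTDOWN_COMMAND_ARGS`:

```python
# A minimal sketch, assuming an illustrative args template. Integer division
# floors sub-minute delays to 0, exactly like delay_sec // 60 above; unused
# keyword arguments to str.format() are simply ignored.
args = '-h +{delay_min} "{message}"'
print(args.format(delay_sec=90, delay_min=90 // 60, message='Going down'))  # -h +1 "Going down"
print(args.format(delay_sec=45, delay_min=45 // 60, message='Going down'))  # -h +0 "Going down"
```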

    def get_distribution(self, task_vars):
        # FIXME: only execute the module if we don't already have the facts we need
        distribution = {}
@@ -95,8 +101,7 @@ class ActionModule(ActionBase):
                    to_native(module_output['module_stdout']).strip(),
                    to_native(module_output['module_stderr']).strip()))
            distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower()
            distribution['version'] = to_text(
                module_output['ansible_facts']['ansible_distribution_version'].split('.')[0])
            distribution['version'] = to_text(module_output['ansible_facts']['ansible_distribution_version'].split('.')[0])
            distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower())
            display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution))
            return distribution
@@ -104,23 +109,6 @@ class ActionModule(ActionBase):
            raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0]))

    def get_shutdown_command(self, task_vars, distribution):
        def find_command(command, find_search_paths):
            display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format(
                action=self._task.action,
                command=command,
                paths=find_search_paths))
            find_result = self._execute_module(
                task_vars=task_vars,
                # prevent collection search by calling with ansible.legacy (still allows library/ override of find)
                module_name='ansible.legacy.find',
                module_args={
                    'paths': find_search_paths,
                    'patterns': [command],
                    'file_type': 'any'
                }
            )
            return [x['path'] for x in find_result['files']]

        shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND')
        default_search_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
        search_paths = self._task.args.get('search_paths', default_search_paths)
@@ -139,53 +127,45 @@ class ActionModule(ActionBase):
        except TypeError:
            raise AnsibleError(err_msg.format(search_paths))

        full_path = find_command(shutdown_bin, search_paths)  # find the path to the shutdown command
        if not full_path:  # if we could not find the shutdown command
            display.vvv('Unable to find command "{0}" in search paths: {1}, will attempt a shutdown using systemd '
                        'directly.'.format(shutdown_bin, search_paths))  # tell the user we will try with systemd
            systemctl_search_paths = ['/bin', '/usr/bin']
            full_path = find_command('systemctl', systemctl_search_paths)  # find the path to the systemctl command
            if not full_path:  # if we couldn't find systemctl
                raise AnsibleError(
                    'Could not find command "{0}" in search paths: {1} or systemctl command in search paths: {2}, unable to shutdown.'.
                    format(shutdown_bin, search_paths, systemctl_search_paths))  # we give up here
            else:
                return "{0} poweroff".format(full_path[0])  # done, since we cannot use args with systemd shutdown
        display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format(
            action=self._task.action,
            command=shutdown_bin,
            paths=search_paths))
        find_result = self._execute_module(
            task_vars=task_vars,
            # prevent collection search by calling with ansible.legacy (still allows library/ override of find)
            module_name='ansible.legacy.find',
            module_args={
                'paths': search_paths,
                'patterns': [shutdown_bin],
                'file_type': 'any'
            }
        )

        # systemd case taken care of, here we add args to the command
        args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')
        # Convert seconds to minutes. If less than 60, set it to 0.
        delay_sec = self.delay
        shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE)
        return '{0} {1}'. \
            format(
                full_path[0],
                args.format(
                    delay_sec=delay_sec,
                    delay_min=delay_sec // 60,
                    message=shutdown_message
                )
            )
        full_path = [x['path'] for x in find_result['files']]
        if not full_path:
            raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths))
        self._shutdown_command = full_path[0]
        return self._shutdown_command

    def perform_shutdown(self, task_vars, distribution):
        result = {}
        shutdown_result = {}
        shutdown_command_exec = self.get_shutdown_command(task_vars, distribution)
        shutdown_command = self.get_shutdown_command(task_vars, distribution)
        shutdown_command_args = self.get_shutdown_command_args(distribution)
        shutdown_command_exec = '{0} {1}'.format(shutdown_command, shutdown_command_args)

        self.cleanup(force=True)
        try:
            display.vvv("{action}: shutting down server...".format(action=self._task.action))
            display.debug("{action}: shutting down server with command '{command}'".
                          format(action=self._task.action, command=shutdown_command_exec))
            display.debug("{action}: shutting down server with command '{command}'".format(action=self._task.action, command=shutdown_command_exec))
            if self._play_context.check_mode:
                shutdown_result['rc'] = 0
            else:
                shutdown_result = self._low_level_execute_command(shutdown_command_exec, sudoable=self.DEFAULT_SUDOABLE)
        except AnsibleConnectionFailure as e:
            # If the connection is closed too quickly due to the system being shutdown, carry on
            display.debug(
                '{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action,
                                                                                        error=to_text(e)))
            display.debug('{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action, error=to_text(e)))
            shutdown_result['rc'] = 0

        if shutdown_result['rc'] != 0:
plugins/callback/mail.py

@@ -49,9 +49,8 @@ options:
  sender:
    description:
      - Mail sender.
      - This is required since community.general 6.0.0.
      - Note that this will be required from community.general 6.0.0 on.
    type: str
    required: true
    ini:
      - section: callback_mail
        key: sender
@@ -105,6 +104,10 @@ class CallbackModule(CallbackBase):
        super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)

        self.sender = self.get_option('sender')
        if self.sender is None:
            self._display.deprecated(
                'The sender for the mail callback has not been specified. This will be an error in the future',
                version='6.0.0', collection_name='community.general')
        self.to = self.get_option('to')
        self.smtphost = self.get_option('mta')
        self.smtpport = self.get_option('mtaport')

plugins/doc_fragments/attributes.py (removed in this diff)

@@ -1,93 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):

    # Standard documentation fragment
    DOCUMENTATION = r'''
options: {}
attributes:
    check_mode:
        description: Can run in C(check_mode) and return changed status prediction without modifying target.
    diff_mode:
        description: Will return details on what has changed (or possibly needs changing in C(check_mode)), when in diff mode.
'''

    PLATFORM = r'''
options: {}
attributes:
    platform:
        description: Target OS/families that can be operated against.
        support: N/A
'''

    # Should be used together with the standard fragment
    INFO_MODULE = r'''
options: {}
attributes:
    check_mode:
        support: full
        details:
            - This action does not modify state.
    diff_mode:
        support: N/A
        details:
            - This action does not modify state.
'''

    CONN = r'''
options: {}
attributes:
    become:
        description: Is usable alongside C(become) keywords.
    connection:
        description: Uses the target's configured connection information to execute code on it.
    delegation:
        description: Can be used in conjunction with C(delegate_to) and related keywords.
'''

    FACTS = r'''
options: {}
attributes:
    facts:
        description: Action returns an C(ansible_facts) dictionary that will update existing host facts.
'''

    # Should be used together with the standard fragment and the FACTS fragment
    FACTS_MODULE = r'''
options: {}
attributes:
    check_mode:
        support: full
        details:
            - This action does not modify state.
    diff_mode:
        support: N/A
        details:
            - This action does not modify state.
    facts:
        support: full
'''

    FILES = r'''
options: {}
attributes:
    safe_file_operations:
        description: Uses Ansible's strict file operation functions to ensure proper permissions and avoid data corruption.
'''

    FLOW = r'''
options: {}
attributes:
    action:
        description: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
    async:
        description: Supports being used with the C(async) keyword.
'''
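For context, modules pull fragments like these in via `extends_documentation_fragment`; a minimal sketch, assuming the fragment is registered under the name `community.general.attributes` (with variants addressed by a dotted suffix):

```python
# A minimal sketch, assuming the fragment name community.general.attributes;
# the hypothetical module below only illustrates how a fragment is referenced.
DOCUMENTATION = r'''
module: example_info
short_description: Example info module
extends_documentation_fragment:
  - community.general.attributes
  - community.general.attributes.info_module
'''
```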

plugins/doc_fragments/bitbucket.py

@@ -27,10 +27,8 @@ options:
    description:
      - The username.
      - If not set the environment variable C(BITBUCKET_USERNAME) will be used.
      - I(username) is an alias of I(user) since community.general 6.0.0. It was an alias of I(workspace) before.
    type: str
    version_added: 4.0.0
    aliases: [ username ]
  password:
    description:
      - The App password.

plugins/doc_fragments/hpe3par.py

@@ -29,7 +29,8 @@ options:
    required: true

requirements:
  - hpe3par_sdk >= 1.0.2. Install using C(pip install hpe3par_sdk).
  - hpe3par_sdk >= 1.0.2. Install using 'pip install hpe3par_sdk'
  - WSAPI service should be enabled on the 3PAR storage array.
notes:
  - check_mode not supported
'''

plugins/doc_fragments/ldap.py

@@ -24,11 +24,6 @@ options:
      - The password to use with I(bind_dn).
    type: str
    default: ''
  ca_path:
    description:
      - Set the path to PEM file with CA certs.
    type: path
    version_added: "6.5.0"
  dn:
    required: true
    description:
@@ -70,15 +65,4 @@ options:
    choices: ['external', 'gssapi']
    default: external
    version_added: "2.0.0"
  xorder_discovery:
    description:
      - Set the behavior on how to process Xordered DNs.
      - C(enable) will perform a C(ONELEVEL) search below the superior RDN to find the matching DN.
      - C(disable) will always use the DN unmodified (as passed by the I(dn) parameter).
      - C(auto) will only perform a search if the first RDN does not contain an index number (C({x})).
      - Possible choices are C(enable), C(auto), C(disable).
    type: str
    choices: ['enable', 'auto', 'disable']
    default: auto
    version_added: "6.4.0"
'''

plugins/doc_fragments/rackspace.py

@@ -105,10 +105,6 @@ options:
      - Whether or not to require SSL validation of API endpoints.
    type: bool
    aliases: [ verify_ssl ]
deprecated:
  removed_in: 9.0.0
  why: This module relies on the deprecated package pyrax.
  alternative: Use the Openstack modules instead.
requirements:
  - python >= 2.6
  - pyrax

plugins/filter/jc.py

@@ -138,14 +138,8 @@ def jc_filter(data, parser, quiet=True, raw=False):
        raise AnsibleError('You need to install "jc" as a Python library on the Ansible controller prior to running jc filter')

    try:
        # new API (jc v1.18.0 and higher) allows use of plugin parsers
        if hasattr(jc, 'parse'):
            return jc.parse(parser, data, quiet=quiet, raw=raw)

        # old API (jc v1.17.7 and lower)
        else:
            jc_parser = importlib.import_module('jc.parsers.' + parser)
            return jc_parser.parse(data, quiet=quiet, raw=raw)
        jc_parser = importlib.import_module('jc.parsers.' + parser)
        return jc_parser.parse(data, quiet=quiet, raw=raw)

    except Exception as e:
        raise AnsibleFilterError('Error in jc filter plugin: %s' % e)
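The same version dispatch can be sketched outside the plugin; this mirrors the pattern in the hunk above and is not part of the collection itself:

```python
import importlib

import jc  # the jc package is a hard requirement for this pattern


def parse_with_jc(parser, data, quiet=True, raw=False):
    # jc 1.18.0+ exposes jc.parse(), which also resolves plugin parsers.
    if hasattr(jc, 'parse'):
        return jc.parse(parser, data, quiet=quiet, raw=raw)
    # Older releases (<= 1.17.7) only ship per-parser modules under jc.parsers.
    jc_parser = importlib.import_module('jc.parsers.' + parser)
    return jc_parser.parse(data, quiet=quiet, raw=raw)
```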

plugins/inventory/linode.py

@@ -121,8 +121,11 @@ compose:
  ansible_host: "ipv4 | community.general.json_query('[?public==`false`].address') | first"
'''

import os

from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.template import Templar


try:
@@ -141,14 +144,22 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
    def _build_client(self, loader):
        """Build the Linode client."""

        t = Templar(loader=loader)

        access_token = self.get_option('access_token')
        if self.templar.is_template(access_token):
            access_token = self.templar.template(variable=access_token, disable_lookups=False)
        if t.is_template(access_token):
            access_token = t.template(variable=access_token, disable_lookups=False)

        if access_token is None:
            try:
                access_token = os.environ['LINODE_ACCESS_TOKEN']
            except KeyError:
                pass

        if access_token is None:
            raise AnsibleError((
                'Could not retrieve Linode access token '
                'from plugin configuration sources'
                'from plugin configuration or environment'
            ))

        self.client = LinodeClient(access_token)
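The templating pattern in the hunk above generalizes to any plugin option; a sketch of that pattern (the helper name is illustrative):

```python
from ansible.template import Templar

# A sketch of the pattern above: resolve an option value that may itself be a
# Jinja2 template (for example, a token pulled in via a lookup expression).
def resolve_templated_option(loader, value):
    t = Templar(loader=loader)
    if t.is_template(value):
        value = t.template(variable=value, disable_lookups=False)
    return value
```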

plugins/inventory/lxd.py

@@ -55,11 +55,6 @@ DOCUMENTATION = r'''
    type: str
    default: none
    choices: [ 'STOPPED', 'STARTING', 'RUNNING', 'none' ]
  project:
    description: Filter the instance according to the given project.
    type: str
    default: default
    version_added: 6.2.0
  type_filter:
    description:
      - Filter the instances by type C(virtual-machine), C(container) or C(both).
@@ -145,9 +140,6 @@ groupby:
  vlan666:
    type: vlanid
    attribute: 666
  projectInternals:
    type: project
    attribute: internals
'''

import json
@@ -159,7 +151,6 @@ from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.common.dict_transformations import dict_merge
from ansible.module_utils.six import raise_from
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException

try:
@@ -337,15 +328,7 @@ class InventoryModule(BaseInventoryPlugin):
        #     "status_code": 200,
        #     "type": "sync"
        # }
        url = '/1.0/instances'
        if self.project:
            url = url + '?{0}'.format(urlencode(dict(project=self.project)))

        instances = self.socket.do('GET', url)

        if self.project:
            return [m.split('/')[3].split('?')[0] for m in instances['metadata']]

        instances = self.socket.do('GET', '/1.0/instances')
        return [m.split('/')[3] for m in instances['metadata']]
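A sketch of the URL construction in the hunk above, showing what the project scoping actually produces (values are illustrative):

```python
from ansible.module_utils.six.moves.urllib.parse import urlencode

# Scoping the LXD instance listing to a project appends ?project=<name>;
# the query string is later stripped back off the returned metadata paths.
url = '/1.0/instances'
project = 'internals'
if project:
    url = url + '?{0}'.format(urlencode(dict(project=project)))
# url == '/1.0/instances?project=internals'
```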

    def _get_config(self, branch, name):
@@ -366,11 +349,9 @@ class InventoryModule(BaseInventoryPlugin):
            dict(config): Config of the instance"""
        config = {}
        if isinstance(branch, (tuple, list)):
            config[name] = {branch[1]: self.socket.do(
                'GET', '/1.0/{0}/{1}/{2}?{3}'.format(to_native(branch[0]), to_native(name), to_native(branch[1]), urlencode(dict(project=self.project))))}
            config[name] = {branch[1]: self.socket.do('GET', '/1.0/{0}/{1}/{2}'.format(to_native(branch[0]), to_native(name), to_native(branch[1])))}
        else:
            config[name] = {branch: self.socket.do(
                'GET', '/1.0/{0}/{1}?{2}'.format(to_native(branch), to_native(name), urlencode(dict(project=self.project))))}
            config[name] = {branch: self.socket.do('GET', '/1.0/{0}/{1}'.format(to_native(branch), to_native(name)))}
        return config

    def get_instance_data(self, names):
@@ -600,8 +581,6 @@ class InventoryModule(BaseInventoryPlugin):
        self._set_data_entry(instance_name, 'network_interfaces', self.extract_network_information_from_instance_config(instance_name))
        self._set_data_entry(instance_name, 'preferred_interface', self.get_prefered_instance_network_interface(instance_name))
        self._set_data_entry(instance_name, 'vlan_ids', self.get_instance_vlans(instance_name))
        self._set_data_entry(instance_name, 'project', self._get_data_entry(
            'instances/{0}/instances/metadata/project'.format(instance_name)))

    def build_inventory_network(self, instance_name):
        """Add the network interfaces of the instance to the inventory
@@ -705,8 +684,6 @@ class InventoryModule(BaseInventoryPlugin):
        # add VLAN_ID information
        if self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)):
            self.inventory.set_variable(instance_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)))
        # add project
        self.inventory.set_variable(instance_name, 'ansible_lxd_project', self._get_data_entry('inventory/{0}/project'.format(instance_name)))

    def build_inventory_groups_location(self, group_name):
        """create group by attribute: location
@@ -782,28 +759,6 @@ class InventoryModule(BaseInventoryPlugin):
            # Ignore invalid IP addresses returned by lxd
            pass

    def build_inventory_groups_project(self, group_name):
        """create group by attribute: project

        Args:
            str(group_name): Group name
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        # maybe we just want to expand one group
        if group_name not in self.inventory.groups:
            self.inventory.add_group(group_name)

        gen_instances = [
            instance_name for instance_name in self.inventory.hosts
            if 'ansible_lxd_project' in self.inventory.get_host(instance_name).get_vars()]
        for instance_name in gen_instances:
            if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_project'):
                self.inventory.add_child(group_name, instance_name)

    def build_inventory_groups_os(self, group_name):
        """create group by attribute: os

@@ -942,7 +897,6 @@ class InventoryModule(BaseInventoryPlugin):
            * 'profile'
            * 'vlanid'
            * 'type'
            * 'project'

        Args:
            str(group_name): Group name
@@ -970,8 +924,6 @@ class InventoryModule(BaseInventoryPlugin):
                self.build_inventory_groups_vlanid(group_name)
            elif self.groupby[group_name].get('type') == 'type':
                self.build_inventory_groups_type(group_name)
            elif self.groupby[group_name].get('type') == 'project':
                self.build_inventory_groups_project(group_name)
            else:
                raise AnsibleParserError('Unknown group type: {0}'.format(to_native(group_name)))

@@ -1078,7 +1030,6 @@ class InventoryModule(BaseInventoryPlugin):
        try:
            self.client_key = self.get_option('client_key')
            self.client_cert = self.get_option('client_cert')
            self.project = self.get_option('project')
            self.debug = self.DEBUG
            self.data = {}  # store for inventory-data
            self.groupby = self.get_option('groupby')

plugins/inventory/nmap.py

@@ -30,27 +30,12 @@ DOCUMENTATION = '''
  address:
    description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation.
    required: true
    env:
      - name: ANSIBLE_NMAP_ADDRESS
        version_added: 6.6.0
  exclude:
    description:
      - List of addresses to exclude.
      - For example C(10.2.2.15-25) or C(10.2.2.15,10.2.2.16).
    description: list of addresses to exclude
    type: list
    elements: string
    env:
      - name: ANSIBLE_NMAP_EXCLUDE
        version_added: 6.6.0
  port:
    description:
      - Only scan specific port or port range (C(-p)).
      - For example, you could pass C(22) for a single port, C(1-65535) for a range of ports,
        or C(U:53,137,T:21-25,139,8080,S:9) to check port 53 with UDP, ports 21-25 with TCP, port 9 with SCTP, and ports 137, 139, and 8080 with all.
    type: string
    version_added: 6.5.0
  ports:
    description: Enable/disable scanning ports.
    description: Enable/disable scanning for open ports
    type: boolean
    default: true
  ipv4:
@@ -61,30 +46,6 @@ DOCUMENTATION = '''
    description: use IPv6 type addresses
    type: boolean
    default: true
  udp_scan:
    description:
      - Scan via UDP.
      - Depending on your system you might need I(sudo=true) for this to work.
    type: boolean
    default: false
    version_added: 6.1.0
  icmp_timestamp:
    description:
      - Scan via ICMP Timestamp (C(-PP)).
      - Depending on your system you might need I(sudo=true) for this to work.
    type: boolean
    default: false
    version_added: 6.1.0
  open:
    description: Only scan for open (or possibly open) ports.
    type: boolean
    default: false
    version_added: 6.5.0
  dns_resolve:
    description: Whether to always (C(true)) or never (C(false)) do DNS resolution.
    type: boolean
    default: false
    version_added: 6.1.0
notes:
  - At least one of ipv4 or ipv6 is required to be True, both can be True, but they cannot both be False.
  - 'TODO: add OS fingerprinting'
@@ -101,14 +62,6 @@ plugin: community.general.nmap
sudo: true
strict: false
address: 192.168.0.0/24

# an nmap scan specifying ports and classifying results to an inventory group
plugin: community.general.nmap
address: 192.168.0.0/24
exclude: 192.168.0.1, web.example.com
port: 22, 443
groups:
  web_servers: "ports | selectattr('port', 'equalto', '443')"
'''

import os
@@ -199,10 +152,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
        if self._options['sudo']:
            cmd.insert(0, 'sudo')

        if self._options['port']:
            cmd.append('-p')
            cmd.append(self._options['port'])

        if not self._options['ports']:
            cmd.append('-sP')

@@ -217,18 +166,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
            cmd.append('--exclude')
            cmd.append(','.join(self._options['exclude']))

        if self._options['dns_resolve']:
            cmd.append('-n')

        if self._options['udp_scan']:
            cmd.append('-sU')

        if self._options['icmp_timestamp']:
            cmd.append('-PP')

        if self._options['open']:
            cmd.append('--open')

        cmd.append(self._options['address'])
        try:
            # execute
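A sketch of the flag mapping above, run with illustrative option values (not the plugin's real defaults), showing the command it would assemble:

```python
# Illustrative option values only; mirrors the option-to-flag mapping above.
options = {'port': '22,443', 'ports': True, 'exclude': ['192.168.0.1'],
           'dns_resolve': False, 'udp_scan': False, 'icmp_timestamp': False,
           'open': True, 'address': '192.168.0.0/24'}
cmd = ['nmap']
if options['port']:
    cmd.extend(['-p', options['port']])
if not options['ports']:
    cmd.append('-sP')        # ping scan only, no port scan
if options['exclude']:
    cmd.extend(['--exclude', ','.join(options['exclude'])])
if options['dns_resolve']:
    cmd.append('-n')
if options['udp_scan']:
    cmd.append('-sU')
if options['icmp_timestamp']:
    cmd.append('-PP')
if options['open']:
    cmd.append('--open')
cmd.append(options['address'])
# cmd == ['nmap', '-p', '22,443', '--exclude', '192.168.0.1', '--open', '192.168.0.0/24']
```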

plugins/inventory/proxmox.py

@@ -113,9 +113,10 @@ DOCUMENTATION = '''
    description:
      - Whether to set C(ansible_host) for proxmox nodes.
      - When set to C(true) (default), will use the first available interface. This can be different from what you expect.
      - The default of this option changed from C(true) to C(false) in community.general 6.0.0.
      - This currently defaults to C(true), but the default is deprecated since community.general 4.8.0.
        The default will change to C(false) in community.general 6.0.0. To avoid a deprecation warning, please
        set this parameter explicitly.
    type: bool
    default: false
  filters:
    version_added: 4.6.0
    description: A list of Jinja templates that allow filtering hosts.
@@ -222,6 +223,7 @@ from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.utils.display import Display
from ansible.template import Templar

from ansible_collections.community.general.plugins.module_utils.version import LooseVersion

@@ -277,11 +279,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
        credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password, })

        a = self._get_session()

        if a.verify is False:
            from requests.packages.urllib3 import disable_warnings
            disable_warnings()

        ret = a.post('%s/api2/json/access/ticket' % self.proxmox_url, data=credentials)

        json = ret.json()
@@ -570,6 +567,14 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
            self.inventory.add_group(nodes_group)

        want_proxmox_nodes_ansible_host = self.get_option("want_proxmox_nodes_ansible_host")
        if want_proxmox_nodes_ansible_host is None:
            display.deprecated(
                'The want_proxmox_nodes_ansible_host option of the community.general.proxmox inventory plugin'
                ' currently defaults to `true`, but this default has been deprecated and will change to `false`'
                ' in community.general 6.0.0. To keep the current behavior and remove this deprecation warning,'
                ' explicitly set `want_proxmox_nodes_ansible_host` to `true` in your inventory configuration',
                version='6.0.0', collection_name='community.general')
            want_proxmox_nodes_ansible_host = True

        # gather vm's on nodes
        self._get_auth()
@@ -616,23 +621,40 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
        # read config from file, this sets 'options'
        self._read_config_data(path)

        # read and template auth options
        for o in ('url', 'user', 'password', 'token_id', 'token_secret'):
            v = self.get_option(o)
            if self.templar.is_template(v):
                v = self.templar.template(v, disable_lookups=False)
            setattr(self, 'proxmox_%s' % o, v)
        t = Templar(loader=loader)

        # some more cleanup and validation
        self.proxmox_url = self.proxmox_url.rstrip('/')
        # read options
        proxmox_url = self.get_option('url')
        if t.is_template(proxmox_url):
            proxmox_url = t.template(variable=proxmox_url, disable_lookups=False)
        self.proxmox_url = proxmox_url.rstrip('/')

        if self.proxmox_password is None and (self.proxmox_token_id is None or self.proxmox_token_secret is None):
        proxmox_user = self.get_option('user')
        if t.is_template(proxmox_user):
            proxmox_user = t.template(variable=proxmox_user, disable_lookups=False)
        self.proxmox_user = proxmox_user

        proxmox_password = self.get_option('password')
        if t.is_template(proxmox_password):
            proxmox_password = t.template(variable=proxmox_password, disable_lookups=False)
        self.proxmox_password = proxmox_password

        proxmox_token_id = self.get_option('token_id')
        if t.is_template(proxmox_token_id):
            proxmox_token_id = t.template(variable=proxmox_token_id, disable_lookups=False)
        self.proxmox_token_id = proxmox_token_id

        proxmox_token_secret = self.get_option('token_secret')
        if t.is_template(proxmox_token_secret):
            proxmox_token_secret = t.template(variable=proxmox_token_secret, disable_lookups=False)
        self.proxmox_token_secret = proxmox_token_secret

        if proxmox_password is None and (proxmox_token_id is None or proxmox_token_secret is None):
            raise AnsibleError('You must specify either a password or both token_id and token_secret.')
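The five nearly identical blocks above could also be folded into a single helper; a sketch only, not the code actually merged here (the helper name is made up):

```python
# A sketch: one helper replacing the repeated get_option/is_template/template
# blocks above, without changing their behavior.
def templated_option(plugin, t, name):
    value = plugin.get_option(name)
    if t.is_template(value):
        value = t.template(variable=value, disable_lookups=False)
    return value
```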

        if self.get_option('qemu_extended_statuses') and not self.get_option('want_facts'):
            raise AnsibleError('You must set want_facts to True if you want to use qemu_extended_statuses.')

        # read rest of options
        self.cache_key = self.get_cache_key(path)
        self.use_cache = cache and self.get_option('cache')
        self.host_filters = self.get_option('filters')

plugins/lookup/bitwarden.py

@@ -28,12 +28,8 @@ DOCUMENTATION = """
      default: name
      version_added: 5.7.0
    field:
      description: Field to fetch. Leave unset to fetch whole response.
      description: Field to fetch; leave unset to fetch whole response.
      type: str
    collection_id:
      description: Collection ID to filter results by collection. Leave unset to skip filtering.
      type: str
      version_added: 6.3.0
"""

EXAMPLES = """
@@ -47,20 +43,10 @@ EXAMPLES = """
    msg: >-
      {{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') }}

- name: "Get 'password' from Bitwarden record named 'a_test' from collection"
  ansible.builtin.debug:
    msg: >-
      {{ lookup('community.general.bitwarden', 'a_test', field='password', collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }}

- name: "Get full Bitwarden record named 'a_test'"
  ansible.builtin.debug:
    msg: >-
      {{ lookup('community.general.bitwarden', 'a_test') }}

- name: "Get custom field 'api_key' from Bitwarden record named 'a_test'"
  ansible.builtin.debug:
    msg: >-
      {{ lookup('community.general.bitwarden', 'a_test', field='api_key') }}
"""

RETURN = """
@@ -105,17 +91,10 @@ class Bitwarden(object):
            raise BitwardenException(err)
        return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')

    def _get_matches(self, search_value, search_field, collection_id):
    def _get_matches(self, search_value, search_field):
        """Return matching records whose search_field is equal to key.
        """

        # Prepare set of params for Bitwarden CLI
        params = ['list', 'items', '--search', search_value]

        if collection_id:
            params.extend(['--collectionid', collection_id])

        out, err = self._run(params)
        out, err = self._run(['list', 'items', '--search', search_value])

        # This includes things that matched in different fields.
        initial_matches = AnsibleJSONDecoder().raw_decode(out)[0]
@@ -123,27 +102,17 @@ class Bitwarden(object):
        # Filter to only include results from the right field.
        return [item for item in initial_matches if item[search_field] == search_value]

    def get_field(self, field, search_value, search_field="name", collection_id=None):
        """Return a list of the specified field for records whose search_field match search_value
        and filtered by collection if collection has been provided.
    def get_field(self, field, search_value, search_field="name"):
        """Return a list of the specified field for records whose search_field match search_value.

        If field is None, return the whole record for each match.
        """
        matches = self._get_matches(search_value, search_field, collection_id)
        matches = self._get_matches(search_value, search_field)

        if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']:
        if field:
            return [match['login'][field] for match in matches]
        elif not field:
            return matches
        else:
            custom_field_matches = []
            for match in matches:
                for custom_field in match['fields']:
                    if custom_field['name'] == field:
                        custom_field_matches.append(custom_field['value'])
            if matches and not custom_field_matches:
                raise AnsibleError("Custom field {field} does not exist in {search_value}".format(field=field, search_value=search_value))
            return custom_field_matches

        return matches
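A sketch of the CLI parameter building above, using the collection ID from the documentation examples; the collection filter is only appended when a `collection_id` was supplied:

```python
# Mirrors the _get_matches() parameter handling above.
params = ['list', 'items', '--search', 'a_test']
collection_id = 'bafba515-af11-47e6-abe3-af1200cd18b2'
if collection_id:
    params.extend(['--collectionid', collection_id])
# Invoked as: bw list items --search a_test --collectionid bafba515-af11-47e6-abe3-af1200cd18b2
```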


class LookupModule(LookupBase):
@@ -152,11 +121,10 @@ class LookupModule(LookupBase):
        self.set_options(var_options=variables, direct=kwargs)
        field = self.get_option('field')
        search_field = self.get_option('search')
        collection_id = self.get_option('collection_id')
        if not _bitwarden.unlocked:
            raise AnsibleError("Bitwarden Vault locked. Run 'bw unlock'.")

        return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]
        return [_bitwarden.get_field(field, term, search_field) for term in terms]


_bitwarden = Bitwarden()

plugins/lookup/cartesian.py

@@ -15,11 +15,9 @@ DOCUMENTATION = '''
    - It is clearer with an example: it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b].
      You can see the exact syntax in the examples section.
  options:
    _terms:
    _raw:
      description:
        - a set of lists
      type: list
      elements: list
      required: true
'''
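For reference, the product described above is the same operation `itertools.product` performs in plain Python (the lookup returns lists rather than tuples):

```python
import itertools

# The cartesian lookup behaves like itertools.product over the input lists.
print(list(itertools.product([1, 2, 3], ['a', 'b'])))
# [(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b'), (3, 'a'), (3, 'b')]
```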

@@ -76,7 +74,6 @@ class LookupModule(LookupBase):
        return results

    def run(self, terms, variables=None, **kwargs):
        self.set_options(var_options=variables, direct=kwargs)

        terms = self._lookup_variables(terms)

plugins/lookup/credstash.py

@@ -22,33 +22,25 @@ DOCUMENTATION = '''
      required: true
    table:
      description: name of the credstash table to query
      type: str
      default: 'credential-store'
    version:
      description: Credstash version
      type: str
      default: ''
    region:
      description: AWS region
      type: str
    profile_name:
      description: AWS profile to use for authentication
      type: str
      env:
        - name: AWS_PROFILE
    aws_access_key_id:
      description: AWS access key ID
      type: str
      env:
        - name: AWS_ACCESS_KEY_ID
    aws_secret_access_key:
      description: AWS secret access key
      type: str
      env:
        - name: AWS_SECRET_ACCESS_KEY
    aws_session_token:
      description: AWS session token
      type: str
      env:
        - name: AWS_SESSION_TOKEN
'''

@@ -93,6 +85,8 @@ RETURN = """
    type: str
"""

import os

from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase

@@ -106,39 +100,28 @@ except ImportError:


class LookupModule(LookupBase):
    def run(self, terms, variables=None, **kwargs):
    def run(self, terms, variables, **kwargs):

        if not CREDSTASH_INSTALLED:
            raise AnsibleError('The credstash lookup plugin requires credstash to be installed.')

        self.set_options(var_options=variables, direct=kwargs)

        version = self.get_option('version')
        region = self.get_option('region')
        table = self.get_option('table')
        profile_name = self.get_option('profile_name')
        aws_access_key_id = self.get_option('aws_access_key_id')
        aws_secret_access_key = self.get_option('aws_secret_access_key')
        aws_session_token = self.get_option('aws_session_token')

        context = dict(
            (k, v) for k, v in kwargs.items()
            if k not in ('version', 'region', 'table', 'profile_name', 'aws_access_key_id', 'aws_secret_access_key', 'aws_session_token')
        )

        kwargs_pass = {
            'profile_name': profile_name,
            'aws_access_key_id': aws_access_key_id,
            'aws_secret_access_key': aws_secret_access_key,
            'aws_session_token': aws_session_token,
        }

        ret = []
        for term in terms:
            try:
                ret.append(credstash.getSecret(term, version, region, table, context=context, **kwargs_pass))
                version = kwargs.pop('version', '')
                region = kwargs.pop('region', None)
                table = kwargs.pop('table', 'credential-store')
                profile_name = kwargs.pop('profile_name', os.getenv('AWS_PROFILE', None))
                aws_access_key_id = kwargs.pop('aws_access_key_id', os.getenv('AWS_ACCESS_KEY_ID', None))
                aws_secret_access_key = kwargs.pop('aws_secret_access_key', os.getenv('AWS_SECRET_ACCESS_KEY', None))
                aws_session_token = kwargs.pop('aws_session_token', os.getenv('AWS_SESSION_TOKEN', None))
                kwargs_pass = {'profile_name': profile_name, 'aws_access_key_id': aws_access_key_id,
                               'aws_secret_access_key': aws_secret_access_key, 'aws_session_token': aws_session_token}
                val = credstash.getSecret(term, version, region, table, context=kwargs, **kwargs_pass)
            except credstash.ItemNotFound:
                raise AnsibleError('Key {0} not found'.format(term))
            except Exception as e:
                raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e))
            ret.append(val)

        return ret
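A sketch of the context handling above: any keyword argument that is not a recognized option ends up as encryption context for `credstash.getSecret()` (the example keys are illustrative):

```python
# Mirrors the dict comprehension in the run() method above.
reserved = ('version', 'region', 'table', 'profile_name',
            'aws_access_key_id', 'aws_secret_access_key', 'aws_session_token')
kwargs = {'region': 'us-east-1', 'app': 'billing', 'environment': 'production'}
context = dict((k, v) for k, v in kwargs.items() if k not in reserved)
# context == {'app': 'billing', 'environment': 'production'}
```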

@@ -173,6 +173,7 @@ class LookupModule(LookupBase):
    """

    def run(self, terms, variables=None, **kwargs):

        display.vvvv("%s" % terms)
        if isinstance(terms, list):
            return_values = []

@@ -16,7 +16,7 @@ description:
    or template expressions which evaluate to lists or dicts, composed of the elements of
    the input evaluated lists and dictionaries."
options:
  _terms:
  _raw:
    description:
      - A list where the elements are one-element dictionaries, mapping a name to a string, list, or dictionary.
        The name is the index that is used in the result object. The value is iterated over as described below.
@@ -191,8 +191,6 @@ class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        """Generate list."""
        self.set_options(var_options=variables, direct=kwargs)

        result = []
        if len(terms) > 0:
            templar = Templar(loader=self._templar._loader)

plugins/lookup/dig.py

@@ -21,27 +21,22 @@ DOCUMENTATION = '''
    - In addition to (default) A record, it is also possible to specify a different record type that should be queried.
      This can be done by either passing-in additional parameter of format qtype=TYPE to the dig lookup, or by appending /TYPE to the FQDN being queried.
    - If multiple values are associated with the requested record, the results will be returned as a comma-separated list.
      In such cases you may want to pass option I(wantlist=true) to the lookup call, or alternatively use C(query) instead of C(lookup),
      which will result in the record values being returned as a list over which you can iterate later on.
      In such cases you may want to pass option wantlist=True to the plugin, which will result in the record values being returned as a list
      over which you can iterate later on.
    - By default, the lookup will rely on system-wide configured DNS servers for performing the query.
      It is also possible to explicitly specify DNS servers to query using the @DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N notation.
      This needs to be passed-in as an additional parameter to the lookup.
  options:
    _terms:
      description: Domain(s) to query.
      type: list
      elements: str
    qtype:
      description:
        - Record type to query.
        - C(DLV) has been removed in community.general 6.0.0.
        - C(CAA) has been added in community.general 6.3.0.
      type: str
        - C(DLV) is deprecated and will be removed in community.general 6.0.0.
      default: 'A'
      choices: [A, ALL, AAAA, CAA, CNAME, DNAME, DNSKEY, DS, HINFO, LOC, MX, NAPTR, NS, NSEC3PARAM, PTR, RP, RRSIG, SOA, SPF, SRV, SSHFP, TLSA, TXT]
      choices: [A, ALL, AAAA, CNAME, DNAME, DLV, DNSKEY, DS, HINFO, LOC, MX, NAPTR, NS, NSEC3PARAM, PTR, RP, RRSIG, SOA, SPF, SRV, SSHFP, TLSA, TXT]
    flat:
      description: If 0 each record is returned as a dictionary, otherwise a string.
      type: int
      default: 1
    retry_servfail:
      description: Retry a nameserver if it returns SERVFAIL.
@@ -57,19 +52,6 @@ DOCUMENTATION = '''
      default: false
      type: bool
      version_added: 5.4.0
    real_empty:
      description:
        - Return empty result without empty strings, and return empty list instead of C(NXDOMAIN).
        - The default for this option will likely change to C(true) in the future.
        - This option will be forced to C(true) if multiple domains to be queried are specified.
      default: false
      type: bool
      version_added: 6.0.0
    class:
      description:
        - "Class."
      type: str
      default: 'IN'
  notes:
    - ALL is not a record per-se, merely the listed fields are available for any record results you retrieve in the form of a dictionary.
    - While the 'dig' lookup plugin supports anything which dnspython supports out of the box, only a subset can be converted into a dictionary.
@@ -85,7 +67,7 @@ EXAMPLES = """

- name: "The TXT record for example.org."
  ansible.builtin.debug:
    msg: "{{ lookup('community.general.dig', 'example.org.', qtype='TXT') }}"
    msg: "{{ lookup('community.general.dig', 'example.org.', 'qtype=TXT') }}"

- name: "The TXT record for example.org, alternative syntax."
  ansible.builtin.debug:
@@ -94,39 +76,24 @@ EXAMPLES = """
- name: use in a loop
  ansible.builtin.debug:
    msg: "MX record for gmail.com {{ item }}"
  with_items: "{{ lookup('community.general.dig', 'gmail.com./MX', wantlist=true) }}"

- name: Lookup multiple names at once
  ansible.builtin.debug:
    msg: "A record found {{ item }}"
  loop: "{{ query('community.general.dig', 'example.org.', 'example.com.', 'gmail.com.') }}"

- name: Lookup multiple names at once (from list variable)
  ansible.builtin.debug:
    msg: "A record found {{ item }}"
  loop: "{{ query('community.general.dig', *hosts) }}"
  vars:
    hosts:
      - example.org.
      - example.com.
      - gmail.com.
  with_items: "{{ lookup('community.general.dig', 'gmail.com./MX', wantlist=True) }}"

- ansible.builtin.debug:
    msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '192.0.2.5/PTR') }}"
- ansible.builtin.debug:
    msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '5.2.0.192.in-addr.arpa./PTR') }}"
- ansible.builtin.debug:
    msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '5.2.0.192.in-addr.arpa.', qtype='PTR') }}"
    msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '5.2.0.192.in-addr.arpa.', 'qtype=PTR') }}"
- ansible.builtin.debug:
    msg: "Querying 198.51.100.23 for IPv4 address for example.com. produces {{ lookup('dig', 'example.com', '@198.51.100.23') }}"

- ansible.builtin.debug:
    msg: "XMPP service for gmail.com. is available at {{ item.target }} on port {{ item.port }}"
  with_items: "{{ lookup('community.general.dig', '_xmpp-server._tcp.gmail.com./SRV', flat=0, wantlist=true) }}"
  with_items: "{{ lookup('community.general.dig', '_xmpp-server._tcp.gmail.com./SRV', 'flat=0', wantlist=True) }}"

- name: Retry nameservers that return SERVFAIL
  ansible.builtin.debug:
    msg: "{{ lookup('community.general.dig', 'example.org./A', retry_servfail=true) }}"
    msg: "{{ lookup('community.general.dig', 'example.org./A', 'retry_servfail=True') }}"
"""
|
||||

RETURN = """
@@ -146,18 +113,15 @@ RETURN = """
  AAAA:
    description:
      - address
  CAA:
    description:
      - flags
      - tag
      - value
    version_added: 6.3.0
  CNAME:
    description:
      - target
  DNAME:
    description:
      - target
  DLV:
    description:
      - algorithm, digest_type, key_tag, digest
  DNSKEY:
    description:
      - flags, algorithm, protocol, key
@@ -221,7 +185,7 @@ try:
    import dns.resolver
    import dns.reversename
    import dns.rdataclass
    from dns.rdatatype import (A, AAAA, CAA, CNAME, DNAME, DNSKEY, DS, HINFO, LOC,
    from dns.rdatatype import (A, AAAA, CNAME, DLV, DNAME, DNSKEY, DS, HINFO, LOC,
                               MX, NAPTR, NS, NSEC3PARAM, PTR, RP, SOA, SPF, SRV, SSHFP, TLSA, TXT)
    HAVE_DNS = True
except ImportError:
@@ -241,9 +205,9 @@ def make_rdata_dict(rdata):
    supported_types = {
        A: ['address'],
        AAAA: ['address'],
        CAA: ['flags', 'tag', 'value'],
        CNAME: ['target'],
        DNAME: ['target'],
        DLV: ['algorithm', 'digest_type', 'key_tag', 'digest'],
        DNSKEY: ['flags', 'algorithm', 'protocol', 'key'],
        DS: ['algorithm', 'digest_type', 'key_tag', 'digest'],
        HINFO: ['cpu', 'os'],
@@ -273,6 +237,8 @@ def make_rdata_dict(rdata):
        if isinstance(val, dns.name.Name):
            val = dns.name.Name.to_text(val)

        if rdata.rdtype == DLV and f == 'digest':
            val = dns.rdata._hexify(rdata.digest).replace(' ', '')
        if rdata.rdtype == DS and f == 'digest':
            val = dns.rdata._hexify(rdata.digest).replace(' ', '')
        if rdata.rdtype == DNSKEY and f == 'algorithm':
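To make the `supported_types` mapping concrete: each record type maps to the attribute names that are copied off the dnspython rdata object into a plain dict. The following is a simplified sketch of that conversion, not the collection's exact `make_rdata_dict` (which also special-cases digests and algorithms, as the hunk above shows).

```python
# Simplified sketch: copy per-type attributes from a dnspython rdata object
# into a plain dictionary the lookup can return.
def rdata_to_dict(rdata, fields):
    rd = {}
    for f in fields:
        val = getattr(rdata, f)
        rd[f] = val if isinstance(val, (int, float)) else str(val)
    return rd

# e.g. for an MX answer with fields ['preference', 'exchange']:
# rdata_to_dict(rdata, ['preference', 'exchange'])
# -> {'preference': 10, 'exchange': 'mail.example.org.'}
```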
@@ -314,26 +280,20 @@ class LookupModule(LookupBase):

            ... flat=0                  # returns a dict; default is 1 == string
        '''

        if HAVE_DNS is False:
            raise AnsibleError("The dig lookup requires the python 'dnspython' library and it is not installed")

        self.set_options(var_options=variables, direct=kwargs)

        # Create Resolver object so that we can set NS if necessary
        myres = dns.resolver.Resolver(configure=True)
        edns_size = 4096
        myres.use_edns(0, ednsflags=dns.flags.DO, payload=edns_size)

        domains = []
        qtype = self.get_option('qtype')
        flat = self.get_option('flat')
        fail_on_error = self.get_option('fail_on_error')
        real_empty = self.get_option('real_empty')
        try:
            rdclass = dns.rdataclass.from_text(self.get_option('class'))
        except Exception as e:
            raise AnsibleError("dns lookup illegal CLASS: %s" % to_native(e))
        myres.retry_servfail = self.get_option('retry_servfail')
        domain = None
        qtype = 'A'
        flat = True
        fail_on_error = False
        rdclass = dns.rdataclass.from_text('IN')

        for t in terms:
            if t.startswith('@'):  # e.g. "@10.0.1.2,192.0.2.1" is ok.
@@ -356,7 +316,7 @@ class LookupModule(LookupBase):
                continue
            if '=' in t:
                try:
                    opt, arg = t.split('=', 1)
                    opt, arg = t.split('=')
                except Exception:
                    pass

@@ -373,79 +333,71 @@ class LookupModule(LookupBase):
                    myres.retry_servfail = boolean(arg)
                elif opt == 'fail_on_error':
                    fail_on_error = boolean(arg)
                elif opt == 'real_empty':
                    real_empty = boolean(arg)

                continue

            if '/' in t:
                try:
                    domain, qtype = t.split('/')
                    domains.append(domain)
                except Exception:
                    domains.append(t)
                    domain = t
            else:
                domains.append(t)
                domain = t

        # print "--- domain = {0} qtype={1} rdclass={2}".format(domain, qtype, rdclass)

        if qtype.upper() == 'PTR':
            reversed_domains = []
            for domain in domains:
                try:
                    n = dns.reversename.from_address(domain)
                    reversed_domains.append(n.to_text())
                except dns.exception.SyntaxError:
                    pass
                except Exception as e:
                    raise AnsibleError("dns.reversename unhandled exception %s" % to_native(e))
            domains = reversed_domains

        if len(domains) > 1:
            real_empty = True

        ret = []

        for domain in domains:
            if qtype.upper() == 'DLV':
                display.deprecated('The DLV record type has been decommissioned in 2017 and support for'
                                   ' it will be removed from community.general 6.0.0',
                                   version='6.0.0', collection_name='community.general')

            if qtype.upper() == 'PTR':
                try:
                    answers = myres.query(domain, qtype, rdclass=rdclass)
                    for rdata in answers:
                        s = rdata.to_text()
                        if qtype.upper() == 'TXT':
                            s = s[1:-1]  # Strip outside quotes on TXT rdata
                    n = dns.reversename.from_address(domain)
                    domain = n.to_text()
                except dns.exception.SyntaxError:
                    pass
                except Exception as e:
                    raise AnsibleError("dns.reversename unhandled exception %s" % to_native(e))

                        if flat:
                            ret.append(s)
                        else:
                            try:
                                rd = make_rdata_dict(rdata)
                                rd['owner'] = answers.canonical_name.to_text()
                                rd['type'] = dns.rdatatype.to_text(rdata.rdtype)
                                rd['ttl'] = answers.rrset.ttl
                                rd['class'] = dns.rdataclass.to_text(rdata.rdclass)
            try:
                answers = myres.query(domain, qtype, rdclass=rdclass)
                for rdata in answers:
                    s = rdata.to_text()
                    if qtype.upper() == 'TXT':
                        s = s[1:-1]  # Strip outside quotes on TXT rdata

                                ret.append(rd)
                            except Exception as err:
                                if fail_on_error:
                                    raise AnsibleError("Lookup failed: %s" % str(err))
                                ret.append(str(err))
                    if flat:
                        ret.append(s)
                    else:
                        try:
                            rd = make_rdata_dict(rdata)
                            rd['owner'] = answers.canonical_name.to_text()
                            rd['type'] = dns.rdatatype.to_text(rdata.rdtype)
                            rd['ttl'] = answers.rrset.ttl
                            rd['class'] = dns.rdataclass.to_text(rdata.rdclass)

                except dns.resolver.NXDOMAIN as err:
                    if fail_on_error:
                        raise AnsibleError("Lookup failed: %s" % str(err))
                    if not real_empty:
                        ret.append('NXDOMAIN')
                except dns.resolver.NoAnswer as err:
                    if fail_on_error:
                        raise AnsibleError("Lookup failed: %s" % str(err))
                    if not real_empty:
                        ret.append("")
                except dns.resolver.Timeout as err:
                    if fail_on_error:
                        raise AnsibleError("Lookup failed: %s" % str(err))
                    if not real_empty:
                        ret.append("")
                except dns.exception.DNSException as err:
                    raise AnsibleError("dns.resolver unhandled exception %s" % to_native(err))
                            ret.append(rd)
                        except Exception as err:
                            if fail_on_error:
                                raise AnsibleError("Lookup failed: %s" % str(err))
                            ret.append(str(err))

            except dns.resolver.NXDOMAIN as err:
                if fail_on_error:
                    raise AnsibleError("Lookup failed: %s" % str(err))
                ret.append('NXDOMAIN')
            except dns.resolver.NoAnswer as err:
                if fail_on_error:
                    raise AnsibleError("Lookup failed: %s" % str(err))
                ret.append("")
            except dns.resolver.Timeout as err:
                if fail_on_error:
                    raise AnsibleError("Lookup failed: %s" % str(err))
                ret.append('')
            except dns.exception.DNSException as err:
                raise AnsibleError("dns.resolver unhandled exception %s" % to_native(err))

        return ret
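One detail worth calling out in the hunk above: the change from `t.split('=')` to `t.split('=', 1)` matters whenever a value itself contains `=`. A hedged standalone sketch of the term grammar the loop accepts (this is an illustrative helper, not the plugin's code):

```python
# Sketch of the dig lookup's term parsing: '@nameservers', 'key=value'
# options, and 'domain/QTYPE' queries. maxsplit=1 keeps values with '='.
def parse_term(t):
    if t.startswith('@'):  # e.g. "@10.0.1.2,192.0.2.1"
        return ('nameservers', t[1:].split(','))
    if '=' in t:
        opt, arg = t.split('=', 1)
        return (opt, arg)
    if '/' in t:
        domain, qtype = t.split('/')
        return ('query', (domain, qtype))
    return ('query', (t, 'A'))

print(parse_term('retry_servfail=True'))  # ('retry_servfail', 'True')
print(parse_term('gmail.com./MX'))        # ('query', ('gmail.com.', 'MX'))
```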
@@ -20,13 +20,6 @@ DOCUMENTATION = '''
    required: true
    type: list
    elements: string
  real_empty:
    description:
      - Return empty result without empty strings, and return empty list instead of C(NXDOMAIN).
      - The default for this option will likely change to C(true) in the future.
    default: false
    type: bool
    version_added: 6.0.0
'''

EXAMPLES = """
@@ -78,13 +71,10 @@ from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        self.set_options(var_options=variables, direct=kwargs)

        if HAVE_DNS is False:
            raise AnsibleError("Can't LOOKUP(dnstxt): module dns.resolver is not installed")

        real_empty = self.get_option('real_empty')

        ret = []
        for term in terms:
            domain = term.split()[0]
@@ -96,16 +86,10 @@ class LookupModule(LookupBase):
                    string.append(s[1:-1])  # Strip outside quotes on TXT rdata

            except dns.resolver.NXDOMAIN:
                if real_empty:
                    continue
                string = 'NXDOMAIN'
            except dns.resolver.Timeout:
                if real_empty:
                    continue
                string = ''
            except dns.resolver.NoAnswer:
                if real_empty:
                    continue
                string = ''
            except DNSException as e:
                raise AnsibleError("dns.resolver unhandled exception %s" % to_native(e))
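The `real_empty` option introduced above changes what a failed name contributes to the result list. A small hedged restatement of that behaviour, with hypothetical sample data standing in for resolver outcomes:

```python
# real_empty=True: failed names contribute nothing; otherwise a sentinel
# string ('NXDOMAIN' or '') is appended, mirroring the except blocks above.
def txt_results(outcomes, real_empty=False):
    ret = []
    for domain, value in outcomes.items():
        if value is None:  # stand-in for NXDOMAIN / NoAnswer / Timeout
            if real_empty:
                continue
            ret.append('NXDOMAIN')
        else:
            ret.append(value)
    return ret

print(txt_results({'a.example': 'v=spf1 -all', 'gone.example': None}))
# ['v=spf1 -all', 'NXDOMAIN']
print(txt_results({'gone.example': None}, real_empty=True))  # []
```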
@@ -201,8 +201,6 @@ def file_props(root, path):
class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        self.set_options(var_options=variables, direct=kwargs)

        basedir = self.get_basedir(variables)

        ret = []
@@ -11,17 +11,14 @@ DOCUMENTATION = '''
  author: Serge van Ginderachter (!UNKNOWN) <serge@vanginderachter.be>
  short_description: return single list completely flattened
  description:
    - Given one or more lists, this lookup will flatten any list elements found recursively until only 1 list is left.
    - given one or more lists, this lookup will flatten any list elements found recursively until only 1 list is left.
  options:
    _terms:
      description: lists to flatten
      type: list
      elements: raw
      required: true
  notes:
    - Unlike the R(items lookup,ansible_collections.ansible.builtin.items_lookup) which only flattens 1 level,
      this plugin will continue to flatten until it cannot find lists anymore.
    - Aka highlander plugin, there can only be one (list).
    - unlike 'items' which only flattens 1 level, this plugin will continue to flatten until it cannot find lists anymore.
    - aka highlander plugin, there can only be one (list).
'''

EXAMPLES = """
@@ -86,10 +83,9 @@ class LookupModule(LookupBase):

        return ret

    def run(self, terms, variables=None, **kwargs):
    def run(self, terms, variables, **kwargs):

        if not isinstance(terms, list):
            raise AnsibleError("with_flattened expects a list")

        self.set_options(var_options=variables, direct=kwargs)

        return self._do_flatten(terms, variables)
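The recursion the docs describe ("flatten until it cannot find lists anymore") reduces to a few lines. A minimal hedged sketch (a hypothetical helper, not the collection's `_do_flatten`, which also handles templating):

```python
# Recursively flatten nested lists until no list elements remain.
def flatten(items):
    result = []
    for item in items:
        if isinstance(item, list):
            result.extend(flatten(item))  # recurse into nested lists
        else:
            result.append(item)
    return result

print(flatten([1, [2, [3, 4]], [[5]]]))  # [1, 2, 3, 4, 5]
```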
@@ -14,23 +14,23 @@ DOCUMENTATION = '''
  requirements:
    - hiera (command line utility)
  description:
    - Retrieves data from a Puppetmaster node using Hiera as ENC.
    - Retrieves data from a Puppetmaster node using Hiera as ENC
  options:
    _terms:
    _hiera_key:
      description:
        - The list of keys to lookup on the Puppetmaster.
        - The list of keys to lookup on the Puppetmaster
      type: list
      elements: string
      required: true
    executable:
    _bin_file:
      description:
        - Binary file to execute Hiera.
        - Binary file to execute Hiera
      default: '/usr/bin/hiera'
      env:
        - name: ANSIBLE_HIERA_BIN
    config_file:
    _hierarchy_file:
      description:
        - File that describes the hierarchy of Hiera.
        - File that describes the hierarchy of Hiera
      default: '/etc/hiera.yaml'
      env:
        - name: ANSIBLE_HIERA_CFG
@@ -61,32 +61,31 @@ RETURN = """
  elements: str
"""

import os

from ansible.plugins.lookup import LookupBase
from ansible.utils.cmd_functions import run_cmd
from ansible.module_utils.common.text.converters import to_text

ANSIBLE_HIERA_CFG = os.getenv('ANSIBLE_HIERA_CFG', '/etc/hiera.yaml')
ANSIBLE_HIERA_BIN = os.getenv('ANSIBLE_HIERA_BIN', '/usr/bin/hiera')


class Hiera(object):
    def __init__(self, hiera_cfg, hiera_bin):
        self.hiera_cfg = hiera_cfg
        self.hiera_bin = hiera_bin

    def get(self, hiera_key):
        pargs = [self.hiera_bin]
        pargs.extend(['-c', self.hiera_cfg])
        pargs = [ANSIBLE_HIERA_BIN]
        pargs.extend(['-c', ANSIBLE_HIERA_CFG])

        pargs.extend(hiera_key)

        rc, output, err = run_cmd("{0} -c {1} {2}".format(
            self.hiera_bin, self.hiera_cfg, hiera_key[0]))
            ANSIBLE_HIERA_BIN, ANSIBLE_HIERA_CFG, hiera_key[0]))

        return to_text(output.strip())


class LookupModule(LookupBase):
    def run(self, terms, variables=None, **kwargs):
        self.set_options(var_options=variables, direct=kwargs)

        hiera = Hiera(self.get_option('config_file'), self.get_option('executable'))
    def run(self, terms, variables=''):
        hiera = Hiera()
        ret = [hiera.get(terms)]
        return ret
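For orientation, the `Hiera` class above is a thin wrapper around shelling out to the hiera CLI. A minimal hedged sketch of that call; the binary and config paths are the plugin's defaults and are assumptions about your environment:

```python
# Sketch of the CLI invocation the Hiera class wraps; requires a working
# Puppet/Hiera installation on the control machine.
import subprocess

def hiera_get(key, cfg='/etc/hiera.yaml', binary='/usr/bin/hiera'):
    out = subprocess.check_output([binary, '-c', cfg, key])
    return out.decode().strip()

# print(hiera_get('common::ntp_servers'))
```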
@@ -26,9 +26,7 @@ EXAMPLES = """
      - 'servicename username'

- name: access mysql with password from keyring
  community.mysql.mysql_db:
    login_password: "{{ lookup('community.general.keyring', 'mysql joe') }}"
    login_user: joe
  mysql_db: login_password={{lookup('community.general.keyring','mysql joe')}} login_user=joe
"""

RETURN = """
@@ -55,12 +53,10 @@ display = Display()

class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
    def run(self, terms, **kwargs):
        if not HAS_KEYRING:
            raise AnsibleError(u"Can't LOOKUP(keyring): missing required python library 'keyring'")

        self.set_options(var_options=variables, direct=kwargs)

        display.vvvv(u"keyring: %s" % keyring.get_keyring())
        ret = []
        for term in terms:
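Each term the loop consumes is a `'servicename username'` pair, resolved through the `keyring` library's system backend. A hedged sketch of one iteration (assumption: the `keyring` package is installed and a backend is configured):

```python
# Sketch of what one term resolves to via keyring.get_password().
import keyring

def lookup_keyring(term):
    servicename, username = term.split()[:2]  # terms look like 'mysql joe'
    secret = keyring.get_password(servicename, username)
    if secret is None:
        raise RuntimeError("%s for user %s not found" % (servicename, username))
    return secret
```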
@@ -13,20 +13,15 @@ DOCUMENTATION = '''
  version_added: '0.2.0'
  short_description: fetch data from LMDB
  description:
    - This lookup returns a list of results from an LMDB DB corresponding to a list of items given to it.
    - This lookup returns a list of results from an LMDB DB corresponding to a list of items given to it
  requirements:
    - lmdb (python library https://lmdb.readthedocs.io/en/release/)
  options:
    _terms:
      description: List of keys to query.
      type: list
      elements: str
      description: list of keys to query
    db:
      description: Path to LMDB database.
      type: str
      description: path to LMDB database
      default: 'ansible.mdb'
      vars:
        - name: lmdb_kv_db
'''

EXAMPLES = """
@@ -48,8 +43,8 @@ EXAMPLES = """
      - item == 'Belgium'
  vars:
    - lmdb_kv_db: jp.mdb
  with_community.general.lmdb_kv:
    - be
  with_community.general.lmdb_kv:
    - be
"""

RETURN = """
@@ -63,7 +58,6 @@ _raw:
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.common.text.converters import to_native, to_text

HAVE_LMDB = True
try:
    import lmdb
@@ -73,7 +67,8 @@ except ImportError:

class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
    def run(self, terms, variables, **kwargs):

        '''
        terms contain any number of keys to be retrieved.
        If terms is None, all keys from the database are returned
@@ -86,15 +81,17 @@ class LookupModule(LookupBase):
          vars:
            - lmdb_kv_db: "jp.mdb"
        '''

        if HAVE_LMDB is False:
            raise AnsibleError("Can't LOOKUP(lmdb_kv): this module requires lmdb to be installed")

        self.set_options(var_options=variables, direct=kwargs)

        db = self.get_option('db')
        db = variables.get('lmdb_kv_db', None)
        if db is None:
            db = kwargs.get('db', 'ansible.mdb')
        db = str(db)

        try:
            env = lmdb.open(str(db), readonly=True)
            env = lmdb.open(db, readonly=True)
        except Exception as e:
            raise AnsibleError("LMDB can't open database %s: %s" % (db, to_native(e)))
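Once the environment is open, key retrieval is a read-only transaction. A hedged sketch of the read path the plugin performs after `lmdb.open()` (assumption: the `lmdb` package and an existing database file such as the `jp.mdb` from the example):

```python
# Sketch of reading keys from an LMDB database the way the plugin does.
import lmdb

def lmdb_get(db_path, keys):
    env = lmdb.open(db_path, readonly=True)
    results = []
    with env.begin() as txn:  # read-only transaction
        for key in keys:
            value = txn.get(key.encode())
            results.append(value.decode() if value is not None else '')
    return results

# print(lmdb_get('jp.mdb', ['be']))  # -> ['Belgium'] given the example data
```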
@@ -68,6 +68,7 @@ from ansible.module_utils import six
from ansible.utils.display import Display
from traceback import format_exception
import json
import os
import sys

display = Display()
@@ -206,7 +207,7 @@ class ManifoldApiClient(object):

class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
    def run(self, terms, variables=None, api_token=None, project=None, team=None):
        """
        :param terms: a list of resources lookups to run.
        :param variables: ansible variables active at the time of the lookup
@@ -216,11 +217,10 @@ class LookupModule(LookupBase):
        :return: a dictionary of resources credentials
        """

        self.set_options(var_options=variables, direct=kwargs)

        api_token = self.get_option('api_token')
        project = self.get_option('project')
        team = self.get_option('team')
        if not api_token:
            api_token = os.getenv('MANIFOLD_API_TOKEN')
        if not api_token:
            raise AnsibleError('API token is required. Please set api_token parameter or MANIFOLD_API_TOKEN env var')

        try:
            labels = terms
@@ -1,212 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Thales Netherlands
# Copyright (c) 2021, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = """
  author:
    - Roy Lenferink (@rlenferink)
    - Mark Ettema (@m-a-r-k-e)
  name: merge_variables
  short_description: merge variables with a certain suffix
  description:
    - This lookup returns the merged result of all variables in scope that match the given prefixes, suffixes, or
      regular expressions, optionally.
  version_added: 6.5.0
  options:
    _terms:
      description:
        - Depending on the value of I(pattern_type), this is a list of prefixes, suffixes, or regular expressions
          that will be used to match all variables that should be merged.
      required: true
      type: list
      elements: str
    pattern_type:
      description:
        - Change the way of searching for the specified pattern.
      type: str
      default: 'regex'
      choices:
        - prefix
        - suffix
        - regex
      env:
        - name: ANSIBLE_MERGE_VARIABLES_PATTERN_TYPE
      ini:
        - section: merge_variables_lookup
          key: pattern_type
    initial_value:
      description:
        - An initial value to start with.
      type: raw
    override:
      description:
        - Return an error, print a warning or ignore it when a key will be overwritten.
        - The default behavior C(error) makes the plugin fail when a key would be overwritten.
        - When C(warn) and C(ignore) are used, note that it is important to know that the variables
          are sorted by name before being merged. Keys for later variables in this order will overwrite
          keys of the same name for variables earlier in this order. To avoid potential confusion,
          better use I(override=error) whenever possible.
      type: str
      default: 'error'
      choices:
        - error
        - warn
        - ignore
      env:
        - name: ANSIBLE_MERGE_VARIABLES_OVERRIDE
      ini:
        - section: merge_variables_lookup
          key: override
"""

EXAMPLES = """
# Some example variables, they can be defined anywhere as long as they are in scope
test_init_list:
  - "list init item 1"
  - "list init item 2"

testa__test_list:
  - "test a item 1"

testb__test_list:
  - "test b item 1"

testa__test_dict:
  ports:
    - 1

testb__test_dict:
  ports:
    - 3


# Merge variables that end with '__test_dict' and store the result in a variable 'example_a'
example_a: "{{ lookup('community.general.merge_variables', '__test_dict', pattern_type='suffix') }}"

# The variable example_a now contains:
# ports:
#   - 1
#   - 3


# Merge variables that match the '^.+__test_list$' regular expression, starting with an initial value and store the
# result in a variable 'example_b'
example_b: "{{ lookup('community.general.merge_variables', '^.+__test_list$', initial_value=test_init_list) }}"

# The variable example_b now contains:
#   - "list init item 1"
#   - "list init item 2"
#   - "test a item 1"
#   - "test b item 1"
"""

RETURN = """
_raw:
  description: In case the search matches list items, a list will be returned. In case the search matches dicts, a
    dict will be returned.
  type: raw
  elements: raw
"""

import re

from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.display import Display

display = Display()


def _verify_and_get_type(variable):
    if isinstance(variable, list):
        return "list"
    elif isinstance(variable, dict):
        return "dict"
    else:
        raise AnsibleError("Not supported type detected, variable must be a list or a dict")


class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        self.set_options(direct=kwargs)
        initial_value = self.get_option("initial_value", None)
        self._override = self.get_option('override', 'error')
        self._pattern_type = self.get_option('pattern_type', 'regex')

        ret = []
        for term in terms:
            if not isinstance(term, str):
                raise AnsibleError("Non-string type '{0}' passed, only 'str' types are allowed!".format(type(term)))

            ret.append(self._merge_vars(term, initial_value, variables))

        return ret

    def _var_matches(self, key, search_pattern):
        if self._pattern_type == "prefix":
            return key.startswith(search_pattern)
        elif self._pattern_type == "suffix":
            return key.endswith(search_pattern)
        elif self._pattern_type == "regex":
            matcher = re.compile(search_pattern)
            return matcher.search(key)

        return False

    def _merge_vars(self, search_pattern, initial_value, variables):
        display.vvv("Merge variables with {0}: {1}".format(self._pattern_type, search_pattern))
        var_merge_names = sorted([key for key in variables.keys() if self._var_matches(key, search_pattern)])
        display.vvv("The following variables will be merged: {0}".format(var_merge_names))

        prev_var_type = None
        result = None

        if initial_value is not None:
            prev_var_type = _verify_and_get_type(initial_value)
            result = initial_value

        for var_name in var_merge_names:
            var_value = self._templar.template(variables[var_name])  # Render jinja2 templates
            var_type = _verify_and_get_type(var_value)

            if prev_var_type is None:
                prev_var_type = var_type
            elif prev_var_type != var_type:
                raise AnsibleError("Unable to merge, not all variables are of the same type")

            if result is None:
                result = var_value
                continue

            if var_type == "dict":
                result = self._merge_dict(var_value, result, [var_name])
            else:  # var_type == "list"
                result += var_value

        return result

    def _merge_dict(self, src, dest, path):
        for key, value in src.items():
            if isinstance(value, dict):
                node = dest.setdefault(key, {})
                self._merge_dict(value, node, path + [key])
            elif isinstance(value, list) and key in dest:
                dest[key] += value
            else:
                if (key in dest) and dest[key] != value:
                    msg = "The key '{0}' with value '{1}' will be overwritten with value '{2}' from '{3}.{0}'".format(
                        key, dest[key], value, ".".join(path))

                    if self._override == "error":
                        raise AnsibleError(msg)
                    if self._override == "warn":
                        display.warning(msg)

                dest[key] = value

        return dest
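The merge semantics of `_merge_dict` above are: nested dicts are merged recursively, lists under the same key are concatenated, and scalar collisions are what the `override` option guards against. A hedged standalone demo of that behaviour (a stripped-down copy without the override handling):

```python
# Demo of the merge behaviour: recursive dict merge, list concatenation.
import copy

def merge_dict(src, dest):
    for key, value in src.items():
        if isinstance(value, dict):
            merge_dict(value, dest.setdefault(key, {}))
        elif isinstance(value, list) and key in dest:
            dest[key] += value
        else:
            dest[key] = value
    return dest

a = {'ports': [1]}
b = {'ports': [3]}
print(merge_dict(b, copy.deepcopy(a)))  # {'ports': [1, 3]}, matching example_a
```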
@@ -32,7 +32,7 @@ DOCUMENTATION = '''
    section:
      description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section.
    domain:
      description: Domain of 1Password.
      description: Domain of 1Password. Default is U(1password.com).
      version_added: 3.2.0
      default: '1password.com'
      type: str
@@ -55,7 +55,7 @@ DOCUMENTATION = '''
    - This lookup stores potentially sensitive data from 1Password as Ansible facts.
      Facts are subject to caching if enabled, which means this data could be stored in clear text
      on disk or in a database.
    - Tested with C(op) version 2.7.2
    - Tested with C(op) version 0.5.3
'''

EXAMPLES = """
@@ -74,18 +74,18 @@ EXAMPLES = """

- name: Retrieve password for HAL when not signed in to 1Password
  ansible.builtin.debug:
    var: lookup('community.general.onepassword'
                'HAL 9000'
                subdomain='Discovery'
    var: lookup('community.general.onepassword',
                'HAL 9000',
                subdomain='Discovery',
                master_password=vault_master_password)

- name: Retrieve password for HAL when never signed in to 1Password
  ansible.builtin.debug:
    var: lookup('community.general.onepassword'
                'HAL 9000'
                subdomain='Discovery'
                master_password=vault_master_password
                username='tweety@acme.com'
    var: lookup('community.general.onepassword',
                'HAL 9000',
                subdomain='Discovery',
                master_password=vault_master_password,
                username='tweety@acme.com',
                secret_key=vault_secret_key)
"""
@@ -96,123 +96,106 @@ RETURN = """
  elements: str
"""

import abc
import os
import errno
import json
import subprocess
import os

from subprocess import Popen, PIPE

from ansible.plugins.lookup import LookupBase
from ansible.errors import AnsibleLookupError
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.module_utils.six import with_metaclass

from ansible_collections.community.general.plugins.module_utils.onepassword import OnePasswordConfig


class OnePassCLIBase(with_metaclass(abc.ABCMeta, object)):
    bin = "op"
class OnePass(object):
    def __init__(self, path='op'):
        self.cli_path = path
        self.logged_in = False
        self.token = None
        self.subdomain = None
        self.domain = None
        self.username = None
        self.secret_key = None
        self.master_password = None

    def __init__(self, subdomain=None, domain="1password.com", username=None, secret_key=None, master_password=None):
        self.subdomain = subdomain
        self.domain = domain
        self.username = username
        self.master_password = master_password
        self.secret_key = secret_key
        self._config = OnePasswordConfig()

        self._path = None
        self._version = None
    def get_token(self):
        # If the config file exists, assume an initial signin has taken place and try basic sign in
        if os.path.isfile(self._config.config_file_path):

    def _check_required_params(self, required_params):
        non_empty_attrs = dict((param, getattr(self, param, None)) for param in required_params if getattr(self, param, None))
        missing = set(required_params).difference(non_empty_attrs)
        if missing:
            prefix = "Unable to sign in to 1Password. Missing required parameter"
            plural = ""
            suffix = ": {params}.".format(params=", ".join(missing))
            if len(missing) > 1:
                plural = "s"
            if not self.master_password:
                raise AnsibleLookupError('Unable to sign in to 1Password. master_password is required.')

            msg = "{prefix}{plural}{suffix}".format(prefix=prefix, plural=plural, suffix=suffix)
            raise AnsibleLookupError(msg)
            try:
                args = ['signin', '--output=raw']

    @abc.abstractmethod
    def _parse_field(self, data_json, field_name, section_title):
        """Main method for parsing data returned from the op command line tool"""
                if self.subdomain:
                    args = ['signin', self.subdomain, '--output=raw']

    def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False, environment_update=None):
        command = [self.path] + args
        call_kwargs = {
            "stdout": subprocess.PIPE,
            "stderr": subprocess.PIPE,
            "stdin": subprocess.PIPE,
        }
                rc, out, err = self._run(args, command_input=to_bytes(self.master_password))
                self.token = out.strip()

        if environment_update:
            env = os.environ.copy()
            env.update(environment_update)
            call_kwargs["env"] = env
            except AnsibleLookupError:
                self.full_login()

        p = subprocess.Popen(command, **call_kwargs)
        else:
            # Attempt a full sign in since there appears to be no existing sign in
            self.full_login()

    def assert_logged_in(self):
        try:
            rc, out, err = self._run(['get', 'account'], ignore_errors=True)
            if rc == 0:
                self.logged_in = True
            if not self.logged_in:
                self.get_token()
        except OSError as e:
            if e.errno == errno.ENOENT:
                raise AnsibleLookupError("1Password CLI tool '%s' not installed in path on control machine" % self.cli_path)
            raise e

    def get_raw(self, item_id, vault=None):
        args = ["get", "item", item_id]
        if vault is not None:
            args += ['--vault={0}'.format(vault)]
        if not self.logged_in:
            args += [to_bytes('--session=') + self.token]
        rc, output, dummy = self._run(args)
        return output

    def get_field(self, item_id, field, section=None, vault=None):
        output = self.get_raw(item_id, vault)
        return self._parse_field(output, field, section) if output != '' else ''

    def full_login(self):
        if None in [self.subdomain, self.username, self.secret_key, self.master_password]:
            raise AnsibleLookupError('Unable to perform initial sign in to 1Password. '
                                     'subdomain, username, secret_key, and master_password are required to perform initial sign in.')

        args = [
            'signin',
            '{0}.{1}'.format(self.subdomain, self.domain),
            to_bytes(self.username),
            to_bytes(self.secret_key),
            '--output=raw',
        ]

        rc, out, err = self._run(args, command_input=to_bytes(self.master_password))
        self.token = out.strip()

    def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False):
        command = [self.cli_path] + args
        p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
        out, err = p.communicate(input=command_input)
        rc = p.wait()

        if not ignore_errors and rc != expected_rc:
            raise AnsibleLookupError(to_text(err))

        return rc, out, err

    @abc.abstractmethod
    def assert_logged_in(self):
        """Check whether a login session exists"""

    @abc.abstractmethod
    def full_signin(self):
        """Perform a full login"""

    @abc.abstractmethod
    def get_raw(self, item_id, vault=None, token=None):
        """Gets the specified item from the vault"""

    @abc.abstractmethod
    def signin(self):
        """Sign in using the master password"""

    @property
    def path(self):
        if self._path is None:
            self._path = get_bin_path(self.bin)

        return self._path

    @property
    def version(self):
        if self._version is None:
            self._version = self.get_current_version()

        return self._version

    @classmethod
    def get_current_version(cls):
        """Standalone method to get the op CLI version. Useful when determining which class to load
        based on the current version."""
        try:
            bin_path = get_bin_path(cls.bin)
        except ValueError:
            raise AnsibleLookupError("Unable to locate '%s' command line tool" % cls.bin)

        try:
            b_out = subprocess.check_output([bin_path, "--version"], stderr=subprocess.PIPE)
        except subprocess.CalledProcessError as cpe:
            raise AnsibleLookupError("Unable to get the op version: %s" % cpe)

        return to_text(b_out).strip()
class OnePassCLIv1(OnePassCLIBase):
    supports_version = "1"

    def _parse_field(self, data_json, field_name, section_title):
    def _parse_field(self, data_json, field_name, section_title=None):
        """
        Retrieves the desired field from the `op` response payload

@@ -266,356 +249,36 @@ class OnePassCLIv1(OnePassCLIBase):
        # check the details dictionary for `field_name` and return it immediately if it exists
        # when the entry is a "password" instead of a "login" item, the password field is a key
        # in the `details` dictionary:
        if field_name in data["details"]:
            return data["details"][field_name]
        if field_name in data['details']:
            return data['details'][field_name]

        # when the field is not found above, iterate through the fields list in the object details
        for field_data in data["details"].get("fields", []):
            if field_data.get("name", "").lower() == field_name.lower():
                return field_data.get("value", "")

        for section_data in data["details"].get("sections", []):
            if section_title is not None and section_title.lower() != section_data["title"].lower():
        for field_data in data['details'].get('fields', []):
            if field_data.get('name', '').lower() == field_name.lower():
                return field_data.get('value', '')
        for section_data in data['details'].get('sections', []):
            if section_title is not None and section_title.lower() != section_data['title'].lower():
                continue

            for field_data in section_data.get("fields", []):
                if field_data.get("t", "").lower() == field_name.lower():
                    return field_data.get("v", "")

        return ""

    def assert_logged_in(self):
        args = ["get", "account"]
        if self.subdomain:
            account = "{subdomain}.{domain}".format(subdomain=self.subdomain, domain=self.domain)
            args.extend(["--account", account])

        rc, out, err = self._run(args, ignore_errors=True)

        return not bool(rc)

    def full_signin(self):
        required_params = [
            "subdomain",
            "username",
            "secret_key",
            "master_password",
        ]
        self._check_required_params(required_params)

        args = [
            "signin",
            "{0}.{1}".format(self.subdomain, self.domain),
            to_bytes(self.username),
            to_bytes(self.secret_key),
            "--raw",
        ]

        return self._run(args, command_input=to_bytes(self.master_password))

    def get_raw(self, item_id, vault=None, token=None):
        args = ["get", "item", item_id]
        if vault is not None:
            args += ["--vault={0}".format(vault)]

        if token is not None:
            args += [to_bytes("--session=") + token]

        return self._run(args)

    def signin(self):
        self._check_required_params(['master_password'])

        args = ["signin", "--raw"]
        if self.subdomain:
            args.append(self.subdomain)

        return self._run(args, command_input=to_bytes(self.master_password))
class OnePassCLIv2(OnePassCLIBase):
    """
    CLIv2 Syntax Reference: https://developer.1password.com/docs/cli/upgrade#step-2-update-your-scripts
    """
    supports_version = "2"

    def _parse_field(self, data_json, field_name, section_title=None):
        """
        Schema reference: https://developer.1password.com/docs/cli/item-template-json

        Example Data:

        # Password item
        {
            "id": "ywvdbojsguzgrgnokmcxtydgdv",
            "title": "Authy Backup",
            "version": 1,
            "vault": {
                "id": "bcqxysvcnejjrwzoqrwzcqjqxc",
                "name": "Personal"
            },
            "category": "PASSWORD",
            "last_edited_by": "7FUPZ8ZNE02KSHMAIMKHIVUE17",
            "created_at": "2015-01-18T13:13:38Z",
            "updated_at": "2016-02-20T16:23:54Z",
            "additional_information": "Jan 18, 2015, 08:13:38",
            "fields": [
                {
                    "id": "password",
                    "type": "CONCEALED",
                    "purpose": "PASSWORD",
                    "label": "password",
                    "value": "OctoberPoppyNuttyDraperySabbath",
                    "reference": "op://Personal/Authy Backup/password",
                    "password_details": {
                        "strength": "FANTASTIC"
                    }
                },
                {
                    "id": "notesPlain",
                    "type": "STRING",
                    "purpose": "NOTES",
                    "label": "notesPlain",
                    "value": "Backup password to restore Authy",
                    "reference": "op://Personal/Authy Backup/notesPlain"
                }
            ]
        }

        # Login item
        {
            "id": "awk4s2u44fhnrgppszcsvc663i",
            "title": "Dummy Login",
            "version": 2,
            "vault": {
                "id": "stpebbaccrq72xulgouxsk4p7y",
                "name": "Personal"
            },
            "category": "LOGIN",
            "last_edited_by": "LSGPJERUYBH7BFPHMZ2KKGL6AU",
            "created_at": "2018-04-25T21:55:19Z",
            "updated_at": "2018-04-25T21:56:06Z",
            "additional_information": "agent.smith",
            "urls": [
                {
                    "primary": true,
                    "href": "https://acme.com"
                }
            ],
            "sections": [
                {
                    "id": "linked items",
                    "label": "Related Items"
                }
            ],
            "fields": [
                {
                    "id": "username",
                    "type": "STRING",
                    "purpose": "USERNAME",
                    "label": "username",
                    "value": "agent.smith",
                    "reference": "op://Personal/Dummy Login/username"
                },
                {
                    "id": "password",
                    "type": "CONCEALED",
                    "purpose": "PASSWORD",
                    "label": "password",
                    "value": "Q7vFwTJcqwxKmTU]Dzx7NW*wrNPXmj",
                    "entropy": 159.6083697084228,
                    "reference": "op://Personal/Dummy Login/password",
                    "password_details": {
                        "entropy": 159,
                        "generated": true,
                        "strength": "FANTASTIC"
                    }
                },
                {
                    "id": "notesPlain",
                    "type": "STRING",
                    "purpose": "NOTES",
                    "label": "notesPlain",
                    "reference": "op://Personal/Dummy Login/notesPlain"
                }
            ]
        }
        """
        data = json.loads(data_json)
        for field in data.get("fields", []):
            if section_title is None:
                # If the field name exists in the section, return that value
                if field.get(field_name):
                    return field.get(field_name)

                # If the field name doesn't exist in the section, match on the value of "label"
                # then "id" and return "value"
                if field.get("label") == field_name:
                    return field["value"]

                if field.get("id") == field_name:
                    return field["value"]

            # Look at the section data and get an identifier. The value of 'id' is either a unique ID
            # or a human-readable string. If a 'label' field exists, prefer that since
            # it is the value visible in the 1Password UI when both 'id' and 'label' exist.
            section = field.get("section", {})
            current_section_title = section.get("label", section.get("id"))
            if section_title == current_section_title:
                # In the correct section. Check "label" then "id" for the desired field_name
                if field.get("label") == field_name:
                    return field["value"]

                if field.get("id") == field_name:
                    return field["value"]

        return ""

    def assert_logged_in(self):
        args = ["account", "list"]
        if self.subdomain:
            account = "{subdomain}.{domain}".format(subdomain=self.subdomain, domain=self.domain)
            args.extend(["--account", account])

        rc, out, err = self._run(args)

        if out:
            # Running 'op account get' if there are no accounts configured on the system drops into
            # an interactive prompt. Only run 'op account get' after first listing accounts to see
            # if there are any previously configured accounts.
            args = ["account", "get"]
            if self.subdomain:
                account = "{subdomain}.{domain}".format(subdomain=self.subdomain, domain=self.domain)
                args.extend(["--account", account])

            rc, out, err = self._run(args, ignore_errors=True)

            return not bool(rc)

        return False

    def full_signin(self):
        required_params = [
            "subdomain",
            "username",
            "secret_key",
            "master_password",
        ]
        self._check_required_params(required_params)

        args = [
            "account", "add", "--raw",
            "--address", "{0}.{1}".format(self.subdomain, self.domain),
            "--email", to_bytes(self.username),
            "--signin",
        ]

        environment_update = {"OP_SECRET_KEY": self.secret_key}
        return self._run(args, command_input=to_bytes(self.master_password), environment_update=environment_update)

    def get_raw(self, item_id, vault=None, token=None):
        args = ["item", "get", item_id, "--format", "json"]
        if vault is not None:
            args += ["--vault={0}".format(vault)]
        if token is not None:
            args += [to_bytes("--session=") + token]

        return self._run(args)

    def signin(self):
        self._check_required_params(['master_password'])

        args = ["signin", "--raw"]
        if self.subdomain:
            args.extend(["--account", self.subdomain])

        return self._run(args, command_input=to_bytes(self.master_password))
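To make the v2 matching rules concrete, here is a hedged, simplified sketch of `_parse_field` run against the docstring's sample login item: match on `label` first, then `id`, restricted to the requested section when one is given (it omits the raw attribute shortcut the full method also supports).

```python
# Simplified sketch of the v2 field matching; data_json is op's JSON output.
import json

def parse_field(data_json, field_name, section_title=None):
    data = json.loads(data_json)
    for field in data.get("fields", []):
        section = field.get("section", {})
        label = section.get("label", section.get("id"))  # prefer 'label'
        if section_title is not None and section_title != label:
            continue
        if field.get("label") == field_name or field.get("id") == field_name:
            return field.get("value", "")
    return ""

sample = '{"fields": [{"id": "username", "label": "username", "value": "agent.smith"}]}'
print(parse_field(sample, "username"))  # agent.smith
```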
class OnePass(object):
    def __init__(self, subdomain=None, domain="1password.com", username=None, secret_key=None, master_password=None):
        self.subdomain = subdomain
        self.domain = domain
        self.username = username
        self.secret_key = secret_key
        self.master_password = master_password

        self.logged_in = False
        self.token = None

        self._config = OnePasswordConfig()
        self._cli = self._get_cli_class()

    def _get_cli_class(self):
        version = OnePassCLIBase.get_current_version()
        for cls in OnePassCLIBase.__subclasses__():
            if cls.supports_version == version.split(".")[0]:
                try:
                    return cls(self.subdomain, self.domain, self.username, self.secret_key, self.master_password)
                except TypeError as e:
                    raise AnsibleLookupError(e)

        raise AnsibleLookupError("op version %s is unsupported" % version)

    def set_token(self):
        if self._config.config_file_path and os.path.isfile(self._config.config_file_path):
            # If the config file exists, assume an initial sign in has taken place and try basic sign in
            try:
                rc, out, err = self._cli.signin()
            except AnsibleLookupError as exc:
                test_strings = (
                    "missing required parameters",
                    "unauthorized",
                )
                if any(string in exc.message.lower() for string in test_strings):
                    # A required parameter is missing, or a bad master password was supplied
                    # so don't bother attempting a full signin
                    raise

                rc, out, err = self._cli.full_signin()

            self.token = out.strip()

        else:
            # Attempt a full signin since there appears to be no existing signin
            rc, out, err = self._cli.full_signin()
            self.token = out.strip()

    def assert_logged_in(self):
        logged_in = self._cli.assert_logged_in()
        if logged_in:
            self.logged_in = logged_in
            pass
        else:
            self.set_token()

    def get_raw(self, item_id, vault=None):
        rc, out, err = self._cli.get_raw(item_id, vault, self.token)
        return out

    def get_field(self, item_id, field, section=None, vault=None):
        output = self.get_raw(item_id, vault)
        if output:
            return self._cli._parse_field(output, field, section)

        return ""
            for field_data in section_data.get('fields', []):
                if field_data.get('t', '').lower() == field_name.lower():
                    return field_data.get('v', '')
        return ''


class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        self.set_options(var_options=variables, direct=kwargs)
        op = OnePass()

        field = self.get_option("field")
        section = self.get_option("section")
        vault = self.get_option("vault")
        subdomain = self.get_option("subdomain")
        domain = self.get_option("domain")
        username = self.get_option("username")
        secret_key = self.get_option("secret_key")
        master_password = self.get_option("master_password")
        field = kwargs.get('field', 'password')
        section = kwargs.get('section')
        vault = kwargs.get('vault')
        op.subdomain = kwargs.get('subdomain')
        op.domain = kwargs.get('domain', '1password.com')
        op.username = kwargs.get('username')
        op.secret_key = kwargs.get('secret_key')
        op.master_password = kwargs.get('master_password', kwargs.get('vault_password'))

        op = OnePass(subdomain, domain, username, secret_key, master_password)
        op.assert_logged_in()

        values = []
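The `_get_cli_class` dispatch above is a small but reusable pattern: pick the wrapper subclass whose declared `supports_version` matches the installed CLI's major version. A hedged, self-contained sketch of just that mechanism:

```python
# Sketch of the subclass dispatch: match op's major version against each
# wrapper's supports_version via __subclasses__().
class CLIBase:
    supports_version = None

class CLIv1(CLIBase):
    supports_version = "1"

class CLIv2(CLIBase):
    supports_version = "2"

def get_cli_class(version):
    for cls in CLIBase.__subclasses__():
        if cls.supports_version == version.split(".")[0]:
            return cls
    raise RuntimeError("op version %s is unsupported" % version)

print(get_cli_class("2.7.2"))  # <class '__main__.CLIv2'>
```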
@@ -30,11 +30,6 @@ DOCUMENTATION = '''
    section:
      description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section.
    subdomain:
      description: The 1Password subdomain to authenticate against.
    domain:
      description: Domain of 1Password.
      version_added: 6.0.0
      default: '1password.com'
      type: str
    username:
      description: The username used to sign in.
    secret_key:
@@ -52,7 +47,7 @@ DOCUMENTATION = '''
    - This lookup stores potentially sensitive data from 1Password as Ansible facts.
      Facts are subject to caching if enabled, which means this data could be stored in clear text
      on disk or in a database.
    - Tested with C(op) version 2.7.0
    - Tested with C(op) version 0.5.3
'''

EXAMPLES = """
@@ -81,21 +76,18 @@ from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        self.set_options(var_options=variables, direct=kwargs)
        op = OnePass()

        vault = self.get_option("vault")
        subdomain = self.get_option("subdomain")
        domain = self.get_option("domain", "1password.com")
        username = self.get_option("username")
        secret_key = self.get_option("secret_key")
        master_password = self.get_option("master_password")
        vault = kwargs.get('vault')
        op.subdomain = kwargs.get('subdomain')
        op.username = kwargs.get('username')
        op.secret_key = kwargs.get('secret_key')
        op.master_password = kwargs.get('master_password', kwargs.get('vault_password'))

        op = OnePass(subdomain, domain, username, secret_key, master_password)
        op.assert_logged_in()

        values = []
        for term in terms:
            data = json.loads(op.get_raw(term, vault))
            values.append(data)

        return values
@@ -21,15 +21,17 @@ DOCUMENTATION = '''
    _terms:
      description: query key.
      required: true
    directory:
    passwordstore:
      description:
        - The directory of the password store.
        - If I(backend=pass), the default C(~/.password-store) is used.
        - If I(backend=gopass), then the default is the C(path) field in C(~/.config/gopass/config.yml),
          falling back to C(~/.local/share/gopass/stores/root) if C(path) is not defined in the gopass config.
      type: path
      vars:
        - name: passwordstore
        - Location of the password store.
        - 'The value is decided by checking the following in order:'
        - If set, this value is used.
        - If C(directory) is set, that value will be used.
        - If I(backend=pass), then C(~/.password-store) is used.
        - If I(backend=gopass), then the C(path) field in C(~/.config/gopass/config.yml) is used,
          falling back to C(~/.local/share/gopass/stores/root) if not defined.
    directory:
      description: The directory of the password store.
      env:
        - name: PASSWORD_STORE_DIR
    create:
@@ -53,11 +55,9 @@ DOCUMENTATION = '''
      default: false
    subkey:
      description: Return a specific subkey of the password. When set to C(password), always returns the first line.
      type: str
      default: password
    userpass:
      description: Specify a password to save, instead of a generated one.
      type: str
    length:
      description: The length of the generated password.
      type: integer
@@ -67,7 +67,7 @@ DOCUMENTATION = '''
      type: bool
      default: false
    nosymbols:
      description: Use alphanumeric characters.
      description: use alphanumeric characters.
      type: bool
      default: false
    missing:
@@ -129,8 +129,6 @@ DOCUMENTATION = '''
        - pass
        - gopass
      version_added: 5.2.0
  notes:
    - The lookup supports passing all options as lookup parameters since community.general 6.0.0.
'''
EXAMPLES = """
|
||||
ansible.cfg: |
|
||||
@@ -138,7 +136,7 @@ ansible.cfg: |
|
||||
lock=readwrite
|
||||
locktimeout=45s
|
||||
|
||||
tasks.yml: |
|
||||
playbook.yml: |
|
||||
---
|
||||
|
||||
# Debug is used for examples, BAD IDEA to show passwords on screen
|
||||
@@ -148,49 +146,45 @@ tasks.yml: |
|
||||
|
||||
- name: Basic lookup. Warns if example/test does not exist and returns empty string
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.passwordstore', 'example/test', missing='warn')}}"
|
||||
msg: "{{ lookup('community.general.passwordstore', 'example/test missing=warn')}}"
|
||||
|
||||
- name: Create pass with random 16 character password. If password exists just give the password
|
||||
ansible.builtin.debug:
|
||||
var: mypassword
|
||||
vars:
|
||||
mypassword: "{{ lookup('community.general.passwordstore', 'example/test', create=true)}}"
|
||||
mypassword: "{{ lookup('community.general.passwordstore', 'example/test create=true')}}"
|
||||
|
||||
- name: Create pass with random 16 character password. If password exists just give the password
|
||||
ansible.builtin.debug:
|
||||
var: mypassword
|
||||
vars:
|
||||
mypassword: "{{ lookup('community.general.passwordstore', 'example/test', missing='create')}}"
|
||||
mypassword: "{{ lookup('community.general.passwordstore', 'example/test missing=create')}}"
|
||||
|
||||
- name: Prints 'abc' if example/test does not exist, just give the password otherwise
|
||||
ansible.builtin.debug:
|
||||
var: mypassword
|
||||
vars:
|
||||
mypassword: >-
|
||||
{{ lookup('community.general.passwordstore', 'example/test', missing='empty')
|
||||
| default('abc', true) }}
|
||||
mypassword: "{{ lookup('community.general.passwordstore', 'example/test missing=empty') | default('abc', true) }}"
|
||||
|
||||
- name: Different size password
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, length=42)}}"
|
||||
msg: "{{ lookup('community.general.passwordstore', 'example/test create=true length=42')}}"
|
||||
|
||||
- name: >-
|
||||
Create password and overwrite the password if it exists.
|
||||
As a bonus, this module includes the old password inside the pass file
|
||||
- name: Create password and overwrite the password if it exists. As a bonus, this module includes the old password inside the pass file
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, overwrite=true)}}"
|
||||
msg: "{{ lookup('community.general.passwordstore', 'example/test create=true overwrite=true')}}"
|
||||
|
||||
- name: Create an alphanumeric password
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, nosymbols=true) }}"
|
||||
msg: "{{ lookup('community.general.passwordstore', 'example/test create=true nosymbols=true') }}"
|
||||
|
||||
- name: Return the value for user in the KV pair user, username
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.passwordstore', 'example/test', subkey='user')}}"
|
||||
msg: "{{ lookup('community.general.passwordstore', 'example/test subkey=user')}}"
|
||||
|
||||
- name: Return the entire password file content
|
||||
ansible.builtin.set_fact:
|
||||
passfilecontent: "{{ lookup('community.general.passwordstore', 'example/test', returnall=true)}}"
|
||||
passfilecontent: "{{ lookup('community.general.passwordstore', 'example/test returnall=true')}}"
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
@@ -327,7 +321,7 @@ class LookupModule(LookupBase):
|
||||
raise AnsibleError('Passwordstore directory \'{0}\' does not exist'.format(self.paramvals['directory']))
|
||||
|
||||
# Set PASSWORD_STORE_UMASK if umask is set
|
||||
if self.paramvals.get('umask') is not None:
|
||||
if 'umask' in self.paramvals:
|
||||
if len(self.paramvals['umask']) != 3:
|
||||
raise AnsibleError('Passwordstore umask must have a length of 3.')
|
||||
elif int(self.paramvals['umask'][0]) > 3:
|
||||
@@ -442,7 +436,8 @@ class LookupModule(LookupBase):
|
||||
unit_to_seconds = {"s": 1, "m": 60, "h": 3600}
|
||||
self.lock_timeout = int(timeout[:-1]) * unit_to_seconds[timeout[-1]]
|
||||
|
||||
directory = self.get_option('directory')
|
||||
directory = variables.get('passwordstore', os.environ.get('PASSWORD_STORE_DIR', None))
|
||||
|
||||
if directory is None:
|
||||
if self.backend == 'gopass':
|
||||
try:
|
||||
@@ -454,17 +449,16 @@ class LookupModule(LookupBase):
|
||||
directory = os.path.expanduser('~/.password-store')
|
||||
|
||||
self.paramvals = {
|
||||
'subkey': self.get_option('subkey'),
|
||||
'subkey': 'password',
|
||||
'directory': directory,
|
||||
'create': self.get_option('create'),
|
||||
'returnall': self.get_option('returnall'),
|
||||
'overwrite': self.get_option('overwrite'),
|
||||
'nosymbols': self.get_option('nosymbols'),
|
||||
'userpass': self.get_option('userpass') or '',
|
||||
'length': self.get_option('length'),
|
||||
'backup': self.get_option('backup'),
|
||||
'missing': self.get_option('missing'),
|
||||
'umask': self.get_option('umask'),
|
||||
'create': False,
|
||||
'returnall': False,
|
||||
'overwrite': False,
|
||||
'nosymbols': False,
|
||||
'userpass': '',
|
||||
'length': 16,
|
||||
'backup': False,
|
||||
'missing': 'error',
|
||||
}
|
||||
|
||||
def run(self, terms, variables, **kwargs):
|
||||
|
||||
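The `locktimeout` parsing shown above (matching the `locktimeout=45s` setting in the example ansible.cfg) is compact enough to restate on its own:

```python
# '45s' -> 45, '2m' -> 120, '1h' -> 3600: last character selects the unit.
unit_to_seconds = {"s": 1, "m": 60, "h": 3600}

def lock_timeout_seconds(timeout):
    return int(timeout[:-1]) * unit_to_seconds[timeout[-1]]

print(lock_timeout_seconds('45s'))  # 45
print(lock_timeout_seconds('2m'))   # 120
```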
@@ -14,24 +14,23 @@ DOCUMENTATION = '''
    - Read keys from Python shelve file.
options:
  _terms:
    description: Sets of key value pairs of parameters.
    description: sets of key value pairs of parameters
  key:
    description: Key to query.
    description: key to query
    required: true
  file:
    description: Path to shelve file.
    description: path to shelve file
    required: true
'''

EXAMPLES = """
- name: Retrieve a string value corresponding to a key inside a Python shelve file
  ansible.builtin.debug:
    msg: "{{ lookup('community.general.shelvefile', 'file=path_to_some_shelve_file.db key=key_to_retrieve') }}"
- name: retrieve a string value corresponding to a key inside a Python shelve file
  ansible.builtin.debug: msg="{{ lookup('community.general.shelvefile', 'file=path_to_some_shelve_file.db key=key_to_retrieve') }}"
"""

RETURN = """
_list:
  description: Value(s) of key(s) in shelve file(s).
  description: value(s) of key(s) in shelve file(s)
  type: list
  elements: str
"""

@@ -54,6 +53,7 @@ class LookupModule(LookupBase):
        return res

    def run(self, terms, variables=None, **kwargs):

        if not isinstance(terms, list):
            terms = [terms]
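For context, the lookup is essentially a thin wrapper over the standard library; a minimal sketch of the core read (not the plugin's exact code):

    import shelve

    def read_shelve(shelve_filename, key):
        # Open the shelve file read-only and return the value stored under key.
        with shelve.open(shelve_filename, flag='r') as d:
            return d[key]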

@@ -26,18 +26,6 @@ options:
    description: The integer ID of the secret.
    required: true
    type: int
  fetch_attachments:
    description:
      - Boolean flag which indicates whether attached files will get downloaded or not.
      - The download will only happen if I(file_download_path) has been provided.
    required: false
    type: bool
    version_added: 7.0.0
  file_download_path:
    description: Indicate the file attachment download location.
    required: false
    type: path
    version_added: 7.0.0
  base_url:
    description: The base URL of the server, e.g. C(https://localhost/SecretServer).
    env:

@@ -169,35 +157,10 @@ EXAMPLES = r"""
  tasks:
    - ansible.builtin.debug:
        msg: the password is {{ secret_password }}

# The private key is stored in a certificate file attached to the secret.
# If fetch_attachments=True, the private key file is downloaded to the specified
# path and its content is shown in the debug message.
- hosts: localhost
  vars:
    secret: >-
      {{
        lookup(
          'community.general.tss',
          102,
          fetch_attachments=True,
          file_download_path='/home/certs',
          base_url='https://secretserver.domain.com/SecretServer/',
          token='thycotic_access_token'
        )
      }}
  tasks:
    - ansible.builtin.debug:
        msg: >
          the private key is {{
            (secret['items']
              | items2dict(key_name='slug',
                           value_name='itemValue'))['private-key']
          }}
"""

import abc
import os

from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.module_utils import six
from ansible.plugins.lookup import LookupBase

@@ -248,27 +211,13 @@ class TSSClient(object):
        else:
            return TSSClientV0(**server_parameters)

    def get_secret(self, term, fetch_file_attachments, file_download_path):
    def get_secret(self, term):
        display.debug("tss_lookup term: %s" % term)

        secret_id = self._term_to_secret_id(term)
        display.vvv(u"Secret Server lookup of Secret with ID %d" % secret_id)

        if fetch_file_attachments:
            obj = self._client.get_secret(secret_id, fetch_file_attachments)
            for i in obj['items']:
                if file_download_path and os.path.isdir(file_download_path):
                    if i['isFile']:
                        try:
                            with open(os.path.join(file_download_path, str(obj['id']) + "_" + i['slug']), "w") as f:
                                f.write(i['itemValue'].text)
                            i['itemValue'] = "*** Not Valid For Display ***"
                        except ValueError:
                            raise AnsibleOptionsError("Failed to download {0}".format(str(i['slug'])))
                else:
                    raise AnsibleOptionsError("File download path does not exist")
            return obj
        else:
            return self._client.get_secret_json(secret_id)
        return self._client.get_secret_json(secret_id)

    @staticmethod
    def _term_to_secret_id(term):

@@ -345,6 +294,6 @@ class LookupModule(LookupBase):
        )

        try:
            return [tss.get_secret(term, self.get_option("fetch_attachments"), self.get_option("file_download_path")) for term in terms]
            return [tss.get_secret(term) for term in terms]
        except SecretServerError as error:
            raise AnsibleError("Secret Server lookup failure: %s" % error.message)

@@ -1,258 +0,0 @@
# -*- coding: utf-8 -*-
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is based on
# the config parser from here: https://github.com/emre/storm/blob/master/storm/parsers/ssh_config_parser.py
# Copyright (C) <2013> <Emre Yilmaz>
# SPDX-License-Identifier: MIT

from __future__ import (absolute_import, division, print_function)
import os
import re
import traceback
from operator import itemgetter

__metaclass__ = type

try:
    from paramiko.config import SSHConfig
except ImportError:
    SSHConfig = object
    HAS_PARAMIKO = False
    PARAMIKO_IMPORT_ERROR = traceback.format_exc()
else:
    HAS_PARAMIKO = True
    PARAMIKO_IMPORT_ERROR = None


class StormConfig(SSHConfig):
    def parse(self, file_obj):
        """
        Read an OpenSSH config from the given file object.

        @param file_obj: a file-like object to read the config file from
        @type file_obj: file
        """
        order = 1
        host = {"host": ['*'], "config": {}, }
        for line in file_obj:
            line = line.rstrip('\n').lstrip()
            if line == '':
                self._config.append({
                    'type': 'empty_line',
                    'value': line,
                    'host': '',
                    'order': order,
                })
                order += 1
                continue

            if line.startswith('#'):
                self._config.append({
                    'type': 'comment',
                    'value': line,
                    'host': '',
                    'order': order,
                })
                order += 1
                continue

            if '=' in line:
                # Ensure ProxyCommand gets properly split
                if line.lower().strip().startswith('proxycommand'):
                    proxy_re = re.compile(r"^(proxycommand)\s*=*\s*(.*)", re.I)
                    match = proxy_re.match(line)
                    key, value = match.group(1).lower(), match.group(2)
                else:
                    key, value = line.split('=', 1)
                    key = key.strip().lower()
            else:
                # find first whitespace, and split there
                i = 0
                while (i < len(line)) and not line[i].isspace():
                    i += 1
                if i == len(line):
                    raise Exception('Unparsable line: %r' % line)
                key = line[:i].lower()
                value = line[i:].lstrip()
            if key == 'host':
                self._config.append(host)
                value = value.split()
                host = {
                    key: value,
                    'config': {},
                    'type': 'entry',
                    'order': order
                }
                order += 1
            elif key in ['identityfile', 'localforward', 'remoteforward']:
                if key in host['config']:
                    host['config'][key].append(value)
                else:
                    host['config'][key] = [value]
            elif key not in host['config']:
                host['config'].update({key: value})
        self._config.append(host)


class ConfigParser(object):
    """
    Config parser for ~/.ssh/config files.
    """

    def __init__(self, ssh_config_file=None):
        if not ssh_config_file:
            ssh_config_file = self.get_default_ssh_config_file()

        self.defaults = {}

        self.ssh_config_file = ssh_config_file

        if not os.path.exists(self.ssh_config_file):
            if not os.path.exists(os.path.dirname(self.ssh_config_file)):
                os.makedirs(os.path.dirname(self.ssh_config_file))
            open(self.ssh_config_file, 'w+').close()
            os.chmod(self.ssh_config_file, 0o600)

        self.config_data = []

    def get_default_ssh_config_file(self):
        return os.path.expanduser("~/.ssh/config")

    def load(self):
        config = StormConfig()

        with open(self.ssh_config_file) as fd:
            config.parse(fd)

        for entry in config.__dict__.get("_config"):
            if entry.get("host") == ["*"]:
                self.defaults.update(entry.get("config"))

            if entry.get("type") in ["comment", "empty_line"]:
                self.config_data.append(entry)
                continue

            host_item = {
                'host': entry["host"][0],
                'options': entry.get("config"),
                'type': 'entry',
                'order': entry.get("order", 0),
            }

            if len(entry["host"]) > 1:
                host_item.update({
                    'host': " ".join(entry["host"]),
                })
            # minor bug in paramiko.SSHConfig that duplicates
            # "Host *" entries.
            if entry.get("config") and len(entry.get("config")) > 0:
                self.config_data.append(host_item)

        return self.config_data

    def add_host(self, host, options):
        self.config_data.append({
            'host': host,
            'options': options,
            'order': self.get_last_index(),
        })

        return self

    def update_host(self, host, options, use_regex=False):
        for index, host_entry in enumerate(self.config_data):
            if host_entry.get("host") == host or \
                    (use_regex and re.match(host, host_entry.get("host"))):

                if 'deleted_fields' in options:
                    deleted_fields = options.pop("deleted_fields")
                    for deleted_field in deleted_fields:
                        del self.config_data[index]["options"][deleted_field]

                self.config_data[index]["options"].update(options)

        return self

    def search_host(self, search_string):
        results = []
        for host_entry in self.config_data:
            if host_entry.get("type") != 'entry':
                continue
            if host_entry.get("host") == "*":
                continue

            searchable_information = host_entry.get("host")
            for key, value in host_entry.get("options").items():
                if isinstance(value, list):
                    value = " ".join(value)
                if isinstance(value, int):
                    value = str(value)

                searchable_information += " " + value

            if search_string in searchable_information:
                results.append(host_entry)

        return results

    def delete_host(self, host):
        found = 0
        for index, host_entry in enumerate(self.config_data):
            if host_entry.get("host") == host:
                del self.config_data[index]
                found += 1

        if found == 0:
            raise ValueError('No host found')
        return self

    def delete_all_hosts(self):
        self.config_data = []
        self.write_to_ssh_config()

        return self

    def dump(self):
        if len(self.config_data) < 1:
            return

        file_content = ""
        self.config_data = sorted(self.config_data, key=itemgetter("order"))

        for host_item in self.config_data:
            if host_item.get("type") in ['comment', 'empty_line']:
                file_content += host_item.get("value") + "\n"
                continue
            host_item_content = "Host {0}\n".format(host_item.get("host"))
            for key, value in host_item.get("options").items():
                if isinstance(value, list):
                    sub_content = ""
                    for value_ in value:
                        sub_content += " {0} {1}\n".format(
                            key, value_
                        )
                    host_item_content += sub_content
                else:
                    host_item_content += " {0} {1}\n".format(
                        key, value
                    )
            file_content += host_item_content

        return file_content

    def write_to_ssh_config(self):
        with open(self.ssh_config_file, 'w+') as f:
            data = self.dump()
            if data:
                f.write(data)
        return self

    def get_last_index(self):
        last_index = 0
        indexes = []
        for item in self.config_data:
            if item.get("order"):
                indexes.append(item.get("order"))
        if len(indexes) > 0:
            last_index = max(indexes)

        return last_index
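A minimal usage sketch for the parser above (paths and host names are examples; assumes paramiko is available):

    parser = ConfigParser(os.path.expanduser("~/.ssh/config"))
    parser.load()                                   # parse the existing file
    parser.update_host("myhost", {"port": "2222"})  # tweak one entry in memory
    parser.write_to_ssh_config()                    # serialize back to disk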

@@ -1,464 +0,0 @@
# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.module_utils.common.text.converters import to_bytes
import re
import os


def normalize_subvolume_path(path):
    """
    Normalizes btrfs subvolume paths to ensure exactly one leading slash, no trailing slashes and no consecutive slashes.
    In addition, if the path is prefixed with a leading <FS_TREE>, this value is removed.
    """
    fstree_stripped = re.sub(r'^<FS_TREE>', '', path)
    result = re.sub(r'/+$', '', re.sub(r'/+', '/', '/' + fstree_stripped))
    return result if len(result) > 0 else '/'
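For instance, the normalization above behaves as follows (a few illustrative cases):

    assert normalize_subvolume_path('<FS_TREE>/@home//') == '/@home'
    assert normalize_subvolume_path('snapshots/2022') == '/snapshots/2022'
    assert normalize_subvolume_path('<FS_TREE>') == '/'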


class BtrfsModuleException(Exception):
    pass


class BtrfsCommands(object):
    """
    Provides access to a subset of the Btrfs command line
    """

    def __init__(self, module):
        self.__module = module
        self.__btrfs = self.__module.get_bin_path("btrfs", required=True)

    def filesystem_show(self):
        command = "%s filesystem show -d" % (self.__btrfs)
        result = self.__module.run_command(command, check_rc=True)
        stdout = [x.strip() for x in result[1].splitlines()]
        filesystems = []
        current = None
        for line in stdout:
            if line.startswith('Label'):
                current = self.__parse_filesystem(line)
                filesystems.append(current)
            elif line.startswith('devid'):
                current['devices'].append(self.__parse_filesystem_device(line))
        return filesystems

    def __parse_filesystem(self, line):
        label = re.sub(r'\s*uuid:.*$', '', re.sub(r'^Label:\s*', '', line))
        id = re.sub(r'^.*uuid:\s*', '', line)

        filesystem = {}
        filesystem['label'] = label.strip("'") if label != 'none' else None
        filesystem['uuid'] = id
        filesystem['devices'] = []
        filesystem['mountpoints'] = []
        filesystem['subvolumes'] = []
        filesystem['default_subvolid'] = None
        return filesystem

    def __parse_filesystem_device(self, line):
        return re.sub(r'^.*path\s', '', line)

    def subvolumes_list(self, filesystem_path):
        command = "%s subvolume list -tap %s" % (self.__btrfs, filesystem_path)
        result = self.__module.run_command(command, check_rc=True)
        stdout = [x.split('\t') for x in result[1].splitlines()]
        subvolumes = [{'id': 5, 'parent': None, 'path': '/'}]
        if len(stdout) > 2:
            subvolumes.extend([self.__parse_subvolume_list_record(x) for x in stdout[2:]])
        return subvolumes

    def __parse_subvolume_list_record(self, item):
        return {
            'id': int(item[0]),
            'parent': int(item[2]),
            'path': normalize_subvolume_path(item[5]),
        }

    def subvolume_get_default(self, filesystem_path):
        command = [self.__btrfs, "subvolume", "get-default", to_bytes(filesystem_path)]
        result = self.__module.run_command(command, check_rc=True)
        # ID [n] ...
        return int(result[1].strip().split()[1])

    def subvolume_set_default(self, filesystem_path, subvolume_id):
        command = [self.__btrfs, "subvolume", "set-default", str(subvolume_id), to_bytes(filesystem_path)]
        result = self.__module.run_command(command, check_rc=True)

    def subvolume_create(self, subvolume_path):
        command = [self.__btrfs, "subvolume", "create", to_bytes(subvolume_path)]
        result = self.__module.run_command(command, check_rc=True)

    def subvolume_snapshot(self, snapshot_source, snapshot_destination):
        command = [self.__btrfs, "subvolume", "snapshot", to_bytes(snapshot_source), to_bytes(snapshot_destination)]
        result = self.__module.run_command(command, check_rc=True)

    def subvolume_delete(self, subvolume_path):
        command = [self.__btrfs, "subvolume", "delete", to_bytes(subvolume_path)]
        result = self.__module.run_command(command, check_rc=True)


class BtrfsInfoProvider(object):
    """
    Utility providing details of the currently available btrfs filesystems
    """

    def __init__(self, module):
        self.__module = module
        self.__btrfs_api = BtrfsCommands(module)
        self.__findmnt_path = self.__module.get_bin_path("findmnt", required=True)

    def get_filesystems(self):
        filesystems = self.__btrfs_api.filesystem_show()
        mountpoints = self.__find_mountpoints()
        for filesystem in filesystems:
            device_mountpoints = self.__filter_mountpoints_for_devices(mountpoints, filesystem['devices'])
            filesystem['mountpoints'] = device_mountpoints

            if len(device_mountpoints) > 0:

                # any path within the filesystem can be used to query metadata
                mountpoint = device_mountpoints[0]['mountpoint']
                filesystem['subvolumes'] = self.get_subvolumes(mountpoint)
                filesystem['default_subvolid'] = self.get_default_subvolume_id(mountpoint)

        return filesystems

    def get_mountpoints(self, filesystem_devices):
        mountpoints = self.__find_mountpoints()
        return self.__filter_mountpoints_for_devices(mountpoints, filesystem_devices)

    def get_subvolumes(self, filesystem_path):
        return self.__btrfs_api.subvolumes_list(filesystem_path)

    def get_default_subvolume_id(self, filesystem_path):
        return self.__btrfs_api.subvolume_get_default(filesystem_path)

    def __filter_mountpoints_for_devices(self, mountpoints, devices):
        return [m for m in mountpoints if (m['device'] in devices)]

    def __find_mountpoints(self):
        command = "%s -t btrfs -nvP" % self.__findmnt_path
        result = self.__module.run_command(command)
        mountpoints = []
        if result[0] == 0:
            lines = result[1].splitlines()
            for line in lines:
                mountpoint = self.__parse_mountpoint_pairs(line)
                mountpoints.append(mountpoint)
        return mountpoints

    def __parse_mountpoint_pairs(self, line):
        pattern = re.compile(r'^TARGET="(?P<target>.*)"\s+SOURCE="(?P<source>.*)"\s+FSTYPE="(?P<fstype>.*)"\s+OPTIONS="(?P<options>.*)"\s*$')
        match = pattern.search(line)
        if match is not None:
            groups = match.groupdict()

            return {
                'mountpoint': groups['target'],
                'device': groups['source'],
                'subvolid': self.__extract_mount_subvolid(groups['options']),
            }
        else:
            raise BtrfsModuleException("Failed to parse findmnt result for line: '%s'" % line)

    def __extract_mount_subvolid(self, mount_options):
        for option in mount_options.split(','):
            if option.startswith('subvolid='):
                return int(option[len('subvolid='):])
        raise BtrfsModuleException("Failed to find subvolid for mountpoint in options '%s'" % mount_options)
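To make the findmnt parsing above concrete, here is a typical `findmnt -t btrfs -nvP` output line and what the parser extracts from it (values are illustrative):

    # TARGET="/mnt/data" SOURCE="/dev/sda1" FSTYPE="btrfs" OPTIONS="rw,relatime,subvolid=5,subvol=/"
    # -> {'mountpoint': '/mnt/data', 'device': '/dev/sda1', 'subvolid': 5}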


class BtrfsSubvolume(object):
    """
    Wrapper class providing convenience methods for inspection of a btrfs subvolume
    """

    def __init__(self, filesystem, subvolume_id):
        self.__filesystem = filesystem
        self.__subvolume_id = subvolume_id

    def get_filesystem(self):
        return self.__filesystem

    def is_mounted(self):
        mountpoints = self.get_mountpoints()
        return mountpoints is not None and len(mountpoints) > 0

    def is_filesystem_root(self):
        return 5 == self.__subvolume_id

    def is_filesystem_default(self):
        return self.__filesystem.default_subvolid == self.__subvolume_id

    def get_mounted_path(self):
        mountpoints = self.get_mountpoints()
        if mountpoints is not None and len(mountpoints) > 0:
            return mountpoints[0]
        elif self.parent is not None:
            parent = self.__filesystem.get_subvolume_by_id(self.parent)
            parent_path = parent.get_mounted_path()
            if parent_path is not None:
                return parent_path + os.path.sep + self.name
        else:
            return None

    def get_mountpoints(self):
        return self.__filesystem.get_mountpoints_by_subvolume_id(self.__subvolume_id)

    def get_child_relative_path(self, absolute_child_path):
        """
        Get the relative path from this subvolume to the named child subvolume.
        The provided parameter is expected to be normalized as by normalize_subvolume_path.
        """
        path = self.path
        if absolute_child_path.startswith(path):
            relative = absolute_child_path[len(path):]
            return re.sub(r'^/*', '', relative)
        else:
            raise BtrfsModuleException("Path '%s' doesn't start with '%s'" % (absolute_child_path, path))

    def get_parent_subvolume(self):
        parent_id = self.parent
        return self.__filesystem.get_subvolume_by_id(parent_id) if parent_id is not None else None

    def get_child_subvolumes(self):
        return self.__filesystem.get_subvolume_children(self.__subvolume_id)

    @property
    def __info(self):
        return self.__filesystem.get_subvolume_info_for_id(self.__subvolume_id)

    @property
    def id(self):
        return self.__subvolume_id

    @property
    def name(self):
        return self.path.split('/').pop()

    @property
    def path(self):
        return self.__info['path']

    @property
    def parent(self):
        return self.__info['parent']


class BtrfsFilesystem(object):
    """
    Wrapper class providing convenience methods for inspection of a btrfs filesystem
    """

    def __init__(self, info, provider, module):
        self.__provider = provider

        # constant for module execution
        self.__uuid = info['uuid']
        self.__label = info['label']
        self.__devices = info['devices']

        # refreshable
        self.__default_subvolid = info['default_subvolid'] if 'default_subvolid' in info else None
        self.__update_mountpoints(info['mountpoints'] if 'mountpoints' in info else [])
        self.__update_subvolumes(info['subvolumes'] if 'subvolumes' in info else [])

    @property
    def uuid(self):
        return self.__uuid

    @property
    def label(self):
        return self.__label

    @property
    def default_subvolid(self):
        return self.__default_subvolid

    @property
    def devices(self):
        return list(self.__devices)

    def refresh(self):
        self.refresh_mountpoints()
        self.refresh_subvolumes()
        self.refresh_default_subvolume()

    def refresh_mountpoints(self):
        mountpoints = self.__provider.get_mountpoints(list(self.__devices))
        self.__update_mountpoints(mountpoints)

    def __update_mountpoints(self, mountpoints):
        self.__mountpoints = dict()
        for i in mountpoints:
            subvolid = i['subvolid']
            mountpoint = i['mountpoint']
            if subvolid not in self.__mountpoints:
                self.__mountpoints[subvolid] = []
            self.__mountpoints[subvolid].append(mountpoint)

    def refresh_subvolumes(self):
        filesystem_path = self.get_any_mountpoint()
        if filesystem_path is not None:
            subvolumes = self.__provider.get_subvolumes(filesystem_path)
            self.__update_subvolumes(subvolumes)

    def __update_subvolumes(self, subvolumes):
        # TODO strategy for retaining information on deleted subvolumes?
        self.__subvolumes = dict()
        for subvolume in subvolumes:
            self.__subvolumes[subvolume['id']] = subvolume

    def refresh_default_subvolume(self):
        filesystem_path = self.get_any_mountpoint()
        if filesystem_path is not None:
            self.__default_subvolid = self.__provider.get_default_subvolume_id(filesystem_path)

    def contains_device(self, device):
        return device in self.__devices

    def contains_subvolume(self, subvolume):
        return self.get_subvolume_by_name(subvolume) is not None

    def get_subvolume_by_id(self, subvolume_id):
        return BtrfsSubvolume(self, subvolume_id) if subvolume_id in self.__subvolumes else None

    def get_subvolume_info_for_id(self, subvolume_id):
        return self.__subvolumes[subvolume_id] if subvolume_id in self.__subvolumes else None

    def get_subvolume_by_name(self, subvolume):
        for subvolume_info in self.__subvolumes.values():
            if subvolume_info['path'] == subvolume:
                return BtrfsSubvolume(self, subvolume_info['id'])
        return None

    def get_any_mountpoint(self):
        for subvol_mountpoints in self.__mountpoints.values():
            if len(subvol_mountpoints) > 0:
                return subvol_mountpoints[0]
        # maybe error?
        return None

    def get_any_mounted_subvolume(self):
        for subvolid, subvol_mountpoints in self.__mountpoints.items():
            if len(subvol_mountpoints) > 0:
                return self.get_subvolume_by_id(subvolid)
        return None

    def get_mountpoints_by_subvolume_id(self, subvolume_id):
        return self.__mountpoints[subvolume_id] if subvolume_id in self.__mountpoints else []

    def get_nearest_subvolume(self, subvolume):
        """Return the identified subvolume if existing, else the closest matching parent"""
        subvolumes_by_path = self.__get_subvolumes_by_path()
        while len(subvolume) > 1:
            if subvolume in subvolumes_by_path:
                return BtrfsSubvolume(self, subvolumes_by_path[subvolume]['id'])
            else:
                subvolume = re.sub(r'/[^/]+$', '', subvolume)

        return BtrfsSubvolume(self, 5)

    def get_mountpath_as_child(self, subvolume_name):
        """Find a path to the target subvolume through a mounted ancestor"""
        nearest = self.get_nearest_subvolume(subvolume_name)
        if nearest.path == subvolume_name:
            nearest = nearest.get_parent_subvolume()
        if nearest is None or nearest.get_mounted_path() is None:
            raise BtrfsModuleException("Failed to find a path '%s' through a mounted parent subvolume" % subvolume_name)
        else:
            return nearest.get_mounted_path() + os.path.sep + nearest.get_child_relative_path(subvolume_name)

    def get_subvolume_children(self, subvolume_id):
        return [BtrfsSubvolume(self, x['id']) for x in self.__subvolumes.values() if x['parent'] == subvolume_id]

    def __get_subvolumes_by_path(self):
        result = {}
        for s in self.__subvolumes.values():
            path = s['path']
            result[path] = s
        return result

    def is_mounted(self):
        return self.__mountpoints is not None and len(self.__mountpoints) > 0

    def get_summary(self):
        subvolumes = []
        sources = self.__subvolumes.values() if self.__subvolumes is not None else []
        for subvolume in sources:
            id = subvolume['id']
            subvolumes.append({
                'id': id,
                'path': subvolume['path'],
                'parent': subvolume['parent'],
                'mountpoints': self.get_mountpoints_by_subvolume_id(id),
            })

        return {
            'default_subvolume': self.__default_subvolid,
            'devices': self.__devices,
            'label': self.__label,
            'uuid': self.__uuid,
            'subvolumes': subvolumes,
        }


class BtrfsFilesystemsProvider(object):
    """
    Provides methods to query available btrfs filesystems
    """

    def __init__(self, module):
        self.__module = module
        self.__provider = BtrfsInfoProvider(module)
        self.__filesystems = None

    def get_matching_filesystem(self, criteria):
        if criteria['device'] is not None:
            criteria['device'] = os.path.realpath(criteria['device'])

        self.__check_init()
        matching = [f for f in self.__filesystems.values() if self.__filesystem_matches_criteria(f, criteria)]
        if len(matching) == 1:
            return matching[0]
        else:
            raise BtrfsModuleException("Found %d filesystems matching criteria uuid=%s label=%s device=%s" % (
                len(matching),
                criteria['uuid'],
                criteria['label'],
                criteria['device']
            ))

    def __filesystem_matches_criteria(self, filesystem, criteria):
        return ((criteria['uuid'] is None or filesystem.uuid == criteria['uuid']) and
                (criteria['label'] is None or filesystem.label == criteria['label']) and
                (criteria['device'] is None or filesystem.contains_device(criteria['device'])))

    def get_filesystem_for_device(self, device):
        real_device = os.path.realpath(device)
        self.__check_init()
        for fs in self.__filesystems.values():
            if fs.contains_device(real_device):
                return fs
        return None

    def get_filesystems(self):
        self.__check_init()
        return list(self.__filesystems.values())

    def __check_init(self):
        if self.__filesystems is None:
            self.__filesystems = dict()
            for f in self.__provider.get_filesystems():
                uuid = f['uuid']
                self.__filesystems[uuid] = BtrfsFilesystem(f, self.__provider, self.__module)
@@ -103,13 +103,8 @@ class _ArgFormat(object):

class _Format(object):
    @staticmethod
    def as_bool(args_true, args_false=None, ignore_none=None):
        if args_false is not None:
            if ignore_none is None:
                ignore_none = False
        else:
            args_false = []
        return _ArgFormat(lambda value: _ensure_list(args_true) if value else _ensure_list(args_false), ignore_none=ignore_none)
    def as_bool(args):
        return _ArgFormat(lambda value: _ensure_list(args) if value else [])

    @staticmethod
    def as_bool_not(args):

@@ -309,3 +304,11 @@ class _CmdRunnerContext(object):


cmd_runner_fmt = _Format()

#
# The fmt form is deprecated and will be removed in community.general 7.0.0
# Please use:
#     cmd_runner_fmt
# Or, to retain the same effect, use:
#     from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt as fmt
fmt = cmd_runner_fmt
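The as_bool() semantics above can be mirrored in a standalone sketch (render_bool is a hypothetical helper, shown only to illustrate the mapping from a boolean to CLI arguments):

    def render_bool(value, args_true, args_false=()):
        # True -> args_true, False -> args_false; strings become one-element lists.
        chosen = args_true if value else args_false
        return [chosen] if isinstance(chosen, str) else list(chosen)

    assert render_bool(True, "--direct") == ["--direct"]
    assert render_bool(False, "--direct") == []
    assert render_bool(False, "--set", "--unset") == ["--unset"]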

@@ -1,98 +0,0 @@
# -*- coding: utf-8 -*-
# (c) 2022, Alexei Znamensky <russoz@gmail.com>
# Copyright (c) 2022, Ansible Project
# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
# SPDX-License-Identifier: BSD-2-Clause

from __future__ import absolute_import, division, print_function
__metaclass__ = type


import traceback
from contextlib import contextmanager

from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.basic import missing_required_lib


_deps = dict()


class _Dependency(object):
    _states = ["pending", "failure", "success"]

    def __init__(self, name, reason=None, url=None, msg=None):
        self.name = name
        self.reason = reason
        self.url = url
        self.msg = msg

        self.state = 0
        self.trace = None
        self.exc = None

    def succeed(self):
        self.state = 2

    def fail(self, exc, trace):
        self.state = 1
        self.exc = exc
        self.trace = trace

    @property
    def message(self):
        if self.msg:
            return to_native(self.msg)
        else:
            return missing_required_lib(self.name, reason=self.reason, url=self.url)

    @property
    def failed(self):
        return self.state == 1

    def validate(self, module):
        if self.failed:
            module.fail_json(msg=self.message, exception=self.trace)

    def __str__(self):
        return "<dependency: {0} [{1}]>".format(self.name, self._states[self.state])


@contextmanager
def declare(name, *args, **kwargs):
    dep = _Dependency(name, *args, **kwargs)
    try:
        yield dep
    except Exception as e:
        dep.fail(e, traceback.format_exc())
    else:
        dep.succeed()
    finally:
        _deps[name] = dep


def _select_names(spec):
    dep_names = sorted(_deps)

    if spec:
        if spec.startswith("-"):
            spec_split = spec[1:].split(":")
            for d in spec_split:
                dep_names.remove(d)
        else:
            spec_split = spec.split(":")
            dep_names = []
            for d in spec_split:
                _deps[d]  # ensure it exists
                dep_names.append(d)

    return dep_names


def validate(module, spec=None):
    for dep in _select_names(spec):
        _deps[dep].validate(module)


def failed(spec=None):
    return any(_deps[d].failed for d in _select_names(spec))
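Typical usage of the declare()/validate() pattern above inside a module (a sketch; requests is just an example dependency name):

    with declare("requests", reason="needed to talk to the REST API") as dep:
        import requests

    # ... later, once an AnsibleModule instance exists:
    # validate(module)   # calls fail_json() with missing_required_lib() text if the import failed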

@@ -6,14 +6,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt


_state_map = {
    "present": "--set",
    "absent": "--unset",
    "get": "--get",
}
from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt


def gconftool2_runner(module, **kwargs):

@@ -21,12 +14,14 @@ def gconftool2_runner(module, **kwargs):
        module,
        command='gconftool-2',
        arg_formats=dict(
            state=cmd_runner_fmt.as_map(_state_map),
            key=cmd_runner_fmt.as_list(),
            value_type=cmd_runner_fmt.as_opt_val("--type"),
            value=cmd_runner_fmt.as_list(),
            direct=cmd_runner_fmt.as_bool("--direct"),
            config_source=cmd_runner_fmt.as_opt_val("--config-source"),
            key=fmt.as_list(),
            value_type=fmt.as_opt_val("--type"),
            value=fmt.as_list(),
            direct=fmt.as_bool("--direct"),
            config_source=fmt.as_opt_val("--config-source"),
            get=fmt.as_bool("--get"),
            set_arg=fmt.as_bool("--set"),
            unset=fmt.as_bool("--unset"),
        ),
        **kwargs
    )

@@ -10,7 +10,6 @@ __metaclass__ = type

from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six import integer_types, string_types

from ansible_collections.community.general.plugins.module_utils.version import LooseVersion

@@ -122,38 +121,3 @@ def filter_returned_variables(gitlab_variables):
        if key not in KNOWN:
            item.pop(key)
    return existing_variables


def vars_to_variables(vars, module):
    # transform old vars to new variables structure
    variables = list()
    for item, value in vars.items():
        if isinstance(value, (string_types, integer_types, float)):
            variables.append(
                {
                    "name": item,
                    "value": str(value),
                    "masked": False,
                    "protected": False,
                    "variable_type": "env_var",
                }
            )

        elif isinstance(value, dict):
            new_item = {
                "name": item,
                "value": value.get('value'),
                "masked": value.get('masked'),
                "protected": value.get('protected'),
                "variable_type": value.get('variable_type'),
            }

            if value.get('environment_scope'):
                new_item['environment_scope'] = value.get('environment_scope')

            variables.append(new_item)

        else:
            module.fail_json(msg="value must be of type string, integer, float or dict")

    return variables
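For example, the transformation above maps a plain vars dict to the variables list structure (sketched input and output):

    # vars = {"KEY": "plain", "SECRET": {"value": "s3cret", "masked": True,
    #                                    "protected": False, "variable_type": "env_var"}}
    # becomes:
    # [{"name": "KEY", "value": "plain", "masked": False, "protected": False, "variable_type": "env_var"},
    #  {"name": "SECRET", "value": "s3cret", "masked": True, "protected": False, "variable_type": "env_var"}]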

@@ -42,23 +42,12 @@ URL_CLIENTTEMPLATE = "{url}/admin/realms/{realm}/client-templates/{id}"
URL_CLIENTTEMPLATES = "{url}/admin/realms/{realm}/client-templates"
URL_GROUPS = "{url}/admin/realms/{realm}/groups"
URL_GROUP = "{url}/admin/realms/{realm}/groups/{groupid}"
URL_GROUP_CHILDREN = "{url}/admin/realms/{realm}/groups/{groupid}/children"

URL_CLIENTSCOPES = "{url}/admin/realms/{realm}/client-scopes"
URL_CLIENTSCOPE = "{url}/admin/realms/{realm}/client-scopes/{id}"
URL_CLIENTSCOPE_PROTOCOLMAPPERS = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models"
URL_CLIENTSCOPE_PROTOCOLMAPPER = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models/{mapper_id}"

URL_DEFAULT_CLIENTSCOPES = "{url}/admin/realms/{realm}/default-default-client-scopes"
URL_DEFAULT_CLIENTSCOPE = "{url}/admin/realms/{realm}/default-default-client-scopes/{id}"
URL_OPTIONAL_CLIENTSCOPES = "{url}/admin/realms/{realm}/default-optional-client-scopes"
URL_OPTIONAL_CLIENTSCOPE = "{url}/admin/realms/{realm}/default-optional-client-scopes/{id}"

URL_CLIENT_DEFAULT_CLIENTSCOPES = "{url}/admin/realms/{realm}/clients/{cid}/default-client-scopes"
URL_CLIENT_DEFAULT_CLIENTSCOPE = "{url}/admin/realms/{realm}/clients/{cid}/default-client-scopes/{id}"
URL_CLIENT_OPTIONAL_CLIENTSCOPES = "{url}/admin/realms/{realm}/clients/{cid}/optional-client-scopes"
URL_CLIENT_OPTIONAL_CLIENTSCOPE = "{url}/admin/realms/{realm}/clients/{cid}/optional-client-scopes/{id}"

URL_CLIENT_GROUP_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}"
URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/available"
URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/composite"

@@ -69,8 +58,6 @@ URL_CLIENT_USER_ROLEMAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappi
URL_CLIENT_USER_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}/available"
URL_CLIENT_USER_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}/composite"

URL_CLIENTSECRET = "{url}/admin/realms/{realm}/clients/{id}/client-secret"

URL_AUTHENTICATION_FLOWS = "{url}/admin/realms/{realm}/authentication/flows"
URL_AUTHENTICATION_FLOW = "{url}/admin/realms/{realm}/authentication/flows/{id}"
URL_AUTHENTICATION_FLOW_COPY = "{url}/admin/realms/{realm}/authentication/flows/{copyfrom}/copy"

@@ -90,9 +77,6 @@ URL_IDENTITY_PROVIDER_MAPPER = "{url}/admin/realms/{realm}/identity-provider/ins
URL_COMPONENTS = "{url}/admin/realms/{realm}/components"
URL_COMPONENT = "{url}/admin/realms/{realm}/components/{id}"

URL_AUTHZ_AUTHORIZATION_SCOPE = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/scope/{id}"
URL_AUTHZ_AUTHORIZATION_SCOPES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/scope"


def keycloak_argument_spec():
    """

@@ -1176,177 +1160,6 @@ class KeycloakAPI(object):
            self.module.fail_json(msg='Could not update protocolmappers for clientscope %s in realm %s: %s'
                                      % (mapper_rep, realm, str(e)))

    def get_default_clientscopes(self, realm, client_id=None):
        """Fetch the name and ID of all clientscopes on the Keycloak server.

        To fetch the full data of the client scope, make a subsequent call to
        get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return.

        :param realm: Realm in which the clientscope resides.
        :param client_id: The client in which the clientscope resides.
        :return The default clientscopes of this realm or client
        """
        url = URL_DEFAULT_CLIENTSCOPES if client_id is None else URL_CLIENT_DEFAULT_CLIENTSCOPES
        return self._get_clientscopes_of_type(realm, url, 'default', client_id)

    def get_optional_clientscopes(self, realm, client_id=None):
        """Fetch the name and ID of all clientscopes on the Keycloak server.

        To fetch the full data of the client scope, make a subsequent call to
        get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return.

        :param realm: Realm in which the clientscope resides.
        :param client_id: The client in which the clientscope resides.
        :return The optional clientscopes of this realm or client
        """
        url = URL_OPTIONAL_CLIENTSCOPES if client_id is None else URL_CLIENT_OPTIONAL_CLIENTSCOPES
        return self._get_clientscopes_of_type(realm, url, 'optional', client_id)

    def _get_clientscopes_of_type(self, realm, url_template, scope_type, client_id=None):
        """Fetch the name and ID of all clientscopes on the Keycloak server.

        To fetch the full data of the client scope, make a subsequent call to
        get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return.

        :param realm: Realm in which the clientscope resides.
        :param url_template: The URL template for the scope type.
        :param scope_type: This can be either 'optional' or 'default'.
        :param client_id: The client in which the clientscope resides.
        :return The clientscopes of the specified type of this realm
        """
        if client_id is None:
            clientscopes_url = url_template.format(url=self.baseurl, realm=realm)
            try:
                return json.loads(to_native(open_url(clientscopes_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
                                                     timeout=self.connection_timeout, validate_certs=self.validate_certs).read()))
            except Exception as e:
                self.module.fail_json(msg="Could not fetch list of %s clientscopes in realm %s: %s" % (scope_type, realm, str(e)))
        else:
            cid = self.get_client_id(client_id=client_id, realm=realm)
            clientscopes_url = url_template.format(url=self.baseurl, realm=realm, cid=cid)
            try:
                return json.loads(to_native(open_url(clientscopes_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
                                                     timeout=self.connection_timeout, validate_certs=self.validate_certs).read()))
            except Exception as e:
                self.module.fail_json(msg="Could not fetch list of %s clientscopes in client %s: %s" % (scope_type, client_id, clientscopes_url))

    def _decide_url_type_clientscope(self, client_id=None, scope_type="default"):
        """Decides which url to use.

        :param scope_type: This can be either 'optional' or 'default'.
        :param client_id: The client in which the clientscope resides.
        """
        if client_id is None:
            if scope_type == "default":
                return URL_DEFAULT_CLIENTSCOPE
            if scope_type == "optional":
                return URL_OPTIONAL_CLIENTSCOPE
        else:
            if scope_type == "default":
                return URL_CLIENT_DEFAULT_CLIENTSCOPE
            if scope_type == "optional":
                return URL_CLIENT_OPTIONAL_CLIENTSCOPE

    def add_default_clientscope(self, id, realm="master", client_id=None):
        """Add a client scope as default either on realm or client level.

        :param id: Client scope Id.
        :param realm: Realm in which the clientscope resides.
        :param client_id: The client in which the clientscope resides.
        """
        self._action_type_clientscope(id, client_id, "default", realm, 'add')

    def add_optional_clientscope(self, id, realm="master", client_id=None):
        """Add a client scope as optional either on realm or client level.

        :param id: Client scope Id.
        :param realm: Realm in which the clientscope resides.
        :param client_id: The client in which the clientscope resides.
        """
        self._action_type_clientscope(id, client_id, "optional", realm, 'add')

    def delete_default_clientscope(self, id, realm="master", client_id=None):
        """Remove a client scope as default either on realm or client level.

        :param id: Client scope Id.
        :param realm: Realm in which the clientscope resides.
        :param client_id: The client in which the clientscope resides.
        """
        self._action_type_clientscope(id, client_id, "default", realm, 'delete')

    def delete_optional_clientscope(self, id, realm="master", client_id=None):
        """Remove a client scope as optional either on realm or client level.

        :param id: Client scope Id.
        :param realm: Realm in which the clientscope resides.
        :param client_id: The client in which the clientscope resides.
        """
        self._action_type_clientscope(id, client_id, "optional", realm, 'delete')

    def _action_type_clientscope(self, id=None, client_id=None, scope_type="default", realm="master", action='add'):
        """ Delete or add a clientscope of type.

        :param name: The name of the clientscope. A lookup will be performed to retrieve the clientscope ID.
        :param client_id: The ID of the clientscope (preferred to name).
        :param scope_type: 'default' or 'optional'
        :param realm: The realm in which this group resides, default "master".
        """
        cid = None if client_id is None else self.get_client_id(client_id=client_id, realm=realm)
        # should have a good cid by here.
        clientscope_type_url = self._decide_url_type_clientscope(client_id, scope_type).format(realm=realm, id=id, cid=cid, url=self.baseurl)
        try:
            method = 'PUT' if action == "add" else 'DELETE'
            return open_url(clientscope_type_url, method=method, http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
                            validate_certs=self.validate_certs)

        except Exception as e:
            place = 'realm' if client_id is None else 'client ' + client_id
            self.module.fail_json(msg="Unable to %s %s clientscope %s @ %s : %s" % (action, scope_type, id, place, str(e)))

    def create_clientsecret(self, id, realm="master"):
        """ Generate a new client secret by id

        :param id: id (not clientId) of client to be queried
        :param realm: client from this realm
        :return: dict of credential representation
        """
        clientsecret_url = URL_CLIENTSECRET.format(url=self.baseurl, realm=realm, id=id)

        try:
            return json.loads(to_native(open_url(clientsecret_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout,
                                                 validate_certs=self.validate_certs).read()))

        except HTTPError as e:
            if e.code == 404:
                return None
            else:
                self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s'
                                          % (id, realm, str(e)))
        except Exception as e:
            self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s'
                                      % (id, realm, str(e)))

    def get_clientsecret(self, id, realm="master"):
        """ Obtain client secret by id

        :param id: id (not clientId) of client to be queried
        :param realm: client from this realm
        :return: dict of credential representation
        """
        clientsecret_url = URL_CLIENTSECRET.format(url=self.baseurl, realm=realm, id=id)

        try:
            return json.loads(to_native(open_url(clientsecret_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
                                                 validate_certs=self.validate_certs).read()))

        except HTTPError as e:
            if e.code == 404:
                return None
            else:
                self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s'
                                          % (id, realm, str(e)))
        except Exception as e:
            self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s'
                                      % (id, realm, str(e)))

    def get_groups(self, realm="master"):
        """ Fetch the name and ID of all groups on the Keycloak server.

@@ -1388,7 +1201,7 @@ class KeycloakAPI(object):
            self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
                                      % (gid, realm, str(e)))

    def get_group_by_name(self, name, realm="master", parents=None):
    def get_group_by_name(self, name, realm="master"):
        """ Fetch a keycloak group within a realm based on its name.

        The Keycloak API does not allow filtering of the Groups resource by name.

@@ -1398,19 +1211,10 @@ class KeycloakAPI(object):
        If the group does not exist, None is returned.
        :param name: Name of the group to fetch.
        :param realm: Realm in which the group resides; default 'master'
        :param parents: Optional list of parents when group to look for is a subgroup
        """
        groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
        try:
            if parents:
                parent = self.get_subgroup_direct_parent(parents, realm)

                if not parent:
                    return None

                all_groups = parent['subGroups']
            else:
                all_groups = self.get_groups(realm=realm)
            all_groups = self.get_groups(realm=realm)

            for group in all_groups:
                if group['name'] == name:

@@ -1422,102 +1226,6 @@ class KeycloakAPI(object):
            self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
                                      % (name, realm, str(e)))

    def _get_normed_group_parent(self, parent):
        """ Converts parent dict information into an easier to use form.

        :param parent: dict describing the parent
        """
        if parent['id']:
            return (parent['id'], True)

        return (parent['name'], False)

    def get_subgroup_by_chain(self, name_chain, realm="master"):
        """ Access a subgroup API object by walking down a given name/id chain.

        Groups can be given either by name or by ID; the first element
        must either be a top-level group or given as ID, and all parents must exist.

        If the group cannot be found, None is returned.
        :param name_chain: Top-down ordered list of subgroup parents (IDs or names) plus the group's own name at the end
        :param realm: Realm in which the group resides; default 'master'
        """
        cp = name_chain[0]

        # for the 1st parent in the chain we must query the server
        cp, is_id = self._get_normed_group_parent(cp)

        if is_id:
            tmp = self.get_group_by_groupid(cp, realm=realm)
        else:
            # given as name, assume top-level group
            tmp = self.get_group_by_name(cp, realm=realm)

        if not tmp:
            return None

        for p in name_chain[1:]:
            for sg in tmp['subGroups']:
                pv, is_id = self._get_normed_group_parent(p)

                if is_id:
                    cmpkey = "id"
                else:
                    cmpkey = "name"

                if pv == sg[cmpkey]:
                    tmp = sg
                    break

            if not tmp:
                return None

        return tmp
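An illustration of the chain format accepted above (a sketch; kc stands for a KeycloakAPI instance, and the group names are examples):

    # Walk /top/child/grandchild, resolving the first element by name:
    chain = [
        {"name": "top", "id": None},
        {"name": "child", "id": None},
        {"name": "grandchild", "id": None},
    ]
    group = kc.get_subgroup_by_chain(chain, realm="master")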

    def get_subgroup_direct_parent(self, parents, realm="master", children_to_resolve=None):
        """ Get the keycloak direct parent group API object for a given chain of parents.

        To work with the API for subgroups successfully we do not actually need
        to "walk the whole tree" for nested groups but only need to know
        the ID of the direct predecessor of the current subgroup. This
        method guarantees us this information with as little work as possible.

        Note that the given parent list can be incomplete at the
        upper levels as long as it starts with an ID instead of a name

        If the group does not exist, None is returned.
        :param parents: Top-down ordered list of subgroup parents
        :param realm: Realm in which the group resides; default 'master'
        """
        if children_to_resolve is None:
            # start recursion by reversing parents (in optimal cases
            # we do not need to walk the whole tree upwards)
            parents = list(reversed(parents))
            children_to_resolve = []

        if not parents:
            # walked the complete parents list to the top, all names, no IDs;
            # try to resolve it assuming the list is complete and the 1st
            # element is a top-level group
            return self.get_subgroup_by_chain(list(reversed(children_to_resolve)), realm=realm)

        cp = parents[0]
        unused, is_id = self._get_normed_group_parent(cp)

        if is_id:
            # current parent is given as ID, we can stop walking
            # upwards searching for an entry point
            return self.get_subgroup_by_chain([cp] + list(reversed(children_to_resolve)), realm=realm)
        else:
            # current parent is given as name, it must be resolved
            # later, try next parent (recurse)
            children_to_resolve.append(cp)
            return self.get_subgroup_direct_parent(
                parents[1:],
                realm=realm, children_to_resolve=children_to_resolve
            )

    def create_group(self, grouprep, realm="master"):
        """ Create a Keycloak group.

@@ -1532,34 +1240,6 @@ class KeycloakAPI(object):
            self.module.fail_json(msg="Could not create group %s in realm %s: %s"
                                      % (grouprep['name'], realm, str(e)))

    def create_subgroup(self, parents, grouprep, realm="master"):
        """ Create a Keycloak subgroup.

        :param parents: list of one or more parent groups
        :param grouprep: a GroupRepresentation of the group to be created. Must contain at minimum the field name.
        :return: HTTPResponse object on success
        """
        parent_id = "---UNDETERMINED---"
        try:
            parent_id = self.get_subgroup_direct_parent(parents, realm)

            if not parent_id:
                raise Exception(
                    "Could not determine subgroup parent ID for given"
                    " parent chain {0}. Assure that all parents exist"
                    " already and the list is complete and properly"
                    " ordered, starts with an ID or starts at the"
                    " top level".format(parents)
                )

            parent_id = parent_id["id"]
            url = URL_GROUP_CHILDREN.format(url=self.baseurl, realm=realm, groupid=parent_id)
            return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
                            data=json.dumps(grouprep), validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg="Could not create subgroup %s for parent group %s in realm %s: %s"
                                      % (grouprep['name'], parent_id, realm, str(e)))

    def update_group(self, grouprep, realm="master"):
        """ Update an existing group.

@@ -1933,9 +1613,6 @@ class KeycloakAPI(object):
                     data=json.dumps(updatedExec),
                     timeout=self.connection_timeout,
                     validate_certs=self.validate_certs)
        except HTTPError as e:
            self.module.fail_json(msg="Unable to update execution '%s': %s: %s %s" %
                                      (flowAlias, repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(updatedExec)))
        except Exception as e:
            self.module.fail_json(msg="Unable to update executions %s: %s" % (updatedExec, str(e)))

@@ -1960,7 +1637,7 @@ class KeycloakAPI(object):
        except Exception as e:
            self.module.fail_json(msg="Unable to add authenticationConfig %s: %s" % (executionId, str(e)))

    def create_subflow(self, subflowName, flowAlias, realm='master', flowType='basic-flow'):
    def create_subflow(self, subflowName, flowAlias, realm='master'):
        """ Create a new subflow on the flow

        :param subflowName: name of the subflow to create

@@ -1971,7 +1648,7 @@ class KeycloakAPI(object):
            newSubFlow = {}
            newSubFlow["alias"] = subflowName
            newSubFlow["provider"] = "registration-page-form"
            newSubFlow["type"] = flowType
            newSubFlow["type"] = "basic-flow"
            open_url(
                URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW.format(
                    url=self.baseurl,

@@ -2006,11 +1683,8 @@ class KeycloakAPI(object):
                     data=json.dumps(newExec),
                     timeout=self.connection_timeout,
                     validate_certs=self.validate_certs)
        except HTTPError as e:
            self.module.fail_json(msg="Unable to create new execution '%s' %s: %s: %s %s" %
                                      (flowAlias, execution["providerId"], repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(newExec)))
        except Exception as e:
            self.module.fail_json(msg="Unable to create new execution '%s' %s: %s" % (flowAlias, execution["providerId"], repr(e)))
            self.module.fail_json(msg="Unable to create new execution %s: %s" % (execution["provider"], str(e)))

    def change_execution_priority(self, executionId, diff, realm='master'):
        """ Raise or lower an execution's priority by diff steps

@@ -2334,44 +2008,3 @@ class KeycloakAPI(object):
        except Exception as e:
            self.module.fail_json(msg='Unable to delete component %s in realm %s: %s'
                                      % (cid, realm, str(e)))

    def get_authz_authorization_scope_by_name(self, name, client_id, realm):
        url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm)
        search_url = "%s/search?name=%s" % (url, quote(name))

        try:
            return json.loads(to_native(open_url(search_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
                                                 timeout=self.connection_timeout,
                                                 validate_certs=self.validate_certs).read()))
        except Exception:
            return False

    def create_authz_authorization_scope(self, payload, client_id, realm):
        """Create an authorization scope for a Keycloak client"""
        url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm)

        try:
            return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
                            data=json.dumps(payload), validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Could not create authorization scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))

    def update_authz_authorization_scope(self, payload, id, client_id, realm):
        """Update an authorization scope for a Keycloak client"""
        url = URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm)

        try:
            return open_url(url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
                            data=json.dumps(payload), validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Could not update scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))

    def remove_authz_authorization_scope(self, id, client_id, realm):
        """Remove an authorization scope from a Keycloak client"""
        url = URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm)

        try:
            return open_url(url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
                            validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Could not delete scope %s for client %s in realm %s: %s' % (id, client_id, realm, str(e)))
@@ -1,77 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2022, John Cant <a.johncant@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function

__metaclass__ = type

from ansible.module_utils.basic import AnsibleModule

from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import \
    keycloak_argument_spec


def keycloak_clientsecret_module():
    """
    Returns an AnsibleModule definition for modules that interact with a client
    secret.

    :return: AnsibleModule
    """
    argument_spec = keycloak_argument_spec()

    meta_args = dict(
        realm=dict(default='master'),
        id=dict(type='str'),
        client_id=dict(type='str', aliases=['clientId']),
    )

    argument_spec.update(meta_args)

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=([['id', 'client_id'],
                          ['token', 'auth_realm', 'auth_username', 'auth_password']]),
        required_together=([['auth_realm', 'auth_username', 'auth_password']]),
        mutually_exclusive=[
            ['token', 'auth_realm'],
            ['token', 'auth_username'],
            ['token', 'auth_password']
        ])

    return module


def keycloak_clientsecret_module_resolve_params(module, kc):
    """
    Given an AnsibleModule definition for keycloak_clientsecret_*, and a
    KeycloakAPI client, resolve the params needed to interact with the Keycloak
    client secret, looking up the client by clientId if necessary via an API
    call.

    :return: tuple of id, realm
    """

    realm = module.params.get('realm')
    id = module.params.get('id')
    client_id = module.params.get('client_id')

    # only lookup the client_id if id isn't provided.
    # in the case that both are provided, prefer the ID, since it's one
    # less lookup.
    if id is None:
        # Due to the required_one_of spec, client_id is guaranteed to not be None
        client = kc.get_client_by_clientid(client_id, realm=realm)

        if client is None:
            module.fail_json(
                msg='Client does not exist {client_id}'.format(client_id=client_id)
            )

        id = client['id']

    return id, realm
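A minimal usage sketch for the two helpers above; the `get_keycloak_connection` factory is a hypothetical stand-in for whatever code builds the KeycloakAPI client in a real keycloak_clientsecret_* module:

```python
# Hypothetical wiring -- get_keycloak_connection is not part of this file.
module = keycloak_clientsecret_module()
kc = get_keycloak_connection(module)  # assumed to return a KeycloakAPI instance

# Resolve the client UUID; the clientId lookup only happens when `id` is absent.
id, realm = keycloak_clientsecret_module_resolve_params(module, kc)
```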
@@ -8,7 +8,6 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
import time


class iLORedfishUtils(RedfishUtils):
@@ -86,16 +85,17 @@ class iLORedfishUtils(RedfishUtils):

        datetime_uri = self.manager_uri + "DateTime"

        listofips = mgr_attributes['mgr_attr_value'].split(" ")
        if len(listofips) > 2:
            return {'ret': False, 'changed': False, 'msg': "More than 2 NTP Servers mentioned"}
        response = self.get_request(self.root_uri + datetime_uri)
        if not response['ret']:
            return response

        ntp_list = []
        for ips in listofips:
            ntp_list.append(ips)
        data = response['data']

        while len(ntp_list) < 2:
            ntp_list.append("0.0.0.0")
        ntp_list = data[setkey]
        if len(ntp_list) == 2:
            ntp_list.pop(0)

        ntp_list.append(mgr_attributes['mgr_attr_value'])

        payload = {setkey: ntp_list}

@@ -137,16 +137,18 @@ class iLORedfishUtils(RedfishUtils):
        nic_info = self.get_manager_ethernet_uri()
        uri = nic_info["nic_addr"]

        listofips = attr['mgr_attr_value'].split(" ")
        if len(listofips) > 3:
            return {'ret': False, 'changed': False, 'msg': "More than 3 DNS Servers mentioned"}
        response = self.get_request(self.root_uri + uri)
        if not response['ret']:
            return response

        dns_list = []
        for ips in listofips:
            dns_list.append(ips)
        data = response['data']

        while len(dns_list) < 3:
            dns_list.append("0.0.0.0")
        dns_list = data["Oem"]["Hpe"]["IPv4"][key]

        if len(dns_list) == 3:
            dns_list.pop(0)

        dns_list.append(attr['mgr_attr_value'])

        payload = {
            "Oem": {
@@ -229,79 +231,3 @@ class iLORedfishUtils(RedfishUtils):
        if not response['ret']:
            return response
        return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgrattr['mgr_attr_name']}

    def get_server_poststate(self):
        # Get server details
        response = self.get_request(self.root_uri + self.systems_uri)
        if not response["ret"]:
            return response
        server_data = response["data"]

        if "Hpe" in server_data["Oem"]:
            return {
                "ret": True,
                "server_poststate": server_data["Oem"]["Hpe"]["PostState"]
            }
        else:
            return {
                "ret": True,
                "server_poststate": server_data["Oem"]["Hp"]["PostState"]
            }

    def wait_for_ilo_reboot_completion(self, polling_interval=60, max_polling_time=1800):
        # This method checks if OOB controller reboot is completed
        time.sleep(10)

        # Check server poststate
        state = self.get_server_poststate()
        if not state["ret"]:
            return state

        count = int(max_polling_time / polling_interval)
        times = 0

        # When server is powered OFF
        pcount = 0
        while state["server_poststate"] in ["PowerOff", "Off"] and pcount < 5:
            time.sleep(10)
            state = self.get_server_poststate()
            if not state["ret"]:
                return state

            if state["server_poststate"] not in ["PowerOff", "Off"]:
                break
            pcount = pcount + 1
        if state["server_poststate"] in ["PowerOff", "Off"]:
            return {
                "ret": False,
                "changed": False,
                "msg": "Server is powered OFF"
            }

        # When server is not rebooting
        if state["server_poststate"] in ["InPostDiscoveryComplete", "FinishedPost"]:
            return {
                "ret": True,
                "changed": False,
                "msg": "Server is not rebooting"
            }

        while state["server_poststate"] not in ["InPostDiscoveryComplete", "FinishedPost"] and count > times:
            state = self.get_server_poststate()
            if not state["ret"]:
                return state

            if state["server_poststate"] in ["InPostDiscoveryComplete", "FinishedPost"]:
                return {
                    "ret": True,
                    "changed": True,
                    "msg": "Server reboot is completed"
                }
            time.sleep(polling_interval)
            times = times + 1

        return {
            "ret": False,
            "changed": False,
            "msg": "Server Reboot has failed, server state: {state} ".format(state=state)
        }

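A short illustration of the vendor-key fallback that `get_server_poststate` relies on (the sample payload is invented for the sketch): newer iLO firmware reports the POST state under the `Hpe` OEM key, older firmware under `Hp`:

```python
# Invented sample payload; real data comes from self.get_request().
server_data = {"Oem": {"Hp": {"PostState": "FinishedPost"}}}
vendor = "Hpe" if "Hpe" in server_data["Oem"] else "Hp"
print(server_data["Oem"][vendor]["PostState"])  # -> FinishedPost
```

The polling loop above is likewise bounded: with the defaults, `int(1800 / 60)` caps it at 30 status checks.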
@@ -1,35 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


import os
import time


def download_updates_file(updates_expiration):
    updates_filename = 'jenkins-plugin-cache.json'
    updates_dir = os.path.expanduser('~/.ansible/tmp')
    updates_file = os.path.join(updates_dir, updates_filename)
    download_updates = True

    # Make sure the destination directory exists
    if not os.path.isdir(updates_dir):
        os.makedirs(updates_dir, 0o700)

    # Check if we need to download new updates file
    if os.path.isfile(updates_file):
        # Get timestamp when the file was changed last time
        ts_file = os.stat(updates_file).st_mtime
        ts_now = time.time()

        if ts_now - ts_file < updates_expiration:
            download_updates = False

    return updates_file, download_updates
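An illustrative call, assuming a one-hour cache expiration; the exact result depends on the mtime of the cache file on the host:

```python
# First call creates ~/.ansible/tmp if needed and asks for a download;
# while the cache file stays younger than 3600 s, download is False.
path, download = download_updates_file(3600)
print(path)      # .../jenkins-plugin-cache.json
print(download)  # True on a cold cache, False on a warm one
```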
@@ -10,14 +10,11 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type

import re
import traceback
from ansible.module_utils.common.text.converters import to_native

try:
    import ldap
    import ldap.dn
    import ldap.filter
    import ldap.sasl

    HAS_LDAP = True
@@ -34,14 +31,12 @@ def gen_specs(**specs):
    specs.update({
        'bind_dn': dict(),
        'bind_pw': dict(default='', no_log=True),
        'ca_path': dict(type='path'),
        'dn': dict(required=True),
        'referrals_chasing': dict(type='str', default='anonymous', choices=['disabled', 'anonymous']),
        'server_uri': dict(default='ldapi:///'),
        'start_tls': dict(default=False, type='bool'),
        'validate_certs': dict(default=True, type='bool'),
        'sasl_class': dict(choices=['external', 'gssapi'], default='external', type='str'),
        'xorder_discovery': dict(choices=['enable', 'auto', 'disable'], default='auto', type='str'),
    })

    return specs
@@ -53,23 +48,16 @@ class LdapGeneric(object):
        self.module = module
        self.bind_dn = self.module.params['bind_dn']
        self.bind_pw = self.module.params['bind_pw']
        self.ca_path = self.module.params['ca_path']
        self.dn = self.module.params['dn']
        self.referrals_chasing = self.module.params['referrals_chasing']
        self.server_uri = self.module.params['server_uri']
        self.start_tls = self.module.params['start_tls']
        self.verify_cert = self.module.params['validate_certs']
        self.sasl_class = self.module.params['sasl_class']
        self.xorder_discovery = self.module.params['xorder_discovery']

        # Establish connection
        self.connection = self._connect_to_ldap()

        if self.xorder_discovery == "enable" or (self.xorder_discovery == "auto" and not self._xorder_dn()):
            # Try to find the X_ORDERed version of the DN
            self.dn = self._find_dn()
        else:
            self.dn = self.module.params['dn']

    def fail(self, msg, exn):
        self.module.fail_json(
            msg=msg,
@@ -77,31 +65,10 @@ class LdapGeneric(object):
            exception=traceback.format_exc()
        )

    def _find_dn(self):
        dn = self.module.params['dn']

        explode_dn = ldap.dn.explode_dn(dn)

        if len(explode_dn) > 1:
            try:
                escaped_value = ldap.filter.escape_filter_chars(explode_dn[0])
                filterstr = "(%s)" % escaped_value
                dns = self.connection.search_s(','.join(explode_dn[1:]),
                                               ldap.SCOPE_ONELEVEL, filterstr)
                if len(dns) == 1:
                    dn, dummy = dns[0]
            except Exception:
                pass

        return dn

    def _connect_to_ldap(self):
        if not self.verify_cert:
            ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)

        if self.ca_path:
            ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, self.ca_path)

        connection = ldap.initialize(self.server_uri)

        if self.referrals_chasing == 'disabled':
@@ -124,8 +91,3 @@ class LdapGeneric(object):
            self.fail("Cannot bind to the server.", e)

        return connection

    def _xorder_dn(self):
        # match X_ORDERed DNs
        regex = r"\w+=\{\d+\}.+"
        return re.match(regex, self.module.params['dn']) is not None

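The `_xorder_dn` regex is compact; a quick demonstration of what it does and does not match:

```python
import re

regex = r"\w+=\{\d+\}.+"
print(bool(re.match(regex, "olcDatabase={1}mdb,cn=config")))  # True: X_ORDERed RDN
print(bool(re.match(regex, "cn=admin,dc=example,dc=org")))    # False: ordinary DN
```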
@@ -8,10 +8,8 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


import os
import socket
import ssl
import json

from ansible.module_utils.urls import generic_urlparse
from ansible.module_utils.six.moves.urllib.parse import urlparse
@@ -22,6 +20,8 @@ from ansible.module_utils.common.text.converters import to_text
HTTPConnection = http_client.HTTPConnection
HTTPSConnection = http_client.HTTPSConnection

import json


class UnixHTTPConnection(HTTPConnection):
    def __init__(self, path):
@@ -124,11 +124,3 @@ class LXDClient(object):
        if err is None:
            err = resp_json.get('error', None)
        return err


def default_key_file():
    return os.path.expanduser('~/.config/lxc/client.key')


def default_cert_file():
    return os.path.expanduser('~/.config/lxc/client.crt')

@@ -37,17 +37,8 @@ def cause_changes(on_success=None, on_failure=None):


def module_fails_on_exception(func):
    conflict_list = ('msg', 'exception', 'output', 'vars', 'changed')

    @wraps(func)
    def wrapper(self, *args, **kwargs):
        def fix_var_conflicts(output):
            result = dict([
                (k if k not in conflict_list else "_" + k, v)
                for k, v in output.items()
            ])
            return result

        try:
            func(self, *args, **kwargs)
        except SystemExit:
@@ -55,16 +46,12 @@ def module_fails_on_exception(func):
        except ModuleHelperException as e:
            if e.update_output:
                self.update_output(e.update_output)
            # patchy solution to resolve conflict with output variables
            output = fix_var_conflicts(self.output)
            self.module.fail_json(msg=e.msg, exception=traceback.format_exc(),
                                  output=self.output, vars=self.vars.output(), **output)
                                  output=self.output, vars=self.vars.output(), **self.output)
        except Exception as e:
            # patchy solution to resolve conflict with output variables
            output = fix_var_conflicts(self.output)
            msg = "Module failed with exception: {0}".format(str(e).strip())
            self.module.fail_json(msg=msg, exception=traceback.format_exc(),
                                  output=self.output, vars=self.vars.output(), **output)
                                  output=self.output, vars=self.vars.output(), **self.output)
    return wrapper



@@ -38,12 +38,6 @@ class DependencyCtxMgr(object):


class DependencyMixin(ModuleHelperBase):
    """
    THIS CLASS IS BEING DEPRECATED.
    See the deprecation notice in ``DependencyMixin.fail_on_missing_deps()`` below.

    Mixin for mapping module options to running a CLI command with its arguments.
    """
    _dependencies = []

    @classmethod
@@ -52,12 +46,6 @@ class DependencyMixin(ModuleHelperBase):
        return cls._dependencies[-1]

    def fail_on_missing_deps(self):
        self.module.deprecate(
            'The DependencyMixin is being deprecated. '
            'Modules should use community.general.plugins.module_utils.deps instead.',
            version='9.0.0',
            collection_name='community.general',
        )
        for d in self._dependencies:
            if not d.has_it:
                self.module.fail_json(changed=False,

@@ -7,7 +7,6 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type


from ansible.module_utils.common.dict_transformations import dict_merge

# (TODO: remove AnsibleModule!) pylint: disable-next=unused-import
@@ -15,17 +14,20 @@ from ansible_collections.community.general.plugins.module_utils.mh.base import M
from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin
from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyMixin
from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin
from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin, VarDict as _VD
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deprecate_attrs import DeprecateAttrsMixin


class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelperBase):
    _output_conflict_list = ('msg', 'exception', 'output', 'vars', 'changed')
    facts_name = None
    output_params = ()
    diff_params = ()
    change_params = ()
    facts_params = ()

    VarDict = _VD  # for backward compatibility, will be deprecated at some point

    def __init__(self, module=None):
        super(ModuleHelper, self).__init__(module)
        for name, value in self.module.params.items():
@@ -37,6 +39,16 @@ class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelper
                fact=name in self.facts_params,
            )

        self._deprecate_attr(
            attr="VarDict",
            msg="ModuleHelper.VarDict attribute is deprecated, use VarDict from "
                "the ansible_collections.community.general.plugins.module_utils.mh.mixins.vars module instead",
            version="6.0.0",
            collection_name="community.general",
            target=ModuleHelper,
            module=self.module,
        )

    def update_output(self, **kwargs):
        self.update_vars(meta={"output": True}, **kwargs)

@@ -61,6 +73,10 @@ class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelper
            vars_diff = self.vars.diff() or {}
            result['diff'] = dict_merge(dict(diff), vars_diff)

        for varname in list(result):
            if varname in self._output_conflict_list:
                result["_" + varname] = result[varname]
                del result[varname]
        return result


@@ -7,17 +7,14 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type

# pylint: disable=unused-import


from ansible_collections.community.general.plugins.module_utils.mh.module_helper import (
from ansible_collections.community.general.plugins.module_utils.mh.module_helper import (  # noqa: F401, pylint: disable=unused-import
    ModuleHelper, StateModuleHelper, CmdModuleHelper, CmdStateModuleHelper, AnsibleModule
)
from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin, ArgFormat  # noqa: F401
from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin  # noqa: F401
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr, DependencyMixin  # noqa: F401
from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException  # noqa: F401
from ansible_collections.community.general.plugins.module_utils.mh.deco import (
    cause_changes, module_fails_on_exception, check_mode_skip, check_mode_skip_returns,
)
from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarMeta, VarDict, VarsMixin  # noqa: F401
from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin, ArgFormat  # noqa: F401, pylint: disable=unused-import
from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin  # noqa: F401, pylint: disable=unused-import
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr  # noqa: F401, pylint: disable=unused-import
from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException  # noqa: F401, pylint: disable=unused-import
# pylint: disable-next=unused-import
from ansible_collections.community.general.plugins.module_utils.mh.deco import cause_changes, module_fails_on_exception  # noqa: F401
from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarMeta, VarDict  # noqa: F401, pylint: disable=unused-import

@@ -1,502 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2022 Western Digital Corporation
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

import json
import os
import uuid

from ansible.module_utils.urls import open_url
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.six.moves.urllib.parse import urlparse


GET_HEADERS = {'accept': 'application/json'}
PUT_HEADERS = {'content-type': 'application/json', 'accept': 'application/json'}
POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json'}
DELETE_HEADERS = {'accept': 'application/json'}

HEALTH_OK = 5


class OcapiUtils(object):

    def __init__(self, creds, base_uri, proxy_slot_number, timeout, module):
        self.root_uri = base_uri
        self.proxy_slot_number = proxy_slot_number
        self.creds = creds
        self.timeout = timeout
        self.module = module

    def _auth_params(self):
        """
        Return tuple of required authentication params based on the username and password.

        :return: tuple of username, password, force_basic_auth
        """
        username = self.creds['user']
        password = self.creds['pswd']
        force_basic_auth = True
        return username, password, force_basic_auth

    def get_request(self, uri):
        req_headers = dict(GET_HEADERS)
        username, password, basic_auth = self._auth_params()
        try:
            resp = open_url(uri, method="GET", headers=req_headers,
                            url_username=username, url_password=password,
                            force_basic_auth=basic_auth, validate_certs=False,
                            follow_redirects='all',
                            use_proxy=True, timeout=self.timeout)
            data = json.loads(to_native(resp.read()))
            headers = dict((k.lower(), v) for (k, v) in resp.info().items())
        except HTTPError as e:
            return {'ret': False,
                    'msg': "HTTP Error %s on GET request to '%s'"
                           % (e.code, uri),
                    'status': e.code}
        except URLError as e:
            return {'ret': False, 'msg': "URL Error on GET request to '%s': '%s'"
                                         % (uri, e.reason)}
        # Almost all errors should be caught above, but just in case
        except Exception as e:
            return {'ret': False,
                    'msg': "Failed GET request to '%s': '%s'" % (uri, to_text(e))}
        return {'ret': True, 'data': data, 'headers': headers}

    def delete_request(self, uri, etag=None):
        req_headers = dict(DELETE_HEADERS)
        if etag is not None:
            req_headers['If-Match'] = etag
        username, password, basic_auth = self._auth_params()
        try:
            resp = open_url(uri, method="DELETE", headers=req_headers,
                            url_username=username, url_password=password,
                            force_basic_auth=basic_auth, validate_certs=False,
                            follow_redirects='all',
                            use_proxy=True, timeout=self.timeout)
            if resp.status != 204:
                data = json.loads(to_native(resp.read()))
            else:
                data = ""
            headers = dict((k.lower(), v) for (k, v) in resp.info().items())
        except HTTPError as e:
            return {'ret': False,
                    'msg': "HTTP Error %s on DELETE request to '%s'"
                           % (e.code, uri),
                    'status': e.code}
        except URLError as e:
            return {'ret': False, 'msg': "URL Error on DELETE request to '%s': '%s'"
                                         % (uri, e.reason)}
        # Almost all errors should be caught above, but just in case
        except Exception as e:
            return {'ret': False,
                    'msg': "Failed DELETE request to '%s': '%s'" % (uri, to_text(e))}
        return {'ret': True, 'data': data, 'headers': headers}

    def put_request(self, uri, payload, etag=None):
        req_headers = dict(PUT_HEADERS)
        if etag is not None:
            req_headers['If-Match'] = etag
        username, password, basic_auth = self._auth_params()
        try:
            resp = open_url(uri, data=json.dumps(payload),
                            headers=req_headers, method="PUT",
                            url_username=username, url_password=password,
                            force_basic_auth=basic_auth, validate_certs=False,
                            follow_redirects='all',
                            use_proxy=True, timeout=self.timeout)
            headers = dict((k.lower(), v) for (k, v) in resp.info().items())
        except HTTPError as e:
            return {'ret': False,
                    'msg': "HTTP Error %s on PUT request to '%s'"
                           % (e.code, uri),
                    'status': e.code}
        except URLError as e:
            return {'ret': False, 'msg': "URL Error on PUT request to '%s': '%s'"
                                         % (uri, e.reason)}
        # Almost all errors should be caught above, but just in case
        except Exception as e:
            return {'ret': False,
                    'msg': "Failed PUT request to '%s': '%s'" % (uri, to_text(e))}
        return {'ret': True, 'headers': headers, 'resp': resp}

    def post_request(self, uri, payload, content_type="application/json", timeout=None):
        req_headers = dict(POST_HEADERS)
        if content_type != "application/json":
            req_headers["content-type"] = content_type
        username, password, basic_auth = self._auth_params()
        if content_type == "application/json":
            request_data = json.dumps(payload)
        else:
            request_data = payload
        try:
            resp = open_url(uri, data=request_data,
                            headers=req_headers, method="POST",
                            url_username=username, url_password=password,
                            force_basic_auth=basic_auth, validate_certs=False,
                            follow_redirects='all',
                            use_proxy=True, timeout=self.timeout if timeout is None else timeout)
            headers = dict((k.lower(), v) for (k, v) in resp.info().items())
        except HTTPError as e:
            return {'ret': False,
                    'msg': "HTTP Error %s on POST request to '%s'"
                           % (e.code, uri),
                    'status': e.code}
        except URLError as e:
            return {'ret': False, 'msg': "URL Error on POST request to '%s': '%s'"
                                         % (uri, e.reason)}
        # Almost all errors should be caught above, but just in case
        except Exception as e:
            return {'ret': False,
                    'msg': "Failed POST request to '%s': '%s'" % (uri, to_text(e))}
        return {'ret': True, 'headers': headers, 'resp': resp}

    def get_uri_with_slot_number_query_param(self, uri):
        """Return the URI with proxy slot number added as a query param, if there is one.

        If a proxy slot number is provided, to access it, we must append it as a query parameter.
        This method returns the given URI with the slotnumber query param added, if there is one.
        If there is not a proxy slot number, it just returns the URI as it was passed in.
        """
        if self.proxy_slot_number is not None:
            parsed_url = urlparse(uri)
            return parsed_url._replace(query="slotnumber=" + str(self.proxy_slot_number)).geturl()
        else:
            return uri

    def manage_system_power(self, command):
        """Process a command to manage the system power.

        :param str command: The Ansible command being processed.
        """
        if command == "PowerGracefulRestart":
            resource_uri = self.root_uri
            resource_uri = self.get_uri_with_slot_number_query_param(resource_uri)

            # Get the resource so that we have the Etag
            response = self.get_request(resource_uri)
            if 'etag' not in response['headers']:
                return {'ret': False, 'msg': 'Etag not found in response.'}
            etag = response['headers']['etag']
            if response['ret'] is False:
                return response

            # Issue the PUT to do the reboot (unless we are in check mode)
            if self.module.check_mode:
                return {
                    'ret': True,
                    'changed': True,
                    'msg': 'Update not performed in check mode.'
                }
            payload = {'Reboot': True}
            response = self.put_request(resource_uri, payload, etag)
            if response['ret'] is False:
                return response
        elif command.startswith("PowerMode"):
            return self.manage_power_mode(command)
        else:
            return {'ret': False, 'msg': 'Invalid command: ' + command}

        return {'ret': True}

    def manage_chassis_indicator_led(self, command):
        """Process a command to manage the chassis indicator LED.

        :param string command: The Ansible command being processed.
        """
        return self.manage_indicator_led(command, self.root_uri)

    def manage_indicator_led(self, command, resource_uri=None):
        """Process a command to manage an indicator LED.

        :param string command: The Ansible command being processed.
        :param string resource_uri: URI of the resource whose indicator LED is being managed.
        """
        key = "IndicatorLED"
        if resource_uri is None:
            resource_uri = self.root_uri
        resource_uri = self.get_uri_with_slot_number_query_param(resource_uri)

        payloads = {
            'IndicatorLedOn': {
                'ID': 2
            },
            'IndicatorLedOff': {
                'ID': 4
            }
        }

        response = self.get_request(resource_uri)
        if 'etag' not in response['headers']:
            return {'ret': False, 'msg': 'Etag not found in response.'}
        etag = response['headers']['etag']
        if response['ret'] is False:
            return response
        data = response['data']
        if key not in data:
            return {'ret': False, 'msg': "Key %s not found" % key}
        if 'ID' not in data[key]:
            return {'ret': False, 'msg': 'IndicatorLED for resource has no ID.'}

        if command in payloads.keys():
            # See if the LED is already set as requested.
            current_led_status = data[key]['ID']
            if current_led_status == payloads[command]['ID']:
                return {'ret': True, 'changed': False}

            # Set the LED (unless we are in check mode)
            if self.module.check_mode:
                return {
                    'ret': True,
                    'changed': True,
                    'msg': 'Update not performed in check mode.'
                }
            payload = {'IndicatorLED': payloads[command]}
            response = self.put_request(resource_uri, payload, etag)
            if response['ret'] is False:
                return response
        else:
            return {'ret': False, 'msg': 'Invalid command'}

        return {'ret': True}

    def manage_power_mode(self, command):
        key = "PowerState"
        resource_uri = self.get_uri_with_slot_number_query_param(self.root_uri)

        payloads = {
            "PowerModeNormal": 2,
            "PowerModeLow": 4
        }

        response = self.get_request(resource_uri)
        if 'etag' not in response['headers']:
            return {'ret': False, 'msg': 'Etag not found in response.'}
        etag = response['headers']['etag']
        if response['ret'] is False:
            return response
        data = response['data']
        if key not in data:
            return {'ret': False, 'msg': "Key %s not found" % key}
        if 'ID' not in data[key]:
            return {'ret': False, 'msg': 'PowerState for resource has no ID.'}

        if command in payloads.keys():
            # See if the PowerState is already set as requested.
            current_power_state = data[key]['ID']
            if current_power_state == payloads[command]:
                return {'ret': True, 'changed': False}

            # Set the Power State (unless we are in check mode)
            if self.module.check_mode:
                return {
                    'ret': True,
                    'changed': True,
                    'msg': 'Update not performed in check mode.'
                }
            payload = {'PowerState': {"ID": payloads[command]}}
            response = self.put_request(resource_uri, payload, etag)
            if response['ret'] is False:
                return response
        else:
            return {'ret': False, 'msg': 'Invalid command: ' + command}

        return {'ret': True}

    def prepare_multipart_firmware_upload(self, filename):
        """Prepare a multipart/form-data body for OCAPI firmware upload.

        :arg filename: The name of the file to upload.
        :returns: tuple of (content_type, body) where ``content_type`` is
            the ``multipart/form-data`` ``Content-Type`` header including
            ``boundary`` and ``body`` is the prepared bytestring body

        Prepares the body to include "FirmwareFile" field with the contents of the file.
        Because some OCAPI targets do not support Base-64 encoding for multipart/form-data,
        this method sends the file as binary.
        """
        boundary = str(uuid.uuid4())  # Generate a random boundary
        body = "--" + boundary + '\r\n'
        body += 'Content-Disposition: form-data; name="FirmwareFile"; filename="%s"\r\n' % to_native(os.path.basename(filename))
        body += 'Content-Type: application/octet-stream\r\n\r\n'
        body_bytes = bytearray(body, 'utf-8')
        with open(filename, 'rb') as f:
            body_bytes += f.read()
        body_bytes += bytearray("\r\n--%s--" % boundary, 'utf-8')
        return ("multipart/form-data; boundary=%s" % boundary,
                body_bytes)

    def upload_firmware_image(self, update_image_path):
        """Perform Firmware Upload to the OCAPI storage device.

        :param str update_image_path: The path/filename of the firmware image, on the local filesystem.
        """
        if not (os.path.exists(update_image_path) and os.path.isfile(update_image_path)):
            return {'ret': False, 'msg': 'File does not exist.'}
        url = self.root_uri + "OperatingSystem"
        url = self.get_uri_with_slot_number_query_param(url)
        content_type, b_form_data = self.prepare_multipart_firmware_upload(update_image_path)

        # Post the firmware (unless we are in check mode)
        if self.module.check_mode:
            return {
                'ret': True,
                'changed': True,
                'msg': 'Update not performed in check mode.'
            }
        result = self.post_request(url, b_form_data, content_type=content_type, timeout=300)
        if result['ret'] is False:
            return result
        return {'ret': True}

    def update_firmware_image(self):
        """Perform a Firmware Update on the OCAPI storage device."""
        resource_uri = self.root_uri
        resource_uri = self.get_uri_with_slot_number_query_param(resource_uri)
        # We have to do a GET to obtain the Etag. It's required on the PUT.
        response = self.get_request(resource_uri)
        if response['ret'] is False:
            return response
        if 'etag' not in response['headers']:
            return {'ret': False, 'msg': 'Etag not found in response.'}
        etag = response['headers']['etag']

        # Issue the PUT (unless we are in check mode)
        if self.module.check_mode:
            return {
                'ret': True,
                'changed': True,
                'msg': 'Update not performed in check mode.'
            }
        payload = {'FirmwareUpdate': True}
        response = self.put_request(resource_uri, payload, etag)
        if response['ret'] is False:
            return response

        return {'ret': True, 'jobUri': response["headers"]["location"]}

    def activate_firmware_image(self):
        """Perform a Firmware Activate on the OCAPI storage device."""
        resource_uri = self.root_uri
        resource_uri = self.get_uri_with_slot_number_query_param(resource_uri)
        # We have to do a GET to obtain the Etag. It's required on the PUT.
        response = self.get_request(resource_uri)
        if 'etag' not in response['headers']:
            return {'ret': False, 'msg': 'Etag not found in response.'}
        etag = response['headers']['etag']
        if response['ret'] is False:
            return response

        # Issue the PUT (unless we are in check mode)
        if self.module.check_mode:
            return {
                'ret': True,
                'changed': True,
                'msg': 'Update not performed in check mode.'
            }
        payload = {'FirmwareActivate': True}
        response = self.put_request(resource_uri, payload, etag)
        if response['ret'] is False:
            return response

        return {'ret': True, 'jobUri': response["headers"]["location"]}

    def get_job_status(self, job_uri):
        """Get the status of a job.

        :param str job_uri: The URI of the job's status monitor.
        """
        job_uri = self.get_uri_with_slot_number_query_param(job_uri)
        response = self.get_request(job_uri)
        if response['ret'] is False:
            if response.get('status') == 404:
                # Job not found -- assume 0%
                return {
                    "ret": True,
                    "percentComplete": 0,
                    "operationStatus": "Not Available",
                    "operationStatusId": 1,
                    "operationHealth": None,
                    "operationHealthId": None,
                    "details": "Job does not exist.",
                    "jobExists": False
                }
            else:
                return response
        details = response["data"]["Status"].get("Details")
        if type(details) is str:
            details = [details]
        health_list = response["data"]["Status"]["Health"]
        return_value = {
            "ret": True,
            "percentComplete": response["data"]["PercentComplete"],
            "operationStatus": response["data"]["Status"]["State"]["Name"],
            "operationStatusId": response["data"]["Status"]["State"]["ID"],
            "operationHealth": health_list[0]["Name"] if len(health_list) > 0 else None,
            "operationHealthId": health_list[0]["ID"] if len(health_list) > 0 else None,
            "details": details,
            "jobExists": True
        }
        return return_value

    def delete_job(self, job_uri):
        """Delete the OCAPI job referenced by the specified job_uri."""
        job_uri = self.get_uri_with_slot_number_query_param(job_uri)
        # We have to do a GET to obtain the Etag. It's required on the DELETE.
        response = self.get_request(job_uri)

        if response['ret'] is True:
            if 'etag' not in response['headers']:
                return {'ret': False, 'msg': 'Etag not found in response.'}
            else:
                etag = response['headers']['etag']

            if response['data']['PercentComplete'] != 100:
                return {
                    'ret': False,
                    'changed': False,
                    'msg': 'Cannot delete job because it is in progress.'
                }

        if response['ret'] is False:
            if response['status'] == 404:
                return {
                    'ret': True,
                    'changed': False,
                    'msg': 'Job already deleted.'
                }
            return response
        if self.module.check_mode:
            return {
                'ret': True,
                'changed': True,
                'msg': 'Update not performed in check mode.'
            }

        # Do the DELETE (unless we are in check mode)
        response = self.delete_request(job_uri, etag)
        if response['ret'] is False:
            if response['status'] == 404:
                return {
                    'ret': True,
                    'changed': False
                }
            elif response['status'] == 409:
                return {
                    'ret': False,
                    'changed': False,
                    'msg': 'Cannot delete job because it is in progress.'
                }
            return response
        return {
            'ret': True,
            'changed': True
        }
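For reference, the slot-number rewriting in `get_uri_with_slot_number_query_param` boils down to one `urlparse` round trip; a standalone sketch with an invented endpoint:

```python
from urllib.parse import urlparse

uri = "https://controller.example.com/Storage"  # hypothetical OCAPI URI
print(urlparse(uri)._replace(query="slotnumber=2").geturl())
# -> https://controller.example.com/Storage?slotnumber=2
```

Note this replaces any existing query string rather than appending to it, which matches the helper's behavior.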
@@ -26,41 +26,6 @@ except ImportError:
    HAS_PYONE = False


# A helper function to mitigate https://github.com/OpenNebula/one/issues/6064.
# It allows for easily handling lists like "NIC" or "DISK" in the JSON-like template representation.
# There are either lists of dictionaries (length > 1) or just dictionaries.
def flatten(to_flatten, extract=False):
    """Flattens nested lists (with optional value extraction)."""
    def recurse(to_flatten):
        return sum(map(recurse, to_flatten), []) if isinstance(to_flatten, list) else [to_flatten]
    value = recurse(to_flatten)
    if extract and len(value) == 1:
        return value[0]
    return value


# A helper function to mitigate https://github.com/OpenNebula/one/issues/6064.
# It renders JSON-like template representation into OpenNebula's template syntax (string).
def render(to_render):
    """Converts dictionary to OpenNebula template."""
    def recurse(to_render):
        for key, value in sorted(to_render.items()):
            if value is None:
                continue
            if isinstance(value, dict):
                yield '{0:}=[{1:}]'.format(key, ','.join(recurse(value)))
                continue
            if isinstance(value, list):
                for item in value:
                    yield '{0:}=[{1:}]'.format(key, ','.join(recurse(item)))
                continue
            if isinstance(value, str):
                yield '{0:}="{1:}"'.format(key, value.replace('\\', '\\\\').replace('"', '\\"'))
                continue
            yield '{0:}="{1:}"'.format(key, value)
    return '\n'.join(recurse(to_render))


class OpenNebulaModule:
    """
    Base class for all OpenNebula Ansible Modules.

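A quick demonstration of `render` on a small invented template dict, showing how scalars, nested dicts, and lists of dicts map onto OpenNebula's template syntax:

```python
print(render({
    "NAME": "test-vm",
    "MEMORY": 1024,
    "NIC": [{"NETWORK": "public"}, {"NETWORK": "private"}],
}))
# MEMORY="1024"
# NAME="test-vm"
# NIC=[NETWORK="public"]
# NIC=[NETWORK="private"]
```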
@@ -32,14 +32,12 @@ def pipx_runner(module, command, **kwargs):
        state=fmt.as_map(_state_map),
        name=fmt.as_list(),
        name_source=fmt.as_func(fmt.unpack_args(lambda n, s: [s] if s else [n])),
        install_apps=fmt.as_bool("--include-apps"),
        install_deps=fmt.as_bool("--include-deps"),
        inject_packages=fmt.as_list(),
        force=fmt.as_bool("--force"),
        include_injected=fmt.as_bool("--include-injected"),
        index_url=fmt.as_opt_val('--index-url'),
        python=fmt.as_opt_val('--python'),
        system_site_packages=fmt.as_bool("--system-site-packages"),
        _list=fmt.as_fixed(['list', '--include-injected', '--json']),
        editable=fmt.as_bool("--editable"),
        pip_args=fmt.as_opt_val('--pip-args'),

@@ -1,111 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


import os

from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt


_PUPPET_PATH_PREFIX = ["/opt/puppetlabs/bin"]


def get_facter_dir():
    if os.getuid() == 0:
        return '/etc/facter/facts.d'
    else:
        return os.path.expanduser('~/.facter/facts.d')


def _puppet_cmd(module):
    return module.get_bin_path("puppet", False, _PUPPET_PATH_PREFIX)


# If the `timeout` CLI command feature is removed,
# then we could add this as a fixed param to `puppet_runner`
def ensure_agent_enabled(module):
    runner = CmdRunner(
        module,
        command="puppet",
        path_prefix=_PUPPET_PATH_PREFIX,
        arg_formats=dict(
            _agent_disabled=cmd_runner_fmt.as_fixed(['config', 'print', 'agent_disabled_lockfile']),
        ),
        check_rc=False,
    )

    rc, stdout, stderr = runner("_agent_disabled").run()
    if os.path.exists(stdout.strip()):
        module.fail_json(
            msg="Puppet agent is administratively disabled.",
            disabled=True)
    elif rc != 0:
        module.fail_json(
            msg="Puppet agent state could not be determined.")


def puppet_runner(module):

    # Keeping backward compatibility, allow for running with the `timeout` CLI command.
    # If this can be replaced with ansible `timeout` parameter in playbook,
    # then this function could be removed.
    def _prepare_base_cmd():
        _tout_cmd = module.get_bin_path("timeout", False)
        if _tout_cmd:
            cmd = ["timeout", "-s", "9", module.params["timeout"], _puppet_cmd(module)]
        else:
            cmd = ["puppet"]
        return cmd

    def noop_func(v):
        return ["--noop"] if module.check_mode or v else ["--no-noop"]

    _logdest_map = {
        "syslog": ["--logdest", "syslog"],
        "all": ["--logdest", "syslog", "--logdest", "console"],
    }

    @cmd_runner_fmt.unpack_args
    def execute_func(execute, manifest):
        if execute:
            return ["--execute", execute]
        else:
            return [manifest]

    runner = CmdRunner(
        module,
        command=_prepare_base_cmd(),
        path_prefix=_PUPPET_PATH_PREFIX,
        arg_formats=dict(
            _agent_fixed=cmd_runner_fmt.as_fixed([
                "agent", "--onetime", "--no-daemonize", "--no-usecacheonfailure",
                "--no-splay", "--detailed-exitcodes", "--verbose", "--color", "0",
            ]),
            _apply_fixed=cmd_runner_fmt.as_fixed(["apply", "--detailed-exitcodes"]),
            puppetmaster=cmd_runner_fmt.as_opt_val("--server"),
            show_diff=cmd_runner_fmt.as_bool("--show-diff"),
            confdir=cmd_runner_fmt.as_opt_val("--confdir"),
            environment=cmd_runner_fmt.as_opt_val("--environment"),
            tags=cmd_runner_fmt.as_func(lambda v: ["--tags", ",".join(v)]),
            skip_tags=cmd_runner_fmt.as_func(lambda v: ["--skip_tags", ",".join(v)]),
            certname=cmd_runner_fmt.as_opt_eq_val("--certname"),
            noop=cmd_runner_fmt.as_func(noop_func),
            use_srv_records=cmd_runner_fmt.as_map({
                True: "--usr_srv_records",
                False: "--no-usr_srv_records",
            }),
            logdest=cmd_runner_fmt.as_map(_logdest_map, default=[]),
            modulepath=cmd_runner_fmt.as_opt_eq_val("--modulepath"),
            _execute=cmd_runner_fmt.as_func(execute_func),
            summarize=cmd_runner_fmt.as_bool("--summarize"),
            debug=cmd_runner_fmt.as_bool("--debug"),
            verbose=cmd_runner_fmt.as_bool("--verbose"),
        ),
        check_rc=False,
    )
    return runner
@@ -314,21 +314,3 @@ def setup_rax_module(module, rax_module, region_required=True):
                           (region, ','.join(rax_module.regions)))

    return rax_module


def rax_scaling_group_personality_file(module, files):
    if not files:
        return []

    results = []
    for rpath, lpath in files.items():
        lpath = os.path.expanduser(lpath)
        try:
            with open(lpath, 'r') as f:
                results.append({
                    'path': rpath,
                    'contents': f.read(),
                })
        except Exception as e:
            module.fail_json(msg='Failed to load %s: %s' % (lpath, str(e)))
    return results

File diff suppressed because it is too large
@@ -81,18 +81,12 @@ def api_request(module, endpoint, data=None, method="GET"):

    try:
        content = response.read()

        if not content:
            return None, info
        else:
            json_response = json.loads(content)
            return json_response, info
        json_response = json.loads(content)
        return json_response, info
    except AttributeError as error:
        module.fail_json(
            msg="Rundeck API request error",
            exception=to_native(error),
            execution_info=info
        )
        module.fail_json(msg="Rundeck API request error",
                         exception=to_native(error),
                         execution_info=info)
    except ValueError as error:
        module.fail_json(
            msg="No valid JSON response",

@@ -11,21 +11,11 @@ import re
import sys
import datetime
import time
import traceback

from ansible.module_utils.basic import env_fallback, missing_required_lib
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six.moves.urllib.parse import urlencode

SCALEWAY_SECRET_IMP_ERR = None
try:
    from passlib.hash import argon2
    HAS_SCALEWAY_SECRET_PACKAGE = True
except Exception:
    argon2 = None
    SCALEWAY_SECRET_IMP_ERR = traceback.format_exc()
    HAS_SCALEWAY_SECRET_PACKAGE = False


def scaleway_argument_spec():
    return dict(
@@ -84,54 +74,12 @@ def parse_pagination_link(header):


def filter_sensitive_attributes(container, attributes):
    '''
    WARNING: This function is effectively private, **do not use it**!
    It will be removed or renamed once changing its name no longer triggers a pylint bug.
    '''
    for attr in attributes:
        container[attr] = "SENSITIVE_VALUE"

    return container


class SecretVariables(object):
    @staticmethod
    def ensure_scaleway_secret_package(module):
        if not HAS_SCALEWAY_SECRET_PACKAGE:
            module.fail_json(
                msg=missing_required_lib("passlib[argon2]", url='https://passlib.readthedocs.io/en/stable/'),
                exception=SCALEWAY_SECRET_IMP_ERR
            )

    @staticmethod
    def dict_to_list(source_dict):
        return [
            dict(key=var[0], value=var[1])
            for var in source_dict.items()
        ]

    @staticmethod
    def list_to_dict(source_list, hashed=False):
        key_value = 'hashed_value' if hashed else 'value'
        return dict(
            (var['key'], var[key_value])
            for var in source_list
        )

    @classmethod
    def decode(cls, secrets_list, values_list):
        secrets_dict = cls.list_to_dict(secrets_list, hashed=True)
        values_dict = cls.list_to_dict(values_list, hashed=False)
        for key in values_dict:
            if key in secrets_dict:
                if argon2.verify(values_dict[key], secrets_dict[key]):
                    secrets_dict[key] = values_dict[key]
                else:
                    secrets_dict[key] = secrets_dict[key]

        return cls.dict_to_list(secrets_dict)


def resource_attributes_should_be_changed(target, wished, verifiable_mutable_attributes, mutable_attributes):
    diff = dict()
    for attr in verifiable_mutable_attributes:

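A sketch of the `decode` semantics with placeholder data (the hash shown is truncated and will not verify as-is; a real entry must be a complete argon2 hash):

```python
hashed = [{"key": "db_password", "hashed_value": "$argon2id$..."}]  # stored, hashed form
plain = [{"key": "db_password", "value": "s3cret"}]                 # wished, plaintext form

# When argon2.verify() confirms the plaintext matches the stored hash,
# decode() swaps the hash for the plaintext so both sides compare equal.
print(SecretVariables.decode(hashed, plain))
# -> [{'key': 'db_password', 'value': 's3cret'}]  (when the hash matches)
```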
@@ -1,45 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2023, Alexei Znamensky <russoz@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt


_alias_state_map = dict(
    present='alias',
    absent='unalias',
    info='aliases',
)

_state_map = dict(
    present='install',
    absent='remove',
    enabled='enable',
    disabled='disable',
)


def snap_runner(module, **kwargs):
    runner = CmdRunner(
        module,
        "snap",
        arg_formats=dict(
            state_alias=cmd_runner_fmt.as_map(_alias_state_map),  # snap_alias only
            name=cmd_runner_fmt.as_list(),
            alias=cmd_runner_fmt.as_list(),  # snap_alias only
            state=cmd_runner_fmt.as_map(_state_map),
            _list=cmd_runner_fmt.as_fixed("list"),
            _set=cmd_runner_fmt.as_fixed("set"),
            get=cmd_runner_fmt.as_fixed(["get", "-d"]),
            classic=cmd_runner_fmt.as_bool("--classic"),
            channel=cmd_runner_fmt.as_func(lambda v: [] if v == 'stable' else ['--channel', '{0}'.format(v)]),
            options=cmd_runner_fmt.as_list(),
        ),
        check_rc=False,
        **kwargs
    )
    return runner
@@ -28,7 +28,7 @@ class BitbucketHelper:
        # TODO:
        # - Rename user to username once current usage of username is removed
        # - Alias user to username and deprecate it
        user=dict(type='str', aliases=['username'], fallback=(env_fallback, ['BITBUCKET_USERNAME'])),
        user=dict(type='str', fallback=(env_fallback, ['BITBUCKET_USERNAME'])),
        password=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_PASSWORD'])),
    )


@@ -1,21 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Björn Andersson
# Copyright (c) 2021, Ansible Project
# Copyright (c) 2021, Abhijeet Kasurde <akasurde@redhat.com>
# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


import os


def determine_config_file(user, config_file):
    if user:
        config_file = os.path.join(os.path.expanduser('~%s' % user), '.ssh', 'config')
    elif config_file is None:
        config_file = '/etc/ssh/ssh_config'
    return config_file
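The precedence in `determine_config_file` is easy to read off from a few calls (paths shown assume a conventional home layout):

```python
print(determine_config_file("alice", None))        # -> /home/alice/.ssh/config
print(determine_config_file(None, None))           # -> /etc/ssh/ssh_config
print(determine_config_file(None, "/tmp/custom"))  # -> /tmp/custom (explicit path wins)
```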
@@ -1,109 +0,0 @@
#!/usr/bin/python

# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = r'''
---
module: btrfs_info
short_description: Query btrfs filesystem info
version_added: "6.6.0"
description: Query status of available btrfs filesystems, including uuid, label, subvolumes and mountpoints.

author:
    - Gregory Furlong (@gnfzdz)

extends_documentation_fragment:
    - community.general.attributes
    - community.general.attributes.info_module
'''

EXAMPLES = r'''

- name: Query information about mounted btrfs filesystems
  community.general.btrfs_info:
  register: my_btrfs_info

'''

RETURN = r'''

filesystems:
    description: Summaries of the current state for all btrfs filesystems found on the target host.
    type: list
    elements: dict
    returned: success
    contains:
        uuid:
            description: A unique identifier assigned to the filesystem.
            type: str
            sample: 96c9c605-1454-49b8-a63a-15e2584c208e
        label:
            description: An optional label assigned to the filesystem.
            type: str
            sample: Tank
        devices:
            description: A list of devices assigned to the filesystem.
            type: list
            sample:
                - /dev/sda1
                - /dev/sdb1
        default_subvolume:
            description: The id of the filesystem's default subvolume.
            type: int
            sample: 5
        subvolumes:
            description: A list of dicts containing metadata for all of the filesystem's subvolumes.
            type: list
            elements: dict
            contains:
                id:
                    description: An identifier assigned to the subvolume, unique within the containing filesystem.
                    type: int
                    sample: 256
                mountpoints:
                    description: Paths where the subvolume is mounted on the targeted host.
                    type: list
                    sample: ['/home']
                parent:
                    description: The identifier of this subvolume's parent.
                    type: int
                    sample: 5
                path:
                    description: The full path of the subvolume relative to the btrfs filesystem's root.
                    type: str
                    sample: /@home

'''


from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
def run_module():
|
||||
module_args = dict()
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=module_args,
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
provider = BtrfsFilesystemsProvider(module)
|
||||
filesystems = [x.get_summary() for x in provider.get_filesystems()]
|
||||
result = {
|
||||
"filesystems": filesystems,
|
||||
}
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
def main():
|
||||
run_module()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
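
For reference, the structure that exit_json() produces here looks like the following, assuming a single mounted filesystem (values are illustrative; keys follow the RETURN block above):

result = {
    "filesystems": [
        {
            "uuid": "96c9c605-1454-49b8-a63a-15e2584c208e",
            "label": "Tank",
            "devices": ["/dev/sda1", "/dev/sdb1"],
            "default_subvolume": 5,
            "subvolumes": [
                {"id": 256, "parent": 5, "path": "/@home", "mountpoints": ["/home"]},
            ],
        },
    ],
}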

@@ -1,682 +0,0 @@
#!/usr/bin/python

# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = r'''
---
module: btrfs_subvolume
short_description: Manage btrfs subvolumes
version_added: "6.6.0"

description: Creates, updates and deletes btrfs subvolumes and snapshots.

options:
    automount:
        description:
            - Allow the module to temporarily mount the targeted btrfs filesystem in order to validate the current state and make any required changes.
        type: bool
        default: false
    default:
        description:
            - Make the subvolume specified by I(name) the filesystem's default subvolume.
        type: bool
        default: false
    filesystem_device:
        description:
            - A block device contained within the btrfs filesystem to be targeted.
            - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
        type: path
    filesystem_label:
        description:
            - A descriptive label assigned to the btrfs filesystem to be targeted.
            - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
        type: str
    filesystem_uuid:
        description:
            - A unique identifier assigned to the btrfs filesystem to be targeted.
            - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
        type: str
    name:
        description:
            - Name of the subvolume/snapshot to be targeted.
        required: true
        type: str
    recursive:
        description:
            - When true, indicates that parent/child subvolumes should be created/removed as necessary
              to complete the operation (for I(state=present) and I(state=absent) respectively).
        type: bool
        default: false
    snapshot_source:
        description:
            - Identifies the source subvolume for the created snapshot.
            - Infers that the created subvolume is a snapshot.
        type: str
    snapshot_conflict:
        description:
            - Policy defining behavior when a subvolume already exists at the path of the requested snapshot.
            - C(skip) - Create a snapshot only if a subvolume does not yet exist at the target location, otherwise indicate that no change is required.
              Warning, this option does not yet verify that the target subvolume was generated from a snapshot of the requested source.
            - C(clobber) - If a subvolume already exists at the requested location, delete it first.
              This option is not idempotent and will result in a new snapshot being generated on every execution.
            - C(error) - If a subvolume already exists at the requested location, return an error.
              This option is not idempotent and will result in an error on replay of the module.
        type: str
        choices: [ skip, clobber, error ]
        default: skip
    state:
        description:
            - Indicates the current state of the targeted subvolume.
        type: str
        choices: [ absent, present ]
        default: present

notes:
    - If any of the options I(filesystem_device), I(filesystem_label) or I(filesystem_uuid) are provided, a matching
      btrfs filesystem is expected to exist. If none are provided and only a single btrfs filesystem exists or only a single
      btrfs filesystem is mounted, that filesystem will be used; otherwise, the module will take no action and return an error.

extends_documentation_fragment:
    - community.general.attributes

attributes:
    check_mode:
        support: partial
        details:
            - In some scenarios it may erroneously report intermediate subvolumes being created.
              After mounting, if a directory-like file is found where the subvolume would have been created, the operation is skipped.
    diff_mode:
        support: none

author:
    - Gregory Furlong (@gnfzdz)
'''

EXAMPLES = r'''

- name: Create a @home subvolume under the root subvolume
  community.general.btrfs_subvolume:
    name: /@home
    filesystem_device: /dev/vda2

- name: Remove the @home subvolume if it exists
  community.general.btrfs_subvolume:
    name: /@home
    state: absent
    filesystem_device: /dev/vda2

- name: Create a snapshot of the root subvolume named @
  community.general.btrfs_subvolume:
    name: /@
    snapshot_source: /
    filesystem_device: /dev/vda2

- name: Create a snapshot of the root subvolume and make it the new default subvolume
  community.general.btrfs_subvolume:
    name: /@
    snapshot_source: /
    default: Yes
    filesystem_device: /dev/vda2

- name: Create a snapshot of the /@ subvolume, recursively creating intermediate subvolumes as required
  community.general.btrfs_subvolume:
    name: /@snapshots/@2022_06_09
    snapshot_source: /@
    recursive: True
    filesystem_device: /dev/vda2

- name: Remove the /@snapshots/@2022_06_09 subvolume, recursively deleting child subvolumes as required
  community.general.btrfs_subvolume:
    name: /@snapshots/@2022_06_09
    state: absent
    recursive: True
    filesystem_device: /dev/vda2

'''

RETURN = r'''

filesystem:
    description:
        - A summary of the final state of the targeted btrfs filesystem.
    type: dict
    returned: success
    contains:
        uuid:
            description: A unique identifier assigned to the filesystem.
            returned: success
            type: str
            sample: 96c9c605-1454-49b8-a63a-15e2584c208e
        label:
            description: An optional label assigned to the filesystem.
            returned: success
            type: str
            sample: Tank
        devices:
            description: A list of devices assigned to the filesystem.
            returned: success
            type: list
            sample:
                - /dev/sda1
                - /dev/sdb1
        default_subvolume:
            description: The ID of the filesystem's default subvolume.
            returned: success and if filesystem is mounted
            type: int
            sample: 5
        subvolumes:
            description: A list of dicts containing metadata for all of the filesystem's subvolumes.
            returned: success and if filesystem is mounted
            type: list
            elements: dict
            contains:
                id:
                    description: An identifier assigned to the subvolume, unique within the containing filesystem.
                    type: int
                    sample: 256
                mountpoints:
                    description: Paths where the subvolume is mounted on the targeted host.
                    type: list
                    sample: ['/home']
                parent:
                    description: The identifier of this subvolume's parent.
                    type: int
                    sample: 5
                path:
                    description: The full path of the subvolume relative to the btrfs filesystem's root.
                    type: str
                    sample: /@home

modifications:
    description:
        - A list where each element describes a change made to the target btrfs filesystem.
    type: list
    returned: success
    elements: str

target_subvolume_id:
    description:
        - The ID of the subvolume specified with the I(name) parameter, either pre-existing or created as part of module execution.
    type: int
    sample: 257
    returned: success and subvolume exists after module execution
'''

from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider, BtrfsCommands, BtrfsModuleException
from ansible_collections.community.general.plugins.module_utils.btrfs import normalize_subvolume_path
from ansible.module_utils.basic import AnsibleModule
import os
import tempfile


class BtrfsSubvolumeModule(object):

    __BTRFS_ROOT_SUBVOLUME = '/'
    __BTRFS_ROOT_SUBVOLUME_ID = 5
    __BTRFS_SUBVOLUME_INODE_NUMBER = 256

    __CREATE_SUBVOLUME_OPERATION = 'create'
    __CREATE_SNAPSHOT_OPERATION = 'snapshot'
    __DELETE_SUBVOLUME_OPERATION = 'delete'
    __SET_DEFAULT_SUBVOLUME_OPERATION = 'set-default'

    __UNKNOWN_SUBVOLUME_ID = '?'

    def __init__(self, module):
        self.module = module
        self.__btrfs_api = BtrfsCommands(module)
        self.__provider = BtrfsFilesystemsProvider(module)

        # module parameters
        name = self.module.params['name']
        self.__name = normalize_subvolume_path(name) if name is not None else None
        self.__state = self.module.params['state']

        self.__automount = self.module.params['automount']
        self.__default = self.module.params['default']
        self.__filesystem_device = self.module.params['filesystem_device']
        self.__filesystem_label = self.module.params['filesystem_label']
        self.__filesystem_uuid = self.module.params['filesystem_uuid']
        self.__recursive = self.module.params['recursive']
        self.__snapshot_conflict = self.module.params['snapshot_conflict']
        snapshot_source = self.module.params['snapshot_source']
        self.__snapshot_source = normalize_subvolume_path(snapshot_source) if snapshot_source is not None else None

        # execution state
        self.__filesystem = None
        self.__required_mounts = []
        self.__unit_of_work = []
        self.__completed_work = []
        self.__temporary_mounts = dict()

    def run(self):
        error = None
        try:
            self.__load_filesystem()
            self.__prepare_unit_of_work()

            if not self.module.check_mode:
                # check required mounts & mount
                if len(self.__unit_of_work) > 0:
                    self.__execute_unit_of_work()
                    self.__filesystem.refresh()
            else:
                # check required mounts
                self.__completed_work.extend(self.__unit_of_work)
        except Exception as e:
            error = e
        finally:
            self.__cleanup_mounts()
            if self.__filesystem is not None:
                self.__filesystem.refresh_mountpoints()

        return (error, self.get_results())

    # Identify the targeted filesystem and obtain the current state
    def __load_filesystem(self):
        if self.__has_filesystem_criteria():
            filesystem = self.__find_matching_filesystem()
        else:
            filesystem = self.__find_default_filesystem()

        # The filesystem must be mounted to obtain the current state (subvolumes, default, etc)
        if not filesystem.is_mounted():
            if not self.__automount:
                raise BtrfsModuleException(
                    "Target filesystem uuid=%s is not currently mounted and automount=False. "
                    "Mount explicitly before module execution or pass automount=True" % filesystem.uuid)
            elif self.module.check_mode:
                # TODO is failing the module an appropriate outcome in this scenario?
                raise BtrfsModuleException(
                    "Target filesystem uuid=%s is not currently mounted. Unable to validate the current "
                    "state while running with check_mode=True" % filesystem.uuid)
            else:
                self.__mount_subvolume_id_to_tempdir(filesystem, self.__BTRFS_ROOT_SUBVOLUME_ID)
                filesystem.refresh()
        self.__filesystem = filesystem

    def __has_filesystem_criteria(self):
        return self.__filesystem_uuid is not None or self.__filesystem_label is not None or self.__filesystem_device is not None

    def __find_matching_filesystem(self):
        criteria = {
            'uuid': self.__filesystem_uuid,
            'label': self.__filesystem_label,
            'device': self.__filesystem_device,
        }
        return self.__provider.get_matching_filesystem(criteria)

    def __find_default_filesystem(self):
        filesystems = self.__provider.get_filesystems()
        filesystem = None

        if len(filesystems) == 1:
            filesystem = filesystems[0]
        else:
            mounted_filesystems = [x for x in filesystems if x.is_mounted()]
            if len(mounted_filesystems) == 1:
                filesystem = mounted_filesystems[0]

        if filesystem is not None:
            return filesystem
        else:
            raise BtrfsModuleException(
                "Failed to automatically identify targeted filesystem. "
                "No explicit device indicated and found %d available filesystems." % len(filesystems)
            )

    # Prepare unit of work
    def __prepare_unit_of_work(self):
        if self.__state == "present":
            if self.__snapshot_source is None:
                self.__prepare_subvolume_present()
            else:
                self.__prepare_snapshot_present()

            if self.__default:
                self.__prepare_set_default()
        elif self.__state == "absent":
            self.__prepare_subvolume_absent()

    def __prepare_subvolume_present(self):
        subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
        if subvolume is None:
            self.__prepare_before_create_subvolume(self.__name)
            self.__stage_create_subvolume(self.__name)

    def __prepare_before_create_subvolume(self, subvolume_name):
        closest_parent = self.__filesystem.get_nearest_subvolume(subvolume_name)
        self.__stage_required_mount(closest_parent)
        if self.__recursive:
            self.__prepare_create_intermediates(closest_parent, subvolume_name)

    def __prepare_create_intermediates(self, closest_subvolume, subvolume_name):
        relative_path = closest_subvolume.get_child_relative_path(self.__name)
        missing_subvolumes = [x for x in relative_path.split(os.path.sep) if len(x) > 0]
        if len(missing_subvolumes) > 1:
            current = closest_subvolume.path
            for s in missing_subvolumes[:-1]:
                separator = os.path.sep if current[-1] != os.path.sep else ""
                current = current + separator + s
                self.__stage_create_subvolume(current, True)

    def __prepare_snapshot_present(self):
        source_subvolume = self.__filesystem.get_subvolume_by_name(self.__snapshot_source)
        subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
        subvolume_exists = subvolume is not None

        if subvolume_exists:
            if self.__snapshot_conflict == "skip":
                # No change required
                return
            elif self.__snapshot_conflict == "error":
                raise BtrfsModuleException("Target subvolume=%s already exists and snapshot_conflict='error'" % self.__name)

        if source_subvolume is None:
            raise BtrfsModuleException("Source subvolume %s does not exist" % self.__snapshot_source)
        elif subvolume is not None and source_subvolume.id == subvolume.id:
            raise BtrfsModuleException("Snapshot source and target are the same.")
        else:
            self.__stage_required_mount(source_subvolume)

        if subvolume_exists and self.__snapshot_conflict == "clobber":
            self.__prepare_delete_subvolume_tree(subvolume)
        elif not subvolume_exists:
            self.__prepare_before_create_subvolume(self.__name)

        self.__stage_create_snapshot(source_subvolume, self.__name)

    def __prepare_subvolume_absent(self):
        subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
        if subvolume is not None:
            self.__prepare_delete_subvolume_tree(subvolume)

    def __prepare_delete_subvolume_tree(self, subvolume):
        if subvolume.is_filesystem_root():
            raise BtrfsModuleException("Can not delete the filesystem's root subvolume")
        if not self.__recursive and len(subvolume.get_child_subvolumes()) > 0:
            raise BtrfsModuleException("Subvolume targeted for deletion %s has children and recursive=False. "
                                       "Either explicitly delete the child subvolumes first or pass "
                                       "parameter recursive=True." % subvolume.path)

        self.__stage_required_mount(subvolume.get_parent_subvolume())
        queue = self.__prepare_recursive_delete_order(subvolume) if self.__recursive else [subvolume]
        # prepare unit of work
        for s in queue:
            if s.is_mounted():
                # TODO potentially unmount the subvolume if automount=True ?
                raise BtrfsModuleException("Can not delete mounted subvolume=%s" % s.path)
            if s.is_filesystem_default():
                self.__stage_set_default_subvolume(self.__BTRFS_ROOT_SUBVOLUME, self.__BTRFS_ROOT_SUBVOLUME_ID)
            self.__stage_delete_subvolume(s)

    def __prepare_recursive_delete_order(self, subvolume):
        """Return the subvolume and all descendents as a list, ordered so that descendents always occur before their ancestors"""
        pending = [subvolume]
        ordered = []
        while len(pending) > 0:
            next = pending.pop()
            ordered.append(next)
            pending.extend(next.get_child_subvolumes())
        ordered.reverse()  # reverse to ensure children are deleted before their parent
        return ordered

    def __prepare_set_default(self):
        subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
        subvolume_id = subvolume.id if subvolume is not None else None

        if self.__filesystem.default_subvolid != subvolume_id:
            self.__stage_set_default_subvolume(self.__name, subvolume_id)

    # Stage operations to the unit of work
    def __stage_required_mount(self, subvolume):
        if subvolume.get_mounted_path() is None:
            if self.__automount:
                self.__required_mounts.append(subvolume)
            else:
                raise BtrfsModuleException("The requested changes will require the subvolume '%s' to be mounted, but automount=False" % subvolume.path)

    def __stage_create_subvolume(self, subvolume_path, intermediate=False):
        """
        Add required creation of an intermediate subvolume to the unit of work.
        If intermediate is true, the action will be skipped if a directory-like file is found at the target
        after mounting a parent subvolume.
        """
        self.__unit_of_work.append({
            'action': self.__CREATE_SUBVOLUME_OPERATION,
            'target': subvolume_path,
            'intermediate': intermediate,
        })

    def __stage_create_snapshot(self, source_subvolume, target_subvolume_path):
        """Add creation of a snapshot from source to target to the unit of work"""
        self.__unit_of_work.append({
            'action': self.__CREATE_SNAPSHOT_OPERATION,
            'source': source_subvolume.path,
            'source_id': source_subvolume.id,
            'target': target_subvolume_path,
        })

    def __stage_delete_subvolume(self, subvolume):
        """Add deletion of the target subvolume to the unit of work"""
        self.__unit_of_work.append({
            'action': self.__DELETE_SUBVOLUME_OPERATION,
            'target': subvolume.path,
            'target_id': subvolume.id,
        })

    def __stage_set_default_subvolume(self, subvolume_path, subvolume_id=None):
        """Add update of the filesystem's default subvolume to the unit of work"""
        self.__unit_of_work.append({
            'action': self.__SET_DEFAULT_SUBVOLUME_OPERATION,
            'target': subvolume_path,
            'target_id': subvolume_id,
        })

    # Execute the unit of work
    def __execute_unit_of_work(self):
        self.__check_required_mounts()
        for op in self.__unit_of_work:
            if op['action'] == self.__CREATE_SUBVOLUME_OPERATION:
                self.__execute_create_subvolume(op)
            elif op['action'] == self.__CREATE_SNAPSHOT_OPERATION:
                self.__execute_create_snapshot(op)
            elif op['action'] == self.__DELETE_SUBVOLUME_OPERATION:
                self.__execute_delete_subvolume(op)
            elif op['action'] == self.__SET_DEFAULT_SUBVOLUME_OPERATION:
                self.__execute_set_default_subvolume(op)
            else:
                raise ValueError("Unknown operation type '%s'" % op['action'])

    def __execute_create_subvolume(self, operation):
        target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target'])
        if not self.__is_existing_directory_like(target_mounted_path):
            self.__btrfs_api.subvolume_create(target_mounted_path)
            self.__completed_work.append(operation)

    def __execute_create_snapshot(self, operation):
        source_subvolume = self.__filesystem.get_subvolume_by_name(operation['source'])
        source_mounted_path = source_subvolume.get_mounted_path()
        target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target'])

        self.__btrfs_api.subvolume_snapshot(source_mounted_path, target_mounted_path)
        self.__completed_work.append(operation)

    def __execute_delete_subvolume(self, operation):
        target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target'])
        self.__btrfs_api.subvolume_delete(target_mounted_path)
        self.__completed_work.append(operation)

    def __execute_set_default_subvolume(self, operation):
        target = operation['target']
        target_id = operation['target_id']

        if target_id is None:
            target_subvolume = self.__filesystem.get_subvolume_by_name(target)

            if target_subvolume is None:
                self.__filesystem.refresh()  # the target may have been created earlier in module execution
                target_subvolume = self.__filesystem.get_subvolume_by_name(target)

            if target_subvolume is None:
                raise BtrfsModuleException("Failed to find existing subvolume '%s'" % target)
            else:
                target_id = target_subvolume.id

        self.__btrfs_api.subvolume_set_default(self.__filesystem.get_any_mountpoint(), target_id)
        self.__completed_work.append(operation)

    def __is_existing_directory_like(self, path):
        return os.path.exists(path) and (
            os.path.isdir(path) or
            os.stat(path).st_ino == self.__BTRFS_SUBVOLUME_INODE_NUMBER
        )

    def __check_required_mounts(self):
        filtered = self.__filter_child_subvolumes(self.__required_mounts)
        if len(filtered) > 0:
            for subvolume in filtered:
                self.__mount_subvolume_id_to_tempdir(self.__filesystem, subvolume.id)
            self.__filesystem.refresh_mountpoints()

    def __filter_child_subvolumes(self, subvolumes):
        """Filter the provided list of subvolumes to remove any that are a child of another item in the list"""
        filtered = []
        last = None
        ordered = sorted(subvolumes, key=lambda x: x.path)
        for next in ordered:
            if last is None or not next.path[0:len(last)] == last:
                filtered.append(next)
                last = next.path
        return filtered

    # Create/cleanup temporary mountpoints
    def __mount_subvolume_id_to_tempdir(self, filesystem, subvolid):
        # this check should be redundant
        if self.module.check_mode or not self.__automount:
            raise BtrfsModuleException("Unable to temporarily mount required subvolumes "
                                       "with automount=%s and check_mode=%s" % (self.__automount, self.module.check_mode))

        cache_key = "%s:%d" % (filesystem.uuid, subvolid)
        # The subvolume was already mounted, so return the current path
        if cache_key in self.__temporary_mounts:
            return self.__temporary_mounts[cache_key]

        device = filesystem.devices[0]
        mountpoint = tempfile.mkdtemp(dir="/tmp")
        self.__temporary_mounts[cache_key] = mountpoint

        mount = self.module.get_bin_path("mount", required=True)
        command = "%s -o noatime,subvolid=%d %s %s" % (mount, subvolid, device, mountpoint)
        self.module.run_command(command, check_rc=True)

        return mountpoint

    def __cleanup_mounts(self):
        for key in self.__temporary_mounts.keys():
            self.__cleanup_mount(self.__temporary_mounts[key])

    def __cleanup_mount(self, mountpoint):
        umount = self.module.get_bin_path("umount", required=True)
        result = self.module.run_command("%s %s" % (umount, mountpoint))
        if result[0] == 0:
            rmdir = self.module.get_bin_path("rmdir", required=True)
            self.module.run_command("%s %s" % (rmdir, mountpoint))

    # Format and return results
    def get_results(self):
        target = self.__filesystem.get_subvolume_by_name(self.__name)
        return dict(
            changed=len(self.__completed_work) > 0,
            filesystem=self.__filesystem.get_summary(),
            modifications=self.__get_formatted_modifications(),
            target_subvolume_id=(target.id if target is not None else None)
        )

    def __get_formatted_modifications(self):
        return [self.__format_operation_result(op) for op in self.__completed_work]

    def __format_operation_result(self, operation):
        action_type = operation['action']
        if action_type == self.__CREATE_SUBVOLUME_OPERATION:
            return self.__format_create_subvolume_result(operation)
        elif action_type == self.__CREATE_SNAPSHOT_OPERATION:
            return self.__format_create_snapshot_result(operation)
        elif action_type == self.__DELETE_SUBVOLUME_OPERATION:
            return self.__format_delete_subvolume_result(operation)
        elif action_type == self.__SET_DEFAULT_SUBVOLUME_OPERATION:
            return self.__format_set_default_subvolume_result(operation)
        else:
            raise ValueError("Unknown operation type '%s'" % operation['action'])

    def __format_create_subvolume_result(self, operation):
        target = operation['target']
        target_subvolume = self.__filesystem.get_subvolume_by_name(target)
        target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID
        return "Created subvolume '%s' (%s)" % (target, target_id)

    def __format_create_snapshot_result(self, operation):
        source = operation['source']
        source_id = operation['source_id']

        target = operation['target']
        target_subvolume = self.__filesystem.get_subvolume_by_name(target)
        target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID
        return "Created snapshot '%s' (%s) from '%s' (%s)" % (target, target_id, source, source_id)

    def __format_delete_subvolume_result(self, operation):
        target = operation['target']
        target_id = operation['target_id']
        return "Deleted subvolume '%s' (%s)" % (target, target_id)

    def __format_set_default_subvolume_result(self, operation):
        target = operation['target']
        if 'target_id' in operation:
            target_id = operation['target_id']
        else:
            target_subvolume = self.__filesystem.get_subvolume_by_name(target)
            target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID
        return "Updated default subvolume to '%s' (%s)" % (target, target_id)


def run_module():
    module_args = dict(
        automount=dict(type='bool', required=False, default=False),
        default=dict(type='bool', required=False, default=False),
        filesystem_device=dict(type='path', required=False),
        filesystem_label=dict(type='str', required=False),
        filesystem_uuid=dict(type='str', required=False),
        name=dict(type='str', required=True),
        recursive=dict(type='bool', default=False),
        state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
        snapshot_source=dict(type='str', required=False),
        snapshot_conflict=dict(type='str', required=False, default='skip', choices=['skip', 'clobber', 'error'])
    )

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )

    subvolume = BtrfsSubvolumeModule(module)
    error, result = subvolume.run()
    if error is not None:
        module.fail_json(msg=str(error), **result)
    else:
        module.exit_json(**result)


def main():
    run_module()


if __name__ == '__main__':
    main()
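
The ordering trick in __prepare_recursive_delete_order() above is worth calling out: descendants are collected with a depth-first walk and the list is then reversed, so every subvolume is staged for deletion before its parent. A standalone sketch of the same idea (the node type here is hypothetical):

def delete_order(root):
    pending = [root]
    ordered = []
    while pending:
        node = pending.pop()
        ordered.append(node)
        pending.extend(node.children)  # assumes each node exposes its children
    ordered.reverse()  # children now precede their parents
    return ordered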

@@ -31,11 +31,6 @@ short_description: Create, Start, Stop, Restart or Terminate an Instance in ECS;
description:
    - Create, start, stop, restart, modify or terminate ECS instances.
    - Add or remove ECS instances to/from a security group.
attributes:
    check_mode:
        support: none
    diff_mode:
        support: none
options:
    state:
        description:
@@ -257,7 +252,6 @@ requirements:
    - "footmark >= 1.19.0"
extends_documentation_fragment:
    - community.general.alicloud
    - community.general.attributes
'''

EXAMPLES = '''
@@ -33,11 +33,6 @@ description:
      The module must be called from within the ECS instance itself.
    - This module was called C(ali_instance_facts) before Ansible 2.9. The usage did not change.

attributes:
    check_mode:
        version_added: 3.3.0
        # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
options:
    name_prefix:
        description:
@@ -65,8 +60,6 @@ requirements:
    - "footmark >= 1.13.0"
extends_documentation_fragment:
    - community.general.alicloud
    - community.general.attributes
    - community.general.attributes.info_module
'''

EXAMPLES = '''
@@ -22,13 +22,6 @@ notes:
requirements:
    - atomic
    - "python >= 2.6"
extends_documentation_fragment:
    - community.general.attributes
attributes:
    check_mode:
        support: none
    diff_mode:
        support: none
options:
    backend:
        description:
@@ -22,13 +22,6 @@ notes:
requirements:
    - atomic
    - python >= 2.6
extends_documentation_fragment:
    - community.general.attributes
attributes:
    check_mode:
        support: none
    diff_mode:
        support: none
options:
    revision:
        description:
@@ -22,13 +22,6 @@ notes:
requirements:
    - atomic
    - python >= 2.6
extends_documentation_fragment:
    - community.general.attributes
attributes:
    check_mode:
        support: none
    diff_mode:
        support: none
options:
    backend:
        description:
@@ -14,13 +14,6 @@ module: clc_aa_policy
short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud
description:
    - An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud.
extends_documentation_fragment:
    - community.general.attributes
attributes:
    check_mode:
        support: full
    diff_mode:
        support: none
options:
    name:
        description:
@@ -15,13 +15,6 @@ module: clc_alert_policy
short_description: Create or Delete Alert Policies at CenturyLink Cloud
description:
    - An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud.
extends_documentation_fragment:
    - community.general.attributes
attributes:
    check_mode:
        support: full
    diff_mode:
        support: none
options:
    alias:
        description:
@@ -14,13 +14,6 @@ module: clc_blueprint_package
short_description: Deploys a blueprint package on a set of servers in CenturyLink Cloud
description:
    - An Ansible module to deploy a blueprint package on a set of servers in CenturyLink Cloud.
extends_documentation_fragment:
    - community.general.attributes
attributes:
    check_mode:
        support: full
    diff_mode:
        support: none
options:
    server_ids:
        description:
@@ -14,13 +14,6 @@ module: clc_firewall_policy
short_description: Create/delete/update firewall policies
description:
    - Create, delete, or update firewall policies on CenturyLink Cloud.
extends_documentation_fragment:
    - community.general.attributes
attributes:
    check_mode:
        support: full
    diff_mode:
        support: none
options:
    location:
        description:
@@ -15,13 +15,6 @@ module: clc_group
short_description: Create/delete Server Groups at CenturyLink Cloud
description:
    - Create or delete Server Groups at CenturyLink Cloud.
extends_documentation_fragment:
    - community.general.attributes
attributes:
    check_mode:
        support: full
    diff_mode:
        support: none
options:
    name:
        description:
@@ -15,13 +15,6 @@ module: clc_loadbalancer
short_description: Create, Delete shared loadbalancers in CenturyLink Cloud
description:
    - An Ansible module to Create, Delete shared loadbalancers in CenturyLink Cloud.
extends_documentation_fragment:
    - community.general.attributes
attributes:
    check_mode:
        support: full
    diff_mode:
        support: none
options:
    name:
        description:
Some files were not shown because too many files have changed in this diff.