Mirror of https://github.com/ansible-collections/community.general.git (synced 2026-04-28 09:26:44 +00:00)
Compare commits: 346 commits

.azure-pipelines/README.md (new file, 3 lines)
@@ -0,0 +1,3 @@
## Azure Pipelines Configuration

Please see the [Documentation](https://github.com/ansible/community/wiki/Testing:-Azure-Pipelines) for more information.

.azure-pipelines/azure-pipelines.yml (new file, 329 lines)
@@ -0,0 +1,329 @@
trigger:
  batch: true
  branches:
    include:
      - main
      - stable-*

pr:
  autoCancel: true
  branches:
    include:
      - main
      - stable-*

schedules:
  - cron: 0 9 * * *
    displayName: Nightly
    always: true
    branches:
      include:
        - main
        - stable-*

variables:
  - name: checkoutPath
    value: ansible_collections/community/general
  - name: coverageBranches
    value: main
  - name: pipelinesCoverage
    value: coverage
  - name: entryPoint
    value: tests/utils/shippable/shippable.sh
  - name: fetchDepth
    value: 0

resources:
  containers:
    - container: default
      image: quay.io/ansible/azure-pipelines-test-container:1.7.1

pool: Standard

stages:
### Sanity
  - stage: Sanity_devel
    displayName: Sanity devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: devel/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
            - test: extra
  - stage: Sanity_2_10
    displayName: Sanity 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: 2.10/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
  - stage: Sanity_2_9
    displayName: Sanity 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: 2.9/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
### Units
  - stage: Units_devel
    displayName: Units devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: devel/units/{0}/1
          targets:
            - test: 2.6
            - test: 2.7
            - test: 3.5
            - test: 3.6
            - test: 3.7
            - test: 3.8
            - test: 3.9
  - stage: Units_2_10
    displayName: Units 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.10/units/{0}/1
          targets:
            - test: 2.6
            - test: 2.7
            - test: 3.5
            - test: 3.6
            - test: 3.7
            - test: 3.8
            - test: 3.9
  - stage: Units_2_9
    displayName: Units 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.9/units/{0}/1
          targets:
            - test: 2.6
            - test: 2.7
            - test: 3.5
            - test: 3.6
            - test: 3.7
            - test: 3.8

## Remote
  - stage: Remote_devel
    displayName: Remote devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: devel/{0}
          targets:
            - name: OS X 10.11
              test: osx/10.11
            - name: macOS 10.15
              test: macos/10.15
            - name: RHEL 7.8
              test: rhel/7.8
            - name: RHEL 8.2
              test: rhel/8.2
            - name: FreeBSD 11.1
              test: freebsd/11.1
            - name: FreeBSD 12.1
              test: freebsd/12.1
          groups:
            - 1
            - 2
            - 3
            - 4
            - 5
  - stage: Remote_2_10
    displayName: Remote 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.10/{0}
          targets:
            - name: OS X 10.11
              test: osx/10.11
            - name: RHEL 8.2
              test: rhel/8.2
            - name: FreeBSD 12.1
              test: freebsd/12.1
          groups:
            - 1
            - 2
            - 3
            - 4
            - 5
  - stage: Remote_2_9
    displayName: Remote 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.9/{0}
          targets:
            - name: RHEL 8.2
              test: rhel/8.2
            #- name: FreeBSD 12.0
            #  test: freebsd/12.0
          groups:
            - 1
            - 2
            - 3
            - 4
            - 5

### Docker
  - stage: Docker_devel
    displayName: Docker devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: devel/linux/{0}
          targets:
            - name: CentOS 6
              test: centos6
            - name: CentOS 7
              test: centos7
            - name: CentOS 8
              test: centos8
            - name: Fedora 32
              test: fedora32
            - name: Fedora 33
              test: fedora33
            - name: openSUSE 15 py2
              test: opensuse15py2
            - name: openSUSE 15 py3
              test: opensuse15
            - name: Ubuntu 16.04
              test: ubuntu1604
            - name: Ubuntu 18.04
              test: ubuntu1804
          groups:
            - 1
            - 2
            - 3
            - 4
            - 5
  - stage: Docker_2_10
    displayName: Docker 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.10/linux/{0}
          targets:
            #- name: CentOS 8
            #  test: centos8
            - name: Fedora 32
              test: fedora32
            - name: openSUSE 15 py3
              test: opensuse15
            - name: Ubuntu 18.04
              test: ubuntu1804
          groups:
            - 1
            - 2
            - 3
            - 4
            - 5
  - stage: Docker_2_9
    displayName: Docker 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.9/linux/{0}
          targets:
            #- name: CentOS 8
            #  test: centos8
            #- name: Fedora 31
            #  test: fedora31
            #- name: openSUSE 15 py3
            #  test: opensuse15
            - name: Ubuntu 18.04
              test: ubuntu1804
          groups:
            - 1
            - 2
            - 3
            - 4
            - 5

### Cloud
  - stage: Cloud_devel
    displayName: Cloud devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: devel/cloud/{0}/1
          targets:
            - test: 2.7
            - test: 3.6
  - stage: Cloud_2_10
    displayName: Cloud 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.10/cloud/{0}/1
          targets:
            - test: 3.6
  - stage: Cloud_2_9
    displayName: Cloud 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.9/cloud/{0}/1
          targets:
            - test: 2.7
  - stage: Summary
    condition: succeededOrFailed()
    dependsOn:
      - Sanity_devel
      - Sanity_2_9
      - Sanity_2_10
      - Units_devel
      - Units_2_9
      - Units_2_10
      - Remote_devel
      - Remote_2_9
      - Remote_2_10
      - Docker_devel
      - Docker_2_9
      - Docker_2_10
      - Cloud_devel
      - Cloud_2_9
      - Cloud_2_10
    jobs:
      - template: templates/coverage.yml

.azure-pipelines/scripts/aggregate-coverage.sh (new executable file, 20 lines)
@@ -0,0 +1,20 @@
#!/usr/bin/env bash
# Aggregate code coverage results for later processing.

set -o pipefail -eu

agent_temp_directory="$1"

PATH="${PWD}/bin:${PATH}"

mkdir "${agent_temp_directory}/coverage/"

options=(--venv --venv-system-site-packages --color -v)

ansible-test coverage combine --export "${agent_temp_directory}/coverage/" "${options[@]}"

if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then
    # Only analyze coverage if the installed version of ansible-test supports it.
    # Doing so allows this script to work unmodified for multiple Ansible versions.
    ansible-test coverage analyze targets generate "${agent_temp_directory}/coverage/coverage-analyze-targets.json" "${options[@]}"
fi

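The `--help` probe in the script above is a capability check: instead of comparing version numbers, it asks the installed ansible-test whether the subcommand exists at all. A rough Python equivalent of the same probe, for illustration only (it assumes ansible-test is on PATH, just as the shell script does):

```python
import subprocess

def supports_analyze_targets():
    """Return True if the installed ansible-test has 'coverage analyze targets generate'."""
    # Mirrors the shell probe: run the subcommand with --help, discard all output,
    # and treat a zero exit status as "supported".
    result = subprocess.run(
        ['ansible-test', 'coverage', 'analyze', 'targets', 'generate', '--help'],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    return result.returncode == 0
```
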
.azure-pipelines/scripts/combine-coverage.py (new executable file, 60 lines)
@@ -0,0 +1,60 @@
#!/usr/bin/env python
"""
Combine coverage data from multiple jobs, keeping the data only from the most recent attempt from each job.
Coverage artifacts must be named using the format: "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}"
The recommended coverage artifact name format is: Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)
Keep in mind that Azure Pipelines does not enforce unique job display names (only names).
It is up to pipeline authors to avoid name collisions when deviating from the recommended format.
"""

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import re
import shutil
import sys


def main():
    """Main program entry point."""
    source_directory = sys.argv[1]

    if '/ansible_collections/' in os.getcwd():
        output_path = "tests/output"
    else:
        output_path = "test/results"

    destination_directory = os.path.join(output_path, 'coverage')

    if not os.path.exists(destination_directory):
        os.makedirs(destination_directory)

    jobs = {}
    count = 0

    for name in os.listdir(source_directory):
        match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)
        label = match.group('label')
        attempt = int(match.group('attempt'))
        jobs[label] = max(attempt, jobs.get(label, 0))

    for label, attempt in jobs.items():
        name = 'Coverage {attempt} {label}'.format(label=label, attempt=attempt)
        source = os.path.join(source_directory, name)
        source_files = os.listdir(source)

        for source_file in source_files:
            source_path = os.path.join(source, source_file)
            destination_path = os.path.join(destination_directory, source_file + '.' + label)
            print('"%s" -> "%s"' % (source_path, destination_path))
            shutil.copyfile(source_path, destination_path)
            count += 1

    print('Coverage file count: %d' % count)
    print('##vso[task.setVariable variable=coverageFileCount]%d' % count)
    print('##vso[task.setVariable variable=outputPath]%s' % output_path)


if __name__ == '__main__':
    main()

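The attempt bookkeeping above is easy to verify in isolation. A minimal sketch with hypothetical artifact names (not taken from any real pipeline run) shows how the regex plus `max()` keeps only the newest attempt per job label:

```python
import re

# Hypothetical artifact names following "Coverage $(System.JobAttempt) {label}".
artifact_names = [
    'Coverage 1 Units devel Python 3.8',
    'Coverage 2 Units devel Python 3.8',  # a retry supersedes attempt 1
    'Coverage 1 Remote devel RHEL 8.2 - 1',
]

jobs = {}
for name in artifact_names:
    match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)
    label = match.group('label')
    jobs[label] = max(int(match.group('attempt')), jobs.get(label, 0))

print(jobs)  # {'Units devel Python 3.8': 2, 'Remote devel RHEL 8.2 - 1': 1}
```
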
.azure-pipelines/scripts/process-results.sh (new executable file, 24 lines)
@@ -0,0 +1,24 @@
#!/usr/bin/env bash
# Check the test results and set variables for use in later steps.

set -o pipefail -eu

if [[ "$PWD" =~ /ansible_collections/ ]]; then
    output_path="tests/output"
else
    output_path="test/results"
fi

echo "##vso[task.setVariable variable=outputPath]${output_path}"

if compgen -G "${output_path}"'/junit/*.xml' > /dev/null; then
    echo "##vso[task.setVariable variable=haveTestResults]true"
fi

if compgen -G "${output_path}"'/bot/ansible-test-*' > /dev/null; then
    echo "##vso[task.setVariable variable=haveBotResults]true"
fi

if compgen -G "${output_path}"'/coverage/*' > /dev/null; then
    echo "##vso[task.setVariable variable=haveCoverageData]true"
fi

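`compgen -G` succeeds only when the glob matches at least one existing path, so each block sets its pipeline variable only if matching files exist. The same existence test in Python, using the collection-checkout output path selected by the script above:

```python
import glob

output_path = 'tests/output'  # the path chosen above for collection checkouts

# Equivalent of: if compgen -G "${output_path}"'/junit/*.xml' > /dev/null
if glob.glob(output_path + '/junit/*.xml'):
    print('##vso[task.setVariable variable=haveTestResults]true')
```
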
.azure-pipelines/scripts/publish-codecov.sh (new executable file, 27 lines)
@@ -0,0 +1,27 @@
#!/usr/bin/env bash
# Upload code coverage reports to codecov.io.
# Multiple coverage files from multiple languages are accepted and aggregated after upload.
# Python coverage, as well as PowerShell and Python stubs can all be uploaded.

set -o pipefail -eu

output_path="$1"

curl --silent --show-error https://codecov.io/bash > codecov.sh

for file in "${output_path}"/reports/coverage*.xml; do
    name="${file}"
    name="${name##*/}"  # remove path
    name="${name##coverage=}"  # remove 'coverage=' prefix if present
    name="${name%.xml}"  # remove '.xml' suffix

    bash codecov.sh \
        -f "${file}" \
        -n "${name}" \
        -X coveragepy \
        -X gcov \
        -X fix \
        -X search \
        -X xcode \
        || echo "Failed to upload code coverage report to codecov.io: ${file}"
done

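The three parameter expansions derive the upload name from the report path: strip the leading directories, strip an optional `coverage=` prefix, then strip the `.xml` suffix. A small Python sketch of the same derivation (the sample path is hypothetical):

```python
def report_name(path):
    name = path.rsplit('/', 1)[-1]     # remove path, like ${name##*/}
    if name.startswith('coverage='):   # remove 'coverage=' prefix if present
        name = name[len('coverage='):]
    if name.endswith('.xml'):          # remove '.xml' suffix
        name = name[:-len('.xml')]
    return name

print(report_name('tests/output/reports/coverage=units.py38.xml'))  # units.py38
```
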
.azure-pipelines/scripts/report-coverage.sh (new executable file, 15 lines)
@@ -0,0 +1,15 @@
#!/usr/bin/env bash
# Generate code coverage reports for uploading to Azure Pipelines and codecov.io.

set -o pipefail -eu

PATH="${PWD}/bin:${PATH}"

if ! ansible-test --help >/dev/null 2>&1; then
    # Install the devel version of ansible-test for generating code coverage reports.
    # This is only used by Ansible Collections, which are typically tested against multiple Ansible versions (in separate jobs).
    # Since a version of ansible-test is required that can work with the output from multiple older releases, the devel version is used.
    pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
fi

ansible-test coverage xml --stub --venv --venv-system-site-packages --color -v

.azure-pipelines/scripts/run-tests.sh (new executable file, 34 lines)
@@ -0,0 +1,34 @@
#!/usr/bin/env bash
# Configure the test environment and run the tests.

set -o pipefail -eu

entry_point="$1"
test="$2"
read -r -a coverage_branches <<< "$3"  # space separated list of branches to run code coverage on for scheduled builds

export COMMIT_MESSAGE
export COMPLETE
export COVERAGE
export IS_PULL_REQUEST

if [ "${SYSTEM_PULLREQUEST_TARGETBRANCH:-}" ]; then
    IS_PULL_REQUEST=true
    COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD^2)
else
    IS_PULL_REQUEST=
    COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD)
fi

COMPLETE=
COVERAGE=

if [ "${BUILD_REASON}" = "Schedule" ]; then
    COMPLETE=yes

    if printf '%s\n' "${coverage_branches[@]}" | grep -q "^${BUILD_SOURCEBRANCHNAME}$"; then
        COVERAGE=yes
    fi
fi

"${entry_point}" "${test}" 2>&1 | "$(dirname "$0")/time-command.py"

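Coverage is only switched on for scheduled builds whose source branch appears verbatim in the space-separated list passed as the third argument; the `^...$` anchors in the `grep -q` make it an exact match rather than a substring match. A Python rendering of that check, with hypothetical input values:

```python
coverage_branches = 'main'.split()   # third script argument, e.g. "$(coverageBranches)"
build_source_branch_name = 'main'    # $BUILD_SOURCEBRANCHNAME
build_reason = 'Schedule'            # $BUILD_REASON

complete = coverage = False
if build_reason == 'Schedule':
    complete = True
    # Exact-match test, like: printf '%s\n' "${coverage_branches[@]}" | grep -q "^branch$"
    coverage = build_source_branch_name in coverage_branches

print(complete, coverage)  # True True
```
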
.azure-pipelines/scripts/time-command.py (new executable file, 25 lines)
@@ -0,0 +1,25 @@
#!/usr/bin/env python
"""Prepends a relative timestamp to each input line from stdin and writes it to stdout."""

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import sys
import time


def main():
    """Main program entry point."""
    start = time.time()

    sys.stdin.reconfigure(errors='surrogateescape')
    sys.stdout.reconfigure(errors='surrogateescape')

    for line in sys.stdin:
        seconds = time.time() - start
        sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line))
        sys.stdout.flush()


if __name__ == '__main__':
    main()

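The `%02d:%02d` format renders elapsed time as zero-padded minutes and seconds; run-tests.sh above pipes the whole test run through this filter. A quick check of the arithmetic with an arbitrary sample value:

```python
seconds = 754.3  # arbitrary elapsed time in seconds
print('%02d:%02d %s' % (seconds // 60, seconds % 60, 'some output line'))
# 12:34 some output line
```
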
.azure-pipelines/templates/coverage.yml (new file, 39 lines)
@@ -0,0 +1,39 @@
# This template adds a job for processing code coverage data.
# It will upload results to Azure Pipelines and codecov.io.
# Use it from a job stage that completes after all other jobs have completed.
# This can be done by placing it in a separate summary stage that runs after the test stage(s) have completed.

jobs:
  - job: Coverage
    displayName: Code Coverage
    container: default
    workspace:
      clean: all
    steps:
      - checkout: self
        fetchDepth: $(fetchDepth)
        path: $(checkoutPath)
      - task: DownloadPipelineArtifact@2
        displayName: Download Coverage Data
        inputs:
          path: coverage/
          patterns: "Coverage */*=coverage.combined"
      - bash: .azure-pipelines/scripts/combine-coverage.py coverage/
        displayName: Combine Coverage Data
      - bash: .azure-pipelines/scripts/report-coverage.sh
        displayName: Generate Coverage Report
        condition: gt(variables.coverageFileCount, 0)
      - task: PublishCodeCoverageResults@1
        inputs:
          codeCoverageTool: Cobertura
          # Azure Pipelines only accepts a single coverage data file.
          # That means only Python or PowerShell coverage can be uploaded, but not both.
          # Set the "pipelinesCoverage" variable to determine which type is uploaded.
          # Use "coverage" for Python and "coverage-powershell" for PowerShell.
          summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml"
        displayName: Publish to Azure Pipelines
        condition: gt(variables.coverageFileCount, 0)
      - bash: .azure-pipelines/scripts/publish-codecov.sh "$(outputPath)"
        displayName: Publish to codecov.io
        condition: gt(variables.coverageFileCount, 0)
        continueOnError: true

.azure-pipelines/templates/matrix.yml (new file, 55 lines)
@@ -0,0 +1,55 @@
# This template uses the provided targets and optional groups to generate a matrix which is then passed to the test template.
# If this matrix template does not provide the required functionality, consider using the test template directly instead.

parameters:
  # A required list of dictionaries, one per test target.
  # Each item in the list must contain a "test" or "name" key.
  # Both may be provided. If one is omitted, the other will be used.
  - name: targets
    type: object

  # An optional list of values which will be used to multiply the targets list into a matrix.
  # Values can be strings or numbers.
  - name: groups
    type: object
    default: []

  # An optional format string used to generate the job name.
  # - {0} is the name of an item in the targets list.
  - name: nameFormat
    type: string
    default: "{0}"

  # An optional format string used to generate the test name.
  # - {0} is the name of an item in the targets list.
  - name: testFormat
    type: string
    default: "{0}"

  # An optional format string used to add the group to the job name.
  # {0} is the formatted name of an item in the targets list.
  # {{1}} is the group -- be sure to include the double "{{" and "}}".
  - name: nameGroupFormat
    type: string
    default: "{0} - {{1}}"

  # An optional format string used to add the group to the test name.
  # {0} is the formatted test of an item in the targets list.
  # {{1}} is the group -- be sure to include the double "{{" and "}}".
  - name: testGroupFormat
    type: string
    default: "{0}/{{1}}"

jobs:
  - template: test.yml
    parameters:
      jobs:
        - ${{ if eq(length(parameters.groups), 0) }}:
            - ${{ each target in parameters.targets }}:
                - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }}
                  test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }}
        - ${{ if not(eq(length(parameters.groups), 0)) }}:
            - ${{ each group in parameters.groups }}:
                - ${{ each target in parameters.targets }}:
                    - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }}
                      test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }}

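The double-brace escaping exists because `nameGroupFormat` and `testGroupFormat` pass through `format()` twice: the first pass injects the per-target format string and collapses `{{1}}` to a literal `{1}`, and the second pass injects the target and the group. Azure Pipelines' `format()` uses .NET-style composite formatting, whose brace escaping happens to behave like Python's `str.format`, so the mechanics can be sketched in Python (an analogy, not the actual template engine):

```python
name_group_format = '{0} - {{1}}'  # template default
name_format = 'Python {0}'         # e.g. nameFormat from the Units stages

combined = name_group_format.format(name_format)
print(combined)                   # Python {0} - {1}  ('{{1}}' became '{1}')

print(combined.format('3.8', 2))  # Python 3.8 - 2
```
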
.azure-pipelines/templates/test.yml (new file, 45 lines)
@@ -0,0 +1,45 @@
# This template uses the provided list of jobs to create one or more test jobs.
# It can be used directly if needed, or through the matrix template.

parameters:
  # A required list of dictionaries, one per test job.
  # Each item in the list must contain a "name" and "test" key.
  - name: jobs
    type: object

jobs:
  - ${{ each job in parameters.jobs }}:
      - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }}
        displayName: ${{ job.name }}
        container: default
        workspace:
          clean: all
        steps:
          - checkout: self
            fetchDepth: $(fetchDepth)
            path: $(checkoutPath)
          - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)"
            displayName: Run Tests
          - bash: .azure-pipelines/scripts/process-results.sh
            condition: succeededOrFailed()
            displayName: Process Results
          - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)"
            condition: eq(variables.haveCoverageData, 'true')
            displayName: Aggregate Coverage Data
          - task: PublishTestResults@2
            condition: eq(variables.haveTestResults, 'true')
            inputs:
              testResultsFiles: "$(outputPath)/junit/*.xml"
            displayName: Publish Test Results
          - task: PublishPipelineArtifact@1
            condition: eq(variables.haveBotResults, 'true')
            displayName: Publish Bot Results
            inputs:
              targetPath: "$(outputPath)/bot/"
              artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
          - task: PublishPipelineArtifact@1
            condition: eq(variables.haveCoverageData, 'true')
            displayName: Publish Coverage Data
            inputs:
              targetPath: "$(Agent.TempDirectory)/coverage/"
              artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"

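The chained `replace()` calls sanitize the test path into a job identifier, since Azure Pipelines job names only allow alphanumeric characters and underscores. The same transformation in Python, using a test name produced by the matrix above:

```python
def job_id(test):
    # Mirrors: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }}
    return 'test_' + test.replace('/', '_').replace('.', '_').replace('-', '_')

print(job_id('2.10/linux/ubuntu1804/1'))  # test_2_10_linux_ubuntu1804_1
```
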
.github/BOTMETA.yml (vendored, 621 lines): file diff suppressed because it is too large.

.github/ISSUE_TEMPLATE/bug_report.yml (deleted file, 135 lines)
@@ -1,135 +0,0 @@
---
name: Bug report
description: Create a report to help us improve

body:
  - type: markdown
    attributes:
      value: |
        ⚠
        Verify first that your issue is not [already reported on GitHub][issue search].
        Also test if the latest release and devel branch are affected too.
        *Complete **all** sections as described, this form is processed automatically.*

        [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues

  - type: textarea
    attributes:
      label: Summary
      description: Explain the problem briefly below.
      placeholder: >-
        When I try to do X with the collection from the main branch on GitHub, Y
        breaks in a way Z under the env E. Here are all the details I know
        about this problem...
    validations:
      required: true

  - type: dropdown
    attributes:
      label: Issue Type
      # FIXME: Once GitHub allows defining the default choice, update this
      options:
        - Bug Report
    validations:
      required: true

  - type: textarea
    attributes:
      # For smaller collections we could use a multi-select and hardcode the list
      # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins
      # Select from list, filter as you type (`mysql` would only show the 3 mysql components)
      # OR freeform - doesn't seem to be supported in adaptivecards
      label: Component Name
      description: >-
        Write the short name of the module, plugin, task or feature below,
        *use your best guess if unsure*.
      placeholder: dnf, apt, yum, pip, user etc.
    validations:
      required: true

  - type: textarea
    attributes:
      label: Ansible Version
      description: >-
        Paste verbatim output from `ansible --version` between
        triple backticks.
      value: |
        ```console (paste below)
        $ ansible --version

        ```
    validations:
      required: true

  - type: textarea
    attributes:
      label: Configuration
      description: >-
        If this issue has an example piece of YAML that can help to reproduce this problem, please provide it.
        This can be a piece of YAML from, e.g., an automation, script, scene or configuration.
        Paste verbatim output from `ansible-config dump --only-changed` between quotes.
      value: |
        ```console (paste below)
        $ ansible-config dump --only-changed

        ```

  - type: textarea
    attributes:
      label: OS / Environment
      description: >-
        Provide all relevant information below, e.g. target OS versions,
        network device firmware, etc.
      placeholder: RHEL 8, CentOS Stream etc.
    validations:
      required: false

  - type: textarea
    attributes:
      label: Steps to Reproduce
      description: |
        Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also paste any playbooks, configs and commands you used.

        **HINT:** You can paste https://gist.github.com links for larger files.
      value: |
        <!--- Paste example playbooks or commands between quotes below -->
        ```yaml (paste below)

        ```
    validations:
      required: true

  - type: textarea
    attributes:
      label: Expected Results
      description: >-
        Describe what you expected to happen when running the steps above.
      placeholder: >-
        I expected X to happen because I assumed Y,
        but it did not.
    validations:
      required: true

  - type: textarea
    attributes:
      label: Actual Results
      description: |
        Describe what actually happened. If possible run with extra verbosity (`-vvvv`).

        Paste verbatim command output between quotes.
      value: |
        ```console (paste below)

        ```
  - type: checkboxes
    attributes:
      label: Code of Conduct
      description: |
        Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
      options:
        - label: I agree to follow the Ansible Code of Conduct
          required: true
...

.github/ISSUE_TEMPLATE/config.yml (deleted file, 27 lines)
@@ -1,27 +0,0 @@
---
# Ref: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser
blank_issues_enabled: false  # default: true
contact_links:
  - name: Security bug report
    url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
    about: |
      Please learn how to report security vulnerabilities here.

      For all security related bugs, email security@ansible.com
      instead of using this issue tracker and you will receive
      a prompt response.

      For more information, see
      https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html
  - name: Ansible Code of Conduct
    url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
    about: Be nice to other members of the community.
  - name: Talks to the community
    url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information
    about: Please ask and answer usage questions here
  - name: Working groups
    url: https://github.com/ansible/community/wiki
    about: Interested in improving a specific area? Become a part of a working group!
  - name: For Enterprise
    url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
    about: Red Hat offers support for the Ansible Automation Platform

.github/ISSUE_TEMPLATE/documentation_report.yml (deleted file, 111 lines)
@@ -1,111 +0,0 @@
---
name: Documentation Report
description: Ask us about docs
# NOTE: issue body is enabled to allow screenshots

body:
  - type: markdown
    attributes:
      value: |
        ⚠
        Verify first that your issue is not [already reported on GitHub][issue search].
        Also test if the latest release and devel branch are affected too.
        *Complete **all** sections as described, this form is processed automatically.*

        [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues

  - type: textarea
    attributes:
      label: Summary
      description: |
        Explain the problem briefly below, add suggestions to wording or structure.

        **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page?
      placeholder: >-
        I was reading the Collection documentation of version X and I'm having
        problems understanding Y. It would be very helpful if that got
        rephrased as Z.
    validations:
      required: true

  - type: dropdown
    attributes:
      label: Issue Type
      # FIXME: Once GitHub allows defining the default choice, update this
      options:
        - Documentation Report
    validations:
      required: true

  - type: input
    attributes:
      label: Component Name
      description: >-
        Write the short name of the rst file, module, plugin, task or
        feature below, *use your best guess if unsure*.
      placeholder: mysql_user
    validations:
      required: true

  - type: textarea
    attributes:
      label: Ansible Version
      description: >-
        Paste verbatim output from `ansible --version` between
        triple backticks.
      value: |
        ```console (paste below)
        $ ansible --version

        ```
    validations:
      required: false

  - type: textarea
    attributes:
      label: Configuration
      description: >-
        Paste verbatim output from `ansible-config dump --only-changed` between quotes.
      value: |
        ```console (paste below)
        $ ansible-config dump --only-changed

        ```
    validations:
      required: false

  - type: textarea
    attributes:
      label: OS / Environment
      description: >-
        Provide all relevant information below, e.g. OS version,
        browser, etc.
      placeholder: Fedora 33, Firefox etc.
    validations:
      required: false

  - type: textarea
    attributes:
      label: Additional Information
      description: |
        Describe how this improves the documentation, e.g. before/after situation or screenshots.

        **Tip:** It's not possible to upload the screenshot via this field directly but you can use the last textarea in this form to attach them.

        **HINT:** You can paste https://gist.github.com links for larger files.
      placeholder: >-
        When the improvement is applied, it makes it more straightforward
        to understand X.
    validations:
      required: false

  - type: checkboxes
    attributes:
      label: Code of Conduct
      description: |
        Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
      options:
        - label: I agree to follow the Ansible Code of Conduct
          required: true
...

.github/ISSUE_TEMPLATE/feature_request.yml (deleted file, 69 lines)
@@ -1,69 +0,0 @@
---
name: Feature request
description: Suggest an idea for this project

body:
  - type: markdown
    attributes:
      value: |
        ⚠
        Verify first that your issue is not [already reported on GitHub][issue search].
        Also test if the latest release and devel branch are affected too.
        *Complete **all** sections as described, this form is processed automatically.*

        [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues

  - type: textarea
    attributes:
      label: Summary
      description: Describe the new feature/improvement briefly below.
      placeholder: >-
        I am trying to do X with the collection from the main branch on GitHub and
        I think that implementing a feature Y would be very helpful for me and
        every other user of ansible-core because of Z.
    validations:
      required: true

  - type: dropdown
    attributes:
      label: Issue Type
      # FIXME: Once GitHub allows defining the default choice, update this
      options:
        - Feature Idea
    validations:
      required: true

  - type: input
    attributes:
      label: Component Name
      description: >-
        Write the short name of the module, plugin, task or feature below,
        *use your best guess if unsure*.
      placeholder: dnf, apt, yum, pip, user etc.
    validations:
      required: true

  - type: textarea
    attributes:
      label: Additional Information
      description: |
        Describe how the feature would be used, why it is needed and what it would solve.

        **HINT:** You can paste https://gist.github.com links for larger files.
      value: |
        <!--- Paste example playbooks or commands between quotes below -->
        ```yaml (paste below)

        ```
    validations:
      required: false
  - type: checkboxes
    attributes:
      label: Code of Conduct
      description: |
        Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
      options:
        - label: I agree to follow the Ansible Code of Conduct
          required: true
...

.github/workflows/codeql-analysis.yml (new vendored file, 49 lines)
@@ -0,0 +1,49 @@
name: "Code scanning - action"

on:
  schedule:
    - cron: '26 19 * * 1'

jobs:
  CodeQL-Build:

    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
        with:
          # We must fetch at least the immediate parents so that if this is
          # a pull request then we can checkout the head.
          fetch-depth: 2

      # If this run was triggered by a pull request event, then checkout
      # the head of the pull request instead of the merge commit.
      - run: git checkout HEAD^2
        if: ${{ github.event_name == 'pull_request' }}

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v1
        # Override language selection by uncommenting this and choosing your languages
        # with:
        #   languages: go, javascript, csharp, python, cpp, java

      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: Autobuild
        uses: github/codeql-action/autobuild@v1

      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 https://git.io/JvXDl

      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
      #    and modify them (or add more) to build your code if your project
      #    uses a compiled language

      #- run: |
      #    make bootstrap
      #    make release

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v1

.gitignore (vendored, modified, 81 diff lines; added/removed markers did not survive extraction, so changed lines appear consecutively in hunk order)

@@ -1,6 +1,6 @@

# Created by https://www.toptal.com/developers/gitignore/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
# Edit at https://www.toptal.com/developers/gitignore?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
# Created by https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
# Edit at https://www.gitignore.io/?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv

### dotenv ###
.env

@@ -88,7 +88,7 @@ flycheck_*.el
.nfs*

### PyCharm+all ###
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839

# User-specific stuff

@@ -98,9 +98,6 @@ flycheck_*.el
.idea/**/dictionaries
.idea/**/shelf

# AWS User-specific
.idea/**/aws.xml

# Generated files
.idea/**/contentModel.xml

@@ -121,9 +118,6 @@ flycheck_*.el
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml
# .idea/*.iml
# .idea/modules

@@ -204,6 +198,7 @@ parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg

@@ -230,25 +225,13 @@ htmlcov/
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

@@ -256,19 +239,9 @@ instance/
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
.python-version

# pipenv

@@ -278,24 +251,12 @@ ipython_config.py
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
# celery beat schedule file
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

@@ -303,6 +264,10 @@ venv.bak/
# Rope project settings
.ropeproject

# Mr Developer
.mr.developer.cfg
.project

# mkdocs documentation
/site

@@ -314,16 +279,9 @@ dmypy.json
# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

### Vim ###
# Swap
[._]*.s[a-v][a-z]
!*.svg # comment out if you don't need vector files
[._]*.sw[a-p]
[._]s[a-rt-v][a-z]
[._]ss[a-gi-z]

@@ -341,13 +299,11 @@ tags
[._]*.un~

### WebStorm ###
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839

# User-specific stuff

# AWS User-specific

# Generated files

# Sensitive or high-churn files

@@ -358,9 +314,6 @@ tags
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml
# .idea/*.iml
# .idea/modules

@@ -396,27 +349,15 @@ tags
# *.ipr

# Sonarlint plugin
# https://plugins.jetbrains.com/plugin/7973-sonarlint
.idea/**/sonarlint/

# SonarQube Plugin
# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin
.idea/**/sonarIssues.xml

# Markdown Navigator plugin
# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced
.idea/**/markdown-navigator.xml
.idea/**/markdown-navigator-enh.xml
.idea/**/markdown-navigator/

# Cache file creation bug
# See https://youtrack.jetbrains.com/issue/JBR-2257
.idea/$CACHE_FILE$

# CodeStream plugin
# https://plugins.jetbrains.com/plugin/12206-codestream
.idea/codestream.xml

### Windows ###
# Windows thumbnail cache files
Thumbs.db

@@ -443,4 +384,4 @@ $RECYCLE.BIN/
# Windows shortcuts
*.lnk

# End of https://www.toptal.com/developers/gitignore/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
# End of https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv

CHANGELOG.rst (2344 lines): file diff suppressed because it is too large.

121
CONTRIBUTING.md
121
CONTRIBUTING.md
@@ -1,121 +0,0 @@
|
||||
# Contributing
|
||||
|
||||
We follow [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our contributions and interactions within this repository.
|
||||
|
||||
If you are a committer, also refer to the [collection's committer guidelines](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md).
|
||||
|
||||
## Issue tracker
|
||||
|
||||
Whether you are looking for an opportunity to contribute or you found a bug and already know how to solve it, please go to the [issue tracker](https://github.com/ansible-collections/community.general/issues).
|
||||
There you can find feature ideas to implement, reports about bugs to solve, or submit an issue to discuss your idea before implementing it which can help choose a right direction at the beginning of your work and potentially save a lot of time and effort.
|
||||
Also somebody may already have started discussing or working on implementing the same or a similar idea,
|
||||
so you can cooperate to create a better solution together.
|
||||
|
||||
* If you are interested in starting with an easy issue, look for [issues with an `easyfix` label](https://github.com/ansible-collections/community.general/labels/easyfix).
|
||||
* Often issues that are waiting for contributors to pick up have [the `waiting_on_contributor` label](https://github.com/ansible-collections/community.general/labels/waiting_on_contributor).
|
||||
|
||||
## Open pull requests
|
||||
|
||||
Look through currently [open pull requests](https://github.com/ansible-collections/community.general/pulls).
|
||||
You can help by reviewing them. Reviews help move pull requests to merge state. Some good pull requests cannot be merged only due to a lack of reviews. And it is always worth saying that good reviews are often more valuable than pull requests themselves.
|
||||
Note that reviewing does not only mean code review, but also offering comments on new interfaces added to existing plugins/modules, interfaces of new plugins/modules, improving language (not everyone is a native english speaker), or testing bugfixes and new features!
|
||||
|
||||
Also, consider taking up a valuable, reviewed, but abandoned pull request which you could politely ask the original authors to complete yourself.
|
||||
|
||||
* Try committing your changes with an informative but short commit message.
|
||||
* Do not squash your commits and force-push to your branch if not needed. Reviews of your pull request are much easier with individual commits to comprehend the pull request history. All commits of your pull request branch will be squashed into one commit by GitHub upon merge.
|
||||
* Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the repository checkout.
|
||||
* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#changelogs-how-to). (You must not include a fragment for new modules or new plugins, except for test and filter plugins. Also you shouldn't include one for docs-only changes. If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) )
|
||||
* Avoid reformatting unrelated parts of the codebase in your PR. These types of changes will likely be requested for reversion, create additional work for reviewers, and may cause approval to be delayed.
|
||||
|
||||
You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst).
|
||||
|
||||
## Test pull requests
|
||||
|
||||
If you want to test a PR locally, refer to [our testing guide](https://github.com/ansible/community-docs/blob/main/test_pr_locally_guide.rst) for instructions on how do it quickly.
|
||||
|
||||
If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it.
|
||||
|
||||
## Run sanity, unit or integration tests locally
|
||||
|
||||
You have to check out the repository into a specific path structure to be able to run `ansible-test`. The path to the git checkout must end with `.../ansible_collections/community/general`. Please see [our testing guide](https://github.com/ansible/community-docs/blob/main/test_pr_locally_guide.rst) for instructions on how to check out the repository into a correct path structure. The short version of these instructions is:
|
||||
|
||||
```.bash
|
||||
mkdir -p ~/dev/ansible_collections/community
|
||||
git clone https://github.com/ansible-collections/community.general.git ~/dev/ansible_collections/community/general
|
||||
cd ~/dev/ansible_collections/community/general
|
||||
```
|
||||
|
||||
Then you can run `ansible-test` (which is a part of [ansible-core](https://pypi.org/project/ansible-core/)) inside the checkout. The following example commands expect that you have installed Docker or Podman. Note that Podman has only been supported by more recent ansible-core releases. If you are using Docker, the following will work with Ansible 2.9+.

The following commands show how to run sanity tests:

```.bash
# Run sanity tests for all files in the collection:
ansible-test sanity --docker -v

# Run sanity tests for the given files and directories:
ansible-test sanity --docker -v plugins/modules/system/pids.py tests/integration/targets/pids/
```

The following commands show how to run unit tests:

```.bash
# Run all unit tests:
ansible-test units --docker -v

# Run all unit tests for one Python version (a lot faster):
ansible-test units --docker -v --python 3.8

# Run a specific unit test (for the nmcli module) for one Python version:
ansible-test units --docker -v --python 3.8 tests/unit/plugins/modules/net_tools/test_nmcli.py
```

The following commands show how to run integration tests:

```.bash
# Run integration tests for the interfaces_file module in a Docker container using the
# fedora35 operating system image (the supported images depend on your ansible-core version):
ansible-test integration --docker fedora35 -v interfaces_file

# Run integration tests for the flattened lookup **without any isolation**:
ansible-test integration -v lookup_flattened
```

If you are unsure about the integration test target name for a module or plugin, you can take a look in `tests/integration/targets/`. Tests for plugins have the plugin type prepended.

## Creating new modules or plugins

Creating new modules and plugins requires a bit more work than other pull requests.

1. Please make sure that your new module or plugin is of interest to a larger audience. Very specialized modules or plugins that can only be used by very few people should better be added to more specialized collections.

2. Please do not add more than one plugin/module in one PR, especially if it is the first plugin/module you are contributing. That makes it easier for reviewers, and increases the chance that your PR will get merged. If you plan to contribute a group of plugins/modules (say, more than a module and a corresponding ``_info`` module), please mention that in the first PR. In such cases, you also have to consider whether it is better to publish the group of plugins/modules in a new collection.

3. When creating a new module or plugin, please make sure that you follow various guidelines:

   - Follow [development conventions](https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_best_practices.html);
   - Follow [documentation standards](https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_documenting.html) and the [Ansible style guide](https://docs.ansible.com/ansible/devel/dev_guide/style_guide/index.html#style-guide);
   - Make sure your modules and plugins are [GPL-3.0-or-later](https://www.gnu.org/licenses/gpl-3.0-standalone.html) licensed (new module_utils can also be [BSD-2-clause](https://opensource.org/licenses/BSD-2-Clause) licensed);
   - Make sure that new plugins and modules have tests (unit tests, integration tests, or both); it is preferable to have some tests which run in CI.

4. For modules and action plugins, make sure to create your module/plugin in the correct subdirectory, and create a symbolic link from `plugins/modules/` respectively `plugins/action/` to the actual module/plugin code. (Other plugin types should not use subdirectories.)

   - Action plugins need to be accompanied by a module, even if the module file only contains documentation (`DOCUMENTATION`, `EXAMPLES` and `RETURN`). The module must have the same name and directory path in `plugins/modules/` as the action plugin has in `plugins/action/`.

5. Make sure to add a BOTMETA entry for your new module/plugin in `.github/BOTMETA.yml`; see the sketch after this list. Search for other plugins/modules in the same directory to see how entries could look. You should list all authors either as `maintainers` or under `ignore`. People listed as `maintainers` will be pinged for new issues and PRs that modify the module/plugin or its tests.
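
As a rough sketch, a BOTMETA entry maps the file path to its maintainers (the path and GitHub handle below are made up for illustration; check neighboring entries in `.github/BOTMETA.yml` for the exact conventions used):

```yaml
files:
  $modules/system/my_new_module.py:
    maintainers: your_github_handle
```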

When you add a new plugin/module, we expect that you perform maintainer duty for at least some time after contributing it.
@@ -1,9 +0,0 @@
MIT License

Copyright (c) <year> <copyright holders>

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -1,48 +0,0 @@
PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
--------------------------------------------

1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and the Individual or Organization ("Licensee") accessing and otherwise using this software ("Python") in source or binary form and its associated documentation.

2. Subject to the terms and conditions of this License Agreement, PSF hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License Agreement and PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021 Python Software Foundation; All Rights Reserved" are retained in Python alone or in any derivative version prepared by Licensee.

3. In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to Python.

4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.

5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.

6. This License Agreement will automatically terminate upon a material breach of its terms and conditions.

7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between PSF and Licensee. This License Agreement does not grant permission to use PSF trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party.

8. By copying, installing or otherwise using Python, Licensee agrees to be bound by the terms and conditions of this License Agreement.
README.md
@@ -1,23 +1,15 @@
# Community General Collection

[CI](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
[Codecov](https://codecov.io/gh/ansible-collections/community.general)

This repository contains the `community.general` Ansible Collection. The collection is a part of the Ansible package and includes many modules and plugins supported by Ansible community which are not part of more specialized community collections.
This repo contains the `community.general` Ansible Collection. The collection includes many modules and plugins supported by Ansible community which are not part of more specialized community collections.

You can find [documentation for this collection on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).

Please note that this collection does **not** support Windows targets. Only connection plugins included in this collection might support Windows targets, and will explicitly mention that in their documentation if they do so.

## Code of Conduct

We follow [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our interactions within this project.

If you encounter abusive behavior violating the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html), please refer to the [policy violations](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html#policy-violations) section of the Code of Conduct for information on how to raise a complaint.

## Tested with Ansible

Tested with the current Ansible 2.9, ansible-base 2.10, ansible-core 2.11, ansible-core 2.12 and ansible-core 2.13 releases. Ansible versions before 2.9.10 are not supported.
Tested with the current Ansible 2.9 and 2.10 releases and the current development version of Ansible. Ansible versions before 2.9.10 are not supported.

## External requirements

@@ -29,9 +21,7 @@ Please check the included content on the [Ansible Galaxy page for this collectio

## Using this collection

This collection is shipped with the Ansible package. So if you have it installed, no more action is required.

If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/community/general) manually with the `ansible-galaxy` command-line tool:
Before using the General community collection, you need to install the collection with the `ansible-galaxy` CLI:

    ansible-galaxy collection install community.general

@@ -42,79 +32,57 @@ collections:
  - name: community.general
```

Note that if you install the collection manually, it will not be upgraded automatically when you upgrade the Ansible package. To upgrade the collection to the latest available version, run the following command:

```bash
ansible-galaxy collection install community.general --upgrade
```

You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/community/general):

```bash
ansible-galaxy collection install community.general:==X.Y.Z
```

See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details.

## Contributing to this collection

The content of this collection is made by good people just like you, a community of individuals collaborating on making the world better through developing automation software.
If you want to develop new content for this collection or improve what is already here, the easiest way to work on the collection is to clone it into one of the configured [`COLLECTIONS_PATH`](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#collections-paths), and work on it there.

We are actively accepting new contributors.
For example, if you are working in the `~/dev` directory:

All types of contributions are very welcome.

You don't know how to start? Refer to our [contribution guide](https://github.com/ansible-collections/community.general/blob/main/CONTRIBUTING.md)!

The current maintainers are listed in the [commit-rights.md](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md#people) file. If you have questions or need help, feel free to mention them in the proposals.
```
cd ~/dev
git clone git@github.com:ansible-collections/community.general.git collections/ansible_collections/community/general
export COLLECTIONS_PATH=$(pwd)/collections:$COLLECTIONS_PATH
```

You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).

Also for some notes specific to this collection see [our CONTRIBUTING documentation](https://github.com/ansible-collections/community.general/blob/main/CONTRIBUTING.md).

### Running tests

See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#testing-collections).

## Collection maintenance
### Communication

To learn how to maintain / become a maintainer of this collection, refer to:
We have a dedicated Working Group for Ansible development.

* [Committer guidelines](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md).
* [Maintainer guidelines](https://github.com/ansible/community-docs/blob/main/maintaining.rst).

It is necessary for maintainers of this collection to be subscribed to:

* The collection itself (the `Watch` button → `All Activity` in the upper right corner of the repository's homepage).
* The "Changes Impacting Collection Contributors and Maintainers" [issue](https://github.com/ansible-collections/overview/issues/45).

They also should be subscribed to Ansible's [The Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn).

## Communication

We announce important development changes and releases through Ansible's [The Bullhorn newsletter](https://eepurl.com/gZmiEP). If you are a collection developer, be sure you are subscribed.

Join us in the `#ansible` (general use questions and support), `#ansible-community` (community and collection development questions), and other [IRC channels](https://docs.ansible.com/ansible/devel/community/communication.html#irc-channels) on [Libera.chat](https://libera.chat).

We take part in the global quarterly [Ansible Contributor Summit](https://github.com/ansible/community/wiki/Contributor-Summit) virtually or in-person. Track [The Bullhorn newsletter](https://eepurl.com/gZmiEP) and join us.
You can find other people interested on the following Freenode IRC channels -

- `#ansible` - For general use questions and support.
- `#ansible-devel` - For discussions on developer topics and code related to features or bugs.
- `#ansible-community` - For discussions on community topics and community meetings.

For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community).

For more information about communication, refer to Ansible's [Communication guide](https://docs.ansible.com/ansible/devel/community/communication.html).
For more information about [communication](https://docs.ansible.com/ansible/latest/community/communication.html)

## Publishing New Version
### Publishing New Version

See the [Releasing guidelines](https://github.com/ansible/community-docs/blob/main/releasing_collections.rst) to learn how to release this collection.
Basic instructions without release branches:

1. Create `changelogs/fragments/<version>.yml` with `release_summary:` section (which must be a string, not a list).
2. Run `antsibull-changelog release --collection-flatmap yes`
3. Make sure `CHANGELOG.rst` and `changelogs/changelog.yaml` are added to git, and the deleted fragments have been removed.
4. Tag the commit with `<version>`. Push changes and tag to the main repository.

## Release notes

See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-3/CHANGELOG.rst).
See the [changelog](https://github.com/ansible-collections/community.general/blob/main/CHANGELOG.rst).

## Roadmap

In general, we plan to release a major version every six months, and minor versions every two months. Major versions can contain breaking changes, while minor versions only contain new features and bugfixes.
See [this issue](https://github.com/ansible-collections/community.general/issues/582) for information on releasing, versioning and deprecation.

See [this issue](https://github.com/ansible-collections/community.general/issues/582) for information on releasing, versioning, and deprecation.
In general, we plan to release a major version every six months, and minor versions every two months. Major versions can contain breaking changes, while minor versions only contain new features and bugfixes.

## More information

File diff suppressed because it is too large.
@@ -1,74 +0,0 @@
Committers Guidelines for community.general
===========================================

This document is based on the [Ansible committer guidelines](https://github.com/ansible/ansible/blob/b57444af14062ec96e0af75fdfc2098c74fe2d9a/docs/docsite/rst/community/committer_guidelines.rst) ([latest version](https://docs.ansible.com/ansible/devel/community/committer_guidelines.html)).

These are the guidelines for people with commit privileges on the Ansible Community General Collection GitHub repository. Please read the guidelines before you commit.

These guidelines apply to everyone. At the same time, this is NOT a process document. So just use good judgment. You have been given commit access because we trust your judgment.

That said, use the trust wisely.

If you abuse the trust and break components and builds, and so on, the trust level falls and you may be asked not to commit or you may lose your commit privileges.

Our workflow on GitHub
----------------------

As a committer, you may already know this, but our workflow forms a lot of our team policies. Please ensure you are aware of the following workflow steps:

* Fork the repository upon which you want to do some work to your own personal repository
* Work on the specific branch upon which you need to commit
* Create a Pull Request back to the collection repository and await reviews
* Adjust code as necessary based on the comments provided
* Ask someone from the other committers to do a final review and merge

Sometimes, committers merge their own pull requests. This section is a set of guidelines. If you are changing a comma in a doc or making a very minor change, you can use your best judgment. This is another trust thing. The process is critical for any major change, but for little things or getting something done quickly, use your best judgment and make sure people on the team are aware of your work.

Roles
-----
* Release managers: Merge pull requests to `stable-X` branches, create tags to do releases.
* Committers: Fine to do PRs for most things, but we should have a timebox. Hanging PRs may merge on the judgment of these devs.
* Module maintainers: Module maintainers own specific modules and have indirect commit access through the current module PR mechanisms. This is primarily [ansibullbot](https://github.com/ansibullbot)'s `shipit` mechanism.

General rules
-------------
Individuals with direct commit access to this collection repository are entrusted with powers that allow them to do a broad variety of things--probably more than we can write down. Rather than rules, treat these as general *guidelines*; individuals with this power are expected to use their best judgment.

* Do NOTs:

  - Do not commit directly.
  - Do not merge your own PRs. Someone else should have a chance to review and approve the PR merge. You have a small amount of leeway here for very minor changes.
  - Do not forget about non-standard / alternate environments. Consider the alternatives. Yes, people have bad/unusual/strange environments (like binaries from multiple init systems installed), but they are the ones who need us the most.
  - Do not drag your community team members down. Discuss the technical merits of any pull requests you review. Avoid negativity and personal comments. For more guidance on being a good community member, read the [Ansible Community Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
  - Do not forget about the maintenance burden. High-maintenance features may not be worth adding.
  - Do not break playbooks. Always keep backwards compatibility in mind.
  - Do not forget to keep it simple. Complexity breeds all kinds of problems.
  - Do not merge to branches other than `main`, especially not to `stable-X`, if you do not have explicit permission to do so.
  - Do not create tags. Tags are used in the release process, and should only be created by the people responsible for managing the stable branches.

* Do:

  - Squash, avoid merges whenever possible, use GitHub's squash commits or cherry pick if needed (bisect thanks you).
  - Be active. Committers who have no activity on the project (through merges, triage, commits, and so on) will have their permissions suspended.
  - Consider backwards compatibility (goes back to "do not break existing playbooks").
  - Write tests. PRs with tests are looked at with more priority than PRs without tests that should have them included. While not all changes require tests, be sure to add them for bug fixes or functionality changes.
  - Discuss with other committers, especially when you are unsure of something.
  - Document! If your PR is a new feature or a change to behavior, make sure you've updated all associated documentation or have notified the right people to do so.
  - Consider scope; sometimes a fix can be generalized.
  - Keep it simple, then things are maintainable, debuggable and intelligible.

Committers are expected to continue to follow the same community and contribution guidelines followed by the rest of the Ansible community.

People
------

Individuals who have been asked to become a part of this group have generally been contributing in significant ways to the community.general collection for some time. Should they agree, they are requested to add their names and GitHub IDs to this file, in the section below, through a pull request. Doing so indicates that these individuals agree to act in the ways that their fellow committers trust that they will act.

| Name             | GitHub ID    | IRC Nick      | Other |
| ---------------- | ------------ | ------------- | ----- |
| Alexei Znamensky | russoz       | russoz        |       |
| Andrew Klychkov  | andersson007 | andersson007_ |       |
| Andrew Pantuso   | Ajpantuso    | ajpantuso     |       |
| Felix Fontein    | felixfontein | felixfontein  |       |
| John R Barker    | gundalow     | gundalow      |       |
@@ -1,5 +0,0 @@
---
sections:
  - title: Guides
    toctree:
      - filter_guide
@@ -1,784 +0,0 @@
.. _ansible_collections.community.general.docsite.filter_guide:

community.general Filter Guide
==============================

The :ref:`community.general collection <plugins_in_community.general>` offers several useful filter plugins.

.. contents:: Topics

Paths
-----

The ``path_join`` filter has been added in ansible-base 2.10. If you want to use this filter, but also need to support Ansible 2.9, you can use ``community.general``'s ``path_join`` shim, ``community.general.path_join``. This filter redirects to ``path_join`` for ansible-base 2.10 and ansible-core 2.11 or newer, and re-implements the filter for Ansible 2.9.

.. code-block:: yaml+jinja

    # ansible-base 2.10 or newer:
    path: {{ ('/etc', path, 'subdir', file) | path_join }}

    # Also works with Ansible 2.9:
    path: {{ ('/etc', path, 'subdir', file) | community.general.path_join }}

.. versionadded:: 3.0.0
Abstract transformations
------------------------

Dictionaries
^^^^^^^^^^^^

You can use the ``dict_kv`` filter to create a single-entry dictionary with ``value | community.general.dict_kv(key)``:

.. code-block:: yaml+jinja

    - name: Create a single-entry dictionary
      debug:
        msg: "{{ myvar | community.general.dict_kv('thatsmyvar') }}"
      vars:
        myvar: myvalue

    - name: Create a list of dictionaries where the 'server' field is taken from a list
      debug:
        msg: >-
          {{ myservers | map('community.general.dict_kv', 'server')
                       | map('combine', common_config) }}
      vars:
        common_config:
          type: host
          database: all
        myservers:
          - server1
          - server2

This produces:

.. code-block:: ansible-output

    TASK [Create a single-entry dictionary] **************************************************
    ok: [localhost] => {
        "msg": {
            "thatsmyvar": "myvalue"
        }
    }

    TASK [Create a list of dictionaries where the 'server' field is taken from a list] *******
    ok: [localhost] => {
        "msg": [
            {
                "database": "all",
                "server": "server1",
                "type": "host"
            },
            {
                "database": "all",
                "server": "server2",
                "type": "host"
            }
        ]
    }

.. versionadded:: 2.0.0
If you need to convert a list of key-value pairs to a dictionary, you can use the ``dict`` function. Unfortunately, this function cannot be used with ``map``. For this, the ``community.general.dict`` filter can be used:

.. code-block:: yaml+jinja

    - name: Create a dictionary with the dict function
      debug:
        msg: "{{ dict([[1, 2], ['a', 'b']]) }}"

    - name: Create a dictionary with the community.general.dict filter
      debug:
        msg: "{{ [[1, 2], ['a', 'b']] | community.general.dict }}"

    - name: Create a list of dictionaries with map and the community.general.dict filter
      debug:
        msg: >-
          {{ values | map('zip', ['k1', 'k2', 'k3'])
                    | map('map', 'reverse')
                    | map('community.general.dict') }}
      vars:
        values:
          - - foo
            - 23
            - a
          - - bar
            - 42
            - b

This produces:

.. code-block:: ansible-output

    TASK [Create a dictionary with the dict function] ****************************************
    ok: [localhost] => {
        "msg": {
            "1": 2,
            "a": "b"
        }
    }

    TASK [Create a dictionary with the community.general.dict filter] ************************
    ok: [localhost] => {
        "msg": {
            "1": 2,
            "a": "b"
        }
    }

    TASK [Create a list of dictionaries with map and the community.general.dict filter] ******
    ok: [localhost] => {
        "msg": [
            {
                "k1": "foo",
                "k2": 23,
                "k3": "a"
            },
            {
                "k1": "bar",
                "k2": 42,
                "k3": "b"
            }
        ]
    }

.. versionadded:: 3.0.0
Grouping
^^^^^^^^

If you have a list of dictionaries, the Jinja2 ``groupby`` filter allows you to group the list by an attribute. This results in a list of ``(grouper, list)`` namedtuples, where ``list`` contains all dictionaries where the selected attribute equals ``grouper``. If you know that for every ``grouper``, there will be at most one entry in that list, you can use the ``community.general.groupby_as_dict`` filter to convert the original list into a dictionary which maps ``grouper`` to the corresponding dictionary.

One example is ``ansible_facts.mounts``, which is a list of dictionaries where each has one ``device`` element to indicate the device which is mounted. Therefore, ``ansible_facts.mounts | community.general.groupby_as_dict('device')`` is a dictionary mapping a device to the mount information:

.. code-block:: yaml+jinja

    - name: Output mount facts grouped by device name
      debug:
        var: ansible_facts.mounts | community.general.groupby_as_dict('device')

    - name: Output mount facts grouped by mount point
      debug:
        var: ansible_facts.mounts | community.general.groupby_as_dict('mount')

This produces:

.. code-block:: ansible-output

    TASK [Output mount facts grouped by device name] ******************************************
    ok: [localhost] => {
        "ansible_facts.mounts | community.general.groupby_as_dict('device')": {
            "/dev/sda1": {
                "block_available": 2000,
                "block_size": 4096,
                "block_total": 2345,
                "block_used": 345,
                "device": "/dev/sda1",
                "fstype": "ext4",
                "inode_available": 500,
                "inode_total": 512,
                "inode_used": 12,
                "mount": "/boot",
                "options": "rw,relatime,data=ordered",
                "size_available": 56821,
                "size_total": 543210,
                "uuid": "ab31cade-d9c1-484d-8482-8a4cbee5241a"
            },
            "/dev/sda2": {
                "block_available": 1234,
                "block_size": 4096,
                "block_total": 12345,
                "block_used": 11111,
                "device": "/dev/sda2",
                "fstype": "ext4",
                "inode_available": 1111,
                "inode_total": 1234,
                "inode_used": 123,
                "mount": "/",
                "options": "rw,relatime",
                "size_available": 42143,
                "size_total": 543210,
                "uuid": "abcdef01-2345-6789-0abc-def012345678"
            }
        }
    }

    TASK [Output mount facts grouped by mount point] ******************************************
    ok: [localhost] => {
        "ansible_facts.mounts | community.general.groupby_as_dict('mount')": {
            "/": {
                "block_available": 1234,
                "block_size": 4096,
                "block_total": 12345,
                "block_used": 11111,
                "device": "/dev/sda2",
                "fstype": "ext4",
                "inode_available": 1111,
                "inode_total": 1234,
                "inode_used": 123,
                "mount": "/",
                "options": "rw,relatime",
                "size_available": 42143,
                "size_total": 543210,
                "uuid": "bdf50b7d-4859-40af-8665-c637ee7a7808"
            },
            "/boot": {
                "block_available": 2000,
                "block_size": 4096,
                "block_total": 2345,
                "block_used": 345,
                "device": "/dev/sda1",
                "fstype": "ext4",
                "inode_available": 500,
                "inode_total": 512,
                "inode_used": 12,
                "mount": "/boot",
                "options": "rw,relatime,data=ordered",
                "size_available": 56821,
                "size_total": 543210,
                "uuid": "ab31cade-d9c1-484d-8482-8a4cbee5241a"
            }
        }
    }

.. versionadded:: 3.0.0
Merging lists of dictionaries
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If you have two lists of dictionaries and want to combine them into a list of merged dictionaries, where two dictionaries are merged if they coincide in one attribute, you can use the ``lists_mergeby`` filter.

.. code-block:: yaml+jinja

    - name: Merge two lists by common attribute 'name'
      debug:
        var: list1 | community.general.lists_mergeby(list2, 'name')
      vars:
        list1:
          - name: foo
            extra: true
          - name: bar
            extra: false
          - name: meh
            extra: true
        list2:
          - name: foo
            path: /foo
          - name: baz
            path: /bazzz

This produces:

.. code-block:: ansible-output

    TASK [Merge two lists by common attribute 'name'] ****************************************
    ok: [localhost] => {
        "list1 | community.general.lists_mergeby(list2, 'name')": [
            {
                "extra": false,
                "name": "bar"
            },
            {
                "name": "baz",
                "path": "/bazzz"
            },
            {
                "extra": true,
                "name": "foo",
                "path": "/foo"
            },
            {
                "extra": true,
                "name": "meh"
            }
        ]
    }

.. versionadded:: 2.0.0
Working with times
------------------

The ``to_time_unit`` filter allows you to convert times from a human-readable string to a unit. For example, ``'4h 30min 12second' | community.general.to_time_unit('hour')`` gives the number of hours that correspond to 4 hours, 30 minutes and 12 seconds.

There are shorthands to directly convert to various units, like ``to_hours``, ``to_minutes``, ``to_seconds``, and so on. The following table lists all units that can be used:

.. list-table:: Units
   :widths: 25 25 25 25
   :header-rows: 1

   * - Unit name
     - Unit value in seconds
     - Unit strings for filter
     - Shorthand filter
   * - Millisecond
     - 1/1000 second
     - ``ms``, ``millisecond``, ``milliseconds``, ``msec``, ``msecs``, ``msecond``, ``mseconds``
     - ``to_milliseconds``
   * - Second
     - 1 second
     - ``s``, ``sec``, ``secs``, ``second``, ``seconds``
     - ``to_seconds``
   * - Minute
     - 60 seconds
     - ``m``, ``min``, ``mins``, ``minute``, ``minutes``
     - ``to_minutes``
   * - Hour
     - 60*60 seconds
     - ``h``, ``hour``, ``hours``
     - ``to_hours``
   * - Day
     - 24*60*60 seconds
     - ``d``, ``day``, ``days``
     - ``to_days``
   * - Week
     - 7*24*60*60 seconds
     - ``w``, ``week``, ``weeks``
     - ``to_weeks``
   * - Month
     - 30*24*60*60 seconds
     - ``mo``, ``month``, ``months``
     - ``to_months``
   * - Year
     - 365*24*60*60 seconds
     - ``y``, ``year``, ``years``
     - ``to_years``

Note that months and years use a simplified representation: a month is 30 days, and a year is 365 days. If you need different definitions of months or years, you can pass them as keyword arguments. For example, if you want a year to be 365.25 days, and a month to be 30.5 days, you can write ``'11months 4' | community.general.to_years(year=365.25, month=30.5)``. These keyword arguments can be specified to ``to_time_unit`` and to all shorthand filters.

.. code-block:: yaml+jinja

    - name: Convert string to seconds
      debug:
        msg: "{{ '30h 20m 10s 123ms' | community.general.to_time_unit('seconds') }}"

    - name: Convert string to hours
      debug:
        msg: "{{ '30h 20m 10s 123ms' | community.general.to_hours }}"

    - name: Convert string to years (using 365.25 days == 1 year)
      debug:
        msg: "{{ '400d 15h' | community.general.to_years(year=365.25) }}"

This produces:

.. code-block:: ansible-output

    TASK [Convert string to seconds] **********************************************************
    ok: [localhost] => {
        "msg": "109210.123"
    }

    TASK [Convert string to hours] ************************************************************
    ok: [localhost] => {
        "msg": "30.336145277778"
    }

    TASK [Convert string to years (using 365.25 days == 1 year)] ******************************
    ok: [localhost] => {
        "msg": "1.096851471595"
    }

.. versionadded:: 0.2.0
Working with versions
---------------------

If you need to sort a list of version numbers, the Jinja ``sort`` filter is problematic. Since it sorts lexicographically, ``2.10`` will come before ``2.9``. To treat version numbers correctly, you can use the ``version_sort`` filter:

.. code-block:: yaml+jinja

    - name: Sort list by version number
      debug:
        var: ansible_versions | community.general.version_sort
      vars:
        ansible_versions:
          - '2.8.0'
          - '2.11.0'
          - '2.7.0'
          - '2.10.0'
          - '2.9.0'

This produces:

.. code-block:: ansible-output

    TASK [Sort list by version number] ********************************************************
    ok: [localhost] => {
        "ansible_versions | community.general.version_sort": [
            "2.7.0",
            "2.8.0",
            "2.9.0",
            "2.10.0",
            "2.11.0"
        ]
    }

.. versionadded:: 2.2.0
Creating identifiers
--------------------

The following filters allow you to create identifiers.

Hashids
^^^^^^^

`Hashids <https://hashids.org/>`_ allow you to convert sequences of integers to short unique string identifiers. This filter needs the `hashids Python library <https://pypi.org/project/hashids/>`_ installed on the controller.

.. code-block:: yaml+jinja

    - name: "Create hashid"
      debug:
        msg: "{{ [1234, 5, 6] | community.general.hashids_encode }}"

    - name: "Decode hashid"
      debug:
        msg: "{{ 'jm2Cytn' | community.general.hashids_decode }}"

This produces:

.. code-block:: ansible-output

    TASK [Create hashid] **********************************************************************
    ok: [localhost] => {
        "msg": "jm2Cytn"
    }

    TASK [Decode hashid] **********************************************************************
    ok: [localhost] => {
        "msg": [
            1234,
            5,
            6
        ]
    }

The hashids filters accept keyword arguments to allow fine-tuning the hashids generated:

:salt: String to use as salt when hashing.
:alphabet: String of 16 or more unique characters to produce a hash.
:min_length: Minimum length of hash produced.
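
For illustration, here is a minimal sketch of passing these keyword arguments (not from the original guide; the salt value is made up). Note that decoding only works when the same parameters are used again:

.. code-block:: yaml+jinja

    - name: "Create a salted hashid with a minimum length"
      debug:
        msg: "{{ [1234, 5, 6] | community.general.hashids_encode(salt='my secret salt', min_length=12) }}"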

.. versionadded:: 3.0.0
Random MACs
^^^^^^^^^^^

You can use the ``random_mac`` filter to complete a partial `MAC address <https://en.wikipedia.org/wiki/MAC_address>`_ to a random 6-byte MAC address.

.. code-block:: yaml+jinja

    - name: "Create a random MAC starting with ff:"
      debug:
        msg: "{{ 'FF' | community.general.random_mac }}"

    - name: "Create a random MAC starting with 00:11:22:"
      debug:
        msg: "{{ '00:11:22' | community.general.random_mac }}"

This produces:

.. code-block:: ansible-output

    TASK [Create a random MAC starting with ff:] **********************************************
    ok: [localhost] => {
        "msg": "ff:69:d3:78:7f:b4"
    }

    TASK [Create a random MAC starting with 00:11:22:] ****************************************
    ok: [localhost] => {
        "msg": "00:11:22:71:5d:3b"
    }

You can also initialize the random number generator from a seed to create random-but-idempotent MAC addresses:

.. code-block:: yaml+jinja

    "{{ '52:54:00' | community.general.random_mac(seed=inventory_hostname) }}"
Conversions
-----------

Parsing CSV files
^^^^^^^^^^^^^^^^^

Ansible offers the :ref:`community.general.read_csv module <ansible_collections.community.general.read_csv_module>` to read CSV files. Sometimes you need to parse CSV data from a string instead. For this, the ``from_csv`` filter exists.

.. code-block:: yaml+jinja

    - name: "Parse CSV from string"
      debug:
        msg: "{{ csv_string | community.general.from_csv }}"
      vars:
        csv_string: |
          foo,bar,baz
          1,2,3
          you,this,then

This produces:

.. code-block:: ansible-output

    TASK [Parse CSV from string] **************************************************************
    ok: [localhost] => {
        "msg": [
            {
                "bar": "2",
                "baz": "3",
                "foo": "1"
            },
            {
                "bar": "this",
                "baz": "then",
                "foo": "you"
            }
        ]
    }

The ``from_csv`` filter has several keyword arguments to control its behavior:

:dialect: Dialect of the CSV file. Default is ``excel``. Other possible choices are ``excel-tab`` and ``unix``. If one of ``delimiter``, ``skipinitialspace`` or ``strict`` is specified, ``dialect`` is ignored.
:fieldnames: A set of column names to use. If not provided, the first line of the CSV is assumed to contain the column names.
:delimiter: Sets the delimiter to use. Default depends on the dialect used.
:skipinitialspace: Set to ``true`` to ignore space directly after the delimiter. Default depends on the dialect used (usually ``false``).
:strict: Set to ``true`` to error out on invalid CSV input.
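
As an illustration (a minimal sketch, not from the original guide), parsing semicolon-separated data without a header row could combine ``delimiter`` and ``fieldnames``:

.. code-block:: yaml+jinja

    - name: "Parse semicolon-separated values without a header row"
      debug:
        msg: "{{ csv_string | community.general.from_csv(delimiter=';', fieldnames=['foo', 'bar', 'baz']) }}"
      vars:
        csv_string: |
          1;2;3
          you;this;then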

.. versionadded:: 3.0.0
Converting to JSON
^^^^^^^^^^^^^^^^^^

`JC <https://pypi.org/project/jc/>`_ is a CLI tool and Python library which allows you to interpret the output of various CLI programs as JSON. It is also available as a filter in community.general. This filter needs the `jc Python library <https://pypi.org/project/jc/>`_ installed on the controller.

.. code-block:: yaml+jinja

    - name: Run 'ls' to list files in /
      command: ls /
      register: result

    - name: Parse the ls output
      debug:
        msg: "{{ result.stdout | community.general.jc('ls') }}"

This produces:

.. code-block:: ansible-output

    TASK [Run 'ls' to list files in /] ********************************************************
    changed: [localhost]

    TASK [Parse the ls output] ****************************************************************
    ok: [localhost] => {
        "msg": [
            {
                "filename": "bin"
            },
            {
                "filename": "boot"
            },
            {
                "filename": "dev"
            },
            {
                "filename": "etc"
            },
            {
                "filename": "home"
            },
            {
                "filename": "lib"
            },
            {
                "filename": "proc"
            },
            {
                "filename": "root"
            },
            {
                "filename": "run"
            },
            {
                "filename": "tmp"
            }
        ]
    }

.. versionadded:: 2.0.0
.. _ansible_collections.community.general.docsite.json_query_filter:

Selecting JSON data: JSON queries
---------------------------------

To select a single element or a data subset from a complex data structure in JSON format (for example, Ansible facts), use the ``json_query`` filter. The ``json_query`` filter lets you query a complex JSON structure and iterate over it using a loop structure.

.. note:: You must manually install the **jmespath** dependency on the Ansible controller before using this filter. This filter is built upon **jmespath**, and you can use the same syntax. For examples, see `jmespath examples <http://jmespath.org/examples.html>`_.

Consider this data structure:

.. code-block:: yaml+jinja

    {
        "domain_definition": {
            "domain": {
                "cluster": [
                    {
                        "name": "cluster1"
                    },
                    {
                        "name": "cluster2"
                    }
                ],
                "server": [
                    {
                        "name": "server11",
                        "cluster": "cluster1",
                        "port": "8080"
                    },
                    {
                        "name": "server12",
                        "cluster": "cluster1",
                        "port": "8090"
                    },
                    {
                        "name": "server21",
                        "cluster": "cluster2",
                        "port": "9080"
                    },
                    {
                        "name": "server22",
                        "cluster": "cluster2",
                        "port": "9090"
                    }
                ],
                "library": [
                    {
                        "name": "lib1",
                        "target": "cluster1"
                    },
                    {
                        "name": "lib2",
                        "target": "cluster2"
                    }
                ]
            }
        }
    }

To extract all clusters from this structure, you can use the following query:

.. code-block:: yaml+jinja

    - name: Display all cluster names
      ansible.builtin.debug:
        var: item
      loop: "{{ domain_definition | community.general.json_query('domain.cluster[*].name') }}"

To extract all server names:

.. code-block:: yaml+jinja

    - name: Display all server names
      ansible.builtin.debug:
        var: item
      loop: "{{ domain_definition | community.general.json_query('domain.server[*].name') }}"

To extract ports from cluster1:

.. code-block:: yaml+jinja

    - name: Display all ports from cluster1
      ansible.builtin.debug:
        var: item
      loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}"
      vars:
        server_name_cluster1_query: "domain.server[?cluster=='cluster1'].port"

.. note:: You can use a variable to make the query more readable.

To print out the ports from cluster1 in a comma-separated string:

.. code-block:: yaml+jinja

    - name: Display all ports from cluster1 as a string
      ansible.builtin.debug:
        msg: "{{ domain_definition | community.general.json_query('domain.server[?cluster==`cluster1`].port') | join(', ') }}"

.. note:: In the example above, quoting literals using backticks avoids escaping quotes and maintains readability.

You can use YAML `single quote escaping <https://yaml.org/spec/current.html#id2534365>`_:

.. code-block:: yaml+jinja

    - name: Display all ports from cluster1
      ansible.builtin.debug:
        var: item
      loop: "{{ domain_definition | community.general.json_query('domain.server[?cluster==''cluster1''].port') }}"

.. note:: Escaping single quotes within single quotes in YAML is done by doubling the single quote.

To get a hash map with all ports and names of a cluster:

.. code-block:: yaml+jinja

    - name: Display all server ports and names from cluster1
      ansible.builtin.debug:
        var: item
      loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}"
      vars:
        server_name_cluster1_query: "domain.server[?cluster=='cluster1'].{name: name, port: port}"

To extract ports from all clusters with name starting with 'server1':

.. code-block:: yaml+jinja

    - name: Display all ports from cluster1
      ansible.builtin.debug:
        msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}"
      vars:
        server_name_query: "domain.server[?starts_with(name,'server1')].port"

To extract ports from all clusters with name containing 'server1':

.. code-block:: yaml+jinja

    - name: Display all ports from cluster1
      ansible.builtin.debug:
        msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}"
      vars:
        server_name_query: "domain.server[?contains(name,'server1')].port"

.. note:: When using ``starts_with`` and ``contains``, you have to use the ``to_json | from_json`` filter for correct parsing of the data structure.
Working with Unicode
--------------------

`Unicode <https://unicode.org/main.html>`_ makes it possible to produce two strings which may be visually equivalent, but are comprised of distinctly different characters/character sequences. To address this, Unicode defines `normalization forms <https://unicode.org/reports/tr15/>`_ which avoid these distinctions by choosing a unique character sequence for a given visual representation.

You can use the ``community.general.unicode_normalize`` filter to normalize Unicode strings within your playbooks.

.. code-block:: yaml+jinja

    - name: Compare Unicode representations
      debug:
        msg: "{{ with_combining_character | community.general.unicode_normalize == without_combining_character }}"
      vars:
        with_combining_character: "{{ 'Mayagu\u0308ez' }}"
        without_combining_character: Mayagüez

This produces:

.. code-block:: ansible-output

    TASK [Compare Unicode representations] ********************************************************
    ok: [localhost] => {
        "msg": true
    }

The ``community.general.unicode_normalize`` filter accepts a keyword argument to select the Unicode form used to normalize the input string.

:form: One of ``'NFC'`` (default), ``'NFD'``, ``'NFKC'``, or ``'NFKD'``. See the `Unicode reference <https://unicode.org/reports/tr15/>`_ for more information.
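
As an illustration (a minimal sketch, not from the original guide), the form is passed as a regular keyword argument:

.. code-block:: yaml+jinja

    - name: Normalize a string using the NFKD form
      debug:
        msg: "{{ 'Mayagüez' | community.general.unicode_normalize(form='NFKD') }}"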

.. versionadded:: 3.7.0
@@ -1,14 +1,17 @@
namespace: community
name: general
version: 3.8.10
version: 1.3.6
readme: README.md
authors:
  - Ansible (https://github.com/ansible)
description: null
license_file: COPYING
tags: [community]
# NOTE: No dependencies are expected to be added here
# dependencies:
# NOTE: No more dependencies can be added to this list
dependencies:
  ansible.netcommon: '>=1.0.0'
  community.kubernetes: '>=1.0.0'
  google.cloud: '>=1.0.0'
repository: https://github.com/ansible-collections/community.general
documentation: https://docs.ansible.com/ansible/latest/collections/community/general/
homepage: https://github.com/ansible-collections/community.general
meta/runtime.yml
File diff suppressed because it is too large.
@@ -1,4 +1,3 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2020, quidame <quidame@poivron.org>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
@@ -8,7 +7,7 @@ __metaclass__ = type
|
||||
import time
|
||||
|
||||
from ansible.plugins.action import ActionBase
|
||||
from ansible.errors import AnsibleActionFail, AnsibleConnectionFailure
|
||||
from ansible.errors import AnsibleError, AnsibleActionFail, AnsibleConnectionFailure
|
||||
from ansible.utils.vars import merge_hash
|
||||
from ansible.utils.display import Display
|
||||
|
||||
@@ -41,27 +40,19 @@ class ActionModule(ActionBase):
|
||||
"(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
|
||||
"'ansible_timeout' (=%s) (recommended).")
|
||||
|
||||
def _async_result(self, async_status_args, task_vars, timeout):
|
||||
def _async_result(self, module_args, task_vars, timeout):
|
||||
'''
|
||||
Retrieve results of the asynchonous task, and display them in place of
|
||||
the async wrapper results (those with the ansible_job_id key).
|
||||
'''
|
||||
async_status = self._task.copy()
|
||||
async_status.args = async_status_args
|
||||
async_status.action = 'ansible.builtin.async_status'
|
||||
async_status.async_val = 0
|
||||
async_action = self._shared_loader_obj.action_loader.get(
|
||||
async_status.action, task=async_status, connection=self._connection,
|
||||
play_context=self._play_context, loader=self._loader, templar=self._templar,
|
||||
shared_loader_obj=self._shared_loader_obj)
|
||||
|
||||
if async_status.args['mode'] == 'cleanup':
|
||||
return async_action.run(task_vars=task_vars)
|
||||
|
||||
# At least one iteration is required, even if timeout is 0.
|
||||
for dummy in range(max(1, timeout)):
|
||||
async_result = async_action.run(task_vars=task_vars)
|
||||
if async_result.get('finished', 0) == 1:
|
||||
for i in range(max(1, timeout)):
|
||||
async_result = self._execute_module(
|
||||
module_name='ansible.builtin.async_status',
|
||||
module_args=module_args,
|
||||
task_vars=task_vars,
|
||||
wrap_async=False)
|
||||
if async_result['finished'] == 1:
|
||||
break
|
||||
time.sleep(min(1, timeout))
|
||||
|
||||
@@ -85,6 +76,7 @@ class ActionModule(ActionBase):
|
||||
task_async = self._task.async_val
|
||||
check_mode = self._play_context.check_mode
|
||||
max_timeout = self._connection._play_context.timeout
|
||||
module_name = self._task.action
|
||||
module_args = self._task.args
|
||||
|
||||
if module_args.get('state', None) == 'restored':
|
||||
@@ -106,16 +98,32 @@ class ActionModule(ActionBase):
|
||||
task_async,
|
||||
max_timeout))
|
||||
|
||||
# inject the async directory based on the shell option into the
|
||||
# module args
|
||||
async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
|
||||
# BEGIN snippet from async_status action plugin
|
||||
env_async_dir = [e for e in self._task.environment if
|
||||
"ANSIBLE_ASYNC_DIR" in e]
|
||||
if len(env_async_dir) > 0:
|
||||
# for backwards compatibility we need to get the dir from
|
||||
# ANSIBLE_ASYNC_DIR that is defined in the environment. This is
|
||||
# deprecated and will be removed in favour of shell options
|
||||
async_dir = env_async_dir[0]['ANSIBLE_ASYNC_DIR']
|
||||
|
||||
msg = "Setting the async dir from the environment keyword " \
|
||||
"ANSIBLE_ASYNC_DIR is deprecated. Set the async_dir " \
|
||||
"shell option instead"
|
||||
display.deprecated(msg, version='2.0.0',
|
||||
collection_name='community.general') # was Ansible 2.12
|
||||
else:
|
||||
# inject the async directory based on the shell option into the
|
||||
# module args
|
||||
async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
|
||||
# END snippet from async_status action plugin
|
||||
|
||||
# Bind the loop max duration to consistent values on both
|
||||
# remote and local sides (if not the same, make the loop
|
||||
# longer on the controller); and set a backup file path.
|
||||
module_args['_timeout'] = task_async
|
||||
module_args['_back'] = '%s/iptables.state' % async_dir
|
||||
async_status_args = dict(mode='status')
|
||||
async_status_args = dict(_async_dir=async_dir)
|
||||
confirm_cmd = 'rm -f %s' % module_args['_back']
|
||||
starter_cmd = 'touch %s.starter' % module_args['_back']
|
||||
remaining_time = max(task_async, max_timeout)
|
||||
@@ -141,7 +149,7 @@ class ActionModule(ActionBase):
|
||||
# The module is aware to not process the main iptables-restore
|
||||
# command before finding (and deleting) the 'starter' cookie on
|
||||
# the host, so the previous query will not reach ssh timeout.
|
||||
dummy = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE)
|
||||
garbage = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE)
|
||||
|
||||
# As the main command is not yet executed on the target, here
|
||||
# 'finished' means 'failed before main command be executed'.
|
@@ -151,7 +159,7 @@ class ActionModule(ActionBase):
         except AttributeError:
             pass

-        for dummy in range(max_timeout):
+        for x in range(max_timeout):
             time.sleep(1)
             remaining_time -= 1
             # - AnsibleConnectionFailure covers rejected requests (i.e.
@@ -159,7 +167,7 @@ class ActionModule(ActionBase):
             # - ansible_timeout is able to cover dropped requests (due
             # to a rule or policy DROP) if not lower than async_val.
             try:
-                dummy = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE)
+                garbage = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE)
                 break
             except AnsibleConnectionFailure:
                 continue
@@ -172,12 +180,16 @@ class ActionModule(ActionBase):
                     del result[key]

         if result.get('invocation', {}).get('module_args'):
-            for key in ('_back', '_timeout', '_async_dir', 'jid'):
-                if result['invocation']['module_args'].get(key):
-                    del result['invocation']['module_args'][key]
+            if '_timeout' in result['invocation']['module_args']:
+                del result['invocation']['module_args']['_back']
+                del result['invocation']['module_args']['_timeout']

         async_status_args['mode'] = 'cleanup'
-        dummy = self._async_result(async_status_args, task_vars, 0)
+        garbage = self._execute_module(
+            module_name='ansible.builtin.async_status',
+            module_args=async_status_args,
+            task_vars=task_vars,
+            wrap_async=False)

         if not wrap_async:
             # remove a temporary path we created
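Both sides of the hunk above scrub plugin-internal keys out of `result['invocation']['module_args']` before the result is returned; the newer branch removes the full set, the older one only `_back`/`_timeout`. A compact sketch of the same cleanup using `dict.pop()` with a default (an illustration, not the plugin's exact code):

```python
result = {'invocation': {'module_args': {
    '_back': '/tmp/iptables.state', '_timeout': 30, 'state': 'saved'}}}

module_args = result.get('invocation', {}).get('module_args', {})
for key in ('_back', '_timeout', '_async_dir', 'jid'):
    module_args.pop(key, None)  # pop with a default avoids KeyError if absent

print(result)  # -> {'invocation': {'module_args': {'state': 'saved'}}}
```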
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright: (c) 2020, Amin Vakil <info@aminvakil.com>
 # Copyright: (c) 2016-2018, Matt Davis <mdavis@ansible.com>
 # Copyright: (c) 2018, Sam Doran <sdoran@redhat.com>
@@ -8,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

 from ansible.errors import AnsibleError, AnsibleConnectionFailure
-from ansible.module_utils.common.text.converters import to_native, to_text
+from ansible.module_utils._text import to_native, to_text
 from ansible.module_utils.common.collections import is_string
 from ansible.plugins.action import ActionBase
 from ansible.utils.display import Display
@@ -5,11 +5,11 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

 DOCUMENTATION = '''
-    name: doas
+    become: doas
     short_description: Do As user
     description:
         - This become plugins allows your remote/login user to execute commands as another user via the doas utility.
-    author: Ansible Core Team
+    author: ansible (@core)
     options:
         become_user:
             description: User you 'become' to execute the task
@@ -81,7 +81,7 @@ DOCUMENTATION = '''

 import re

-from ansible.module_utils.common.text.converters import to_bytes
+from ansible.module_utils._text import to_bytes
 from ansible.plugins.become import BecomeBase
@@ -4,11 +4,11 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

 DOCUMENTATION = '''
-    name: dzdo
+    become: dzdo
     short_description: Centrify's Direct Authorize
     description:
         - This become plugins allows your remote/login user to execute commands as another user via the dzdo utility.
-    author: Ansible Core Team
+    author: ansible (@core)
     options:
         become_user:
             description: User you 'become' to execute the task
@@ -5,11 +5,11 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

 DOCUMENTATION = '''
-    name: ksu
+    become: ksu
     short_description: Kerberos substitute user
     description:
         - This become plugins allows your remote/login user to execute commands as another user via the ksu utility.
-    author: Ansible Core Team
+    author: ansible (@core)
     options:
         become_user:
             description: User you 'become' to execute the task
@@ -82,7 +82,7 @@ DOCUMENTATION = '''

 import re

-from ansible.module_utils.common.text.converters import to_bytes
+from ansible.module_utils._text import to_bytes
 from ansible.plugins.become import BecomeBase
@@ -5,11 +5,11 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

 DOCUMENTATION = '''
-    name: machinectl
+    become: machinectl
     short_description: Systemd's machinectl privilege escalation
     description:
         - This become plugins allows your remote/login user to execute commands as another user via the machinectl utility.
-    author: Ansible Core Team
+    author: ansible (@core)
     options:
         become_user:
             description: User you 'become' to execute the task
@@ -5,11 +5,11 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

 DOCUMENTATION = '''
-    name: pbrun
+    become: pbrun
     short_description: PowerBroker run
     description:
         - This become plugins allows your remote/login user to execute commands as another user via the pbrun utility.
-    author: Ansible Core Team
+    author: ansible (@core)
     options:
         become_user:
             description: User you 'become' to execute the task
@@ -5,11 +5,11 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

 DOCUMENTATION = '''
-    name: pfexec
+    become: pfexec
     short_description: profile based execution
     description:
         - This become plugins allows your remote/login user to execute commands as another user via the pfexec utility.
-    author: Ansible Core Team
+    author: ansible (@core)
     options:
         become_user:
             description:
@@ -5,11 +5,11 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

 DOCUMENTATION = '''
-    name: pmrun
+    become: pmrun
     short_description: Privilege Manager run
     description:
         - This become plugins allows your remote/login user to execute commands as another user via the pmrun utility.
-    author: Ansible Core Team
+    author: ansible (@core)
     options:
         become_exe:
             description: Sudo executable
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

 DOCUMENTATION = '''
-    name: sesu
+    become: sesu
     short_description: CA Privileged Access Manager
     description:
         - This become plugins allows your remote/login user to execute commands as another user via the sesu utility.
@@ -1,91 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2021, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = """
-    name: sudosu
-    short_description: Run tasks using sudo su -
-    description:
-        - This become plugins allows your remote/login user to execute commands as another user via the C(sudo) and C(su) utilities combined.
-    author:
-        - Dag Wieers (@dagwieers)
-    version_added: 2.4.0
-    options:
-        become_user:
-            description: User you 'become' to execute the task.
-            default: root
-            ini:
-                - section: privilege_escalation
-                  key: become_user
-                - section: sudo_become_plugin
-                  key: user
-            vars:
-                - name: ansible_become_user
-                - name: ansible_sudo_user
-            env:
-                - name: ANSIBLE_BECOME_USER
-                - name: ANSIBLE_SUDO_USER
-        become_flags:
-            description: Options to pass to C(sudo).
-            default: -H -S -n
-            ini:
-                - section: privilege_escalation
-                  key: become_flags
-                - section: sudo_become_plugin
-                  key: flags
-            vars:
-                - name: ansible_become_flags
-                - name: ansible_sudo_flags
-            env:
-                - name: ANSIBLE_BECOME_FLAGS
-                - name: ANSIBLE_SUDO_FLAGS
-        become_pass:
-            description: Password to pass to C(sudo).
-            required: false
-            vars:
-                - name: ansible_become_password
-                - name: ansible_become_pass
-                - name: ansible_sudo_pass
-            env:
-                - name: ANSIBLE_BECOME_PASS
-                - name: ANSIBLE_SUDO_PASS
-            ini:
-                - section: sudo_become_plugin
-                  key: password
-"""
-
-
-from ansible.plugins.become import BecomeBase
-
-
-class BecomeModule(BecomeBase):
-
-    name = 'community.general.sudosu'
-
-    # messages for detecting prompted password issues
-    fail = ('Sorry, try again.',)
-    missing = ('Sorry, a password is required to run sudo', 'sudo: a password is required')
-
-    def build_become_command(self, cmd, shell):
-        super(BecomeModule, self).build_become_command(cmd, shell)
-
-        if not cmd:
-            return cmd
-
-        becomecmd = 'sudo'
-
-        flags = self.get_option('become_flags') or ''
-        prompt = ''
-        if self.get_option('become_pass'):
-            self.prompt = '[sudo via ansible, key=%s] password:' % self._id
-            if flags:  # this could be simplified, but kept as is for now for backwards string matching
-                flags = flags.replace('-n', '')
-            prompt = '-p "%s"' % (self.prompt)
-
-        user = self.get_option('become_user') or ''
-        if user:
-            user = '%s' % (user)
-
-        return ' '.join([becomecmd, flags, prompt, 'su -l', user, self._build_success_command(cmd, shell)])
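The removed `build_become_command` concatenates `sudo`, the configured flags (with `-n` stripped when a password will be fed), an optional custom prompt, and `su -l <user>` around the wrapped command. A sketch of the string it produces under the default options (`-H -S -n`, user `root`); the wrapped command here is a stand-in for what `_build_success_command()` would generate:

```python
def build_sudosu_cmd(cmd, flags='-H -S -n', user='root', prompt=''):
    # mirrors the ' '.join([...]) at the end of build_become_command
    return ' '.join(['sudo', flags, prompt, 'su -l', user, cmd])

print(build_sudosu_cmd('/bin/sh -c "echo BECOME-SUCCESS; id -un"'))
# -> sudo -H -S -n  su -l root /bin/sh -c "echo BECOME-SUCCESS; id -un"
```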
plugins/cache/memcached.py (vendored, 21 changed lines)
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # (c) 2014, Brian Coca, Josh Drake, et al
 # (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -8,7 +7,7 @@ __metaclass__ = type

 DOCUMENTATION = '''
     author: Unknown (!UNKNOWN)
-    name: memcached
+    cache: memcached
     short_description: Use memcached DB for cache
     description:
        - This cache uses JSON formatted, per host records saved in memcached.
@@ -20,7 +19,6 @@ DOCUMENTATION = '''
          - List of connection information for the memcached DBs
        default: ['127.0.0.1:11211']
        type: list
-       elements: string
        env:
          - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
        ini:
@@ -55,7 +53,6 @@ from ansible import constants as C
 from ansible.errors import AnsibleError
 from ansible.module_utils.common._collections_compat import MutableSet
 from ansible.plugins.cache import BaseCacheModule
-from ansible.release import __version__ as ansible_base_version
 from ansible.utils.display import Display

 try:
@@ -155,16 +152,16 @@ class CacheModuleKeys(MutableSet):
     def __len__(self):
         return len(self._keyset)

-    def add(self, value):
-        self._keyset[value] = time.time()
+    def add(self, key):
+        self._keyset[key] = time.time()
         self._cache.set(self.PREFIX, self._keyset)

-    def discard(self, value):
-        del self._keyset[value]
+    def discard(self, key):
+        del self._keyset[key]
         self._cache.set(self.PREFIX, self._keyset)

     def remove_by_timerange(self, s_min, s_max):
-        for k in list(self._keyset.keys()):
+        for k in self._keyset.keys():
             t = self._keyset[k]
             if s_min < t < s_max:
                 del self._keyset[k]
@@ -183,9 +180,9 @@ class CacheModule(BaseCacheModule):
             self._timeout = self.get_option('_timeout')
             self._prefix = self.get_option('_prefix')
         except KeyError:
-            # TODO: remove once we no longer support Ansible 2.9
-            if not ansible_base_version.startswith('2.9.'):
-                raise AnsibleError("Do not import CacheModules directly. Use ansible.plugins.loader.cache_loader instead.")
+            display.deprecated('Rather than importing CacheModules directly, '
+                               'use ansible.plugins.loader.cache_loader',
+                               version='2.0.0', collection_name='community.general')  # was Ansible 2.12
             if C.CACHE_PLUGIN_CONNECTION:
                 connection = C.CACHE_PLUGIN_CONNECTION.split(',')
             self._timeout = C.CACHE_PLUGIN_TIMEOUT
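Note that the stable-1 side of `remove_by_timerange` iterates `self._keyset.keys()` directly while deleting from the dict, which raises `RuntimeError` on Python 3; the `list(...)` copy on the removed side is the fix. A self-contained demonstration:

```python
data = {'a': 1.0, 'b': 2.5, 'c': 3.0}

# Iterating a snapshot of the keys makes it safe to delete while looping;
# "for k in data.keys()" would raise RuntimeError on Python 3 here.
for k in list(data.keys()):
    if 1.5 < data[k] < 3.5:
        del data[k]

print(data)  # -> {'a': 1.0}
```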
plugins/cache/pickle.py (vendored, 3 changed lines)
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # (c) 2017, Brian Coca
 # (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -8,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

 DOCUMENTATION = '''
-    name: pickle
+    cache: pickle
     short_description: Pickle formatted files.
     description:
       - This cache uses Python's pickle serialization format, in per host files, saved to the filesystem.
plugins/cache/redis.py (vendored, 32 changed lines)
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # (c) 2014, Brian Coca, Josh Drake, et al
 # (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -7,7 +6,7 @@ __metaclass__ = type

 DOCUMENTATION = '''
     author: Unknown (!UNKNOWN)
-    name: redis
+    cache: redis
     short_description: Use Redis DB for cache
     description:
        - This cache uses JSON formatted, per host records saved in Redis.
@@ -62,16 +61,14 @@ DOCUMENTATION = '''
       type: integer
 '''

-import re
 import time
 import json

 from ansible import constants as C
 from ansible.errors import AnsibleError
-from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils._text import to_native
 from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
 from ansible.plugins.cache import BaseCacheModule
-from ansible.release import __version__ as ansible_base_version
 from ansible.utils.display import Display

 try:
@@ -93,8 +90,6 @@ class CacheModule(BaseCacheModule):
     performance.
     """
     _sentinel_service_name = None
-    re_url_conn = re.compile(r'^([^:]+|\[[^]]+\]):(\d+):(\d+)(?::(.*))?$')
-    re_sent_conn = re.compile(r'^(.*):(\d+)$')

     def __init__(self, *args, **kwargs):
         uri = ''
@@ -108,9 +103,9 @@ class CacheModule(BaseCacheModule):
             self._keys_set = self.get_option('_keyset_name')
             self._sentinel_service_name = self.get_option('_sentinel_service_name')
         except KeyError:
-            # TODO: remove once we no longer support Ansible 2.9
-            if not ansible_base_version.startswith('2.9.'):
-                raise AnsibleError("Do not import CacheModules directly. Use ansible.plugins.loader.cache_loader instead.")
+            display.deprecated('Rather than importing CacheModules directly, '
+                               'use ansible.plugins.loader.cache_loader',
+                               version='2.0.0', collection_name='community.general')  # was Ansible 2.12
             if C.CACHE_PLUGIN_CONNECTION:
                 uri = C.CACHE_PLUGIN_CONNECTION
                 self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
@@ -134,18 +129,11 @@ class CacheModule(BaseCacheModule):
             self._db = self._get_sentinel_connection(uri, kw)
         # normal connection
         else:
-            connection = self._parse_connection(self.re_url_conn, uri)
+            connection = uri.split(':')
             self._db = StrictRedis(*connection, **kw)

         display.vv('Redis connection: %s' % self._db)

-    @staticmethod
-    def _parse_connection(re_patt, uri):
-        match = re_patt.match(uri)
-        if not match:
-            raise AnsibleError("Unable to parse connection string")
-        return match.groups()
-
     def _get_sentinel_connection(self, uri, kw):
         """
         get sentinel connection details from _uri
@@ -169,7 +157,7 @@ class CacheModule(BaseCacheModule):
         except IndexError:
             pass  # password is optional

-        sentinels = [self._parse_connection(self.re_sent_conn, shost) for shost in connections]
+        sentinels = [tuple(shost.split(':')) for shost in connections]
         display.vv('\nUsing redis sentinels: %s' % sentinels)
         scon = Sentinel(sentinels, **kw)
         try:
@@ -228,12 +216,14 @@ class CacheModule(BaseCacheModule):
         self._db.zrem(self._keys_set, key)

     def flush(self):
-        for key in list(self.keys()):
+        for key in self.keys():
             self.delete(key)

     def copy(self):
         # TODO: there is probably a better way to do this in redis
-        ret = dict([(k, self.get(k)) for k in self.keys()])
+        ret = dict()
+        for key in self.keys():
+            ret[key] = self.get(key)
         return ret

     def __getstate__(self):
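The `_parse_connection` helper removed above validates `host:port:db[:password]` strings and, unlike the plain `uri.split(':')` it is replaced with, copes with bracketed IPv6 hosts. A standalone sketch of that regex:

```python
import re

re_url_conn = re.compile(r'^([^:]+|\[[^]]+\]):(\d+):(\d+)(?::(.*))?$')

for uri in ('127.0.0.1:6379:0', '[::1]:6379:1:s3cret'):
    host, port, db, password = re_url_conn.match(uri).groups()
    print(host, port, db, password)
# 127.0.0.1 6379 0 None
# [::1] 6379 1 s3cret   <- a naive uri.split(':') would mangle this host
```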
plugins/cache/yaml.py (vendored, 3 changed lines)
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # (c) 2017, Brian Coca
 # (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -8,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

 DOCUMENTATION = '''
-    name: yaml
+    cache: yaml
     short_description: YAML formatted files.
     description:
       - This cache uses YAML formatted, per host, files saved to the filesystem.
plugins/callback/actionable.py (new file, 61 lines)
@@ -0,0 +1,61 @@
+# (c) 2015, Andrew Gaffney <andrew@agaffney.org>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+    author: Unknown (!UNKNOWN)
+    callback: actionable
+    type: stdout
+    short_description: shows only items that need attention
+    description:
+      - Use this callback when you dont care about OK nor Skipped.
+      - This callback suppresses any non Failed or Changed status.
+    deprecated:
+        why: The 'default' callback plugin now supports this functionality
+        removed_in: '2.0.0'  # was Ansible 2.11
+        alternative: "'default' callback plugin with 'display_skipped_hosts = no' and 'display_ok_hosts = no' options"
+    extends_documentation_fragment:
+      - default_callback
+    requirements:
+      - set as stdout callback in configuration
+    # Override defaults from 'default' callback plugin
+    options:
+      display_skipped_hosts:
+        name: Show skipped hosts
+        description: "Toggle to control displaying skipped task/host results in a task"
+        type: bool
+        default: no
+        env:
+          - name: DISPLAY_SKIPPED_HOSTS
+            deprecated:
+                why: environment variables without "ANSIBLE_" prefix are deprecated
+                version: "2.0.0"  # was Ansible 2.12
+                alternatives: the "ANSIBLE_DISPLAY_SKIPPED_HOSTS" environment variable
+          - name: ANSIBLE_DISPLAY_SKIPPED_HOSTS
+        ini:
+          - key: display_skipped_hosts
+            section: defaults
+      display_ok_hosts:
+        name: Show 'ok' hosts
+        description: "Toggle to control displaying 'ok' task/host results in a task"
+        type: bool
+        default: no
+        env:
+          - name: ANSIBLE_DISPLAY_OK_HOSTS
+        ini:
+          - key: display_ok_hosts
+            section: defaults
+'''
+
+from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
+
+
+class CallbackModule(CallbackModule_default):
+
+    CALLBACK_VERSION = 2.0
+    CALLBACK_TYPE = 'stdout'
+    CALLBACK_NAME = 'community.general.actionable'
@@ -8,7 +8,7 @@ __metaclass__ = type

 DOCUMENTATION = '''
     author: Unknown (!UNKNOWN)
-    name: cgroup_memory_recap
+    callback: cgroup_memory_recap
     type: aggregate
     requirements:
      - whitelist in configuration
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
 # (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -8,7 +7,7 @@ __metaclass__ = type

 DOCUMENTATION = '''
     author: Unknown (!UNKNOWN)
-    name: context_demo
+    callback: context_demo
     type: aggregate
     short_description: demo callback that adds play/task context
     description:
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # (c) 2018, Ivan Aragones Muniesa <ivan.aragones.muniesa@gmail.com>
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 '''
@@ -10,7 +9,7 @@ __metaclass__ = type

 DOCUMENTATION = '''
     author: Unknown (!UNKNOWN)
-    name: counter_enabled
+    callback: counter_enabled
     type: stdout
     short_description: adds counters to the output items (tasks and hosts/task)
     description:
@@ -45,8 +44,6 @@ class CallbackModule(CallbackBase):
     _task_total = 0
     _host_counter = 1
     _host_total = 0
-    _current_batch_total = 0
-    _previous_batch_total = 0

     def __init__(self):
         super(CallbackModule, self).__init__()
@@ -78,11 +75,8 @@ class CallbackModule(CallbackBase):
         self._display.banner(msg)
         self._play = play

-        self._previous_batch_total = self._current_batch_total
-        self._current_batch_total = self._previous_batch_total + len(self._all_vars()['vars']['ansible_play_batch'])
         self._host_total = len(self._all_vars()['vars']['ansible_play_hosts_all'])
         self._task_total = len(self._play.get_tasks()[0])
         self._task_counter = 1

     def v2_playbook_on_stats(self, stats):
         self._display.banner("PLAY RECAP")
@@ -150,7 +144,7 @@ class CallbackModule(CallbackBase):
         path = task.get_path()
         if path:
             self._display.display("task path: %s" % path, color=C.COLOR_DEBUG)
-        self._host_counter = self._previous_batch_total
+        self._host_counter = 0
         self._task_counter += 1

     def v2_runner_on_ok(self, result):
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # (c) 2016, Dag Wieers <dag@wieers.com>
 # (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -7,7 +6,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

 DOCUMENTATION = '''
-    name: dense
+    callback: dense
     type: stdout
     short_description: minimal stdout output
     extends_documentation_fragment:
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

 DOCUMENTATION = r'''
-    name: diy
+    callback: diy
     type: stdout
     short_description: Customize the output
     version_added: 0.2.0
@@ -792,7 +792,7 @@ from ansible.utils.color import colorize, hostcolor
 from ansible.template import Templar
 from ansible.vars.manager import VariableManager
 from ansible.plugins.callback.default import CallbackModule as Default
-from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils._text import to_text


 class DummyStdout(object):
@@ -1013,7 +1013,7 @@ class CallbackModule(Default):
         for attr in _stats_attributes:
             _ret[self.DIY_NS]['stats'].update({attr: _get_value(obj=stats, attr=attr)})

-        _ret[self.DIY_NS].update({'top_level_var_names': list(_ret.keys())})
+        _ret[self.DIY_NS].update({'top_level_var_names': _ret.keys()})

         return _ret
@@ -1,408 +0,0 @@
-# (C) 2021, Victor Martinez <VictorMartinezRubio@gmail.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
-    author: Victor Martinez (@v1v) <VictorMartinezRubio@gmail.com>
-    name: elastic
-    type: notification
-    short_description: Create distributed traces for each Ansible task in Elastic APM
-    version_added: 3.8.0
-    description:
-      - This callback creates distributed traces for each Ansible task in Elastic APM.
-      - You can configure the plugin with environment variables.
-      - See U(https://www.elastic.co/guide/en/apm/agent/python/current/configuration.html).
-    options:
-      hide_task_arguments:
-        default: false
-        type: bool
-        description:
-          - Hide the arguments for a task.
-        env:
-          - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS
-      apm_service_name:
-        default: ansible
-        type: str
-        description:
-          - The service name resource attribute.
-        env:
-          - name: ELASTIC_APM_SERVICE_NAME
-      apm_server_url:
-        type: str
-        description:
-          - Use the APM server and its environment variables.
-        env:
-          - name: ELASTIC_APM_SERVER_URL
-      apm_secret_token:
-        type: str
-        description:
-          - Use the APM server token
-        env:
-          - name: ELASTIC_APM_SECRET_TOKEN
-      apm_api_key:
-        type: str
-        description:
-          - Use the APM API key
-        env:
-          - name: ELASTIC_APM_API_KEY
-      apm_verify_server_cert:
-        default: true
-        type: bool
-        description:
-          - Verifies the SSL certificate if an HTTPS connection.
-        env:
-          - name: ELASTIC_APM_VERIFY_SERVER_CERT
-      traceparent:
-        type: str
-        description:
-          - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header).
-        env:
-          - name: TRACEPARENT
-    requirements:
-      - elastic-apm (Python library)
-'''
-
-
-EXAMPLES = '''
-examples: |
-  Enable the plugin in ansible.cfg:
-    [defaults]
-    callbacks_enabled = community.general.elastic
-
-  Set the environment variable:
-    export ELASTIC_APM_SERVER_URL=<your APM server URL)>
-    export ELASTIC_APM_SERVICE_NAME=your_service_name
-    export ELASTIC_APM_API_KEY=your_APM_API_KEY
-'''
-
-import getpass
-import socket
-import time
-import uuid
-
-from collections import OrderedDict
-from os.path import basename
-
-from ansible.errors import AnsibleError, AnsibleRuntimeError
-from ansible.module_utils.six import raise_from
-from ansible.plugins.callback import CallbackBase
-
-try:
-    from elasticapm import Client, capture_span, trace_parent_from_string, instrument, label
-except ImportError as imp_exc:
-    ELASTIC_LIBRARY_IMPORT_ERROR = imp_exc
-else:
-    ELASTIC_LIBRARY_IMPORT_ERROR = None
-
-
-class TaskData:
-    """
-    Data about an individual task.
-    """
-
-    def __init__(self, uuid, name, path, play, action, args):
-        self.uuid = uuid
-        self.name = name
-        self.path = path
-        self.play = play
-        self.host_data = OrderedDict()
-        self.start = time.time()
-        self.action = action
-        self.args = args
-
-    def add_host(self, host):
-        if host.uuid in self.host_data:
-            if host.status == 'included':
-                # concatenate task include output from multiple items
-                host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result)
-            else:
-                return
-
-        self.host_data[host.uuid] = host
-
-
-class HostData:
-    """
-    Data about an individual host.
-    """
-
-    def __init__(self, uuid, name, status, result):
-        self.uuid = uuid
-        self.name = name
-        self.status = status
-        self.result = result
-        self.finish = time.time()
-
-
-class ElasticSource(object):
-    def __init__(self, display):
-        self.ansible_playbook = ""
-        self.ansible_version = None
-        self.session = str(uuid.uuid4())
-        self.host = socket.gethostname()
-        try:
-            self.ip_address = socket.gethostbyname(socket.gethostname())
-        except Exception as e:
-            self.ip_address = None
-        self.user = getpass.getuser()
-
-        self._display = display
-
-    def start_task(self, tasks_data, hide_task_arguments, play_name, task):
-        """ record the start of a task for one or more hosts """
-
-        uuid = task._uuid
-
-        if uuid in tasks_data:
-            return
-
-        name = task.get_name().strip()
-        path = task.get_path()
-        action = task.action
-        args = None
-
-        if not task.no_log and not hide_task_arguments:
-            args = ', '.join(('%s=%s' % a for a in task.args.items()))
-
-        tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args)
-
-    def finish_task(self, tasks_data, status, result):
-        """ record the results of a task for a single host """
-
-        task_uuid = result._task._uuid
-
-        if hasattr(result, '_host') and result._host is not None:
-            host_uuid = result._host._uuid
-            host_name = result._host.name
-        else:
-            host_uuid = 'include'
-            host_name = 'include'
-
-        task = tasks_data[task_uuid]
-
-        if self.ansible_version is None and result._task_fields['args'].get('_ansible_version'):
-            self.ansible_version = result._task_fields['args'].get('_ansible_version')
-
-        task.add_host(HostData(host_uuid, host_name, status, result))
-
-    def generate_distributed_traces(self, tasks_data, status, end_time, traceparent, apm_service_name,
-                                    apm_server_url, apm_verify_server_cert, apm_secret_token, apm_api_key):
-        """ generate distributed traces from the collected TaskData and HostData """
-
-        tasks = []
-        parent_start_time = None
-        for task_uuid, task in tasks_data.items():
-            if parent_start_time is None:
-                parent_start_time = task.start
-            tasks.append(task)
-
-        apm_cli = self.init_apm_client(apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key)
-        if apm_cli:
-            instrument()  # Only call this once, as early as possible.
-            if traceparent:
-                parent = trace_parent_from_string(traceparent)
-                apm_cli.begin_transaction("Session", trace_parent=parent, start=parent_start_time)
-            else:
-                apm_cli.begin_transaction("Session", start=parent_start_time)
-            # Populate trace metadata attributes
-            if self.ansible_version is not None:
-                label(ansible_version=self.ansible_version)
-            label(ansible_session=self.session, ansible_host_name=self.host, ansible_host_user=self.user)
-            if self.ip_address is not None:
-                label(ansible_host_ip=self.ip_address)
-
-            for task_data in tasks:
-                for host_uuid, host_data in task_data.host_data.items():
-                    self.create_span_data(apm_cli, task_data, host_data)
-
-            apm_cli.end_transaction(name=__name__, result=status, duration=end_time - parent_start_time)
-
-    def create_span_data(self, apm_cli, task_data, host_data):
-        """ create the span with the given TaskData and HostData """
-
-        name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
-
-        message = "success"
-        status = "success"
-        if host_data.status == 'included':
-            rc = 0
-        else:
-            res = host_data.result._result
-            rc = res.get('rc', 0)
-            if host_data.status == 'failed':
-                if res.get('exception') is not None:
-                    message = res['exception'].strip().split('\n')[-1]
-                elif 'msg' in res:
-                    message = res['msg']
-                else:
-                    message = 'failed'
-                status = "failure"
-            elif host_data.status == 'skipped':
-                if 'skip_reason' in res:
-                    message = res['skip_reason']
-                else:
-                    message = 'skipped'
-                status = "unknown"
-
-        with capture_span(task_data.name,
-                          start=task_data.start,
-                          span_type="ansible.task.run",
-                          duration=host_data.finish - task_data.start,
-                          labels={"ansible.task.args": task_data.args,
-                                  "ansible.task.message": message,
-                                  "ansible.task.module": task_data.action,
-                                  "ansible.task.name": name,
-                                  "ansible.task.result": rc,
-                                  "ansible.task.host.name": host_data.name,
-                                  "ansible.task.host.status": host_data.status}) as span:
-            span.outcome = status
-            if 'failure' in status:
-                exception = AnsibleRuntimeError(message="{0}: {1} failed with error message {2}".format(task_data.action, name, message))
-                apm_cli.capture_exception(exc_info=(type(exception), exception, exception.__traceback__), handled=True)
-
-    def init_apm_client(self, apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key):
-        if apm_server_url:
-            return Client(service_name=apm_service_name,
-                          server_url=apm_server_url,
-                          verify_server_cert=False,
-                          secret_token=apm_secret_token,
-                          api_key=apm_api_key,
-                          use_elastic_traceparent_header=True,
-                          debug=True)
-
-
-class CallbackModule(CallbackBase):
-    """
-    This callback creates distributed traces with Elastic APM.
-    """
-
-    CALLBACK_VERSION = 2.0
-    CALLBACK_TYPE = 'notification'
-    CALLBACK_NAME = 'community.general.elastic'
-    CALLBACK_NEEDS_ENABLED = True
-
-    def __init__(self, display=None):
-        super(CallbackModule, self).__init__(display=display)
-        self.hide_task_arguments = None
-        self.apm_service_name = None
-        self.ansible_playbook = None
-        self.traceparent = False
-        self.play_name = None
-        self.tasks_data = None
-        self.errors = 0
-        self.disabled = False
-
-        if ELASTIC_LIBRARY_IMPORT_ERROR:
-            raise_from(
-                AnsibleError('The `elastic-apm` must be installed to use this plugin'),
-                ELASTIC_LIBRARY_IMPORT_ERROR)
-
-        self.tasks_data = OrderedDict()
-
-        self.elastic = ElasticSource(display=self._display)
-
-    def set_options(self, task_keys=None, var_options=None, direct=None):
-        super(CallbackModule, self).set_options(task_keys=task_keys,
-                                                var_options=var_options,
-                                                direct=direct)
-
-        self.hide_task_arguments = self.get_option('hide_task_arguments')
-
-        self.apm_service_name = self.get_option('apm_service_name')
-        if not self.apm_service_name:
-            self.apm_service_name = 'ansible'
-
-        self.apm_server_url = self.get_option('apm_server_url')
-        self.apm_secret_token = self.get_option('apm_secret_token')
-        self.apm_api_key = self.get_option('apm_api_key')
-        self.apm_verify_server_cert = self.get_option('apm_verify_server_cert')
-        self.traceparent = self.get_option('traceparent')
-
-    def v2_playbook_on_start(self, playbook):
-        self.ansible_playbook = basename(playbook._file_name)
-
-    def v2_playbook_on_play_start(self, play):
-        self.play_name = play.get_name()
-
-    def v2_runner_on_no_hosts(self, task):
-        self.elastic.start_task(
-            self.tasks_data,
-            self.hide_task_arguments,
-            self.play_name,
-            task
-        )
-
-    def v2_playbook_on_task_start(self, task, is_conditional):
-        self.elastic.start_task(
-            self.tasks_data,
-            self.hide_task_arguments,
-            self.play_name,
-            task
-        )
-
-    def v2_playbook_on_cleanup_task_start(self, task):
-        self.elastic.start_task(
-            self.tasks_data,
-            self.hide_task_arguments,
-            self.play_name,
-            task
-        )
-
-    def v2_playbook_on_handler_task_start(self, task):
-        self.elastic.start_task(
-            self.tasks_data,
-            self.hide_task_arguments,
-            self.play_name,
-            task
-        )
-
-    def v2_runner_on_failed(self, result, ignore_errors=False):
-        self.errors += 1
-        self.elastic.finish_task(
-            self.tasks_data,
-            'failed',
-            result
-        )
-
-    def v2_runner_on_ok(self, result):
-        self.elastic.finish_task(
-            self.tasks_data,
-            'ok',
-            result
-        )
-
-    def v2_runner_on_skipped(self, result):
-        self.elastic.finish_task(
-            self.tasks_data,
-            'skipped',
-            result
-        )
-
-    def v2_playbook_on_include(self, included_file):
-        self.elastic.finish_task(
-            self.tasks_data,
-            'included',
-            included_file
-        )
-
-    def v2_playbook_on_stats(self, stats):
-        if self.errors == 0:
-            status = "success"
-        else:
-            status = "failure"
-        self.elastic.generate_distributed_traces(
-            self.tasks_data,
-            status,
-            time.time(),
-            self.traceparent,
-            self.apm_service_name,
-            self.apm_server_url,
-            self.apm_verify_server_cert,
-            self.apm_secret_token,
-            self.apm_api_key
-        )
-
-    def v2_runner_on_async_failed(self, result, **kwargs):
-        self.errors += 1
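The deleted plugin uses a deferred import-guard: the `ImportError` is captured at module load and only re-raised, with the original exception chained via `raise_from`, when the plugin is actually instantiated. A minimal sketch of that pattern:

```python
from ansible.errors import AnsibleError
from ansible.module_utils.six import raise_from

try:
    import elasticapm  # noqa: F401
except ImportError as imp_exc:
    ELASTIC_LIBRARY_IMPORT_ERROR = imp_exc
else:
    ELASTIC_LIBRARY_IMPORT_ERROR = None


def ensure_library_available():
    # called from __init__, so a missing library only fails when the plugin is used
    if ELASTIC_LIBRARY_IMPORT_ERROR:
        raise_from(
            AnsibleError('The `elastic-apm` must be installed to use this plugin'),
            ELASTIC_LIBRARY_IMPORT_ERROR)
```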
plugins/callback/full_skip.py (new file, 76 lines)
@@ -0,0 +1,76 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+    author: Unknown (!UNKNOWN)
+    callback: full_skip
+    type: stdout
+    short_description: suppresses tasks if all hosts skipped
+    description:
+      - Use this plugin when you do not care about any output for tasks that were completely skipped
+    deprecated:
+        why: The 'default' callback plugin now supports this functionality
+        removed_in: '2.0.0'  # was Ansible 2.11
+        alternative: "'default' callback plugin with 'display_skipped_hosts = no' option"
+    extends_documentation_fragment:
+      - default_callback
+    requirements:
+      - set as stdout in configuration
+'''
+
+from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
+
+
+class CallbackModule(CallbackModule_default):
+
+    '''
+    This is the default callback interface, which simply prints messages
+    to stdout when new callback events are received.
+    '''
+
+    CALLBACK_VERSION = 2.0
+    CALLBACK_TYPE = 'stdout'
+    CALLBACK_NAME = 'community.general.full_skip'
+
+    def v2_runner_on_skipped(self, result):
+        self.outlines = []
+
+    def v2_playbook_item_on_skipped(self, result):
+        self.outlines = []
+
+    def v2_runner_item_on_skipped(self, result):
+        self.outlines = []
+
+    def v2_runner_on_failed(self, result, ignore_errors=False):
+        self.display()
+        super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
+
+    def v2_playbook_on_task_start(self, task, is_conditional):
+        self.outlines = []
+        self.outlines.append("TASK [%s]" % task.get_name().strip())
+        if self._display.verbosity >= 2:
+            path = task.get_path()
+            if path:
+                self.outlines.append("task path: %s" % path)
+
+    def v2_playbook_item_on_ok(self, result):
+        self.display()
+        super(CallbackModule, self).v2_playbook_item_on_ok(result)
+
+    def v2_runner_on_ok(self, result):
+        self.display()
+        super(CallbackModule, self).v2_runner_on_ok(result)
+
+    def display(self):
+        if len(self.outlines) == 0:
+            return
+        (first, rest) = self.outlines[0], self.outlines[1:]
+        self._display.banner(first)
+        for line in rest:
+            self._display.display(line)
+        self.outlines = []
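full_skip buffers each task banner in `self.outlines` and only prints it when a non-skipped result arrives, so fully-skipped tasks vanish from the output. The mechanism in isolation (plain `print` stands in for Ansible's display object):

```python
class SkipBuffer(object):
    def __init__(self):
        self.outlines = []

    def on_task_start(self, name):
        self.outlines = ["TASK [%s]" % name]

    def on_skipped(self):
        self.outlines = []  # banner is discarded, nothing is printed

    def flush(self):
        for line in self.outlines:
            print(line)
        self.outlines = []

buf = SkipBuffer()
buf.on_task_start("ping")
buf.on_skipped()        # every host skipped: no output at all
buf.on_task_start("copy")
buf.flush()             # real result arrived: prints "TASK [copy]"
```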
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # (C) 2014, Matt Martz <matt@sivel.net>
 # (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -8,7 +7,7 @@ __metaclass__ = type

 DOCUMENTATION = '''
     author: Unknown (!UNKNOWN)
-    name: hipchat
+    callback: hipchat
     type: notification
     requirements:
      - whitelist in configuration.
@@ -174,7 +173,8 @@ class CallbackModule(CallbackBase):
         # Displays info about playbook being started by a person on an
         # inventory, as well as Tags, Skip Tags and Limits
         if not self.printed_playbook:
-            self.playbook_name, dummy = os.path.splitext(os.path.basename(self.play.playbook.filename))
+            self.playbook_name, _ = os.path.splitext(
+                os.path.basename(self.play.playbook.filename))
             host_list = self.play.playbook.inventory.host_list
             inventory = os.path.basename(os.path.realpath(host_list))
             self.send_msg("%s: Playbook initiated by %s against %s" %
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright (C) 2016 maxn nikolaev.makc@gmail.com
 # Copyright (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -8,7 +7,7 @@ __metaclass__ = type

 DOCUMENTATION = '''
     author: Unknown (!UNKNOWN)
-    name: jabber
+    callback: jabber
     type: notification
     short_description: post task events to a jabber server
     description:
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
 # (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -8,11 +7,11 @@ __metaclass__ = type

 DOCUMENTATION = '''
     author: Unknown (!UNKNOWN)
-    name: log_plays
+    callback: log_plays
     type: notification
     short_description: write playbook output to log file
     description:
-      - This callback writes playbook output to a file per host in the C(/var/log/ansible/hosts) directory
+      - This callback writes playbook output to a file per host in the `/var/log/ansible/hosts` directory
     requirements:
      - Whitelist in configuration
      - A writeable /var/log/ansible/hosts directory by the user executing Ansible on the controller
@@ -32,7 +31,7 @@ import time
 import json

 from ansible.utils.path import makedirs_safe
-from ansible.module_utils.common.text.converters import to_bytes
+from ansible.module_utils._text import to_bytes
 from ansible.module_utils.common._collections_compat import MutableMapping
 from ansible.parsing.ajson import AnsibleJSONEncoder
 from ansible.plugins.callback import CallbackBase
@@ -1,235 +0,0 @@
-# -*- coding: utf-8 -*-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
-    name: loganalytics
-    type: aggregate
-    short_description: Posts task results to Azure Log Analytics
-    author: "Cyrus Li (@zhcli) <cyrus1006@gmail.com>"
-    description:
-      - This callback plugin will post task results in JSON formatted to an Azure Log Analytics workspace.
-      - Credits to authors of splunk callback plugin.
-    version_added: "2.4.0"
-    requirements:
-      - Whitelisting this callback plugin.
-      - An Azure log analytics work space has been established.
-    options:
-      workspace_id:
-        description: Workspace ID of the Azure log analytics workspace.
-        required: true
-        env:
-          - name: WORKSPACE_ID
-        ini:
-          - section: callback_loganalytics
-            key: workspace_id
-      shared_key:
-        description: Shared key to connect to Azure log analytics workspace.
-        required: true
-        env:
-          - name: WORKSPACE_SHARED_KEY
-        ini:
-          - section: callback_loganalytics
-            key: shared_key
-'''
-
-EXAMPLES = '''
-examples: |
-  Whitelist the plugin in ansible.cfg:
-    [defaults]
-    callback_whitelist = community.general.loganalytics
-  Set the environment variable:
-    export WORKSPACE_ID=01234567-0123-0123-0123-01234567890a
-    export WORKSPACE_SHARED_KEY=dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA==
-  Or configure the plugin in ansible.cfg in the callback_loganalytics block:
-    [callback_loganalytics]
-    workspace_id = 01234567-0123-0123-0123-01234567890a
-    shared_key = dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA==
-'''
-
-import hashlib
-import hmac
-import base64
-import logging
-import json
-import uuid
-import socket
-import getpass
-
-from datetime import datetime
-from os.path import basename
-
-from ansible.module_utils.urls import open_url
-from ansible.parsing.ajson import AnsibleJSONEncoder
-from ansible.plugins.callback import CallbackBase
-
-
-class AzureLogAnalyticsSource(object):
-    def __init__(self):
-        self.ansible_check_mode = False
-        self.ansible_playbook = ""
-        self.ansible_version = ""
-        self.session = str(uuid.uuid4())
-        self.host = socket.gethostname()
-        self.user = getpass.getuser()
-        self.extra_vars = ""
-
-    def __build_signature(self, date, workspace_id, shared_key, content_length):
-        # Build authorisation signature for Azure log analytics API call
-        sigs = "POST\n{0}\napplication/json\nx-ms-date:{1}\n/api/logs".format(
-            str(content_length), date)
-        utf8_sigs = sigs.encode('utf-8')
-        decoded_shared_key = base64.b64decode(shared_key)
-        hmac_sha256_sigs = hmac.new(
-            decoded_shared_key, utf8_sigs, digestmod=hashlib.sha256).digest()
-        encoded_hash = base64.b64encode(hmac_sha256_sigs).decode('utf-8')
-        signature = "SharedKey {0}:{1}".format(workspace_id, encoded_hash)
-        return signature
-
-    def __build_workspace_url(self, workspace_id):
-        return "https://{0}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01".format(workspace_id)
-
-    def __rfc1123date(self):
-        return datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
-
-    def send_event(self, workspace_id, shared_key, state, result, runtime):
-        if result._task_fields['args'].get('_ansible_check_mode') is True:
-            self.ansible_check_mode = True
-
-        if result._task_fields['args'].get('_ansible_version'):
-            self.ansible_version = \
-                result._task_fields['args'].get('_ansible_version')
-
-        if result._task._role:
-            ansible_role = str(result._task._role)
-        else:
-            ansible_role = None
-
-        data = {}
-        data['uuid'] = result._task._uuid
-        data['session'] = self.session
-        data['status'] = state
-        data['timestamp'] = self.__rfc1123date()
-        data['host'] = self.host
-        data['user'] = self.user
-        data['runtime'] = runtime
-        data['ansible_version'] = self.ansible_version
-        data['ansible_check_mode'] = self.ansible_check_mode
-        data['ansible_host'] = result._host.name
-        data['ansible_playbook'] = self.ansible_playbook
-        data['ansible_role'] = ansible_role
-        data['ansible_task'] = result._task_fields
-        # Removing args since it can contain sensitive data
-        if 'args' in data['ansible_task']:
-            data['ansible_task'].pop('args')
-        data['ansible_result'] = result._result
-        if 'content' in data['ansible_result']:
-            data['ansible_result'].pop('content')
-
-        # Adding extra vars info
-        data['extra_vars'] = self.extra_vars
-
-        # Preparing the playbook logs as JSON format and send to Azure log analytics
-        jsondata = json.dumps({'event': data}, cls=AnsibleJSONEncoder, sort_keys=True)
-        content_length = len(jsondata)
-        rfc1123date = self.__rfc1123date()
-        signature = self.__build_signature(rfc1123date, workspace_id, shared_key, content_length)
-        workspace_url = self.__build_workspace_url(workspace_id)
-
-        open_url(
-            workspace_url,
-            jsondata,
-            headers={
-                'content-type': 'application/json',
-                'Authorization': signature,
-                'Log-Type': 'ansible_playbook',
-                'x-ms-date': rfc1123date
-            },
-            method='POST'
-        )
-
-
-class CallbackModule(CallbackBase):
-    CALLBACK_VERSION = 2.0
-    CALLBACK_TYPE = 'aggregate'
-    CALLBACK_NAME = 'loganalytics'
-    CALLBACK_NEEDS_WHITELIST = True
-
-    def __init__(self, display=None):
-        super(CallbackModule, self).__init__(display=display)
-        self.start_datetimes = {}  # Collect task start times
-        self.workspace_id = None
-        self.shared_key = None
-        self.loganalytics = AzureLogAnalyticsSource()
-
-    def _seconds_since_start(self, result):
-        return (
-            datetime.utcnow() -
-            self.start_datetimes[result._task._uuid]
-        ).total_seconds()
-
-    def set_options(self, task_keys=None, var_options=None, direct=None):
-        super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
-        self.workspace_id = self.get_option('workspace_id')
-        self.shared_key = self.get_option('shared_key')
-
-    def v2_playbook_on_play_start(self, play):
-        vm = play.get_variable_manager()
-        extra_vars = vm.extra_vars
-        self.loganalytics.extra_vars = extra_vars
-
-    def v2_playbook_on_start(self, playbook):
-        self.loganalytics.ansible_playbook = basename(playbook._file_name)
-
-    def v2_playbook_on_task_start(self, task, is_conditional):
-        self.start_datetimes[task._uuid] = datetime.utcnow()
-
-    def v2_playbook_on_handler_task_start(self, task):
-        self.start_datetimes[task._uuid] = datetime.utcnow()
-
-    def v2_runner_on_ok(self, result, **kwargs):
-        self.loganalytics.send_event(
-            self.workspace_id,
-            self.shared_key,
-            'OK',
-            result,
-            self._seconds_since_start(result)
-        )
-
-    def v2_runner_on_skipped(self, result, **kwargs):
-        self.loganalytics.send_event(
-            self.workspace_id,
-            self.shared_key,
-            'SKIPPED',
-            result,
-            self._seconds_since_start(result)
-        )
-
-    def v2_runner_on_failed(self, result, **kwargs):
-        self.loganalytics.send_event(
-            self.workspace_id,
-            self.shared_key,
-            'FAILED',
-            result,
-            self._seconds_since_start(result)
-        )
-
-    def runner_on_async_failed(self, result, **kwargs):
-        self.loganalytics.send_event(
-            self.workspace_id,
-            self.shared_key,
-            'FAILED',
-            result,
-            self._seconds_since_start(result)
-        )
-
-    def v2_runner_on_unreachable(self, result, **kwargs):
-        self.loganalytics.send_event(
-            self.workspace_id,
-            self.shared_key,
-            'UNREACHABLE',
-            result,
-            self._seconds_since_start(result)
-        )
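The `__build_signature` method above implements the Azure HTTP Data Collector API's shared-key scheme: an HMAC-SHA256 over a canonical string, keyed with the base64-decoded workspace key. The computation extracted into a runnable sketch (all values are dummies, not real credentials):

```python
import base64
import hashlib
import hmac

def build_signature(date, workspace_id, shared_key, content_length):
    sigs = "POST\n{0}\napplication/json\nx-ms-date:{1}\n/api/logs".format(
        str(content_length), date)
    digest = hmac.new(base64.b64decode(shared_key),
                      sigs.encode('utf-8'),
                      digestmod=hashlib.sha256).digest()
    return "SharedKey {0}:{1}".format(
        workspace_id, base64.b64encode(digest).decode('utf-8'))

print(build_signature('Mon, 01 Jan 2024 00:00:00 GMT',
                      '01234567-0123-0123-0123-01234567890a',
                      base64.b64encode(b'dummy-key').decode('utf-8'),
                      128))
```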
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # (c) 2018, Samir Musali <samir.musali@logdna.com>
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

@@ -7,7 +6,7 @@ __metaclass__ = type

 DOCUMENTATION = '''
     author: Unknown (!UNKNOWN)
-    name: logdna
+    callback: logdna
     type: aggregate
     short_description: Sends playbook logs to LogDNA
     description:
@@ -78,7 +77,7 @@ def get_mac():

 # Getting hostname of system:
 def get_hostname():
-    return str(socket.gethostname()).split('.local', 1)[0]
+    return str(socket.gethostname()).split('.local')[0]


 # Getting IP of system:
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # (c) 2015, Logentries.com, Jimmy Tang <jimmy.tang@logentries.com>
 # (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -7,7 +6,7 @@ __metaclass__ = type

 DOCUMENTATION = '''
     author: Unknown (!UNKNOWN)
-    name: logentries
+    callback: logentries
     type: notification
     short_description: Sends events to Logentries
     description:
@@ -112,7 +111,7 @@ try:
 except ImportError:
     HAS_FLATDICT = False

-from ansible.module_utils.common.text.converters import to_bytes, to_text
+from ansible.module_utils._text import to_bytes, to_text
 from ansible.plugins.callback import CallbackBase

 # Todo:
||||
@@ -1,14 +1,13 @@
# -*- coding: utf-8 -*-
# (C) 2020, Yevhen Khmelenko <ujenmr@gmail.com>
# (C) 2016, Ievgen Khmelenko <ujenmr@gmail.com>
# (C) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = r'''
    author: Yevhen Khmelenko (@ujenmr)
    name: logstash
DOCUMENTATION = '''
    author: Unknown (!UNKNOWN)
    callback: logstash
    type: notification
    short_description: Sends events to Logstash
    description:
@@ -44,62 +43,16 @@ DOCUMENTATION = r'''
          key: type
      version_added: 1.0.0
      default: ansible
    pre_command:
      description: Executes command before run and its result is added to the C(ansible_pre_command_output) logstash field.
      version_added: 2.0.0
      ini:
        - section: callback_logstash
          key: pre_command
      env:
        - name: LOGSTASH_PRE_COMMAND
    format_version:
      description: Logging format
      type: str
      version_added: 2.0.0
      ini:
        - section: callback_logstash
          key: format_version
      env:
        - name: LOGSTASH_FORMAT_VERSION
      default: v1
      choices:
        - v1
        - v2

'''

EXAMPLES = r'''
ansible.cfg: |
    # Enable Callback plugin
    [defaults]
        callback_whitelist = community.general.logstash

    [callback_logstash]
        server = logstash.example.com
        port = 5000
        pre_command = git rev-parse HEAD
        type = ansible

11-input-tcp.conf: |
    # Enable Logstash TCP Input
    input {
        tcp {
            port => 5000
            codec => json
            add_field => { "[@metadata][beat]" => "notify" }
            add_field => { "[@metadata][type]" => "ansible" }
        }
    }
'''

import os
import json
from ansible import context
import socket
import uuid
import logging
from datetime import datetime

import logging

try:
    import logstash
    HAS_LOGSTASH = True
@@ -110,78 +63,76 @@ from ansible.plugins.callback import CallbackBase


class CallbackModule(CallbackBase):
    """
    ansible logstash callback plugin
    ansible.cfg:
        callback_plugins = <path_to_callback_plugins_folder>
        callback_whitelist = logstash
    and put the plugin in <path_to_callback_plugins_folder>

    logstash config:
        input {
            tcp {
                port => 5000
                codec => json
            }
        }

    Requires:
        python-logstash

    This plugin makes use of the following environment variables or ini config:
        LOGSTASH_SERVER (optional): defaults to localhost
        LOGSTASH_PORT (optional): defaults to 5000
        LOGSTASH_TYPE (optional): defaults to ansible
    """

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'community.general.logstash'
    CALLBACK_NEEDS_WHITELIST = True

    def __init__(self):
        super(CallbackModule, self).__init__()
    def __init__(self, display=None):
        super(CallbackModule, self).__init__(display=display)

        if not HAS_LOGSTASH:
            self.disabled = True
            self._display.warning("The required python-logstash/python3-logstash is not installed. "
                                  "pip install python-logstash for Python 2"
                                  "pip install python3-logstash for Python 3")
            self._display.warning("The required python-logstash is not installed. "
                                  "pip install python-logstash")

        self.start_time = datetime.utcnow()

    def _init_plugin(self):
        if not self.disabled:
            self.logger = logging.getLogger('python-logstash-logger')
            self.logger.setLevel(logging.DEBUG)

            self.handler = logstash.TCPLogstashHandler(
                self.ls_server,
                self.ls_port,
                version=1,
                message_type=self.ls_type
            )

            self.logger.addHandler(self.handler)
            self.hostname = socket.gethostname()
            self.session = str(uuid.uuid4())
            self.errors = 0

            self.base_data = {
                'session': self.session,
                'host': self.hostname
            }

            if self.ls_pre_command is not None:
                self.base_data['ansible_pre_command_output'] = os.popen(
                    self.ls_pre_command).read()

            if context.CLIARGS is not None:
                self.base_data['ansible_checkmode'] = context.CLIARGS.get('check')
                self.base_data['ansible_tags'] = context.CLIARGS.get('tags')
                self.base_data['ansible_skip_tags'] = context.CLIARGS.get('skip_tags')
                self.base_data['inventory'] = context.CLIARGS.get('inventory')

    def set_options(self, task_keys=None, var_options=None, direct=None):

        super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)

        self.ls_server = self.get_option('server')
        self.ls_port = int(self.get_option('port'))
        self.ls_type = self.get_option('type')
        self.ls_pre_command = self.get_option('pre_command')
        self.ls_format_version = self.get_option('format_version')
        self.logger = logging.getLogger('python-logstash-logger')
        self.logger.setLevel(logging.DEBUG)

        self._init_plugin()
        self.logstash_server = self.get_option('server')
        self.logstash_port = self.get_option('port')
        self.logstash_type = self.get_option('type')
        self.handler = logstash.TCPLogstashHandler(
            self.logstash_server,
            int(self.logstash_port),
            version=1,
            message_type=self.logstash_type
        )
        self.logger.addHandler(self.handler)
        self.hostname = socket.gethostname()
        self.session = str(uuid.uuid1())
        self.errors = 0

    def v2_playbook_on_start(self, playbook):
        data = self.base_data.copy()
        data['ansible_type'] = "start"
        data['status'] = "OK"
        data['ansible_playbook'] = playbook._file_name

        if (self.ls_format_version == "v2"):
            self.logger.info(
                "START PLAYBOOK | %s", data['ansible_playbook'], extra=data
            )
        else:
            self.logger.info("ansible start", extra=data)
        self.playbook = playbook._file_name
        data = {
            'status': "OK",
            'host': self.hostname,
            'session': self.session,
            'ansible_type': "start",
            'ansible_playbook': self.playbook,
        }
        self.logger.info("ansible start", extra=data)

    def v2_playbook_on_stats(self, stats):
        end_time = datetime.utcnow()
@@ -195,201 +146,103 @@ class CallbackModule(CallbackBase):
        else:
            status = "FAILED"

        data = self.base_data.copy()
        data['ansible_type'] = "finish"
        data['status'] = status
        data['ansible_playbook_duration'] = runtime.total_seconds()
        data['ansible_result'] = json.dumps(summarize_stat)  # deprecated field

        if (self.ls_format_version == "v2"):
            self.logger.info(
                "FINISH PLAYBOOK | %s", json.dumps(summarize_stat), extra=data
            )
        else:
            self.logger.info("ansible stats", extra=data)

    def v2_playbook_on_play_start(self, play):
        self.play_id = str(play._uuid)

        if play.name:
            self.play_name = play.name

        data = self.base_data.copy()
        data['ansible_type'] = "start"
        data['status'] = "OK"
        data['ansible_play_id'] = self.play_id
        data['ansible_play_name'] = self.play_name

        if (self.ls_format_version == "v2"):
            self.logger.info("START PLAY | %s", self.play_name, extra=data)
        else:
            self.logger.info("ansible play", extra=data)

    def v2_playbook_on_task_start(self, task, is_conditional):
        self.task_id = str(task._uuid)

        '''
        Tasks and handler tasks are dealt with here
        '''
        data = {
            'status': status,
            'host': self.hostname,
            'session': self.session,
            'ansible_type': "finish",
            'ansible_playbook': self.playbook,
            'ansible_playbook_duration': runtime.total_seconds(),
            'ansible_result': json.dumps(summarize_stat),
        }
        self.logger.info("ansible stats", extra=data)

    def v2_runner_on_ok(self, result, **kwargs):
        task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')

        data = self.base_data.copy()
        if task_name == 'setup':
            data['ansible_type'] = "setup"
            data['status'] = "OK"
            data['ansible_host'] = result._host.name
            data['ansible_play_id'] = self.play_id
            data['ansible_play_name'] = self.play_name
            data['ansible_task'] = task_name
            data['ansible_facts'] = self._dump_results(result._result)

            if (self.ls_format_version == "v2"):
                self.logger.info(
                    "SETUP FACTS | %s", self._dump_results(result._result), extra=data
                )
            else:
                self.logger.info("ansible facts", extra=data)
        else:
            if 'changed' in result._result.keys():
                data['ansible_changed'] = result._result['changed']
            else:
                data['ansible_changed'] = False

            data['ansible_type'] = "task"
            data['status'] = "OK"
            data['ansible_host'] = result._host.name
            data['ansible_play_id'] = self.play_id
            data['ansible_play_name'] = self.play_name
            data['ansible_task'] = task_name
            data['ansible_task_id'] = self.task_id
            data['ansible_result'] = self._dump_results(result._result)

            if (self.ls_format_version == "v2"):
                self.logger.info(
                    "TASK OK | %s | RESULT | %s",
                    task_name, self._dump_results(result._result), extra=data
                )
            else:
                self.logger.info("ansible ok", extra=data)
        data = {
            'status': "OK",
            'host': self.hostname,
            'session': self.session,
            'ansible_type': "task",
            'ansible_playbook': self.playbook,
            'ansible_host': result._host.name,
            'ansible_task': result._task,
            'ansible_result': self._dump_results(result._result)
        }
        self.logger.info("ansible ok", extra=data)

    def v2_runner_on_skipped(self, result, **kwargs):
        task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')

        data = self.base_data.copy()
        data['ansible_type'] = "task"
        data['status'] = "SKIPPED"
        data['ansible_host'] = result._host.name
        data['ansible_play_id'] = self.play_id
        data['ansible_play_name'] = self.play_name
        data['ansible_task'] = task_name
        data['ansible_task_id'] = self.task_id
        data['ansible_result'] = self._dump_results(result._result)

        if (self.ls_format_version == "v2"):
            self.logger.info("TASK SKIPPED | %s", task_name, extra=data)
        else:
            self.logger.info("ansible skipped", extra=data)
        data = {
            'status': "SKIPPED",
            'host': self.hostname,
            'session': self.session,
            'ansible_type': "task",
            'ansible_playbook': self.playbook,
            'ansible_task': result._task,
            'ansible_host': result._host.name
        }
        self.logger.info("ansible skipped", extra=data)

    def v2_playbook_on_import_for_host(self, result, imported_file):
        data = self.base_data.copy()
        data['ansible_type'] = "import"
        data['status'] = "IMPORTED"
        data['ansible_host'] = result._host.name
        data['ansible_play_id'] = self.play_id
        data['ansible_play_name'] = self.play_name
        data['imported_file'] = imported_file

        if (self.ls_format_version == "v2"):
            self.logger.info("IMPORT | %s", imported_file, extra=data)
        else:
            self.logger.info("ansible import", extra=data)
        data = {
            'status': "IMPORTED",
            'host': self.hostname,
            'session': self.session,
            'ansible_type': "import",
            'ansible_playbook': self.playbook,
            'ansible_host': result._host.name,
            'imported_file': imported_file
        }
        self.logger.info("ansible import", extra=data)

    def v2_playbook_on_not_import_for_host(self, result, missing_file):
        data = self.base_data.copy()
        data['ansible_type'] = "import"
        data['status'] = "NOT IMPORTED"
        data['ansible_host'] = result._host.name
        data['ansible_play_id'] = self.play_id
        data['ansible_play_name'] = self.play_name
        data['imported_file'] = missing_file

        if (self.ls_format_version == "v2"):
            self.logger.info("NOT IMPORTED | %s", missing_file, extra=data)
        else:
            self.logger.info("ansible import", extra=data)
        data = {
            'status': "NOT IMPORTED",
            'host': self.hostname,
            'session': self.session,
            'ansible_type': "import",
            'ansible_playbook': self.playbook,
            'ansible_host': result._host.name,
            'missing_file': missing_file
        }
        self.logger.info("ansible import", extra=data)

    def v2_runner_on_failed(self, result, **kwargs):
        task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')

        data = self.base_data.copy()
        if 'changed' in result._result.keys():
            data['ansible_changed'] = result._result['changed']
        else:
            data['ansible_changed'] = False

        data['ansible_type'] = "task"
        data['status'] = "FAILED"
        data['ansible_host'] = result._host.name
        data['ansible_play_id'] = self.play_id
        data['ansible_play_name'] = self.play_name
        data['ansible_task'] = task_name
        data['ansible_task_id'] = self.task_id
        data['ansible_result'] = self._dump_results(result._result)

        data = {
            'status': "FAILED",
            'host': self.hostname,
            'session': self.session,
            'ansible_type': "task",
            'ansible_playbook': self.playbook,
            'ansible_host': result._host.name,
            'ansible_task': result._task,
            'ansible_result': self._dump_results(result._result)
        }
        self.errors += 1
        if (self.ls_format_version == "v2"):
            self.logger.error(
                "TASK FAILED | %s | HOST | %s | RESULT | %s",
                task_name, self.hostname,
                self._dump_results(result._result), extra=data
            )
        else:
            self.logger.error("ansible failed", extra=data)
        self.logger.error("ansible failed", extra=data)

    def v2_runner_on_unreachable(self, result, **kwargs):
        task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')

        data = self.base_data.copy()
        data['ansible_type'] = "task"
        data['status'] = "UNREACHABLE"
        data['ansible_host'] = result._host.name
        data['ansible_play_id'] = self.play_id
        data['ansible_play_name'] = self.play_name
        data['ansible_task'] = task_name
        data['ansible_task_id'] = self.task_id
        data['ansible_result'] = self._dump_results(result._result)

        self.errors += 1
        if (self.ls_format_version == "v2"):
            self.logger.error(
                "UNREACHABLE | %s | HOST | %s | RESULT | %s",
                task_name, self.hostname,
                self._dump_results(result._result), extra=data
            )
        else:
            self.logger.error("ansible unreachable", extra=data)
        data = {
            'status': "UNREACHABLE",
            'host': self.hostname,
            'session': self.session,
            'ansible_type': "task",
            'ansible_playbook': self.playbook,
            'ansible_host': result._host.name,
            'ansible_task': result._task,
            'ansible_result': self._dump_results(result._result)
        }
        self.logger.error("ansible unreachable", extra=data)

    def v2_runner_on_async_failed(self, result, **kwargs):
        task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')

        data = self.base_data.copy()
        data['ansible_type'] = "task"
        data['status'] = "FAILED"
        data['ansible_host'] = result._host.name
        data['ansible_play_id'] = self.play_id
        data['ansible_play_name'] = self.play_name
        data['ansible_task'] = task_name
        data['ansible_task_id'] = self.task_id
        data['ansible_result'] = self._dump_results(result._result)

        data = {
            'status': "FAILED",
            'host': self.hostname,
            'session': self.session,
            'ansible_type': "task",
            'ansible_playbook': self.playbook,
            'ansible_host': result._host.name,
            'ansible_task': result._task,
            'ansible_result': self._dump_results(result._result)
        }
        self.errors += 1
        if (self.ls_format_version == "v2"):
            self.logger.error(
                "ASYNC FAILED | %s | HOST | %s | RESULT | %s",
                task_name, self.hostname,
                self._dump_results(result._result), extra=data
            )
        else:
            self.logger.error("ansible async", extra=data)
        self.logger.error("ansible async", extra=data)
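Note: a minimal standalone sketch (not part of the diff) of the shared-base-event pattern the v2-format logstash plugin uses above — every hook copies base_data so per-event keys never leak between log records. Field names mirror the plugin; the logger wiring is simplified and the values are illustrative.

    import logging
    import socket
    import uuid

    logger = logging.getLogger('python-logstash-logger')

    # built once in _init_plugin(); shared by every event
    base_data = {
        'session': str(uuid.uuid4()),
        'host': socket.gethostname(),
    }

    def emit(ansible_type, status, **fields):
        data = base_data.copy()          # fresh dict per event
        data['ansible_type'] = ansible_type
        data['status'] = status
        data.update(fields)
        logger.info("ansible %s" % ansible_type, extra=data)

    emit("start", "OK", ansible_playbook="site.yml")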
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    name: mail
    callback: mail
    type: notification
    short_description: Sends failure events via email
    description:
@@ -62,7 +62,7 @@ import re
import smtplib

from ansible.module_utils.six import string_types
from ansible.module_utils.common.text.converters import to_bytes
from ansible.module_utils._text import to_bytes
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase

@@ -100,21 +100,28 @@ class CallbackModule(CallbackBase):

        smtp = smtplib.SMTP(self.smtphost, port=self.smtpport)

        content = 'From: %s\n' % self.sender
        content += 'To: %s\n' % self.to
        if self.cc:
            content += 'Cc: %s\n' % self.cc
        content += 'Subject: %s\n\n' % subject.strip()
        content += body
        b_sender = to_bytes(self.sender)
        b_to = to_bytes(self.to)
        b_cc = to_bytes(self.cc)
        b_bcc = to_bytes(self.bcc)
        b_subject = to_bytes(subject)
        b_body = to_bytes(body)

        addresses = self.to.split(',')
        b_content = b'From: %s\n' % b_sender
        b_content += b'To: %s\n' % b_to
        if self.cc:
            addresses += self.cc.split(',')
            b_content += b'Cc: %s\n' % b_cc
        b_content += b'Subject: %s\n\n' % b_subject
        b_content += b_body

        b_addresses = b_to.split(b',')
        if self.cc:
            b_addresses += b_cc.split(b',')
        if self.bcc:
            addresses += self.bcc.split(',')
            b_addresses += b_bcc.split(b',')

        for address in addresses:
            smtp.sendmail(self.sender, address, to_bytes(content))
        for b_address in b_addresses:
            smtp.sendmail(b_sender, b_address, b_content)

        smtp.quit()
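Note: a hedged sketch of the byte-safe send shown in the hunk above — headers and body are converted with to_bytes before smtplib sees them, so non-ASCII senders or subjects cannot crash an implicit str-to-bytes conversion. The function name and argument values are illustrative; cc/bcc handling is omitted for brevity.

    import smtplib
    from ansible.module_utils._text import to_bytes  # converters path also works on newer cores

    def send_mail(smtphost, sender, to, subject, body, port=25):
        smtp = smtplib.SMTP(smtphost, port=port)
        # build the whole message as bytes, mirroring the plugin
        b_content = b'From: %s\n' % to_bytes(sender)
        b_content += b'To: %s\n' % to_bytes(to)
        b_content += b'Subject: %s\n\n' % to_bytes(subject.strip())
        b_content += to_bytes(body)
        for b_address in to_bytes(to).split(b','):
            smtp.sendmail(to_bytes(sender), b_address, b_content)
        smtp.quit()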
@@ -7,26 +7,25 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    name: nrdp
    callback: nrdp
    type: notification
    author: "Remi VERCHERE (@rverchere)"
    short_description: Post task results to a Nagios server through nrdp
    short_description: post task result to a nagios server through nrdp
    description:
      - This callback send playbook result to Nagios.
      - Nagios shall use NRDP to recive passive events.
      - The passive check is sent to a dedicated host/service for Ansible.
      - this callback send playbook result to nagios
      - nagios shall use NRDP to recive passive events
      - the passive check is sent to a dedicated host/service for ansible
    options:
      url:
        description: URL of the nrdp server.
        required: true
        description: url of the nrdp server
        required: True
        env:
          - name : NRDP_URL
        ini:
          - section: callback_nrdp
            key: url
        type: string
      validate_certs:
        description: Validate the SSL certificate of the nrdp server. (Used for HTTPS URLs.)
        description: (bool) validate the SSL certificate of the nrdp server. (For HTTPS url)
        env:
          - name: NRDP_VALIDATE_CERTS
        ini:
@@ -34,43 +33,38 @@ DOCUMENTATION = '''
            key: validate_nrdp_certs
          - section: callback_nrdp
            key: validate_certs
        type: boolean
        default: false
        default: False
        aliases: [ validate_nrdp_certs ]
      token:
        description: Token to be allowed to push nrdp events.
        required: true
        description: token to be allowed to push nrdp events
        required: True
        env:
          - name: NRDP_TOKEN
        ini:
          - section: callback_nrdp
            key: token
        type: string
      hostname:
        description: Hostname where the passive check is linked to.
        required: true
        description: hostname where the passive check is linked to
        required: True
        env:
          - name : NRDP_HOSTNAME
        ini:
          - section: callback_nrdp
            key: hostname
        type: string
      servicename:
        description: Service where the passive check is linked to.
        required: true
        description: service where the passive check is linked to
        required: True
        env:
          - name : NRDP_SERVICENAME
        ini:
          - section: callback_nrdp
            key: servicename
        type: string
'''

import os
import json

from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.common.text.converters import to_bytes
from ansible.module_utils.urls import open_url
from ansible.plugins.callback import CallbackBase

@@ -144,7 +138,7 @@ class CallbackModule(CallbackBase):
        body = {
            'cmd': 'submitcheck',
            'token': self.token,
            'XMLDATA': to_bytes(xmldata)
            'XMLDATA': bytes(xmldata)
        }

        try:
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

@@ -8,7 +7,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Unknown (!UNKNOWN)
    name: 'null'
    callback: 'null'
    type: stdout
    requirements:
      - set as main display callback
@@ -1,445 +0,0 @@
# -*- coding: utf-8 -*-
# (C) 2021, Victor Martinez <VictorMartinezRubio@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    author: Victor Martinez (@v1v) <VictorMartinezRubio@gmail.com>
    name: opentelemetry
    type: notification
    short_description: Create distributed traces with OpenTelemetry
    version_added: 3.7.0
    description:
      - This callback creates distributed traces for each Ansible task with OpenTelemetry.
      - You can configure the OpenTelemetry exporter and SDK with environment variables.
      - See U(https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html).
      - See U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#opentelemetry-sdk-environment-variables).
    options:
      hide_task_arguments:
        default: false
        type: bool
        description:
          - Hide the arguments for a task.
        env:
          - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS
      enable_from_environment:
        type: str
        description:
          - Whether to enable this callback only if the given environment variable exists and it is set to C(true).
          - This is handy when you use Configuration as Code and want to send distributed traces
            if running in the CI rather when running Ansible locally.
          - For such, it evaluates the given I(enable_from_environment) value as environment variable
            and if set to true this plugin will be enabled.
        env:
          - name: ANSIBLE_OPENTELEMETRY_ENABLE_FROM_ENVIRONMENT
        version_added: 3.8.0
      otel_service_name:
        default: ansible
        type: str
        description:
          - The service name resource attribute.
        env:
          - name: OTEL_SERVICE_NAME
      traceparent:
        default: None
        type: str
        description:
          - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header).
        env:
          - name: TRACEPARENT
    requirements:
      - opentelemetry-api (Python library)
      - opentelemetry-exporter-otlp (Python library)
      - opentelemetry-sdk (Python library)
'''


EXAMPLES = '''
examples: |
  Enable the plugin in ansible.cfg:
    [defaults]
    callbacks_enabled = community.general.opentelemetry

  Set the environment variable:
    export OTEL_EXPORTER_OTLP_ENDPOINT=<your endpoint (OTLP/HTTP)>
    export OTEL_EXPORTER_OTLP_HEADERS="authorization=Bearer your_otel_token"
    export OTEL_SERVICE_NAME=your_service_name
'''

import getpass
import os
import socket
import sys
import time
import uuid

from collections import OrderedDict
from os.path import basename

from ansible.errors import AnsibleError
from ansible.module_utils.six import raise_from
from ansible.plugins.callback import CallbackBase

try:
    from opentelemetry import trace
    from opentelemetry.trace import SpanKind
    from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
    from opentelemetry.sdk.resources import SERVICE_NAME, Resource
    from opentelemetry.trace.status import Status, StatusCode
    from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import (
        ConsoleSpanExporter,
        SimpleSpanProcessor,
        BatchSpanProcessor
    )
    from opentelemetry.util._time import _time_ns
except ImportError as imp_exc:
    OTEL_LIBRARY_IMPORT_ERROR = imp_exc
else:
    OTEL_LIBRARY_IMPORT_ERROR = None


class TaskData:
    """
    Data about an individual task.
    """

    def __init__(self, uuid, name, path, play, action, args):
        self.uuid = uuid
        self.name = name
        self.path = path
        self.play = play
        self.host_data = OrderedDict()
        if sys.version_info >= (3, 7):
            self.start = time.time_ns()
        else:
            self.start = _time_ns()
        self.action = action
        self.args = args

    def add_host(self, host):
        if host.uuid in self.host_data:
            if host.status == 'included':
                # concatenate task include output from multiple items
                host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result)
            else:
                return

        self.host_data[host.uuid] = host


class HostData:
    """
    Data about an individual host.
    """

    def __init__(self, uuid, name, status, result):
        self.uuid = uuid
        self.name = name
        self.status = status
        self.result = result
        if sys.version_info >= (3, 7):
            self.finish = time.time_ns()
        else:
            self.finish = _time_ns()


class OpenTelemetrySource(object):
    def __init__(self, display):
        self.ansible_playbook = ""
        self.ansible_version = None
        self.session = str(uuid.uuid4())
        self.host = socket.gethostname()
        try:
            self.ip_address = socket.gethostbyname(socket.gethostname())
        except Exception as e:
            self.ip_address = None
        self.user = getpass.getuser()

        self._display = display

    def traceparent_context(self, traceparent):
        carrier = dict()
        carrier['traceparent'] = traceparent
        return TraceContextTextMapPropagator().extract(carrier=carrier)

    def start_task(self, tasks_data, hide_task_arguments, play_name, task):
        """ record the start of a task for one or more hosts """

        uuid = task._uuid

        if uuid in tasks_data:
            return

        name = task.get_name().strip()
        path = task.get_path()
        action = task.action
        args = None

        if not task.no_log and not hide_task_arguments:
            args = ', '.join(('%s=%s' % a for a in task.args.items()))

        tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args)

    def finish_task(self, tasks_data, status, result):
        """ record the results of a task for a single host """

        task_uuid = result._task._uuid

        if hasattr(result, '_host') and result._host is not None:
            host_uuid = result._host._uuid
            host_name = result._host.name
        else:
            host_uuid = 'include'
            host_name = 'include'

        task = tasks_data[task_uuid]

        if self.ansible_version is None and hasattr(result, '_task_fields') and result._task_fields['args'].get('_ansible_version'):
            self.ansible_version = result._task_fields['args'].get('_ansible_version')

        task.add_host(HostData(host_uuid, host_name, status, result))

    def generate_distributed_traces(self, otel_service_name, ansible_playbook, tasks_data, status, traceparent):
        """ generate distributed traces from the collected TaskData and HostData """

        tasks = []
        parent_start_time = None
        for task_uuid, task in tasks_data.items():
            if parent_start_time is None:
                parent_start_time = task.start
            tasks.append(task)

        trace.set_tracer_provider(
            TracerProvider(
                resource=Resource.create({SERVICE_NAME: otel_service_name})
            )
        )

        processor = BatchSpanProcessor(OTLPSpanExporter())

        trace.get_tracer_provider().add_span_processor(processor)

        tracer = trace.get_tracer(__name__)

        with tracer.start_as_current_span(ansible_playbook, context=self.traceparent_context(traceparent),
                                          start_time=parent_start_time, kind=SpanKind.SERVER) as parent:
            parent.set_status(status)
            # Populate trace metadata attributes
            if self.ansible_version is not None:
                parent.set_attribute("ansible.version", self.ansible_version)
            parent.set_attribute("ansible.session", self.session)
            parent.set_attribute("ansible.host.name", self.host)
            if self.ip_address is not None:
                parent.set_attribute("ansible.host.ip", self.ip_address)
            parent.set_attribute("ansible.host.user", self.user)
            for task in tasks:
                for host_uuid, host_data in task.host_data.items():
                    with tracer.start_as_current_span(task.name, start_time=task.start, end_on_exit=False) as span:
                        self.update_span_data(task, host_data, span)

    def update_span_data(self, task_data, host_data, span):
        """ update the span with the given TaskData and HostData """

        name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)

        message = 'success'
        status = Status(status_code=StatusCode.OK)
        if host_data.status == 'included':
            rc = 0
        else:
            res = host_data.result._result
            rc = res.get('rc', 0)
            if host_data.status == 'failed':
                message = self.get_error_message(res)
                status = Status(status_code=StatusCode.ERROR, description=message)
                # Record an exception with the task message
                span.record_exception(BaseException(self.enrich_error_message(res)))
            elif host_data.status == 'skipped':
                if 'skip_reason' in res:
                    message = res['skip_reason']
                else:
                    message = 'skipped'
                status = Status(status_code=StatusCode.UNSET)
            elif host_data.status == 'ignored':
                status = Status(status_code=StatusCode.UNSET)

        span.set_status(status)
        self.set_span_attribute(span, "ansible.task.args", task_data.args)
        self.set_span_attribute(span, "ansible.task.module", task_data.action)
        self.set_span_attribute(span, "ansible.task.message", message)
        self.set_span_attribute(span, "ansible.task.name", name)
        self.set_span_attribute(span, "ansible.task.result", rc)
        self.set_span_attribute(span, "ansible.task.host.name", host_data.name)
        self.set_span_attribute(span, "ansible.task.host.status", host_data.status)
        span.end(end_time=host_data.finish)

    def set_span_attribute(self, span, attributeName, attributeValue):
        """ update the span attribute with the given attribute and value if not None """

        if span is None and self._display is not None:
            self._display.warning('span object is None. Please double check if that is expected.')
        else:
            if attributeValue is not None:
                span.set_attribute(attributeName, attributeValue)

    @staticmethod
    def get_error_message(result):
        if result.get('exception') is not None:
            return OpenTelemetrySource._last_line(result['exception'])
        return result.get('msg', 'failed')

    @staticmethod
    def _last_line(text):
        lines = text.strip().split('\n')
        return lines[-1]

    @staticmethod
    def enrich_error_message(result):
        message = result.get('msg', 'failed')
        exception = result.get('exception')
        stderr = result.get('stderr')
        return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr)


class CallbackModule(CallbackBase):
    """
    This callback creates distributed traces.
    """

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'community.general.opentelemetry'
    CALLBACK_NEEDS_ENABLED = True

    def __init__(self, display=None):
        super(CallbackModule, self).__init__(display=display)
        self.hide_task_arguments = None
        self.otel_service_name = None
        self.ansible_playbook = None
        self.play_name = None
        self.tasks_data = None
        self.errors = 0
        self.disabled = False
        self.traceparent = False

        if OTEL_LIBRARY_IMPORT_ERROR:
            raise_from(
                AnsibleError('The `opentelemetry-api`, `opentelemetry-exporter-otlp` or `opentelemetry-sdk` must be installed to use this plugin'),
                OTEL_LIBRARY_IMPORT_ERROR)

        self.tasks_data = OrderedDict()

        self.opentelemetry = OpenTelemetrySource(display=self._display)

    def set_options(self, task_keys=None, var_options=None, direct=None):
        super(CallbackModule, self).set_options(task_keys=task_keys,
                                                var_options=var_options,
                                                direct=direct)

        environment_variable = self.get_option('enable_from_environment')
        if environment_variable is not None and os.environ.get(environment_variable, 'false').lower() != 'true':
            self.disabled = True
            self._display.warning("The `enable_from_environment` option has been set and {0} is not enabled. "
                                  "Disabling the `opentelemetry` callback plugin.".format(environment_variable))

        self.hide_task_arguments = self.get_option('hide_task_arguments')

        self.otel_service_name = self.get_option('otel_service_name')

        if not self.otel_service_name:
            self.otel_service_name = 'ansible'

        # See https://github.com/open-telemetry/opentelemetry-specification/issues/740
        self.traceparent = self.get_option('traceparent')

    def v2_playbook_on_start(self, playbook):
        self.ansible_playbook = basename(playbook._file_name)

    def v2_playbook_on_play_start(self, play):
        self.play_name = play.get_name()

    def v2_runner_on_no_hosts(self, task):
        self.opentelemetry.start_task(
            self.tasks_data,
            self.hide_task_arguments,
            self.play_name,
            task
        )

    def v2_playbook_on_task_start(self, task, is_conditional):
        self.opentelemetry.start_task(
            self.tasks_data,
            self.hide_task_arguments,
            self.play_name,
            task
        )

    def v2_playbook_on_cleanup_task_start(self, task):
        self.opentelemetry.start_task(
            self.tasks_data,
            self.hide_task_arguments,
            self.play_name,
            task
        )

    def v2_playbook_on_handler_task_start(self, task):
        self.opentelemetry.start_task(
            self.tasks_data,
            self.hide_task_arguments,
            self.play_name,
            task
        )

    def v2_runner_on_failed(self, result, ignore_errors=False):
        if ignore_errors:
            status = 'ignored'
        else:
            status = 'failed'
            self.errors += 1

        self.opentelemetry.finish_task(
            self.tasks_data,
            status,
            result
        )

    def v2_runner_on_ok(self, result):
        self.opentelemetry.finish_task(
            self.tasks_data,
            'ok',
            result
        )

    def v2_runner_on_skipped(self, result):
        self.opentelemetry.finish_task(
            self.tasks_data,
            'skipped',
            result
        )

    def v2_playbook_on_include(self, included_file):
        self.opentelemetry.finish_task(
            self.tasks_data,
            'included',
            included_file
        )

    def v2_playbook_on_stats(self, stats):
        if self.errors == 0:
            status = Status(status_code=StatusCode.OK)
        else:
            status = Status(status_code=StatusCode.ERROR)
        self.opentelemetry.generate_distributed_traces(
            self.otel_service_name,
            self.ansible_playbook,
            self.tasks_data,
            status,
            self.traceparent
        )

    def v2_runner_on_async_failed(self, result, **kwargs):
        self.errors += 1
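Note: a small sketch of what traceparent_context above does — a W3C `traceparent` header is placed in a carrier dict and extracted into an OpenTelemetry Context, so the playbook span continues an externally started trace. The header value below is illustrative.

    from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator

    def traceparent_context(traceparent):
        # carrier mimics incoming HTTP headers for the propagator
        carrier = {'traceparent': traceparent}
        return TraceContextTextMapPropagator().extract(carrier=carrier)

    ctx = traceparent_context('00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01')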
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -9,7 +8,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Unknown (!UNKNOWN)
    name: say
    callback: say
    type: notification
    requirements:
      - whitelisting in configuration
@@ -21,11 +20,11 @@ DOCUMENTATION = '''
      - In 2.8, this callback has been renamed from C(osx_say) into M(community.general.say).
'''

import distutils.spawn
import platform
import subprocess
import os

from ansible.module_utils.common.process import get_bin_path
from ansible.plugins.callback import CallbackBase


@@ -47,24 +46,21 @@ class CallbackModule(CallbackBase):
        self.HAPPY_VOICE = None
        self.LASER_VOICE = None

        try:
            self.synthesizer = get_bin_path('say')
            if platform.system() != 'Darwin':
                # 'say' binary available, it might be GNUstep tool which doesn't support 'voice' parameter
                self._display.warning("'say' executable found but system is '%s': ignoring voice parameter" % platform.system())
            else:
                self.FAILED_VOICE = 'Zarvox'
                self.REGULAR_VOICE = 'Trinoids'
                self.HAPPY_VOICE = 'Cellos'
                self.LASER_VOICE = 'Princess'
        except ValueError:
            try:
                self.synthesizer = get_bin_path('espeak')
        self.synthesizer = distutils.spawn.find_executable('say')
        if not self.synthesizer:
            self.synthesizer = distutils.spawn.find_executable('espeak')
            if self.synthesizer:
                self.FAILED_VOICE = 'klatt'
                self.HAPPY_VOICE = 'f5'
                self.LASER_VOICE = 'whisper'
            except ValueError:
                self.synthesizer = None
        elif platform.system() != 'Darwin':
            # 'say' binary available, it might be GNUstep tool which doesn't support 'voice' parameter
            self._display.warning("'say' executable found but system is '%s': ignoring voice parameter" % platform.system())
        else:
            self.FAILED_VOICE = 'Zarvox'
            self.REGULAR_VOICE = 'Trinoids'
            self.HAPPY_VOICE = 'Cellos'
            self.LASER_VOICE = 'Princess'

        # plugin disable itself if say is not present
        # ansible will not call any callback if disabled is set to True
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) Fastly, inc 2016
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -8,15 +7,15 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Unknown (!UNKNOWN)
    name: selective
    callback: selective
    type: stdout
    requirements:
      - set as main display callback
    short_description: only print certain tasks
    description:
      - This callback only prints tasks that have been tagged with C(print_action) or that have failed.
      - This callback only prints tasks that have been tagged with `print_action` or that have failed.
        This allows operators to focus on the tasks that provide value only.
      - Tasks that are not printed are placed with a C(.).
      - Tasks that are not printed are placed with a '.'.
      - If you increase verbosity all tasks are printed.
    options:
      nocolor:
@@ -41,17 +40,8 @@ import difflib

from ansible import constants as C
from ansible.plugins.callback import CallbackBase
from ansible.module_utils.common.text.converters import to_text

try:
    codeCodes = C.COLOR_CODES
except AttributeError:
    # This constant was moved to ansible.constants in
    # https://github.com/ansible/ansible/commit/1202dd000f10b0e8959019484f1c3b3f9628fc67
    # (will be included in ansible-core 2.11.0). For older Ansible/ansible-base versions,
    # we include from the original location.
    from ansible.utils.color import codeCodes

from ansible.module_utils._text import to_text
from ansible.utils.color import codeCodes

DONT_COLORIZE = False
COLORS = {
@@ -68,7 +58,7 @@ COLORS = {

def dict_diff(prv, nxt):
    """Return a dict of keys that differ with another config object."""
    keys = set(list(prv.keys()) + list(nxt.keys()))
    keys = set(prv.keys() + nxt.keys())
    result = {}
    for k in keys:
        if prv.get(k) != nxt.get(k):
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (C) 2014-2015, Matt Martz <matt@sivel.net>
# (C) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -9,7 +8,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Unknown (!UNKNOWN)
    name: slack
    callback: slack
    type: notification
    requirements:
      - whitelist in configuration
@@ -59,7 +58,7 @@ import os
import uuid

from ansible import context
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils._text import to_text
from ansible.module_utils.urls import open_url
from ansible.plugins.callback import CallbackBase

@@ -18,7 +18,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    name: splunk
    callback: splunk
    type: aggregate
    short_description: Sends task result events to Splunk HTTP Event Collector
    author: "Stuart Hirst (!UNKNOWN) <support@convergingdata.com>"
@@ -57,27 +57,6 @@ DOCUMENTATION = '''
        type: bool
        default: true
        version_added: '1.0.0'
      include_milliseconds:
        description: Whether to include milliseconds as part of the generated timestamp field in the event
                     sent to the Splunk HTTP collector
        env:
          - name: SPLUNK_INCLUDE_MILLISECONDS
        ini:
          - section: callback_splunk
            key: include_milliseconds
        type: bool
        default: false
        version_added: 2.0.0
      batch:
        description:
          - Correlation ID which can be set across multiple playbook executions.
        env:
          - name: SPLUNK_BATCH
        ini:
          - section: callback_splunk
            key: batch
        type: str
        version_added: 3.3.0
'''

EXAMPLES = '''
@@ -117,7 +96,7 @@ class SplunkHTTPCollectorSource(object):
        self.ip_address = socket.gethostbyname(socket.gethostname())
        self.user = getpass.getuser()

    def send_event(self, url, authtoken, validate_certs, include_milliseconds, batch, state, result, runtime):
    def send_event(self, url, authtoken, validate_certs, state, result, runtime):
        if result._task_fields['args'].get('_ansible_check_mode') is True:
            self.ansible_check_mode = True

@@ -136,16 +115,9 @@ class SplunkHTTPCollectorSource(object):
        data = {}
        data['uuid'] = result._task._uuid
        data['session'] = self.session
        if batch is not None:
            data['batch'] = batch
        data['status'] = state

        if include_milliseconds:
            time_format = '%Y-%m-%d %H:%M:%S.%f +0000'
        else:
            time_format = '%Y-%m-%d %H:%M:%S +0000'

        data['timestamp'] = datetime.utcnow().strftime(time_format)
        data['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S '
                                                       '+0000')
        data['host'] = self.host
        data['ip_address'] = self.ip_address
        data['user'] = self.user
@@ -186,8 +158,6 @@ class CallbackModule(CallbackBase):
        self.url = None
        self.authtoken = None
        self.validate_certs = None
        self.include_milliseconds = None
        self.batch = None
        self.splunk = SplunkHTTPCollectorSource()

    def _runtime(self, result):
@@ -223,10 +193,6 @@ class CallbackModule(CallbackBase):

        self.validate_certs = self.get_option('validate_certs')

        self.include_milliseconds = self.get_option('include_milliseconds')

        self.batch = self.get_option('batch')

    def v2_playbook_on_start(self, playbook):
        self.splunk.ansible_playbook = basename(playbook._file_name)

@@ -241,8 +207,6 @@ class CallbackModule(CallbackBase):
            self.url,
            self.authtoken,
            self.validate_certs,
            self.include_milliseconds,
            self.batch,
            'OK',
            result,
            self._runtime(result)
@@ -253,8 +217,6 @@ class CallbackModule(CallbackBase):
            self.url,
            self.authtoken,
            self.validate_certs,
            self.include_milliseconds,
            self.batch,
            'SKIPPED',
            result,
            self._runtime(result)
@@ -265,8 +227,6 @@ class CallbackModule(CallbackBase):
            self.url,
            self.authtoken,
            self.validate_certs,
            self.include_milliseconds,
            self.batch,
            'FAILED',
            result,
            self._runtime(result)
@@ -277,8 +237,6 @@ class CallbackModule(CallbackBase):
            self.url,
            self.authtoken,
            self.validate_certs,
            self.include_milliseconds,
            self.batch,
            'FAILED',
            result,
            self._runtime(result)
@@ -289,8 +247,6 @@ class CallbackModule(CallbackBase):
            self.url,
            self.authtoken,
            self.validate_certs,
            self.include_milliseconds,
            self.batch,
            'UNREACHABLE',
            result,
            self._runtime(result)

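Note: a standalone sketch of the include_milliseconds switch in the hunk above — the only difference is the strftime pattern ('%f' actually appends microseconds, which the option rounds under the "milliseconds" name). The function name is illustrative.

    from datetime import datetime

    def event_timestamp(include_milliseconds=False):
        if include_milliseconds:
            time_format = '%Y-%m-%d %H:%M:%S.%f +0000'
        else:
            time_format = '%Y-%m-%d %H:%M:%S +0000'
        return datetime.utcnow().strftime(time_format)

    print(event_timestamp(include_milliseconds=True))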
71 plugins/callback/stderr.py (new file)
@@ -0,0 +1,71 @@
# (c) 2017, Frederic Van Espen <github@freh.be>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    author: Unknown (!UNKNOWN)
    callback: stderr
    type: stdout
    requirements:
      - set as main display callback
    short_description: Splits output, sending failed tasks to stderr
    deprecated:
        why: The 'default' callback plugin now supports this functionality
        removed_in: '2.0.0'  # was Ansible 2.11
        alternative: "'default' callback plugin with 'display_failed_stderr = yes' option"
    extends_documentation_fragment:
      - default_callback
    description:
      - This is the stderr callback plugin, it behaves like the default callback plugin but sends error output to stderr.
      - Also it does not output skipped host/task/item status
'''

from ansible import constants as C
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default


class CallbackModule(CallbackModule_default):

    '''
    This is the stderr callback plugin, which reuses the default
    callback plugin but sends error output to stderr.
    '''

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'community.general.stderr'

    def __init__(self):

        self.super_ref = super(CallbackModule, self)
        self.super_ref.__init__()

    def v2_runner_on_failed(self, result, ignore_errors=False):

        delegated_vars = result._result.get('_ansible_delegated_vars', None)
        self._clean_results(result._result, result._task.action)

        if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
            self._print_task_banner(result._task)

        self._handle_exception(result._result, use_stderr=True)
        self._handle_warnings(result._result)

        if result._task.loop and 'results' in result._result:
            self._process_items(result)

        else:
            if delegated_vars:
                self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
                                                                            self._dump_results(result._result)), color=C.COLOR_ERROR,
                                      stderr=True)
            else:
                self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)),
                                      color=C.COLOR_ERROR, stderr=True)

        if ignore_errors:
            self._display.display("...ignoring", color=C.COLOR_SKIP)
@@ -18,7 +18,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    name: sumologic
    callback: sumologic
    type: aggregate
    short_description: Sends task result events to Sumologic
    author: "Ryan Currah (@ryancurrah)"
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

@@ -8,7 +7,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Unknown (!UNKNOWN)
    name: syslog_json
    callback: syslog_json
    type: notification
    requirements:
      - whitelist in configuration
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Allyson Bowles <@akatch>
# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -8,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    name: unixy
    callback: unixy
    type: stdout
    author: Allyson Bowles (@akatch)
    short_description: condensed Ansible output
@@ -23,7 +22,7 @@ DOCUMENTATION = '''
from os.path import basename
from ansible import constants as C
from ansible import context
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils._text import to_text
from ansible.utils.color import colorize, hostcolor
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default


@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

@@ -8,7 +7,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Unknown (!UNKNOWN)
    name: yaml
    callback: yaml
    type: stdout
    short_description: yaml-ized Ansible screen output
    description:
@@ -26,7 +25,7 @@ import re
import string
import sys

from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.six import string_types
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.callback import CallbackBase, strip_internal_keys, module_response_deepcopy
@@ -42,29 +41,28 @@ def should_use_block(value):
    return False


class MyDumper(AnsibleDumper):
    def represent_scalar(self, tag, value, style=None):
        """Uses block style for multi-line strings"""
        if style is None:
            if should_use_block(value):
                style = '|'
                # we care more about readable than accuracy, so...
                # ...no trailing space
                value = value.rstrip()
                # ...and non-printable characters
                value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
                # ...tabs prevent blocks from expanding
                value = value.expandtabs()
                # ...and odd bits of whitespace
                value = re.sub(r'[\x0b\x0c\r]', '', value)
                # ...as does trailing space
                value = re.sub(r' +\n', '\n', value)
            else:
                style = self.default_style
        node = yaml.representer.ScalarNode(tag, value, style=style)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        return node
def my_represent_scalar(self, tag, value, style=None):
    """Uses block style for multi-line strings"""
    if style is None:
        if should_use_block(value):
            style = '|'
            # we care more about readable than accuracy, so...
            # ...no trailing space
            value = value.rstrip()
            # ...and non-printable characters
            value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
            # ...tabs prevent blocks from expanding
            value = value.expandtabs()
            # ...and odd bits of whitespace
            value = re.sub(r'[\x0b\x0c\r]', '', value)
            # ...as does trailing space
            value = re.sub(r' +\n', '\n', value)
        else:
            style = self.default_style
    node = yaml.representer.ScalarNode(tag, value, style=style)
    if self.alias_key is not None:
        self.represented_objects[self.alias_key] = node
    return node


class CallbackModule(Default):
@@ -80,6 +78,7 @@ class CallbackModule(Default):

    def __init__(self):
        super(CallbackModule, self).__init__()
        yaml.representer.BaseRepresenter.represent_scalar = my_represent_scalar

    def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
        if result.get('_ansible_no_log', False):
@@ -121,7 +120,7 @@ class CallbackModule(Default):

        if abridged_result:
            dumped += '\n'
            dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=MyDumper, default_flow_style=False))
            dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False))

        # indent by a couple of spaces
        dumped = '\n  '.join(dumped.split('\n')).rstrip()
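Note: a minimal sketch of the block-style idea in the hunk above, using plain PyYAML instead of AnsibleDumper — multi-line strings are emitted as '|' literal blocks rather than quoted strings with '\n' escapes. The presenter name is illustrative.

    import yaml

    def str_presenter(dumper, value):
        # use a literal block only when the scalar spans multiple lines
        style = '|' if '\n' in value else None
        return dumper.represent_scalar('tag:yaml.org,2002:str', value, style=style)

    yaml.add_representer(str, str_presenter)
    print(yaml.dump({'stdout': 'line one\nline two\n'}, default_flow_style=False))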
@@ -1,4 +1,3 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
|
||||
@@ -11,7 +10,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Maykel Moya (!UNKNOWN) <mmoya@speedyrails.com>
|
||||
name: chroot
|
||||
connection: chroot
|
||||
short_description: Interact with local chroot
|
||||
description:
|
||||
- Run commands or put/fetch files to an existing chroot on the Ansible controller.
|
||||
@@ -55,7 +54,7 @@ from ansible.errors import AnsibleError
|
||||
from ansible.module_utils.basic import is_executable
|
||||
from ansible.module_utils.common.process import get_bin_path
|
||||
from ansible.module_utils.six.moves import shlex_quote
|
||||
from ansible.module_utils.common.text.converters import to_bytes, to_native
|
||||
from ansible.module_utils._text import to_bytes, to_native
|
||||
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
||||
from ansible.utils.display import Display
|
||||
|
||||
@@ -63,7 +62,7 @@ display = Display()
|
||||
|
||||
|
||||
class Connection(ConnectionBase):
|
||||
""" Local chroot based connections """
|
||||
''' Local chroot based connections '''
|
||||
|
||||
transport = 'community.general.chroot'
|
||||
has_pipelining = True
|
||||
@@ -96,7 +95,7 @@ class Connection(ConnectionBase):
|
||||
raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
|
||||
|
||||
def _connect(self):
|
||||
""" connect to the chroot """
|
||||
''' connect to the chroot '''
|
||||
if os.path.isabs(self.get_option('chroot_exe')):
|
||||
self.chroot_cmd = self.get_option('chroot_exe')
|
||||
else:
|
||||
@@ -111,17 +110,17 @@ class Connection(ConnectionBase):
|
||||
self._connected = True
|
||||
|
||||
def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
|
||||
""" run a command on the chroot. This is only needed for implementing
|
||||
''' run a command on the chroot. This is only needed for implementing
|
||||
put_file() get_file() so that we don't have to read the whole file
|
||||
into memory.
|
||||
|
||||
compared to exec_command() it looses some niceties like being able to
|
||||
return the process's exit code immediately.
|
||||
"""
|
||||
'''
|
||||
executable = self.get_option('executable')
|
||||
local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
|
||||
|
||||
display.vvv("EXEC %s" % local_cmd, host=self.chroot)
|
||||
display.vvv("EXEC %s" % (local_cmd), host=self.chroot)
|
||||
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
||||
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
@@ -129,17 +128,16 @@ class Connection(ConnectionBase):
|
||||
return p
|
||||
|
||||
def exec_command(self, cmd, in_data=None, sudoable=False):
|
||||
""" run a command on the chroot """
|
||||
''' run a command on the chroot '''
|
||||
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
|
||||
|
||||
p = self._buffered_exec_command(cmd)
|
||||
|
||||
stdout, stderr = p.communicate(in_data)
|
||||
return p.returncode, stdout, stderr
|
||||
return (p.returncode, stdout, stderr)
|
||||
|
||||
@staticmethod
|
||||
def _prefix_login_path(remote_path):
|
||||
""" Make sure that we put files into a standard path
|
||||
def _prefix_login_path(self, remote_path):
|
||||
''' Make sure that we put files into a standard path
|
||||
|
||||
If a path is relative, then we need to choose where to put it.
|
||||
ssh chooses $HOME but we aren't guaranteed that a home dir will
|
||||
@@ -147,13 +145,13 @@ class Connection(ConnectionBase):
|
||||
This also happens to be the former default.
|
||||
|
||||
Can revisit using $HOME instead if it's a problem
|
||||
"""
|
||||
'''
|
||||
if not remote_path.startswith(os.path.sep):
|
||||
remote_path = os.path.join(os.path.sep, remote_path)
|
||||
return os.path.normpath(remote_path)
|
||||
|
||||
def put_file(self, in_path, out_path):
|
||||
""" transfer a file from local to chroot """
|
||||
''' transfer a file from local to chroot '''
|
||||
super(Connection, self).put_file(in_path, out_path)
|
||||
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)
|
||||
|
||||
@@ -179,7 +177,7 @@ class Connection(ConnectionBase):
|
||||
raise AnsibleError("file or module does not exist at: %s" % in_path)
|
||||
|
||||
def fetch_file(self, in_path, out_path):
|
||||
""" fetch a file from chroot to local """
|
||||
''' fetch a file from chroot to local '''
|
||||
super(Connection, self).fetch_file(in_path, out_path)
|
||||
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)
|
||||
|
||||
@@ -203,6 +201,6 @@ class Connection(ConnectionBase):
|
||||
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
|
||||
|
||||
def close(self):
|
||||
""" terminate the connection; nothing to do here """
|
||||
''' terminate the connection; nothing to do here '''
|
||||
super(Connection, self).close()
|
||||
self._connected = False
|
||||
|
||||
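A note on `_prefix_login_path` above: relative paths are anchored at `/` rather than `$HOME`, since a chroot is not guaranteed to contain a home directory. A minimal standalone sketch of that normalization (the function is renamed here purely for illustration):

import os.path

def prefix_login_path(remote_path):
    # Relative paths are anchored at "/" because the chroot may have no $HOME.
    if not remote_path.startswith(os.path.sep):
        remote_path = os.path.join(os.path.sep, remote_path)
    # normpath also collapses "a/../b" and duplicate separators.
    return os.path.normpath(remote_path)

print(prefix_login_path('etc/hosts'))      # -> /etc/hosts
print(prefix_login_path('/tmp//x/../y'))   # -> /tmp/y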
plugins/connection/docker.py (new file, 366 lines)
@@ -0,0 +1,366 @@
# Based on the chroot connection plugin by Maykel Moya
#
# (c) 2014, Lorin Hochstein
# (c) 2015, Leendert Brouwer (https://github.com/objectified)
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    author:
        - Lorin Hochestein (!UNKNOWN)
        - Leendert Brouwer (!UNKNOWN)
    connection: docker
    short_description: Run tasks in docker containers
    description:
        - Run commands or put/fetch files to an existing docker container.
    options:
        remote_user:
            description:
                - The user to execute as inside the container
            vars:
                - name: ansible_user
                - name: ansible_docker_user
        docker_extra_args:
            description:
                - Extra arguments to pass to the docker command line
            default: ''
        remote_addr:
            description:
                - The name of the container you want to access.
            default: inventory_hostname
            vars:
                - name: ansible_host
                - name: ansible_docker_host
'''

import distutils.spawn
import fcntl
import os
import os.path
import subprocess
import re

from distutils.version import LooseVersion

import ansible.constants as C
from ansible.compat import selectors
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase, BUFSIZE
from ansible.utils.display import Display

display = Display()


class Connection(ConnectionBase):
    ''' Local docker based connections '''

    transport = 'community.general.docker'
    has_pipelining = True

    def __init__(self, play_context, new_stdin, *args, **kwargs):
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)

        # Note: docker supports running as non-root in some configurations.
        # (For instance, setting the UNIX socket file to be readable and
        # writable by a specific UNIX group and then putting users into that
        # group). Therefore we don't check that the user is root when using
        # this connection. But if the user is getting a permission denied
        # error it probably means that docker on their system is only
        # configured to be connected to by root and they are not running as
        # root.

        # Windows uses Powershell modules
        if getattr(self._shell, "_IS_WINDOWS", False):
            self.module_implementation_preferences = ('.ps1', '.exe', '')

        if 'docker_command' in kwargs:
            self.docker_cmd = kwargs['docker_command']
        else:
            self.docker_cmd = distutils.spawn.find_executable('docker')
            if not self.docker_cmd:
                raise AnsibleError("docker command not found in PATH")

        docker_version = self._get_docker_version()
        if docker_version == u'dev':
            display.warning(u'Docker version number is "dev". Will assume latest version.')
        if docker_version != u'dev' and LooseVersion(docker_version) < LooseVersion(u'1.3'):
            raise AnsibleError('docker connection type requires docker 1.3 or higher')

        # The remote user we will request from docker (if supported)
        self.remote_user = None
        # The actual user which will execute commands in docker (if known)
        self.actual_user = None

        if self._play_context.remote_user is not None:
            if docker_version == u'dev' or LooseVersion(docker_version) >= LooseVersion(u'1.7'):
                # Support for specifying the exec user was added in docker 1.7
                self.remote_user = self._play_context.remote_user
                self.actual_user = self.remote_user
            else:
                self.actual_user = self._get_docker_remote_user()

                if self.actual_user != self._play_context.remote_user:
                    display.warning(u'docker {0} does not support remote_user, using container default: {1}'
                                    .format(docker_version, self.actual_user or u'?'))
        elif self._display.verbosity > 2:
            # Since we're not setting the actual_user, look it up so we have it for logging later
            # Only do this if display verbosity is high enough that we'll need the value
            # This saves overhead from calling into docker when we don't need to
            self.actual_user = self._get_docker_remote_user()

    @staticmethod
    def _sanitize_version(version):
        version = re.sub(u'[^0-9a-zA-Z.]', u'', version)
        version = re.sub(u'^v', u'', version)
        return version
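`_sanitize_version` strips quoting, whitespace, and a leading `v` from whatever the docker CLI prints, so the result can be fed to `LooseVersion`. The same two substitutions in isolation (sample inputs are illustrative):

import re

def sanitize_version(version):
    # Drop anything that is not alphanumeric or a dot (quotes, newlines, dashes),
    # then drop a leading "v" so the remainder compares cleanly as a version.
    version = re.sub(u'[^0-9a-zA-Z.]', u'', version)
    version = re.sub(u'^v', u'', version)
    return version

assert sanitize_version("'v1.13.1'\n") == '1.13.1'
assert sanitize_version('17.03.0-ce') == '17.03.0ce'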
    def _old_docker_version(self):
        cmd_args = []
        if self._play_context.docker_extra_args:
            cmd_args += self._play_context.docker_extra_args.split(' ')

        old_version_subcommand = ['version']

        old_docker_cmd = [self.docker_cmd] + cmd_args + old_version_subcommand
        p = subprocess.Popen(old_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        cmd_output, err = p.communicate()

        return old_docker_cmd, to_native(cmd_output), err, p.returncode

    def _new_docker_version(self):
        # no result yet, must be newer Docker version
        cmd_args = []
        if self._play_context.docker_extra_args:
            cmd_args += self._play_context.docker_extra_args.split(' ')

        new_version_subcommand = ['version', '--format', "'{{.Server.Version}}'"]

        new_docker_cmd = [self.docker_cmd] + cmd_args + new_version_subcommand
        p = subprocess.Popen(new_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        cmd_output, err = p.communicate()
        return new_docker_cmd, to_native(cmd_output), err, p.returncode

    def _get_docker_version(self):

        cmd, cmd_output, err, returncode = self._old_docker_version()
        if returncode == 0:
            for line in to_text(cmd_output, errors='surrogate_or_strict').split(u'\n'):
                if line.startswith(u'Server version:'):  # old docker versions
                    return self._sanitize_version(line.split()[2])

        cmd, cmd_output, err, returncode = self._new_docker_version()
        if returncode:
            raise AnsibleError('Docker version check (%s) failed: %s' % (to_native(cmd), to_native(err)))

        return self._sanitize_version(to_text(cmd_output, errors='surrogate_or_strict'))

    def _get_docker_remote_user(self):
        """ Get the default user configured in the docker container """
        p = subprocess.Popen([self.docker_cmd, 'inspect', '--format', '{{.Config.User}}', self._play_context.remote_addr],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        out, err = p.communicate()
        out = to_text(out, errors='surrogate_or_strict')

        if p.returncode != 0:
            display.warning(u'unable to retrieve default user from docker container: %s %s' % (out, to_text(err)))
            return None

        # The default exec user is root, unless it was changed in the Dockerfile with USER
        return out.strip() or u'root'

    def _build_exec_cmd(self, cmd):
        """ Build the local docker exec command to run cmd on remote_host

            If remote_user is available and is supported by the docker
            version we are using, it will be provided to docker exec.
        """

        local_cmd = [self.docker_cmd]

        if self._play_context.docker_extra_args:
            local_cmd += self._play_context.docker_extra_args.split(' ')

        local_cmd += [b'exec']

        if self.remote_user is not None:
            local_cmd += [b'-u', self.remote_user]

        # -i is needed to keep stdin open which allows pipelining to work
        local_cmd += [b'-i', self._play_context.remote_addr] + cmd

        return local_cmd
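`_build_exec_cmd` prepends any `docker_extra_args`, an optional `-u <remote_user>`, and `-i` (to keep stdin open for pipelining) to the command. Roughly, the argv it produces looks like this; every name and value below is hypothetical, and plain str is used where the plugin mixes in bytes before normalizing everything with `to_bytes()`:

# Hypothetical values, for illustration only.
docker_cmd = '/usr/bin/docker'
docker_extra_args = '--host tcp://192.0.2.23:2376'
remote_user = 'app'
remote_addr = 'web-1'
cmd = ['/bin/sh', '-c', 'echo hello']

local_cmd = [docker_cmd]
local_cmd += docker_extra_args.split(' ')
local_cmd += ['exec', '-u', remote_user, '-i', remote_addr] + cmd
print(local_cmd)
# ['/usr/bin/docker', '--host', 'tcp://192.0.2.23:2376',
#  'exec', '-u', 'app', '-i', 'web-1', '/bin/sh', '-c', 'echo hello']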
    def _connect(self, port=None):
        """ Connect to the container. Nothing to do """
        super(Connection, self)._connect()
        if not self._connected:
            display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
                self.actual_user or u'?'), host=self._play_context.remote_addr
            )
            self._connected = True

    def exec_command(self, cmd, in_data=None, sudoable=False):
        """ Run a command on the docker host """
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd])

        display.vvv(u"EXEC {0}".format(to_text(local_cmd)), host=self._play_context.remote_addr)
        display.debug("opening command with Popen()")

        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]

        p = subprocess.Popen(
            local_cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        display.debug("done running command with Popen()")

        if self.become and self.become.expect_prompt() and sudoable:
            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
            selector = selectors.DefaultSelector()
            selector.register(p.stdout, selectors.EVENT_READ)
            selector.register(p.stderr, selectors.EVENT_READ)

            become_output = b''
            try:
                while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
                    events = selector.select(self._play_context.timeout)
                    if not events:
                        stdout, stderr = p.communicate()
                        raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))

                    for key, event in events:
                        if key.fileobj == p.stdout:
                            chunk = p.stdout.read()
                        elif key.fileobj == p.stderr:
                            chunk = p.stderr.read()

                    if not chunk:
                        stdout, stderr = p.communicate()
                        raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
                    become_output += chunk
            finally:
                selector.close()

            if not self.become.check_success(become_output):
                become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
                p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)

        display.debug("getting output with communicate()")
        stdout, stderr = p.communicate(in_data)
        display.debug("done communicating")

        display.debug("done with docker.exec_command()")
        return (p.returncode, stdout, stderr)

    def _prefix_login_path(self, remote_path):
        ''' Make sure that we put files into a standard path

            If a path is relative, then we need to choose where to put it.
            ssh chooses $HOME but we aren't guaranteed that a home dir will
            exist in any given chroot. So for now we're choosing "/" instead.
            This also happens to be the former default.

            Can revisit using $HOME instead if it's a problem
        '''
        if getattr(self._shell, "_IS_WINDOWS", False):
            import ntpath
            return ntpath.normpath(remote_path)
        else:
            if not remote_path.startswith(os.path.sep):
                remote_path = os.path.join(os.path.sep, remote_path)
            return os.path.normpath(remote_path)

    def put_file(self, in_path, out_path):
        """ Transfer a file from local to docker container """
        super(Connection, self).put_file(in_path, out_path)
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)

        out_path = self._prefix_login_path(out_path)
        if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
            raise AnsibleFileNotFound(
                "file or module does not exist: %s" % to_native(in_path))

        out_path = shlex_quote(out_path)
        # Older docker doesn't have native support for copying files into
        # running containers, so we use docker exec to implement this
        # Although docker version 1.8 and later provide support, the
        # owner and group of the files are always set to root
        with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
            if not os.fstat(in_file.fileno()).st_size:
                count = ' count=0'
            else:
                count = ''
            args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)])
            args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
            try:
                p = subprocess.Popen(args, stdin=in_file,
                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            except OSError:
                raise AnsibleError("docker connection requires dd command in the container to put files")
            stdout, stderr = p.communicate()

            if p.returncode != 0:
                raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" %
                                   (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))

    def fetch_file(self, in_path, out_path):
        """ Fetch a file from container to local. """
        super(Connection, self).fetch_file(in_path, out_path)
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)

        in_path = self._prefix_login_path(in_path)
        # out_path is the final file path, but docker takes a directory, not a
        # file path
        out_dir = os.path.dirname(out_path)

        args = [self.docker_cmd, "cp", "%s:%s" % (self._play_context.remote_addr, in_path), out_dir]
        args = [to_bytes(i, errors='surrogate_or_strict') for i in args]

        p = subprocess.Popen(args, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.communicate()

        if getattr(self._shell, "_IS_WINDOWS", False):
            import ntpath
            actual_out_path = ntpath.join(out_dir, ntpath.basename(in_path))
        else:
            actual_out_path = os.path.join(out_dir, os.path.basename(in_path))

        if p.returncode != 0:
            # Older docker doesn't have native support for fetching files command `cp`
            # If `cp` fails, try to use `dd` instead
            args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
            args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
            with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file:
                try:
                    p = subprocess.Popen(args, stdin=subprocess.PIPE,
                                         stdout=out_file, stderr=subprocess.PIPE)
                except OSError:
                    raise AnsibleError("docker connection requires dd command in the container to put files")
                stdout, stderr = p.communicate()

                if p.returncode != 0:
                    raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))

        # Rename if needed
        if actual_out_path != out_path:
            os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict'))

    def close(self):
        """ Terminate the connection. Nothing to do for Docker"""
        super(Connection, self).close()
        self._connected = False
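Both transfer paths above can fall back to streaming through `dd` inside the container, since old docker releases lack a usable `docker cp`. A reduced sketch of that fallback, assuming a reachable container named `web-1` and the illustrative file paths below:

import subprocess

BUFSIZE = 65536  # the plugin imports this constant from ansible.plugins.connection

# put: stream a local file into `dd of=...` running inside the container
put_args = ['docker', 'exec', '-i', 'web-1', '/bin/sh', '-c',
            'dd of=/tmp/remote.txt bs=%s' % BUFSIZE]
with open('local.txt', 'rb') as in_file:
    subprocess.Popen(put_args, stdin=in_file,
                     stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()

# fetch: read the remote file with `dd if=...` and write its stdout locally
fetch_args = ['docker', 'exec', '-i', 'web-1', '/bin/sh', '-c',
              'dd if=/tmp/remote.txt bs=%s' % BUFSIZE]
with open('local_copy.txt', 'wb') as out_file:
    subprocess.Popen(fetch_args, stdin=subprocess.PIPE,
                     stdout=out_file, stderr=subprocess.PIPE).communicate()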
plugins/connection/funcd.py
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# Copyright (c) 2013, Michael Scherer <misc@zarb.org>
@@ -9,8 +8,8 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    author: Michael Scherer (@mscherer) <misc@zarb.org>
    name: funcd
    author: Michael Scherer (@msherer) <misc@zarb.org>
    connection: funcd
    short_description: Use funcd to connect to target
    description:
        - This transport permits you to use Ansible over Func.
@@ -38,14 +37,13 @@ import tempfile
import shutil

from ansible.errors import AnsibleError
from ansible.plugins.connection import ConnectionBase
from ansible.utils.display import Display

display = Display()


class Connection(ConnectionBase):
    """ Func-based connections """
class Connection(object):
    ''' Func-based connections '''

    has_pipelining = False

@@ -54,7 +52,6 @@ class Connection(ConnectionBase):
        self.host = host
        # port is unused, this goes through func
        self.port = port
        self.client = None

    def connect(self, port=None):
        if not HAVE_FUNC:
@@ -64,32 +61,31 @@ class Connection(ConnectionBase):
        return self

    def exec_command(self, cmd, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
        """ run a command on the remote minion """
        ''' run a command on the remote minion '''

        if in_data:
            raise AnsibleError("Internal Error: this module does not support optimized module pipelining")

        # totally ignores privilege escalation
        display.vvv("EXEC %s" % cmd, host=self.host)
        display.vvv("EXEC %s" % (cmd), host=self.host)
        p = self.client.command.run(cmd)[self.host]
        return p[0], p[1], p[2]
        return (p[0], p[1], p[2])

    @staticmethod
    def _normalize_path(path, prefix):
    def _normalize_path(self, path, prefix):
        if not path.startswith(os.path.sep):
            path = os.path.join(os.path.sep, path)
        normpath = os.path.normpath(path)
        return os.path.join(prefix, normpath[1:])

    def put_file(self, in_path, out_path):
        """ transfer a file from local to remote """
        ''' transfer a file from local to remote '''

        out_path = self._normalize_path(out_path, '/')
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
        self.client.local.copyfile.send(in_path, out_path)

    def fetch_file(self, in_path, out_path):
        """ fetch a file from remote to local """
        ''' fetch a file from remote to local '''

        in_path = self._normalize_path(in_path, '/')
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
@@ -102,5 +98,5 @@ class Connection(ConnectionBase):
        shutil.rmtree(tmpdir)

    def close(self):
        """ terminate the connection; nothing to do here """
        ''' terminate the connection; nothing to do here '''
        pass
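Unlike the chroot variant, `_normalize_path` here re-roots the normalized path under a caller-supplied prefix. Standalone:

import os.path

def normalize_path(path, prefix):
    # Anchor relative paths at "/", normalize, then graft onto the prefix.
    if not path.startswith(os.path.sep):
        path = os.path.join(os.path.sep, path)
    normpath = os.path.normpath(path)
    return os.path.join(prefix, normpath[1:])

print(normalize_path('etc/motd', '/'))             # -> /etc/motd
print(normalize_path('/var/../etc/motd', '/srv'))  # -> /srv/etc/motd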
plugins/connection/iocage.py
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Based on jail.py
# (c) 2013, Michael Scherer <misc@zarb.org>
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
@@ -11,7 +10,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Stephan Lohse (!UNKNOWN) <dev-github@ploek.org>
    name: iocage
    connection: iocage
    short_description: Run tasks in iocage jails
    description:
        - Run commands or put/fetch files to an existing iocage jail
@@ -33,7 +32,7 @@ DOCUMENTATION = '''
import subprocess

from ansible_collections.community.general.plugins.connection.jail import Connection as Jail
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils._text import to_native
from ansible.errors import AnsibleError
from ansible.utils.display import Display

@@ -41,7 +40,7 @@ display = Display()


class Connection(Jail):
    """ Local iocage based connections """
    ''' Local iocage based connections '''

    transport = 'community.general.iocage'
plugins/connection/jail.py
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Based on local.py by Michael DeHaan <michael.dehaan@gmail.com>
# and chroot.py by Maykel Moya <mmoya@speedyrails.com>
# Copyright (c) 2013, Michael Scherer <misc@zarb.org>
@@ -11,7 +10,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Ansible Core Team
    name: jail
    connection: jail
    short_description: Run tasks in jails
    description:
        - Run commands or put/fetch files to an existing jail
@@ -31,15 +30,16 @@ DOCUMENTATION = '''
        - name: ansible_jail_user
'''

import distutils.spawn
import os
import os.path
import subprocess
import traceback
import ansible.constants as C

from ansible.errors import AnsibleError
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase, BUFSIZE
from ansible.utils.display import Display

@@ -47,7 +47,7 @@ display = Display()


class Connection(ConnectionBase):
    """ Local BSD Jail based connections """
    ''' Local BSD Jail based connections '''

    modified_jailname_key = 'conn_jail_name'

@@ -75,10 +75,10 @@ class Connection(ConnectionBase):

    @staticmethod
    def _search_executable(executable):
        try:
            return get_bin_path(executable)
        except ValueError:
            cmd = distutils.spawn.find_executable(executable)
            if not cmd:
                raise AnsibleError("%s command not found in PATH" % executable)
            return cmd

    def list_jails(self):
        p = subprocess.Popen([self.jls_cmd, '-q', 'name'],
@@ -90,20 +90,20 @@ class Connection(ConnectionBase):
        return to_text(stdout, errors='surrogate_or_strict').split()

    def _connect(self):
        """ connect to the jail; nothing to do here """
        ''' connect to the jail; nothing to do here '''
        super(Connection, self)._connect()
        if not self._connected:
            display.vvv(u"ESTABLISH JAIL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self.jail)
            self._connected = True

    def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
        """ run a command on the jail. This is only needed for implementing
        ''' run a command on the jail. This is only needed for implementing
        put_file() get_file() so that we don't have to read the whole file
        into memory.

        compared to exec_command() it loses some niceties like being able to
        return the process's exit code immediately.
        """
        '''

        local_cmd = [self.jexec_cmd]
        set_env = ''

@@ -123,17 +123,16 @@ class Connection(ConnectionBase):
        return p

    def exec_command(self, cmd, in_data=None, sudoable=False):
        """ run a command on the jail """
        ''' run a command on the jail '''
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        p = self._buffered_exec_command(cmd)

        stdout, stderr = p.communicate(in_data)
        return p.returncode, stdout, stderr
        return (p.returncode, stdout, stderr)

    @staticmethod
    def _prefix_login_path(remote_path):
        """ Make sure that we put files into a standard path
    def _prefix_login_path(self, remote_path):
        ''' Make sure that we put files into a standard path

        If a path is relative, then we need to choose where to put it.
        ssh chooses $HOME but we aren't guaranteed that a home dir will

@@ -141,13 +140,13 @@ class Connection(ConnectionBase):
        This also happens to be the former default.

        Can revisit using $HOME instead if it's a problem
        """
        '''
        if not remote_path.startswith(os.path.sep):
            remote_path = os.path.join(os.path.sep, remote_path)
        return os.path.normpath(remote_path)

    def put_file(self, in_path, out_path):
        """ transfer a file from local to jail """
        ''' transfer a file from local to jail '''
        super(Connection, self).put_file(in_path, out_path)
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail)

@@ -173,7 +172,7 @@ class Connection(ConnectionBase):
            raise AnsibleError("file or module does not exist at: %s" % in_path)

    def fetch_file(self, in_path, out_path):
        """ fetch a file from jail to local """
        ''' fetch a file from jail to local '''
        super(Connection, self).fetch_file(in_path, out_path)
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail)

@@ -197,6 +196,6 @@ class Connection(ConnectionBase):
            raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr)))

    def close(self):
        """ terminate the connection; nothing to do here """
        ''' terminate the connection; nothing to do here '''
        super(Connection, self).close()
        self._connected = False
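`_search_executable` prefers Ansible's `get_bin_path` and only falls back to the long-deprecated `distutils.spawn.find_executable`. On modern Python the same lookup is usually written with `shutil.which`, shown here as an alternative sketch rather than what the plugin itself does:

import shutil

def search_executable(executable):
    # shutil.which is the stdlib replacement for distutils.spawn.find_executable.
    cmd = shutil.which(executable)
    if not cmd:
        raise RuntimeError("%s command not found in PATH" % executable)
    return cmd

print(search_executable('ls'))  # e.g. /bin/ls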
plugins/connection/lxc.py
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2015, Joerg Thalheim <joerg@higgsboson.tk>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -8,7 +7,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Joerg Thalheim (!UNKNOWN) <joerg@higgsboson.tk>
    name: lxc
    connection: lxc
    short_description: Run tasks in lxc containers via lxc python library
    description:
        - Run commands or put/fetch files to an existing lxc container using lxc python library
@@ -43,13 +42,14 @@ try:
except ImportError:
    pass

from ansible import constants as C
from ansible import errors
from ansible.module_utils.common.text.converters import to_bytes, to_native
from ansible.module_utils._text import to_bytes, to_native
from ansible.plugins.connection import ConnectionBase


class Connection(ConnectionBase):
    """ Local lxc based connections """
    ''' Local lxc based connections '''

    transport = 'community.general.lxc'
    has_pipelining = True
@@ -62,7 +62,7 @@ class Connection(ConnectionBase):
        self.container = None

    def _connect(self):
        """ connect to the lxc; nothing to do here """
        ''' connect to the lxc; nothing to do here '''
        super(Connection, self)._connect()

        if not HAS_LIBLXC:
@@ -77,8 +77,7 @@ class Connection(ConnectionBase):
        if self.container.state == "STOPPED":
            raise errors.AnsibleError("%s is not running" % self.container_name)

    @staticmethod
    def _communicate(pid, in_data, stdin, stdout, stderr):
    def _communicate(self, pid, in_data, stdin, stdout, stderr):
        buf = {stdout: [], stderr: []}
        read_fds = [stdout, stderr]
        if in_data:
@@ -87,7 +86,7 @@ class Connection(ConnectionBase):
            write_fds = []
        while len(read_fds) > 0 or len(write_fds) > 0:
            try:
                ready_reads, ready_writes, dummy = select.select(read_fds, write_fds, [])
                ready_reads, ready_writes, _ = select.select(read_fds, write_fds, [])
            except select.error as e:
                if e.args[0] == errno.EINTR:
                    continue
@@ -112,7 +111,7 @@ class Connection(ConnectionBase):
        return fd

    def exec_command(self, cmd, in_data=None, sudoable=False):
        """ run a command on the chroot """
        ''' run a command on the chroot '''
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        # python2-lxc needs bytes. python3-lxc needs text.
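The `_communicate` hunk above multiplexes the container's output descriptors with `select`, retrying on `EINTR`. The read side of that loop, reduced to a standalone helper over raw file descriptors:

import errno
import os
import select

def pump(read_fds):
    # Collect output from several pipe fds until they all reach EOF.
    read_fds = list(read_fds)  # work on a copy; we remove fds as they close
    buf = {fd: [] for fd in read_fds}
    while read_fds:
        try:
            ready_reads, dummy_w, dummy_x = select.select(read_fds, [], [])
        except select.error as e:
            if e.args[0] == errno.EINTR:
                continue
            raise
        for fd in ready_reads:
            data = os.read(fd, 32768)
            if not data:
                read_fds.remove(fd)  # EOF on this fd
            else:
                buf[fd].append(data)
    return buf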
plugins/connection/lxd.py
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2016 Matt Clay <matt@mystile.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -8,14 +7,14 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Matt Clay (@mattclay) <matt@mystile.com>
    name: lxd
    connection: lxd
    short_description: Run tasks in lxc containers via lxc CLI
    description:
        - Run commands or put/fetch files to an existing lxc container using lxc CLI
    options:
        remote_addr:
            description:
                - Container identifier.
                - Container identifier
            default: inventory_hostname
            vars:
                - name: ansible_host
@@ -27,27 +26,14 @@ DOCUMENTATION = '''
            vars:
                - name: ansible_executable
                - name: ansible_lxd_executable
        remote:
            description:
                - Name of the LXD remote to use.
            default: local
            vars:
                - name: ansible_lxd_remote
            version_added: 2.0.0
        project:
            description:
                - Name of the LXD project to use.
            vars:
                - name: ansible_lxd_project
            version_added: 2.0.0
'''

import os
from distutils.spawn import find_executable
from subprocess import Popen, PIPE

from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins.connection import ConnectionBase


@@ -62,9 +48,9 @@ class Connection(ConnectionBase):
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)

        self._host = self._play_context.remote_addr
        try:
            self._lxc_cmd = get_bin_path("lxc")
        except ValueError:
            self._lxc_cmd = find_executable("lxc")

            if not self._lxc_cmd:
                raise AnsibleError("lxc command not found in PATH")

        if self._play_context.remote_user is not None and self._play_context.remote_user != 'root':
@@ -84,15 +70,7 @@ class Connection(ConnectionBase):

        self._display.vvv(u"EXEC {0}".format(cmd), host=self._host)

        local_cmd = [self._lxc_cmd]
        if self.get_option("project"):
            local_cmd.extend(["--project", self.get_option("project")])
        local_cmd.extend([
            "exec",
            "%s:%s" % (self.get_option("remote"), self.get_option("remote_addr")),
            "--",
            self.get_option("executable"), "-c", cmd
        ])
        local_cmd = [self._lxc_cmd, "exec", self._host, "--", self._play_context.executable, "-c", cmd]

        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
        in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')
@@ -120,14 +98,7 @@ class Connection(ConnectionBase):
        if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
            raise AnsibleFileNotFound("input path is not a file: %s" % in_path)

        local_cmd = [self._lxc_cmd]
        if self.get_option("project"):
            local_cmd.extend(["--project", self.get_option("project")])
        local_cmd.extend([
            "file", "push",
            in_path,
            "%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), out_path)
        ])
        local_cmd = [self._lxc_cmd, "file", "push", in_path, self._host + "/" + out_path]

        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]

@@ -140,14 +111,7 @@ class Connection(ConnectionBase):

        self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host)

        local_cmd = [self._lxc_cmd]
        if self.get_option("project"):
            local_cmd.extend(["--project", self.get_option("project")])
        local_cmd.extend([
            "file", "pull",
            "%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), in_path),
            out_path
        ])
        local_cmd = [self._lxc_cmd, "file", "pull", self._host + "/" + in_path, out_path]

        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
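All three hunks above build the same `lxc` argv front half: an optional `--project`, then a `remote:container` target. Condensed, with illustrative names:

lxc_cmd = 'lxc'
remote, project, container = 'local', 'default', 'web-1'

local_cmd = [lxc_cmd]
if project:
    local_cmd.extend(['--project', project])
local_cmd.extend(['exec', '%s:%s' % (remote, container), '--', '/bin/sh', '-c', 'uptime'])
print(local_cmd)
# ['lxc', '--project', 'default', 'exec', 'local:web-1', '--', '/bin/sh', '-c', 'uptime']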
plugins/connection/oc.py (new file, 173 lines)
@@ -0,0 +1,173 @@
# Based on the docker connection plugin
#
# Connection plugin for configuring kubernetes containers with kubectl
# (c) 2017, XuXinkun <xuxinkun@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    author:
        - xuxinkun (!UNKNOWN)

    connection: oc

    short_description: Execute tasks in pods running on OpenShift.

    description:
        - Use the oc exec command to run tasks in, or put/fetch files to, pods running on the OpenShift
          container platform.

    requirements:
        - oc (go binary)

    options:
        oc_pod:
            description:
                - Pod name. Required when the host name does not match pod name.
            default: ''
            vars:
                - name: ansible_oc_pod
            env:
                - name: K8S_AUTH_POD
        oc_container:
            description:
                - Container name. Required when a pod contains more than one container.
            default: ''
            vars:
                - name: ansible_oc_container
            env:
                - name: K8S_AUTH_CONTAINER
        oc_namespace:
            description:
                - The namespace of the pod
            default: ''
            vars:
                - name: ansible_oc_namespace
            env:
                - name: K8S_AUTH_NAMESPACE
        oc_extra_args:
            description:
                - Extra arguments to pass to the oc command line.
            default: ''
            vars:
                - name: ansible_oc_extra_args
            env:
                - name: K8S_AUTH_EXTRA_ARGS
        oc_kubeconfig:
            description:
                - Path to an oc config file. Defaults to I(~/.kube/config)
            default: ''
            vars:
                - name: ansible_oc_kubeconfig
                - name: ansible_oc_config
            env:
                - name: K8S_AUTH_KUBECONFIG
        oc_context:
            description:
                - The name of a context found in the K8s config file.
            default: ''
            vars:
                - name: ansible_oc_context
            env:
                - name: K8S_AUTH_CONTEXT
        oc_host:
            description:
                - URL for accessing the API.
            default: ''
            vars:
                - name: ansible_oc_host
                - name: ansible_oc_server
            env:
                - name: K8S_AUTH_HOST
                - name: K8S_AUTH_SERVER
        oc_token:
            description:
                - API authentication bearer token.
            vars:
                - name: ansible_oc_token
                - name: ansible_oc_api_key
            env:
                - name: K8S_AUTH_TOKEN
                - name: K8S_AUTH_API_KEY
        client_cert:
            description:
                - Path to a certificate used to authenticate with the API.
            default: ''
            vars:
                - name: ansible_oc_cert_file
                - name: ansible_oc_client_cert
            env:
                - name: K8S_AUTH_CERT_FILE
            aliases: [ oc_cert_file ]
        client_key:
            description:
                - Path to a key file used to authenticate with the API.
            default: ''
            vars:
                - name: ansible_oc_key_file
                - name: ansible_oc_client_key
            env:
                - name: K8S_AUTH_KEY_FILE
            aliases: [ oc_key_file ]
        ca_cert:
            description:
                - Path to a CA certificate used to authenticate with the API.
            default: ''
            vars:
                - name: ansible_oc_ssl_ca_cert
                - name: ansible_oc_ca_cert
            env:
                - name: K8S_AUTH_SSL_CA_CERT
            aliases: [ oc_ssl_ca_cert ]
        validate_certs:
            description:
                - Whether or not to verify the API server's SSL certificate. Defaults to I(true).
            default: ''
            vars:
                - name: ansible_oc_verify_ssl
                - name: ansible_oc_validate_certs
            env:
                - name: K8S_AUTH_VERIFY_SSL
            aliases: [ oc_verify_ssl ]
'''

from ansible_collections.community.kubernetes.plugins.connection.kubectl import Connection as KubectlConnection


CONNECTION_TRANSPORT = 'community.general.oc'

CONNECTION_OPTIONS = {
    'oc_container': '-c',
    'oc_namespace': '-n',
    'oc_kubeconfig': '--config',
    'oc_context': '--context',
    'oc_host': '--server',
    'client_cert': '--client-certificate',
    'client_key': '--client-key',
    'ca_cert': '--certificate-authority',
    'validate_certs': '--insecure-skip-tls-verify',
    'oc_token': '--token'
}


class Connection(KubectlConnection):
    ''' Local oc based connections '''
    transport = CONNECTION_TRANSPORT
    connection_options = CONNECTION_OPTIONS
    documentation = DOCUMENTATION
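`CONNECTION_OPTIONS` is the table the inherited kubectl connection uses to translate plugin options into `oc` CLI flags. A sketch of how such a mapping can become an argv prefix; `build_global_flags` and the option values are illustrative, not the actual kubectl plugin code:

CONNECTION_OPTIONS = {
    'oc_namespace': '-n',
    'oc_kubeconfig': '--config',
    'oc_token': '--token',
}

def build_global_flags(get_option):
    # For every option with a non-empty value, emit "<flag> <value>".
    flags = []
    for name, flag in CONNECTION_OPTIONS.items():
        value = get_option(name)
        if value:
            flags.extend([flag, value])
    return flags

opts = {'oc_namespace': 'demo', 'oc_kubeconfig': '', 'oc_token': 'abc123'}
print(build_global_flags(opts.get))
# -> ['-n', 'demo', '--token', 'abc123']  (dict order is preserved on Python 3.7+)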
plugins/connection/qubes.py
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Based on the buildah connection plugin
# Copyright (c) 2017 Ansible Project
# 2018 Kushal Das
@@ -12,7 +11,7 @@ __metaclass__ = type


DOCUMENTATION = '''
    name: qubes
    connection: qubes
    short_description: Interact with an existing QubesOS AppVM

    description:
@@ -38,9 +37,15 @@ DOCUMENTATION = '''
#        - name: hosts
'''

import shlex
import shutil

import os
import base64
import subprocess

from ansible.module_utils.common.text.converters import to_bytes
import ansible.constants as C
from ansible.module_utils._text import to_bytes, to_native
from ansible.plugins.connection import ConnectionBase, ensure_connect
from ansible.errors import AnsibleConnectionFailure
from ansible.utils.display import Display
plugins/connection/saltstack.py
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# Based on func.py
@@ -11,17 +10,20 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Michael Scherer (@mscherer) <misc@zarb.org>
    name: saltstack
    connection: saltstack
    short_description: Allow ansible to piggyback on salt minions
    description:
        - This allows you to use existing Saltstack infrastructure to connect to targets.
'''

import re
import os
import base64
import pty
import codecs
import subprocess

from ansible import errors
from ansible.plugins.connection import ConnectionBase
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.six.moves import cPickle

HAVE_SALTSTACK = False
try:
@@ -30,9 +32,13 @@ try:
except ImportError:
    pass

import os
from ansible import errors
from ansible.plugins.connection import ConnectionBase


class Connection(ConnectionBase):
    """ Salt-based connections """
    ''' Salt-based connections '''

    has_pipelining = False
    # while the name of the product is salt, naming that module salt cause
@@ -51,31 +57,30 @@ class Connection(ConnectionBase):
        self._connected = True
        return self

    def exec_command(self, cmd, in_data=None, sudoable=False):
        """ run a command on the remote minion """
    def exec_command(self, cmd, sudoable=False, in_data=None):
        ''' run a command on the remote minion '''
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        if in_data:
            raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")

        self._display.vvv("EXEC %s" % cmd, host=self.host)
        self._display.vvv("EXEC %s" % (cmd), host=self.host)
        # need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077
        res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd])
        if self.host not in res:
            raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host)

        p = res[self.host]
        return p['retcode'], p['stdout'], p['stderr']
        return (p['retcode'], p['stdout'], p['stderr'])

    @staticmethod
    def _normalize_path(path, prefix):
    def _normalize_path(self, path, prefix):
        if not path.startswith(os.path.sep):
            path = os.path.join(os.path.sep, path)
        normpath = os.path.normpath(path)
        return os.path.join(prefix, normpath[1:])

    def put_file(self, in_path, out_path):
        """ transfer a file from local to remote """
        ''' transfer a file from local to remote '''

        super(Connection, self).put_file(in_path, out_path)

@@ -83,11 +88,11 @@ class Connection(ConnectionBase):
        self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
        with open(in_path, 'rb') as in_fh:
            content = in_fh.read()
        self.client.cmd(self.host, 'hashutil.base64_decodefile', [base64.b64encode(content), out_path])
        self.client.cmd(self.host, 'hashutil.base64_decodefile', [codecs.encode(content, 'base64'), out_path])

    # TODO test it
    def fetch_file(self, in_path, out_path):
        """ fetch a file from remote to local """
        ''' fetch a file from remote to local '''

        super(Connection, self).fetch_file(in_path, out_path)

@@ -97,5 +102,5 @@ class Connection(ConnectionBase):
        open(out_path, 'wb').write(content)

    def close(self):
        """ terminate the connection; nothing to do here """
        ''' terminate the connection; nothing to do here '''
        pass
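The `put_file` pair above ships file content to the minion as base64 for salt's `hashutil.base64_decodefile` to decode; one side of the pair uses `base64.b64encode`, the other `codecs.encode(..., 'base64')`. The difference between the two encoders in isolation:

import base64
import codecs

content = b'hello world\n'

# b64encode: one unbroken base64 string.
print(base64.b64encode(content))         # b'aGVsbG8gd29ybGQK'

# codecs 'base64': same alphabet, but line-wrapped with a trailing newline,
# which a base64 decoder on the salt side still accepts.
print(codecs.encode(content, 'base64'))  # b'aGVsbG8gd29ybGQK\n'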
plugins/connection/zone.py
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# and chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# and jail.py (c) 2013, Michael Scherer <misc@zarb.org>
@@ -12,7 +11,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Ansible Core Team
    name: zone
    connection: zone
    short_description: Run tasks in a zone instance
    description:
        - Run commands or put/fetch files to an existing zone
@@ -26,15 +25,16 @@ DOCUMENTATION = '''
        - name: ansible_zone_host
'''

import distutils.spawn
import os
import os.path
import subprocess
import traceback

from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.text.converters import to_bytes
from ansible.module_utils._text import to_bytes
from ansible.plugins.connection import ConnectionBase, BUFSIZE
from ansible.utils.display import Display

@@ -42,7 +42,7 @@ display = Display()


class Connection(ConnectionBase):
    """ Local zone based connections """
    ''' Local zone based connections '''

    transport = 'community.general.zone'
    has_pipelining = True
@@ -64,10 +64,10 @@ class Connection(ConnectionBase):

    @staticmethod
    def _search_executable(executable):
        try:
            return get_bin_path(executable)
        except ValueError:
            cmd = distutils.spawn.find_executable(executable)
            if not cmd:
                raise AnsibleError("%s command not found in PATH" % executable)
            return cmd

    def list_zones(self):
        process = subprocess.Popen([self.zoneadm_cmd, 'list', '-ip'],
@@ -75,9 +75,9 @@ class Connection(ConnectionBase):
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        zones = []
        for line in process.stdout.readlines():
        for l in process.stdout.readlines():
            # 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared
            s = line.split(':')
            s = l.split(':')
            if s[1] != 'global':
                zones.append(s[1])

@@ -95,20 +95,20 @@ class Connection(ConnectionBase):
        return path + '/root'

    def _connect(self):
        """ connect to the zone; nothing to do here """
        ''' connect to the zone; nothing to do here '''
        super(Connection, self)._connect()
        if not self._connected:
            display.vvv("THIS IS A LOCAL ZONE DIR", host=self.zone)
            self._connected = True

    def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
        """ run a command on the zone. This is only needed for implementing
        ''' run a command on the zone. This is only needed for implementing
        put_file() get_file() so that we don't have to read the whole file
        into memory.

        compared to exec_command() it loses some niceties like being able to
        return the process's exit code immediately.
        """
        '''
        # NOTE: zlogin invokes a shell (just like ssh does) so we do not pass
        # this through /bin/sh -c here.  Instead it goes through the shell
        # that zlogin selects.
@@ -122,16 +122,16 @@ class Connection(ConnectionBase):
        return p

    def exec_command(self, cmd, in_data=None, sudoable=False):
        """ run a command on the zone """
        ''' run a command on the zone '''
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        p = self._buffered_exec_command(cmd)

        stdout, stderr = p.communicate(in_data)
        return p.returncode, stdout, stderr
        return (p.returncode, stdout, stderr)

    def _prefix_login_path(self, remote_path):
        """ Make sure that we put files into a standard path
        ''' Make sure that we put files into a standard path

        If a path is relative, then we need to choose where to put it.
        ssh chooses $HOME but we aren't guaranteed that a home dir will

@@ -139,13 +139,13 @@ class Connection(ConnectionBase):
        This also happens to be the former default.

        Can revisit using $HOME instead if it's a problem
        """
        '''
        if not remote_path.startswith(os.path.sep):
            remote_path = os.path.join(os.path.sep, remote_path)
        return os.path.normpath(remote_path)

    def put_file(self, in_path, out_path):
        """ transfer a file from local to zone """
        ''' transfer a file from local to zone '''
        super(Connection, self).put_file(in_path, out_path)
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone)

@@ -171,7 +171,7 @@ class Connection(ConnectionBase):
            raise AnsibleError("file or module does not exist at: %s" % in_path)

    def fetch_file(self, in_path, out_path):
        """ fetch a file from zone to local """
        ''' fetch a file from zone to local '''
        super(Connection, self).fetch_file(in_path, out_path)
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone)

@@ -195,6 +195,6 @@ class Connection(ConnectionBase):
            raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))

    def close(self):
        """ terminate the connection; nothing to do here """
        ''' terminate the connection; nothing to do here '''
        super(Connection, self).close()
        self._connected = False
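`list_zones` parses the machine-readable output of `zoneadm list -ip`, where fields are colon-separated and field 1 is the zone name. Parsing one such line:

line = '1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared'
fields = line.split(':')
# fields: [id, name, state, zonepath, uuid, brand, ip-type]
if fields[1] != 'global':
    print(fields[1])  # -> work; the global zone is skipped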
plugins/doc_fragments/_gcp.py (new file, 62 lines)
@@ -0,0 +1,62 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2018, Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):
    # GCP doc fragment.
    DOCUMENTATION = r'''
options:
    project:
        description:
            - The Google Cloud Platform project to use.
        type: str
    auth_kind:
        description:
            - The type of credential used.
        type: str
        required: true
        choices: [ application, machineaccount, serviceaccount ]
    service_account_contents:
        description:
            - The contents of a Service Account JSON file, either in a dictionary or as a JSON string that represents it.
        type: jsonarg
    service_account_file:
        description:
            - The path of a Service Account JSON file if serviceaccount is selected as type.
        type: path
    service_account_email:
        description:
            - An optional service account email address if machineaccount is selected
              and the user does not wish to use the default email.
        type: str
    scopes:
        description:
            - Array of scopes to be used.
        type: list
        elements: str
    env_type:
        description:
            - Specifies which Ansible environment you're running this module within.
            - This should not be set unless you know what you're doing.
            - This only alters the User Agent string for any API requests.
        type: str
notes:
    - For authentication, you can set service_account_file using the
      C(GCP_SERVICE_ACCOUNT_FILE) env variable.
    - For authentication, you can set service_account_contents using the
      C(GCP_SERVICE_ACCOUNT_CONTENTS) env variable.
    - For authentication, you can set service_account_email using the
      C(GCP_SERVICE_ACCOUNT_EMAIL) env variable.
    - For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env
      variable.
    - For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
    - Environment variable values will only be used if the playbook values are
      not set.
    - The I(service_account_email) and I(service_account_file) options are
      mutually exclusive.
'''
plugins/doc_fragments/docker.py (new file, 136 lines)
@@ -0,0 +1,136 @@
|
||||
# -*- coding: utf-8 -*-

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):

    # Docker doc fragment
    DOCUMENTATION = r'''

options:
    docker_host:
        description:
            - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
              TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
              the module will automatically replace C(tcp) in the connection URL with C(https).
            - If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used
              instead. If the environment variable is not set, the default value will be used.
        type: str
        default: unix://var/run/docker.sock
        aliases: [ docker_url ]
    tls_hostname:
        description:
            - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will
              be used instead. If the environment variable is not set, the default value will be used.
        type: str
        default: localhost
    api_version:
        description:
            - The version of the Docker API running on the Docker Host.
            - Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be
              used instead. If the environment variable is not set, the default value will be used.
        type: str
        default: auto
        aliases: [ docker_api_version ]
    timeout:
        description:
            - The maximum amount of time in seconds to wait on a response from the API.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used
              instead. If the environment variable is not set, the default value will be used.
        type: int
        default: 60
    ca_cert:
        description:
            - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
            - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
              the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
        type: path
        aliases: [ tls_ca_cert, cacert_path ]
    client_cert:
        description:
            - Path to the client's TLS certificate file.
            - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
              the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
        type: path
        aliases: [ tls_client_cert, cert_path ]
    client_key:
        description:
            - Path to the client's TLS key file.
            - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
              the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
        type: path
        aliases: [ tls_client_key, key_path ]
    ssl_version:
        description:
            - Provide a valid SSL version number. The default value is determined by the standard library C(ssl) module.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
              used instead.
        type: str
    tls:
        description:
            - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
              server. Note that if I(validate_certs) is set to C(yes) as well, it will take precedence.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
              instead. If the environment variable is not set, the default value will be used.
        type: bool
        default: no
    validate_certs:
        description:
            - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
              used instead. If the environment variable is not set, the default value will be used.
        type: bool
        default: no
        aliases: [ tls_verify ]
    debug:
        description:
            - Debug mode.
        type: bool
        default: no

notes:
    - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
      You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
      C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using Docker Machine, run the script shipped
      with the product that sets up the environment. It will set these variables for you. See
      U(https://docs.docker.com/machine/reference/env/) for more details.
    - When connecting to the Docker daemon with TLS, you might need to install additional Python packages.
      For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip).
    - Note that the Docker SDK for Python only allows specifying the path to the Docker configuration for very few functions.
      In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
      and use C($DOCKER_CONFIG/config.json) otherwise.
'''
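A hedged sketch of the two connection styles the notes describe (explicit task parameters versus environment variables); the module choice C(docker_container_info) and all paths and addresses are assumptions, not part of this fragment:

# Option 1: pass connection parameters with the task (values are placeholders).
- name: Inspect a container over TLS
  community.general.docker_container_info:  # assumed consumer of this fragment
    name: web
    docker_host: tcp://192.0.2.23:2376
    validate_certs: yes
    ca_cert: /etc/docker/certs/ca.pem
    client_cert: /etc/docker/certs/cert.pem
    client_key: /etc/docker/certs/key.pem

# Option 2: rely on the environment instead (for example as set by docker-machine):
#   export DOCKER_HOST=tcp://192.0.2.23:2376
#   export DOCKER_TLS_VERIFY=1
#   export DOCKER_CERT_PATH=/etc/docker/certs
- name: Inspect a container using environment configuration
  community.general.docker_container_info:
    name: web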

    # Additional, more specific stuff for minimal Docker SDK for Python version < 2.0

    DOCKER_PY_1_DOCUMENTATION = r'''
options: {}
requirements:
    - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
      Python module has been superseded by L(docker,https://pypi.org/project/docker/)
      (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
      For Python 2.6, C(docker-py) must be used. Otherwise, it is recommended to
      install the C(docker) Python module. Note that both modules should *not*
      be installed at the same time. Also note that when both modules are installed
      and one of them is uninstalled, the other might no longer function and a
      reinstall of it is required."
'''

    # Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.
    # Note that Docker SDK for Python >= 2.0 requires Python 2.7 or newer.

    DOCKER_PY_2_DOCUMENTATION = r'''
options: {}
requirements:
    - "Python >= 2.7"
    - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
      Python module has been superseded by L(docker,https://pypi.org/project/docker/)
      (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
      This module does *not* work with docker-py."
'''
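The note above about installing C(docker[tls]) with M(ansible.builtin.pip) expands to a task like the following minimal sketch:

- name: Install Docker SDK for Python with TLS extras
  ansible.builtin.pip:
    name: docker[tls]
    state: present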
23 plugins/doc_fragments/hetzner.py Normal file
@@ -0,0 +1,23 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2019 Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):

    # Standard files documentation fragment
    DOCUMENTATION = r'''
options:
  hetzner_user:
    description: The username for the Robot webservice user.
    type: str
    required: yes
  hetzner_password:
    description: The password for the Robot webservice user.
    type: str
    required: yes
'''
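A minimal usage sketch for these two options; the module name and the IP address are assumptions, not part of this fragment:

- name: Query failover IP routing via the Robot webservice
  community.general.hetzner_failover_ip_info:  # assumed consumer of this fragment
    hetzner_user: "{{ robot_user }}"
    hetzner_password: "{{ robot_password }}"
    failover_ip: 192.0.2.1  # placeholder failover IP
  register: failover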
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Huawei Inc.
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -30,6 +30,7 @@ options:
    description:
      - Keycloak realm name to authenticate to for API access.
    type: str
    required: true

  auth_client_secret:
    description:
@@ -40,6 +41,7 @@ options:
    description:
      - Username to authenticate for API access with.
    type: str
    required: true
    aliases:
      - username

@@ -47,15 +49,10 @@ options:
    description:
      - Password to authenticate for API access with.
    type: str
    required: true
    aliases:
      - password

  token:
    description:
      - Authentication token for Keycloak API.
    type: str
    version_added: 3.0.0

  validate_certs:
    description:
      - Verify TLS certificates (do not disable this in production).
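A sketch contrasting the I(token) option added in 3.0.0 with username/password authentication; the module name and the C(auth_keycloak_url) option are assumptions drawn from the wider fragment, and all values are placeholders:

- name: Manage a Keycloak client using token authentication
  community.general.keycloak_client:  # assumed consumer of this fragment
    auth_keycloak_url: https://keycloak.example.com/auth  # assumed option name
    token: "{{ keycloak_api_token }}"
    realm: master
    client_id: my-client
    state: present

# Alternatively, authenticate with credentials instead of a token:
#   auth_realm: master
#   auth_username: admin
#   auth_password: "{{ keycloak_admin_pw }}"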
133 plugins/doc_fragments/kubevirt_common_options.py Normal file
@@ -0,0 +1,133 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2018, KubeVirt Team <@kubevirt>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):
    DOCUMENTATION = r'''
options:
  resource_definition:
    description:
      - "A partial YAML definition of the object being created/updated. Here you can define Kubernetes
        resource parameters not covered by this module's parameters."
      - "NOTE: I(resource_definition) has lower priority than module parameters. If you try to define e.g.
        I(metadata.namespace) here, that value will be ignored and I(namespace) used instead."
    aliases:
      - definition
      - inline
    type: dict
  wait:
    description:
      - "I(True) if the module should wait for the resource to get into the desired state."
    type: bool
    default: yes
  force:
    description:
      - If set to C(no), and I(state) is C(present), an existing object will be replaced.
    type: bool
    default: no
  wait_timeout:
    description:
      - The amount of time in seconds the module should wait for the resource to get into the desired state.
    type: int
    default: 120
  wait_sleep:
    description:
      - Number of seconds to sleep between checks.
    default: 5
    version_added: '0.2.0'
  memory:
    description:
      - The amount of memory to be requested by the virtual machine.
      - For example 1024Mi.
    type: str
  memory_limit:
    description:
      - The maximum memory to be used by the virtual machine.
      - For example 1024Mi.
    type: str
  machine_type:
    description:
      - QEMU machine type is the actual chipset of the virtual machine.
    type: str
  merge_type:
    description:
      - Whether to override the default patch merge approach with a specific type.
      - If more than one merge type is given, the merge types will be tried in order.
      - "Defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
        on resource kinds that combine Custom Resources and built-in resources, as
        Custom Resource Definitions typically aren't updatable by the usual strategic merge."
      - "See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)"
    type: list
    choices: [ json, merge, strategic-merge ]
  cpu_shares:
    description:
      - "Specify CPU shares."
    type: int
  cpu_limit:
    description:
      - "Is converted to its millicore value and multiplied by 100. The resulting value is the total amount of CPU time that a container can use
        every 100ms. A virtual machine cannot use more than its share of CPU time during this interval."
    type: int
  cpu_cores:
    description:
      - "Number of CPU cores."
    type: int
  cpu_model:
    description:
      - "CPU model."
      - "You can check the list of available models here: U(https://github.com/libvirt/libvirt/blob/master/src/cpu_map/index.xml)."
      - "I(Note:) The user can define the default CPU model via I(default-cpu-model) in the I(kubevirt-config) I(ConfigMap); if not set, I(host-model) is used."
      - "I(Note:) Be sure that the CPU model of the node where you run a VM is of the same or a higher CPU family."
      - "I(Note:) If no CPU model is defined, the VM will get the CPU model closest to the one used on the node where the VM is running."
    type: str
  bootloader:
    description:
      - "Specify the bootloader of the virtual machine."
      - "All virtual machines use BIOS by default for booting."
    type: str
  smbios_uuid:
    description:
      - "In order to provide a consistent view on the virtualized hardware for the guest OS, the SMBIOS UUID can be set."
    type: str
  cpu_features:
    description:
      - "List of dictionaries to fine-tune features provided by the selected CPU model."
      - "I(Note): The policy attribute can either be omitted or contain one of the following policies: force, require, optional, disable, forbid."
      - "I(Note): In case a policy is omitted for a feature, it will default to require."
      - "More information about policies: U(https://libvirt.org/formatdomain.html#elementsCPU)"
    type: list
  headless:
    description:
      - "Specify if the virtual machine should have a minimal Video and Graphics device configuration attached."
      - "By default a minimal Video and Graphics device configuration will be applied to the VirtualMachineInstance. The video device is VGA
        compatible and comes with a memory size of 16 MB."
  hugepage_size:
    description:
      - "Specify huge page size."
    type: str
  tablets:
    description:
      - "Specify tablets to be used as input devices."
    type: list
  hostname:
    description:
      - "Specifies the hostname of the virtual machine. The hostname will be set by DHCP or by cloud-init if configured; otherwise the virtual
        machine name will be used."
  subdomain:
    description:
      - "If specified, the fully qualified virtual machine hostname will be hostname.subdomain.namespace.svc.cluster_domain. If not specified,
        the virtual machine will not have a domain name at all. The DNS entry will resolve to the virtual machine, no matter if the virtual machine
        itself can pick up a hostname."
requirements:
  - python >= 2.7
  - openshift >= 0.8.2
notes:
  - "In order to use this module you have to install the OpenShift Python SDK.
    To ensure it's installed with the correct version you can create the following task:
    I(pip: name=openshift>=0.8.2)"
'''
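A sketch tying several of these common options together; the module name C(kubevirt_vm) and all values are illustrative assumptions. The first task simply expands the pip hint from the notes above:

- name: Ensure the OpenShift SDK is present
  ansible.builtin.pip:
    name: openshift>=0.8.2

- name: Create a small virtual machine
  community.general.kubevirt_vm:  # assumed consumer of this fragment
    state: present
    name: testvm
    namespace: default
    memory: 1024Mi
    cpu_cores: 2
    wait: yes
    wait_timeout: 180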
103 plugins/doc_fragments/kubevirt_vm_options.py Normal file
@@ -0,0 +1,103 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2018, KubeVirt Team <@kubevirt>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):

    # Standard KubeVirt documentation fragment
    DOCUMENTATION = r'''
options:
  disks:
    description:
      - List of dictionaries which specify disks of the virtual machine.
      - "A disk can be made accessible via four different types: I(disk), I(lun), I(cdrom), I(floppy)."
      - "All possible configuration options are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_disk)."
      - Each disk must have a I(volume) specified that declares the volume type of the disk.
        All possible configuration options of a volume are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_volume).
    type: list
  labels:
    description:
      - Labels are key/value pairs that are attached to virtual machines. Labels are intended to be used to
        specify identifying attributes of virtual machines that are meaningful and relevant to users, but do not directly
        imply semantics to the core system. Labels can be used to organize and to select subsets of virtual machines.
        Labels can be attached to virtual machines at creation time and subsequently added and modified at any time.
      - "More on labels that are used for internal implementation: U(https://kubevirt.io/user-guide/#/misc/annotations_and_labels)"
    type: dict
  interfaces:
    description:
      - An interface defines a virtual network interface of a virtual machine (also called a frontend).
      - All possible configuration options for interfaces are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_interface).
      - Each interface must have a I(network) specified that declares which logical or physical device it is connected to (also called the backend).
        All possible configuration options of a network are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_network).
    type: list
  cloud_init_nocloud:
    description:
      - "Represents a cloud-init NoCloud user-data source. The NoCloud data will be added
        as a disk to the virtual machine. A proper cloud-init installation is required inside the guest.
        More information: U(https://kubevirt.io/api-reference/master/definitions.html#_v1_cloudinitnocloudsource)"
    type: dict
  affinity:
    description:
      - "Describes node affinity scheduling rules for the VM."
    type: dict
    suboptions:
      soft:
        description:
          - "The scheduler will prefer to schedule VMs to nodes that satisfy the affinity expressions specified by this field, but it may choose a
            node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for
            each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute
            a sum by iterating through the elements of this field and adding C(weight) to the sum if the node has VMs which match the corresponding
            C(term); the nodes with the highest sum are the most preferred."
        type: dict
      hard:
        description:
          - "If the affinity requirements specified by this field are not met at scheduling time, the VM will not be scheduled onto the node. If
            the affinity requirements specified by this field cease to be met at some point during VM execution (e.g. due to a VM label update), the
            system may or may not try to eventually evict the VM from its node. When there are multiple elements, the lists of nodes corresponding to
            each C(term) are intersected, i.e. all terms must be satisfied."
        type: dict
  node_affinity:
    description:
      - "Describes VM affinity scheduling rules, e.g. co-locate this VM in the same node, zone, etc. as some other VMs."
    type: dict
    suboptions:
      soft:
        description:
          - "The scheduler will prefer to schedule VMs to nodes that satisfy the affinity expressions specified by this field, but it may choose
            a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e.
            for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.),
            compute a sum by iterating through the elements of this field and adding C(weight) to the sum if the node matches the corresponding
            match_expressions; the nodes with the highest sum are the most preferred."
        type: dict
      hard:
        description:
          - "If the affinity requirements specified by this field are not met at scheduling time, the VM will not be scheduled onto the node. If
            the affinity requirements specified by this field cease to be met at some point during VM execution (e.g. due to an update), the system
            may or may not try to eventually evict the VM from its node."
        type: dict
  anti_affinity:
    description:
      - "Describes VM anti-affinity scheduling rules, e.g. avoid putting this VM in the same node, zone, etc. as some other VMs."
    type: dict
    suboptions:
      soft:
        description:
          - "The scheduler will prefer to schedule VMs to nodes that satisfy the anti-affinity expressions specified by this field, but it may
            choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights,
            i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions,
            etc.), compute a sum by iterating through the elements of this field and adding C(weight) to the sum if the node has VMs which match
            the corresponding C(term); the nodes with the highest sum are the most preferred."
        type: dict
      hard:
        description:
          - "If the anti-affinity requirements specified by this field are not met at scheduling time, the VM will not be scheduled onto the node.
            If the anti-affinity requirements specified by this field cease to be met at some point during VM execution (e.g. due to a VM label
            update), the system may or may not try to eventually evict the VM from its node. When there are multiple elements, the lists of nodes
            corresponding to each C(term) are intersected, i.e. all terms must be satisfied."
        type: dict
'''
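A sketch of the I(disks)/I(interfaces) structure described above, where each disk carries a I(volume) and each interface a I(network); the module name, image, and label values are illustrative assumptions:

- name: Create a VM with one containerDisk and a pod network interface
  community.general.kubevirt_vm:  # assumed consumer of this fragment
    state: present
    name: testvm
    namespace: default
    disks:
      - name: containerdisk
        volume:
          containerDisk:
            image: kubevirt/cirros-container-disk-demo:latest  # placeholder image
        disk:
          bus: virtio
    interfaces:
      - name: default
        bridge: {}
        network:
          pod: {}
    labels:
      app: test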
@@ -15,7 +15,7 @@ class ModuleDocFragment(object):
options:
  bind_dn:
    description:
      - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism as default.
      - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism.
      - If this is blank, we'll use an anonymous bind.
    type: str
  bind_pw:
@@ -27,21 +27,10 @@ options:
    description:
      - The DN of the entry to add or remove.
    type: str
  referrals_chasing:
    choices: [disabled, anonymous]
    default: anonymous
    type: str
    description:
      - Set the referrals chasing behavior.
      - C(anonymous) follow referrals anonymously. This is the default behavior.
      - C(disabled) disable referrals chasing. This sets C(OPT_REFERRALS) to off.
    version_added: 2.0.0
  server_uri:
    description:
      - The I(server_uri) parameter may be a comma- or whitespace-separated list of URIs containing only the schema, the host, and the port fields.
      - A URI to the LDAP server.
      - The default value lets the underlying LDAP client library look for a UNIX domain socket in its default location.
      - Note that when using multiple URIs you cannot determine to which URI your client gets connected.
      - For URIs containing additional fields, particularly when using commas, behavior is undefined.
    type: str
    default: ldapi:///
  start_tls:
@@ -55,12 +44,4 @@ options:
      - This should only be used on sites using self-signed certificates.
    type: bool
    default: yes
  sasl_class:
    description:
      - The class to use for SASL authentication.
      - Possible choices are C(external), C(gssapi).
    type: str
    choices: ['external', 'gssapi']
    default: external
    version_added: "2.0.0"
'''
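A sketch using the connection options above, including the I(referrals_chasing) behavior added in 2.0.0; the DN, attribute values, and the module choice are illustrative assumptions:

- name: Set an attribute, chasing referrals anonymously
  community.general.ldap_attrs:  # assumed consumer of this fragment
    dn: olcDatabase={1}mdb,cn=config  # placeholder DN
    attributes:
      olcSuffix: dc=example,dc=com
    state: exact
    server_uri: ldap://localhost/
    bind_dn: cn=admin,dc=example,dc=com
    bind_pw: "{{ ldap_admin_pw }}"
    referrals_chasing: anonymous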
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2017 Lenovo, Inc.
# Simplified BSD License (see simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
Some files were not shown because too many files have changed in this diff.