mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-04-30 10:26:52 +00:00
Compare commits
273 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
332ba8166c | ||
|
|
725450e57a | ||
|
|
f4311e08aa | ||
|
|
9e7b067904 | ||
|
|
d29db3ecf9 | ||
|
|
4aba7d5b87 | ||
|
|
88d00c32db | ||
|
|
f1e1b46ce2 | ||
|
|
c4256d8674 | ||
|
|
0bfed46136 | ||
|
|
a04912dec0 | ||
|
|
7f92aa0854 | ||
|
|
a16164cb72 | ||
|
|
3960153f70 | ||
|
|
6d4760eb20 | ||
|
|
777a741d4d | ||
|
|
aaf42f3646 | ||
|
|
c167ac10e0 | ||
|
|
154d8a313c | ||
|
|
b76492687b | ||
|
|
a118bb8d05 | ||
|
|
d2b1df49c1 | ||
|
|
fb3085e78d | ||
|
|
ad163ed3af | ||
|
|
15257e9a64 | ||
|
|
c642ee9157 | ||
|
|
7e89bc6f61 | ||
|
|
9defd1aca1 | ||
|
|
ff1a8415bd | ||
|
|
961011891b | ||
|
|
6470d3defe | ||
|
|
32ac93fb16 | ||
|
|
1dfe7963cf | ||
|
|
a46fb7bcae | ||
|
|
70a8ca6ac3 | ||
|
|
17f598fdc2 | ||
|
|
bc13182e1d | ||
|
|
6cbd0c772e | ||
|
|
49314a42ef | ||
|
|
4953fda9a0 | ||
|
|
e03431d9f6 | ||
|
|
ab94b0ace0 | ||
|
|
bd8df8e94e | ||
|
|
9bb439632b | ||
|
|
9e780b9d5e | ||
|
|
60e0a660ce | ||
|
|
7664146c9b | ||
|
|
28c455d234 | ||
|
|
74d4561a33 | ||
|
|
a07db2f731 | ||
|
|
528602f1b9 | ||
|
|
b99586e26a | ||
|
|
999620c789 | ||
|
|
d5c24e67e8 | ||
|
|
b1d1391be5 | ||
|
|
02f0abfb36 | ||
|
|
659ef811a3 | ||
|
|
b86161886f | ||
|
|
e29d585412 | ||
|
|
703bb465c7 | ||
|
|
1d290c129f | ||
|
|
af913c9a6a | ||
|
|
028481c55e | ||
|
|
247da9890b | ||
|
|
73d573b915 | ||
|
|
d1a7423196 | ||
|
|
7ace59f505 | ||
|
|
bcf0060f10 | ||
|
|
07e35f7505 | ||
|
|
9279e4532d | ||
|
|
51bf08c690 | ||
|
|
87e31ae886 | ||
|
|
7713202d9b | ||
|
|
e669562a0f | ||
|
|
17e39e3744 | ||
|
|
c1a6feaf25 | ||
|
|
bdeb63e579 | ||
|
|
7e247b0eea | ||
|
|
2fe6a34e3f | ||
|
|
ca97eb6f93 | ||
|
|
b529955c07 | ||
|
|
d5d24302b6 | ||
|
|
51a3594494 | ||
|
|
85fd4240f6 | ||
|
|
490495937b | ||
|
|
0e7a130ec3 | ||
|
|
5239357077 | ||
|
|
8cd126be26 | ||
|
|
d2e259da4a | ||
|
|
6bd10adb97 | ||
|
|
71b63e6a75 | ||
|
|
1ce00126c5 | ||
|
|
011e27caf5 | ||
|
|
726ea65f4f | ||
|
|
85307d28e2 | ||
|
|
f6fe843a57 | ||
|
|
ee04231964 | ||
|
|
e09392e867 | ||
|
|
1b7c49cf56 | ||
|
|
e5cddcaf87 | ||
|
|
82162b35c4 | ||
|
|
2233c94a6f | ||
|
|
eba42c9eb9 | ||
|
|
dbb145bc71 | ||
|
|
373df2ba68 | ||
|
|
4cd7476604 | ||
|
|
24f973a9d1 | ||
|
|
f1ca1ccd89 | ||
|
|
0564a2239f | ||
|
|
a3a33cb019 | ||
|
|
399c28c11e | ||
|
|
18da4d22f8 | ||
|
|
d4435b0b8d | ||
|
|
c4983f9b90 | ||
|
|
0005df8910 | ||
|
|
130709348d | ||
|
|
901bca58bb | ||
|
|
5a826a5cb7 | ||
|
|
924f18535a | ||
|
|
be27bf1eae | ||
|
|
29819e04ec | ||
|
|
bf9a6c08d0 | ||
|
|
6708ee1afd | ||
|
|
88bd8fc7ea | ||
|
|
5d0a0d27e5 | ||
|
|
d74680a3c6 | ||
|
|
19a7aa462b | ||
|
|
176c9a90ca | ||
|
|
c4e93b0b5f | ||
|
|
08831e193f | ||
|
|
6ea7616541 | ||
|
|
34c164dc78 | ||
|
|
2ff06d2fdf | ||
|
|
3a69dd949d | ||
|
|
82c79e9a06 | ||
|
|
96a970475f | ||
|
|
f3e07723cd | ||
|
|
9f93219611 | ||
|
|
922dd0fc10 | ||
|
|
a3a0c5c3fd | ||
|
|
a20e221d6f | ||
|
|
d0a9ced474 | ||
|
|
b035084caa | ||
|
|
b56857932e | ||
|
|
7da1f3ffea | ||
|
|
c826a81b40 | ||
|
|
af4f1f727d | ||
|
|
5571a0cdf8 | ||
|
|
bb2ad10eef | ||
|
|
788dc4bc23 | ||
|
|
705118247d | ||
|
|
1b579dfdc2 | ||
|
|
e3e3682eb3 | ||
|
|
3c6e84b21c | ||
|
|
28ec0b07e9 | ||
|
|
22e0fa03b2 | ||
|
|
b3cac071fa | ||
|
|
ebb9d8a6fa | ||
|
|
f8fcc827cd | ||
|
|
f9ac30a531 | ||
|
|
efa884b64a | ||
|
|
ee8f87412a | ||
|
|
2cb3cec659 | ||
|
|
6092cd89bc | ||
|
|
dd47c3a548 | ||
|
|
06678d4ce3 | ||
|
|
ba10525125 | ||
|
|
713e386c66 | ||
|
|
adf61bf7f4 | ||
|
|
97507b50b5 | ||
|
|
c0971e41b0 | ||
|
|
0b28f5d9e4 | ||
|
|
7c0175322b | ||
|
|
4e497ace29 | ||
|
|
26bb835975 | ||
|
|
5d3a2a3bd4 | ||
|
|
686cdf2a6b | ||
|
|
4928810dda | ||
|
|
4dc2e14039 | ||
|
|
6ec769b051 | ||
|
|
e4d3d24b26 | ||
|
|
572e3f0814 | ||
|
|
e03ade818a | ||
|
|
54725bea77 | ||
|
|
db24f9857a | ||
|
|
c00147e532 | ||
|
|
0baceda7f6 | ||
|
|
c563813e4e | ||
|
|
1dbd7d4d00 | ||
|
|
41b72c0055 | ||
|
|
96a8390b5e | ||
|
|
25474f657a | ||
|
|
d7c4849473 | ||
|
|
0d459e5662 | ||
|
|
01bbab6b2c | ||
|
|
59a7064392 | ||
|
|
8e7b779ec9 | ||
|
|
1ba5344258 | ||
|
|
58e9454379 | ||
|
|
af3dec9b97 | ||
|
|
99a161bd06 | ||
|
|
feabad39f4 | ||
|
|
4a5276b589 | ||
|
|
e342dfb467 | ||
|
|
5b425fc297 | ||
|
|
d8328312a1 | ||
|
|
2ce326ca5b | ||
|
|
90ed2fa5c3 | ||
|
|
407d776610 | ||
|
|
951806c888 | ||
|
|
0fe7ea63a8 | ||
|
|
3a95a84963 | ||
|
|
2c3e93cc4d | ||
|
|
656b25a4a1 | ||
|
|
1863694297 | ||
|
|
c0f753dd21 | ||
|
|
369cde2320 | ||
|
|
e90872b486 | ||
|
|
b52d3504cb | ||
|
|
1e150cda01 | ||
|
|
db135b83dc | ||
|
|
ad4866bb3b | ||
|
|
83339c44b3 | ||
|
|
71633249c4 | ||
|
|
fdf244d488 | ||
|
|
5575d454ab | ||
|
|
d4633cfcd5 | ||
|
|
11315c8c69 | ||
|
|
6c387f87dd | ||
|
|
33cf4877f5 | ||
|
|
6e2fee77a7 | ||
|
|
502e5ceb79 | ||
|
|
4685a53f29 | ||
|
|
79616f47cb | ||
|
|
496218b6e6 | ||
|
|
8bd8ccd974 | ||
|
|
c802de865a | ||
|
|
1dfd6e395c | ||
|
|
25eabb39a6 | ||
|
|
869e0e60c2 | ||
|
|
cae5823685 | ||
|
|
3d0dbc1fb0 | ||
|
|
912583026f | ||
|
|
748304dadd | ||
|
|
253c2179de | ||
|
|
fcc72e5af1 | ||
|
|
d472953e10 | ||
|
|
c78d6c95d6 | ||
|
|
c9cb987eb7 | ||
|
|
099a99d288 | ||
|
|
26ea01d5b4 | ||
|
|
a9afbe59e5 | ||
|
|
dc9cab36ac | ||
|
|
99265c5126 | ||
|
|
57aede6b95 | ||
|
|
e51e41203a | ||
|
|
54644179ea | ||
|
|
7d6a1a4483 | ||
|
|
2715e4456c | ||
|
|
a335d1cc56 | ||
|
|
a89b43b110 | ||
|
|
1b599bde37 | ||
|
|
7bd987e2b9 | ||
|
|
8b0896a43d | ||
|
|
402bb01501 | ||
|
|
75afd83508 | ||
|
|
b25f0f3cd2 | ||
|
|
9226c4b0d5 | ||
|
|
fe3e262209 | ||
|
|
b9fac26dcd | ||
|
|
343e5a03a7 | ||
|
|
acea082a7c | ||
|
|
0cff1f116f |
@@ -144,20 +144,20 @@ stages:
|
|||||||
test: osx/10.11
|
test: osx/10.11
|
||||||
- name: macOS 10.15
|
- name: macOS 10.15
|
||||||
test: macos/10.15
|
test: macos/10.15
|
||||||
- name: macOS 11.1
|
|
||||||
test: macos/11.1
|
|
||||||
- name: RHEL 7.8
|
- name: RHEL 7.8
|
||||||
test: rhel/7.8
|
test: rhel/7.8
|
||||||
- name: RHEL 8.2
|
- name: RHEL 8.2
|
||||||
test: rhel/8.2
|
test: rhel/8.2
|
||||||
- name: FreeBSD 11.4
|
- name: FreeBSD 11.1
|
||||||
test: freebsd/11.4
|
test: freebsd/11.1
|
||||||
- name: FreeBSD 12.2
|
- name: FreeBSD 12.1
|
||||||
test: freebsd/12.2
|
test: freebsd/12.1
|
||||||
groups:
|
groups:
|
||||||
- 1
|
- 1
|
||||||
- 2
|
- 2
|
||||||
- 3
|
- 3
|
||||||
|
- 4
|
||||||
|
- 5
|
||||||
- stage: Remote_2_10
|
- stage: Remote_2_10
|
||||||
displayName: Remote 2.10
|
displayName: Remote 2.10
|
||||||
dependsOn: []
|
dependsOn: []
|
||||||
@@ -166,8 +166,8 @@ stages:
|
|||||||
parameters:
|
parameters:
|
||||||
testFormat: 2.10/{0}
|
testFormat: 2.10/{0}
|
||||||
targets:
|
targets:
|
||||||
- name: macOS 11.1
|
- name: OS X 10.11
|
||||||
test: macos/11.1
|
test: osx/10.11
|
||||||
- name: RHEL 8.2
|
- name: RHEL 8.2
|
||||||
test: rhel/8.2
|
test: rhel/8.2
|
||||||
- name: FreeBSD 12.1
|
- name: FreeBSD 12.1
|
||||||
@@ -175,6 +175,9 @@ stages:
|
|||||||
groups:
|
groups:
|
||||||
- 1
|
- 1
|
||||||
- 2
|
- 2
|
||||||
|
- 3
|
||||||
|
- 4
|
||||||
|
- 5
|
||||||
- stage: Remote_2_9
|
- stage: Remote_2_9
|
||||||
displayName: Remote 2.9
|
displayName: Remote 2.9
|
||||||
dependsOn: []
|
dependsOn: []
|
||||||
@@ -185,11 +188,14 @@ stages:
|
|||||||
targets:
|
targets:
|
||||||
- name: RHEL 8.2
|
- name: RHEL 8.2
|
||||||
test: rhel/8.2
|
test: rhel/8.2
|
||||||
- name: FreeBSD 12.0
|
#- name: FreeBSD 12.0
|
||||||
test: freebsd/12.0
|
# test: freebsd/12.0
|
||||||
groups:
|
groups:
|
||||||
- 1
|
- 1
|
||||||
- 2
|
- 2
|
||||||
|
- 3
|
||||||
|
- 4
|
||||||
|
- 5
|
||||||
|
|
||||||
### Docker
|
### Docker
|
||||||
- stage: Docker_devel
|
- stage: Docker_devel
|
||||||
@@ -218,12 +224,12 @@ stages:
|
|||||||
test: ubuntu1604
|
test: ubuntu1604
|
||||||
- name: Ubuntu 18.04
|
- name: Ubuntu 18.04
|
||||||
test: ubuntu1804
|
test: ubuntu1804
|
||||||
- name: Ubuntu 20.04
|
|
||||||
test: ubuntu2004
|
|
||||||
groups:
|
groups:
|
||||||
- 1
|
- 1
|
||||||
- 2
|
- 2
|
||||||
- 3
|
- 3
|
||||||
|
- 4
|
||||||
|
- 5
|
||||||
- stage: Docker_2_10
|
- stage: Docker_2_10
|
||||||
displayName: Docker 2.10
|
displayName: Docker 2.10
|
||||||
dependsOn: []
|
dependsOn: []
|
||||||
@@ -232,15 +238,20 @@ stages:
|
|||||||
parameters:
|
parameters:
|
||||||
testFormat: 2.10/linux/{0}
|
testFormat: 2.10/linux/{0}
|
||||||
targets:
|
targets:
|
||||||
- name: CentOS 8
|
#- name: CentOS 8
|
||||||
test: centos8
|
# test: centos8
|
||||||
- name: Fedora 32
|
- name: Fedora 32
|
||||||
test: fedora32
|
test: fedora32
|
||||||
- name: openSUSE 15 py3
|
- name: openSUSE 15 py3
|
||||||
test: opensuse15
|
test: opensuse15
|
||||||
|
- name: Ubuntu 18.04
|
||||||
|
test: ubuntu1804
|
||||||
groups:
|
groups:
|
||||||
|
- 1
|
||||||
- 2
|
- 2
|
||||||
- 3
|
- 3
|
||||||
|
- 4
|
||||||
|
- 5
|
||||||
- stage: Docker_2_9
|
- stage: Docker_2_9
|
||||||
displayName: Docker 2.9
|
displayName: Docker 2.9
|
||||||
dependsOn: []
|
dependsOn: []
|
||||||
@@ -249,15 +260,20 @@ stages:
|
|||||||
parameters:
|
parameters:
|
||||||
testFormat: 2.9/linux/{0}
|
testFormat: 2.9/linux/{0}
|
||||||
targets:
|
targets:
|
||||||
- name: CentOS 8
|
#- name: CentOS 8
|
||||||
test: centos8
|
# test: centos8
|
||||||
- name: Fedora 31
|
#- name: Fedora 31
|
||||||
test: fedora31
|
# test: fedora31
|
||||||
- name: openSUSE 15 py3
|
#- name: openSUSE 15 py3
|
||||||
test: opensuse15
|
# test: opensuse15
|
||||||
|
- name: Ubuntu 18.04
|
||||||
|
test: ubuntu1804
|
||||||
groups:
|
groups:
|
||||||
|
- 1
|
||||||
- 2
|
- 2
|
||||||
- 3
|
- 3
|
||||||
|
- 4
|
||||||
|
- 5
|
||||||
|
|
||||||
### Cloud
|
### Cloud
|
||||||
- stage: Cloud_devel
|
- stage: Cloud_devel
|
||||||
@@ -290,7 +306,7 @@ stages:
|
|||||||
nameFormat: Python {0}
|
nameFormat: Python {0}
|
||||||
testFormat: 2.9/cloud/{0}/1
|
testFormat: 2.9/cloud/{0}/1
|
||||||
targets:
|
targets:
|
||||||
- test: 3.6
|
- test: 2.7
|
||||||
- stage: Summary
|
- stage: Summary
|
||||||
condition: succeededOrFailed()
|
condition: succeededOrFailed()
|
||||||
dependsOn:
|
dependsOn:
|
||||||
|
|||||||
145
.github/BOTMETA.yml
vendored
145
.github/BOTMETA.yml
vendored
@@ -20,6 +20,9 @@ files:
|
|||||||
maintainers: $team_macos
|
maintainers: $team_macos
|
||||||
labels: macos say
|
labels: macos say
|
||||||
keywords: brew cask darwin homebrew macosx macports osx
|
keywords: brew cask darwin homebrew macosx macports osx
|
||||||
|
$callbacks/stderr.py:
|
||||||
|
maintainers: ysn2233
|
||||||
|
labels: stderr
|
||||||
$callbacks/sumologic.py:
|
$callbacks/sumologic.py:
|
||||||
maintainers: ryancurrah
|
maintainers: ryancurrah
|
||||||
labels: sumologic
|
labels: sumologic
|
||||||
@@ -28,6 +31,11 @@ files:
|
|||||||
$callbacks/unixy.py:
|
$callbacks/unixy.py:
|
||||||
maintainers: akatch
|
maintainers: akatch
|
||||||
labels: unixy
|
labels: unixy
|
||||||
|
$connections/docker.py:
|
||||||
|
maintainers: $team_docker
|
||||||
|
labels: cloud docker
|
||||||
|
ignore: cove
|
||||||
|
supershipit: felixfontein
|
||||||
$connections/:
|
$connections/:
|
||||||
labels: connections
|
labels: connections
|
||||||
$connections/kubectl.py:
|
$connections/kubectl.py:
|
||||||
@@ -36,10 +44,24 @@ files:
|
|||||||
$connections/lxd.py:
|
$connections/lxd.py:
|
||||||
maintainers: mattclay
|
maintainers: mattclay
|
||||||
labels: lxd
|
labels: lxd
|
||||||
|
$connections/oc.py:
|
||||||
|
maintainers: chouseknecht fabianvf flaper87 maxamillion
|
||||||
|
labels: oc
|
||||||
$connections/saltstack.py:
|
$connections/saltstack.py:
|
||||||
labels: saltstack
|
labels: saltstack
|
||||||
$doc_fragments/:
|
$doc_fragments/:
|
||||||
labels: docs_fragments
|
labels: docs_fragments
|
||||||
|
$doc_fragments/docker.py:
|
||||||
|
maintainers: $team_docker
|
||||||
|
labels: cloud docker
|
||||||
|
ignore: cove
|
||||||
|
supershipit: felixfontein
|
||||||
|
$doc_fragments/gcp.py:
|
||||||
|
maintainers: $team_google
|
||||||
|
labels: gcp
|
||||||
|
supershipit: erjohnso rambleraptor
|
||||||
|
$doc_fragments/hetzner.py:
|
||||||
|
labels: hetzner
|
||||||
$doc_fragments/hpe3par.py:
|
$doc_fragments/hpe3par.py:
|
||||||
maintainers: farhan7500 gautamphegde
|
maintainers: farhan7500 gautamphegde
|
||||||
labels: hpe3par
|
labels: hpe3par
|
||||||
@@ -48,17 +70,19 @@ files:
|
|||||||
labels: hwc
|
labels: hwc
|
||||||
$doc_fragments/nomad.py:
|
$doc_fragments/nomad.py:
|
||||||
maintainers: chris93111
|
maintainers: chris93111
|
||||||
|
$doc_fragments/postgres.py:
|
||||||
|
maintainers: $team_postgresql
|
||||||
|
labels: postgres postgresql
|
||||||
|
keywords: database postgres postgresql
|
||||||
$doc_fragments/xenserver.py:
|
$doc_fragments/xenserver.py:
|
||||||
maintainers: bvitnik
|
maintainers: bvitnik
|
||||||
labels: xenserver
|
labels: xenserver
|
||||||
$filters/dict_kv.py:
|
$filters/dict_kv.py:
|
||||||
maintainers: giner
|
maintainers: giner
|
||||||
$filters/jc.py:
|
|
||||||
maintainers: kellyjonbrazil
|
|
||||||
$filters/list.py:
|
|
||||||
maintainers: vbotka
|
|
||||||
$filters/time.py:
|
$filters/time.py:
|
||||||
maintainers: resmo
|
maintainers: resmo
|
||||||
|
$filters/jc.py:
|
||||||
|
maintainers: kellyjonbrazil
|
||||||
$httpapis/:
|
$httpapis/:
|
||||||
maintainers: $team_networking
|
maintainers: $team_networking
|
||||||
labels: networking
|
labels: networking
|
||||||
@@ -68,6 +92,16 @@ files:
|
|||||||
keywords: firepower ftd
|
keywords: firepower ftd
|
||||||
$inventories/:
|
$inventories/:
|
||||||
labels: inventories
|
labels: inventories
|
||||||
|
$inventories/docker_machine.py:
|
||||||
|
maintainers: $team_docker
|
||||||
|
labels: cloud docker
|
||||||
|
ignore: cove
|
||||||
|
supershipit: felixfontein
|
||||||
|
$inventories/docker_swarm.py:
|
||||||
|
maintainers: $team_docker morph027
|
||||||
|
labels: cloud docker docker_swarm
|
||||||
|
ignore: cove
|
||||||
|
supershipit: felixfontein
|
||||||
$inventories/linode.py:
|
$inventories/linode.py:
|
||||||
maintainers: $team_linode
|
maintainers: $team_linode
|
||||||
labels: cloud linode
|
labels: cloud linode
|
||||||
@@ -94,6 +128,9 @@ files:
|
|||||||
maintainers: amigus
|
maintainers: amigus
|
||||||
$lookups/dsv.py:
|
$lookups/dsv.py:
|
||||||
maintainers: amigus
|
maintainers: amigus
|
||||||
|
$lookups/hashi_vault.py:
|
||||||
|
labels: hashi_vault
|
||||||
|
maintainers: briantist
|
||||||
$lookups/manifold.py:
|
$lookups/manifold.py:
|
||||||
maintainers: galanoff
|
maintainers: galanoff
|
||||||
labels: manifold
|
labels: manifold
|
||||||
@@ -102,6 +139,11 @@ files:
|
|||||||
labels: infoblox networking
|
labels: infoblox networking
|
||||||
$module_utils/:
|
$module_utils/:
|
||||||
labels: module_utils
|
labels: module_utils
|
||||||
|
$module_utils/docker/:
|
||||||
|
maintainers: $team_docker
|
||||||
|
labels: cloud
|
||||||
|
ignore: cove
|
||||||
|
supershipit: felixfontein
|
||||||
$module_utils/gitlab.py:
|
$module_utils/gitlab.py:
|
||||||
notify: jlozadad
|
notify: jlozadad
|
||||||
maintainers: $team_gitlab
|
maintainers: $team_gitlab
|
||||||
@@ -116,6 +158,10 @@ files:
|
|||||||
$module_utils/ipa.py:
|
$module_utils/ipa.py:
|
||||||
maintainers: $team_ipa
|
maintainers: $team_ipa
|
||||||
labels: ipa
|
labels: ipa
|
||||||
|
$module_utils/kubevirt.py:
|
||||||
|
maintainers: $team_kubevirt
|
||||||
|
labels: cloud kubevirt
|
||||||
|
keywords: kubevirt
|
||||||
$module_utils/manageiq.py:
|
$module_utils/manageiq.py:
|
||||||
maintainers: $team_manageiq
|
maintainers: $team_manageiq
|
||||||
labels: manageiq
|
labels: manageiq
|
||||||
@@ -131,6 +177,10 @@ files:
|
|||||||
$module_utils/oracle/oci_utils.py:
|
$module_utils/oracle/oci_utils.py:
|
||||||
maintainers: $team_oracle
|
maintainers: $team_oracle
|
||||||
labels: cloud
|
labels: cloud
|
||||||
|
$module_utils/postgres.py:
|
||||||
|
maintainers: $team_postgresql
|
||||||
|
labels: postgres postgresql
|
||||||
|
keywords: database postgres postgresql
|
||||||
$module_utils/pure.py:
|
$module_utils/pure.py:
|
||||||
maintainers: $team_purestorage
|
maintainers: $team_purestorage
|
||||||
labels: pure pure_storage
|
labels: pure pure_storage
|
||||||
@@ -162,11 +212,46 @@ files:
|
|||||||
labels: dimensiondata_network
|
labels: dimensiondata_network
|
||||||
$modules/cloud/dimensiondata/dimensiondata_vlan.py:
|
$modules/cloud/dimensiondata/dimensiondata_vlan.py:
|
||||||
maintainers: tintoy
|
maintainers: tintoy
|
||||||
|
$modules/cloud/docker/:
|
||||||
|
maintainers: $team_docker
|
||||||
|
ignore: cove
|
||||||
|
supershipit: felixfontein
|
||||||
|
$modules/cloud/docker/docker_compose.py:
|
||||||
|
maintainers: sluther
|
||||||
|
labels: docker_compose
|
||||||
|
$modules/cloud/docker/docker_config.py:
|
||||||
|
maintainers: ushuz
|
||||||
|
$modules/cloud/docker/docker_container.py:
|
||||||
|
maintainers: dusdanig softzilla zfil
|
||||||
|
ignore: ThomasSteinbach cove joshuaconner
|
||||||
|
$modules/cloud/docker/docker_image.py:
|
||||||
|
maintainers: softzilla ssbarnea
|
||||||
|
$modules/cloud/docker/docker_login.py:
|
||||||
|
maintainers: olsaki
|
||||||
|
$modules/cloud/docker/docker_network.py:
|
||||||
|
maintainers: keitwb
|
||||||
|
labels: docker_network
|
||||||
|
$modules/cloud/docker/docker_stack_task_info.py:
|
||||||
|
maintainers: imjoseangel
|
||||||
|
$modules/cloud/docker/docker_swarm_service.py:
|
||||||
|
maintainers: hannseman
|
||||||
|
labels: docker_swarm_service
|
||||||
|
$modules/cloud/docker/docker_swarm_service_info.py:
|
||||||
|
maintainers: hannseman
|
||||||
|
$modules/cloud/docker/docker_volume.py:
|
||||||
|
maintainers: agronholm
|
||||||
|
$modules/cloud/google/:
|
||||||
|
maintainers: $team_google
|
||||||
|
ignore: supertom
|
||||||
|
supershipit: $team_google
|
||||||
$modules/cloud/heroku/heroku_collaborator.py:
|
$modules/cloud/heroku/heroku_collaborator.py:
|
||||||
maintainers: marns93
|
maintainers: marns93
|
||||||
$modules/cloud/huawei/:
|
$modules/cloud/huawei/:
|
||||||
maintainers: $team_huawei huaweicloud
|
maintainers: $team_huawei huaweicloud
|
||||||
keywords: cloud huawei hwc
|
keywords: cloud huawei hwc
|
||||||
|
$modules/cloud/kubevirt/:
|
||||||
|
maintainers: $team_kubevirt kubevirt
|
||||||
|
keywords: kubevirt
|
||||||
$modules/cloud/linode/:
|
$modules/cloud/linode/:
|
||||||
maintainers: $team_linode
|
maintainers: $team_linode
|
||||||
$modules/cloud/linode/linode.py:
|
$modules/cloud/linode/linode.py:
|
||||||
@@ -174,7 +259,7 @@ files:
|
|||||||
$modules/cloud/lxc/lxc_container.py:
|
$modules/cloud/lxc/lxc_container.py:
|
||||||
maintainers: cloudnull
|
maintainers: cloudnull
|
||||||
$modules/cloud/lxd/:
|
$modules/cloud/lxd/:
|
||||||
ignore: hnakamur
|
maintainers: hnakamur
|
||||||
$modules/cloud/memset/:
|
$modules/cloud/memset/:
|
||||||
maintainers: glitchcrab
|
maintainers: glitchcrab
|
||||||
$modules/cloud/misc/cloud_init_data_facts.py:
|
$modules/cloud/misc/cloud_init_data_facts.py:
|
||||||
@@ -191,10 +276,6 @@ files:
|
|||||||
labels: proxmox_kvm virt
|
labels: proxmox_kvm virt
|
||||||
ignore: skvidal
|
ignore: skvidal
|
||||||
keywords: kvm libvirt proxmox qemu
|
keywords: kvm libvirt proxmox qemu
|
||||||
$modules/cloud/misc/proxmox_snap.py:
|
|
||||||
maintainers: $team_virt
|
|
||||||
labels: proxmox virt
|
|
||||||
keywords: kvm libvirt proxmox qemu
|
|
||||||
$modules/cloud/misc/proxmox_template.py:
|
$modules/cloud/misc/proxmox_template.py:
|
||||||
maintainers: $team_virt UnderGreen
|
maintainers: $team_virt UnderGreen
|
||||||
labels: proxmox_template virt
|
labels: proxmox_template virt
|
||||||
@@ -323,6 +404,20 @@ files:
|
|||||||
$modules/database/mssql/mssql_db.py:
|
$modules/database/mssql/mssql_db.py:
|
||||||
maintainers: vedit Jmainguy kenichi-ogawa-1988
|
maintainers: vedit Jmainguy kenichi-ogawa-1988
|
||||||
labels: mssql_db
|
labels: mssql_db
|
||||||
|
$modules/database/postgresql/:
|
||||||
|
keywords: database postgres postgresql
|
||||||
|
labels: postgres postgresql
|
||||||
|
maintainers: $team_postgresql
|
||||||
|
$modules/database/postgresql/postgresql_ext.py:
|
||||||
|
maintainers: dschep strk
|
||||||
|
$modules/database/postgresql/postgresql_lang.py:
|
||||||
|
maintainers: jensdepuydt
|
||||||
|
$modules/database/postgresql/postgresql_privs.py:
|
||||||
|
maintainers: b6d
|
||||||
|
$modules/database/postgresql/postgresql_query.py:
|
||||||
|
maintainers: archf wrouesnel
|
||||||
|
$modules/database/postgresql/postgresql_tablespace.py:
|
||||||
|
maintainers: antoinell
|
||||||
$modules/database/vertica/:
|
$modules/database/vertica/:
|
||||||
maintainers: dareko
|
maintainers: dareko
|
||||||
$modules/files/archive.py:
|
$modules/files/archive.py:
|
||||||
@@ -344,8 +439,6 @@ files:
|
|||||||
maintainers: Rylon
|
maintainers: Rylon
|
||||||
$modules/identity/ipa/:
|
$modules/identity/ipa/:
|
||||||
maintainers: $team_ipa
|
maintainers: $team_ipa
|
||||||
$modules/identity/ipa/ipa_pwpolicy.py:
|
|
||||||
maintainers: adralioh
|
|
||||||
$modules/identity/ipa/ipa_service.py:
|
$modules/identity/ipa/ipa_service.py:
|
||||||
maintainers: cprh
|
maintainers: cprh
|
||||||
$modules/identity/ipa/ipa_vault.py:
|
$modules/identity/ipa/ipa_vault.py:
|
||||||
@@ -430,6 +523,14 @@ files:
|
|||||||
maintainers: briceburg
|
maintainers: briceburg
|
||||||
$modules/net_tools/haproxy.py:
|
$modules/net_tools/haproxy.py:
|
||||||
maintainers: ravibhure
|
maintainers: ravibhure
|
||||||
|
$modules/net_tools/hetzner_failover_ip.py:
|
||||||
|
maintainers: felixfontein
|
||||||
|
$modules/net_tools/hetzner_failover_ip_info.py:
|
||||||
|
maintainers: felixfontein
|
||||||
|
$modules/net_tools/hetzner_firewall.py:
|
||||||
|
maintainers: felixfontein
|
||||||
|
$modules/net_tools/hetzner_firewall_info.py:
|
||||||
|
maintainers: felixfontein
|
||||||
$modules/net_tools/:
|
$modules/net_tools/:
|
||||||
maintainers: nerzhul
|
maintainers: nerzhul
|
||||||
$modules/net_tools/infinity/infinity.py:
|
$modules/net_tools/infinity/infinity.py:
|
||||||
@@ -475,7 +576,7 @@ files:
|
|||||||
$modules/net_tools/nmcli.py:
|
$modules/net_tools/nmcli.py:
|
||||||
maintainers: alcamie101
|
maintainers: alcamie101
|
||||||
$modules/net_tools/snmp_facts.py:
|
$modules/net_tools/snmp_facts.py:
|
||||||
maintainers: ogenstad ujwalkomarla
|
maintainers: ogenstad bigmstone ujwalkomarla
|
||||||
$modules/notification/osx_say.py:
|
$modules/notification/osx_say.py:
|
||||||
maintainers: ansible mpdehaan
|
maintainers: ansible mpdehaan
|
||||||
labels: _osx_say
|
labels: _osx_say
|
||||||
@@ -529,7 +630,7 @@ files:
|
|||||||
$modules/notification/syslogger.py:
|
$modules/notification/syslogger.py:
|
||||||
maintainers: garbled1
|
maintainers: garbled1
|
||||||
$modules/notification/telegram.py:
|
$modules/notification/telegram.py:
|
||||||
maintainers: tyouxa loms
|
maintainers: tyouxa
|
||||||
$modules/notification/twilio.py:
|
$modules/notification/twilio.py:
|
||||||
maintainers: makaimc
|
maintainers: makaimc
|
||||||
$modules/notification/typetalk.py:
|
$modules/notification/typetalk.py:
|
||||||
@@ -568,8 +669,6 @@ files:
|
|||||||
ignore: kbrebanov
|
ignore: kbrebanov
|
||||||
$modules/packaging/os/apt_rpm.py:
|
$modules/packaging/os/apt_rpm.py:
|
||||||
maintainers: evgkrsk
|
maintainers: evgkrsk
|
||||||
$modules/packaging/os/copr.py:
|
|
||||||
maintainers: schlupov
|
|
||||||
$modules/packaging/os/flatpak.py:
|
$modules/packaging/os/flatpak.py:
|
||||||
maintainers: $team_flatpak
|
maintainers: $team_flatpak
|
||||||
$modules/packaging/os/flatpak_remote.py:
|
$modules/packaging/os/flatpak_remote.py:
|
||||||
@@ -656,8 +755,6 @@ files:
|
|||||||
maintainers: seandst
|
maintainers: seandst
|
||||||
$modules/packaging/os/rhsm_repository.py:
|
$modules/packaging/os/rhsm_repository.py:
|
||||||
maintainers: giovannisciortino
|
maintainers: giovannisciortino
|
||||||
$modules/packaging/os/rpm_ostree_pkg.py:
|
|
||||||
maintainers: dustymabe Akasurde
|
|
||||||
$modules/packaging/os/slackpkg.py:
|
$modules/packaging/os/slackpkg.py:
|
||||||
maintainers: KimNorgaard
|
maintainers: KimNorgaard
|
||||||
$modules/packaging/os/snap.py:
|
$modules/packaging/os/snap.py:
|
||||||
@@ -679,8 +776,6 @@ files:
|
|||||||
maintainers: pmakowski
|
maintainers: pmakowski
|
||||||
$modules/packaging/os/xbps.py:
|
$modules/packaging/os/xbps.py:
|
||||||
maintainers: dinoocch the-maldridge
|
maintainers: dinoocch the-maldridge
|
||||||
$modules/packaging/os/yum_versionlock.py:
|
|
||||||
maintainers: florianpaulhoberg aminvakil
|
|
||||||
$modules/packaging/os/zypper.py:
|
$modules/packaging/os/zypper.py:
|
||||||
maintainers: $team_suse
|
maintainers: $team_suse
|
||||||
labels: zypper
|
labels: zypper
|
||||||
@@ -695,6 +790,8 @@ files:
|
|||||||
maintainers: jagadeeshnv
|
maintainers: jagadeeshnv
|
||||||
$modules/remote_management/dellemc/ome_device_info.py:
|
$modules/remote_management/dellemc/ome_device_info.py:
|
||||||
maintainers: Sajna-Shetty
|
maintainers: Sajna-Shetty
|
||||||
|
$modules/remote_management/foreman/:
|
||||||
|
maintainers: ehelms ares ekohl xprazak2
|
||||||
$modules/remote_management/hpilo/:
|
$modules/remote_management/hpilo/:
|
||||||
maintainers: haad
|
maintainers: haad
|
||||||
ignore: dagwieers
|
ignore: dagwieers
|
||||||
@@ -736,6 +833,8 @@ files:
|
|||||||
maintainers: andreparames
|
maintainers: andreparames
|
||||||
$modules/source_control/git_config.py:
|
$modules/source_control/git_config.py:
|
||||||
maintainers: djmattyg007 mgedmin
|
maintainers: djmattyg007 mgedmin
|
||||||
|
$modules/source_control/github/github_hooks.py:
|
||||||
|
maintainers: pcgentry
|
||||||
$modules/source_control/github/github_deploy_key.py:
|
$modules/source_control/github/github_deploy_key.py:
|
||||||
maintainers: bincyber
|
maintainers: bincyber
|
||||||
$modules/source_control/github/github_issue.py:
|
$modules/source_control/github/github_issue.py:
|
||||||
@@ -903,8 +1002,6 @@ files:
|
|||||||
maintainers: bcoca
|
maintainers: bcoca
|
||||||
$modules/system/syspatch.py:
|
$modules/system/syspatch.py:
|
||||||
maintainers: precurse
|
maintainers: precurse
|
||||||
$modules/system/sysrc.py:
|
|
||||||
maintainers: dlundgren
|
|
||||||
$modules/system/sysupgrade.py:
|
$modules/system/sysupgrade.py:
|
||||||
maintainers: precurse
|
maintainers: precurse
|
||||||
$modules/system/timezone.py:
|
$modules/system/timezone.py:
|
||||||
@@ -1001,14 +1098,17 @@ macros:
|
|||||||
team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo
|
team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo
|
||||||
team_consul: colin-nolan sgargan
|
team_consul: colin-nolan sgargan
|
||||||
team_cyberark_conjur: jvanderhoof ryanprior
|
team_cyberark_conjur: jvanderhoof ryanprior
|
||||||
|
team_docker: DBendit WojciechowskiPiotr akshay196 danihodovic dariko felixfontein jwitko kassiansun tbouvet chouseknecht
|
||||||
team_e_spirit: MatrixCrawler getjack
|
team_e_spirit: MatrixCrawler getjack
|
||||||
team_flatpak: JayKayy oolongbrothers
|
team_flatpak: JayKayy oolongbrothers
|
||||||
team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman
|
team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman
|
||||||
|
team_google: erjohnso rambleraptor
|
||||||
team_hpux: bcoca davx8342
|
team_hpux: bcoca davx8342
|
||||||
team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2
|
team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2
|
||||||
team_ipa: Akasurde Nosmoht fxfitz
|
team_ipa: Akasurde Nosmoht fxfitz
|
||||||
team_jboss: Wolfant jairojunior wbrefvem
|
team_jboss: Wolfant jairojunior wbrefvem
|
||||||
team_keycloak: eikef ndclt
|
team_keycloak: eikef ndclt
|
||||||
|
team_kubevirt: machacekondra mmazur pkliczewski
|
||||||
team_linode: InTheCloudDan decentral1se displague rmcintosh
|
team_linode: InTheCloudDan decentral1se displague rmcintosh
|
||||||
team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr
|
team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr
|
||||||
team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder
|
team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder
|
||||||
@@ -1016,10 +1116,11 @@ macros:
|
|||||||
team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip
|
team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip
|
||||||
team_opennebula: ilicmilan meerkampdvv rsmontero xorel
|
team_opennebula: ilicmilan meerkampdvv rsmontero xorel
|
||||||
team_oracle: manojmeda mross22 nalsaber
|
team_oracle: manojmeda mross22 nalsaber
|
||||||
|
team_postgresql: Andersson007 Dorn- andytom jbscalia kostiantyn-nemchenko matburt nerzhul sebasmannem tcraxs ilicmilan
|
||||||
team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
|
team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
|
||||||
team_redfish: billdodd mraineri tomasg2012
|
team_redfish: billdodd mraineri tomasg2012
|
||||||
team_rhn: FlossWare alikins barnabycourt vritant
|
team_rhn: FlossWare alikins barnabycourt vritant
|
||||||
team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone sieben
|
team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone sieben
|
||||||
team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
|
team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
|
||||||
team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom
|
team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom
|
||||||
team_virt: joshainglis karmab Aversiste Thulium-Drake
|
team_virt: joshainglis karmab Aversiste
|
||||||
|
|||||||
1257
CHANGELOG.rst
1257
CHANGELOG.rst
File diff suppressed because it is too large
Load Diff
@@ -1,6 +1,6 @@
|
|||||||
# Community General Collection
|
# Community General Collection
|
||||||
|
|
||||||
[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
|
[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
|
||||||
[](https://codecov.io/gh/ansible-collections/community.general)
|
[](https://codecov.io/gh/ansible-collections/community.general)
|
||||||
|
|
||||||
This repo contains the `community.general` Ansible Collection. The collection includes many modules and plugins supported by Ansible community which are not part of more specialized community collections.
|
This repo contains the `community.general` Ansible Collection. The collection includes many modules and plugins supported by Ansible community which are not part of more specialized community collections.
|
||||||
@@ -76,7 +76,7 @@ Basic instructions without release branches:
|
|||||||
|
|
||||||
## Release notes
|
## Release notes
|
||||||
|
|
||||||
See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-2/CHANGELOG.rst).
|
See the [changelog](https://github.com/ansible-collections/community.general/blob/main/CHANGELOG.rst).
|
||||||
|
|
||||||
## Roadmap
|
## Roadmap
|
||||||
|
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -1,14 +1,17 @@
|
|||||||
namespace: community
|
namespace: community
|
||||||
name: general
|
name: general
|
||||||
version: 2.0.0
|
version: 1.3.1
|
||||||
readme: README.md
|
readme: README.md
|
||||||
authors:
|
authors:
|
||||||
- Ansible (https://github.com/ansible)
|
- Ansible (https://github.com/ansible)
|
||||||
description: null
|
description: null
|
||||||
license_file: COPYING
|
license_file: COPYING
|
||||||
tags: [community]
|
tags: [community]
|
||||||
# NOTE: No dependencies are expected to be added here
|
# NOTE: No more dependencies can be added to this list
|
||||||
# dependencies:
|
dependencies:
|
||||||
|
ansible.netcommon: '>=1.0.0'
|
||||||
|
community.kubernetes: '>=1.0.0'
|
||||||
|
google.cloud: '>=1.0.0'
|
||||||
repository: https://github.com/ansible-collections/community.general
|
repository: https://github.com/ansible-collections/community.general
|
||||||
documentation: https://docs.ansible.com/ansible/latest/collections/community/general/
|
documentation: https://docs.ansible.com/ansible/latest/collections/community/general/
|
||||||
homepage: https://github.com/ansible-collections/community.general
|
homepage: https://github.com/ansible-collections/community.general
|
||||||
|
|||||||
662
meta/runtime.yml
662
meta/runtime.yml
@@ -1,6 +1,36 @@
|
|||||||
---
|
|
||||||
requires_ansible: '>=2.9.10'
|
requires_ansible: '>=2.9.10'
|
||||||
action_groups:
|
action_groups:
|
||||||
|
docker:
|
||||||
|
- docker_swarm
|
||||||
|
- docker_image_facts
|
||||||
|
- docker_service
|
||||||
|
- docker_compose
|
||||||
|
- docker_config
|
||||||
|
- docker_container
|
||||||
|
- docker_container_info
|
||||||
|
- docker_host_info
|
||||||
|
- docker_image
|
||||||
|
- docker_image_info
|
||||||
|
- docker_login
|
||||||
|
- docker_network
|
||||||
|
- docker_network_info
|
||||||
|
- docker_node
|
||||||
|
- docker_node_info
|
||||||
|
- docker_prune
|
||||||
|
- docker_secret
|
||||||
|
- docker_swarm
|
||||||
|
- docker_swarm_info
|
||||||
|
- docker_swarm_service
|
||||||
|
- docker_swarm_service_info
|
||||||
|
- docker_volume
|
||||||
|
- docker_volume_info
|
||||||
|
k8s:
|
||||||
|
- kubevirt_cdi_upload
|
||||||
|
- kubevirt_preset
|
||||||
|
- kubevirt_pvc
|
||||||
|
- kubevirt_rs
|
||||||
|
- kubevirt_template
|
||||||
|
- kubevirt_vm
|
||||||
ovirt:
|
ovirt:
|
||||||
- ovirt_affinity_label_facts
|
- ovirt_affinity_label_facts
|
||||||
- ovirt_api_facts
|
- ovirt_api_facts
|
||||||
@@ -27,174 +57,252 @@ action_groups:
|
|||||||
- ovirt_vm_facts
|
- ovirt_vm_facts
|
||||||
- ovirt_vmpool_facts
|
- ovirt_vmpool_facts
|
||||||
plugin_routing:
|
plugin_routing:
|
||||||
connection:
|
|
||||||
docker:
|
|
||||||
redirect: community.docker.docker
|
|
||||||
oc:
|
|
||||||
redirect: community.okd.oc
|
|
||||||
lookup:
|
lookup:
|
||||||
gcp_storage_file:
|
conjur_variable:
|
||||||
redirect: community.google.gcp_storage_file
|
redirect: cyberark.conjur.conjur_variable
|
||||||
hashi_vault:
|
deprecation:
|
||||||
redirect: community.hashi_vault.hashi_vault
|
removal_version: 2.0.0
|
||||||
|
warning_text: The conjur_variable lookup has been moved to the cyberark.conjur collection.
|
||||||
modules:
|
modules:
|
||||||
ali_instance_facts:
|
ali_instance_facts:
|
||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: see plugin documentation for details
|
||||||
docker_compose:
|
digital_ocean:
|
||||||
redirect: community.docker.docker_compose
|
deprecation:
|
||||||
docker_config:
|
removal_version: 2.0.0
|
||||||
redirect: community.docker.docker_config
|
warning_text: The digital_ocean module has been moved to the community.digitalocean collection.
|
||||||
docker_container:
|
redirect: community.digitalocean.digital_ocean
|
||||||
redirect: community.docker.docker_container
|
digital_ocean_account_facts:
|
||||||
docker_container_info:
|
deprecation:
|
||||||
redirect: community.docker.docker_container_info
|
removal_version: 2.0.0
|
||||||
docker_host_info:
|
warning_text: The digital_ocean_account_facts module has been moved to the community.digitalocean collection.
|
||||||
redirect: community.docker.docker_host_info
|
redirect: community.digitalocean.digital_ocean_account_facts
|
||||||
docker_image:
|
digital_ocean_account_info:
|
||||||
redirect: community.docker.docker_image
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_account_info module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_account_info
|
||||||
|
digital_ocean_block_storage:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_block_storage module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_block_storage
|
||||||
|
digital_ocean_certificate:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_certificate module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_certificate
|
||||||
|
digital_ocean_certificate_facts:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_certificate_facts module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_certificate_facts
|
||||||
|
digital_ocean_certificate_info:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_certificate_info module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_certificate_info
|
||||||
|
digital_ocean_domain:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_domain module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_domain
|
||||||
|
digital_ocean_domain_facts:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_domain_facts module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_domain_facts
|
||||||
|
digital_ocean_domain_info:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_domain_info module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_domain_info
|
||||||
|
digital_ocean_droplet:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_droplet module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_droplet
|
||||||
|
digital_ocean_firewall_facts:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_firewall_facts module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_firewall_facts
|
||||||
|
digital_ocean_firewall_info:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_firewall_info module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_firewall_info
|
||||||
|
digital_ocean_floating_ip:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_floating_ip module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_floating_ip
|
||||||
|
digital_ocean_floating_ip_facts:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_floating_ip_facts module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_floating_ip_facts
|
||||||
|
digital_ocean_floating_ip_info:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_floating_ip_info module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_floating_ip_info
|
||||||
|
digital_ocean_image_facts:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_image_facts module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_image_facts
|
||||||
|
digital_ocean_image_info:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_image_info module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_image_info
|
||||||
|
digital_ocean_load_balancer_facts:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_load_balancer_facts module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_load_balancer_facts
|
||||||
|
digital_ocean_load_balancer_info:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_load_balancer_info module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_load_balancer_info
|
||||||
|
digital_ocean_region_facts:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_region_facts module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_region_facts
|
||||||
|
digital_ocean_region_info:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_region_info module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_region_info
|
||||||
|
digital_ocean_size_facts:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_size_facts module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_size_facts
|
||||||
|
digital_ocean_size_info:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_size_info module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_size_info
|
||||||
|
digital_ocean_snapshot_facts:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_snapshot_facts module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_snapshot_facts
|
||||||
|
digital_ocean_snapshot_info:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_snapshot_info module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_snapshot_info
|
||||||
|
digital_ocean_sshkey:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_sshkey module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_sshkey
|
||||||
|
digital_ocean_sshkey_facts:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_sshkey_facts module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_sshkey_facts
|
||||||
|
digital_ocean_sshkey_info:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_sshkey_info module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_sshkey_info
|
||||||
|
digital_ocean_tag:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_tag module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_tag
|
||||||
|
digital_ocean_tag_facts:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_tag_facts module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_tag_facts
|
||||||
|
digital_ocean_tag_info:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_tag_info module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_tag_info
|
||||||
|
digital_ocean_volume_facts:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_volume_facts module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_volume_facts
|
||||||
|
digital_ocean_volume_info:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The digital_ocean_volume_info module has been moved to the community.digitalocean collection.
|
||||||
|
redirect: community.digitalocean.digital_ocean_volume_info
|
||||||
docker_image_facts:
|
docker_image_facts:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use community.docker.docker_image_info instead.
|
warning_text: see plugin documentation for details
|
||||||
docker_image_info:
|
|
||||||
redirect: community.docker.docker_image_info
|
|
||||||
docker_login:
|
|
||||||
redirect: community.docker.docker_login
|
|
||||||
docker_network:
|
|
||||||
redirect: community.docker.docker_network
|
|
||||||
docker_network_info:
|
|
||||||
redirect: community.docker.docker_network_info
|
|
||||||
docker_node:
|
|
||||||
redirect: community.docker.docker_node
|
|
||||||
docker_node_info:
|
|
||||||
redirect: community.docker.docker_node_info
|
|
||||||
docker_prune:
|
|
||||||
redirect: community.docker.docker_prune
|
|
||||||
docker_secret:
|
|
||||||
redirect: community.docker.docker_secret
|
|
||||||
docker_service:
|
docker_service:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use community.docker.docker_compose instead.
|
warning_text: see plugin documentation for details
|
||||||
docker_stack:
|
firewalld:
|
||||||
redirect: community.docker.docker_stack
|
deprecation:
|
||||||
docker_stack_info:
|
removal_version: 2.0.0
|
||||||
redirect: community.docker.docker_stack_info
|
warning_text: The firewalld module has been moved to the ansible.posix collection.
|
||||||
docker_stack_task_info:
|
redirect: ansible.posix.firewalld
|
||||||
redirect: community.docker.docker_stack_task_info
|
|
||||||
docker_swarm:
|
|
||||||
redirect: community.docker.docker_swarm
|
|
||||||
docker_swarm_info:
|
|
||||||
redirect: community.docker.docker_swarm_info
|
|
||||||
docker_swarm_service:
|
|
||||||
redirect: community.docker.docker_swarm_service
|
|
||||||
docker_swarm_service_info:
|
|
||||||
redirect: community.docker.docker_swarm_service_info
|
|
||||||
docker_volume:
|
|
||||||
redirect: community.docker.docker_volume
|
|
||||||
docker_volume_info:
|
|
||||||
redirect: community.docker.docker_volume_info
|
|
||||||
foreman:
|
foreman:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use the modules from the theforeman.foreman collection instead.
|
warning_text: see plugin documentation for details
|
||||||
gc_storage:
|
|
||||||
redirect: community.google.gc_storage
|
|
||||||
gcdns_record:
|
gcdns_record:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use google.cloud.gcp_dns_resource_record_set instead.
|
warning_text: see plugin documentation for details
|
||||||
gcdns_zone:
|
gcdns_zone:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use google.cloud.gcp_dns_managed_zone instead.
|
warning_text: see plugin documentation for details
|
||||||
gce:
|
gce:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use google.cloud.gcp_compute_instance instead.
|
warning_text: see plugin documentation for details
|
||||||
gce_eip:
|
|
||||||
redirect: community.google.gce_eip
|
|
||||||
gce_img:
|
|
||||||
redirect: community.google.gce_img
|
|
||||||
gce_instance_template:
|
|
||||||
redirect: community.google.gce_instance_template
|
|
||||||
gce_labels:
|
|
||||||
redirect: community.google.gce_labels
|
|
||||||
gce_lb:
|
|
||||||
redirect: community.google.gce_lb
|
|
||||||
gce_mig:
|
|
||||||
redirect: community.google.gce_mig
|
|
||||||
gce_net:
|
|
||||||
redirect: community.google.gce_net
|
|
||||||
gce_pd:
|
|
||||||
redirect: community.google.gce_pd
|
|
||||||
gce_snapshot:
|
|
||||||
redirect: community.google.gce_snapshot
|
|
||||||
gce_tag:
|
|
||||||
redirect: community.google.gce_tag
|
|
||||||
gcp_backend_service:
|
gcp_backend_service:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use google.cloud.gcp_compute_backend_service instead.
|
warning_text: see plugin documentation for details
|
||||||
gcp_forwarding_rule:
|
gcp_forwarding_rule:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use google.cloud.gcp_compute_forwarding_rule or google.cloud.gcp_compute_global_forwarding_rule instead.
|
warning_text: see plugin documentation for details
|
||||||
gcp_healthcheck:
|
gcp_healthcheck:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use google.cloud.gcp_compute_health_check, google.cloud.gcp_compute_http_health_check or google.cloud.gcp_compute_https_health_check instead.
|
warning_text: see plugin documentation for details
|
||||||
gcp_target_proxy:
|
gcp_target_proxy:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use google.cloud.gcp_compute_target_http_proxy instead.
|
warning_text: see plugin documentation for details
|
||||||
gcp_url_map:
|
gcp_url_map:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use google.cloud.gcp_compute_url_map instead.
|
warning_text: see plugin documentation for details
|
||||||
gcpubsub:
|
|
||||||
redirect: community.google.gcpubsub
|
|
||||||
gcpubsub_info:
|
|
||||||
redirect: community.google.gcpubsub_info
|
|
||||||
gcpubsub_facts:
|
gcpubsub_facts:
|
||||||
redirect: community.google.gcpubsub_info
|
|
||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
warning_text: Use community.google.gcpubsub_info instead.
|
warning_text: see plugin documentation for details
|
||||||
gcspanner:
|
gcspanner:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use google.cloud.gcp_spanner_database and/or google.cloud.gcp_spanner_instance instead.
|
warning_text: see plugin documentation for details
|
||||||
github_hooks:
|
github_hooks:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use community.general.github_webhook and community.general.github_webhook_info instead.
|
warning_text: see plugin documentation for details
|
||||||
gluster_heal_info:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 3.0.0
|
|
||||||
warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_heal_info instead.
|
|
||||||
gluster_peer:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 3.0.0
|
|
||||||
warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_peer instead.
|
|
||||||
gluster_volume:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 3.0.0
|
|
||||||
warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_volume instead.
|
|
||||||
helm:
|
helm:
|
||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
warning_text: The helm module in community.general has been deprecated. Use community.kubernetes.helm instead.
|
warning_text: The helm module in community.general has been deprecated. Use community.kubernetes.helm instead.
|
||||||
hetzner_failover_ip:
|
|
||||||
redirect: community.hrobot.failover_ip
|
|
||||||
hetzner_failover_ip_info:
|
|
||||||
redirect: community.hrobot.failover_ip_info
|
|
||||||
hetzner_firewall:
|
|
||||||
redirect: community.hrobot.firewall
|
|
||||||
hetzner_firewall_info:
|
|
||||||
redirect: community.hrobot.firewall_info
|
|
||||||
hpilo_facts:
|
hpilo_facts:
|
||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
@@ -203,26 +311,44 @@ plugin_routing:
|
|||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: see plugin documentation for details
|
||||||
|
infini_export:
|
||||||
|
redirect: infinidat.infinibox.infini_export
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The infini_export module has been moved to the infinidat collection.
|
||||||
|
infini_export_client:
|
||||||
|
redirect: infinidat.infinibox.infini_export_client
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The infini_export_client module has been moved to the infinidat collection.
|
||||||
|
infini_fs:
|
||||||
|
redirect: infinidat.infinibox.infini_fs
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The infini_fs module has been moved to the infinidat collection.
|
||||||
|
infini_host:
|
||||||
|
redirect: infinidat.infinibox.infini_host
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The infini_host module has been moved to the infinidat collection.
|
||||||
|
infini_pool:
|
||||||
|
redirect: infinidat.infinibox.infini_pool
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The infini_pool module has been moved to the infinidat collection.
|
||||||
|
infini_vol:
|
||||||
|
redirect: infinidat.infinibox.infini_vol
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The infini_vol module has been moved to the infinidat collection.
|
||||||
jenkins_job_facts:
|
jenkins_job_facts:
|
||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: see plugin documentation for details
|
||||||
katello:
|
katello:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use the modules from the theforeman.foreman collection instead.
|
warning_text: see plugin documentation for details
|
||||||
kubevirt_cdi_upload:
|
|
||||||
redirect: community.kubevirt.kubevirt_cdi_upload
|
|
||||||
kubevirt_preset:
|
|
||||||
redirect: community.kubevirt.kubevirt_preset
|
|
||||||
kubevirt_pvc:
|
|
||||||
redirect: community.kubevirt.kubevirt_pvc
|
|
||||||
kubevirt_rs:
|
|
||||||
redirect: community.kubevirt.kubevirt_rs
|
|
||||||
kubevirt_template:
|
|
||||||
redirect: community.kubevirt.kubevirt_template
|
|
||||||
kubevirt_vm:
|
|
||||||
redirect: community.kubevirt.kubevirt_vm
|
|
||||||
ldap_attr:
|
ldap_attr:
|
||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
@@ -243,38 +369,68 @@ plugin_routing:
|
|||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: see plugin documentation for details
|
||||||
|
mysql_db:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The mysql_db module has been moved to the community.mysql collection.
|
||||||
|
redirect: community.mysql.mysql_db
|
||||||
|
mysql_info:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The mysql_info module has been moved to the community.mysql collection.
|
||||||
|
redirect: community.mysql.mysql_info
|
||||||
|
mysql_query:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The mysql_query module has been moved to the community.mysql collection.
|
||||||
|
redirect: community.mysql.mysql_query
|
||||||
|
mysql_replication:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The mysql_replication module has been moved to the community.mysql collection.
|
||||||
|
redirect: community.mysql.mysql_replication
|
||||||
|
mysql_user:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The mysql_user module has been moved to the community.mysql collection.
|
||||||
|
redirect: community.mysql.mysql_user
|
||||||
|
mysql_variables:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The mysql_variables module has been moved to the community.mysql collection.
|
||||||
|
redirect: community.mysql.mysql_variables
|
||||||
na_cdot_aggregate:
|
na_cdot_aggregate:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use netapp.ontap.na_ontap_aggregate instead.
|
warning_text: see plugin documentation for details
|
||||||
na_cdot_license:
|
na_cdot_license:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use netapp.ontap.na_ontap_license instead.
|
warning_text: see plugin documentation for details
|
||||||
na_cdot_lun:
|
na_cdot_lun:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use netapp.ontap.na_ontap_lun instead.
|
warning_text: see plugin documentation for details
|
||||||
na_cdot_qtree:
|
na_cdot_qtree:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use netapp.ontap.na_ontap_qtree instead.
|
warning_text: see plugin documentation for details
|
||||||
na_cdot_svm:
|
na_cdot_svm:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use netapp.ontap.na_ontap_svm instead.
|
warning_text: see plugin documentation for details
|
||||||
na_cdot_user:
|
na_cdot_user:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use netapp.ontap.na_ontap_user instead.
|
warning_text: see plugin documentation for details
|
||||||
na_cdot_user_role:
|
na_cdot_user_role:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use netapp.ontap.na_ontap_user_role instead.
|
warning_text: see plugin documentation for details
|
||||||
na_cdot_volume:
|
na_cdot_volume:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use netapp.ontap.na_ontap_volume instead.
|
warning_text: see plugin documentation for details
|
||||||
na_ontap_gather_facts:
|
na_ontap_gather_facts:
|
||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
@@ -431,50 +587,41 @@ plugin_routing:
|
|||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: see plugin documentation for details
|
||||||
postgresql_copy:
|
proxysql_backend_servers:
|
||||||
redirect: community.postgresql.postgresql_copy
|
deprecation:
|
||||||
postgresql_db:
|
removal_version: 2.0.0
|
||||||
redirect: community.postgresql.postgresql_db
|
warning_text: The proxysql_backend_servers module has been moved to the community.proxysql collection.
|
||||||
postgresql_ext:
|
redirect: community.proxysql.proxysql_backend_servers
|
||||||
redirect: community.postgresql.postgresql_ext
|
proxysql_global_variables:
|
||||||
postgresql_idx:
|
deprecation:
|
||||||
redirect: community.postgresql.postgresql_idx
|
removal_version: 2.0.0
|
||||||
postgresql_info:
|
warning_text: The proxysql_global_variables module has been moved to the community.proxysql collection.
|
||||||
redirect: community.postgresql.postgresql_info
|
redirect: community.proxysql.proxysql_global_variables
|
||||||
postgresql_lang:
|
proxysql_manage_config:
|
||||||
redirect: community.postgresql.postgresql_lang
|
deprecation:
|
||||||
postgresql_membership:
|
removal_version: 2.0.0
|
||||||
redirect: community.postgresql.postgresql_membership
|
warning_text: The proxysql_manage_config module has been moved to the community.proxysql collection.
|
||||||
postgresql_owner:
|
redirect: community.proxysql.proxysql_manage_config
|
||||||
redirect: community.postgresql.postgresql_owner
|
proxysql_mysql_users:
|
||||||
postgresql_pg_hba:
|
deprecation:
|
||||||
redirect: community.postgresql.postgresql_pg_hba
|
removal_version: 2.0.0
|
||||||
postgresql_ping:
|
warning_text: The proxysql_mysql_users module has been moved to the community.proxysql collection.
|
||||||
redirect: community.postgresql.postgresql_ping
|
redirect: community.proxysql.proxysql_mysql_users
|
||||||
postgresql_privs:
|
proxysql_query_rules:
|
||||||
redirect: community.postgresql.postgresql_privs
|
deprecation:
|
||||||
postgresql_publication:
|
removal_version: 2.0.0
|
||||||
redirect: community.postgresql.postgresql_publication
|
warning_text: The proxysql_query_rules module has been moved to the community.proxysql collection.
|
||||||
postgresql_query:
|
redirect: community.proxysql.proxysql_query_rules
|
||||||
redirect: community.postgresql.postgresql_query
|
proxysql_replication_hostgroups:
|
||||||
postgresql_schema:
|
deprecation:
|
||||||
redirect: community.postgresql.postgresql_schema
|
removal_version: 2.0.0
|
||||||
postgresql_sequence:
|
warning_text: The proxysql_replication_hostgroups module has been moved to the community.proxysql collection.
|
||||||
redirect: community.postgresql.postgresql_sequence
|
redirect: community.proxysql.proxysql_replication_hostgroups
|
||||||
postgresql_set:
|
proxysql_scheduler:
|
||||||
redirect: community.postgresql.postgresql_set
|
deprecation:
|
||||||
postgresql_slot:
|
removal_version: 2.0.0
|
||||||
redirect: community.postgresql.postgresql_slot
|
warning_text: The proxysql_scheduler module has been moved to the community.proxysql collection.
|
||||||
postgresql_subscription:
|
redirect: community.proxysql.proxysql_scheduler
|
||||||
redirect: community.postgresql.postgresql_subscription
|
|
||||||
postgresql_table:
|
|
||||||
redirect: community.postgresql.postgresql_table
|
|
||||||
postgresql_tablespace:
|
|
||||||
redirect: community.postgresql.postgresql_tablespace
|
|
||||||
postgresql_user_obj_stat_info:
|
|
||||||
redirect: community.postgresql.postgresql_user_obj_stat_info
|
|
||||||
postgresql_user:
|
|
||||||
redirect: community.postgresql.postgresql_user
|
|
||||||
purefa_facts:
|
purefa_facts:
|
||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
@@ -520,25 +667,25 @@ plugin_routing:
|
|||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: see plugin documentation for details
|
||||||
sf_account_manager:
|
sf_account_manager:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use netapp.elementsw.na_elementsw_account instead.
|
warning_text: see plugin documentation for details
|
||||||
sf_check_connections:
|
sf_check_connections:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use netapp.elementsw.na_elementsw_check_connections instead.
|
warning_text: see plugin documentation for details
|
||||||
sf_snapshot_schedule_manager:
|
sf_snapshot_schedule_manager:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use netapp.elementsw.na_elementsw_snapshot_schedule instead.
|
warning_text: see plugin documentation for details
|
||||||
sf_volume_access_group_manager:
|
sf_volume_access_group_manager:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use netapp.elementsw.na_elementsw_access_group instead.
|
warning_text: see plugin documentation for details
|
||||||
sf_volume_manager:
|
sf_volume_manager:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use netapp.elementsw.na_elementsw_volume instead.
|
warning_text: see plugin documentation for details
|
||||||
smartos_image_facts:
|
smartos_image_facts:
|
||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
@@ -552,52 +699,57 @@ plugin_routing:
|
|||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: see plugin documentation for details
|
||||||
doc_fragments:
|
doc_fragments:
|
||||||
_gcp:
|
digital_ocean:
|
||||||
redirect: community.google._gcp
|
deprecation:
|
||||||
docker:
|
removal_version: 2.0.0
|
||||||
redirect: community.docker.docker
|
warning_text: The digital_ocean docs_fragment has been moved to the community.digitalocean collection.
|
||||||
hetzner:
|
redirect: community.digitalocean.digital_ocean
|
||||||
redirect: community.hrobot.robot
|
infinibox:
|
||||||
kubevirt_common_options:
|
redirect: infinidat.infinibox.infinibox
|
||||||
redirect: community.kubevirt.kubevirt_common_options
|
deprecation:
|
||||||
kubevirt_vm_options:
|
removal_version: 2.0.0
|
||||||
redirect: community.kubevirt.kubevirt_vm_options
|
warning_text: The infinibox doc_fragments plugin has been moved to the infinidat.infinibox collection.
|
||||||
postgresql:
|
mysql:
|
||||||
redirect: community.postgresql.postgresql
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The mysql docs_fragment has been moved to the community.mysql collection.
|
||||||
|
redirect: community.mysql.mysql
|
||||||
|
proxysql:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The proxysql docs_fragment has been moved to the community.proxysql collection.
|
||||||
|
redirect: community.proxysql.proxysql
|
||||||
module_utils:
|
module_utils:
|
||||||
docker.common:
|
digital_ocean:
|
||||||
redirect: community.docker.common
|
deprecation:
|
||||||
docker.swarm:
|
removal_version: 2.0.0
|
||||||
redirect: community.docker.swarm
|
warning_text: The digital_ocean module_utils has been moved to the community.digitalocean collection.
|
||||||
gcdns:
|
redirect: community.digitalocean.digital_ocean
|
||||||
redirect: community.google.gcdns
|
firewalld:
|
||||||
gce:
|
deprecation:
|
||||||
redirect: community.google.gce
|
removal_version: 2.0.0
|
||||||
gcp:
|
warning_text: The firewalld module_utils has been moved to the ansible.posix collection.
|
||||||
redirect: community.google.gcp
|
redirect: ansible.posix.firewalld
|
||||||
hetzner:
|
infinibox:
|
||||||
redirect: community.hrobot.robot
|
redirect: infinidat.infinibox.infinibox
|
||||||
kubevirt:
|
deprecation:
|
||||||
redirect: community.kubevirt.kubevirt
|
removal_version: 2.0.0
|
||||||
postgresql:
|
warning_text: The infinibox module_utils plugin has been moved to the infinidat.infinibox collection.
|
||||||
redirect: community.postgresql.postgresql
|
mysql:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 2.0.0
|
||||||
|
warning_text: The mysql module_utils has been moved to the community.mysql collection.
|
||||||
|
redirect: community.mysql.mysql
|
||||||
callback:
|
callback:
|
||||||
actionable:
|
actionable:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use the 'default' callback plugin with 'display_skipped_hosts = no' and 'display_ok_hosts = no' options.
|
warning_text: see plugin documentation for details
|
||||||
full_skip:
|
full_skip:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use the 'default' callback plugin with 'display_skipped_hosts = no' option.
|
warning_text: see plugin documentation for details
|
||||||
stderr:
|
stderr:
|
||||||
tombstone:
|
deprecation:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: Use the 'default' callback plugin with 'display_failed_stderr = yes' option.
|
warning_text: see plugin documentation for details
|
||||||
inventory:
|
|
||||||
docker_machine:
|
|
||||||
redirect: community.docker.docker_machine
|
|
||||||
docker_swarm:
|
|
||||||
redirect: community.docker.docker_swarm
|
|
||||||
kubevirt:
|
|
||||||
redirect: community.kubevirt.kubevirt
|
|
||||||
|
|||||||
@@ -98,9 +98,25 @@ class ActionModule(ActionBase):
|
|||||||
task_async,
|
task_async,
|
||||||
max_timeout))
|
max_timeout))
|
||||||
|
|
||||||
# inject the async directory based on the shell option into the
|
# BEGIN snippet from async_status action plugin
|
||||||
# module args
|
env_async_dir = [e for e in self._task.environment if
|
||||||
async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
|
"ANSIBLE_ASYNC_DIR" in e]
|
||||||
|
if len(env_async_dir) > 0:
|
||||||
|
# for backwards compatibility we need to get the dir from
|
||||||
|
# ANSIBLE_ASYNC_DIR that is defined in the environment. This is
|
||||||
|
# deprecated and will be removed in favour of shell options
|
||||||
|
async_dir = env_async_dir[0]['ANSIBLE_ASYNC_DIR']
|
||||||
|
|
||||||
|
msg = "Setting the async dir from the environment keyword " \
|
||||||
|
"ANSIBLE_ASYNC_DIR is deprecated. Set the async_dir " \
|
||||||
|
"shell option instead"
|
||||||
|
display.deprecated(msg, version='2.0.0',
|
||||||
|
collection_name='community.general') # was Ansible 2.12
|
||||||
|
else:
|
||||||
|
# inject the async directory based on the shell option into the
|
||||||
|
# module args
|
||||||
|
async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
|
||||||
|
# END snippet from async_status action plugin
|
||||||
|
|
||||||
# Bind the loop max duration to consistent values on both
|
# Bind the loop max duration to consistent values on both
|
||||||
# remote and local sides (if not the same, make the loop
|
# remote and local sides (if not the same, make the loop
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: doas
|
become: doas
|
||||||
short_description: Do As user
|
short_description: Do As user
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the doas utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the doas utility.
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: dzdo
|
become: dzdo
|
||||||
short_description: Centrify's Direct Authorize
|
short_description: Centrify's Direct Authorize
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the dzdo utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the dzdo utility.
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: ksu
|
become: ksu
|
||||||
short_description: Kerberos substitute user
|
short_description: Kerberos substitute user
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the ksu utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the ksu utility.
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: machinectl
|
become: machinectl
|
||||||
short_description: Systemd's machinectl privilege escalation
|
short_description: Systemd's machinectl privilege escalation
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the machinectl utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the machinectl utility.
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: pbrun
|
become: pbrun
|
||||||
short_description: PowerBroker run
|
short_description: PowerBroker run
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the pbrun utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the pbrun utility.
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: pfexec
|
become: pfexec
|
||||||
short_description: profile based execution
|
short_description: profile based execution
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the pfexec utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the pfexec utility.
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: pmrun
|
become: pmrun
|
||||||
short_description: Privilege Manager run
|
short_description: Privilege Manager run
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the pmrun utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the pmrun utility.
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: sesu
|
become: sesu
|
||||||
short_description: CA Privileged Access Manager
|
short_description: CA Privileged Access Manager
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the sesu utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the sesu utility.
|
||||||
|
|||||||
9
plugins/cache/memcached.py
vendored
9
plugins/cache/memcached.py
vendored
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: memcached
|
cache: memcached
|
||||||
short_description: Use memcached DB for cache
|
short_description: Use memcached DB for cache
|
||||||
description:
|
description:
|
||||||
- This cache uses JSON formatted, per host records saved in memcached.
|
- This cache uses JSON formatted, per host records saved in memcached.
|
||||||
@@ -53,7 +53,6 @@ from ansible import constants as C
|
|||||||
from ansible.errors import AnsibleError
|
from ansible.errors import AnsibleError
|
||||||
from ansible.module_utils.common._collections_compat import MutableSet
|
from ansible.module_utils.common._collections_compat import MutableSet
|
||||||
from ansible.plugins.cache import BaseCacheModule
|
from ansible.plugins.cache import BaseCacheModule
|
||||||
from ansible.release import __version__ as ansible_base_version
|
|
||||||
from ansible.utils.display import Display
|
from ansible.utils.display import Display
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -181,9 +180,9 @@ class CacheModule(BaseCacheModule):
|
|||||||
self._timeout = self.get_option('_timeout')
|
self._timeout = self.get_option('_timeout')
|
||||||
self._prefix = self.get_option('_prefix')
|
self._prefix = self.get_option('_prefix')
|
||||||
except KeyError:
|
except KeyError:
|
||||||
# TODO: remove once we no longer support Ansible 2.9
|
display.deprecated('Rather than importing CacheModules directly, '
|
||||||
if not ansible_base_version.startswith('2.9.'):
|
'use ansible.plugins.loader.cache_loader',
|
||||||
raise AnsibleError("Do not import CacheModules directly. Use ansible.plugins.loader.cache_loader instead.")
|
version='2.0.0', collection_name='community.general') # was Ansible 2.12
|
||||||
if C.CACHE_PLUGIN_CONNECTION:
|
if C.CACHE_PLUGIN_CONNECTION:
|
||||||
connection = C.CACHE_PLUGIN_CONNECTION.split(',')
|
connection = C.CACHE_PLUGIN_CONNECTION.split(',')
|
||||||
self._timeout = C.CACHE_PLUGIN_TIMEOUT
|
self._timeout = C.CACHE_PLUGIN_TIMEOUT
|
||||||
|
|||||||
2
plugins/cache/pickle.py
vendored
2
plugins/cache/pickle.py
vendored
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: pickle
|
cache: pickle
|
||||||
short_description: Pickle formatted files.
|
short_description: Pickle formatted files.
|
||||||
description:
|
description:
|
||||||
- This cache uses Python's pickle serialization format, in per host files, saved to the filesystem.
|
- This cache uses Python's pickle serialization format, in per host files, saved to the filesystem.
|
||||||
|
|||||||
9
plugins/cache/redis.py
vendored
9
plugins/cache/redis.py
vendored
@@ -6,7 +6,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: redis
|
cache: redis
|
||||||
short_description: Use Redis DB for cache
|
short_description: Use Redis DB for cache
|
||||||
description:
|
description:
|
||||||
- This cache uses JSON formatted, per host records saved in Redis.
|
- This cache uses JSON formatted, per host records saved in Redis.
|
||||||
@@ -69,7 +69,6 @@ from ansible.errors import AnsibleError
|
|||||||
from ansible.module_utils._text import to_native
|
from ansible.module_utils._text import to_native
|
||||||
from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
|
from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
|
||||||
from ansible.plugins.cache import BaseCacheModule
|
from ansible.plugins.cache import BaseCacheModule
|
||||||
from ansible.release import __version__ as ansible_base_version
|
|
||||||
from ansible.utils.display import Display
|
from ansible.utils.display import Display
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -104,9 +103,9 @@ class CacheModule(BaseCacheModule):
|
|||||||
self._keys_set = self.get_option('_keyset_name')
|
self._keys_set = self.get_option('_keyset_name')
|
||||||
self._sentinel_service_name = self.get_option('_sentinel_service_name')
|
self._sentinel_service_name = self.get_option('_sentinel_service_name')
|
||||||
except KeyError:
|
except KeyError:
|
||||||
# TODO: remove once we no longer support Ansible 2.9
|
display.deprecated('Rather than importing CacheModules directly, '
|
||||||
if not ansible_base_version.startswith('2.9.'):
|
'use ansible.plugins.loader.cache_loader',
|
||||||
raise AnsibleError("Do not import CacheModules directly. Use ansible.plugins.loader.cache_loader instead.")
|
version='2.0.0', collection_name='community.general') # was Ansible 2.12
|
||||||
if C.CACHE_PLUGIN_CONNECTION:
|
if C.CACHE_PLUGIN_CONNECTION:
|
||||||
uri = C.CACHE_PLUGIN_CONNECTION
|
uri = C.CACHE_PLUGIN_CONNECTION
|
||||||
self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
|
self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
|
||||||
|
|||||||
2
plugins/cache/yaml.py
vendored
2
plugins/cache/yaml.py
vendored
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: yaml
|
cache: yaml
|
||||||
short_description: YAML formatted files.
|
short_description: YAML formatted files.
|
||||||
description:
|
description:
|
||||||
- This cache uses YAML formatted, per host, files saved to the filesystem.
|
- This cache uses YAML formatted, per host, files saved to the filesystem.
|
||||||
|
|||||||
61
plugins/callback/actionable.py
Normal file
61
plugins/callback/actionable.py
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
# (c) 2015, Andrew Gaffney <andrew@agaffney.org>
|
||||||
|
# (c) 2017 Ansible Project
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
# Make coding more python3-ish
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
author: Unknown (!UNKNOWN)
|
||||||
|
callback: actionable
|
||||||
|
type: stdout
|
||||||
|
short_description: shows only items that need attention
|
||||||
|
description:
|
||||||
|
- Use this callback when you dont care about OK nor Skipped.
|
||||||
|
- This callback suppresses any non Failed or Changed status.
|
||||||
|
deprecated:
|
||||||
|
why: The 'default' callback plugin now supports this functionality
|
||||||
|
removed_in: '2.0.0' # was Ansible 2.11
|
||||||
|
alternative: "'default' callback plugin with 'display_skipped_hosts = no' and 'display_ok_hosts = no' options"
|
||||||
|
extends_documentation_fragment:
|
||||||
|
- default_callback
|
||||||
|
requirements:
|
||||||
|
- set as stdout callback in configuration
|
||||||
|
# Override defaults from 'default' callback plugin
|
||||||
|
options:
|
||||||
|
display_skipped_hosts:
|
||||||
|
name: Show skipped hosts
|
||||||
|
description: "Toggle to control displaying skipped task/host results in a task"
|
||||||
|
type: bool
|
||||||
|
default: no
|
||||||
|
env:
|
||||||
|
- name: DISPLAY_SKIPPED_HOSTS
|
||||||
|
deprecated:
|
||||||
|
why: environment variables without "ANSIBLE_" prefix are deprecated
|
||||||
|
version: "2.0.0" # was Ansible 2.12
|
||||||
|
alternatives: the "ANSIBLE_DISPLAY_SKIPPED_HOSTS" environment variable
|
||||||
|
- name: ANSIBLE_DISPLAY_SKIPPED_HOSTS
|
||||||
|
ini:
|
||||||
|
- key: display_skipped_hosts
|
||||||
|
section: defaults
|
||||||
|
display_ok_hosts:
|
||||||
|
name: Show 'ok' hosts
|
||||||
|
description: "Toggle to control displaying 'ok' task/host results in a task"
|
||||||
|
type: bool
|
||||||
|
default: no
|
||||||
|
env:
|
||||||
|
- name: ANSIBLE_DISPLAY_OK_HOSTS
|
||||||
|
ini:
|
||||||
|
- key: display_ok_hosts
|
||||||
|
section: defaults
|
||||||
|
'''
|
||||||
|
|
||||||
|
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
|
||||||
|
|
||||||
|
|
||||||
|
class CallbackModule(CallbackModule_default):
|
||||||
|
|
||||||
|
CALLBACK_VERSION = 2.0
|
||||||
|
CALLBACK_TYPE = 'stdout'
|
||||||
|
CALLBACK_NAME = 'community.general.actionable'
|
||||||
@@ -8,7 +8,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: cgroup_memory_recap
|
callback: cgroup_memory_recap
|
||||||
type: aggregate
|
type: aggregate
|
||||||
requirements:
|
requirements:
|
||||||
- whitelist in configuration
|
- whitelist in configuration
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: context_demo
|
callback: context_demo
|
||||||
type: aggregate
|
type: aggregate
|
||||||
short_description: demo callback that adds play/task context
|
short_description: demo callback that adds play/task context
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: counter_enabled
|
callback: counter_enabled
|
||||||
type: stdout
|
type: stdout
|
||||||
short_description: adds counters to the output items (tasks and hosts/task)
|
short_description: adds counters to the output items (tasks and hosts/task)
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: dense
|
callback: dense
|
||||||
type: stdout
|
type: stdout
|
||||||
short_description: minimal stdout output
|
short_description: minimal stdout output
|
||||||
extends_documentation_fragment:
|
extends_documentation_fragment:
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = r'''
|
DOCUMENTATION = r'''
|
||||||
name: diy
|
callback: diy
|
||||||
type: stdout
|
type: stdout
|
||||||
short_description: Customize the output
|
short_description: Customize the output
|
||||||
version_added: 0.2.0
|
version_added: 0.2.0
|
||||||
|
|||||||
76
plugins/callback/full_skip.py
Normal file
76
plugins/callback/full_skip.py
Normal file
@@ -0,0 +1,76 @@
|
|||||||
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||||
|
# (c) 2017 Ansible Project
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
# Make coding more python3-ish
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
author: Unknown (!UNKNOWN)
|
||||||
|
callback: full_skip
|
||||||
|
type: stdout
|
||||||
|
short_description: suppresses tasks if all hosts skipped
|
||||||
|
description:
|
||||||
|
- Use this plugin when you do not care about any output for tasks that were completely skipped
|
||||||
|
deprecated:
|
||||||
|
why: The 'default' callback plugin now supports this functionality
|
||||||
|
removed_in: '2.0.0' # was Ansible 2.11
|
||||||
|
alternative: "'default' callback plugin with 'display_skipped_hosts = no' option"
|
||||||
|
extends_documentation_fragment:
|
||||||
|
- default_callback
|
||||||
|
requirements:
|
||||||
|
- set as stdout in configuration
|
||||||
|
'''
|
||||||
|
|
||||||
|
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
|
||||||
|
|
||||||
|
|
||||||
|
class CallbackModule(CallbackModule_default):
|
||||||
|
|
||||||
|
'''
|
||||||
|
This is the default callback interface, which simply prints messages
|
||||||
|
to stdout when new callback events are received.
|
||||||
|
'''
|
||||||
|
|
||||||
|
CALLBACK_VERSION = 2.0
|
||||||
|
CALLBACK_TYPE = 'stdout'
|
||||||
|
CALLBACK_NAME = 'community.general.full_skip'
|
||||||
|
|
||||||
|
def v2_runner_on_skipped(self, result):
|
||||||
|
self.outlines = []
|
||||||
|
|
||||||
|
def v2_playbook_item_on_skipped(self, result):
|
||||||
|
self.outlines = []
|
||||||
|
|
||||||
|
def v2_runner_item_on_skipped(self, result):
|
||||||
|
self.outlines = []
|
||||||
|
|
||||||
|
def v2_runner_on_failed(self, result, ignore_errors=False):
|
||||||
|
self.display()
|
||||||
|
super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
|
||||||
|
|
||||||
|
def v2_playbook_on_task_start(self, task, is_conditional):
|
||||||
|
self.outlines = []
|
||||||
|
self.outlines.append("TASK [%s]" % task.get_name().strip())
|
||||||
|
if self._display.verbosity >= 2:
|
||||||
|
path = task.get_path()
|
||||||
|
if path:
|
||||||
|
self.outlines.append("task path: %s" % path)
|
||||||
|
|
||||||
|
def v2_playbook_item_on_ok(self, result):
|
||||||
|
self.display()
|
||||||
|
super(CallbackModule, self).v2_playbook_item_on_ok(result)
|
||||||
|
|
||||||
|
def v2_runner_on_ok(self, result):
|
||||||
|
self.display()
|
||||||
|
super(CallbackModule, self).v2_runner_on_ok(result)
|
||||||
|
|
||||||
|
def display(self):
|
||||||
|
if len(self.outlines) == 0:
|
||||||
|
return
|
||||||
|
(first, rest) = self.outlines[0], self.outlines[1:]
|
||||||
|
self._display.banner(first)
|
||||||
|
for line in rest:
|
||||||
|
self._display.display(line)
|
||||||
|
self.outlines = []
|
||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: hipchat
|
callback: hipchat
|
||||||
type: notification
|
type: notification
|
||||||
requirements:
|
requirements:
|
||||||
- whitelist in configuration.
|
- whitelist in configuration.
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: jabber
|
callback: jabber
|
||||||
type: notification
|
type: notification
|
||||||
short_description: post task events to a jabber server
|
short_description: post task events to a jabber server
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: log_plays
|
callback: log_plays
|
||||||
type: notification
|
type: notification
|
||||||
short_description: write playbook output to log file
|
short_description: write playbook output to log file
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: logdna
|
callback: logdna
|
||||||
type: aggregate
|
type: aggregate
|
||||||
short_description: Sends playbook logs to LogDNA
|
short_description: Sends playbook logs to LogDNA
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: logentries
|
callback: logentries
|
||||||
type: notification
|
type: notification
|
||||||
short_description: Sends events to Logentries
|
short_description: Sends events to Logentries
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -1,13 +1,13 @@
|
|||||||
# (C) 2020, Yevhen Khmelenko <ujenmr@gmail.com>
|
# (C) 2016, Ievgen Khmelenko <ujenmr@gmail.com>
|
||||||
# (C) 2017 Ansible Project
|
# (C) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
from __future__ import (absolute_import, division, print_function)
|
||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = r'''
|
DOCUMENTATION = '''
|
||||||
author: Yevhen Khmelenko (@ujenmr)
|
author: Unknown (!UNKNOWN)
|
||||||
name: logstash
|
callback: logstash
|
||||||
type: notification
|
type: notification
|
||||||
short_description: Sends events to Logstash
|
short_description: Sends events to Logstash
|
||||||
description:
|
description:
|
||||||
@@ -43,61 +43,16 @@ DOCUMENTATION = r'''
|
|||||||
key: type
|
key: type
|
||||||
version_added: 1.0.0
|
version_added: 1.0.0
|
||||||
default: ansible
|
default: ansible
|
||||||
pre_command:
|
|
||||||
description: Executes command before run and result put to ansible_pre_command_output field.
|
|
||||||
version_added: 2.0.0
|
|
||||||
ini:
|
|
||||||
- section: callback_logstash
|
|
||||||
key: pre_command
|
|
||||||
env:
|
|
||||||
- name: LOGSTASH_PRE_COMMAND
|
|
||||||
format_version:
|
|
||||||
description: Logging format
|
|
||||||
type: str
|
|
||||||
version_added: 2.0.0
|
|
||||||
ini:
|
|
||||||
- section: callback_logstash
|
|
||||||
key: format_version
|
|
||||||
env:
|
|
||||||
- name: LOGSTASH_FORMAT_VERSION
|
|
||||||
default: v1
|
|
||||||
choices:
|
|
||||||
- v1
|
|
||||||
- v2
|
|
||||||
|
|
||||||
'''
|
|
||||||
|
|
||||||
EXAMPLES = r'''
|
|
||||||
ansible.cfg: |
|
|
||||||
# Enable Callback plugin
|
|
||||||
[defaults]
|
|
||||||
callback_whitelist = community.general.logstash
|
|
||||||
|
|
||||||
[callback_logstash]
|
|
||||||
server = logstash.example.com
|
|
||||||
port = 5000
|
|
||||||
pre_command = git rev-parse HEAD
|
|
||||||
type = ansible
|
|
||||||
|
|
||||||
11-input-tcp.conf: |
|
|
||||||
# Enable Logstash TCP Input
|
|
||||||
input {
|
|
||||||
tcp {
|
|
||||||
port => 5000
|
|
||||||
codec => json
|
|
||||||
add_field => { "[@metadata][beat]" => "notify" }
|
|
||||||
add_field => { "[@metadata][type]" => "ansible" }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
'''
|
'''
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import json
|
import json
|
||||||
import socket
|
import socket
|
||||||
import uuid
|
import uuid
|
||||||
import logging
|
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
|
|
||||||
|
import logging
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import logstash
|
import logstash
|
||||||
HAS_LOGSTASH = True
|
HAS_LOGSTASH = True
|
||||||
@@ -108,78 +63,76 @@ from ansible.plugins.callback import CallbackBase
|
|||||||
|
|
||||||
|
|
||||||
class CallbackModule(CallbackBase):
|
class CallbackModule(CallbackBase):
|
||||||
|
"""
|
||||||
|
ansible logstash callback plugin
|
||||||
|
ansible.cfg:
|
||||||
|
callback_plugins = <path_to_callback_plugins_folder>
|
||||||
|
callback_whitelist = logstash
|
||||||
|
and put the plugin in <path_to_callback_plugins_folder>
|
||||||
|
|
||||||
|
logstash config:
|
||||||
|
input {
|
||||||
|
tcp {
|
||||||
|
port => 5000
|
||||||
|
codec => json
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Requires:
|
||||||
|
python-logstash
|
||||||
|
|
||||||
|
This plugin makes use of the following environment variables or ini config:
|
||||||
|
LOGSTASH_SERVER (optional): defaults to localhost
|
||||||
|
LOGSTASH_PORT (optional): defaults to 5000
|
||||||
|
LOGSTASH_TYPE (optional): defaults to ansible
|
||||||
|
"""
|
||||||
|
|
||||||
CALLBACK_VERSION = 2.0
|
CALLBACK_VERSION = 2.0
|
||||||
CALLBACK_TYPE = 'aggregate'
|
CALLBACK_TYPE = 'aggregate'
|
||||||
CALLBACK_NAME = 'community.general.logstash'
|
CALLBACK_NAME = 'community.general.logstash'
|
||||||
CALLBACK_NEEDS_WHITELIST = True
|
CALLBACK_NEEDS_WHITELIST = True
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self, display=None):
|
||||||
super(CallbackModule, self).__init__()
|
super(CallbackModule, self).__init__(display=display)
|
||||||
|
|
||||||
if not HAS_LOGSTASH:
|
if not HAS_LOGSTASH:
|
||||||
self.disabled = True
|
self.disabled = True
|
||||||
self._display.warning("The required python-logstash/python3-logstash is not installed. "
|
self._display.warning("The required python-logstash is not installed. "
|
||||||
"pip install python-logstash for Python 2"
|
"pip install python-logstash")
|
||||||
"pip install python3-logstash for Python 3")
|
|
||||||
|
|
||||||
self.start_time = datetime.utcnow()
|
self.start_time = datetime.utcnow()
|
||||||
|
|
||||||
def _init_plugin(self):
|
|
||||||
if not self.disabled:
|
|
||||||
self.logger = logging.getLogger('python-logstash-logger')
|
|
||||||
self.logger.setLevel(logging.DEBUG)
|
|
||||||
|
|
||||||
self.handler = logstash.TCPLogstashHandler(
|
|
||||||
self.ls_server,
|
|
||||||
self.ls_port,
|
|
||||||
version=1,
|
|
||||||
message_type=self.ls_type
|
|
||||||
)
|
|
||||||
|
|
||||||
self.logger.addHandler(self.handler)
|
|
||||||
self.hostname = socket.gethostname()
|
|
||||||
self.session = str(uuid.uuid4())
|
|
||||||
self.errors = 0
|
|
||||||
|
|
||||||
self.base_data = {
|
|
||||||
'session': self.session,
|
|
||||||
'host': self.hostname
|
|
||||||
}
|
|
||||||
|
|
||||||
if self.ls_pre_command is not None:
|
|
||||||
self.base_data['ansible_pre_command_output'] = os.popen(
|
|
||||||
self.ls_pre_command).read()
|
|
||||||
|
|
||||||
if self._options is not None:
|
|
||||||
self.base_data['ansible_checkmode'] = self._options.check
|
|
||||||
self.base_data['ansible_tags'] = self._options.tags
|
|
||||||
self.base_data['ansible_skip_tags'] = self._options.skip_tags
|
|
||||||
self.base_data['inventory'] = self._options.inventory
|
|
||||||
|
|
||||||
def set_options(self, task_keys=None, var_options=None, direct=None):
|
def set_options(self, task_keys=None, var_options=None, direct=None):
|
||||||
|
|
||||||
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
|
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
|
||||||
|
|
||||||
self.ls_server = self.get_option('server')
|
self.logger = logging.getLogger('python-logstash-logger')
|
||||||
self.ls_port = int(self.get_option('port'))
|
self.logger.setLevel(logging.DEBUG)
|
||||||
self.ls_type = self.get_option('type')
|
|
||||||
self.ls_pre_command = self.get_option('pre_command')
|
|
||||||
self.ls_format_version = self.get_option('format_version')
|
|
||||||
|
|
||||||
self._init_plugin()
|
self.logstash_server = self.get_option('server')
|
||||||
|
self.logstash_port = self.get_option('port')
|
||||||
|
self.logstash_type = self.get_option('type')
|
||||||
|
self.handler = logstash.TCPLogstashHandler(
|
||||||
|
self.logstash_server,
|
||||||
|
int(self.logstash_port),
|
||||||
|
version=1,
|
||||||
|
message_type=self.logstash_type
|
||||||
|
)
|
||||||
|
self.logger.addHandler(self.handler)
|
||||||
|
self.hostname = socket.gethostname()
|
||||||
|
self.session = str(uuid.uuid1())
|
||||||
|
self.errors = 0
|
||||||
|
|
||||||
def v2_playbook_on_start(self, playbook):
|
def v2_playbook_on_start(self, playbook):
|
||||||
data = self.base_data.copy()
|
self.playbook = playbook._file_name
|
||||||
data['ansible_type'] = "start"
|
data = {
|
||||||
data['status'] = "OK"
|
'status': "OK",
|
||||||
data['ansible_playbook'] = playbook._file_name
|
'host': self.hostname,
|
||||||
|
'session': self.session,
|
||||||
if (self.ls_format_version == "v2"):
|
'ansible_type': "start",
|
||||||
self.logger.info(
|
'ansible_playbook': self.playbook,
|
||||||
"START PLAYBOOK | %s", data['ansible_playbook'], extra=data
|
}
|
||||||
)
|
self.logger.info("ansible start", extra=data)
|
||||||
else:
|
|
||||||
self.logger.info("ansible start", extra=data)
|
|
||||||
|
|
||||||
def v2_playbook_on_stats(self, stats):
|
def v2_playbook_on_stats(self, stats):
|
||||||
end_time = datetime.utcnow()
|
end_time = datetime.utcnow()
|
||||||
@@ -193,201 +146,103 @@ class CallbackModule(CallbackBase):
|
|||||||
else:
|
else:
|
||||||
status = "FAILED"
|
status = "FAILED"
|
||||||
|
|
||||||
data = self.base_data.copy()
|
data = {
|
||||||
data['ansible_type'] = "finish"
|
'status': status,
|
||||||
data['status'] = status
|
'host': self.hostname,
|
||||||
data['ansible_playbook_duration'] = runtime.total_seconds()
|
'session': self.session,
|
||||||
data['ansible_result'] = json.dumps(summarize_stat) # deprecated field
|
'ansible_type': "finish",
|
||||||
|
'ansible_playbook': self.playbook,
|
||||||
if (self.ls_format_version == "v2"):
|
'ansible_playbook_duration': runtime.total_seconds(),
|
||||||
self.logger.info(
|
'ansible_result': json.dumps(summarize_stat),
|
||||||
"FINISH PLAYBOOK | %s", json.dumps(summarize_stat), extra=data
|
}
|
||||||
)
|
self.logger.info("ansible stats", extra=data)
|
||||||
else:
|
|
||||||
self.logger.info("ansible stats", extra=data)
|
|
||||||
|
|
||||||
def v2_playbook_on_play_start(self, play):
|
|
||||||
self.play_id = str(play._uuid)
|
|
||||||
|
|
||||||
if play.name:
|
|
||||||
self.play_name = play.name
|
|
||||||
|
|
||||||
data = self.base_data.copy()
|
|
||||||
data['ansible_type'] = "start"
|
|
||||||
data['status'] = "OK"
|
|
||||||
data['ansible_play_id'] = self.play_id
|
|
||||||
data['ansible_play_name'] = self.play_name
|
|
||||||
|
|
||||||
if (self.ls_format_version == "v2"):
|
|
||||||
self.logger.info("START PLAY | %s", self.play_name, extra=data)
|
|
||||||
else:
|
|
||||||
self.logger.info("ansible play", extra=data)
|
|
||||||
|
|
||||||
def v2_playbook_on_task_start(self, task, is_conditional):
|
|
||||||
self.task_id = str(task._uuid)
|
|
||||||
|
|
||||||
'''
|
|
||||||
Tasks and handler tasks are dealt with here
|
|
||||||
'''
|
|
||||||
|
|
||||||
def v2_runner_on_ok(self, result, **kwargs):
|
def v2_runner_on_ok(self, result, **kwargs):
|
||||||
task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')
|
data = {
|
||||||
|
'status': "OK",
|
||||||
data = self.base_data.copy()
|
'host': self.hostname,
|
||||||
if task_name == 'setup':
|
'session': self.session,
|
||||||
data['ansible_type'] = "setup"
|
'ansible_type': "task",
|
||||||
data['status'] = "OK"
|
'ansible_playbook': self.playbook,
|
||||||
data['ansible_host'] = result._host.name
|
'ansible_host': result._host.name,
|
||||||
data['ansible_play_id'] = self.play_id
|
'ansible_task': result._task,
|
||||||
data['ansible_play_name'] = self.play_name
|
'ansible_result': self._dump_results(result._result)
|
||||||
data['ansible_task'] = task_name
|
}
|
||||||
data['ansible_facts'] = self._dump_results(result._result)
|
self.logger.info("ansible ok", extra=data)
|
||||||
|
|
||||||
if (self.ls_format_version == "v2"):
|
|
||||||
self.logger.info(
|
|
||||||
"SETUP FACTS | %s", self._dump_results(result._result), extra=data
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
self.logger.info("ansible facts", extra=data)
|
|
||||||
else:
|
|
||||||
if 'changed' in result._result.keys():
|
|
||||||
data['ansible_changed'] = result._result['changed']
|
|
||||||
else:
|
|
||||||
data['ansible_changed'] = False
|
|
||||||
|
|
||||||
data['ansible_type'] = "task"
|
|
||||||
data['status'] = "OK"
|
|
||||||
data['ansible_host'] = result._host.name
|
|
||||||
data['ansible_play_id'] = self.play_id
|
|
||||||
data['ansible_play_name'] = self.play_name
|
|
||||||
data['ansible_task'] = task_name
|
|
||||||
data['ansible_task_id'] = self.task_id
|
|
||||||
data['ansible_result'] = self._dump_results(result._result)
|
|
||||||
|
|
||||||
if (self.ls_format_version == "v2"):
|
|
||||||
self.logger.info(
|
|
||||||
"TASK OK | %s | RESULT | %s",
|
|
||||||
task_name, self._dump_results(result._result), extra=data
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
self.logger.info("ansible ok", extra=data)
|
|
||||||
|
|
||||||
def v2_runner_on_skipped(self, result, **kwargs):
|
def v2_runner_on_skipped(self, result, **kwargs):
|
||||||
task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')
|
data = {
|
||||||
|
'status': "SKIPPED",
|
||||||
data = self.base_data.copy()
|
'host': self.hostname,
|
||||||
data['ansible_type'] = "task"
|
'session': self.session,
|
||||||
data['status'] = "SKIPPED"
|
'ansible_type': "task",
|
||||||
data['ansible_host'] = result._host.name
|
'ansible_playbook': self.playbook,
|
||||||
data['ansible_play_id'] = self.play_id
|
'ansible_task': result._task,
|
||||||
data['ansible_play_name'] = self.play_name
|
'ansible_host': result._host.name
|
||||||
data['ansible_task'] = task_name
|
}
|
||||||
data['ansible_task_id'] = self.task_id
|
self.logger.info("ansible skipped", extra=data)
|
||||||
data['ansible_result'] = self._dump_results(result._result)
|
|
||||||
|
|
||||||
if (self.ls_format_version == "v2"):
|
|
||||||
self.logger.info("TASK SKIPPED | %s", task_name, extra=data)
|
|
||||||
else:
|
|
||||||
self.logger.info("ansible skipped", extra=data)
|
|
||||||
|
|
||||||
def v2_playbook_on_import_for_host(self, result, imported_file):
|
def v2_playbook_on_import_for_host(self, result, imported_file):
|
||||||
data = self.base_data.copy()
|
data = {
|
||||||
data['ansible_type'] = "import"
|
'status': "IMPORTED",
|
||||||
data['status'] = "IMPORTED"
|
'host': self.hostname,
|
||||||
data['ansible_host'] = result._host.name
|
'session': self.session,
|
||||||
data['ansible_play_id'] = self.play_id
|
'ansible_type': "import",
|
||||||
data['ansible_play_name'] = self.play_name
|
'ansible_playbook': self.playbook,
|
||||||
data['imported_file'] = imported_file
|
'ansible_host': result._host.name,
|
||||||
|
'imported_file': imported_file
|
||||||
if (self.ls_format_version == "v2"):
|
}
|
||||||
self.logger.info("IMPORT | %s", imported_file, extra=data)
|
self.logger.info("ansible import", extra=data)
|
||||||
else:
|
|
||||||
self.logger.info("ansible import", extra=data)
|
|
||||||
|
|
||||||
def v2_playbook_on_not_import_for_host(self, result, missing_file):
|
def v2_playbook_on_not_import_for_host(self, result, missing_file):
|
||||||
data = self.base_data.copy()
|
data = {
|
||||||
data['ansible_type'] = "import"
|
'status': "NOT IMPORTED",
|
||||||
data['status'] = "NOT IMPORTED"
|
'host': self.hostname,
|
||||||
data['ansible_host'] = result._host.name
|
'session': self.session,
|
||||||
data['ansible_play_id'] = self.play_id
|
'ansible_type': "import",
|
||||||
data['ansible_play_name'] = self.play_name
|
'ansible_playbook': self.playbook,
|
||||||
data['imported_file'] = missing_file
|
'ansible_host': result._host.name,
|
||||||
|
'missing_file': missing_file
|
||||||
if (self.ls_format_version == "v2"):
|
}
|
||||||
self.logger.info("NOT IMPORTED | %s", missing_file, extra=data)
|
self.logger.info("ansible import", extra=data)
|
||||||
else:
|
|
||||||
self.logger.info("ansible import", extra=data)
|
|
||||||
|
|
||||||
def v2_runner_on_failed(self, result, **kwargs):
|
def v2_runner_on_failed(self, result, **kwargs):
|
||||||
task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')
|
data = {
|
||||||
|
'status': "FAILED",
|
||||||
data = self.base_data.copy()
|
'host': self.hostname,
|
||||||
if 'changed' in result._result.keys():
|
'session': self.session,
|
||||||
data['ansible_changed'] = result._result['changed']
|
'ansible_type': "task",
|
||||||
else:
|
'ansible_playbook': self.playbook,
|
||||||
data['ansible_changed'] = False
|
'ansible_host': result._host.name,
|
||||||
|
'ansible_task': result._task,
|
||||||
data['ansible_type'] = "task"
|
'ansible_result': self._dump_results(result._result)
|
||||||
data['status'] = "FAILED"
|
}
|
||||||
data['ansible_host'] = result._host.name
|
|
||||||
data['ansible_play_id'] = self.play_id
|
|
||||||
data['ansible_play_name'] = self.play_name
|
|
||||||
data['ansible_task'] = task_name
|
|
||||||
data['ansible_task_id'] = self.task_id
|
|
||||||
data['ansible_result'] = self._dump_results(result._result)
|
|
||||||
|
|
||||||
self.errors += 1
|
self.errors += 1
|
||||||
if (self.ls_format_version == "v2"):
|
self.logger.error("ansible failed", extra=data)
|
||||||
self.logger.error(
|
|
||||||
"TASK FAILED | %s | HOST | %s | RESULT | %s",
|
|
||||||
task_name, self.hostname,
|
|
||||||
self._dump_results(result._result), extra=data
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
self.logger.error("ansible failed", extra=data)
|
|
||||||
|
|
||||||
def v2_runner_on_unreachable(self, result, **kwargs):
|
def v2_runner_on_unreachable(self, result, **kwargs):
|
||||||
task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')
|
data = {
|
||||||
|
'status': "UNREACHABLE",
|
||||||
data = self.base_data.copy()
|
'host': self.hostname,
|
||||||
data['ansible_type'] = "task"
|
'session': self.session,
|
||||||
data['status'] = "UNREACHABLE"
|
'ansible_type': "task",
|
||||||
data['ansible_host'] = result._host.name
|
'ansible_playbook': self.playbook,
|
||||||
data['ansible_play_id'] = self.play_id
|
'ansible_host': result._host.name,
|
||||||
data['ansible_play_name'] = self.play_name
|
'ansible_task': result._task,
|
||||||
data['ansible_task'] = task_name
|
'ansible_result': self._dump_results(result._result)
|
||||||
data['ansible_task_id'] = self.task_id
|
}
|
||||||
data['ansible_result'] = self._dump_results(result._result)
|
self.logger.error("ansible unreachable", extra=data)
|
||||||
|
|
||||||
self.errors += 1
|
|
||||||
if (self.ls_format_version == "v2"):
|
|
||||||
self.logger.error(
|
|
||||||
"UNREACHABLE | %s | HOST | %s | RESULT | %s",
|
|
||||||
task_name, self.hostname,
|
|
||||||
self._dump_results(result._result), extra=data
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
self.logger.error("ansible unreachable", extra=data)
|
|
||||||
|
|
||||||
def v2_runner_on_async_failed(self, result, **kwargs):
|
def v2_runner_on_async_failed(self, result, **kwargs):
|
||||||
task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')
|
data = {
|
||||||
|
'status': "FAILED",
|
||||||
data = self.base_data.copy()
|
'host': self.hostname,
|
||||||
data['ansible_type'] = "task"
|
'session': self.session,
|
||||||
data['status'] = "FAILED"
|
'ansible_type': "task",
|
||||||
data['ansible_host'] = result._host.name
|
'ansible_playbook': self.playbook,
|
||||||
data['ansible_play_id'] = self.play_id
|
'ansible_host': result._host.name,
|
||||||
data['ansible_play_name'] = self.play_name
|
'ansible_task': result._task,
|
||||||
data['ansible_task'] = task_name
|
'ansible_result': self._dump_results(result._result)
|
||||||
data['ansible_task_id'] = self.task_id
|
}
|
||||||
data['ansible_result'] = self._dump_results(result._result)
|
|
||||||
|
|
||||||
self.errors += 1
|
self.errors += 1
|
||||||
if (self.ls_format_version == "v2"):
|
self.logger.error("ansible async", extra=data)
|
||||||
self.logger.error(
|
|
||||||
"ASYNC FAILED | %s | HOST | %s | RESULT | %s",
|
|
||||||
task_name, self.hostname,
|
|
||||||
self._dump_results(result._result), extra=data
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
self.logger.error("ansible async", extra=data)
|
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: mail
|
callback: mail
|
||||||
type: notification
|
type: notification
|
||||||
short_description: Sends failure events via email
|
short_description: Sends failure events via email
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: nrdp
|
callback: nrdp
|
||||||
type: notification
|
type: notification
|
||||||
author: "Remi VERCHERE (@rverchere)"
|
author: "Remi VERCHERE (@rverchere)"
|
||||||
short_description: post task result to a nagios server through nrdp
|
short_description: post task result to a nagios server through nrdp
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: 'null'
|
callback: 'null'
|
||||||
type: stdout
|
type: stdout
|
||||||
requirements:
|
requirements:
|
||||||
- set as main display callback
|
- set as main display callback
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: say
|
callback: say
|
||||||
type: notification
|
type: notification
|
||||||
requirements:
|
requirements:
|
||||||
- whitelisting in configuration
|
- whitelisting in configuration
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: selective
|
callback: selective
|
||||||
type: stdout
|
type: stdout
|
||||||
requirements:
|
requirements:
|
||||||
- set as main display callback
|
- set as main display callback
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: slack
|
callback: slack
|
||||||
type: notification
|
type: notification
|
||||||
requirements:
|
requirements:
|
||||||
- whitelist in configuration
|
- whitelist in configuration
|
||||||
|
|||||||
@@ -18,7 +18,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: splunk
|
callback: splunk
|
||||||
type: aggregate
|
type: aggregate
|
||||||
short_description: Sends task result events to Splunk HTTP Event Collector
|
short_description: Sends task result events to Splunk HTTP Event Collector
|
||||||
author: "Stuart Hirst (!UNKNOWN) <support@convergingdata.com>"
|
author: "Stuart Hirst (!UNKNOWN) <support@convergingdata.com>"
|
||||||
@@ -57,17 +57,6 @@ DOCUMENTATION = '''
|
|||||||
type: bool
|
type: bool
|
||||||
default: true
|
default: true
|
||||||
version_added: '1.0.0'
|
version_added: '1.0.0'
|
||||||
include_milliseconds:
|
|
||||||
description: Whether to include milliseconds as part of the generated timestamp field in the event
|
|
||||||
sent to the Splunk HTTP collector
|
|
||||||
env:
|
|
||||||
- name: SPLUNK_INCLUDE_MILLISECONDS
|
|
||||||
ini:
|
|
||||||
- section: callback_splunk
|
|
||||||
key: include_milliseconds
|
|
||||||
type: bool
|
|
||||||
default: false
|
|
||||||
version_added: 2.0.0
|
|
||||||
'''
|
'''
|
||||||
|
|
||||||
EXAMPLES = '''
|
EXAMPLES = '''
|
||||||
@@ -107,7 +96,7 @@ class SplunkHTTPCollectorSource(object):
|
|||||||
self.ip_address = socket.gethostbyname(socket.gethostname())
|
self.ip_address = socket.gethostbyname(socket.gethostname())
|
||||||
self.user = getpass.getuser()
|
self.user = getpass.getuser()
|
||||||
|
|
||||||
def send_event(self, url, authtoken, validate_certs, include_milliseconds, state, result, runtime):
|
def send_event(self, url, authtoken, validate_certs, state, result, runtime):
|
||||||
if result._task_fields['args'].get('_ansible_check_mode') is True:
|
if result._task_fields['args'].get('_ansible_check_mode') is True:
|
||||||
self.ansible_check_mode = True
|
self.ansible_check_mode = True
|
||||||
|
|
||||||
@@ -127,13 +116,8 @@ class SplunkHTTPCollectorSource(object):
|
|||||||
data['uuid'] = result._task._uuid
|
data['uuid'] = result._task._uuid
|
||||||
data['session'] = self.session
|
data['session'] = self.session
|
||||||
data['status'] = state
|
data['status'] = state
|
||||||
|
data['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S '
|
||||||
if include_milliseconds:
|
'+0000')
|
||||||
time_format = '%Y-%m-%d %H:%M:%S.%f +0000'
|
|
||||||
else:
|
|
||||||
time_format = '%Y-%m-%d %H:%M:%S +0000'
|
|
||||||
|
|
||||||
data['timestamp'] = datetime.utcnow().strftime(time_format)
|
|
||||||
data['host'] = self.host
|
data['host'] = self.host
|
||||||
data['ip_address'] = self.ip_address
|
data['ip_address'] = self.ip_address
|
||||||
data['user'] = self.user
|
data['user'] = self.user
|
||||||
@@ -174,7 +158,6 @@ class CallbackModule(CallbackBase):
|
|||||||
self.url = None
|
self.url = None
|
||||||
self.authtoken = None
|
self.authtoken = None
|
||||||
self.validate_certs = None
|
self.validate_certs = None
|
||||||
self.include_milliseconds = None
|
|
||||||
self.splunk = SplunkHTTPCollectorSource()
|
self.splunk = SplunkHTTPCollectorSource()
|
||||||
|
|
||||||
def _runtime(self, result):
|
def _runtime(self, result):
|
||||||
@@ -210,8 +193,6 @@ class CallbackModule(CallbackBase):
|
|||||||
|
|
||||||
self.validate_certs = self.get_option('validate_certs')
|
self.validate_certs = self.get_option('validate_certs')
|
||||||
|
|
||||||
self.include_milliseconds = self.get_option('include_milliseconds')
|
|
||||||
|
|
||||||
def v2_playbook_on_start(self, playbook):
|
def v2_playbook_on_start(self, playbook):
|
||||||
self.splunk.ansible_playbook = basename(playbook._file_name)
|
self.splunk.ansible_playbook = basename(playbook._file_name)
|
||||||
|
|
||||||
@@ -226,7 +207,6 @@ class CallbackModule(CallbackBase):
|
|||||||
self.url,
|
self.url,
|
||||||
self.authtoken,
|
self.authtoken,
|
||||||
self.validate_certs,
|
self.validate_certs,
|
||||||
self.include_milliseconds,
|
|
||||||
'OK',
|
'OK',
|
||||||
result,
|
result,
|
||||||
self._runtime(result)
|
self._runtime(result)
|
||||||
@@ -237,7 +217,6 @@ class CallbackModule(CallbackBase):
|
|||||||
self.url,
|
self.url,
|
||||||
self.authtoken,
|
self.authtoken,
|
||||||
self.validate_certs,
|
self.validate_certs,
|
||||||
self.include_milliseconds,
|
|
||||||
'SKIPPED',
|
'SKIPPED',
|
||||||
result,
|
result,
|
||||||
self._runtime(result)
|
self._runtime(result)
|
||||||
@@ -248,7 +227,6 @@ class CallbackModule(CallbackBase):
|
|||||||
self.url,
|
self.url,
|
||||||
self.authtoken,
|
self.authtoken,
|
||||||
self.validate_certs,
|
self.validate_certs,
|
||||||
self.include_milliseconds,
|
|
||||||
'FAILED',
|
'FAILED',
|
||||||
result,
|
result,
|
||||||
self._runtime(result)
|
self._runtime(result)
|
||||||
@@ -259,7 +237,6 @@ class CallbackModule(CallbackBase):
|
|||||||
self.url,
|
self.url,
|
||||||
self.authtoken,
|
self.authtoken,
|
||||||
self.validate_certs,
|
self.validate_certs,
|
||||||
self.include_milliseconds,
|
|
||||||
'FAILED',
|
'FAILED',
|
||||||
result,
|
result,
|
||||||
self._runtime(result)
|
self._runtime(result)
|
||||||
@@ -270,7 +247,6 @@ class CallbackModule(CallbackBase):
|
|||||||
self.url,
|
self.url,
|
||||||
self.authtoken,
|
self.authtoken,
|
||||||
self.validate_certs,
|
self.validate_certs,
|
||||||
self.include_milliseconds,
|
|
||||||
'UNREACHABLE',
|
'UNREACHABLE',
|
||||||
result,
|
result,
|
||||||
self._runtime(result)
|
self._runtime(result)
|
||||||
|
|||||||
71
plugins/callback/stderr.py
Normal file
71
plugins/callback/stderr.py
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
# (c) 2017, Frederic Van Espen <github@freh.be>
|
||||||
|
# (c) 2017 Ansible Project
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
# Make coding more python3-ish
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
author: Unknown (!UNKNOWN)
|
||||||
|
callback: stderr
|
||||||
|
type: stdout
|
||||||
|
requirements:
|
||||||
|
- set as main display callback
|
||||||
|
short_description: Splits output, sending failed tasks to stderr
|
||||||
|
deprecated:
|
||||||
|
why: The 'default' callback plugin now supports this functionality
|
||||||
|
removed_in: '2.0.0' # was Ansible 2.11
|
||||||
|
alternative: "'default' callback plugin with 'display_failed_stderr = yes' option"
|
||||||
|
extends_documentation_fragment:
|
||||||
|
- default_callback
|
||||||
|
description:
|
||||||
|
- This is the stderr callback plugin, it behaves like the default callback plugin but sends error output to stderr.
|
||||||
|
- Also it does not output skipped host/task/item status
|
||||||
|
'''
|
||||||
|
|
||||||
|
from ansible import constants as C
|
||||||
|
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
|
||||||
|
|
||||||
|
|
||||||
|
class CallbackModule(CallbackModule_default):
|
||||||
|
|
||||||
|
'''
|
||||||
|
This is the stderr callback plugin, which reuses the default
|
||||||
|
callback plugin but sends error output to stderr.
|
||||||
|
'''
|
||||||
|
|
||||||
|
CALLBACK_VERSION = 2.0
|
||||||
|
CALLBACK_TYPE = 'stdout'
|
||||||
|
CALLBACK_NAME = 'community.general.stderr'
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
|
||||||
|
self.super_ref = super(CallbackModule, self)
|
||||||
|
self.super_ref.__init__()
|
||||||
|
|
||||||
|
def v2_runner_on_failed(self, result, ignore_errors=False):
|
||||||
|
|
||||||
|
delegated_vars = result._result.get('_ansible_delegated_vars', None)
|
||||||
|
self._clean_results(result._result, result._task.action)
|
||||||
|
|
||||||
|
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
|
||||||
|
self._print_task_banner(result._task)
|
||||||
|
|
||||||
|
self._handle_exception(result._result, use_stderr=True)
|
||||||
|
self._handle_warnings(result._result)
|
||||||
|
|
||||||
|
if result._task.loop and 'results' in result._result:
|
||||||
|
self._process_items(result)
|
||||||
|
|
||||||
|
else:
|
||||||
|
if delegated_vars:
|
||||||
|
self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
|
||||||
|
self._dump_results(result._result)), color=C.COLOR_ERROR,
|
||||||
|
stderr=True)
|
||||||
|
else:
|
||||||
|
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)),
|
||||||
|
color=C.COLOR_ERROR, stderr=True)
|
||||||
|
|
||||||
|
if ignore_errors:
|
||||||
|
self._display.display("...ignoring", color=C.COLOR_SKIP)
|
||||||
@@ -18,7 +18,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: sumologic
|
callback: sumologic
|
||||||
type: aggregate
|
type: aggregate
|
||||||
short_description: Sends task result events to Sumologic
|
short_description: Sends task result events to Sumologic
|
||||||
author: "Ryan Currah (@ryancurrah)"
|
author: "Ryan Currah (@ryancurrah)"
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: syslog_json
|
callback: syslog_json
|
||||||
type: notification
|
type: notification
|
||||||
requirements:
|
requirements:
|
||||||
- whitelist in configuration
|
- whitelist in configuration
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: unixy
|
callback: unixy
|
||||||
type: stdout
|
type: stdout
|
||||||
author: Allyson Bowles (@akatch)
|
author: Allyson Bowles (@akatch)
|
||||||
short_description: condensed Ansible output
|
short_description: condensed Ansible output
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: yaml
|
callback: yaml
|
||||||
type: stdout
|
type: stdout
|
||||||
short_description: yaml-ized Ansible screen output
|
short_description: yaml-ized Ansible screen output
|
||||||
description:
|
description:
|
||||||
@@ -50,7 +50,7 @@ def my_represent_scalar(self, tag, value, style=None):
|
|||||||
# ...no trailing space
|
# ...no trailing space
|
||||||
value = value.rstrip()
|
value = value.rstrip()
|
||||||
# ...and non-printable characters
|
# ...and non-printable characters
|
||||||
value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
|
value = ''.join(x for x in value if x in string.printable)
|
||||||
# ...tabs prevent blocks from expanding
|
# ...tabs prevent blocks from expanding
|
||||||
value = value.expandtabs()
|
value = value.expandtabs()
|
||||||
# ...and odd bits of whitespace
|
# ...and odd bits of whitespace
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Maykel Moya (!UNKNOWN) <mmoya@speedyrails.com>
|
author: Maykel Moya (!UNKNOWN) <mmoya@speedyrails.com>
|
||||||
name: chroot
|
connection: chroot
|
||||||
short_description: Interact with local chroot
|
short_description: Interact with local chroot
|
||||||
description:
|
description:
|
||||||
- Run commands or put/fetch files to an existing chroot on the Ansible controller.
|
- Run commands or put/fetch files to an existing chroot on the Ansible controller.
|
||||||
|
|||||||
364
plugins/connection/docker.py
Normal file
364
plugins/connection/docker.py
Normal file
@@ -0,0 +1,364 @@
|
|||||||
|
# Based on the chroot connection plugin by Maykel Moya
|
||||||
|
#
|
||||||
|
# (c) 2014, Lorin Hochstein
|
||||||
|
# (c) 2015, Leendert Brouwer (https://github.com/objectified)
|
||||||
|
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
|
||||||
|
# Copyright (c) 2017 Ansible Project
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
author:
|
||||||
|
- Lorin Hochestein (!UNKNOWN)
|
||||||
|
- Leendert Brouwer (!UNKNOWN)
|
||||||
|
connection: docker
|
||||||
|
short_description: Run tasks in docker containers
|
||||||
|
description:
|
||||||
|
- Run commands or put/fetch files to an existing docker container.
|
||||||
|
options:
|
||||||
|
remote_user:
|
||||||
|
description:
|
||||||
|
- The user to execute as inside the container
|
||||||
|
vars:
|
||||||
|
- name: ansible_user
|
||||||
|
- name: ansible_docker_user
|
||||||
|
docker_extra_args:
|
||||||
|
description:
|
||||||
|
- Extra arguments to pass to the docker command line
|
||||||
|
default: ''
|
||||||
|
remote_addr:
|
||||||
|
description:
|
||||||
|
- The name of the container you want to access.
|
||||||
|
default: inventory_hostname
|
||||||
|
vars:
|
||||||
|
- name: ansible_host
|
||||||
|
- name: ansible_docker_host
|
||||||
|
'''
|
||||||
|
|
||||||
|
import distutils.spawn
|
||||||
|
import fcntl
|
||||||
|
import os
|
||||||
|
import os.path
|
||||||
|
import subprocess
|
||||||
|
import re
|
||||||
|
|
||||||
|
from distutils.version import LooseVersion
|
||||||
|
|
||||||
|
import ansible.constants as C
|
||||||
|
from ansible.compat import selectors
|
||||||
|
from ansible.errors import AnsibleError, AnsibleFileNotFound
|
||||||
|
from ansible.module_utils.six.moves import shlex_quote
|
||||||
|
from ansible.module_utils._text import to_bytes, to_native, to_text
|
||||||
|
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
||||||
|
from ansible.utils.display import Display
|
||||||
|
|
||||||
|
display = Display()
|
||||||
|
|
||||||
|
|
||||||
|
class Connection(ConnectionBase):
|
||||||
|
''' Local docker based connections '''
|
||||||
|
|
||||||
|
transport = 'community.general.docker'
|
||||||
|
has_pipelining = True
|
||||||
|
|
||||||
|
def __init__(self, play_context, new_stdin, *args, **kwargs):
|
||||||
|
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
|
||||||
|
|
||||||
|
# Note: docker supports running as non-root in some configurations.
|
||||||
|
# (For instance, setting the UNIX socket file to be readable and
|
||||||
|
# writable by a specific UNIX group and then putting users into that
|
||||||
|
# group). Therefore we don't check that the user is root when using
|
||||||
|
# this connection. But if the user is getting a permission denied
|
||||||
|
# error it probably means that docker on their system is only
|
||||||
|
# configured to be connected to by root and they are not running as
|
||||||
|
# root.
|
||||||
|
|
||||||
|
# Windows uses Powershell modules
|
||||||
|
if getattr(self._shell, "_IS_WINDOWS", False):
|
||||||
|
self.module_implementation_preferences = ('.ps1', '.exe', '')
|
||||||
|
|
||||||
|
if 'docker_command' in kwargs:
|
||||||
|
self.docker_cmd = kwargs['docker_command']
|
||||||
|
else:
|
||||||
|
self.docker_cmd = distutils.spawn.find_executable('docker')
|
||||||
|
if not self.docker_cmd:
|
||||||
|
raise AnsibleError("docker command not found in PATH")
|
||||||
|
|
||||||
|
docker_version = self._get_docker_version()
|
||||||
|
if docker_version == u'dev':
|
||||||
|
display.warning(u'Docker version number is "dev". Will assume latest version.')
|
||||||
|
if docker_version != u'dev' and LooseVersion(docker_version) < LooseVersion(u'1.3'):
|
||||||
|
raise AnsibleError('docker connection type requires docker 1.3 or higher')
|
||||||
|
|
||||||
|
# The remote user we will request from docker (if supported)
|
||||||
|
self.remote_user = None
|
||||||
|
# The actual user which will execute commands in docker (if known)
|
||||||
|
self.actual_user = None
|
||||||
|
|
||||||
|
if self._play_context.remote_user is not None:
|
||||||
|
if docker_version == u'dev' or LooseVersion(docker_version) >= LooseVersion(u'1.7'):
|
||||||
|
# Support for specifying the exec user was added in docker 1.7
|
||||||
|
self.remote_user = self._play_context.remote_user
|
||||||
|
self.actual_user = self.remote_user
|
||||||
|
else:
|
||||||
|
self.actual_user = self._get_docker_remote_user()
|
||||||
|
|
||||||
|
if self.actual_user != self._play_context.remote_user:
|
||||||
|
display.warning(u'docker {0} does not support remote_user, using container default: {1}'
|
||||||
|
.format(docker_version, self.actual_user or u'?'))
|
||||||
|
elif self._display.verbosity > 2:
|
||||||
|
# Since we're not setting the actual_user, look it up so we have it for logging later
|
||||||
|
# Only do this if display verbosity is high enough that we'll need the value
|
||||||
|
# This saves overhead from calling into docker when we don't need to
|
||||||
|
self.actual_user = self._get_docker_remote_user()
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _sanitize_version(version):
|
||||||
|
return re.sub(u'[^0-9a-zA-Z.]', u'', version)
|
||||||
|
|
||||||
|
def _old_docker_version(self):
|
||||||
|
cmd_args = []
|
||||||
|
if self._play_context.docker_extra_args:
|
||||||
|
cmd_args += self._play_context.docker_extra_args.split(' ')
|
||||||
|
|
||||||
|
old_version_subcommand = ['version']
|
||||||
|
|
||||||
|
old_docker_cmd = [self.docker_cmd] + cmd_args + old_version_subcommand
|
||||||
|
p = subprocess.Popen(old_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
|
cmd_output, err = p.communicate()
|
||||||
|
|
||||||
|
return old_docker_cmd, to_native(cmd_output), err, p.returncode
|
||||||
|
|
||||||
|
def _new_docker_version(self):
|
||||||
|
# no result yet, must be newer Docker version
|
||||||
|
cmd_args = []
|
||||||
|
if self._play_context.docker_extra_args:
|
||||||
|
cmd_args += self._play_context.docker_extra_args.split(' ')
|
||||||
|
|
||||||
|
new_version_subcommand = ['version', '--format', "'{{.Server.Version}}'"]
|
||||||
|
|
||||||
|
new_docker_cmd = [self.docker_cmd] + cmd_args + new_version_subcommand
|
||||||
|
p = subprocess.Popen(new_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
|
cmd_output, err = p.communicate()
|
||||||
|
return new_docker_cmd, to_native(cmd_output), err, p.returncode
|
||||||
|
|
||||||
|
def _get_docker_version(self):
|
||||||
|
|
||||||
|
cmd, cmd_output, err, returncode = self._old_docker_version()
|
||||||
|
if returncode == 0:
|
||||||
|
for line in to_text(cmd_output, errors='surrogate_or_strict').split(u'\n'):
|
||||||
|
if line.startswith(u'Server version:'): # old docker versions
|
||||||
|
return self._sanitize_version(line.split()[2])
|
||||||
|
|
||||||
|
cmd, cmd_output, err, returncode = self._new_docker_version()
|
||||||
|
if returncode:
|
||||||
|
raise AnsibleError('Docker version check (%s) failed: %s' % (to_native(cmd), to_native(err)))
|
||||||
|
|
||||||
|
return self._sanitize_version(to_text(cmd_output, errors='surrogate_or_strict'))
|
||||||
|
|
||||||
|
def _get_docker_remote_user(self):
|
||||||
|
""" Get the default user configured in the docker container """
|
||||||
|
p = subprocess.Popen([self.docker_cmd, 'inspect', '--format', '{{.Config.User}}', self._play_context.remote_addr],
|
||||||
|
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
|
|
||||||
|
out, err = p.communicate()
|
||||||
|
out = to_text(out, errors='surrogate_or_strict')
|
||||||
|
|
||||||
|
if p.returncode != 0:
|
||||||
|
display.warning(u'unable to retrieve default user from docker container: %s %s' % (out, to_text(err)))
|
||||||
|
return None
|
||||||
|
|
||||||
|
# The default exec user is root, unless it was changed in the Dockerfile with USER
|
||||||
|
return out.strip() or u'root'
|
||||||
|
|
||||||
|
def _build_exec_cmd(self, cmd):
|
||||||
|
""" Build the local docker exec command to run cmd on remote_host
|
||||||
|
|
||||||
|
If remote_user is available and is supported by the docker
|
||||||
|
version we are using, it will be provided to docker exec.
|
||||||
|
"""
|
||||||
|
|
||||||
|
local_cmd = [self.docker_cmd]
|
||||||
|
|
||||||
|
if self._play_context.docker_extra_args:
|
||||||
|
local_cmd += self._play_context.docker_extra_args.split(' ')
|
||||||
|
|
||||||
|
local_cmd += [b'exec']
|
||||||
|
|
||||||
|
if self.remote_user is not None:
|
||||||
|
local_cmd += [b'-u', self.remote_user]
|
||||||
|
|
||||||
|
# -i is needed to keep stdin open which allows pipelining to work
|
||||||
|
local_cmd += [b'-i', self._play_context.remote_addr] + cmd
|
||||||
|
|
||||||
|
return local_cmd
|
||||||
|
|
||||||
|
def _connect(self, port=None):
|
||||||
|
""" Connect to the container. Nothing to do """
|
||||||
|
super(Connection, self)._connect()
|
||||||
|
if not self._connected:
|
||||||
|
display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
|
||||||
|
self.actual_user or u'?'), host=self._play_context.remote_addr
|
||||||
|
)
|
||||||
|
self._connected = True
|
||||||
|
|
||||||
|
def exec_command(self, cmd, in_data=None, sudoable=False):
|
||||||
|
""" Run a command on the docker host """
|
||||||
|
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
|
||||||
|
|
||||||
|
local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd])
|
||||||
|
|
||||||
|
display.vvv(u"EXEC {0}".format(to_text(local_cmd)), host=self._play_context.remote_addr)
|
||||||
|
display.debug("opening command with Popen()")
|
||||||
|
|
||||||
|
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
||||||
|
|
||||||
|
p = subprocess.Popen(
|
||||||
|
local_cmd,
|
||||||
|
stdin=subprocess.PIPE,
|
||||||
|
stdout=subprocess.PIPE,
|
||||||
|
stderr=subprocess.PIPE,
|
||||||
|
)
|
||||||
|
display.debug("done running command with Popen()")
|
||||||
|
|
||||||
|
if self.become and self.become.expect_prompt() and sudoable:
|
||||||
|
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
|
||||||
|
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
|
||||||
|
selector = selectors.DefaultSelector()
|
||||||
|
selector.register(p.stdout, selectors.EVENT_READ)
|
||||||
|
selector.register(p.stderr, selectors.EVENT_READ)
|
||||||
|
|
||||||
|
become_output = b''
|
||||||
|
try:
|
||||||
|
while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
|
||||||
|
events = selector.select(self._play_context.timeout)
|
||||||
|
if not events:
|
||||||
|
stdout, stderr = p.communicate()
|
||||||
|
raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))
|
||||||
|
|
||||||
|
for key, event in events:
|
||||||
|
if key.fileobj == p.stdout:
|
||||||
|
chunk = p.stdout.read()
|
||||||
|
elif key.fileobj == p.stderr:
|
||||||
|
chunk = p.stderr.read()
|
||||||
|
|
||||||
|
if not chunk:
|
||||||
|
stdout, stderr = p.communicate()
|
||||||
|
raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
|
||||||
|
become_output += chunk
|
||||||
|
finally:
|
||||||
|
selector.close()
|
||||||
|
|
||||||
|
if not self.become.check_success(become_output):
|
||||||
|
become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
|
||||||
|
p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
|
||||||
|
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
|
||||||
|
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
|
||||||
|
|
||||||
|
display.debug("getting output with communicate()")
|
||||||
|
stdout, stderr = p.communicate(in_data)
|
||||||
|
display.debug("done communicating")
|
||||||
|
|
||||||
|
display.debug("done with docker.exec_command()")
|
||||||
|
return (p.returncode, stdout, stderr)
|
||||||
|
|
||||||
|
def _prefix_login_path(self, remote_path):
|
||||||
|
''' Make sure that we put files into a standard path
|
||||||
|
|
||||||
|
If a path is relative, then we need to choose where to put it.
|
||||||
|
ssh chooses $HOME but we aren't guaranteed that a home dir will
|
||||||
|
exist in any given chroot. So for now we're choosing "/" instead.
|
||||||
|
This also happens to be the former default.
|
||||||
|
|
||||||
|
Can revisit using $HOME instead if it's a problem
|
||||||
|
'''
|
||||||
|
if getattr(self._shell, "_IS_WINDOWS", False):
|
||||||
|
import ntpath
|
||||||
|
return ntpath.normpath(remote_path)
|
||||||
|
else:
|
||||||
|
if not remote_path.startswith(os.path.sep):
|
||||||
|
remote_path = os.path.join(os.path.sep, remote_path)
|
||||||
|
return os.path.normpath(remote_path)
|
||||||
|
|
||||||
|
def put_file(self, in_path, out_path):
|
||||||
|
""" Transfer a file from local to docker container """
|
||||||
|
super(Connection, self).put_file(in_path, out_path)
|
||||||
|
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
|
||||||
|
|
||||||
|
out_path = self._prefix_login_path(out_path)
|
||||||
|
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
|
||||||
|
raise AnsibleFileNotFound(
|
||||||
|
"file or module does not exist: %s" % to_native(in_path))
|
||||||
|
|
||||||
|
out_path = shlex_quote(out_path)
|
||||||
|
# Older docker doesn't have native support for copying files into
|
||||||
|
# running containers, so we use docker exec to implement this
|
||||||
|
# Although docker version 1.8 and later provide support, the
|
||||||
|
# owner and group of the files are always set to root
|
||||||
|
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
|
||||||
|
if not os.fstat(in_file.fileno()).st_size:
|
||||||
|
count = ' count=0'
|
||||||
|
else:
|
||||||
|
count = ''
|
||||||
|
args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)])
|
||||||
|
args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
|
||||||
|
try:
|
||||||
|
p = subprocess.Popen(args, stdin=in_file,
|
||||||
|
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
|
except OSError:
|
||||||
|
raise AnsibleError("docker connection requires dd command in the container to put files")
|
||||||
|
stdout, stderr = p.communicate()
|
||||||
|
|
||||||
|
if p.returncode != 0:
|
||||||
|
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" %
|
||||||
|
(to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
|
||||||
|
|
||||||
|
def fetch_file(self, in_path, out_path):
|
||||||
|
""" Fetch a file from container to local. """
|
||||||
|
super(Connection, self).fetch_file(in_path, out_path)
|
||||||
|
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
|
||||||
|
|
||||||
|
in_path = self._prefix_login_path(in_path)
|
||||||
|
# out_path is the final file path, but docker takes a directory, not a
|
||||||
|
# file path
|
||||||
|
out_dir = os.path.dirname(out_path)
|
||||||
|
|
||||||
|
args = [self.docker_cmd, "cp", "%s:%s" % (self._play_context.remote_addr, in_path), out_dir]
|
||||||
|
args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
|
||||||
|
|
||||||
|
p = subprocess.Popen(args, stdin=subprocess.PIPE,
|
||||||
|
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
|
p.communicate()
|
||||||
|
|
||||||
|
if getattr(self._shell, "_IS_WINDOWS", False):
|
||||||
|
import ntpath
|
||||||
|
actual_out_path = ntpath.join(out_dir, ntpath.basename(in_path))
|
||||||
|
else:
|
||||||
|
actual_out_path = os.path.join(out_dir, os.path.basename(in_path))
|
||||||
|
|
||||||
|
if p.returncode != 0:
|
||||||
|
# Older docker doesn't have native support for fetching files command `cp`
|
||||||
|
# If `cp` fails, try to use `dd` instead
|
||||||
|
args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
|
||||||
|
args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
|
||||||
|
with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file:
|
||||||
|
try:
|
||||||
|
p = subprocess.Popen(args, stdin=subprocess.PIPE,
|
||||||
|
stdout=out_file, stderr=subprocess.PIPE)
|
||||||
|
except OSError:
|
||||||
|
raise AnsibleError("docker connection requires dd command in the container to put files")
|
||||||
|
stdout, stderr = p.communicate()
|
||||||
|
|
||||||
|
if p.returncode != 0:
|
||||||
|
raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
|
||||||
|
|
||||||
|
# Rename if needed
|
||||||
|
if actual_out_path != out_path:
|
||||||
|
os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict'))
|
||||||
|
|
||||||
|
def close(self):
|
||||||
|
""" Terminate the connection. Nothing to do for Docker"""
|
||||||
|
super(Connection, self).close()
|
||||||
|
self._connected = False
|
||||||
@@ -9,7 +9,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Michael Scherer (@msherer) <misc@zarb.org>
|
author: Michael Scherer (@msherer) <misc@zarb.org>
|
||||||
name: funcd
|
connection: funcd
|
||||||
short_description: Use funcd to connect to target
|
short_description: Use funcd to connect to target
|
||||||
description:
|
description:
|
||||||
- This transport permits you to use Ansible over Func.
|
- This transport permits you to use Ansible over Func.
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Stephan Lohse (!UNKNOWN) <dev-github@ploek.org>
|
author: Stephan Lohse (!UNKNOWN) <dev-github@ploek.org>
|
||||||
name: iocage
|
connection: iocage
|
||||||
short_description: Run tasks in iocage jails
|
short_description: Run tasks in iocage jails
|
||||||
description:
|
description:
|
||||||
- Run commands or put/fetch files to an existing iocage jail
|
- Run commands or put/fetch files to an existing iocage jail
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Ansible Core Team
|
author: Ansible Core Team
|
||||||
name: jail
|
connection: jail
|
||||||
short_description: Run tasks in jails
|
short_description: Run tasks in jails
|
||||||
description:
|
description:
|
||||||
- Run commands or put/fetch files to an existing jail
|
- Run commands or put/fetch files to an existing jail
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Joerg Thalheim (!UNKNOWN) <joerg@higgsboson.tk>
|
author: Joerg Thalheim (!UNKNOWN) <joerg@higgsboson.tk>
|
||||||
name: lxc
|
connection: lxc
|
||||||
short_description: Run tasks in lxc containers via lxc python library
|
short_description: Run tasks in lxc containers via lxc python library
|
||||||
description:
|
description:
|
||||||
- Run commands or put/fetch files to an existing lxc container using lxc python library
|
- Run commands or put/fetch files to an existing lxc container using lxc python library
|
||||||
|
|||||||
@@ -7,14 +7,14 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Matt Clay (@mattclay) <matt@mystile.com>
|
author: Matt Clay (@mattclay) <matt@mystile.com>
|
||||||
name: lxd
|
connection: lxd
|
||||||
short_description: Run tasks in lxc containers via lxc CLI
|
short_description: Run tasks in lxc containers via lxc CLI
|
||||||
description:
|
description:
|
||||||
- Run commands or put/fetch files to an existing lxc container using lxc CLI
|
- Run commands or put/fetch files to an existing lxc container using lxc CLI
|
||||||
options:
|
options:
|
||||||
remote_addr:
|
remote_addr:
|
||||||
description:
|
description:
|
||||||
- Container identifier.
|
- Container identifier
|
||||||
default: inventory_hostname
|
default: inventory_hostname
|
||||||
vars:
|
vars:
|
||||||
- name: ansible_host
|
- name: ansible_host
|
||||||
@@ -26,19 +26,6 @@ DOCUMENTATION = '''
|
|||||||
vars:
|
vars:
|
||||||
- name: ansible_executable
|
- name: ansible_executable
|
||||||
- name: ansible_lxd_executable
|
- name: ansible_lxd_executable
|
||||||
remote:
|
|
||||||
description:
|
|
||||||
- Name of the LXD remote to use.
|
|
||||||
default: local
|
|
||||||
vars:
|
|
||||||
- name: ansible_lxd_remote
|
|
||||||
version_added: 2.0.0
|
|
||||||
project:
|
|
||||||
description:
|
|
||||||
- Name of the LXD project to use.
|
|
||||||
vars:
|
|
||||||
- name: ansible_lxd_project
|
|
||||||
version_added: 2.0.0
|
|
||||||
'''
|
'''
|
||||||
|
|
||||||
import os
|
import os
|
||||||
@@ -83,15 +70,7 @@ class Connection(ConnectionBase):
|
|||||||
|
|
||||||
self._display.vvv(u"EXEC {0}".format(cmd), host=self._host)
|
self._display.vvv(u"EXEC {0}".format(cmd), host=self._host)
|
||||||
|
|
||||||
local_cmd = [self._lxc_cmd]
|
local_cmd = [self._lxc_cmd, "exec", self._host, "--", self._play_context.executable, "-c", cmd]
|
||||||
if self.get_option("project"):
|
|
||||||
local_cmd.extend(["--project", self.get_option("project")])
|
|
||||||
local_cmd.extend([
|
|
||||||
"exec",
|
|
||||||
"%s:%s" % (self.get_option("remote"), self._host),
|
|
||||||
"--",
|
|
||||||
self._play_context.executable, "-c", cmd
|
|
||||||
])
|
|
||||||
|
|
||||||
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
||||||
in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')
|
in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')
|
||||||
@@ -119,14 +98,7 @@ class Connection(ConnectionBase):
|
|||||||
if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
|
if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
|
||||||
raise AnsibleFileNotFound("input path is not a file: %s" % in_path)
|
raise AnsibleFileNotFound("input path is not a file: %s" % in_path)
|
||||||
|
|
||||||
local_cmd = [self._lxc_cmd]
|
local_cmd = [self._lxc_cmd, "file", "push", in_path, self._host + "/" + out_path]
|
||||||
if self.get_option("project"):
|
|
||||||
local_cmd.extend(["--project", self.get_option("project")])
|
|
||||||
local_cmd.extend([
|
|
||||||
"file", "push",
|
|
||||||
in_path,
|
|
||||||
"%s:%s/%s" % (self.get_option("remote"), self._host, out_path)
|
|
||||||
])
|
|
||||||
|
|
||||||
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
||||||
|
|
||||||
@@ -139,14 +111,7 @@ class Connection(ConnectionBase):
|
|||||||
|
|
||||||
self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host)
|
self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host)
|
||||||
|
|
||||||
local_cmd = [self._lxc_cmd]
|
local_cmd = [self._lxc_cmd, "file", "pull", self._host + "/" + in_path, out_path]
|
||||||
if self.get_option("project"):
|
|
||||||
local_cmd.extend(["--project", self.get_option("project")])
|
|
||||||
local_cmd.extend([
|
|
||||||
"file", "pull",
|
|
||||||
"%s:%s/%s" % (self.get_option("remote"), self._host, in_path),
|
|
||||||
out_path
|
|
||||||
])
|
|
||||||
|
|
||||||
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
||||||
|
|
||||||
|
|||||||
173
plugins/connection/oc.py
Normal file
173
plugins/connection/oc.py
Normal file
@@ -0,0 +1,173 @@
|
|||||||
|
# Based on the docker connection plugin
|
||||||
|
#
|
||||||
|
# Connection plugin for configuring kubernetes containers with kubectl
|
||||||
|
# (c) 2017, XuXinkun <xuxinkun@gmail.com>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
author:
|
||||||
|
- xuxinkun (!UNKNOWN)
|
||||||
|
|
||||||
|
connection: oc
|
||||||
|
|
||||||
|
short_description: Execute tasks in pods running on OpenShift.
|
||||||
|
|
||||||
|
description:
|
||||||
|
- Use the oc exec command to run tasks in, or put/fetch files to, pods running on the OpenShift
|
||||||
|
container platform.
|
||||||
|
|
||||||
|
|
||||||
|
requirements:
|
||||||
|
- oc (go binary)
|
||||||
|
|
||||||
|
options:
|
||||||
|
oc_pod:
|
||||||
|
description:
|
||||||
|
- Pod name. Required when the host name does not match pod name.
|
||||||
|
default: ''
|
||||||
|
vars:
|
||||||
|
- name: ansible_oc_pod
|
||||||
|
env:
|
||||||
|
- name: K8S_AUTH_POD
|
||||||
|
oc_container:
|
||||||
|
description:
|
||||||
|
- Container name. Required when a pod contains more than one container.
|
||||||
|
default: ''
|
||||||
|
vars:
|
||||||
|
- name: ansible_oc_container
|
||||||
|
env:
|
||||||
|
- name: K8S_AUTH_CONTAINER
|
||||||
|
oc_namespace:
|
||||||
|
description:
|
||||||
|
- The namespace of the pod
|
||||||
|
default: ''
|
||||||
|
vars:
|
||||||
|
- name: ansible_oc_namespace
|
||||||
|
env:
|
||||||
|
- name: K8S_AUTH_NAMESPACE
|
||||||
|
oc_extra_args:
|
||||||
|
description:
|
||||||
|
- Extra arguments to pass to the oc command line.
|
||||||
|
default: ''
|
||||||
|
vars:
|
||||||
|
- name: ansible_oc_extra_args
|
||||||
|
env:
|
||||||
|
- name: K8S_AUTH_EXTRA_ARGS
|
||||||
|
oc_kubeconfig:
|
||||||
|
description:
|
||||||
|
- Path to a oc config file. Defaults to I(~/.kube/conig)
|
||||||
|
default: ''
|
||||||
|
vars:
|
||||||
|
- name: ansible_oc_kubeconfig
|
||||||
|
- name: ansible_oc_config
|
||||||
|
env:
|
||||||
|
- name: K8S_AUTH_KUBECONFIG
|
||||||
|
oc_context:
|
||||||
|
description:
|
||||||
|
- The name of a context found in the K8s config file.
|
||||||
|
default: ''
|
||||||
|
vars:
|
||||||
|
- name: ansible_oc_context
|
||||||
|
env:
|
||||||
|
- name: K8S_AUTH_CONTEXT
|
||||||
|
oc_host:
|
||||||
|
description:
|
||||||
|
- URL for accessing the API.
|
||||||
|
default: ''
|
||||||
|
vars:
|
||||||
|
- name: ansible_oc_host
|
||||||
|
- name: ansible_oc_server
|
||||||
|
env:
|
||||||
|
- name: K8S_AUTH_HOST
|
||||||
|
- name: K8S_AUTH_SERVER
|
||||||
|
oc_token:
|
||||||
|
description:
|
||||||
|
- API authentication bearer token.
|
||||||
|
vars:
|
||||||
|
- name: ansible_oc_token
|
||||||
|
- name: ansible_oc_api_key
|
||||||
|
env:
|
||||||
|
- name: K8S_AUTH_TOKEN
|
||||||
|
- name: K8S_AUTH_API_KEY
|
||||||
|
client_cert:
|
||||||
|
description:
|
||||||
|
- Path to a certificate used to authenticate with the API.
|
||||||
|
default: ''
|
||||||
|
vars:
|
||||||
|
- name: ansible_oc_cert_file
|
||||||
|
- name: ansible_oc_client_cert
|
||||||
|
env:
|
||||||
|
- name: K8S_AUTH_CERT_FILE
|
||||||
|
aliases: [ oc_cert_file ]
|
||||||
|
client_key:
|
||||||
|
description:
|
||||||
|
- Path to a key file used to authenticate with the API.
|
||||||
|
default: ''
|
||||||
|
vars:
|
||||||
|
- name: ansible_oc_key_file
|
||||||
|
- name: ansible_oc_client_key
|
||||||
|
env:
|
||||||
|
- name: K8S_AUTH_KEY_FILE
|
||||||
|
aliases: [ oc_key_file ]
|
||||||
|
ca_cert:
|
||||||
|
description:
|
||||||
|
- Path to a CA certificate used to authenticate with the API.
|
||||||
|
default: ''
|
||||||
|
vars:
|
||||||
|
- name: ansible_oc_ssl_ca_cert
|
||||||
|
- name: ansible_oc_ca_cert
|
||||||
|
env:
|
||||||
|
- name: K8S_AUTH_SSL_CA_CERT
|
||||||
|
aliases: [ oc_ssl_ca_cert ]
|
||||||
|
validate_certs:
|
||||||
|
description:
|
||||||
|
- Whether or not to verify the API server's SSL certificate. Defaults to I(true).
|
||||||
|
default: ''
|
||||||
|
vars:
|
||||||
|
- name: ansible_oc_verify_ssl
|
||||||
|
- name: ansible_oc_validate_certs
|
||||||
|
env:
|
||||||
|
- name: K8S_AUTH_VERIFY_SSL
|
||||||
|
aliases: [ oc_verify_ssl ]
|
||||||
|
'''
|
||||||
|
|
||||||
|
from ansible_collections.community.kubernetes.plugins.connection.kubectl import Connection as KubectlConnection
|
||||||
|
|
||||||
|
|
||||||
|
CONNECTION_TRANSPORT = 'community.general.oc'
|
||||||
|
|
||||||
|
CONNECTION_OPTIONS = {
|
||||||
|
'oc_container': '-c',
|
||||||
|
'oc_namespace': '-n',
|
||||||
|
'oc_kubeconfig': '--config',
|
||||||
|
'oc_context': '--context',
|
||||||
|
'oc_host': '--server',
|
||||||
|
'client_cert': '--client-certificate',
|
||||||
|
'client_key': '--client-key',
|
||||||
|
'ca_cert': '--certificate-authority',
|
||||||
|
'validate_certs': '--insecure-skip-tls-verify',
|
||||||
|
'oc_token': '--token'
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class Connection(KubectlConnection):
|
||||||
|
''' Local oc based connections '''
|
||||||
|
transport = CONNECTION_TRANSPORT
|
||||||
|
connection_options = CONNECTION_OPTIONS
|
||||||
|
documentation = DOCUMENTATION
|
||||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: qubes
|
connection: qubes
|
||||||
short_description: Interact with an existing QubesOS AppVM
|
short_description: Interact with an existing QubesOS AppVM
|
||||||
|
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Michael Scherer (@mscherer) <misc@zarb.org>
|
author: Michael Scherer (@mscherer) <misc@zarb.org>
|
||||||
name: saltstack
|
connection: saltstack
|
||||||
short_description: Allow ansible to piggyback on salt minions
|
short_description: Allow ansible to piggyback on salt minions
|
||||||
description:
|
description:
|
||||||
- This allows you to use existing Saltstack infrastructure to connect to targets.
|
- This allows you to use existing Saltstack infrastructure to connect to targets.
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Ansible Core Team
|
author: Ansible Core Team
|
||||||
name: zone
|
connection: zone
|
||||||
short_description: Run tasks in a zone instance
|
short_description: Run tasks in a zone instance
|
||||||
description:
|
description:
|
||||||
- Run commands or put/fetch files to an existing zone
|
- Run commands or put/fetch files to an existing zone
|
||||||
|
|||||||
62
plugins/doc_fragments/_gcp.py
Normal file
62
plugins/doc_fragments/_gcp.py
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright: (c) 2018, Google Inc.
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
|
||||||
|
class ModuleDocFragment(object):
|
||||||
|
# GCP doc fragment.
|
||||||
|
DOCUMENTATION = r'''
|
||||||
|
options:
|
||||||
|
project:
|
||||||
|
description:
|
||||||
|
- The Google Cloud Platform project to use.
|
||||||
|
type: str
|
||||||
|
auth_kind:
|
||||||
|
description:
|
||||||
|
- The type of credential used.
|
||||||
|
type: str
|
||||||
|
required: true
|
||||||
|
choices: [ application, machineaccount, serviceaccount ]
|
||||||
|
service_account_contents:
|
||||||
|
description:
|
||||||
|
- The contents of a Service Account JSON file, either in a dictionary or as a JSON string that represents it.
|
||||||
|
type: jsonarg
|
||||||
|
service_account_file:
|
||||||
|
description:
|
||||||
|
- The path of a Service Account JSON file if serviceaccount is selected as type.
|
||||||
|
type: path
|
||||||
|
service_account_email:
|
||||||
|
description:
|
||||||
|
- An optional service account email address if machineaccount is selected
|
||||||
|
and the user does not wish to use the default email.
|
||||||
|
type: str
|
||||||
|
scopes:
|
||||||
|
description:
|
||||||
|
- Array of scopes to be used.
|
||||||
|
type: list
|
||||||
|
elements: str
|
||||||
|
env_type:
|
||||||
|
description:
|
||||||
|
- Specifies which Ansible environment you're running this module within.
|
||||||
|
- This should not be set unless you know what you're doing.
|
||||||
|
- This only alters the User Agent string for any API requests.
|
||||||
|
type: str
|
||||||
|
notes:
|
||||||
|
- for authentication, you can set service_account_file using the
|
||||||
|
c(gcp_service_account_file) env variable.
|
||||||
|
- for authentication, you can set service_account_contents using the
|
||||||
|
c(GCP_SERVICE_ACCOUNT_CONTENTS) env variable.
|
||||||
|
- For authentication, you can set service_account_email using the
|
||||||
|
C(GCP_SERVICE_ACCOUNT_EMAIL) env variable.
|
||||||
|
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env
|
||||||
|
variable.
|
||||||
|
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
|
||||||
|
- Environment variables values will only be used if the playbook values are
|
||||||
|
not set.
|
||||||
|
- The I(service_account_email) and I(service_account_file) options are
|
||||||
|
mutually exclusive.
|
||||||
|
'''
|
||||||
136
plugins/doc_fragments/docker.py
Normal file
136
plugins/doc_fragments/docker.py
Normal file
@@ -0,0 +1,136 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
|
||||||
|
class ModuleDocFragment(object):
|
||||||
|
|
||||||
|
# Docker doc fragment
|
||||||
|
DOCUMENTATION = r'''
|
||||||
|
|
||||||
|
options:
|
||||||
|
docker_host:
|
||||||
|
description:
|
||||||
|
- The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
|
||||||
|
TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
|
||||||
|
the module will automatically replace C(tcp) in the connection URL with C(https).
|
||||||
|
- If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used
|
||||||
|
instead. If the environment variable is not set, the default value will be used.
|
||||||
|
type: str
|
||||||
|
default: unix://var/run/docker.sock
|
||||||
|
aliases: [ docker_url ]
|
||||||
|
tls_hostname:
|
||||||
|
description:
|
||||||
|
- When verifying the authenticity of the Docker Host server, provide the expected name of the server.
|
||||||
|
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will
|
||||||
|
be used instead. If the environment variable is not set, the default value will be used.
|
||||||
|
type: str
|
||||||
|
default: localhost
|
||||||
|
api_version:
|
||||||
|
description:
|
||||||
|
- The version of the Docker API running on the Docker Host.
|
||||||
|
- Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon.
|
||||||
|
- If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be
|
||||||
|
used instead. If the environment variable is not set, the default value will be used.
|
||||||
|
type: str
|
||||||
|
default: auto
|
||||||
|
aliases: [ docker_api_version ]
|
||||||
|
timeout:
|
||||||
|
description:
|
||||||
|
- The maximum amount of time in seconds to wait on a response from the API.
|
||||||
|
- If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used
|
||||||
|
instead. If the environment variable is not set, the default value will be used.
|
||||||
|
type: int
|
||||||
|
default: 60
|
||||||
|
ca_cert:
|
||||||
|
description:
|
||||||
|
- Use a CA certificate when performing server verification by providing the path to a CA certificate file.
|
||||||
|
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
|
||||||
|
the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
|
||||||
|
type: path
|
||||||
|
aliases: [ tls_ca_cert, cacert_path ]
|
||||||
|
client_cert:
|
||||||
|
description:
|
||||||
|
- Path to the client's TLS certificate file.
|
||||||
|
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
|
||||||
|
the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
|
||||||
|
type: path
|
||||||
|
aliases: [ tls_client_cert, cert_path ]
|
||||||
|
client_key:
|
||||||
|
description:
|
||||||
|
- Path to the client's TLS key file.
|
||||||
|
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
|
||||||
|
the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
|
||||||
|
type: path
|
||||||
|
aliases: [ tls_client_key, key_path ]
|
||||||
|
ssl_version:
|
||||||
|
description:
|
||||||
|
- Provide a valid SSL version number. Default value determined by ssl.py module.
|
||||||
|
- If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
|
||||||
|
used instead.
|
||||||
|
type: str
|
||||||
|
tls:
|
||||||
|
description:
|
||||||
|
- Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
|
||||||
|
server. Note that if I(validate_certs) is set to C(yes) as well, it will take precedence.
|
||||||
|
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
|
||||||
|
instead. If the environment variable is not set, the default value will be used.
|
||||||
|
type: bool
|
||||||
|
default: no
|
||||||
|
validate_certs:
|
||||||
|
description:
|
||||||
|
- Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
|
||||||
|
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
|
||||||
|
used instead. If the environment variable is not set, the default value will be used.
|
||||||
|
type: bool
|
||||||
|
default: no
|
||||||
|
aliases: [ tls_verify ]
|
||||||
|
debug:
|
||||||
|
description:
|
||||||
|
- Debug mode
|
||||||
|
type: bool
|
||||||
|
default: no
|
||||||
|
|
||||||
|
notes:
|
||||||
|
- Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
|
||||||
|
You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
|
||||||
|
C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
|
||||||
|
with the product that sets up the environment. It will set these variables for you. See
|
||||||
|
U(https://docs.docker.com/machine/reference/env/) for more details.
|
||||||
|
- When connecting to Docker daemon with TLS, you might need to install additional Python packages.
|
||||||
|
For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip).
|
||||||
|
- Note that the Docker SDK for Python only allows to specify the path to the Docker configuration for very few functions.
|
||||||
|
In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
|
||||||
|
and use C($DOCKER_CONFIG/config.json) otherwise.
|
||||||
|
'''
|
||||||
|
|
||||||
|
# Additional, more specific stuff for minimal Docker SDK for Python version < 2.0
|
||||||
|
|
||||||
|
DOCKER_PY_1_DOCUMENTATION = r'''
|
||||||
|
options: {}
|
||||||
|
requirements:
|
||||||
|
- "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
|
||||||
|
Python module has been superseded by L(docker,https://pypi.org/project/docker/)
|
||||||
|
(see L(here,https://github.com/docker/docker-py/issues/1310) for details).
|
||||||
|
For Python 2.6, C(docker-py) must be used. Otherwise, it is recommended to
|
||||||
|
install the C(docker) Python module. Note that both modules should *not*
|
||||||
|
be installed at the same time. Also note that when both modules are installed
|
||||||
|
and one of them is uninstalled, the other might no longer function and a
|
||||||
|
reinstall of it is required."
|
||||||
|
'''
|
||||||
|
|
||||||
|
# Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.
|
||||||
|
# Note that Docker SDK for Python >= 2.0 requires Python 2.7 or newer.
|
||||||
|
|
||||||
|
DOCKER_PY_2_DOCUMENTATION = r'''
|
||||||
|
options: {}
|
||||||
|
requirements:
|
||||||
|
- "Python >= 2.7"
|
||||||
|
- "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
|
||||||
|
Python module has been superseded by L(docker,https://pypi.org/project/docker/)
|
||||||
|
(see L(here,https://github.com/docker/docker-py/issues/1310) for details).
|
||||||
|
This module does *not* work with docker-py."
|
||||||
|
'''
|
||||||
23
plugins/doc_fragments/hetzner.py
Normal file
23
plugins/doc_fragments/hetzner.py
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright: (c) 2019 Felix Fontein <felix@fontein.de>
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
|
||||||
|
class ModuleDocFragment(object):
|
||||||
|
|
||||||
|
# Standard files documentation fragment
|
||||||
|
DOCUMENTATION = r'''
|
||||||
|
options:
|
||||||
|
hetzner_user:
|
||||||
|
description: The username for the Robot webservice user.
|
||||||
|
type: str
|
||||||
|
required: yes
|
||||||
|
hetzner_password:
|
||||||
|
description: The password for the Robot webservice user.
|
||||||
|
type: str
|
||||||
|
required: yes
|
||||||
|
'''
|
||||||
133
plugins/doc_fragments/kubevirt_common_options.py
Normal file
133
plugins/doc_fragments/kubevirt_common_options.py
Normal file
@@ -0,0 +1,133 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright: (c) 2018, KubeVirt Team <@kubevirt>
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
|
||||||
|
class ModuleDocFragment(object):
|
||||||
|
DOCUMENTATION = r'''
|
||||||
|
options:
|
||||||
|
resource_definition:
|
||||||
|
description:
|
||||||
|
- "A partial YAML definition of the object being created/updated. Here you can define Kubernetes
|
||||||
|
resource parameters not covered by this module's parameters."
|
||||||
|
- "NOTE: I(resource_definition) has lower priority than module parameters. If you try to define e.g.
|
||||||
|
I(metadata.namespace) here, that value will be ignored and I(namespace) used instead."
|
||||||
|
aliases:
|
||||||
|
- definition
|
||||||
|
- inline
|
||||||
|
type: dict
|
||||||
|
wait:
|
||||||
|
description:
|
||||||
|
- "I(True) if the module should wait for the resource to get into desired state."
|
||||||
|
type: bool
|
||||||
|
default: yes
|
||||||
|
force:
|
||||||
|
description:
|
||||||
|
- If set to C(no), and I(state) is C(present), an existing object will be replaced.
|
||||||
|
type: bool
|
||||||
|
default: no
|
||||||
|
wait_timeout:
|
||||||
|
description:
|
||||||
|
- The amount of time in seconds the module should wait for the resource to get into desired state.
|
||||||
|
type: int
|
||||||
|
default: 120
|
||||||
|
wait_sleep:
|
||||||
|
description:
|
||||||
|
- Number of seconds to sleep between checks.
|
||||||
|
default: 5
|
||||||
|
version_added: '0.2.0'
|
||||||
|
memory:
|
||||||
|
description:
|
||||||
|
- The amount of memory to be requested by virtual machine.
|
||||||
|
- For example 1024Mi.
|
||||||
|
type: str
|
||||||
|
memory_limit:
|
||||||
|
description:
|
||||||
|
- The maximum memory to be used by virtual machine.
|
||||||
|
- For example 1024Mi.
|
||||||
|
type: str
|
||||||
|
machine_type:
|
||||||
|
description:
|
||||||
|
- QEMU machine type is the actual chipset of the virtual machine.
|
||||||
|
type: str
|
||||||
|
merge_type:
|
||||||
|
description:
|
||||||
|
- Whether to override the default patch merge approach with a specific type.
|
||||||
|
- If more than one merge type is given, the merge types will be tried in order.
|
||||||
|
- "Defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
|
||||||
|
on resource kinds that combine Custom Resources and built-in resources, as
|
||||||
|
Custom Resource Definitions typically aren't updatable by the usual strategic merge."
|
||||||
|
- "See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)"
|
||||||
|
type: list
|
||||||
|
choices: [ json, merge, strategic-merge ]
|
||||||
|
cpu_shares:
|
||||||
|
description:
|
||||||
|
- "Specify CPU shares."
|
||||||
|
type: int
|
||||||
|
cpu_limit:
|
||||||
|
description:
|
||||||
|
- "Is converted to its millicore value and multiplied by 100. The resulting value is the total amount of CPU time that a container can use
|
||||||
|
every 100ms. A virtual machine cannot use more than its share of CPU time during this interval."
|
||||||
|
type: int
|
||||||
|
cpu_cores:
|
||||||
|
description:
|
||||||
|
- "Number of CPU cores."
|
||||||
|
type: int
|
||||||
|
cpu_model:
|
||||||
|
description:
|
||||||
|
- "CPU model."
|
||||||
|
- "You can check list of available models here: U(https://github.com/libvirt/libvirt/blob/master/src/cpu_map/index.xml)."
|
||||||
|
- "I(Note:) User can define default CPU model via as I(default-cpu-model) in I(kubevirt-config) I(ConfigMap), if not set I(host-model) is used."
|
||||||
|
- "I(Note:) Be sure that node CPU model where you run a VM, has the same or higher CPU family."
|
||||||
|
- "I(Note:) If CPU model wasn't defined, the VM will have CPU model closest to one that used on the node where the VM is running."
|
||||||
|
type: str
|
||||||
|
bootloader:
|
||||||
|
description:
|
||||||
|
- "Specify the bootloader of the virtual machine."
|
||||||
|
- "All virtual machines use BIOS by default for booting."
|
||||||
|
type: str
|
||||||
|
smbios_uuid:
|
||||||
|
description:
|
||||||
|
- "In order to provide a consistent view on the virtualized hardware for the guest OS, the SMBIOS UUID can be set."
|
||||||
|
type: str
|
||||||
|
cpu_features:
|
||||||
|
description:
|
||||||
|
- "List of dictionary to fine-tune features provided by the selected CPU model."
|
||||||
|
- "I(Note): Policy attribute can either be omitted or contain one of the following policies: force, require, optional, disable, forbid."
|
||||||
|
- "I(Note): In case a policy is omitted for a feature, it will default to require."
|
||||||
|
- "More information about policies: U(https://libvirt.org/formatdomain.html#elementsCPU)"
|
||||||
|
type: list
|
||||||
|
headless:
|
||||||
|
description:
|
||||||
|
- "Specify if the virtual machine should have attached a minimal Video and Graphics device configuration."
|
||||||
|
- "By default a minimal Video and Graphics device configuration will be applied to the VirtualMachineInstance. The video device is vga
|
||||||
|
compatible and comes with a memory size of 16 MB."
|
||||||
|
hugepage_size:
|
||||||
|
description:
|
||||||
|
- "Specify huge page size."
|
||||||
|
type: str
|
||||||
|
tablets:
|
||||||
|
description:
|
||||||
|
- "Specify tablets to be used as input devices"
|
||||||
|
type: list
|
||||||
|
hostname:
|
||||||
|
description:
|
||||||
|
- "Specifies the hostname of the virtual machine. The hostname will be set either by dhcp, cloud-init if configured or virtual machine
|
||||||
|
name will be used."
|
||||||
|
subdomain:
|
||||||
|
description:
|
||||||
|
- "If specified, the fully qualified virtual machine hostname will be hostname.subdomain.namespace.svc.cluster_domain. If not specified,
|
||||||
|
the virtual machine will not have a domain name at all. The DNS entry will resolve to the virtual machine, no matter if the virtual machine
|
||||||
|
itself can pick up a hostname."
|
||||||
|
requirements:
|
||||||
|
- python >= 2.7
|
||||||
|
- openshift >= 0.8.2
|
||||||
|
notes:
|
||||||
|
- "In order to use this module you have to install Openshift Python SDK.
|
||||||
|
To ensure it's installed with correct version you can create the following task:
|
||||||
|
I(pip: name=openshift>=0.8.2)"
|
||||||
|
'''
|
||||||
103
plugins/doc_fragments/kubevirt_vm_options.py
Normal file
103
plugins/doc_fragments/kubevirt_vm_options.py
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright: (c) 2018, KubeVirt Team <@kubevirt>
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
|
||||||
|
class ModuleDocFragment(object):
|
||||||
|
|
||||||
|
# Standard oVirt documentation fragment
|
||||||
|
DOCUMENTATION = r'''
|
||||||
|
options:
|
||||||
|
disks:
|
||||||
|
description:
|
||||||
|
- List of dictionaries which specify disks of the virtual machine.
|
||||||
|
- "A disk can be made accessible via four different types: I(disk), I(lun), I(cdrom), I(floppy)."
|
||||||
|
- "All possible configuration options are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_disk)"
|
||||||
|
- Each disk must have specified a I(volume) that declares which volume type of the disk
|
||||||
|
All possible configuration options of volume are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_volume).
|
||||||
|
type: list
|
||||||
|
labels:
|
||||||
|
description:
|
||||||
|
- Labels are key/value pairs that are attached to virtual machines. Labels are intended to be used to
|
||||||
|
specify identifying attributes of virtual machines that are meaningful and relevant to users, but do not directly
|
||||||
|
imply semantics to the core system. Labels can be used to organize and to select subsets of virtual machines.
|
||||||
|
Labels can be attached to virtual machines at creation time and subsequently added and modified at any time.
|
||||||
|
- More on labels that are used for internal implementation U(https://kubevirt.io/user-guide/#/misc/annotations_and_labels)
|
||||||
|
type: dict
|
||||||
|
interfaces:
|
||||||
|
description:
|
||||||
|
- An interface defines a virtual network interface of a virtual machine (also called a frontend).
|
||||||
|
- All possible configuration options interfaces are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_interface)
|
||||||
|
- Each interface must have specified a I(network) that declares which logical or physical device it is connected to (also called as backend).
|
||||||
|
All possible configuration options of network are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_network).
|
||||||
|
type: list
|
||||||
|
cloud_init_nocloud:
|
||||||
|
description:
|
||||||
|
- "Represents a cloud-init NoCloud user-data source. The NoCloud data will be added
|
||||||
|
as a disk to the virtual machine. A proper cloud-init installation is required inside the guest.
|
||||||
|
More information U(https://kubevirt.io/api-reference/master/definitions.html#_v1_cloudinitnocloudsource)"
|
||||||
|
type: dict
|
||||||
|
affinity:
|
||||||
|
description:
|
||||||
|
- "Describes node affinity scheduling rules for the vm."
|
||||||
|
type: dict
|
||||||
|
suboptions:
|
||||||
|
soft:
|
||||||
|
description:
|
||||||
|
- "The scheduler will prefer to schedule vms to nodes that satisfy the affinity expressions specified by this field, but it may choose a
|
||||||
|
node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for
|
||||||
|
each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute
|
||||||
|
a sum by iterating through the elements of this field and adding C(weight) to the sum if the node has vms which matches the corresponding
|
||||||
|
C(term); the nodes with the highest sum are the most preferred."
|
||||||
|
type: dict
|
||||||
|
hard:
|
||||||
|
description:
|
||||||
|
- "If the affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node. If
|
||||||
|
the affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to a vm label update), the
|
||||||
|
system may or may not try to eventually evict the vm from its node. When there are multiple elements, the lists of nodes corresponding to
|
||||||
|
each C(term) are intersected, i.e. all terms must be satisfied."
|
||||||
|
type: dict
|
||||||
|
node_affinity:
|
||||||
|
description:
|
||||||
|
- "Describes vm affinity scheduling rules e.g. co-locate this vm in the same node, zone, etc. as some other vms"
|
||||||
|
type: dict
|
||||||
|
suboptions:
|
||||||
|
soft:
|
||||||
|
description:
|
||||||
|
- "The scheduler will prefer to schedule vms to nodes that satisfy the affinity expressions specified by this field, but it may choose
|
||||||
|
a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e.
|
||||||
|
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.),
|
||||||
|
compute a sum by iterating through the elements of this field and adding C(weight) to the sum if the node matches the corresponding
|
||||||
|
match_expressions; the nodes with the highest sum are the most preferred."
|
||||||
|
type: dict
|
||||||
|
hard:
|
||||||
|
description:
|
||||||
|
- "If the affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node. If
|
||||||
|
the affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to an update), the system
|
||||||
|
may or may not try to eventually evict the vm from its node."
|
||||||
|
type: dict
|
||||||
|
anti_affinity:
|
||||||
|
description:
|
||||||
|
- "Describes vm anti-affinity scheduling rules e.g. avoid putting this vm in the same node, zone, etc. as some other vms."
|
||||||
|
type: dict
|
||||||
|
suboptions:
|
||||||
|
soft:
|
||||||
|
description:
|
||||||
|
- "The scheduler will prefer to schedule vms to nodes that satisfy the anti-affinity expressions specified by this field, but it may
|
||||||
|
choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights,
|
||||||
|
i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions,
|
||||||
|
etc.), compute a sum by iterating through the elements of this field and adding C(weight) to the sum if the node has vms which matches
|
||||||
|
the corresponding C(term); the nodes with the highest sum are the most preferred."
|
||||||
|
type: dict
|
||||||
|
hard:
|
||||||
|
description:
|
||||||
|
- "If the anti-affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node.
|
||||||
|
If the anti-affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to a vm label
|
||||||
|
update), the system may or may not try to eventually evict the vm from its node. When there are multiple elements, the lists of nodes
|
||||||
|
corresponding to each C(term) are intersected, i.e. all terms must be satisfied."
|
||||||
|
type: dict
|
||||||
|
'''
|
||||||
@@ -15,7 +15,7 @@ class ModuleDocFragment(object):
|
|||||||
options:
|
options:
|
||||||
bind_dn:
|
bind_dn:
|
||||||
description:
|
description:
|
||||||
- A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism as default.
|
- A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism.
|
||||||
- If this is blank, we'll use an anonymous bind.
|
- If this is blank, we'll use an anonymous bind.
|
||||||
type: str
|
type: str
|
||||||
bind_pw:
|
bind_pw:
|
||||||
@@ -27,15 +27,6 @@ options:
|
|||||||
description:
|
description:
|
||||||
- The DN of the entry to add or remove.
|
- The DN of the entry to add or remove.
|
||||||
type: str
|
type: str
|
||||||
referrals_chasing:
|
|
||||||
choices: [disabled, anonymous]
|
|
||||||
default: anonymous
|
|
||||||
type: str
|
|
||||||
description:
|
|
||||||
- Set the referrals chasing behavior.
|
|
||||||
- C(anonymous) follow referrals anonymously. This is the default behavior.
|
|
||||||
- C(disabled) disable referrals chasing. This sets C(OPT_REFERRALS) to off.
|
|
||||||
version_added: 2.0.0
|
|
||||||
server_uri:
|
server_uri:
|
||||||
description:
|
description:
|
||||||
- A URI to the LDAP server.
|
- A URI to the LDAP server.
|
||||||
@@ -53,12 +44,4 @@ options:
|
|||||||
- This should only be used on sites using self-signed certificates.
|
- This should only be used on sites using self-signed certificates.
|
||||||
type: bool
|
type: bool
|
||||||
default: yes
|
default: yes
|
||||||
sasl_class:
|
|
||||||
description:
|
|
||||||
- The class to use for SASL authentication.
|
|
||||||
- possible choices are C(external), C(gssapi).
|
|
||||||
type: str
|
|
||||||
choices: ['external', 'gssapi']
|
|
||||||
default: external
|
|
||||||
version_added: "2.0.0"
|
|
||||||
'''
|
'''
|
||||||
|
|||||||
@@ -78,24 +78,6 @@ options:
|
|||||||
variable.
|
variable.
|
||||||
type: int
|
type: int
|
||||||
default: 1000
|
default: 1000
|
||||||
http_pool_connections:
|
|
||||||
description:
|
|
||||||
- Number of pools to be used by the C(infoblox_client.Connector) object.
|
|
||||||
- This is passed as-is to the underlying C(requests.adapters.HTTPAdapter) class.
|
|
||||||
type: int
|
|
||||||
default: 10
|
|
||||||
http_pool_maxsize:
|
|
||||||
description:
|
|
||||||
- Maximum number of connections per pool to be used by the C(infoblox_client.Connector) object.
|
|
||||||
- This is passed as-is to the underlying C(requests.adapters.HTTPAdapter) class.
|
|
||||||
type: int
|
|
||||||
default: 10
|
|
||||||
silent_ssl_warnings:
|
|
||||||
description:
|
|
||||||
- Disable C(urllib3) SSL warnings in the C(infoblox_client.Connector) object.
|
|
||||||
- This is passed as-is to the underlying C(requests.adapters.HTTPAdapter) class.
|
|
||||||
type: bool
|
|
||||||
default: true
|
|
||||||
notes:
|
notes:
|
||||||
- "This module must be run locally, which can be achieved by specifying C(connection: local)."
|
- "This module must be run locally, which can be achieved by specifying C(connection: local)."
|
||||||
- Please read the :ref:`nios_guide` for more detailed information on how to use Infoblox with Ansible.
|
- Please read the :ref:`nios_guide` for more detailed information on how to use Infoblox with Ansible.
|
||||||
|
|||||||
62
plugins/doc_fragments/postgres.py
Normal file
62
plugins/doc_fragments/postgres.py
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
|
||||||
|
class ModuleDocFragment(object):
|
||||||
|
# Postgres documentation fragment
|
||||||
|
DOCUMENTATION = r'''
|
||||||
|
options:
|
||||||
|
login_user:
|
||||||
|
description:
|
||||||
|
- The username used to authenticate with.
|
||||||
|
type: str
|
||||||
|
default: postgres
|
||||||
|
login_password:
|
||||||
|
description:
|
||||||
|
- The password used to authenticate with.
|
||||||
|
type: str
|
||||||
|
login_host:
|
||||||
|
description:
|
||||||
|
- Host running the database.
|
||||||
|
type: str
|
||||||
|
login_unix_socket:
|
||||||
|
description:
|
||||||
|
- Path to a Unix domain socket for local connections.
|
||||||
|
type: str
|
||||||
|
port:
|
||||||
|
description:
|
||||||
|
- Database port to connect to.
|
||||||
|
type: int
|
||||||
|
default: 5432
|
||||||
|
aliases: [ login_port ]
|
||||||
|
ssl_mode:
|
||||||
|
description:
|
||||||
|
- Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
|
||||||
|
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
|
||||||
|
- Default of C(prefer) matches libpq default.
|
||||||
|
type: str
|
||||||
|
default: prefer
|
||||||
|
choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
|
||||||
|
ca_cert:
|
||||||
|
description:
|
||||||
|
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
|
||||||
|
- If the file exists, the server's certificate will be verified to be signed by one of these authorities.
|
||||||
|
type: str
|
||||||
|
aliases: [ ssl_rootcert ]
|
||||||
|
notes:
|
||||||
|
- The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host.
|
||||||
|
- To avoid "Peer authentication failed for user postgres" error,
|
||||||
|
use postgres user as a I(become_user).
|
||||||
|
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
|
||||||
|
ensure that psycopg2 is installed on the host before using this module.
|
||||||
|
- If the remote host is the PostgreSQL server (which is the default case), then
|
||||||
|
PostgreSQL must also be installed on the remote host.
|
||||||
|
- For Ubuntu-based systems, install the postgresql, libpq-dev, and python-psycopg2 packages
|
||||||
|
on the remote host before using this module.
|
||||||
|
- The ca_cert parameter requires at least Postgres version 8.4 and I(psycopg2) version 2.4.3.
|
||||||
|
requirements: [ psycopg2 ]
|
||||||
|
'''
|
||||||
@@ -42,23 +42,4 @@ options:
|
|||||||
type: bool
|
type: bool
|
||||||
default: no
|
default: no
|
||||||
requirements: [ "proxmoxer", "requests" ]
|
requirements: [ "proxmoxer", "requests" ]
|
||||||
'''
|
|
||||||
|
|
||||||
SELECTION = r'''
|
|
||||||
options:
|
|
||||||
vmid:
|
|
||||||
description:
|
|
||||||
- Specifies the instance ID.
|
|
||||||
- If not set the next available ID will be fetched from ProxmoxAPI.
|
|
||||||
type: int
|
|
||||||
node:
|
|
||||||
description:
|
|
||||||
- Proxmox VE node on which to operate.
|
|
||||||
- Only required for I(state=present).
|
|
||||||
- For every other states it will be autodiscovered.
|
|
||||||
type: str
|
|
||||||
pool:
|
|
||||||
description:
|
|
||||||
- Add the new VM to the specified pool.
|
|
||||||
type: str
|
|
||||||
'''
|
'''
|
||||||
|
|||||||
@@ -1,47 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright (c) 2020, Vladimir Botka <vbotka@gmail.com>
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
from ansible.errors import AnsibleError, AnsibleFilterError
|
|
||||||
from ansible.module_utils.six import string_types
|
|
||||||
from ansible.module_utils.common._collections_compat import Mapping, Sequence
|
|
||||||
from collections import defaultdict
|
|
||||||
from operator import itemgetter
|
|
||||||
|
|
||||||
|
|
||||||
def lists_mergeby(l1, l2, index):
|
|
||||||
''' merge lists by attribute index. Example:
|
|
||||||
- debug: msg="{{ l1|community.general.lists_mergeby(l2, 'index')|list }}" '''
|
|
||||||
|
|
||||||
if not isinstance(l1, Sequence):
|
|
||||||
raise AnsibleFilterError('First argument for community.general.lists_mergeby must be list. %s is %s' %
|
|
||||||
(l1, type(l1)))
|
|
||||||
|
|
||||||
if not isinstance(l2, Sequence):
|
|
||||||
raise AnsibleFilterError('Second argument for community.general.lists_mergeby must be list. %s is %s' %
|
|
||||||
(l2, type(l2)))
|
|
||||||
|
|
||||||
if not isinstance(index, string_types):
|
|
||||||
raise AnsibleFilterError('Third argument for community.general.lists_mergeby must be string. %s is %s' %
|
|
||||||
(index, type(index)))
|
|
||||||
|
|
||||||
d = defaultdict(dict)
|
|
||||||
for l in (l1, l2):
|
|
||||||
for elem in l:
|
|
||||||
if not isinstance(elem, Mapping):
|
|
||||||
raise AnsibleFilterError('Elements of list arguments for lists_mergeby must be dictionaries. Found {0!r}.'.format(elem))
|
|
||||||
if index in elem.keys():
|
|
||||||
d[elem[index]].update(elem)
|
|
||||||
return sorted(d.values(), key=itemgetter(index))
|
|
||||||
|
|
||||||
|
|
||||||
class FilterModule(object):
|
|
||||||
''' Ansible list filters '''
|
|
||||||
|
|
||||||
def filters(self):
|
|
||||||
return {
|
|
||||||
'lists_mergeby': lists_mergeby,
|
|
||||||
}
|
|
||||||
@@ -8,6 +8,7 @@ __metaclass__ = type
|
|||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Orion Poplawski (@opoplawski)
|
author: Orion Poplawski (@opoplawski)
|
||||||
name: cobbler
|
name: cobbler
|
||||||
|
plugin_type: inventory
|
||||||
short_description: Cobbler inventory source
|
short_description: Cobbler inventory source
|
||||||
version_added: 1.0.0
|
version_added: 1.0.0
|
||||||
description:
|
description:
|
||||||
|
|||||||
272
plugins/inventory/docker_machine.py
Normal file
272
plugins/inventory/docker_machine.py
Normal file
@@ -0,0 +1,272 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# Copyright (c) 2019, Ximon Eighteen <ximon.eighteen@gmail.com>
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
name: docker_machine
|
||||||
|
plugin_type: inventory
|
||||||
|
author: Ximon Eighteen (@ximon18)
|
||||||
|
short_description: Docker Machine inventory source
|
||||||
|
requirements:
|
||||||
|
- L(Docker Machine,https://docs.docker.com/machine/)
|
||||||
|
extends_documentation_fragment:
|
||||||
|
- constructed
|
||||||
|
description:
|
||||||
|
- Get inventory hosts from Docker Machine.
|
||||||
|
- Uses a YAML configuration file that ends with docker_machine.(yml|yaml).
|
||||||
|
- The plugin sets standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key).
|
||||||
|
- The plugin stores the Docker Machine 'env' output variables in I(dm_) prefixed host variables.
|
||||||
|
|
||||||
|
options:
|
||||||
|
plugin:
|
||||||
|
description: token that ensures this is a source file for the C(docker_machine) plugin.
|
||||||
|
required: yes
|
||||||
|
choices: ['docker_machine', 'community.general.docker_machine']
|
||||||
|
daemon_env:
|
||||||
|
description:
|
||||||
|
- Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched.
|
||||||
|
- With C(require) and C(require-silently), fetch them and skip any host for which they cannot be fetched.
|
||||||
|
A warning will be issued for any skipped host if the choice is C(require).
|
||||||
|
- With C(optional) and C(optional-silently), fetch them and not skip hosts for which they cannot be fetched.
|
||||||
|
A warning will be issued for hosts where they cannot be fetched if the choice is C(optional).
|
||||||
|
- With C(skip), do not attempt to fetch the docker daemon connection environment variables.
|
||||||
|
- If fetched successfully, the variables will be prefixed with I(dm_) and stored as host variables.
|
||||||
|
type: str
|
||||||
|
choices:
|
||||||
|
- require
|
||||||
|
- require-silently
|
||||||
|
- optional
|
||||||
|
- optional-silently
|
||||||
|
- skip
|
||||||
|
default: require
|
||||||
|
running_required:
|
||||||
|
description: when true, hosts which Docker Machine indicates are in a state other than C(running) will be skipped.
|
||||||
|
type: bool
|
||||||
|
default: yes
|
||||||
|
verbose_output:
|
||||||
|
description: when true, include all available nodes metadata (e.g. Image, Region, Size) as a JSON object named C(docker_machine_node_attributes).
|
||||||
|
type: bool
|
||||||
|
default: yes
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
# Minimal example
|
||||||
|
plugin: community.general.docker_machine
|
||||||
|
|
||||||
|
# Example using constructed features to create a group per Docker Machine driver
|
||||||
|
# (https://docs.docker.com/machine/drivers/), e.g.:
|
||||||
|
# $ docker-machine create --driver digitalocean ... mymachine
|
||||||
|
# $ ansible-inventory -i ./path/to/docker-machine.yml --host=mymachine
|
||||||
|
# {
|
||||||
|
# ...
|
||||||
|
# "digitalocean": {
|
||||||
|
# "hosts": [
|
||||||
|
# "mymachine"
|
||||||
|
# ]
|
||||||
|
# ...
|
||||||
|
# }
|
||||||
|
strict: no
|
||||||
|
keyed_groups:
|
||||||
|
- separator: ''
|
||||||
|
key: docker_machine_node_attributes.DriverName
|
||||||
|
|
||||||
|
# Example grouping hosts by Digital Machine tag
|
||||||
|
strict: no
|
||||||
|
keyed_groups:
|
||||||
|
- prefix: tag
|
||||||
|
key: 'dm_tags'
|
||||||
|
|
||||||
|
# Example using compose to override the default SSH behaviour of asking the user to accept the remote host key
|
||||||
|
compose:
|
||||||
|
ansible_ssh_common_args: '"-o StrictHostKeyChecking=accept-new"'
|
||||||
|
'''
|
||||||
|
|
||||||
|
from ansible.errors import AnsibleError
|
||||||
|
from ansible.module_utils._text import to_native
|
||||||
|
from ansible.module_utils._text import to_text
|
||||||
|
from ansible.module_utils.common.process import get_bin_path
|
||||||
|
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
|
||||||
|
from ansible.utils.display import Display
|
||||||
|
|
||||||
|
import json
|
||||||
|
import re
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
display = Display()
|
||||||
|
|
||||||
|
|
||||||
|
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||||
|
''' Host inventory parser for ansible using Docker machine as source. '''
|
||||||
|
|
||||||
|
NAME = 'community.general.docker_machine'
|
||||||
|
|
||||||
|
DOCKER_MACHINE_PATH = None
|
||||||
|
|
||||||
|
def _run_command(self, args):
    '''Run the docker-machine binary with ``args`` and return its stripped text output.

    The binary's location is resolved once via get_bin_path() and cached on
    the class. Raises AnsibleError when the binary cannot be found and
    re-raises subprocess.CalledProcessError when the command exits non-zero
    (after logging a warning).
    '''
    if not self.DOCKER_MACHINE_PATH:
        try:
            self.DOCKER_MACHINE_PATH = get_bin_path('docker-machine')
        except ValueError as e:
            raise AnsibleError(to_native(e))

    command = [self.DOCKER_MACHINE_PATH] + list(args)
    display.debug('Executing command {0}'.format(command))
    try:
        result = subprocess.check_output(command)
    except subprocess.CalledProcessError as e:
        display.warning('Exception {0} caught while executing command {1}, this was the original exception: {2}'.format(type(e).__name__, command, e))
        raise e

    return to_text(result).strip()
|
||||||
|
|
||||||
|
def _get_docker_daemon_variables(self, machine_name):
    '''
    Capture settings from Docker Machine that would be needed to connect to the remote Docker daemon installed on
    the Docker Machine remote host. Note: passing '--shell=sh' is a workaround for 'Error: Unknown shell'.

    :param machine_name: name of the Docker Machine host to query
    :returns: list of ``(name, value)`` tuples for each DOCKER_xxx variable
        found, or an empty list when the 'env' subcommand fails
    '''
    try:
        env_lines = self._run_command(['env', '--shell=sh', machine_name]).splitlines()
    except subprocess.CalledProcessError:
        # This can happen when the machine is created but provisioning is incomplete
        return []

    # example output of docker-machine env --shell=sh:
    #   export DOCKER_TLS_VERIFY="1"
    #   export DOCKER_HOST="tcp://134.209.204.160:2376"
    #   export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator"
    #   export DOCKER_MACHINE_NAME="routinator"
    #   # Run this command to configure your shell:
    #   # eval $(docker-machine env --shell=bash routinator)

    # capture any of the DOCKER_xxx variables that were output and create Ansible host vars
    # with the same name and value but with a dm_ name prefix.
    # Named 'env_vars' (not 'vars') so the builtin of that name is not shadowed.
    env_vars = []
    for line in env_lines:
        match = re.search('(DOCKER_[^=]+)="([^"]+)"', line)
        if match:
            env_vars.append((match.group(1), match.group(2)))

    return env_vars
|
||||||
|
|
||||||
|
def _get_machine_names(self):
    '''Return the names of the Docker Machine hosts known to docker-machine.

    When the 'running_required' option is set, machines not in the Running
    state are filtered out server-side (we probably can't do anything useful
    with them). Returns an empty list when the 'ls' subcommand fails.
    '''
    command = ['ls', '-q']
    if self.get_option('running_required'):
        command.extend(['--filter', 'state=Running'])

    try:
        output = self._run_command(command)
    except subprocess.CalledProcessError:
        return []

    return output.splitlines()
|
||||||
|
|
||||||
|
def _inspect_docker_machine_host(self, node):
    '''Return the parsed JSON output of `docker-machine inspect` for ``node``.

    :param node: name of the Docker Machine host to inspect
    :returns: dict of machine attributes, or None when the command fails
    '''
    try:
        # Use the 'node' parameter; the original ignored it and read
        # self.node, silently coupling this helper to its caller.
        inspect_lines = self._run_command(['inspect', node])
    except subprocess.CalledProcessError:
        return None

    return json.loads(inspect_lines)
|
||||||
|
|
||||||
|
def _ip_addr_docker_machine_host(self, node):
    '''Return the IP address reported by `docker-machine ip` for ``node``.

    :param node: name of the Docker Machine host
    :returns: the IP address as text, or None when the command fails
    '''
    try:
        # Use the 'node' parameter; the original ignored it and read
        # self.node, silently coupling this helper to its caller.
        ip_addr = self._run_command(['ip', node])
    except subprocess.CalledProcessError:
        return None

    return ip_addr
|
||||||
|
|
||||||
|
def _should_skip_host(self, machine_name, env_var_tuples, daemon_env):
    '''Decide whether a host must be skipped because its daemon env vars are missing.

    When ``env_var_tuples`` is empty, returns True (skip) for the 'require'
    policies and False (keep) for the 'optional' policies, warning for the
    non-silent variants. When env vars were fetched, returns None (falsy,
    i.e. keep), matching the original implicit return.
    '''
    if env_var_tuples:
        # Vars were fetched successfully; nothing to decide.
        return None

    warning_prefix = 'Unable to fetch Docker daemon env vars from Docker Machine for host {0}'.format(machine_name)
    if daemon_env in ('require', 'require-silently'):
        if daemon_env == 'require':
            display.warning('{0}: host will be skipped'.format(warning_prefix))
        return True

    # 'optional', 'optional-silently'
    if daemon_env == 'optional':
        display.warning('{0}: host will lack dm_DOCKER_xxx variables'.format(warning_prefix))
    return False
|
||||||
|
|
||||||
|
def _populate(self):
    '''Build the inventory from `docker-machine ls` / `inspect` / `env` output.'''
    daemon_env = self.get_option('daemon_env')
    try:
        # Loop variables are kept on self, as sibling helpers may read them.
        for self.node in self._get_machine_names():
            self.node_attrs = self._inspect_docker_machine_host(self.node)
            if not self.node_attrs:
                continue

            machine_name = self.node_attrs['Driver']['MachineName']

            # query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands
            # that could be used to set environment variables to influence a local Docker client:
            if daemon_env == 'skip':
                env_var_tuples = []
            else:
                env_var_tuples = self._get_docker_daemon_variables(machine_name)
            if self._should_skip_host(machine_name, env_var_tuples, daemon_env):
                continue

            # add an entry in the inventory for this host
            self.inventory.add_host(machine_name)

            # check for valid ip address from inspect output, else explicitly use ip command to find host ip address
            # this works around an issue seen with Google Compute Platform where the IP address was not available
            # via the 'inspect' subcommand but was via the 'ip' subcommand.
            driver = self.node_attrs['Driver']
            ip_addr = driver['IPAddress'] or self._ip_addr_docker_machine_host(self.node)

            # set standard Ansible remote host connection settings to details captured from `docker-machine`
            # see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
            self.inventory.set_variable(machine_name, 'ansible_host', ip_addr)
            self.inventory.set_variable(machine_name, 'ansible_port', driver['SSHPort'])
            self.inventory.set_variable(machine_name, 'ansible_user', driver['SSHUser'])
            self.inventory.set_variable(machine_name, 'ansible_ssh_private_key_file', driver['SSHKeyPath'])

            # set variables based on Docker Machine tags
            self.inventory.set_variable(machine_name, 'dm_tags', driver.get('Tags') or '')

            # set variables based on Docker Machine env variables
            for var_name, var_value in env_var_tuples:
                self.inventory.set_variable(machine_name, 'dm_{0}'.format(var_name), var_value)

            if self.get_option('verbose_output'):
                self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', self.node_attrs)

            # Use constructed if applicable
            strict = self.get_option('strict')

            # Composed variables
            self._set_composite_vars(self.get_option('compose'), self.node_attrs, machine_name, strict=strict)

            # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
            self._add_host_to_composed_groups(self.get_option('groups'), self.node_attrs, machine_name, strict=strict)

            # Create groups based on variable values and add the corresponding hosts to it
            self._add_host_to_keyed_groups(self.get_option('keyed_groups'), self.node_attrs, machine_name, strict=strict)

    except Exception as e:
        raise AnsibleError('Unable to fetch hosts from Docker Machine, this was the original exception: %s' %
                           to_native(e), orig_exc=e)
|
||||||
|
|
||||||
|
def verify_file(self, path):
    """Return the possibility of a file being consumable by this plugin."""
    # Base-class check first, then require the docker_machine.(yml|yaml) suffix.
    acceptable = super(InventoryModule, self).verify_file(path)
    return acceptable and path.endswith(('docker_machine.yaml', 'docker_machine.yml'))
|
||||||
|
|
||||||
|
def parse(self, inventory, loader, path, cache=True):
    '''Parse the inventory source at ``path`` and populate the inventory.'''
    super(InventoryModule, self).parse(inventory, loader, path, cache)
    # Load and validate the plugin's YAML configuration, then build hosts.
    self._read_config_data(path)
    self._populate()
|
||||||
255
plugins/inventory/docker_swarm.py
Normal file
255
plugins/inventory/docker_swarm.py
Normal file
@@ -0,0 +1,255 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
|
||||||
|
# Copyright (c) 2018 Ansible Project
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
name: docker_swarm
|
||||||
|
plugin_type: inventory
|
||||||
|
author:
|
||||||
|
- Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
|
||||||
|
short_description: Ansible dynamic inventory plugin for Docker swarm nodes.
|
||||||
|
requirements:
|
||||||
|
- python >= 2.7
|
||||||
|
- L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
|
||||||
|
extends_documentation_fragment:
|
||||||
|
- constructed
|
||||||
|
description:
|
||||||
|
- Reads inventories from the Docker swarm API.
|
||||||
|
- Uses a YAML configuration file docker_swarm.[yml|yaml].
|
||||||
|
- "The plugin returns following groups of swarm nodes: I(all) - all hosts; I(workers) - all worker nodes;
|
||||||
|
I(managers) - all manager nodes; I(leader) - the swarm leader node;
|
||||||
|
I(nonleaders) - all nodes except the swarm leader."
|
||||||
|
options:
|
||||||
|
plugin:
|
||||||
|
description: The name of this plugin, it should always be set to C(community.general.docker_swarm)
|
||||||
|
for this plugin to recognize it as it's own.
|
||||||
|
type: str
|
||||||
|
required: true
|
||||||
|
choices: [ docker_swarm, community.general.docker_swarm ]
|
||||||
|
docker_host:
|
||||||
|
description:
|
||||||
|
- Socket of a Docker swarm manager node (C(tcp), C(unix)).
|
||||||
|
- "Use C(unix://var/run/docker.sock) to connect via local socket."
|
||||||
|
type: str
|
||||||
|
required: true
|
||||||
|
aliases: [ docker_url ]
|
||||||
|
verbose_output:
|
||||||
|
description: Toggle to (not) include all available nodes metadata (e.g. C(Platform), C(Architecture), C(OS),
|
||||||
|
C(EngineVersion))
|
||||||
|
type: bool
|
||||||
|
default: yes
|
||||||
|
tls:
|
||||||
|
description: Connect using TLS without verifying the authenticity of the Docker host server.
|
||||||
|
type: bool
|
||||||
|
default: no
|
||||||
|
validate_certs:
|
||||||
|
description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker
|
||||||
|
host server.
|
||||||
|
type: bool
|
||||||
|
default: no
|
||||||
|
aliases: [ tls_verify ]
|
||||||
|
client_key:
|
||||||
|
description: Path to the client's TLS key file.
|
||||||
|
type: path
|
||||||
|
aliases: [ tls_client_key, key_path ]
|
||||||
|
ca_cert:
|
||||||
|
description: Use a CA certificate when performing server verification by providing the path to a CA
|
||||||
|
certificate file.
|
||||||
|
type: path
|
||||||
|
aliases: [ tls_ca_cert, cacert_path ]
|
||||||
|
client_cert:
|
||||||
|
description: Path to the client's TLS certificate file.
|
||||||
|
type: path
|
||||||
|
aliases: [ tls_client_cert, cert_path ]
|
||||||
|
tls_hostname:
|
||||||
|
description: When verifying the authenticity of the Docker host server, provide the expected name of
|
||||||
|
the server.
|
||||||
|
type: str
|
||||||
|
ssl_version:
|
||||||
|
description: Provide a valid SSL version number. Default value determined by ssl.py module.
|
||||||
|
type: str
|
||||||
|
api_version:
|
||||||
|
description:
|
||||||
|
- The version of the Docker API running on the Docker Host.
|
||||||
|
- Defaults to the latest version of the API supported by docker-py.
|
||||||
|
type: str
|
||||||
|
aliases: [ docker_api_version ]
|
||||||
|
timeout:
|
||||||
|
description:
|
||||||
|
- The maximum amount of time in seconds to wait on a response from the API.
|
||||||
|
- If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT)
|
||||||
|
will be used instead. If the environment variable is not set, the default value will be used.
|
||||||
|
type: int
|
||||||
|
default: 60
|
||||||
|
aliases: [ time_out ]
|
||||||
|
include_host_uri:
|
||||||
|
description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the
|
||||||
|
swarm leader in format of C(tcp://172.16.0.1:2376). This value may be used without additional
|
||||||
|
modification as value of option I(docker_host) in Docker Swarm modules when connecting via API.
|
||||||
|
The port always defaults to C(2376).
|
||||||
|
type: bool
|
||||||
|
default: no
|
||||||
|
include_host_uri_port:
|
||||||
|
description: Override the detected port number included in I(ansible_host_uri)
|
||||||
|
type: int
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
# Minimal example using local docker
|
||||||
|
plugin: community.general.docker_swarm
|
||||||
|
docker_host: unix://var/run/docker.sock
|
||||||
|
|
||||||
|
# Minimal example using remote docker
|
||||||
|
plugin: community.general.docker_swarm
|
||||||
|
docker_host: tcp://my-docker-host:2375
|
||||||
|
|
||||||
|
# Example using remote docker with unverified TLS
|
||||||
|
plugin: community.general.docker_swarm
|
||||||
|
docker_host: tcp://my-docker-host:2376
|
||||||
|
tls: yes
|
||||||
|
|
||||||
|
# Example using remote docker with verified TLS and client certificate verification
|
||||||
|
plugin: community.general.docker_swarm
|
||||||
|
docker_host: tcp://my-docker-host:2376
|
||||||
|
validate_certs: yes
|
||||||
|
ca_cert: /somewhere/ca.pem
|
||||||
|
client_key: /somewhere/key.pem
|
||||||
|
client_cert: /somewhere/cert.pem
|
||||||
|
|
||||||
|
# Example using constructed features to create groups and set ansible_host
|
||||||
|
plugin: community.general.docker_swarm
|
||||||
|
docker_host: tcp://my-docker-host:2375
|
||||||
|
strict: False
|
||||||
|
keyed_groups:
|
||||||
|
# add e.g. x86_64 hosts to an arch_x86_64 group
|
||||||
|
- prefix: arch
|
||||||
|
key: 'Description.Platform.Architecture'
|
||||||
|
# add e.g. linux hosts to an os_linux group
|
||||||
|
- prefix: os
|
||||||
|
key: 'Description.Platform.OS'
|
||||||
|
# create a group per node label
|
||||||
|
# e.g. a node labeled w/ "production" ends up in group "label_production"
|
||||||
|
# hint: labels containing special characters will be converted to safe names
|
||||||
|
- key: 'Spec.Labels'
|
||||||
|
prefix: label
|
||||||
|
'''
|
||||||
|
|
||||||
|
from ansible.errors import AnsibleError
|
||||||
|
from ansible.module_utils._text import to_native
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.docker.common import update_tls_hostname, get_connect_params
|
||||||
|
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||||
|
from ansible.parsing.utils.addresses import parse_address
|
||||||
|
|
||||||
|
try:
|
||||||
|
import docker
|
||||||
|
HAS_DOCKER = True
|
||||||
|
except ImportError:
|
||||||
|
HAS_DOCKER = False
|
||||||
|
|
||||||
|
|
||||||
|
class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||||
|
''' Host inventory parser for ansible using Docker swarm as source. '''
|
||||||
|
|
||||||
|
NAME = 'community.general.docker_swarm'
|
||||||
|
|
||||||
|
def _fail(self, msg):
    '''Abort inventory parsing by raising AnsibleError with ``msg``.

    Passed as the fail_function to get_connect_params().
    '''
    raise AnsibleError(msg)
|
||||||
|
|
||||||
|
def _populate(self):
    '''Connect to the Docker swarm API and add each swarm node to the inventory.

    Creates the groups all/manager/worker/leader/nonleaders, sets
    ansible_host (and optionally ansible_host_uri and the verbose node
    attributes) per node, and applies the constructed-inventory options.
    '''
    raw_params = dict(
        docker_host=self.get_option('docker_host'),
        tls=self.get_option('tls'),
        tls_verify=self.get_option('validate_certs'),
        key_path=self.get_option('client_key'),
        cacert_path=self.get_option('ca_cert'),
        cert_path=self.get_option('client_cert'),
        tls_hostname=self.get_option('tls_hostname'),
        api_version=self.get_option('api_version'),
        timeout=self.get_option('timeout'),
        ssl_version=self.get_option('ssl_version'),
        debug=None,
    )
    update_tls_hostname(raw_params)
    connect_params = get_connect_params(raw_params, fail_function=self._fail)
    self.client = docker.DockerClient(**connect_params)

    for group in ('all', 'manager', 'worker', 'leader', 'nonleaders'):
        self.inventory.add_group(group)

    if self.get_option('include_host_uri'):
        # Port advertised in ansible_host_uri: explicit override first,
        # otherwise 2376 when TLS is involved, 2375 for plain TCP.
        if self.get_option('include_host_uri_port'):
            host_uri_port = str(self.get_option('include_host_uri_port'))
        elif self.get_option('tls') or self.get_option('validate_certs'):
            host_uri_port = '2376'
        else:
            host_uri_port = '2375'

    try:
        self.nodes = self.client.nodes.list()
        for self.node in self.nodes:
            self.node_attrs = self.client.nodes.get(self.node.id).attrs
            node_id = self.node_attrs['ID']
            self.inventory.add_host(node_id)
            # 'Spec.Role' is either 'manager' or 'worker'.
            self.inventory.add_host(node_id, group=self.node_attrs['Spec']['Role'])
            self.inventory.set_variable(node_id, 'ansible_host',
                                        self.node_attrs['Status']['Addr'])
            if self.get_option('include_host_uri'):
                self.inventory.set_variable(node_id, 'ansible_host_uri',
                                            'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port)
            if self.get_option('verbose_output'):
                self.inventory.set_variable(node_id, 'docker_swarm_node_attributes', self.node_attrs)
            if 'ManagerStatus' in self.node_attrs:
                if self.node_attrs['ManagerStatus'].get('Leader'):
                    # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
                    # Check moby/moby#35437 for details
                    swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \
                        self.node_attrs['Status']['Addr']
                    if self.get_option('include_host_uri'):
                        self.inventory.set_variable(node_id, 'ansible_host_uri',
                                                    'tcp://' + swarm_leader_ip + ':' + host_uri_port)
                    self.inventory.set_variable(node_id, 'ansible_host', swarm_leader_ip)
                    self.inventory.add_host(node_id, group='leader')
                else:
                    self.inventory.add_host(node_id, group='nonleaders')
            else:
                self.inventory.add_host(node_id, group='nonleaders')
            # Use constructed if applicable
            strict = self.get_option('strict')
            # Composed variables
            self._set_composite_vars(self.get_option('compose'),
                                     self.node_attrs,
                                     node_id,
                                     strict=strict)
            # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
            self._add_host_to_composed_groups(self.get_option('groups'),
                                              self.node_attrs,
                                              node_id,
                                              strict=strict)
            # Create groups based on variable values and add the corresponding hosts to it
            self._add_host_to_keyed_groups(self.get_option('keyed_groups'),
                                           self.node_attrs,
                                           node_id,
                                           strict=strict)
    except Exception as e:
        raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
                           to_native(e))
|
||||||
|
|
||||||
|
def verify_file(self, path):
    """Return the possibility of a file being consumable by this plugin."""
    # Base-class check first, then require the docker_swarm.(yml|yaml) suffix.
    acceptable = super(InventoryModule, self).verify_file(path)
    return acceptable and path.endswith(('docker_swarm.yaml', 'docker_swarm.yml'))
|
||||||
|
|
||||||
|
def parse(self, inventory, loader, path, cache=True):
    '''Check requirements, read the configuration file and populate the inventory.'''
    # Fail early when the Docker SDK for Python could not be imported.
    if not HAS_DOCKER:
        raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
                           'https://github.com/docker/docker-py.')
    super(InventoryModule, self).parse(inventory, loader, path, cache)
    self._read_config_data(path)
    self._populate()
|
||||||
@@ -9,6 +9,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: gitlab_runners
|
name: gitlab_runners
|
||||||
|
plugin_type: inventory
|
||||||
author:
|
author:
|
||||||
- Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
|
- Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
|
||||||
short_description: Ansible dynamic inventory plugin for GitLab runners.
|
short_description: Ansible dynamic inventory plugin for GitLab runners.
|
||||||
|
|||||||
256
plugins/inventory/kubevirt.py
Normal file
256
plugins/inventory/kubevirt.py
Normal file
@@ -0,0 +1,256 @@
|
|||||||
|
# Copyright (c) 2018 Ansible Project
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
name: kubevirt
|
||||||
|
plugin_type: inventory
|
||||||
|
author:
|
||||||
|
- KubeVirt Team (@kubevirt)
|
||||||
|
|
||||||
|
short_description: KubeVirt inventory source
|
||||||
|
extends_documentation_fragment:
|
||||||
|
- inventory_cache
|
||||||
|
- constructed
|
||||||
|
description:
|
||||||
|
- Fetch running VirtualMachines for one or more namespaces.
|
||||||
|
- Groups by namespace, namespace_vms and labels.
|
||||||
|
- Uses kubevirt.(yml|yaml) YAML configuration file to set parameter values.
|
||||||
|
|
||||||
|
options:
|
||||||
|
plugin:
|
||||||
|
description: token that ensures this is a source file for the 'kubevirt' plugin.
|
||||||
|
required: True
|
||||||
|
choices: ['kubevirt', 'community.general.kubevirt']
|
||||||
|
type: str
|
||||||
|
host_format:
|
||||||
|
description:
|
||||||
|
- Specify the format of the host in the inventory group.
|
||||||
|
default: "{namespace}-{name}-{uid}"
|
||||||
|
connections:
|
||||||
|
type: list
|
||||||
|
description:
|
||||||
|
- Optional list of cluster connection settings. If no connections are provided, the default
|
||||||
|
I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces
|
||||||
|
the active user is authorized to access.
|
||||||
|
suboptions:
|
||||||
|
name:
|
||||||
|
description:
|
||||||
|
- Optional name to assign to the cluster. If not provided, a name is constructed from the server
|
||||||
|
and port.
|
||||||
|
type: str
|
||||||
|
kubeconfig:
|
||||||
|
description:
|
||||||
|
- Path to an existing Kubernetes config file. If not provided, and no other connection
|
||||||
|
options are provided, the OpenShift client will attempt to load the default
|
||||||
|
configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG
|
||||||
|
environment variable.
|
||||||
|
type: str
|
||||||
|
context:
|
||||||
|
description:
|
||||||
|
- The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
|
||||||
|
variable.
|
||||||
|
type: str
|
||||||
|
host:
|
||||||
|
description:
|
||||||
|
- Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
|
||||||
|
type: str
|
||||||
|
api_key:
|
||||||
|
description:
|
||||||
|
- Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment
|
||||||
|
variable.
|
||||||
|
type: str
|
||||||
|
username:
|
||||||
|
description:
|
||||||
|
- Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME
|
||||||
|
environment variable.
|
||||||
|
type: str
|
||||||
|
password:
|
||||||
|
description:
|
||||||
|
- Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD
|
||||||
|
environment variable.
|
||||||
|
type: str
|
||||||
|
cert_file:
|
||||||
|
description:
|
||||||
|
- Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
|
||||||
|
environment variable.
|
||||||
|
type: str
|
||||||
|
key_file:
|
||||||
|
description:
|
||||||
|
- Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_HOST
|
||||||
|
environment variable.
|
||||||
|
type: str
|
||||||
|
ssl_ca_cert:
|
||||||
|
description:
|
||||||
|
- Path to a CA certificate used to authenticate with the API. Can also be specified via
|
||||||
|
K8S_AUTH_SSL_CA_CERT environment variable.
|
||||||
|
type: str
|
||||||
|
verify_ssl:
|
||||||
|
description:
|
||||||
|
- "Whether or not to verify the API server's SSL certificates. Can also be specified via
|
||||||
|
K8S_AUTH_VERIFY_SSL environment variable."
|
||||||
|
type: bool
|
||||||
|
namespaces:
|
||||||
|
description:
|
||||||
|
- List of namespaces. If not specified, will fetch all virtual machines for all namespaces user is authorized
|
||||||
|
to access.
|
||||||
|
type: list
|
||||||
|
network_name:
|
||||||
|
description:
|
||||||
|
- In case of multiple network attached to virtual machine, define which interface should be returned as primary IP
|
||||||
|
address.
|
||||||
|
type: str
|
||||||
|
aliases: [ interface_name ]
|
||||||
|
api_version:
|
||||||
|
description:
|
||||||
|
- "Specify the KubeVirt API version."
|
||||||
|
type: str
|
||||||
|
annotation_variable:
|
||||||
|
description:
|
||||||
|
- "Specify the name of the annotation which provides data, which should be used as inventory host variables."
|
||||||
|
- "Note, that the value in ansible annotations should be json."
|
||||||
|
type: str
|
||||||
|
default: 'ansible'
|
||||||
|
requirements:
|
||||||
|
- "openshift >= 0.6"
|
||||||
|
- "PyYAML >= 3.11"
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
# File must be named kubevirt.yaml or kubevirt.yml
|
||||||
|
|
||||||
|
# Authenticate with token, and return all virtual machines for all namespaces
|
||||||
|
plugin: community.general.kubevirt
|
||||||
|
connections:
|
||||||
|
- host: https://kubevirt.io
|
||||||
|
token: xxxxxxxxxxxxxxxx
|
||||||
|
ssl_verify: false
|
||||||
|
|
||||||
|
# Use default config (~/.kube/config) file and active context, and return vms with interfaces
|
||||||
|
# connected to network myovsnetwork and from namespace vms
|
||||||
|
plugin: community.general.kubevirt
|
||||||
|
connections:
|
||||||
|
- namespaces:
|
||||||
|
- vms
|
||||||
|
network_name: myovsnetwork
|
||||||
|
'''
|
||||||
|
|
||||||
|
import json
|
||||||
|
|
||||||
|
from ansible_collections.community.kubernetes.plugins.inventory.k8s import K8sInventoryException, InventoryModule as K8sInventoryModule, format_dynamic_api_exc
|
||||||
|
|
||||||
|
try:
|
||||||
|
from openshift.dynamic.exceptions import DynamicApiError
|
||||||
|
except ImportError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
API_VERSION = 'kubevirt.io/v1alpha3'
|
||||||
|
|
||||||
|
|
||||||
|
class InventoryModule(K8sInventoryModule):
|
||||||
|
NAME = 'community.general.kubevirt'
|
||||||
|
|
||||||
|
def setup(self, config_data, cache, cache_key):
|
||||||
|
self.config_data = config_data
|
||||||
|
super(InventoryModule, self).setup(config_data, cache, cache_key)
|
||||||
|
|
||||||
|
def fetch_objects(self, connections):
|
||||||
|
client = self.get_api_client()
|
||||||
|
vm_format = self.config_data.get('host_format', '{namespace}-{name}-{uid}')
|
||||||
|
|
||||||
|
if connections:
|
||||||
|
for connection in connections:
|
||||||
|
client = self.get_api_client(**connection)
|
||||||
|
name = connection.get('name', self.get_default_host_name(client.configuration.host))
|
||||||
|
if connection.get('namespaces'):
|
||||||
|
namespaces = connection['namespaces']
|
||||||
|
else:
|
||||||
|
namespaces = self.get_available_namespaces(client)
|
||||||
|
interface_name = connection.get('network_name')
|
||||||
|
api_version = connection.get('api_version', API_VERSION)
|
||||||
|
annotation_variable = connection.get('annotation_variable', 'ansible')
|
||||||
|
for namespace in namespaces:
|
||||||
|
self.get_vms_for_namespace(client, name, namespace, vm_format, interface_name, api_version, annotation_variable)
|
||||||
|
else:
|
||||||
|
name = self.get_default_host_name(client.configuration.host)
|
||||||
|
namespaces = self.get_available_namespaces(client)
|
||||||
|
for namespace in namespaces:
|
||||||
|
self.get_vms_for_namespace(client, name, namespace, vm_format, None, api_version, annotation_variable)
|
||||||
|
|
||||||
|
def get_vms_for_namespace(self, client, name, namespace, name_format, interface_name=None, api_version=None, annotation_variable=None):
|
||||||
|
v1_vm = client.resources.get(api_version=api_version, kind='VirtualMachineInstance')
|
||||||
|
try:
|
||||||
|
obj = v1_vm.get(namespace=namespace)
|
||||||
|
except DynamicApiError as exc:
|
||||||
|
self.display.debug(exc)
|
||||||
|
raise K8sInventoryException('Error fetching Virtual Machines list: %s' % format_dynamic_api_exc(exc))
|
||||||
|
|
||||||
|
namespace_group = 'namespace_{0}'.format(namespace)
|
||||||
|
namespace_vms_group = '{0}_vms'.format(namespace_group)
|
||||||
|
|
||||||
|
name = self._sanitize_group_name(name)
|
||||||
|
namespace_group = self._sanitize_group_name(namespace_group)
|
||||||
|
namespace_vms_group = self._sanitize_group_name(namespace_vms_group)
|
||||||
|
self.inventory.add_group(name)
|
||||||
|
self.inventory.add_group(namespace_group)
|
||||||
|
self.inventory.add_child(name, namespace_group)
|
||||||
|
self.inventory.add_group(namespace_vms_group)
|
||||||
|
self.inventory.add_child(namespace_group, namespace_vms_group)
|
||||||
|
for vm in obj.items:
|
||||||
|
if not (vm.status and vm.status.interfaces):
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Find interface by its name:
|
||||||
|
if interface_name is None:
|
||||||
|
interface = vm.status.interfaces[0]
|
||||||
|
else:
|
||||||
|
interface = next(
|
||||||
|
(i for i in vm.status.interfaces if i.name == interface_name),
|
||||||
|
None
|
||||||
|
)
|
||||||
|
|
||||||
|
# If interface is not found or IP address is not reported skip this VM:
|
||||||
|
if interface is None or interface.ipAddress is None:
|
||||||
|
continue
|
||||||
|
|
||||||
|
vm_name = name_format.format(namespace=vm.metadata.namespace, name=vm.metadata.name, uid=vm.metadata.uid)
|
||||||
|
vm_ip = interface.ipAddress
|
||||||
|
vm_annotations = {} if not vm.metadata.annotations else dict(vm.metadata.annotations)
|
||||||
|
|
||||||
|
self.inventory.add_host(vm_name)
|
||||||
|
|
||||||
|
if vm.metadata.labels:
|
||||||
|
# create a group for each label_value
|
||||||
|
for key, value in vm.metadata.labels:
|
||||||
|
group_name = 'label_{0}_{1}'.format(key, value)
|
||||||
|
group_name = self._sanitize_group_name(group_name)
|
||||||
|
self.inventory.add_group(group_name)
|
||||||
|
self.inventory.add_child(group_name, vm_name)
|
||||||
|
vm_labels = dict(vm.metadata.labels)
|
||||||
|
else:
|
||||||
|
vm_labels = {}
|
||||||
|
|
||||||
|
self.inventory.add_child(namespace_vms_group, vm_name)
|
||||||
|
|
||||||
|
# add hostvars
|
||||||
|
self.inventory.set_variable(vm_name, 'ansible_host', vm_ip)
|
||||||
|
self.inventory.set_variable(vm_name, 'labels', vm_labels)
|
||||||
|
self.inventory.set_variable(vm_name, 'annotations', vm_annotations)
|
||||||
|
self.inventory.set_variable(vm_name, 'object_type', 'vm')
|
||||||
|
self.inventory.set_variable(vm_name, 'resource_version', vm.metadata.resourceVersion)
|
||||||
|
self.inventory.set_variable(vm_name, 'uid', vm.metadata.uid)
|
||||||
|
|
||||||
|
# Add all variables which are listed in 'ansible' annotation:
|
||||||
|
annotations_data = json.loads(vm_annotations.get(annotation_variable, "{}"))
|
||||||
|
for k, v in annotations_data.items():
|
||||||
|
self.inventory.set_variable(vm_name, k, v)
|
||||||
|
|
||||||
|
def verify_file(self, path):
|
||||||
|
if super(InventoryModule, self).verify_file(path):
|
||||||
|
if path.endswith(('kubevirt.yml', 'kubevirt.yaml')):
|
||||||
|
return True
|
||||||
|
return False
|
||||||
@@ -6,6 +6,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = r'''
|
DOCUMENTATION = r'''
|
||||||
name: linode
|
name: linode
|
||||||
|
plugin_type: inventory
|
||||||
author:
|
author:
|
||||||
- Luke Murphy (@decentral1se)
|
- Luke Murphy (@decentral1se)
|
||||||
short_description: Ansible dynamic inventory plugin for Linode.
|
short_description: Ansible dynamic inventory plugin for Linode.
|
||||||
@@ -16,10 +17,7 @@ DOCUMENTATION = r'''
|
|||||||
- Reads inventories from the Linode API v4.
|
- Reads inventories from the Linode API v4.
|
||||||
- Uses a YAML configuration file that ends with linode.(yml|yaml).
|
- Uses a YAML configuration file that ends with linode.(yml|yaml).
|
||||||
- Linode labels are used by default as the hostnames.
|
- Linode labels are used by default as the hostnames.
|
||||||
- The default inventory groups are built from groups (deprecated by
|
- The inventory groups are built from groups and not tags.
|
||||||
Linode) and not tags.
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- constructed
|
|
||||||
options:
|
options:
|
||||||
plugin:
|
plugin:
|
||||||
description: marks this as an instance of the 'linode' plugin
|
description: marks this as an instance of the 'linode' plugin
|
||||||
@@ -35,25 +33,11 @@ DOCUMENTATION = r'''
|
|||||||
default: []
|
default: []
|
||||||
type: list
|
type: list
|
||||||
required: false
|
required: false
|
||||||
tags:
|
|
||||||
description: Populate inventory only with instances which have at least one of the tags listed here.
|
|
||||||
default: []
|
|
||||||
type: list
|
|
||||||
reqired: false
|
|
||||||
version_added: 2.0.0
|
|
||||||
types:
|
types:
|
||||||
description: Populate inventory with instances with this type.
|
description: Populate inventory with instances with this type.
|
||||||
default: []
|
default: []
|
||||||
type: list
|
type: list
|
||||||
required: false
|
required: false
|
||||||
strict:
|
|
||||||
version_added: 2.0.0
|
|
||||||
compose:
|
|
||||||
version_added: 2.0.0
|
|
||||||
groups:
|
|
||||||
version_added: 2.0.0
|
|
||||||
keyed_groups:
|
|
||||||
version_added: 2.0.0
|
|
||||||
'''
|
'''
|
||||||
|
|
||||||
EXAMPLES = r'''
|
EXAMPLES = r'''
|
||||||
@@ -67,27 +51,13 @@ regions:
|
|||||||
- eu-west
|
- eu-west
|
||||||
types:
|
types:
|
||||||
- g5-standard-2
|
- g5-standard-2
|
||||||
|
|
||||||
# Example with keyed_groups, groups, and compose
|
|
||||||
plugin: community.general.linode
|
|
||||||
access_token: foobar
|
|
||||||
keyed_groups:
|
|
||||||
- key: tags
|
|
||||||
separator: ''
|
|
||||||
- key: region
|
|
||||||
prefix: region
|
|
||||||
groups:
|
|
||||||
webservers: "'web' in (tags|list)"
|
|
||||||
mailservers: "'mail' in (tags|list)"
|
|
||||||
compose:
|
|
||||||
ansible_port: 2222
|
|
||||||
'''
|
'''
|
||||||
|
|
||||||
import os
|
import os
|
||||||
|
|
||||||
from ansible.errors import AnsibleError, AnsibleParserError
|
from ansible.errors import AnsibleError, AnsibleParserError
|
||||||
from ansible.module_utils.six import string_types
|
from ansible.module_utils.six import string_types
|
||||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
from ansible.plugins.inventory import BaseInventoryPlugin
|
||||||
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -98,7 +68,7 @@ except ImportError:
|
|||||||
HAS_LINODE = False
|
HAS_LINODE = False
|
||||||
|
|
||||||
|
|
||||||
class InventoryModule(BaseInventoryPlugin, Constructable):
|
class InventoryModule(BaseInventoryPlugin):
|
||||||
|
|
||||||
NAME = 'community.general.linode'
|
NAME = 'community.general.linode'
|
||||||
|
|
||||||
@@ -141,7 +111,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
|||||||
for linode_group in self.linode_groups:
|
for linode_group in self.linode_groups:
|
||||||
self.inventory.add_group(linode_group)
|
self.inventory.add_group(linode_group)
|
||||||
|
|
||||||
def _filter_by_config(self, regions, types, tags):
|
def _filter_by_config(self, regions, types):
|
||||||
"""Filter instances by user specified configuration."""
|
"""Filter instances by user specified configuration."""
|
||||||
if regions:
|
if regions:
|
||||||
self.instances = [
|
self.instances = [
|
||||||
@@ -155,12 +125,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
|||||||
if instance.type.id in types
|
if instance.type.id in types
|
||||||
]
|
]
|
||||||
|
|
||||||
if tags:
|
|
||||||
self.instances = [
|
|
||||||
instance for instance in self.instances
|
|
||||||
if any(tag in instance.tags for tag in tags)
|
|
||||||
]
|
|
||||||
|
|
||||||
def _add_instances_to_groups(self):
|
def _add_instances_to_groups(self):
|
||||||
"""Add instance names to their dynamic inventory groups."""
|
"""Add instance names to their dynamic inventory groups."""
|
||||||
for instance in self.instances:
|
for instance in self.instances:
|
||||||
@@ -205,10 +169,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
|||||||
'type_to_be': list,
|
'type_to_be': list,
|
||||||
'value': config_data.get('types', [])
|
'value': config_data.get('types', [])
|
||||||
},
|
},
|
||||||
'tags': {
|
|
||||||
'type_to_be': list,
|
|
||||||
'value': config_data.get('tags', [])
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for name in options:
|
for name in options:
|
||||||
@@ -220,9 +180,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
|||||||
|
|
||||||
regions = options['regions']['value']
|
regions = options['regions']['value']
|
||||||
types = options['types']['value']
|
types = options['types']['value']
|
||||||
tags = options['tags']['value']
|
|
||||||
|
|
||||||
return regions, types, tags
|
return regions, types
|
||||||
|
|
||||||
def verify_file(self, path):
|
def verify_file(self, path):
|
||||||
"""Verify the Linode configuration file."""
|
"""Verify the Linode configuration file."""
|
||||||
@@ -244,27 +203,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
|||||||
|
|
||||||
self._get_instances_inventory()
|
self._get_instances_inventory()
|
||||||
|
|
||||||
strict = self.get_option('strict')
|
regions, types = self._get_query_options(config_data)
|
||||||
regions, types, tags = self._get_query_options(config_data)
|
self._filter_by_config(regions, types)
|
||||||
self._filter_by_config(regions, types, tags)
|
|
||||||
|
|
||||||
self._add_groups()
|
self._add_groups()
|
||||||
self._add_instances_to_groups()
|
self._add_instances_to_groups()
|
||||||
self._add_hostvars_for_instances()
|
self._add_hostvars_for_instances()
|
||||||
for instance in self.instances:
|
|
||||||
variables = self.inventory.get_host(instance.label).get_vars()
|
|
||||||
self._add_host_to_composed_groups(
|
|
||||||
self.get_option('groups'),
|
|
||||||
variables,
|
|
||||||
instance.label,
|
|
||||||
strict=strict)
|
|
||||||
self._add_host_to_keyed_groups(
|
|
||||||
self.get_option('keyed_groups'),
|
|
||||||
variables,
|
|
||||||
instance.label,
|
|
||||||
strict=strict)
|
|
||||||
self._set_composite_vars(
|
|
||||||
self.get_option('compose'),
|
|
||||||
variables,
|
|
||||||
instance.label,
|
|
||||||
strict=strict)
|
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ __metaclass__ = type
|
|||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: nmap
|
name: nmap
|
||||||
|
plugin_type: inventory
|
||||||
short_description: Uses nmap to find hosts to target
|
short_description: Uses nmap to find hosts to target
|
||||||
description:
|
description:
|
||||||
- Uses a YAML configuration file with a valid YAML extension.
|
- Uses a YAML configuration file with a valid YAML extension.
|
||||||
|
|||||||
@@ -2,15 +2,18 @@
|
|||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
|
||||||
|
|
||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = r'''
|
DOCUMENTATION = '''
|
||||||
name: online
|
name: online
|
||||||
|
plugin_type: inventory
|
||||||
author:
|
author:
|
||||||
- Remy Leone (@sieben)
|
- Remy Leone (@sieben)
|
||||||
short_description: Scaleway (previously Online SAS or Online.net) inventory source
|
short_description: Online inventory source
|
||||||
description:
|
description:
|
||||||
- Get inventory hosts from Scaleway (previously Online SAS or Online.net).
|
- Get inventory hosts from Online
|
||||||
options:
|
options:
|
||||||
plugin:
|
plugin:
|
||||||
description: token that ensures this is a source file for the 'online' plugin.
|
description: token that ensures this is a source file for the 'online' plugin.
|
||||||
@@ -42,7 +45,7 @@ DOCUMENTATION = r'''
|
|||||||
- rpn
|
- rpn
|
||||||
'''
|
'''
|
||||||
|
|
||||||
EXAMPLES = r'''
|
EXAMPLES = '''
|
||||||
# online_inventory.yml file in YAML format
|
# online_inventory.yml file in YAML format
|
||||||
# Example command line: ansible-inventory --list -i online_inventory.yml
|
# Example command line: ansible-inventory --list -i online_inventory.yml
|
||||||
|
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: proxmox
|
name: proxmox
|
||||||
|
plugin_type: inventory
|
||||||
short_description: Proxmox inventory source
|
short_description: Proxmox inventory source
|
||||||
version_added: "1.2.0"
|
version_added: "1.2.0"
|
||||||
author:
|
author:
|
||||||
@@ -27,32 +28,17 @@ DOCUMENTATION = '''
|
|||||||
choices: ['community.general.proxmox']
|
choices: ['community.general.proxmox']
|
||||||
type: str
|
type: str
|
||||||
url:
|
url:
|
||||||
description:
|
description: URL to Proxmox cluster.
|
||||||
- URL to Proxmox cluster.
|
|
||||||
- If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_URL) will be used instead.
|
|
||||||
default: 'http://localhost:8006'
|
default: 'http://localhost:8006'
|
||||||
type: str
|
type: str
|
||||||
env:
|
|
||||||
- name: PROXMOX_URL
|
|
||||||
version_added: 2.0.0
|
|
||||||
user:
|
user:
|
||||||
description:
|
description: Proxmox authentication user.
|
||||||
- Proxmox authentication user.
|
|
||||||
- If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_USER) will be used instead.
|
|
||||||
required: yes
|
required: yes
|
||||||
type: str
|
type: str
|
||||||
env:
|
|
||||||
- name: PROXMOX_USER
|
|
||||||
version_added: 2.0.0
|
|
||||||
password:
|
password:
|
||||||
description:
|
description: Proxmox authentication password.
|
||||||
- Proxmox authentication password.
|
|
||||||
- If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_PASSWORD) will be used instead.
|
|
||||||
required: yes
|
required: yes
|
||||||
type: str
|
type: str
|
||||||
env:
|
|
||||||
- name: PROXMOX_PASSWORD
|
|
||||||
version_added: 2.0.0
|
|
||||||
validate_certs:
|
validate_certs:
|
||||||
description: Verify SSL certificate if using HTTPS.
|
description: Verify SSL certificate if using HTTPS.
|
||||||
type: boolean
|
type: boolean
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: scaleway
|
name: scaleway
|
||||||
|
plugin_type: inventory
|
||||||
author:
|
author:
|
||||||
- Remy Leone (@sieben)
|
- Remy Leone (@sieben)
|
||||||
short_description: Scaleway inventory source
|
short_description: Scaleway inventory source
|
||||||
@@ -23,8 +24,6 @@ DOCUMENTATION = '''
|
|||||||
default:
|
default:
|
||||||
- ams1
|
- ams1
|
||||||
- par1
|
- par1
|
||||||
- par2
|
|
||||||
- waw1
|
|
||||||
tags:
|
tags:
|
||||||
description: Filter results on a specific tag
|
description: Filter results on a specific tag
|
||||||
type: list
|
type: list
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: stackpath_compute
|
name: stackpath_compute
|
||||||
|
plugin_type: inventory
|
||||||
short_description: StackPath Edge Computing inventory source
|
short_description: StackPath Edge Computing inventory source
|
||||||
version_added: 1.2.0
|
version_added: 1.2.0
|
||||||
extends_documentation_fragment:
|
extends_documentation_fragment:
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ __metaclass__ = type
|
|||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: virtualbox
|
name: virtualbox
|
||||||
|
plugin_type: inventory
|
||||||
short_description: virtualbox inventory source
|
short_description: virtualbox inventory source
|
||||||
description:
|
description:
|
||||||
- Get inventory hosts from the local virtualbox installation.
|
- Get inventory hosts from the local virtualbox installation.
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: cartesian
|
lookup: cartesian
|
||||||
short_description: returns the cartesian product of lists
|
short_description: returns the cartesian product of lists
|
||||||
description:
|
description:
|
||||||
- Takes the input lists and returns a list that represents the product of the input lists.
|
- Takes the input lists and returns a list that represents the product of the input lists.
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: chef_databag
|
lookup: chef_databag
|
||||||
short_description: fetches data from a Chef Databag
|
short_description: fetches data from a Chef Databag
|
||||||
description:
|
description:
|
||||||
- "This is a lookup plugin to provide access to chef data bags using the pychef package.
|
- "This is a lookup plugin to provide access to chef data bags using the pychef package.
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: consul_kv
|
lookup: consul_kv
|
||||||
short_description: Fetch metadata from a Consul key value store.
|
short_description: Fetch metadata from a Consul key value store.
|
||||||
description:
|
description:
|
||||||
- Lookup metadata for a playbook from the key value store in a Consul cluster.
|
- Lookup metadata for a playbook from the key value store in a Consul cluster.
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: credstash
|
lookup: credstash
|
||||||
short_description: retrieve secrets from Credstash on AWS
|
short_description: retrieve secrets from Credstash on AWS
|
||||||
requirements:
|
requirements:
|
||||||
- credstash (python library)
|
- credstash (python library)
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: cyberarkpassword
|
lookup: cyberarkpassword
|
||||||
short_description: get secrets from CyberArk AIM
|
short_description: get secrets from CyberArk AIM
|
||||||
requirements:
|
requirements:
|
||||||
- CyberArk AIM tool installed
|
- CyberArk AIM tool installed
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: dig
|
lookup: dig
|
||||||
author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
|
author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
|
||||||
short_description: query DNS using the dnspython library
|
short_description: query DNS using the dnspython library
|
||||||
requirements:
|
requirements:
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: dnstxt
|
lookup: dnstxt
|
||||||
author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
|
author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
|
||||||
short_description: query a domain(s)'s DNS txt fields
|
short_description: query a domain(s)'s DNS txt fields
|
||||||
requirements:
|
requirements:
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ from __future__ import absolute_import, division, print_function
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = r"""
|
DOCUMENTATION = r"""
|
||||||
name: dsv
|
lookup: dsv
|
||||||
author: Adam Migus (@amigus) <adam@migus.org>
|
author: Adam Migus (@amigus) <adam@migus.org>
|
||||||
short_description: Get secrets from Thycotic DevOps Secrets Vault
|
short_description: Get secrets from Thycotic DevOps Secrets Vault
|
||||||
version_added: 1.0.0
|
version_added: 1.0.0
|
||||||
|
|||||||
@@ -22,7 +22,7 @@ __metaclass__ = type
|
|||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author:
|
author:
|
||||||
- Jan-Piet Mens (@jpmens)
|
- Jan-Piet Mens (@jpmens)
|
||||||
name: etcd
|
lookup: etcd
|
||||||
short_description: get info from an etcd server
|
short_description: get info from an etcd server
|
||||||
description:
|
description:
|
||||||
- Retrieves data from an etcd server
|
- Retrieves data from an etcd server
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ DOCUMENTATION = '''
|
|||||||
author:
|
author:
|
||||||
- Eric Belhomme (@eric-belhomme) <ebelhomme@fr.scc.com>
|
- Eric Belhomme (@eric-belhomme) <ebelhomme@fr.scc.com>
|
||||||
version_added: '0.2.0'
|
version_added: '0.2.0'
|
||||||
name: etcd3
|
lookup: etcd3
|
||||||
short_description: Get key values from etcd3 server
|
short_description: Get key values from etcd3 server
|
||||||
description:
|
description:
|
||||||
- Retrieves key values and/or key prefixes from etcd3 server using its native gRPC API.
|
- Retrieves key values and/or key prefixes from etcd3 server using its native gRPC API.
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = r'''
|
DOCUMENTATION = r'''
|
||||||
name: filetree
|
lookup: filetree
|
||||||
author: Dag Wieers (@dagwieers) <dag@wieers.com>
|
author: Dag Wieers (@dagwieers) <dag@wieers.com>
|
||||||
short_description: recursively match all files in a directory tree
|
short_description: recursively match all files in a directory tree
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: flattened
|
lookup: flattened
|
||||||
author: Serge van Ginderachter (!UNKNOWN) <serge@vanginderachter.be>
|
author: Serge van Ginderachter (!UNKNOWN) <serge@vanginderachter.be>
|
||||||
short_description: return single list completely flattened
|
short_description: return single list completely flattened
|
||||||
description:
|
description:
|
||||||
|
|||||||
156
plugins/lookup/gcp_storage_file.py
Normal file
156
plugins/lookup/gcp_storage_file.py
Normal file
@@ -0,0 +1,156 @@
|
|||||||
|
# (c) 2019, Eric Anderson <eric.sysmin@gmail.com>
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import absolute_import, division, print_function
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
lookup: gcp_storage_file
|
||||||
|
description:
|
||||||
|
- This lookup returns the contents from a file residing on Google Cloud Storage
|
||||||
|
short_description: Return GC Storage content
|
||||||
|
author: Eric Anderson (!UNKNOWN) <eanderson@avinetworks.com>
|
||||||
|
requirements:
|
||||||
|
- python >= 2.6
|
||||||
|
- requests >= 2.18.4
|
||||||
|
- google-auth >= 1.3.0
|
||||||
|
options:
|
||||||
|
src:
|
||||||
|
description:
|
||||||
|
- Source location of file (may be local machine or cloud depending on action).
|
||||||
|
required: false
|
||||||
|
bucket:
|
||||||
|
description:
|
||||||
|
- The name of the bucket.
|
||||||
|
required: false
|
||||||
|
extends_documentation_fragment:
|
||||||
|
- community.general._gcp
|
||||||
|
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
- ansible.builtin.debug:
|
||||||
|
msg: |
|
||||||
|
the value of foo.txt is {{ lookup('community.general.gcp_storage_file',
|
||||||
|
bucket='gcp-bucket', src='mydir/foo.txt', project='project-name',
|
||||||
|
auth_kind='serviceaccount', service_account_file='/tmp/myserviceaccountfile.json') }}
|
||||||
|
'''
|
||||||
|
|
||||||
|
RETURN = '''
|
||||||
|
_raw:
|
||||||
|
description:
|
||||||
|
- base64 encoded file content
|
||||||
|
type: list
|
||||||
|
elements: str
|
||||||
|
'''
|
||||||
|
|
||||||
|
import base64
|
||||||
|
import json
|
||||||
|
import mimetypes
|
||||||
|
import os
|
||||||
|
from ansible.errors import AnsibleError
|
||||||
|
from ansible.plugins.lookup import LookupBase
|
||||||
|
from ansible.utils.display import Display
|
||||||
|
|
||||||
|
try:
|
||||||
|
import requests
|
||||||
|
HAS_REQUESTS = True
|
||||||
|
except ImportError:
|
||||||
|
HAS_REQUESTS = False
|
||||||
|
|
||||||
|
try:
|
||||||
|
from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession
|
||||||
|
HAS_GOOGLE_CLOUD_COLLECTION = True
|
||||||
|
except ImportError:
|
||||||
|
HAS_GOOGLE_CLOUD_COLLECTION = False
|
||||||
|
|
||||||
|
|
||||||
|
display = Display()
|
||||||
|
|
||||||
|
|
||||||
|
class GcpMockModule(object):
|
||||||
|
def __init__(self, params):
|
||||||
|
self.params = params
|
||||||
|
|
||||||
|
def fail_json(self, *args, **kwargs):
|
||||||
|
raise AnsibleError(kwargs['msg'])
|
||||||
|
|
||||||
|
def raise_for_status(self, response):
|
||||||
|
try:
|
||||||
|
response.raise_for_status()
|
||||||
|
except getattr(requests.exceptions, 'RequestException'):
|
||||||
|
self.fail_json(msg="GCP returned error: %s" % response.json())
|
||||||
|
|
||||||
|
|
||||||
|
class GcpFileLookup():
|
||||||
|
def get_file_contents(self, module):
|
||||||
|
auth = GcpSession(module, 'storage')
|
||||||
|
data = auth.get(self.media_link(module))
|
||||||
|
return base64.b64encode(data.content.rstrip())
|
||||||
|
|
||||||
|
def fetch_resource(self, module, link, allow_not_found=True):
|
||||||
|
auth = GcpSession(module, 'storage')
|
||||||
|
return self.return_if_object(module, auth.get(link), allow_not_found)
|
||||||
|
|
||||||
|
def self_link(self, module):
|
||||||
|
return "https://www.googleapis.com/storage/v1/b/{bucket}/o/{src}".format(**module.params)
|
||||||
|
|
||||||
|
def media_link(self, module):
|
||||||
|
return "https://www.googleapis.com/storage/v1/b/{bucket}/o/{src}?alt=media".format(**module.params)
|
||||||
|
|
||||||
|
def return_if_object(self, module, response, allow_not_found=False):
|
||||||
|
# If not found, return nothing.
|
||||||
|
if allow_not_found and response.status_code == 404:
|
||||||
|
return None
|
||||||
|
# If no content, return nothing.
|
||||||
|
if response.status_code == 204:
|
||||||
|
return None
|
||||||
|
try:
|
||||||
|
module.raise_for_status(response)
|
||||||
|
result = response.json()
|
||||||
|
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
|
||||||
|
raise AnsibleError("Invalid JSON response with error: %s" % inst)
|
||||||
|
if navigate_hash(result, ['error', 'errors']):
|
||||||
|
raise AnsibleError(navigate_hash(result, ['error', 'errors']))
|
||||||
|
return result
|
||||||
|
|
||||||
|
def object_headers(self, module):
|
||||||
|
return {
|
||||||
|
"name": module.params['src'],
|
||||||
|
"Content-Type": mimetypes.guess_type(module.params['src'])[0],
|
||||||
|
"Content-Length": str(os.path.getsize(module.params['src'])),
|
||||||
|
}
|
||||||
|
|
||||||
|
def run(self, terms, variables=None, **kwargs):
|
||||||
|
params = {
|
||||||
|
'bucket': kwargs.get('bucket', None),
|
||||||
|
'src': kwargs.get('src', None),
|
||||||
|
'projects': kwargs.get('projects', None),
|
||||||
|
'scopes': kwargs.get('scopes', None),
|
||||||
|
'zones': kwargs.get('zones', None),
|
||||||
|
'auth_kind': kwargs.get('auth_kind', None),
|
||||||
|
'service_account_file': kwargs.get('service_account_file', None),
|
||||||
|
'service_account_email': kwargs.get('service_account_email', None),
|
||||||
|
}
|
||||||
|
|
||||||
|
if not params['scopes']:
|
||||||
|
params['scopes'] = ['https://www.googleapis.com/auth/devstorage.full_control']
|
||||||
|
|
||||||
|
fake_module = GcpMockModule(params)
|
||||||
|
|
||||||
|
# Check if files exist.
|
||||||
|
remote_object = self.fetch_resource(fake_module, self.self_link(fake_module))
|
||||||
|
if not remote_object:
|
||||||
|
raise AnsibleError("File does not exist in bucket")
|
||||||
|
|
||||||
|
result = self.get_file_contents(fake_module)
|
||||||
|
return [result]
|
||||||
|
|
||||||
|
|
||||||
|
class LookupModule(LookupBase):
|
||||||
|
def run(self, terms, variables=None, **kwargs):
|
||||||
|
if not HAS_GOOGLE_CLOUD_COLLECTION:
|
||||||
|
raise AnsibleError("community.general.gcp_storage_file needs a supported version of the google.cloud collection installed")
|
||||||
|
if not HAS_REQUESTS:
|
||||||
|
raise AnsibleError("community.general.gcp_storage_file needs requests installed. Use `pip install requests` to install it")
|
||||||
|
return GcpFileLookup().run(terms, variables=variables, **kwargs)
|
||||||
650
plugins/lookup/hashi_vault.py
Normal file
650
plugins/lookup/hashi_vault.py
Normal file
@@ -0,0 +1,650 @@
|
|||||||
|
# (c) 2020, Brian Scholer (@briantist)
|
||||||
|
# (c) 2015, Jonathan Davila <jonathan(at)davila.io>
|
||||||
|
# (c) 2017 Ansible Project
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
DOCUMENTATION = """
|
||||||
|
lookup: hashi_vault
|
||||||
|
author:
|
||||||
|
- Jonathan Davila (!UNKNOWN) <jdavila(at)ansible.com>
|
||||||
|
- Brian Scholer (@briantist)
|
||||||
|
short_description: Retrieve secrets from HashiCorp's Vault
|
||||||
|
requirements:
|
||||||
|
- hvac (python library)
|
||||||
|
- hvac 0.7.0+ (for namespace support)
|
||||||
|
- hvac 0.9.6+ (to avoid all deprecation warnings)
|
||||||
|
- botocore (only if inferring aws params from boto)
|
||||||
|
- boto3 (only if using a boto profile)
|
||||||
|
description:
|
||||||
|
- Retrieve secrets from HashiCorp's Vault.
|
||||||
|
notes:
|
||||||
|
- Due to a current limitation in the HVAC library there won't necessarily be an error if a bad endpoint is specified.
|
||||||
|
- As of community.general 0.2.0, only the latest version of a secret is returned when specifying a KV v2 path.
|
||||||
|
- As of community.general 0.2.0, all options can be supplied via term string (space delimited key=value pairs) or by parameters (see examples).
|
||||||
|
- As of community.general 0.2.0, when C(secret) is the first option in the term string, C(secret=) is not required (see examples).
|
||||||
|
options:
|
||||||
|
secret:
|
||||||
|
description: Vault path to the secret being requested in the format C(path[:field]).
|
||||||
|
required: True
|
||||||
|
token:
|
||||||
|
description:
|
||||||
|
- Vault token. If using token auth and no token is supplied, explicitly or through env, then the plugin will check
|
||||||
|
- for a token file, as determined by C(token_path) and C(token_file).
|
||||||
|
env:
|
||||||
|
- name: VAULT_TOKEN
|
||||||
|
token_path:
|
||||||
|
description: If no token is specified, will try to read the token file from this path.
|
||||||
|
env:
|
||||||
|
- name: VAULT_TOKEN_PATH
|
||||||
|
version_added: 1.2.0
|
||||||
|
ini:
|
||||||
|
- section: lookup_hashi_vault
|
||||||
|
key: token_path
|
||||||
|
version_added: '0.2.0'
|
||||||
|
token_file:
|
||||||
|
description: If no token is specified, will try to read the token from this file in C(token_path).
|
||||||
|
env:
|
||||||
|
- name: VAULT_TOKEN_FILE
|
||||||
|
version_added: 1.2.0
|
||||||
|
ini:
|
||||||
|
- section: lookup_hashi_vault
|
||||||
|
key: token_file
|
||||||
|
default: '.vault-token'
|
||||||
|
version_added: '0.2.0'
|
||||||
|
url:
|
||||||
|
description: URL to the Vault service.
|
||||||
|
env:
|
||||||
|
- name: VAULT_ADDR
|
||||||
|
ini:
|
||||||
|
- section: lookup_hashi_vault
|
||||||
|
key: url
|
||||||
|
version_added: '0.2.0'
|
||||||
|
default: 'http://127.0.0.1:8200'
|
||||||
|
username:
|
||||||
|
description: Authentication user name.
|
||||||
|
password:
|
||||||
|
description: Authentication password.
|
||||||
|
role_id:
|
||||||
|
description: Vault Role ID. Used in approle and aws_iam_login auth methods.
|
||||||
|
env:
|
||||||
|
- name: VAULT_ROLE_ID
|
||||||
|
ini:
|
||||||
|
- section: lookup_hashi_vault
|
||||||
|
key: role_id
|
||||||
|
version_added: '0.2.0'
|
||||||
|
secret_id:
|
||||||
|
description: Secret ID to be used for Vault AppRole authentication.
|
||||||
|
env:
|
||||||
|
- name: VAULT_SECRET_ID
|
||||||
|
auth_method:
|
||||||
|
description:
|
||||||
|
- Authentication method to be used.
|
||||||
|
- C(userpass) is added in Ansible 2.8.
|
||||||
|
- C(aws_iam_login) is added in community.general 0.2.0.
|
||||||
|
- C(jwt) is added in community.general 1.3.0.
|
||||||
|
env:
|
||||||
|
- name: VAULT_AUTH_METHOD
|
||||||
|
ini:
|
||||||
|
- section: lookup_hashi_vault
|
||||||
|
key: auth_method
|
||||||
|
version_added: '0.2.0'
|
||||||
|
choices:
|
||||||
|
- token
|
||||||
|
- userpass
|
||||||
|
- ldap
|
||||||
|
- approle
|
||||||
|
- aws_iam_login
|
||||||
|
- jwt
|
||||||
|
default: token
|
||||||
|
return_format:
|
||||||
|
description:
|
||||||
|
- Controls how multiple key/value pairs in a path are treated on return.
|
||||||
|
- C(dict) returns a single dict containing the key/value pairs (same behavior as before community.general 0.2.0).
|
||||||
|
- C(values) returns a list of all the values only. Use when you don't care about the keys.
|
||||||
|
- C(raw) returns the actual API result, which includes metadata and may have the data nested in other keys.
|
||||||
|
choices:
|
||||||
|
- dict
|
||||||
|
- values
|
||||||
|
- raw
|
||||||
|
default: dict
|
||||||
|
aliases: [ as ]
|
||||||
|
version_added: '0.2.0'
|
||||||
|
mount_point:
|
||||||
|
description: Vault mount point, only required if you have a custom mount point. Does not apply to token authentication.
|
||||||
|
jwt:
|
||||||
|
description: The JSON Web Token (JWT) to use for JWT authentication to Vault.
|
||||||
|
env:
|
||||||
|
- name: ANSIBLE_HASHI_VAULT_JWT
|
||||||
|
version_added: 1.3.0
|
||||||
|
ca_cert:
|
||||||
|
description: Path to certificate to use for authentication.
|
||||||
|
aliases: [ cacert ]
|
||||||
|
validate_certs:
|
||||||
|
description:
|
||||||
|
- Controls verification and validation of SSL certificates, mostly you only want to turn off with self signed ones.
|
||||||
|
- Will be populated with the inverse of C(VAULT_SKIP_VERIFY) if that is set and I(validate_certs) is not explicitly
|
||||||
|
provided (added in community.general 1.3.0).
|
||||||
|
- Will default to C(true) if neither I(validate_certs) or C(VAULT_SKIP_VERIFY) are set.
|
||||||
|
type: boolean
|
||||||
|
namespace:
|
||||||
|
description:
|
||||||
|
- Vault namespace where secrets reside. This option requires HVAC 0.7.0+ and Vault 0.11+.
|
||||||
|
- Optionally, this may be achieved by prefixing the authentication mount point and/or secret path with the namespace
|
||||||
|
(e.g C(mynamespace/secret/mysecret)).
|
||||||
|
env:
|
||||||
|
- name: VAULT_NAMESPACE
|
||||||
|
version_added: 1.2.0
|
||||||
|
aws_profile:
|
||||||
|
description: The AWS profile
|
||||||
|
type: str
|
||||||
|
aliases: [ boto_profile ]
|
||||||
|
env:
|
||||||
|
- name: AWS_DEFAULT_PROFILE
|
||||||
|
- name: AWS_PROFILE
|
||||||
|
version_added: '0.2.0'
|
||||||
|
aws_access_key:
|
||||||
|
description: The AWS access key to use.
|
||||||
|
type: str
|
||||||
|
aliases: [ aws_access_key_id ]
|
||||||
|
env:
|
||||||
|
- name: EC2_ACCESS_KEY
|
||||||
|
- name: AWS_ACCESS_KEY
|
||||||
|
- name: AWS_ACCESS_KEY_ID
|
||||||
|
version_added: '0.2.0'
|
||||||
|
aws_secret_key:
|
||||||
|
description: The AWS secret key that corresponds to the access key.
|
||||||
|
type: str
|
||||||
|
aliases: [ aws_secret_access_key ]
|
||||||
|
env:
|
||||||
|
- name: EC2_SECRET_KEY
|
||||||
|
- name: AWS_SECRET_KEY
|
||||||
|
- name: AWS_SECRET_ACCESS_KEY
|
||||||
|
version_added: '0.2.0'
|
||||||
|
aws_security_token:
|
||||||
|
description: The AWS security token if using temporary access and secret keys.
|
||||||
|
type: str
|
||||||
|
env:
|
||||||
|
- name: EC2_SECURITY_TOKEN
|
||||||
|
- name: AWS_SESSION_TOKEN
|
||||||
|
- name: AWS_SECURITY_TOKEN
|
||||||
|
version_added: '0.2.0'
|
||||||
|
region:
|
||||||
|
description: The AWS region for which to create the connection.
|
||||||
|
type: str
|
||||||
|
env:
|
||||||
|
- name: EC2_REGION
|
||||||
|
- name: AWS_REGION
|
||||||
|
version_added: '0.2.0'
|
||||||
|
"""
|
||||||
|
|
||||||
|
EXAMPLES = """
|
||||||
|
- ansible.builtin.debug:
|
||||||
|
msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hello:value token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200') }}"
|
||||||
|
|
||||||
|
- name: Return all secrets from a path
|
||||||
|
ansible.builtin.debug:
|
||||||
|
msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hello token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200') }}"
|
||||||
|
|
||||||
|
- name: Vault that requires authentication via LDAP
|
||||||
|
ansible.builtin.debug:
|
||||||
|
msg: "{{ lookup('community.general.hashi_vault', 'secret/hello:value auth_method=ldap mount_point=ldap username=myuser password=mypas') }}"
|
||||||
|
|
||||||
|
- name: Vault that requires authentication via username and password
|
||||||
|
ansible.builtin.debug:
|
||||||
|
msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hello:value auth_method=userpass username=myuser password=psw url=http://myvault:8200') }}"
|
||||||
|
|
||||||
|
- name: Connect to Vault using TLS
|
||||||
|
ansible.builtin.debug:
|
||||||
|
msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hola:value token=c975b780-d1be-8016-866b-01d0f9b688a5 validate_certs=False') }}"
|
||||||
|
|
||||||
|
- name: using certificate auth
|
||||||
|
ansible.builtin.debug:
|
||||||
|
msg: "{{ lookup('community.general.hashi_vault', 'secret/hi:value token=xxxx url=https://myvault:8200 validate_certs=True cacert=/cacert/path/ca.pem') }}"
|
||||||
|
|
||||||
|
- name: Authenticate with a Vault app role
|
||||||
|
ansible.builtin.debug:
|
||||||
|
msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hello:value auth_method=approle role_id=myroleid secret_id=mysecretid') }}"
|
||||||
|
|
||||||
|
- name: Return all secrets from a path in a namespace
|
||||||
|
ansible.builtin.debug:
|
||||||
|
msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hello token=c975b780-d1be-8016-866b-01d0f9b688a5 namespace=teama/admins') }}"
|
||||||
|
|
||||||
|
# When using KV v2 the PATH should include "data" between the secret engine mount and path (e.g. "secret/data/:path")
|
||||||
|
# see: https://www.vaultproject.io/api/secret/kv/kv-v2.html#read-secret-version
|
||||||
|
- name: Return latest KV v2 secret from path
|
||||||
|
ansible.builtin.debug:
|
||||||
|
msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/data/hello token=my_vault_token url=http://myvault_url:8200') }}"
|
||||||
|
|
||||||
|
# The following examples work in collection releases after community.general 0.2.0
|
||||||
|
|
||||||
|
- name: secret= is not required if secret is first
|
||||||
|
ansible.builtin.debug:
|
||||||
|
msg: "{{ lookup('community.general.hashi_vault', 'secret/data/hello token=<token> url=http://myvault_url:8200') }}"
|
||||||
|
|
||||||
|
- name: options can be specified as parameters rather than put in term string
|
||||||
|
ansible.builtin.debug:
|
||||||
|
msg: "{{ lookup('community.general.hashi_vault', 'secret/data/hello', token=my_token_var, url='http://myvault_url:8200') }}"
|
||||||
|
|
||||||
|
# return_format (or its alias 'as') can control how secrets are returned to you
|
||||||
|
- name: return secrets as a dict (default)
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
my_secrets: "{{ lookup('community.general.hashi_vault', 'secret/data/manysecrets', token=my_token_var, url='http://myvault_url:8200') }}"
|
||||||
|
- ansible.builtin.debug:
|
||||||
|
msg: "{{ my_secrets['secret_key'] }}"
|
||||||
|
- ansible.builtin.debug:
|
||||||
|
msg: "Secret '{{ item.key }}' has value '{{ item.value }}'"
|
||||||
|
loop: "{{ my_secrets | dict2items }}"
|
||||||
|
|
||||||
|
- name: return secrets as values only
|
||||||
|
ansible.builtin.debug:
|
||||||
|
msg: "A secret value: {{ item }}"
|
||||||
|
loop: "{{ query('community.general.hashi_vault', 'secret/data/manysecrets', token=my_token_var, url='http://myvault_url:8200', return_format='values') }}"
|
||||||
|
|
||||||
|
- name: return raw secret from API, including metadata
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
my_secret: "{{ lookup('community.general.hashi_vault', 'secret/data/hello:value', token=my_token_var, url='http://myvault_url:8200', as='raw') }}"
|
||||||
|
- ansible.builtin.debug:
|
||||||
|
msg: "This is version {{ my_secret['metadata']['version'] }} of hello:value. The secret data is {{ my_secret['data']['data']['value'] }}"
|
||||||
|
|
||||||
|
# AWS IAM authentication method
|
||||||
|
# uses Ansible standard AWS options
|
||||||
|
|
||||||
|
- name: authenticate with aws_iam_login
|
||||||
|
ansible.builtin.debug:
|
||||||
|
msg: "{{ lookup('community.general.hashi_vault', 'secret/hello:value', auth_method='aws_iam_login', role_id='myroleid', profile=my_boto_profile) }}"
|
||||||
|
|
||||||
|
# The following examples work in collection releases after community.general 1.3.0
|
||||||
|
|
||||||
|
- name: Authenticate with a JWT
|
||||||
|
ansible.builtin.debug:
|
||||||
|
msg: "{{ lookup('community.general.hashi_vault', 'secret/hello:value', auth_method='jwt', role_id='myroleid', jwt='myjwt', url='https://myvault:8200')}}"
|
||||||
|
"""
|
||||||
|
|
||||||
|
RETURN = """
|
||||||
|
_raw:
|
||||||
|
description:
|
||||||
|
- secrets(s) requested
|
||||||
|
type: list
|
||||||
|
elements: dict
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
|
||||||
|
from ansible.errors import AnsibleError
|
||||||
|
from ansible.plugins.lookup import LookupBase
|
||||||
|
from ansible.utils.display import Display
|
||||||
|
from ansible.module_utils.parsing.convert_bool import boolean
|
||||||
|
|
||||||
|
HAS_HVAC = False
|
||||||
|
try:
|
||||||
|
import hvac
|
||||||
|
HAS_HVAC = True
|
||||||
|
except ImportError:
|
||||||
|
HAS_HVAC = False
|
||||||
|
|
||||||
|
HAS_BOTOCORE = False
|
||||||
|
try:
|
||||||
|
# import boto3
|
||||||
|
import botocore
|
||||||
|
HAS_BOTOCORE = True
|
||||||
|
except ImportError:
|
||||||
|
HAS_BOTOCORE = False
|
||||||
|
|
||||||
|
HAS_BOTO3 = False
|
||||||
|
try:
|
||||||
|
import boto3
|
||||||
|
# import botocore
|
||||||
|
HAS_BOTO3 = True
|
||||||
|
except ImportError:
|
||||||
|
HAS_BOTO3 = False
|
||||||
|
|
||||||
|
|
||||||
|
class HashiVault:
|
||||||
|
def get_options(self, *option_names, **kwargs):
|
||||||
|
ret = {}
|
||||||
|
include_falsey = kwargs.get('include_falsey', False)
|
||||||
|
for option in option_names:
|
||||||
|
val = self.options.get(option)
|
||||||
|
if val or include_falsey:
|
||||||
|
ret[option] = val
|
||||||
|
return ret
|
||||||
|
|
||||||
|
def __init__(self, **kwargs):
|
||||||
|
self.options = kwargs
|
||||||
|
|
||||||
|
# check early that auth method is actually available
|
||||||
|
self.auth_function = 'auth_' + self.options['auth_method']
|
||||||
|
if not (hasattr(self, self.auth_function) and callable(getattr(self, self.auth_function))):
|
||||||
|
raise AnsibleError(
|
||||||
|
"Authentication method '%s' is not implemented. ('%s' member function not found)" % (self.options['auth_method'], self.auth_function)
|
||||||
|
)
|
||||||
|
|
||||||
|
client_args = {
|
||||||
|
'url': self.options['url'],
|
||||||
|
'verify': self.options['ca_cert']
|
||||||
|
}
|
||||||
|
|
||||||
|
if self.options.get('namespace'):
|
||||||
|
client_args['namespace'] = self.options['namespace']
|
||||||
|
|
||||||
|
# this is the only auth_method-specific thing here, because if we're using a token, we need it now
|
||||||
|
if self.options['auth_method'] == 'token':
|
||||||
|
client_args['token'] = self.options.get('token')
|
||||||
|
|
||||||
|
self.client = hvac.Client(**client_args)
|
||||||
|
|
||||||
|
# Check for old version, before auth_methods class (added in 0.7.0):
|
||||||
|
# https://github.com/hvac/hvac/releases/tag/v0.7.0
|
||||||
|
#
|
||||||
|
# hvac is moving auth methods into the auth_methods class
|
||||||
|
# which lives in the client.auth member.
|
||||||
|
#
|
||||||
|
# Attempting to find which backends were moved into the class when (this is primarily for warnings):
|
||||||
|
# 0.7.0 -- github, ldap, mfa, azure?, gcp
|
||||||
|
# 0.7.1 -- okta
|
||||||
|
# 0.8.0 -- kubernetes
|
||||||
|
# 0.9.0 -- azure?, radius
|
||||||
|
# 0.9.3 -- aws
|
||||||
|
# 0.9.6 -- userpass
|
||||||
|
self.hvac_has_auth_methods = hasattr(self.client, 'auth')
|
||||||
|
|
||||||
|
# We've already checked to ensure a method exists for a particular auth_method, of the form:
|
||||||
|
#
|
||||||
|
# auth_<method_name>
|
||||||
|
#
|
||||||
|
def authenticate(self):
|
||||||
|
getattr(self, self.auth_function)()
|
||||||
|
|
||||||
|
def get(self):
|
||||||
|
'''gets a secret. should always return a list'''
|
||||||
|
secret = self.options['secret']
|
||||||
|
field = self.options['secret_field']
|
||||||
|
return_as = self.options['return_format']
|
||||||
|
|
||||||
|
try:
|
||||||
|
data = self.client.read(secret)
|
||||||
|
except hvac.exceptions.Forbidden:
|
||||||
|
raise AnsibleError("Forbidden: Permission Denied to secret '%s'." % secret)
|
||||||
|
|
||||||
|
if data is None:
|
||||||
|
raise AnsibleError("The secret '%s' doesn't seem to exist." % secret)
|
||||||
|
|
||||||
|
if return_as == 'raw':
|
||||||
|
return [data]
|
||||||
|
|
||||||
|
# Check response for KV v2 fields and flatten nested secret data.
|
||||||
|
# https://vaultproject.io/api/secret/kv/kv-v2.html#sample-response-1
|
||||||
|
try:
|
||||||
|
# sentinel field checks
|
||||||
|
check_dd = data['data']['data']
|
||||||
|
check_md = data['data']['metadata']
|
||||||
|
# unwrap nested data
|
||||||
|
data = data['data']
|
||||||
|
except KeyError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
if return_as == 'values':
|
||||||
|
return list(data['data'].values())
|
||||||
|
|
||||||
|
# everything after here implements return_as == 'dict'
|
||||||
|
if not field:
|
||||||
|
return [data['data']]
|
||||||
|
|
||||||
|
if field not in data['data']:
|
||||||
|
raise AnsibleError("The secret %s does not contain the field '%s'. for hashi_vault lookup" % (secret, field))
|
||||||
|
|
||||||
|
return [data['data'][field]]
|
||||||
|
|
||||||
|
# begin auth implementation methods
|
||||||
|
#
|
||||||
|
# To add new backends, 3 things should be added:
|
||||||
|
#
|
||||||
|
# 1. Add a new validate_auth_<method_name> method to the LookupModule, which is responsible for validating
|
||||||
|
# that it has the necessary options and whatever else it needs.
|
||||||
|
#
|
||||||
|
# 2. Add a new auth_<method_name> method to this class. These implementations are faily minimal as they should
|
||||||
|
# already have everything they need. This is also the place to check for deprecated auth methods as hvac
|
||||||
|
# continues to move backends into the auth_methods class.
|
||||||
|
#
|
||||||
|
# 3. Update the avail_auth_methods list in the LookupModules auth_methods() method (for now this is static).
|
||||||
|
#
|
||||||
|
def auth_token(self):
|
||||||
|
if not self.client.is_authenticated():
|
||||||
|
raise AnsibleError("Invalid Hashicorp Vault Token Specified for hashi_vault lookup.")
|
||||||
|
|
||||||
|
def auth_userpass(self):
|
||||||
|
params = self.get_options('username', 'password', 'mount_point')
|
||||||
|
if self.hvac_has_auth_methods and hasattr(self.client.auth.userpass, 'login'):
|
||||||
|
self.client.auth.userpass.login(**params)
|
||||||
|
else:
|
||||||
|
Display().warning("HVAC should be updated to version 0.9.6 or higher. Deprecated method 'auth_userpass' will be used.")
|
||||||
|
self.client.auth_userpass(**params)
|
||||||
|
|
||||||
|
def auth_ldap(self):
|
||||||
|
params = self.get_options('username', 'password', 'mount_point')
|
||||||
|
# not hasattr(self.client, 'auth')
|
||||||
|
if self.hvac_has_auth_methods and hasattr(self.client.auth.ldap, 'login'):
|
||||||
|
self.client.auth.ldap.login(**params)
|
||||||
|
else:
|
||||||
|
Display().warning("HVAC should be updated to version 0.7.0 or higher. Deprecated method 'auth_ldap' will be used.")
|
||||||
|
self.client.auth_ldap(**params)
|
||||||
|
|
||||||
|
def auth_approle(self):
|
||||||
|
params = self.get_options('role_id', 'secret_id', 'mount_point')
|
||||||
|
self.client.auth_approle(**params)
|
||||||
|
|
||||||
|
def auth_aws_iam_login(self):
|
||||||
|
params = self.options['iam_login_credentials']
|
||||||
|
if self.hvac_has_auth_methods and hasattr(self.client.auth.aws, 'iam_login'):
|
||||||
|
self.client.auth.aws.iam_login(**params)
|
||||||
|
else:
|
||||||
|
Display().warning("HVAC should be updated to version 0.9.3 or higher. Deprecated method 'auth_aws_iam' will be used.")
|
||||||
|
self.client.auth_aws_iam(**params)
|
||||||
|
|
||||||
|
def auth_jwt(self):
|
||||||
|
params = self.get_options('role_id', 'jwt', 'mount_point')
|
||||||
|
params['role'] = params.pop('role_id')
|
||||||
|
if self.hvac_has_auth_methods and hasattr(self.client.auth, 'jwt') and hasattr(self.client.auth.jwt, 'jwt_login'):
|
||||||
|
response = self.client.auth.jwt.jwt_login(**params)
|
||||||
|
# must manually set the client token with JWT login
|
||||||
|
# see https://github.com/hvac/hvac/issues/644
|
||||||
|
self.client.token = response['auth']['client_token']
|
||||||
|
else:
|
||||||
|
raise AnsibleError("JWT authentication requires HVAC version 0.10.5 or higher.")
|
||||||
|
|
||||||
|
# end auth implementation methods
|
||||||
|
|
||||||
|
|
||||||
|
class LookupModule(LookupBase):
|
||||||
|
def run(self, terms, variables=None, **kwargs):
|
||||||
|
if not HAS_HVAC:
|
||||||
|
raise AnsibleError("Please pip install hvac to use the hashi_vault lookup module.")
|
||||||
|
|
||||||
|
ret = []
|
||||||
|
|
||||||
|
for term in terms:
|
||||||
|
opts = kwargs.copy()
|
||||||
|
opts.update(self.parse_term(term))
|
||||||
|
self.set_options(direct=opts)
|
||||||
|
self.process_options()
|
||||||
|
# FUTURE: Create one object, authenticate once, and re-use it,
|
||||||
|
# for gets, for better use during with_ loops.
|
||||||
|
client = HashiVault(**self._options)
|
||||||
|
client.authenticate()
|
||||||
|
ret.extend(client.get())
|
||||||
|
|
||||||
|
return ret
|
||||||
|
|
||||||
|
def parse_term(self, term):
|
||||||
|
'''parses a term string into options'''
|
||||||
|
param_dict = {}
|
||||||
|
|
||||||
|
for i, param in enumerate(term.split()):
|
||||||
|
try:
|
||||||
|
key, value = param.split('=', 1)
|
||||||
|
except ValueError:
|
||||||
|
if (i == 0):
|
||||||
|
# allow secret to be specified as value only if it's first
|
||||||
|
key = 'secret'
|
||||||
|
value = param
|
||||||
|
else:
|
||||||
|
raise AnsibleError("hashi_vault lookup plugin needs key=value pairs, but received %s" % term)
|
||||||
|
param_dict[key] = value
|
||||||
|
return param_dict
|
||||||
|
|
||||||
|
def process_options(self):
|
||||||
|
'''performs deep validation and value loading for options'''
|
||||||
|
|
||||||
|
# ca_cert to verify
|
||||||
|
self.boolean_or_cacert()
|
||||||
|
|
||||||
|
# auth methods
|
||||||
|
self.auth_methods()
|
||||||
|
|
||||||
|
# secret field splitter
|
||||||
|
self.field_ops()
|
||||||
|
|
||||||
|
# begin options processing methods
|
||||||
|
|
||||||
|
def boolean_or_cacert(self):
|
||||||
|
# This is needed because of this (https://hvac.readthedocs.io/en/stable/source/hvac_v1.html):
|
||||||
|
#
|
||||||
|
# # verify (Union[bool,str]) - Either a boolean to indicate whether TLS verification should
|
||||||
|
# # be performed when sending requests to Vault, or a string pointing at the CA bundle to use for verification.
|
||||||
|
#
|
||||||
|
'''' return a bool or cacert '''
|
||||||
|
ca_cert = self.get_option('ca_cert')
|
||||||
|
|
||||||
|
validate_certs = self.get_option('validate_certs')
|
||||||
|
|
||||||
|
if validate_certs is None:
|
||||||
|
# Validate certs option was not explicitly set
|
||||||
|
|
||||||
|
# Check if VAULT_SKIP_VERIFY is set
|
||||||
|
vault_skip_verify = os.environ.get('VAULT_SKIP_VERIFY')
|
||||||
|
|
||||||
|
if vault_skip_verify is not None:
|
||||||
|
# VAULT_SKIP_VERIFY is set
|
||||||
|
try:
|
||||||
|
# Check that we have a boolean value
|
||||||
|
vault_skip_verify = boolean(vault_skip_verify)
|
||||||
|
# Use the inverse of VAULT_SKIP_VERIFY
|
||||||
|
validate_certs = not vault_skip_verify
|
||||||
|
except TypeError:
|
||||||
|
# Not a boolean value fallback to default value (True)
|
||||||
|
validate_certs = True
|
||||||
|
else:
|
||||||
|
validate_certs = True
|
||||||
|
|
||||||
|
if not (validate_certs and ca_cert):
|
||||||
|
self.set_option('ca_cert', validate_certs)
|
||||||
|
|
||||||
|
def field_ops(self):
|
||||||
|
# split secret and field
|
||||||
|
secret = self.get_option('secret')
|
||||||
|
|
||||||
|
s_f = secret.rsplit(':', 1)
|
||||||
|
self.set_option('secret', s_f[0])
|
||||||
|
if len(s_f) >= 2:
|
||||||
|
field = s_f[1]
|
||||||
|
else:
|
||||||
|
field = None
|
||||||
|
self.set_option('secret_field', field)
|
||||||
|
|
||||||
|
def auth_methods(self):
|
||||||
|
# enforce and set the list of available auth methods
|
||||||
|
# TODO: can this be read from the choices: field in documentation?
|
||||||
|
avail_auth_methods = ['token', 'approle', 'userpass', 'ldap', 'aws_iam_login', 'jwt']
|
||||||
|
self.set_option('avail_auth_methods', avail_auth_methods)
|
||||||
|
auth_method = self.get_option('auth_method')
|
||||||
|
|
||||||
|
if auth_method not in avail_auth_methods:
|
||||||
|
raise AnsibleError(
|
||||||
|
"Authentication method '%s' not supported. Available options are %r" % (auth_method, avail_auth_methods)
|
||||||
|
)
|
||||||
|
|
||||||
|
# run validator if available
|
||||||
|
auth_validator = 'validate_auth_' + auth_method
|
||||||
|
if hasattr(self, auth_validator) and callable(getattr(self, auth_validator)):
|
||||||
|
getattr(self, auth_validator)(auth_method)
|
||||||
|
|
||||||
|
# end options processing methods
|
||||||
|
|
||||||
|
# begin auth method validators
|
||||||
|
|
||||||
|
def validate_by_required_fields(self, auth_method, *field_names):
|
||||||
|
missing = [field for field in field_names if not self.get_option(field)]
|
||||||
|
|
||||||
|
if missing:
|
||||||
|
raise AnsibleError("Authentication method %s requires options %r to be set, but these are missing: %r" % (auth_method, field_names, missing))
|
||||||
|
|
||||||
|
def validate_auth_userpass(self, auth_method):
|
||||||
|
self.validate_by_required_fields(auth_method, 'username', 'password')
|
||||||
|
|
||||||
|
def validate_auth_ldap(self, auth_method):
|
||||||
|
self.validate_by_required_fields(auth_method, 'username', 'password')
|
||||||
|
|
||||||
|
def validate_auth_approle(self, auth_method):
|
||||||
|
self.validate_by_required_fields(auth_method, 'role_id')
|
||||||
|
|
||||||
|
def validate_auth_token(self, auth_method):
|
||||||
|
if auth_method == 'token':
|
||||||
|
if not self.get_option('token_path'):
|
||||||
|
# generally we want env vars defined in the spec, but in this case we want
|
||||||
|
# the env var HOME to have lower precedence than any other value source,
|
||||||
|
# including ini, so we're doing it here after all other processing has taken place
|
||||||
|
self.set_option('token_path', os.environ.get('HOME'))
|
||||||
|
if not self.get_option('token') and self.get_option('token_path'):
|
||||||
|
token_filename = os.path.join(
|
||||||
|
self.get_option('token_path'),
|
||||||
|
self.get_option('token_file')
|
||||||
|
)
|
||||||
|
if os.path.exists(token_filename):
|
||||||
|
with open(token_filename) as token_file:
|
||||||
|
self.set_option('token', token_file.read().strip())
|
||||||
|
|
||||||
|
if not self.get_option('token'):
|
||||||
|
raise AnsibleError("No Vault Token specified or discovered.")
|
||||||
|
|
||||||
|
def validate_auth_aws_iam_login(self, auth_method):
|
||||||
|
params = {
|
||||||
|
'access_key': self.get_option('aws_access_key'),
|
||||||
|
'secret_key': self.get_option('aws_secret_key')
|
||||||
|
}
|
||||||
|
|
||||||
|
if self.get_option('role_id'):
|
||||||
|
params['role'] = self.get_option('role_id')
|
||||||
|
|
||||||
|
if self.get_option('region'):
|
||||||
|
params['region'] = self.get_option('region')
|
||||||
|
|
||||||
|
if not (params['access_key'] and params['secret_key']):
|
||||||
|
profile = self.get_option('aws_profile')
|
||||||
|
if profile:
|
||||||
|
# try to load boto profile
|
||||||
|
if not HAS_BOTO3:
|
||||||
|
raise AnsibleError("boto3 is required for loading a boto profile.")
|
||||||
|
session_credentials = boto3.session.Session(profile_name=profile).get_credentials()
|
||||||
|
else:
|
||||||
|
# try to load from IAM credentials
|
||||||
|
if not HAS_BOTOCORE:
|
||||||
|
raise AnsibleError("botocore is required for loading IAM role credentials.")
|
||||||
|
session_credentials = botocore.session.get_session().get_credentials()
|
||||||
|
|
||||||
|
if not session_credentials:
|
||||||
|
raise AnsibleError("No AWS credentials supplied or available.")
|
||||||
|
|
||||||
|
params['access_key'] = session_credentials.access_key
|
||||||
|
params['secret_key'] = session_credentials.secret_key
|
||||||
|
if session_credentials.token:
|
||||||
|
params['session_token'] = session_credentials.token
|
||||||
|
|
||||||
|
self.set_option('iam_login_credentials', params)
|
||||||
|
|
||||||
|
def validate_auth_jwt(self, auth_method):
|
||||||
|
self.validate_by_required_fields(auth_method, 'role_id', 'jwt')
|
||||||
|
|
||||||
|
# end auth method validators
|
||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author:
|
author:
|
||||||
- Juan Manuel Parrilla (@jparrill)
|
- Juan Manuel Parrilla (@jparrill)
|
||||||
name: hiera
|
lookup: hiera
|
||||||
short_description: get info from hiera data
|
short_description: get info from hiera data
|
||||||
requirements:
|
requirements:
|
||||||
- hiera (command line utility)
|
- hiera (command line utility)
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: keyring
|
lookup: keyring
|
||||||
author:
|
author:
|
||||||
- Samuel Boucher (!UNKNOWN) <boucher.samuel.c@gmail.com>
|
- Samuel Boucher (!UNKNOWN) <boucher.samuel.c@gmail.com>
|
||||||
requirements:
|
requirements:
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: lastpass
|
lookup: lastpass
|
||||||
author:
|
author:
|
||||||
- Andrew Zenk (!UNKNOWN) <azenk@umn.edu>
|
- Andrew Zenk (!UNKNOWN) <azenk@umn.edu>
|
||||||
requirements:
|
requirements:
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: lmdb_kv
|
lookup: lmdb_kv
|
||||||
author:
|
author:
|
||||||
- Jan-Piet Mens (@jpmens)
|
- Jan-Piet Mens (@jpmens)
|
||||||
version_added: '0.2.0'
|
version_added: '0.2.0'
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author:
|
author:
|
||||||
- Kyrylo Galanov (!UNKNOWN) <galanoff@gmail.com>
|
- Kyrylo Galanov (!UNKNOWN) <galanoff@gmail.com>
|
||||||
name: manifold
|
lookup: manifold
|
||||||
short_description: get credentials from Manifold.co
|
short_description: get credentials from Manifold.co
|
||||||
description:
|
description:
|
||||||
- Retrieves resources' credentials from Manifold.co
|
- Retrieves resources' credentials from Manifold.co
|
||||||
|
|||||||
@@ -23,7 +23,7 @@ __metaclass__ = type
|
|||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
---
|
---
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: nios
|
lookup: nios
|
||||||
short_description: Query Infoblox NIOS objects
|
short_description: Query Infoblox NIOS objects
|
||||||
description:
|
description:
|
||||||
- Uses the Infoblox WAPI API to fetch NIOS specified objects. This lookup
|
- Uses the Infoblox WAPI API to fetch NIOS specified objects. This lookup
|
||||||
|
|||||||
@@ -23,7 +23,7 @@ __metaclass__ = type
|
|||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
---
|
---
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: nios_next_ip
|
lookup: nios_next_ip
|
||||||
short_description: Return the next available IP address for a network
|
short_description: Return the next available IP address for a network
|
||||||
description:
|
description:
|
||||||
- Uses the Infoblox WAPI API to return the next available IP addresses
|
- Uses the Infoblox WAPI API to return the next available IP addresses
|
||||||
|
|||||||
@@ -23,7 +23,7 @@ __metaclass__ = type
|
|||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
---
|
---
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: nios_next_network
|
lookup: nios_next_network
|
||||||
short_description: Return the next available network range for a network-container
|
short_description: Return the next available network range for a network-container
|
||||||
description:
|
description:
|
||||||
- Uses the Infoblox WAPI API to return the next available network addresses for
|
- Uses the Infoblox WAPI API to return the next available network addresses for
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user