mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-04-30 10:26:52 +00:00
Compare commits
139 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f0c1b1065a | ||
|
|
a44356c966 | ||
|
|
33f9f0b05f | ||
|
|
f0f0704d64 | ||
|
|
55fe140230 | ||
|
|
ac543f5ef0 | ||
|
|
dbc0fe8859 | ||
|
|
42a1318fe3 | ||
|
|
d25352dc06 | ||
|
|
55682c52df | ||
|
|
46781d9fd1 | ||
|
|
4545d1c91e | ||
|
|
6570dfeb7d | ||
|
|
94c368f7df | ||
|
|
4cba1e60d9 | ||
|
|
321fb6c974 | ||
|
|
eb4d7a4199 | ||
|
|
4b07d45b7e | ||
|
|
d4a33433b4 | ||
|
|
e30b91cb8d | ||
|
|
b2b65c431b | ||
|
|
9ade4f6dd6 | ||
|
|
635d4f2138 | ||
|
|
6549e41ab8 | ||
|
|
6faface39e | ||
|
|
3b893ec421 | ||
|
|
65805e2dd6 | ||
|
|
297b50fb96 | ||
|
|
2edadb42fb | ||
|
|
4e1bf2d4ba | ||
|
|
b1a4a0ff21 | ||
|
|
e74ea7c8b8 | ||
|
|
6590f5e082 | ||
|
|
7483f71d31 | ||
|
|
6b215e3a9c | ||
|
|
3723e458d3 | ||
|
|
0f8bb43723 | ||
|
|
f33530dd61 | ||
|
|
8f3043058e | ||
|
|
3987b8a291 | ||
|
|
f7403a0b34 | ||
|
|
0a676406b3 | ||
|
|
5a7d234d80 | ||
|
|
fb9730f75e | ||
|
|
928aeafe1d | ||
|
|
5b68665571 | ||
|
|
e6b84acd1e | ||
|
|
c242993291 | ||
|
|
4f3de5658e | ||
|
|
301fcc3b7e | ||
|
|
0f0e9b2dca | ||
|
|
ed0636dc27 | ||
|
|
057321c6c6 | ||
|
|
1a4814de53 | ||
|
|
89b67a014b | ||
|
|
57bfbdc407 | ||
|
|
e19dffbf29 | ||
|
|
113e7cdfa0 | ||
|
|
c12be67a69 | ||
|
|
3a076fd585 | ||
|
|
4ef05a6483 | ||
|
|
936dd28395 | ||
|
|
e3b47899c5 | ||
|
|
fd8193e0bd | ||
|
|
fa477ebb35 | ||
|
|
43e766dd44 | ||
|
|
b25e0f360c | ||
|
|
658e95c5ca | ||
|
|
26c2876f50 | ||
|
|
62043463f3 | ||
|
|
f1dab6d4a7 | ||
|
|
d43764da79 | ||
|
|
de2feb2567 | ||
|
|
6e56bae0f3 | ||
|
|
1f7047e725 | ||
|
|
b2e4485567 | ||
|
|
b78254fe24 | ||
|
|
38aa0ec8ad | ||
|
|
42f28048a8 | ||
|
|
b699aaff7b | ||
|
|
af85b6c203 | ||
|
|
ec2e7cad3e | ||
|
|
7753fa4219 | ||
|
|
69ea487005 | ||
|
|
048f15fe68 | ||
|
|
aa1aa1d540 | ||
|
|
e78517ca93 | ||
|
|
bf185573a6 | ||
|
|
145435cdd9 | ||
|
|
6013c77c2b | ||
|
|
ad5482f63d | ||
|
|
f5594aefd5 | ||
|
|
ab5b379b30 | ||
|
|
1c5e44c649 | ||
|
|
23da67cc72 | ||
|
|
4032dd6b08 | ||
|
|
4cb6f39a80 | ||
|
|
3539957bac | ||
|
|
e05769d4bf | ||
|
|
19c03cff96 | ||
|
|
703660c81d | ||
|
|
fd32af1ac3 | ||
|
|
80fbcf2f98 | ||
|
|
a722e038cc | ||
|
|
19c8d2164d | ||
|
|
d4656ffca2 | ||
|
|
b49607f12d | ||
|
|
af0ce4284f | ||
|
|
f5f862617a | ||
|
|
a1a4ba4337 | ||
|
|
b0b783f8ff | ||
|
|
e670ca666a | ||
|
|
49b991527e | ||
|
|
e6cc671a0d | ||
|
|
797ea23e50 | ||
|
|
4d23b7a48b | ||
|
|
020b47a1a9 | ||
|
|
0da9d956a0 | ||
|
|
5691e3aff3 | ||
|
|
007333dbfe | ||
|
|
05666b0e4d | ||
|
|
c934d9aeb5 | ||
|
|
5b15e4089a | ||
|
|
a6379e45ce | ||
|
|
b95176dbc8 | ||
|
|
b752fea121 | ||
|
|
cf50990fed | ||
|
|
45343e6bc0 | ||
|
|
51540f6345 | ||
|
|
74eba52028 | ||
|
|
b920e8abf2 | ||
|
|
75c0004e1e | ||
|
|
be42fd4af7 | ||
|
|
1c05908ff6 | ||
|
|
ea42b75378 | ||
|
|
0330f4b52c | ||
|
|
1d8c659ba2 | ||
|
|
e784254679 | ||
|
|
d5e1edd284 |
@@ -24,15 +24,14 @@ schedules:
|
|||||||
always: true
|
always: true
|
||||||
branches:
|
branches:
|
||||||
include:
|
include:
|
||||||
|
- stable-2
|
||||||
- stable-3
|
- stable-3
|
||||||
- stable-4
|
|
||||||
- cron: 0 11 * * 0
|
- cron: 0 11 * * 0
|
||||||
displayName: Weekly (old stable branches)
|
displayName: Weekly (old stable branches)
|
||||||
always: true
|
always: true
|
||||||
branches:
|
branches:
|
||||||
include:
|
include:
|
||||||
- stable-1
|
- stable-1
|
||||||
- stable-2
|
|
||||||
|
|
||||||
variables:
|
variables:
|
||||||
- name: checkoutPath
|
- name: checkoutPath
|
||||||
@@ -69,19 +68,6 @@ stages:
|
|||||||
- test: 3
|
- test: 3
|
||||||
- test: 4
|
- test: 4
|
||||||
- test: extra
|
- test: extra
|
||||||
- stage: Sanity_2_12
|
|
||||||
displayName: Sanity 2.12
|
|
||||||
dependsOn: []
|
|
||||||
jobs:
|
|
||||||
- template: templates/matrix.yml
|
|
||||||
parameters:
|
|
||||||
nameFormat: Test {0}
|
|
||||||
testFormat: 2.12/sanity/{0}
|
|
||||||
targets:
|
|
||||||
- test: 1
|
|
||||||
- test: 2
|
|
||||||
- test: 3
|
|
||||||
- test: 4
|
|
||||||
- stage: Sanity_2_11
|
- stage: Sanity_2_11
|
||||||
displayName: Sanity 2.11
|
displayName: Sanity 2.11
|
||||||
dependsOn: []
|
dependsOn: []
|
||||||
@@ -130,22 +116,6 @@ stages:
|
|||||||
parameters:
|
parameters:
|
||||||
nameFormat: Python {0}
|
nameFormat: Python {0}
|
||||||
testFormat: devel/units/{0}/1
|
testFormat: devel/units/{0}/1
|
||||||
targets:
|
|
||||||
- test: 2.7
|
|
||||||
- test: 3.5
|
|
||||||
- test: 3.6
|
|
||||||
- test: 3.7
|
|
||||||
- test: 3.8
|
|
||||||
- test: 3.9
|
|
||||||
- test: '3.10'
|
|
||||||
- stage: Units_2_12
|
|
||||||
displayName: Units 2.12
|
|
||||||
dependsOn: []
|
|
||||||
jobs:
|
|
||||||
- template: templates/matrix.yml
|
|
||||||
parameters:
|
|
||||||
nameFormat: Python {0}
|
|
||||||
testFormat: 2.12/units/{0}/1
|
|
||||||
targets:
|
targets:
|
||||||
- test: 2.6
|
- test: 2.6
|
||||||
- test: 2.7
|
- test: 2.7
|
||||||
@@ -153,6 +123,7 @@ stages:
|
|||||||
- test: 3.6
|
- test: 3.6
|
||||||
- test: 3.7
|
- test: 3.7
|
||||||
- test: 3.8
|
- test: 3.8
|
||||||
|
- test: 3.9
|
||||||
- test: '3.10'
|
- test: '3.10'
|
||||||
- stage: Units_2_11
|
- stage: Units_2_11
|
||||||
displayName: Units 2.11
|
displayName: Units 2.11
|
||||||
@@ -179,8 +150,13 @@ stages:
|
|||||||
nameFormat: Python {0}
|
nameFormat: Python {0}
|
||||||
testFormat: 2.10/units/{0}/1
|
testFormat: 2.10/units/{0}/1
|
||||||
targets:
|
targets:
|
||||||
|
- test: 2.6
|
||||||
- test: 2.7
|
- test: 2.7
|
||||||
|
- test: 3.5
|
||||||
- test: 3.6
|
- test: 3.6
|
||||||
|
- test: 3.7
|
||||||
|
- test: 3.8
|
||||||
|
- test: 3.9
|
||||||
- stage: Units_2_9
|
- stage: Units_2_9
|
||||||
displayName: Units 2.9
|
displayName: Units 2.9
|
||||||
dependsOn: []
|
dependsOn: []
|
||||||
@@ -210,8 +186,8 @@ stages:
|
|||||||
test: macos/11.1
|
test: macos/11.1
|
||||||
- name: RHEL 7.9
|
- name: RHEL 7.9
|
||||||
test: rhel/7.9
|
test: rhel/7.9
|
||||||
- name: RHEL 8.5
|
- name: RHEL 8.3
|
||||||
test: rhel/8.5
|
test: rhel/8.3
|
||||||
- name: FreeBSD 12.2
|
- name: FreeBSD 12.2
|
||||||
test: freebsd/12.2
|
test: freebsd/12.2
|
||||||
- name: FreeBSD 13.0
|
- name: FreeBSD 13.0
|
||||||
@@ -220,23 +196,6 @@ stages:
|
|||||||
- 1
|
- 1
|
||||||
- 2
|
- 2
|
||||||
- 3
|
- 3
|
||||||
- stage: Remote_2_12
|
|
||||||
displayName: Remote 2.12
|
|
||||||
dependsOn: []
|
|
||||||
jobs:
|
|
||||||
- template: templates/matrix.yml
|
|
||||||
parameters:
|
|
||||||
testFormat: 2.12/{0}
|
|
||||||
targets:
|
|
||||||
- name: macOS 11.1
|
|
||||||
test: macos/11.1
|
|
||||||
- name: RHEL 8.4
|
|
||||||
test: rhel/8.4
|
|
||||||
- name: FreeBSD 13.0
|
|
||||||
test: freebsd/13.0
|
|
||||||
groups:
|
|
||||||
- 1
|
|
||||||
- 2
|
|
||||||
- stage: Remote_2_11
|
- stage: Remote_2_11
|
||||||
displayName: Remote 2.11
|
displayName: Remote 2.11
|
||||||
dependsOn: []
|
dependsOn: []
|
||||||
@@ -245,6 +204,8 @@ stages:
|
|||||||
parameters:
|
parameters:
|
||||||
testFormat: 2.11/{0}
|
testFormat: 2.11/{0}
|
||||||
targets:
|
targets:
|
||||||
|
- name: macOS 11.1
|
||||||
|
test: macos/11.1
|
||||||
- name: RHEL 7.9
|
- name: RHEL 7.9
|
||||||
test: rhel/7.9
|
test: rhel/7.9
|
||||||
- name: RHEL 8.3
|
- name: RHEL 8.3
|
||||||
@@ -266,6 +227,14 @@ stages:
|
|||||||
test: osx/10.11
|
test: osx/10.11
|
||||||
- name: macOS 10.15
|
- name: macOS 10.15
|
||||||
test: macos/10.15
|
test: macos/10.15
|
||||||
|
- name: macOS 11.1
|
||||||
|
test: macos/11.1
|
||||||
|
- name: RHEL 7.8
|
||||||
|
test: rhel/7.8
|
||||||
|
- name: RHEL 8.2
|
||||||
|
test: rhel/8.2
|
||||||
|
- name: FreeBSD 12.1
|
||||||
|
test: freebsd/12.1
|
||||||
groups:
|
groups:
|
||||||
- 1
|
- 1
|
||||||
- 2
|
- 2
|
||||||
@@ -279,8 +248,6 @@ stages:
|
|||||||
targets:
|
targets:
|
||||||
- name: RHEL 8.2
|
- name: RHEL 8.2
|
||||||
test: rhel/8.2
|
test: rhel/8.2
|
||||||
- name: RHEL 7.8
|
|
||||||
test: rhel/7.8
|
|
||||||
- name: FreeBSD 12.0
|
- name: FreeBSD 12.0
|
||||||
test: freebsd/12.0
|
test: freebsd/12.0
|
||||||
groups:
|
groups:
|
||||||
@@ -296,12 +263,16 @@ stages:
|
|||||||
parameters:
|
parameters:
|
||||||
testFormat: devel/linux/{0}
|
testFormat: devel/linux/{0}
|
||||||
targets:
|
targets:
|
||||||
|
- name: CentOS 6
|
||||||
|
test: centos6
|
||||||
- name: CentOS 7
|
- name: CentOS 7
|
||||||
test: centos7
|
test: centos7
|
||||||
|
- name: CentOS 8
|
||||||
|
test: centos8
|
||||||
|
- name: Fedora 33
|
||||||
|
test: fedora33
|
||||||
- name: Fedora 34
|
- name: Fedora 34
|
||||||
test: fedora34
|
test: fedora34
|
||||||
- name: Fedora 35
|
|
||||||
test: fedora35
|
|
||||||
- name: openSUSE 15 py2
|
- name: openSUSE 15 py2
|
||||||
test: opensuse15py2
|
test: opensuse15py2
|
||||||
- name: openSUSE 15 py3
|
- name: openSUSE 15 py3
|
||||||
@@ -314,28 +285,6 @@ stages:
|
|||||||
- 1
|
- 1
|
||||||
- 2
|
- 2
|
||||||
- 3
|
- 3
|
||||||
- stage: Docker_2_12
|
|
||||||
displayName: Docker 2.12
|
|
||||||
dependsOn: []
|
|
||||||
jobs:
|
|
||||||
- template: templates/matrix.yml
|
|
||||||
parameters:
|
|
||||||
testFormat: 2.12/linux/{0}
|
|
||||||
targets:
|
|
||||||
- name: CentOS 6
|
|
||||||
test: centos6
|
|
||||||
- name: CentOS 8
|
|
||||||
test: centos8
|
|
||||||
- name: Fedora 34
|
|
||||||
test: fedora34
|
|
||||||
- name: openSUSE 15 py3
|
|
||||||
test: opensuse15
|
|
||||||
- name: Ubuntu 20.04
|
|
||||||
test: ubuntu2004
|
|
||||||
groups:
|
|
||||||
- 1
|
|
||||||
- 2
|
|
||||||
- 3
|
|
||||||
- stage: Docker_2_11
|
- stage: Docker_2_11
|
||||||
displayName: Docker 2.11
|
displayName: Docker 2.11
|
||||||
dependsOn: []
|
dependsOn: []
|
||||||
@@ -344,12 +293,14 @@ stages:
|
|||||||
parameters:
|
parameters:
|
||||||
testFormat: 2.11/linux/{0}
|
testFormat: 2.11/linux/{0}
|
||||||
targets:
|
targets:
|
||||||
- name: CentOS 7
|
- name: CentOS 8
|
||||||
test: centos7
|
test: centos8
|
||||||
- name: Fedora 33
|
- name: Fedora 33
|
||||||
test: fedora33
|
test: fedora33
|
||||||
- name: openSUSE 15 py2
|
- name: openSUSE 15 py3
|
||||||
test: opensuse15py2
|
test: opensuse15
|
||||||
|
- name: Ubuntu 20.04
|
||||||
|
test: ubuntu2004
|
||||||
groups:
|
groups:
|
||||||
- 2
|
- 2
|
||||||
- 3
|
- 3
|
||||||
@@ -361,8 +312,12 @@ stages:
|
|||||||
parameters:
|
parameters:
|
||||||
testFormat: 2.10/linux/{0}
|
testFormat: 2.10/linux/{0}
|
||||||
targets:
|
targets:
|
||||||
|
- name: CentOS 8
|
||||||
|
test: centos8
|
||||||
- name: Fedora 32
|
- name: Fedora 32
|
||||||
test: fedora32
|
test: fedora32
|
||||||
|
- name: openSUSE 15 py3
|
||||||
|
test: opensuse15
|
||||||
- name: Ubuntu 16.04
|
- name: Ubuntu 16.04
|
||||||
test: ubuntu1604
|
test: ubuntu1604
|
||||||
groups:
|
groups:
|
||||||
@@ -376,6 +331,8 @@ stages:
|
|||||||
parameters:
|
parameters:
|
||||||
testFormat: 2.9/linux/{0}
|
testFormat: 2.9/linux/{0}
|
||||||
targets:
|
targets:
|
||||||
|
- name: CentOS 8
|
||||||
|
test: centos8
|
||||||
- name: Fedora 31
|
- name: Fedora 31
|
||||||
test: fedora31
|
test: fedora31
|
||||||
- name: openSUSE 15 py3
|
- name: openSUSE 15 py3
|
||||||
@@ -393,17 +350,6 @@ stages:
|
|||||||
parameters:
|
parameters:
|
||||||
nameFormat: Python {0}
|
nameFormat: Python {0}
|
||||||
testFormat: devel/cloud/{0}/1
|
testFormat: devel/cloud/{0}/1
|
||||||
targets:
|
|
||||||
- test: 2.7
|
|
||||||
- test: 3.9
|
|
||||||
- stage: Cloud_2_12
|
|
||||||
displayName: Cloud 2.12
|
|
||||||
dependsOn: []
|
|
||||||
jobs:
|
|
||||||
- template: templates/matrix.yml
|
|
||||||
parameters:
|
|
||||||
nameFormat: Python {0}
|
|
||||||
testFormat: 2.12/cloud/{0}/1
|
|
||||||
targets:
|
targets:
|
||||||
- test: 3.8
|
- test: 3.8
|
||||||
- stage: Cloud_2_11
|
- stage: Cloud_2_11
|
||||||
@@ -415,6 +361,7 @@ stages:
|
|||||||
nameFormat: Python {0}
|
nameFormat: Python {0}
|
||||||
testFormat: 2.11/cloud/{0}/1
|
testFormat: 2.11/cloud/{0}/1
|
||||||
targets:
|
targets:
|
||||||
|
- test: 2.7
|
||||||
- test: 3.6
|
- test: 3.6
|
||||||
- stage: Cloud_2_10
|
- stage: Cloud_2_10
|
||||||
displayName: Cloud 2.10
|
displayName: Cloud 2.10
|
||||||
@@ -425,7 +372,7 @@ stages:
|
|||||||
nameFormat: Python {0}
|
nameFormat: Python {0}
|
||||||
testFormat: 2.10/cloud/{0}/1
|
testFormat: 2.10/cloud/{0}/1
|
||||||
targets:
|
targets:
|
||||||
- test: 3.5
|
- test: 3.6
|
||||||
- stage: Cloud_2_9
|
- stage: Cloud_2_9
|
||||||
displayName: Cloud 2.9
|
displayName: Cloud 2.9
|
||||||
dependsOn: []
|
dependsOn: []
|
||||||
@@ -435,7 +382,7 @@ stages:
|
|||||||
nameFormat: Python {0}
|
nameFormat: Python {0}
|
||||||
testFormat: 2.9/cloud/{0}/1
|
testFormat: 2.9/cloud/{0}/1
|
||||||
targets:
|
targets:
|
||||||
- test: 2.7
|
- test: 3.6
|
||||||
- stage: Summary
|
- stage: Summary
|
||||||
condition: succeededOrFailed()
|
condition: succeededOrFailed()
|
||||||
dependsOn:
|
dependsOn:
|
||||||
@@ -443,26 +390,21 @@ stages:
|
|||||||
- Sanity_2_9
|
- Sanity_2_9
|
||||||
- Sanity_2_10
|
- Sanity_2_10
|
||||||
- Sanity_2_11
|
- Sanity_2_11
|
||||||
- Sanity_2_12
|
|
||||||
- Units_devel
|
- Units_devel
|
||||||
- Units_2_9
|
- Units_2_9
|
||||||
- Units_2_10
|
- Units_2_10
|
||||||
- Units_2_11
|
- Units_2_11
|
||||||
- Units_2_12
|
|
||||||
- Remote_devel
|
- Remote_devel
|
||||||
- Remote_2_9
|
- Remote_2_9
|
||||||
- Remote_2_10
|
- Remote_2_10
|
||||||
- Remote_2_11
|
- Remote_2_11
|
||||||
- Remote_2_12
|
|
||||||
- Docker_devel
|
- Docker_devel
|
||||||
- Docker_2_9
|
- Docker_2_9
|
||||||
- Docker_2_10
|
- Docker_2_10
|
||||||
- Docker_2_11
|
- Docker_2_11
|
||||||
- Docker_2_12
|
|
||||||
- Cloud_devel
|
- Cloud_devel
|
||||||
- Cloud_2_9
|
- Cloud_2_9
|
||||||
- Cloud_2_10
|
- Cloud_2_10
|
||||||
- Cloud_2_11
|
- Cloud_2_11
|
||||||
- Cloud_2_12
|
|
||||||
jobs:
|
jobs:
|
||||||
- template: templates/coverage.yml
|
- template: templates/coverage.yml
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ mkdir "${agent_temp_directory}/coverage/"
|
|||||||
|
|
||||||
options=(--venv --venv-system-site-packages --color -v)
|
options=(--venv --venv-system-site-packages --color -v)
|
||||||
|
|
||||||
ansible-test coverage combine --group-by command --export "${agent_temp_directory}/coverage/" "${options[@]}"
|
ansible-test coverage combine --export "${agent_temp_directory}/coverage/" "${options[@]}"
|
||||||
|
|
||||||
if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then
|
if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then
|
||||||
# Only analyze coverage if the installed version of ansible-test supports it.
|
# Only analyze coverage if the installed version of ansible-test supports it.
|
||||||
|
|||||||
@@ -1,101 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
"""
|
|
||||||
Upload code coverage reports to codecov.io.
|
|
||||||
Multiple coverage files from multiple languages are accepted and aggregated after upload.
|
|
||||||
Python coverage, as well as PowerShell and Python stubs can all be uploaded.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
import dataclasses
|
|
||||||
import pathlib
|
|
||||||
import shutil
|
|
||||||
import subprocess
|
|
||||||
import tempfile
|
|
||||||
import typing as t
|
|
||||||
import urllib.request
|
|
||||||
|
|
||||||
|
|
||||||
@dataclasses.dataclass(frozen=True)
|
|
||||||
class CoverageFile:
|
|
||||||
name: str
|
|
||||||
path: pathlib.Path
|
|
||||||
flags: t.List[str]
|
|
||||||
|
|
||||||
|
|
||||||
@dataclasses.dataclass(frozen=True)
|
|
||||||
class Args:
|
|
||||||
dry_run: bool
|
|
||||||
path: pathlib.Path
|
|
||||||
|
|
||||||
|
|
||||||
def parse_args() -> Args:
|
|
||||||
parser = argparse.ArgumentParser()
|
|
||||||
parser.add_argument('-n', '--dry-run', action='store_true')
|
|
||||||
parser.add_argument('path', type=pathlib.Path)
|
|
||||||
|
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
# Store arguments in a typed dataclass
|
|
||||||
fields = dataclasses.fields(Args)
|
|
||||||
kwargs = {field.name: getattr(args, field.name) for field in fields}
|
|
||||||
|
|
||||||
return Args(**kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
def process_files(directory: pathlib.Path) -> t.Tuple[CoverageFile, ...]:
|
|
||||||
processed = []
|
|
||||||
for file in directory.joinpath('reports').glob('coverage*.xml'):
|
|
||||||
name = file.stem.replace('coverage=', '')
|
|
||||||
|
|
||||||
# Get flags from name
|
|
||||||
flags = name.replace('-powershell', '').split('=') # Drop '-powershell' suffix
|
|
||||||
flags = [flag if not flag.startswith('stub') else flag.split('-')[0] for flag in flags] # Remove "-01" from stub files
|
|
||||||
|
|
||||||
processed.append(CoverageFile(name, file, flags))
|
|
||||||
|
|
||||||
return tuple(processed)
|
|
||||||
|
|
||||||
|
|
||||||
def upload_files(codecov_bin: pathlib.Path, files: t.Tuple[CoverageFile, ...], dry_run: bool = False) -> None:
|
|
||||||
for file in files:
|
|
||||||
cmd = [
|
|
||||||
str(codecov_bin),
|
|
||||||
'--name', file.name,
|
|
||||||
'--file', str(file.path),
|
|
||||||
]
|
|
||||||
for flag in file.flags:
|
|
||||||
cmd.extend(['--flags', flag])
|
|
||||||
|
|
||||||
if dry_run:
|
|
||||||
print(f'DRY-RUN: Would run command: {cmd}')
|
|
||||||
continue
|
|
||||||
|
|
||||||
subprocess.run(cmd, check=True)
|
|
||||||
|
|
||||||
|
|
||||||
def download_file(url: str, dest: pathlib.Path, flags: int, dry_run: bool = False) -> None:
|
|
||||||
if dry_run:
|
|
||||||
print(f'DRY-RUN: Would download {url} to {dest} and set mode to {flags:o}')
|
|
||||||
return
|
|
||||||
|
|
||||||
with urllib.request.urlopen(url) as resp:
|
|
||||||
with dest.open('w+b') as f:
|
|
||||||
# Read data in chunks rather than all at once
|
|
||||||
shutil.copyfileobj(resp, f, 64 * 1024)
|
|
||||||
|
|
||||||
dest.chmod(flags)
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
|
||||||
args = parse_args()
|
|
||||||
url = 'https://ansible-ci-files.s3.amazonaws.com/codecov/linux/codecov'
|
|
||||||
with tempfile.TemporaryDirectory(prefix='codecov-') as tmpdir:
|
|
||||||
codecov_bin = pathlib.Path(tmpdir) / 'codecov'
|
|
||||||
download_file(url, codecov_bin, 0o755, args.dry_run)
|
|
||||||
|
|
||||||
files = process_files(args.path)
|
|
||||||
upload_files(codecov_bin, files, args.dry_run)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
main()
|
|
||||||
27
.azure-pipelines/scripts/publish-codecov.sh
Executable file
27
.azure-pipelines/scripts/publish-codecov.sh
Executable file
@@ -0,0 +1,27 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
# Upload code coverage reports to codecov.io.
|
||||||
|
# Multiple coverage files from multiple languages are accepted and aggregated after upload.
|
||||||
|
# Python coverage, as well as PowerShell and Python stubs can all be uploaded.
|
||||||
|
|
||||||
|
set -o pipefail -eu
|
||||||
|
|
||||||
|
output_path="$1"
|
||||||
|
|
||||||
|
curl --silent --show-error https://ansible-ci-files.s3.us-east-1.amazonaws.com/codecov/codecov.sh > codecov.sh
|
||||||
|
|
||||||
|
for file in "${output_path}"/reports/coverage*.xml; do
|
||||||
|
name="${file}"
|
||||||
|
name="${name##*/}" # remove path
|
||||||
|
name="${name##coverage=}" # remove 'coverage=' prefix if present
|
||||||
|
name="${name%.xml}" # remove '.xml' suffix
|
||||||
|
|
||||||
|
bash codecov.sh \
|
||||||
|
-f "${file}" \
|
||||||
|
-n "${name}" \
|
||||||
|
-X coveragepy \
|
||||||
|
-X gcov \
|
||||||
|
-X fix \
|
||||||
|
-X search \
|
||||||
|
-X xcode \
|
||||||
|
|| echo "Failed to upload code coverage report to codecov.io: ${file}"
|
||||||
|
done
|
||||||
@@ -12,4 +12,4 @@ if ! ansible-test --help >/dev/null 2>&1; then
|
|||||||
pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
|
pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
|
||||||
fi
|
fi
|
||||||
|
|
||||||
ansible-test coverage xml --group-by command --stub --venv --venv-system-site-packages --color -v
|
ansible-test coverage xml --stub --venv --venv-system-site-packages --color -v
|
||||||
|
|||||||
@@ -33,7 +33,7 @@ jobs:
|
|||||||
summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml"
|
summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml"
|
||||||
displayName: Publish to Azure Pipelines
|
displayName: Publish to Azure Pipelines
|
||||||
condition: gt(variables.coverageFileCount, 0)
|
condition: gt(variables.coverageFileCount, 0)
|
||||||
- bash: .azure-pipelines/scripts/publish-codecov.py "$(outputPath)"
|
- bash: .azure-pipelines/scripts/publish-codecov.sh "$(outputPath)"
|
||||||
displayName: Publish to codecov.io
|
displayName: Publish to codecov.io
|
||||||
condition: gt(variables.coverageFileCount, 0)
|
condition: gt(variables.coverageFileCount, 0)
|
||||||
continueOnError: true
|
continueOnError: true
|
||||||
|
|||||||
395
.github/BOTMETA.yml
vendored
395
.github/BOTMETA.yml
vendored
@@ -1,9 +1,5 @@
|
|||||||
notifications: true
|
|
||||||
automerge: true
|
automerge: true
|
||||||
files:
|
files:
|
||||||
plugins/:
|
|
||||||
supershipit: quidame
|
|
||||||
changelogs/: {}
|
|
||||||
changelogs/fragments/:
|
changelogs/fragments/:
|
||||||
support: community
|
support: community
|
||||||
$actions:
|
$actions:
|
||||||
@@ -14,69 +10,17 @@ files:
|
|||||||
maintainers: nitzmahone samdoran aminvakil
|
maintainers: nitzmahone samdoran aminvakil
|
||||||
$becomes/:
|
$becomes/:
|
||||||
labels: become
|
labels: become
|
||||||
$becomes/doas.py:
|
|
||||||
maintainers: $team_ansible_core
|
|
||||||
$becomes/dzdo.py:
|
|
||||||
maintainers: $team_ansible_core
|
|
||||||
$becomes/ksu.py:
|
|
||||||
maintainers: $team_ansible_core
|
|
||||||
$becomes/machinectl.py:
|
|
||||||
maintainers: $team_ansible_core
|
|
||||||
$becomes/pbrun.py:
|
|
||||||
maintainers: $team_ansible_core
|
|
||||||
$becomes/pfexec.py:
|
|
||||||
maintainers: $team_ansible_core
|
|
||||||
$becomes/pmrun.py:
|
|
||||||
maintainers: $team_ansible_core
|
|
||||||
$becomes/sesu.py:
|
|
||||||
maintainers: nekonyuu
|
|
||||||
$becomes/sudosu.py:
|
|
||||||
maintainers: dagwieers
|
|
||||||
$caches/:
|
|
||||||
labels: cache
|
|
||||||
$caches/memcached.py: {}
|
|
||||||
$caches/pickle.py:
|
|
||||||
maintainers: bcoca
|
|
||||||
$caches/redis.py: {}
|
|
||||||
$caches/yaml.py:
|
|
||||||
maintainers: bcoca
|
|
||||||
$callbacks/:
|
$callbacks/:
|
||||||
labels: callbacks
|
labels: callbacks
|
||||||
$callbacks/cgroup_memory_recap.py: {}
|
|
||||||
$callbacks/context_demo.py: {}
|
|
||||||
$callbacks/counter_enabled.py: {}
|
|
||||||
$callbacks/dense.py:
|
|
||||||
maintainers: dagwieers
|
|
||||||
$callbacks/diy.py:
|
|
||||||
maintainers: theque5t
|
|
||||||
$callbacks/elastic.py:
|
|
||||||
maintainers: v1v
|
|
||||||
keywords: apm observability
|
|
||||||
$callbacks/hipchat.py: {}
|
|
||||||
$callbacks/jabber.py: {}
|
|
||||||
$callbacks/loganalytics.py:
|
$callbacks/loganalytics.py:
|
||||||
maintainers: zhcli
|
maintainers: zhcli
|
||||||
$callbacks/logdna.py: {}
|
|
||||||
$callbacks/logentries.py: {}
|
|
||||||
$callbacks/log_plays.py: {}
|
|
||||||
$callbacks/logstash.py:
|
$callbacks/logstash.py:
|
||||||
maintainers: ujenmr
|
maintainers: ujenmr
|
||||||
$callbacks/mail.py:
|
|
||||||
maintainers: dagwieers
|
|
||||||
$callbacks/nrdp.py:
|
|
||||||
maintainers: rverchere
|
|
||||||
$callbacks/null.py: {}
|
|
||||||
$callbacks/opentelemetry.py:
|
|
||||||
maintainers: v1v
|
|
||||||
keywords: opentelemetry observability
|
|
||||||
$callbacks/say.py:
|
$callbacks/say.py:
|
||||||
notify: chris-short
|
notify: chris-short
|
||||||
maintainers: $team_macos
|
maintainers: $team_macos
|
||||||
labels: macos say
|
labels: macos say
|
||||||
keywords: brew cask darwin homebrew macosx macports osx
|
keywords: brew cask darwin homebrew macosx macports osx
|
||||||
$callbacks/selective.py: {}
|
|
||||||
$callbacks/slack.py: {}
|
|
||||||
$callbacks/splunk.py: {}
|
|
||||||
$callbacks/sumologic.py:
|
$callbacks/sumologic.py:
|
||||||
maintainers: ryancurrah
|
maintainers: ryancurrah
|
||||||
labels: sumologic
|
labels: sumologic
|
||||||
@@ -85,26 +29,16 @@ files:
|
|||||||
$callbacks/unixy.py:
|
$callbacks/unixy.py:
|
||||||
maintainers: akatch
|
maintainers: akatch
|
||||||
labels: unixy
|
labels: unixy
|
||||||
$callbacks/yaml.py: {}
|
|
||||||
$connections/:
|
$connections/:
|
||||||
labels: connections
|
labels: connections
|
||||||
$connections/chroot.py: {}
|
$connections/kubectl.py:
|
||||||
$connections/funcd.py:
|
maintainers: chouseknecht fabianvf flaper87 maxamillion
|
||||||
maintainers: mscherer
|
labels: k8s kubectl
|
||||||
$connections/iocage.py: {}
|
|
||||||
$connections/jail.py:
|
|
||||||
maintainers: $team_ansible_core
|
|
||||||
$connections/lxc.py: {}
|
|
||||||
$connections/lxd.py:
|
$connections/lxd.py:
|
||||||
maintainers: mattclay
|
maintainers: mattclay
|
||||||
labels: lxd
|
labels: lxd
|
||||||
$connections/qubes.py:
|
|
||||||
maintainers: kushaldas
|
|
||||||
$connections/saltstack.py:
|
$connections/saltstack.py:
|
||||||
maintainers: mscherer
|
|
||||||
labels: saltstack
|
labels: saltstack
|
||||||
$connections/zone.py:
|
|
||||||
maintainers: $team_ansible_core
|
|
||||||
$doc_fragments/:
|
$doc_fragments/:
|
||||||
labels: docs_fragments
|
labels: docs_fragments
|
||||||
$doc_fragments/hpe3par.py:
|
$doc_fragments/hpe3par.py:
|
||||||
@@ -124,112 +58,65 @@ files:
|
|||||||
maintainers: giner
|
maintainers: giner
|
||||||
$filters/from_csv.py:
|
$filters/from_csv.py:
|
||||||
maintainers: Ajpantuso
|
maintainers: Ajpantuso
|
||||||
$filters/groupby:
|
|
||||||
maintainers: felixfontein
|
|
||||||
$filters/hashids:
|
$filters/hashids:
|
||||||
maintainers: Ajpantuso
|
maintainers: Ajpantuso
|
||||||
$filters/jc.py:
|
$filters/jc.py:
|
||||||
maintainers: kellyjonbrazil
|
maintainers: kellyjonbrazil
|
||||||
$filters/json_query.py: {}
|
|
||||||
$filters/list.py:
|
$filters/list.py:
|
||||||
maintainers: vbotka
|
maintainers: vbotka
|
||||||
$filters/path_join_shim.py:
|
$filters/path_join_shim.py:
|
||||||
maintainers: felixfontein
|
maintainers: felixfontein
|
||||||
$filters/random_mac.py: {}
|
|
||||||
$filters/time.py:
|
$filters/time.py:
|
||||||
maintainers: resmo
|
maintainers: resmo
|
||||||
$filters/unicode_normalize.py:
|
|
||||||
maintainers: Ajpantuso
|
|
||||||
$filters/version_sort.py:
|
$filters/version_sort.py:
|
||||||
maintainers: ericzolf
|
maintainers: ericzolf
|
||||||
|
$httpapis/:
|
||||||
|
maintainers: $team_networking
|
||||||
|
labels: networking
|
||||||
|
$httpapis/ftd.py:
|
||||||
|
maintainers: $team_networking annikulin
|
||||||
|
labels: cisco ftd networking
|
||||||
|
keywords: firepower ftd
|
||||||
$inventories/:
|
$inventories/:
|
||||||
labels: inventories
|
labels: inventories
|
||||||
$inventories/cobbler.py:
|
|
||||||
maintainers: opoplawski
|
|
||||||
$inventories/gitlab_runners.py:
|
|
||||||
maintainers: morph027
|
|
||||||
$inventories/linode.py:
|
$inventories/linode.py:
|
||||||
maintainers: $team_linode
|
maintainers: $team_linode
|
||||||
labels: cloud linode
|
labels: cloud linode
|
||||||
keywords: linode dynamic inventory script
|
keywords: linode dynamic inventory script
|
||||||
$inventories/lxd.py:
|
$inventories/lxd.py:
|
||||||
maintainers: conloos
|
maintainers: conloos
|
||||||
$inventories/nmap.py: {}
|
|
||||||
$inventories/online.py:
|
|
||||||
maintainers: remyleone
|
|
||||||
$inventories/opennebula.py:
|
|
||||||
maintainers: feldsam
|
|
||||||
labels: cloud opennebula
|
|
||||||
keywords: opennebula dynamic inventory script
|
|
||||||
$inventories/proxmox.py:
|
$inventories/proxmox.py:
|
||||||
maintainers: $team_virt ilijamt
|
maintainers: $team_virt ilijamt
|
||||||
$inventories/xen_orchestra.py:
|
|
||||||
maintainers: shinuza
|
|
||||||
$inventories/icinga2.py:
|
|
||||||
maintainers: bongoeadgc6
|
|
||||||
$inventories/scaleway.py:
|
$inventories/scaleway.py:
|
||||||
maintainers: $team_scaleway
|
maintainers: $team_scaleway
|
||||||
labels: cloud scaleway
|
labels: cloud scaleway
|
||||||
$inventories/stackpath_compute.py:
|
|
||||||
maintainers: shayrybak
|
|
||||||
$inventories/virtualbox.py: {}
|
|
||||||
$lookups/:
|
$lookups/:
|
||||||
labels: lookups
|
labels: lookups
|
||||||
$lookups/cartesian.py: {}
|
|
||||||
$lookups/chef_databag.py: {}
|
|
||||||
$lookups/collection_version.py:
|
|
||||||
maintainers: felixfontein
|
|
||||||
$lookups/consul_kv.py: {}
|
|
||||||
$lookups/credstash.py: {}
|
|
||||||
$lookups/cyberarkpassword.py:
|
|
||||||
notify: cyberark-bizdev
|
|
||||||
labels: cyberarkpassword
|
|
||||||
$lookups/dependent.py:
|
|
||||||
maintainers: felixfontein
|
|
||||||
$lookups/dig.py:
|
|
||||||
maintainers: jpmens
|
|
||||||
labels: dig
|
|
||||||
$lookups/dnstxt.py:
|
|
||||||
maintainers: jpmens
|
|
||||||
$lookups/dsv.py:
|
|
||||||
maintainers: amigus endlesstrax
|
|
||||||
$lookups/etcd3.py:
|
|
||||||
maintainers: eric-belhomme
|
|
||||||
$lookups/etcd.py:
|
|
||||||
maintainers: jpmens
|
|
||||||
$lookups/filetree.py:
|
|
||||||
maintainers: dagwieers
|
|
||||||
$lookups/flattened.py: {}
|
|
||||||
$lookups/hiera.py:
|
|
||||||
maintainers: jparrill
|
|
||||||
$lookups/keyring.py: {}
|
|
||||||
$lookups/lastpass.py: {}
|
|
||||||
$lookups/lmdb_kv.py:
|
|
||||||
maintainers: jpmens
|
|
||||||
$lookups/manifold.py:
|
|
||||||
maintainers: galanoff
|
|
||||||
labels: manifold
|
|
||||||
$lookups/onepass:
|
$lookups/onepass:
|
||||||
maintainers: samdoran
|
maintainers: samdoran
|
||||||
labels: onepassword
|
labels: onepassword
|
||||||
$lookups/onepassword.py:
|
$lookups/conjur_variable.py:
|
||||||
maintainers: azenk scottsb
|
notify: cyberark-bizdev
|
||||||
$lookups/onepassword_raw.py:
|
maintainers: $team_cyberark_conjur
|
||||||
maintainers: azenk scottsb
|
labels: conjur_variable
|
||||||
$lookups/passwordstore.py: {}
|
$lookups/cyberarkpassword.py:
|
||||||
$lookups/random_pet.py:
|
notify: cyberark-bizdev
|
||||||
maintainers: Akasurde
|
labels: cyberarkpassword
|
||||||
|
$lookups/dig.py:
|
||||||
|
maintainers: jpmens
|
||||||
|
labels: dig
|
||||||
|
$lookups/tss.py:
|
||||||
|
maintainers: amigus
|
||||||
|
$lookups/dsv.py:
|
||||||
|
maintainers: amigus
|
||||||
|
$lookups/manifold.py:
|
||||||
|
maintainers: galanoff
|
||||||
|
labels: manifold
|
||||||
|
$lookups/nios:
|
||||||
|
maintainers: $team_networking sganesh-infoblox
|
||||||
|
labels: infoblox networking
|
||||||
$lookups/random_string.py:
|
$lookups/random_string.py:
|
||||||
maintainers: Akasurde
|
maintainers: Akasurde
|
||||||
$lookups/random_words.py:
|
|
||||||
maintainers: konstruktoid
|
|
||||||
$lookups/redis.py:
|
|
||||||
maintainers: $team_ansible_core jpmens
|
|
||||||
$lookups/revbitspss.py:
|
|
||||||
maintainers: RevBits
|
|
||||||
$lookups/shelvefile.py: {}
|
|
||||||
$lookups/tss.py:
|
|
||||||
maintainers: amigus endlesstrax
|
|
||||||
$module_utils/:
|
$module_utils/:
|
||||||
labels: module_utils
|
labels: module_utils
|
||||||
$module_utils/gitlab.py:
|
$module_utils/gitlab.py:
|
||||||
@@ -258,6 +145,9 @@ files:
|
|||||||
$module_utils/module_helper.py:
|
$module_utils/module_helper.py:
|
||||||
maintainers: russoz
|
maintainers: russoz
|
||||||
labels: module_helper
|
labels: module_helper
|
||||||
|
$module_utils/net_tools/nios/api.py:
|
||||||
|
maintainers: $team_networking sganesh-infoblox
|
||||||
|
labels: infoblox networking
|
||||||
$module_utils/oracle/oci_utils.py:
|
$module_utils/oracle/oci_utils.py:
|
||||||
maintainers: $team_oracle
|
maintainers: $team_oracle
|
||||||
labels: cloud
|
labels: cloud
|
||||||
@@ -267,13 +157,11 @@ files:
|
|||||||
$module_utils/redfish_utils.py:
|
$module_utils/redfish_utils.py:
|
||||||
maintainers: $team_redfish
|
maintainers: $team_redfish
|
||||||
labels: redfish_utils
|
labels: redfish_utils
|
||||||
$module_utils/remote_management/lxca/common.py:
|
$module_utils/remote_management/lxca/common.py: navalkp prabhosa
|
||||||
maintainers: navalkp prabhosa
|
|
||||||
$module_utils/scaleway.py:
|
$module_utils/scaleway.py:
|
||||||
maintainers: $team_scaleway
|
maintainers: $team_scaleway
|
||||||
labels: cloud scaleway
|
labels: cloud scaleway
|
||||||
$module_utils/storage/hpe3par/hpe3par.py:
|
$module_utils/storage/hpe3par/hpe3par.py: farhan7500 gautamphegde
|
||||||
maintainers: farhan7500 gautamphegde
|
|
||||||
$module_utils/utm_utils.py:
|
$module_utils/utm_utils.py:
|
||||||
maintainers: $team_e_spirit
|
maintainers: $team_e_spirit
|
||||||
labels: utm_utils
|
labels: utm_utils
|
||||||
@@ -304,27 +192,33 @@ files:
|
|||||||
maintainers: zbal
|
maintainers: zbal
|
||||||
$modules/cloud/lxc/lxc_container.py:
|
$modules/cloud/lxc/lxc_container.py:
|
||||||
maintainers: cloudnull
|
maintainers: cloudnull
|
||||||
|
$modules/cloud/lxc/lxc_profile.py:
|
||||||
|
maintainers: conloos
|
||||||
$modules/cloud/lxd/:
|
$modules/cloud/lxd/:
|
||||||
ignore: hnakamur
|
ignore: hnakamur
|
||||||
$modules/cloud/lxd/lxd_profile.py:
|
|
||||||
maintainers: conloos
|
|
||||||
$modules/cloud/memset/:
|
$modules/cloud/memset/:
|
||||||
maintainers: glitchcrab
|
maintainers: glitchcrab
|
||||||
$modules/cloud/misc/cloud_init_data_facts.py:
|
$modules/cloud/misc/cloud_init_data_facts.py:
|
||||||
maintainers: resmo
|
maintainers: resmo
|
||||||
$modules/cloud/misc/proxmox:
|
$modules/cloud/misc/proxmox.py:
|
||||||
|
maintainers: $team_virt UnderGreen
|
||||||
|
labels: proxmox virt
|
||||||
|
ignore: skvidal
|
||||||
|
keywords: kvm libvirt proxmox qemu
|
||||||
|
$modules/cloud/misc/proxmox_kvm.py:
|
||||||
|
maintainers: $team_virt helldorado
|
||||||
|
labels: proxmox_kvm virt
|
||||||
|
ignore: skvidal
|
||||||
|
keywords: kvm libvirt proxmox qemu
|
||||||
|
$modules/cloud/misc/proxmox_snap.py:
|
||||||
maintainers: $team_virt
|
maintainers: $team_virt
|
||||||
labels: proxmox virt
|
labels: proxmox virt
|
||||||
keywords: kvm libvirt proxmox qemu
|
keywords: kvm libvirt proxmox qemu
|
||||||
$modules/cloud/misc/proxmox.py:
|
|
||||||
maintainers: UnderGreen
|
|
||||||
ignore: skvidal
|
|
||||||
$modules/cloud/misc/proxmox_kvm.py:
|
|
||||||
maintainers: helldorado
|
|
||||||
ignore: skvidal
|
|
||||||
$modules/cloud/misc/proxmox_template.py:
|
$modules/cloud/misc/proxmox_template.py:
|
||||||
maintainers: UnderGreen
|
maintainers: $team_virt UnderGreen
|
||||||
|
labels: proxmox_template virt
|
||||||
ignore: skvidal
|
ignore: skvidal
|
||||||
|
keywords: kvm libvirt proxmox qemu
|
||||||
$modules/cloud/misc/rhevm.py:
|
$modules/cloud/misc/rhevm.py:
|
||||||
maintainers: $team_virt TimothyVandenbrande
|
maintainers: $team_virt TimothyVandenbrande
|
||||||
labels: rhevm virt
|
labels: rhevm virt
|
||||||
@@ -341,7 +235,7 @@ files:
|
|||||||
$modules/cloud/oneandone/:
|
$modules/cloud/oneandone/:
|
||||||
maintainers: aajdinov edevenport
|
maintainers: aajdinov edevenport
|
||||||
$modules/cloud/online/:
|
$modules/cloud/online/:
|
||||||
maintainers: remyleone
|
maintainers: sieben
|
||||||
$modules/cloud/opennebula/:
|
$modules/cloud/opennebula/:
|
||||||
maintainers: $team_opennebula
|
maintainers: $team_opennebula
|
||||||
$modules/cloud/opennebula/one_host.py:
|
$modules/cloud/opennebula/one_host.py:
|
||||||
@@ -366,40 +260,16 @@ files:
|
|||||||
maintainers: omgjlk sivel
|
maintainers: omgjlk sivel
|
||||||
$modules/cloud/rackspace/:
|
$modules/cloud/rackspace/:
|
||||||
ignore: ryansb sivel
|
ignore: ryansb sivel
|
||||||
$modules/cloud/rackspace/rax_cbs.py:
|
|
||||||
maintainers: claco
|
|
||||||
$modules/cloud/rackspace/rax_cbs_attachments.py:
|
|
||||||
maintainers: claco
|
|
||||||
$modules/cloud/rackspace/rax_cdb.py:
|
|
||||||
maintainers: jails
|
|
||||||
$modules/cloud/rackspace/rax_cdb_user.py:
|
|
||||||
maintainers: jails
|
|
||||||
$modules/cloud/rackspace/rax_cdb_database.py:
|
|
||||||
maintainers: jails
|
|
||||||
$modules/cloud/rackspace/rax_clb.py:
|
$modules/cloud/rackspace/rax_clb.py:
|
||||||
maintainers: claco
|
maintainers: claco
|
||||||
$modules/cloud/rackspace/rax_clb_nodes.py:
|
$modules/cloud/rackspace/rax_clb_nodes.py:
|
||||||
maintainers: neuroid
|
maintainers: neuroid
|
||||||
$modules/cloud/rackspace/rax_clb_ssl.py:
|
$modules/cloud/rackspace/rax_clb_ssl.py:
|
||||||
maintainers: smashwilson
|
maintainers: smashwilson
|
||||||
$modules/cloud/rackspace/rax_files.py:
|
|
||||||
maintainers: angstwad
|
|
||||||
$modules/cloud/rackspace/rax_files_objects.py:
|
|
||||||
maintainers: angstwad
|
|
||||||
$modules/cloud/rackspace/rax_identity.py:
|
$modules/cloud/rackspace/rax_identity.py:
|
||||||
maintainers: claco
|
maintainers: claco
|
||||||
$modules/cloud/rackspace/rax_network.py:
|
$modules/cloud/rackspace/rax_network.py:
|
||||||
maintainers: claco omgjlk
|
maintainers: claco omgjlk
|
||||||
$modules/cloud/rackspace/rax_mon_alarm.py:
|
|
||||||
maintainers: smashwilson
|
|
||||||
$modules/cloud/rackspace/rax_mon_check.py:
|
|
||||||
maintainers: smashwilson
|
|
||||||
$modules/cloud/rackspace/rax_mon_entity.py:
|
|
||||||
maintainers: smashwilson
|
|
||||||
$modules/cloud/rackspace/rax_mon_notification.py:
|
|
||||||
maintainers: smashwilson
|
|
||||||
$modules/cloud/rackspace/rax_mon_notification_plan.py:
|
|
||||||
maintainers: smashwilson
|
|
||||||
$modules/cloud/rackspace/rax_queue.py:
|
$modules/cloud/rackspace/rax_queue.py:
|
||||||
maintainers: claco
|
maintainers: claco
|
||||||
$modules/cloud/scaleway/:
|
$modules/cloud/scaleway/:
|
||||||
@@ -411,17 +281,13 @@ files:
|
|||||||
$modules/cloud/scaleway/scaleway_ip_info.py:
|
$modules/cloud/scaleway/scaleway_ip_info.py:
|
||||||
maintainers: Spredzy
|
maintainers: Spredzy
|
||||||
$modules/cloud/scaleway/scaleway_organization_info.py:
|
$modules/cloud/scaleway/scaleway_organization_info.py:
|
||||||
maintainers: Spredzy
|
maintainers: sieben
|
||||||
$modules/cloud/scaleway/scaleway_security_group.py:
|
$modules/cloud/scaleway/scaleway_security_group.py:
|
||||||
maintainers: DenBeke
|
maintainers: DenBeke
|
||||||
$modules/cloud/scaleway/scaleway_security_group_info.py:
|
$modules/cloud/scaleway/scaleway_security_group_info.py:
|
||||||
maintainers: Spredzy
|
maintainers: sieben
|
||||||
$modules/cloud/scaleway/scaleway_security_group_rule.py:
|
$modules/cloud/scaleway/scaleway_security_group_rule.py:
|
||||||
maintainers: DenBeke
|
maintainers: DenBeke
|
||||||
$modules/cloud/scaleway/scaleway_server_info.py:
|
|
||||||
maintainers: Spredzy
|
|
||||||
$modules/cloud/scaleway/scaleway_snapshot_info.py:
|
|
||||||
maintainers: Spredzy
|
|
||||||
$modules/cloud/scaleway/scaleway_volume.py:
|
$modules/cloud/scaleway/scaleway_volume.py:
|
||||||
labels: scaleway_volume
|
labels: scaleway_volume
|
||||||
ignore: hekonsek
|
ignore: hekonsek
|
||||||
@@ -473,22 +339,11 @@ files:
|
|||||||
maintainers: john-westcott-iv
|
maintainers: john-westcott-iv
|
||||||
$modules/database/misc/redis.py:
|
$modules/database/misc/redis.py:
|
||||||
maintainers: slok
|
maintainers: slok
|
||||||
$modules/database/misc/redis_info.py:
|
|
||||||
maintainers: levonet
|
|
||||||
$modules/database/misc/redis_data_info.py:
|
|
||||||
maintainers: paginabianca
|
|
||||||
$modules/database/misc/redis_data.py:
|
|
||||||
maintainers: paginabianca
|
|
||||||
$modules/database/misc/redis_data_incr.py:
|
|
||||||
maintainers: paginabianca
|
|
||||||
$modules/database/misc/riak.py:
|
$modules/database/misc/riak.py:
|
||||||
maintainers: drewkerrigan jsmartin
|
maintainers: drewkerrigan jsmartin
|
||||||
$modules/database/mssql/mssql_db.py:
|
$modules/database/mssql/mssql_db.py:
|
||||||
maintainers: vedit Jmainguy kenichi-ogawa-1988
|
maintainers: vedit Jmainguy kenichi-ogawa-1988
|
||||||
labels: mssql_db
|
labels: mssql_db
|
||||||
$modules/database/mssql/mssql_script.py:
|
|
||||||
maintainers: kbudde
|
|
||||||
labels: mssql_script
|
|
||||||
$modules/database/saphana/hana_query.py:
|
$modules/database/saphana/hana_query.py:
|
||||||
maintainers: rainerleber
|
maintainers: rainerleber
|
||||||
$modules/database/vertica/:
|
$modules/database/vertica/:
|
||||||
@@ -499,14 +354,10 @@ files:
|
|||||||
maintainers: quidame
|
maintainers: quidame
|
||||||
$modules/files/ini_file.py:
|
$modules/files/ini_file.py:
|
||||||
maintainers: jpmens noseka1
|
maintainers: jpmens noseka1
|
||||||
$modules/files/iso_create.py:
|
|
||||||
maintainers: Tomorrow9
|
|
||||||
$modules/files/iso_extract.py:
|
$modules/files/iso_extract.py:
|
||||||
maintainers: dagwieers jhoekx ribbons
|
maintainers: dagwieers jhoekx ribbons
|
||||||
$modules/files/read_csv.py:
|
$modules/files/read_csv.py:
|
||||||
maintainers: dagwieers
|
maintainers: dagwieers
|
||||||
$modules/files/sapcar_extract.py:
|
|
||||||
maintainers: RainerLeber
|
|
||||||
$modules/files/xattr.py:
|
$modules/files/xattr.py:
|
||||||
maintainers: bcoca
|
maintainers: bcoca
|
||||||
labels: xattr
|
labels: xattr
|
||||||
@@ -524,28 +375,15 @@ files:
|
|||||||
maintainers: jparrill
|
maintainers: jparrill
|
||||||
$modules/identity/keycloak/:
|
$modules/identity/keycloak/:
|
||||||
maintainers: $team_keycloak
|
maintainers: $team_keycloak
|
||||||
$modules/identity/keycloak/keycloak_authentication.py:
|
|
||||||
maintainers: elfelip Gaetan2907
|
|
||||||
$modules/identity/keycloak/keycloak_clientscope.py:
|
|
||||||
maintainers: Gaetan2907
|
|
||||||
$modules/identity/keycloak/keycloak_client_rolemapping.py:
|
|
||||||
maintainers: Gaetan2907
|
|
||||||
$modules/identity/keycloak/keycloak_group.py:
|
$modules/identity/keycloak/keycloak_group.py:
|
||||||
maintainers: adamgoossens
|
maintainers: adamgoossens
|
||||||
$modules/identity/keycloak/keycloak_identity_provider.py:
|
|
||||||
maintainers: laurpaum
|
|
||||||
$modules/identity/keycloak/keycloak_realm.py:
|
$modules/identity/keycloak/keycloak_realm.py:
|
||||||
maintainers: kris2kris
|
maintainers: kris2kris
|
||||||
$modules/identity/keycloak/keycloak_role.py:
|
|
||||||
maintainers: laurpaum
|
|
||||||
$modules/identity/keycloak/keycloak_user_federation.py:
|
|
||||||
maintainers: laurpaum
|
|
||||||
$modules/identity/onepassword_info.py:
|
$modules/identity/onepassword_info.py:
|
||||||
maintainers: Rylon
|
maintainers: Rylon
|
||||||
$modules/identity/opendj/opendj_backendprop.py:
|
$modules/identity/opendj/opendj_backendprop.py:
|
||||||
maintainers: dj-wasabi
|
maintainers: dj-wasabi
|
||||||
$modules/monitoring/airbrake_deployment.py:
|
$modules/monitoring/airbrake_deployment.py:
|
||||||
maintainers: phumpal
|
|
||||||
labels: airbrake_deployment
|
labels: airbrake_deployment
|
||||||
ignore: bpennypacker
|
ignore: bpennypacker
|
||||||
$modules/monitoring/bigpanda.py:
|
$modules/monitoring/bigpanda.py:
|
||||||
@@ -556,8 +394,6 @@ files:
|
|||||||
maintainers: n0ts
|
maintainers: n0ts
|
||||||
labels: datadog_event
|
labels: datadog_event
|
||||||
ignore: arturaz
|
ignore: arturaz
|
||||||
$modules/monitoring/datadog/datadog_downtime.py:
|
|
||||||
maintainers: Datadog
|
|
||||||
$modules/monitoring/datadog/datadog_monitor.py:
|
$modules/monitoring/datadog/datadog_monitor.py:
|
||||||
maintainers: skornehl
|
maintainers: skornehl
|
||||||
$modules/monitoring/honeybadger_deployment.py:
|
$modules/monitoring/honeybadger_deployment.py:
|
||||||
@@ -619,14 +455,12 @@ files:
|
|||||||
labels: cloudflare_dns
|
labels: cloudflare_dns
|
||||||
$modules/net_tools/dnsimple.py:
|
$modules/net_tools/dnsimple.py:
|
||||||
maintainers: drcapulet
|
maintainers: drcapulet
|
||||||
$modules/net_tools/dnsimple_info.py:
|
|
||||||
maintainers: edhilgendorf
|
|
||||||
$modules/net_tools/dnsmadeeasy.py:
|
$modules/net_tools/dnsmadeeasy.py:
|
||||||
maintainers: briceburg
|
maintainers: briceburg
|
||||||
$modules/net_tools/gandi_livedns.py:
|
|
||||||
maintainers: gthiemonge
|
|
||||||
$modules/net_tools/haproxy.py:
|
$modules/net_tools/haproxy.py:
|
||||||
maintainers: ravibhure Normo
|
maintainers: ravibhure Normo
|
||||||
|
$modules/net_tools/:
|
||||||
|
maintainers: nerzhul
|
||||||
$modules/net_tools/infinity/infinity.py:
|
$modules/net_tools/infinity/infinity.py:
|
||||||
maintainers: MeganLiu
|
maintainers: MeganLiu
|
||||||
$modules/net_tools/ip_netns.py:
|
$modules/net_tools/ip_netns.py:
|
||||||
@@ -650,26 +484,37 @@ files:
|
|||||||
ignore: andyhky
|
ignore: andyhky
|
||||||
$modules/net_tools/netcup_dns.py:
|
$modules/net_tools/netcup_dns.py:
|
||||||
maintainers: nbuchwitz
|
maintainers: nbuchwitz
|
||||||
$modules/net_tools/nsupdate.py:
|
|
||||||
maintainers: nerzhul
|
|
||||||
$modules/net_tools/omapi_host.py:
|
$modules/net_tools/omapi_host.py:
|
||||||
maintainers: amasolov nerzhul
|
maintainers: amasolov
|
||||||
$modules/net_tools/pritunl/:
|
$modules/net_tools/nios/:
|
||||||
maintainers: Lowess
|
maintainers: $team_networking
|
||||||
|
labels: infoblox networking
|
||||||
|
$modules/net_tools/nios/nios_fixed_address.py:
|
||||||
|
maintainers: sjaiswal
|
||||||
|
$modules/net_tools/nios/nios_nsgroup.py:
|
||||||
|
maintainers: ebirn sjaiswal
|
||||||
|
$modules/net_tools/nios/nios_ptr_record.py:
|
||||||
|
maintainers: clementtrebuchet
|
||||||
|
$modules/net_tools/nios/nios_srv_record.py:
|
||||||
|
maintainers: brampling
|
||||||
|
$modules/net_tools/nios/nios_txt_record.py:
|
||||||
|
maintainers: coreywan
|
||||||
$modules/net_tools/nmcli.py:
|
$modules/net_tools/nmcli.py:
|
||||||
maintainers: alcamie101
|
maintainers: alcamie101
|
||||||
$modules/net_tools/snmp_facts.py:
|
$modules/net_tools/snmp_facts.py:
|
||||||
maintainers: ogenstad ujwalkomarla
|
maintainers: ogenstad ujwalkomarla
|
||||||
|
$modules/notification/osx_say.py:
|
||||||
|
maintainers: ansible mpdehaan
|
||||||
|
labels: _osx_say
|
||||||
|
deprecated: true
|
||||||
$modules/notification/bearychat.py:
|
$modules/notification/bearychat.py:
|
||||||
maintainers: tonyseek
|
maintainers: tonyseek
|
||||||
$modules/notification/campfire.py:
|
$modules/notification/campfire.py:
|
||||||
maintainers: fabulops
|
maintainers: fabulops
|
||||||
$modules/notification/catapult.py:
|
$modules/notification/catapult.py:
|
||||||
maintainers: Jmainguy
|
maintainers: Jmainguy
|
||||||
$modules/notification/cisco_webex.py:
|
$modules/notification/cisco_spark.py:
|
||||||
maintainers: drew-russell
|
maintainers: drew-russell
|
||||||
$modules/notification/discord.py:
|
|
||||||
maintainers: cwollinger
|
|
||||||
$modules/notification/flowdock.py:
|
$modules/notification/flowdock.py:
|
||||||
maintainers: mcodd
|
maintainers: mcodd
|
||||||
$modules/notification/grove.py:
|
$modules/notification/grove.py:
|
||||||
@@ -697,13 +542,13 @@ files:
|
|||||||
$modules/notification/pushbullet.py:
|
$modules/notification/pushbullet.py:
|
||||||
maintainers: willybarro
|
maintainers: willybarro
|
||||||
$modules/notification/pushover.py:
|
$modules/notification/pushover.py:
|
||||||
maintainers: weaselkeeper wopfel
|
maintainers: weaselkeeper
|
||||||
$modules/notification/rocketchat.py:
|
$modules/notification/rocketchat.py:
|
||||||
maintainers: Deepakkothandan
|
maintainers: Deepakkothandan
|
||||||
labels: rocketchat
|
labels: rocketchat
|
||||||
ignore: ramondelafuente
|
ignore: ramondelafuente
|
||||||
$modules/notification/say.py:
|
$modules/notification/say.py:
|
||||||
maintainers: $team_ansible_core mpdehaan
|
maintainers: ansible mpdehaan
|
||||||
$modules/notification/sendgrid.py:
|
$modules/notification/sendgrid.py:
|
||||||
maintainers: makaimc
|
maintainers: makaimc
|
||||||
$modules/notification/slack.py:
|
$modules/notification/slack.py:
|
||||||
@@ -711,13 +556,11 @@ files:
|
|||||||
$modules/notification/syslogger.py:
|
$modules/notification/syslogger.py:
|
||||||
maintainers: garbled1
|
maintainers: garbled1
|
||||||
$modules/notification/telegram.py:
|
$modules/notification/telegram.py:
|
||||||
maintainers: tyouxa loms lomserman
|
maintainers: tyouxa loms
|
||||||
$modules/notification/twilio.py:
|
$modules/notification/twilio.py:
|
||||||
maintainers: makaimc
|
maintainers: makaimc
|
||||||
$modules/notification/typetalk.py:
|
$modules/notification/typetalk.py:
|
||||||
maintainers: tksmd
|
maintainers: tksmd
|
||||||
$modules/packaging/language/ansible_galaxy_install.py:
|
|
||||||
maintainers: russoz
|
|
||||||
$modules/packaging/language/bower.py:
|
$modules/packaging/language/bower.py:
|
||||||
maintainers: mwarkentin
|
maintainers: mwarkentin
|
||||||
$modules/packaging/language/bundler.py:
|
$modules/packaging/language/bundler.py:
|
||||||
@@ -730,7 +573,7 @@ files:
|
|||||||
$modules/packaging/language/easy_install.py:
|
$modules/packaging/language/easy_install.py:
|
||||||
maintainers: mattupstate
|
maintainers: mattupstate
|
||||||
$modules/packaging/language/gem.py:
|
$modules/packaging/language/gem.py:
|
||||||
maintainers: $team_ansible_core johanwiren
|
maintainers: ansible johanwiren
|
||||||
labels: gem
|
labels: gem
|
||||||
$modules/packaging/language/maven_artifact.py:
|
$modules/packaging/language/maven_artifact.py:
|
||||||
maintainers: tumbl3w33d turb
|
maintainers: tumbl3w33d turb
|
||||||
@@ -745,22 +588,16 @@ files:
|
|||||||
ignore: jle64
|
ignore: jle64
|
||||||
$modules/packaging/language/pip_package_info.py:
|
$modules/packaging/language/pip_package_info.py:
|
||||||
maintainers: bcoca matburt maxamillion
|
maintainers: bcoca matburt maxamillion
|
||||||
$modules/packaging/language/pipx.py:
|
|
||||||
maintainers: russoz
|
|
||||||
$modules/packaging/language/yarn.py:
|
$modules/packaging/language/yarn.py:
|
||||||
maintainers: chrishoffman verkaufer
|
maintainers: chrishoffman verkaufer
|
||||||
$modules/packaging/os/apk.py:
|
$modules/packaging/os/apk.py:
|
||||||
maintainers: tdtrask
|
maintainers: tdtrask
|
||||||
labels: apk
|
labels: apk
|
||||||
ignore: kbrebanov
|
ignore: kbrebanov
|
||||||
$modules/packaging/os/apt_repo.py:
|
|
||||||
maintainers: obirvalger
|
|
||||||
$modules/packaging/os/apt_rpm.py:
|
$modules/packaging/os/apt_rpm.py:
|
||||||
maintainers: evgkrsk
|
maintainers: evgkrsk
|
||||||
$modules/packaging/os/copr.py:
|
$modules/packaging/os/copr.py:
|
||||||
maintainers: schlupov
|
maintainers: schlupov
|
||||||
$modules/packaging/os/dnf_versionlock.py:
|
|
||||||
maintainers: moreda
|
|
||||||
$modules/packaging/os/flatpak.py:
|
$modules/packaging/os/flatpak.py:
|
||||||
maintainers: $team_flatpak
|
maintainers: $team_flatpak
|
||||||
$modules/packaging/os/flatpak_remote.py:
|
$modules/packaging/os/flatpak_remote.py:
|
||||||
@@ -857,9 +694,6 @@ files:
|
|||||||
$modules/packaging/os/snap.py:
|
$modules/packaging/os/snap.py:
|
||||||
maintainers: angristan vcarceler
|
maintainers: angristan vcarceler
|
||||||
labels: snap
|
labels: snap
|
||||||
$modules/packaging/os/snap_alias.py:
|
|
||||||
maintainers: russoz
|
|
||||||
labels: snap
|
|
||||||
$modules/packaging/os/sorcery.py:
|
$modules/packaging/os/sorcery.py:
|
||||||
maintainers: vaygr
|
maintainers: vaygr
|
||||||
$modules/packaging/os/svr4pkg.py:
|
$modules/packaging/os/svr4pkg.py:
|
||||||
@@ -951,10 +785,6 @@ files:
|
|||||||
maintainers: markuman
|
maintainers: markuman
|
||||||
$modules/source_control/gitlab/gitlab_runner.py:
|
$modules/source_control/gitlab/gitlab_runner.py:
|
||||||
maintainers: SamyCoenen
|
maintainers: SamyCoenen
|
||||||
$modules/source_control/gitlab/gitlab_user.py:
|
|
||||||
maintainers: LennertMertens stgrace
|
|
||||||
$modules/source_control/gitlab/gitlab_branch.py:
|
|
||||||
maintainers: paytroff
|
|
||||||
$modules/source_control/hg.py:
|
$modules/source_control/hg.py:
|
||||||
maintainers: yeukhon
|
maintainers: yeukhon
|
||||||
$modules/storage/emc/emc_vnx_sg_member.py:
|
$modules/storage/emc/emc_vnx_sg_member.py:
|
||||||
@@ -963,6 +793,13 @@ files:
|
|||||||
maintainers: farhan7500 gautamphegde
|
maintainers: farhan7500 gautamphegde
|
||||||
$modules/storage/ibm/:
|
$modules/storage/ibm/:
|
||||||
maintainers: tzure
|
maintainers: tzure
|
||||||
|
$modules/storage/infinidat/:
|
||||||
|
maintainers: vmalloc GR360RY
|
||||||
|
$modules/storage/netapp/:
|
||||||
|
maintainers: $team_netapp
|
||||||
|
$modules/storage/purestorage/:
|
||||||
|
maintainers: $team_purestorage
|
||||||
|
labels: pure_storage
|
||||||
$modules/storage/vexata/:
|
$modules/storage/vexata/:
|
||||||
maintainers: vexata
|
maintainers: vexata
|
||||||
$modules/storage/zfs/:
|
$modules/storage/zfs/:
|
||||||
@@ -981,8 +818,6 @@ files:
|
|||||||
maintainers: mulby
|
maintainers: mulby
|
||||||
labels: alternatives
|
labels: alternatives
|
||||||
ignore: DavidWittman
|
ignore: DavidWittman
|
||||||
$modules/system/aix_lvol.py:
|
|
||||||
maintainers: adejoux
|
|
||||||
$modules/system/awall.py:
|
$modules/system/awall.py:
|
||||||
maintainers: tdtrask
|
maintainers: tdtrask
|
||||||
$modules/system/beadm.py:
|
$modules/system/beadm.py:
|
||||||
@@ -1000,7 +835,7 @@ files:
|
|||||||
$modules/system/dpkg_divert.py:
|
$modules/system/dpkg_divert.py:
|
||||||
maintainers: quidame
|
maintainers: quidame
|
||||||
$modules/system/facter.py:
|
$modules/system/facter.py:
|
||||||
maintainers: $team_ansible_core gamethis
|
maintainers: ansible gamethis
|
||||||
labels: facter
|
labels: facter
|
||||||
$modules/system/filesystem.py:
|
$modules/system/filesystem.py:
|
||||||
maintainers: pilou- abulimov quidame
|
maintainers: pilou- abulimov quidame
|
||||||
@@ -1018,7 +853,7 @@ files:
|
|||||||
$modules/system/java_cert.py:
|
$modules/system/java_cert.py:
|
||||||
maintainers: haad absynth76
|
maintainers: haad absynth76
|
||||||
$modules/system/java_keystore.py:
|
$modules/system/java_keystore.py:
|
||||||
maintainers: Mogztter quidame
|
maintainers: Mogztter
|
||||||
$modules/system/kernel_blacklist.py:
|
$modules/system/kernel_blacklist.py:
|
||||||
maintainers: matze
|
maintainers: matze
|
||||||
$modules/system/launchd.py:
|
$modules/system/launchd.py:
|
||||||
@@ -1032,7 +867,7 @@ files:
|
|||||||
$modules/system/lvg.py:
|
$modules/system/lvg.py:
|
||||||
maintainers: abulimov
|
maintainers: abulimov
|
||||||
$modules/system/lvol.py:
|
$modules/system/lvol.py:
|
||||||
maintainers: abulimov jhoekx zigaSRC unkaputtbar112
|
maintainers: abulimov jhoekx
|
||||||
$modules/system/make.py:
|
$modules/system/make.py:
|
||||||
maintainers: LinusU
|
maintainers: LinusU
|
||||||
$modules/system/mksysb.py:
|
$modules/system/mksysb.py:
|
||||||
@@ -1045,7 +880,7 @@ files:
|
|||||||
$modules/system/nosh.py:
|
$modules/system/nosh.py:
|
||||||
maintainers: tacatac
|
maintainers: tacatac
|
||||||
$modules/system/ohai.py:
|
$modules/system/ohai.py:
|
||||||
maintainers: $team_ansible_core mpdehaan
|
maintainers: ansible mpdehaan
|
||||||
labels: ohai
|
labels: ohai
|
||||||
$modules/system/open_iscsi.py:
|
$modules/system/open_iscsi.py:
|
||||||
maintainers: srvg
|
maintainers: srvg
|
||||||
@@ -1074,8 +909,6 @@ files:
|
|||||||
ignore: ryansb
|
ignore: ryansb
|
||||||
$modules/system/runit.py:
|
$modules/system/runit.py:
|
||||||
maintainers: jsumners
|
maintainers: jsumners
|
||||||
$modules/system/sap_task_list_execute:
|
|
||||||
maintainers: rainerleber
|
|
||||||
$modules/system/sefcontext.py:
|
$modules/system/sefcontext.py:
|
||||||
maintainers: dagwieers
|
maintainers: dagwieers
|
||||||
$modules/system/selinux_permissive.py:
|
$modules/system/selinux_permissive.py:
|
||||||
@@ -1088,8 +921,6 @@ files:
|
|||||||
maintainers: $team_solaris pmarkham
|
maintainers: $team_solaris pmarkham
|
||||||
labels: solaris
|
labels: solaris
|
||||||
keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
|
keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
|
||||||
$modules/system/ssh_config.py:
|
|
||||||
maintainers: gaqzi Akasurde
|
|
||||||
$modules/system/svc.py:
|
$modules/system/svc.py:
|
||||||
maintainers: bcoca
|
maintainers: bcoca
|
||||||
$modules/system/syspatch.py:
|
$modules/system/syspatch.py:
|
||||||
@@ -1105,38 +936,31 @@ files:
|
|||||||
maintainers: ahtik ovcharenko pyykkis
|
maintainers: ahtik ovcharenko pyykkis
|
||||||
labels: ufw
|
labels: ufw
|
||||||
$modules/system/vdo.py:
|
$modules/system/vdo.py:
|
||||||
maintainers: rhawalsh bgurney-rh
|
maintainers: rhawalsh
|
||||||
$modules/system/xfconf.py:
|
$modules/system/xfconf.py:
|
||||||
maintainers: russoz jbenden
|
maintainers: russoz jbenden
|
||||||
labels: xfconf
|
labels: xfconf
|
||||||
$modules/system/xfconf_info.py:
|
|
||||||
maintainers: russoz
|
|
||||||
labels: xfconf
|
|
||||||
$modules/system/xfs_quota.py:
|
$modules/system/xfs_quota.py:
|
||||||
maintainers: bushvin
|
maintainers: bushvin
|
||||||
$modules/web_infrastructure/apache2_mod_proxy.py:
|
$modules/web_infrastructure/apache2_mod_proxy.py:
|
||||||
maintainers: oboukili
|
maintainers: oboukili
|
||||||
$modules/web_infrastructure/apache2_module.py:
|
$modules/web_infrastructure/apache2_module.py:
|
||||||
maintainers: berendt n0trax
|
maintainers: berendt n0trax robinro
|
||||||
ignore: robinro
|
|
||||||
$modules/web_infrastructure/deploy_helper.py:
|
$modules/web_infrastructure/deploy_helper.py:
|
||||||
maintainers: ramondelafuente
|
maintainers: ramondelafuente
|
||||||
$modules/web_infrastructure/django_manage.py:
|
$modules/web_infrastructure/django_manage.py:
|
||||||
maintainers: russoz
|
maintainers: scottanderson42 russoz tastychutney
|
||||||
ignore: scottanderson42 tastychutney
|
|
||||||
labels: django_manage
|
labels: django_manage
|
||||||
$modules/web_infrastructure/ejabberd_user.py:
|
$modules/web_infrastructure/ejabberd_user.py:
|
||||||
maintainers: privateip
|
maintainers: privateip
|
||||||
$modules/web_infrastructure/gunicorn.py:
|
$modules/web_infrastructure/gunicorn.py:
|
||||||
maintainers: agmezr
|
maintainers: agmezr
|
||||||
$modules/web_infrastructure/htpasswd.py:
|
$modules/web_infrastructure/htpasswd.py:
|
||||||
maintainers: $team_ansible_core
|
maintainers: ansible
|
||||||
labels: htpasswd
|
labels: htpasswd
|
||||||
$modules/web_infrastructure/jboss.py:
|
$modules/web_infrastructure/jboss.py:
|
||||||
maintainers: $team_jboss jhoekx
|
maintainers: $team_jboss jhoekx
|
||||||
labels: jboss
|
labels: jboss
|
||||||
$modules/web_infrastructure/jenkins_build.py:
|
|
||||||
maintainers: brettmilford unnecessary-username
|
|
||||||
$modules/web_infrastructure/jenkins_job.py:
|
$modules/web_infrastructure/jenkins_job.py:
|
||||||
maintainers: sermilrod
|
maintainers: sermilrod
|
||||||
$modules/web_infrastructure/jenkins_job_info.py:
|
$modules/web_infrastructure/jenkins_job_info.py:
|
||||||
@@ -1146,18 +970,12 @@ files:
|
|||||||
$modules/web_infrastructure/jenkins_script.py:
|
$modules/web_infrastructure/jenkins_script.py:
|
||||||
maintainers: hogarthj
|
maintainers: hogarthj
|
||||||
$modules/web_infrastructure/jira.py:
|
$modules/web_infrastructure/jira.py:
|
||||||
maintainers: Slezhuk tarka pertoft DWSR
|
maintainers: Slezhuk tarka
|
||||||
labels: jira
|
labels: jira
|
||||||
$modules/web_infrastructure/nginx_status_info.py:
|
$modules/web_infrastructure/nginx_status_info.py:
|
||||||
maintainers: resmo
|
maintainers: resmo
|
||||||
$modules/web_infrastructure/rundeck_acl_policy.py:
|
$modules/web_infrastructure/:
|
||||||
maintainers: nerzhul
|
maintainers: nerzhul
|
||||||
$modules/web_infrastructure/rundeck_project.py:
|
|
||||||
maintainers: nerzhul
|
|
||||||
$modules/web_infrastructure/rundeck_job_run.py:
|
|
||||||
maintainers: phsmith
|
|
||||||
$modules/web_infrastructure/rundeck_job_executions_info.py:
|
|
||||||
maintainers: phsmith
|
|
||||||
$modules/web_infrastructure/sophos_utm/:
|
$modules/web_infrastructure/sophos_utm/:
|
||||||
maintainers: $team_e_spirit
|
maintainers: $team_e_spirit
|
||||||
keywords: sophos utm
|
keywords: sophos utm
|
||||||
@@ -1167,20 +985,10 @@ files:
|
|||||||
$modules/web_infrastructure/sophos_utm/utm_proxy_exception.py:
|
$modules/web_infrastructure/sophos_utm/utm_proxy_exception.py:
|
||||||
maintainers: $team_e_spirit RickS-C137
|
maintainers: $team_e_spirit RickS-C137
|
||||||
keywords: sophos utm
|
keywords: sophos utm
|
||||||
$modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py:
|
|
||||||
maintainers: stearz
|
|
||||||
$modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py:
|
|
||||||
maintainers: stearz
|
|
||||||
$modules/web_infrastructure/sophos_utm/utm_network_interface_address.py:
|
|
||||||
maintainers: steamx
|
|
||||||
$modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py:
|
|
||||||
maintainers: steamx
|
|
||||||
$modules/web_infrastructure/supervisorctl.py:
|
$modules/web_infrastructure/supervisorctl.py:
|
||||||
maintainers: inetfuture mattupstate
|
maintainers: inetfuture mattupstate
|
||||||
$modules/web_infrastructure/taiga_issue.py:
|
$modules/web_infrastructure/taiga_issue.py:
|
||||||
maintainers: lekum
|
maintainers: lekum
|
||||||
$tests/a_module.py:
|
|
||||||
maintainers: felixfontein
|
|
||||||
#########################
|
#########################
|
||||||
tests/:
|
tests/:
|
||||||
labels: tests
|
labels: tests
|
||||||
@@ -1196,26 +1004,24 @@ files:
|
|||||||
macros:
|
macros:
|
||||||
actions: plugins/action
|
actions: plugins/action
|
||||||
becomes: plugins/become
|
becomes: plugins/become
|
||||||
caches: plugins/cache
|
|
||||||
callbacks: plugins/callback
|
callbacks: plugins/callback
|
||||||
cliconfs: plugins/cliconf
|
cliconfs: plugins/cliconf
|
||||||
connections: plugins/connection
|
connections: plugins/connection
|
||||||
doc_fragments: plugins/doc_fragments
|
doc_fragments: plugins/doc_fragments
|
||||||
filters: plugins/filter
|
filters: plugins/filter
|
||||||
|
httpapis: plugins/httpapi
|
||||||
inventories: plugins/inventory
|
inventories: plugins/inventory
|
||||||
lookups: plugins/lookup
|
lookups: plugins/lookup
|
||||||
module_utils: plugins/module_utils
|
module_utils: plugins/module_utils
|
||||||
modules: plugins/modules
|
modules: plugins/modules
|
||||||
terminals: plugins/terminal
|
terminals: plugins/terminal
|
||||||
tests: plugins/test
|
|
||||||
team_ansible_core:
|
|
||||||
team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross
|
team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross
|
||||||
team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo
|
team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo
|
||||||
team_consul: sgargan
|
team_consul: sgargan
|
||||||
team_cyberark_conjur: jvanderhoof ryanprior
|
team_cyberark_conjur: jvanderhoof ryanprior
|
||||||
team_e_spirit: MatrixCrawler getjack
|
team_e_spirit: MatrixCrawler getjack
|
||||||
team_flatpak: JayKayy oolongbrothers
|
team_flatpak: JayKayy oolongbrothers
|
||||||
team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman metanovii sh0shin
|
team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman metanovii
|
||||||
team_hpux: bcoca davx8342
|
team_hpux: bcoca davx8342
|
||||||
team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2
|
team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2
|
||||||
team_ipa: Akasurde Nosmoht fxfitz justchris1
|
team_ipa: Akasurde Nosmoht fxfitz justchris1
|
||||||
@@ -1224,13 +1030,14 @@ macros:
|
|||||||
team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber
|
team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber
|
||||||
team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr
|
team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr
|
||||||
team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder
|
team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder
|
||||||
|
team_netapp: amit0701 carchi8py hulquest lmprice lonico ndswartz schmots1
|
||||||
team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip
|
team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip
|
||||||
team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding
|
team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding
|
||||||
team_oracle: manojmeda mross22 nalsaber
|
team_oracle: manojmeda mross22 nalsaber
|
||||||
team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
|
team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
|
||||||
team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06
|
team_redfish: mraineri tomasg2012 xmadsen renxulei
|
||||||
team_rhn: FlossWare alikins barnabycourt vritant
|
team_rhn: FlossWare alikins barnabycourt vritant
|
||||||
team_scaleway: remyleone abarbare
|
team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone sieben
|
||||||
team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
|
team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
|
||||||
team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom sealor
|
team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom sealor
|
||||||
team_virt: joshainglis karmab tleguern Thulium-Drake Ajpantuso
|
team_virt: joshainglis karmab tleguern Thulium-Drake Ajpantuso
|
||||||
|
|||||||
14
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
14
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
@@ -62,20 +62,6 @@ body:
|
|||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: Community.general Version
|
|
||||||
description: >-
|
|
||||||
Paste verbatim output from "ansible-galaxy collection list community.general"
|
|
||||||
between triple backticks.
|
|
||||||
value: |
|
|
||||||
```console (paste below)
|
|
||||||
$ ansible-galaxy collection list community.general
|
|
||||||
|
|
||||||
```
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
attributes:
|
attributes:
|
||||||
label: Configuration
|
label: Configuration
|
||||||
|
|||||||
14
.github/ISSUE_TEMPLATE/documentation_report.yml
vendored
14
.github/ISSUE_TEMPLATE/documentation_report.yml
vendored
@@ -62,20 +62,6 @@ body:
|
|||||||
validations:
|
validations:
|
||||||
required: false
|
required: false
|
||||||
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: Community.general Version
|
|
||||||
description: >-
|
|
||||||
Paste verbatim output from "ansible-galaxy collection list community.general"
|
|
||||||
between triple backticks.
|
|
||||||
value: |
|
|
||||||
```console (paste below)
|
|
||||||
$ ansible-galaxy collection list community.general
|
|
||||||
|
|
||||||
```
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
attributes:
|
attributes:
|
||||||
label: Configuration
|
label: Configuration
|
||||||
|
|||||||
2
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
2
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
@@ -21,7 +21,7 @@ body:
|
|||||||
placeholder: >-
|
placeholder: >-
|
||||||
I am trying to do X with the collection from the main branch on GitHub and
|
I am trying to do X with the collection from the main branch on GitHub and
|
||||||
I think that implementing a feature Y would be very helpful for me and
|
I think that implementing a feature Y would be very helpful for me and
|
||||||
every other user of community.general because of Z.
|
every other user of ansible-core because of Z.
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
|
|
||||||
|
|||||||
6
.github/dependabot.yml
vendored
6
.github/dependabot.yml
vendored
@@ -1,6 +0,0 @@
|
|||||||
version: 2
|
|
||||||
updates:
|
|
||||||
- package-ecosystem: "github-actions"
|
|
||||||
directory: "/"
|
|
||||||
interval:
|
|
||||||
schedule: "weekly"
|
|
||||||
81
.gitignore
vendored
81
.gitignore
vendored
@@ -1,6 +1,6 @@
|
|||||||
|
|
||||||
# Created by https://www.toptal.com/developers/gitignore/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
|
# Created by https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
|
||||||
# Edit at https://www.toptal.com/developers/gitignore?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
|
# Edit at https://www.gitignore.io/?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
|
||||||
|
|
||||||
### dotenv ###
|
### dotenv ###
|
||||||
.env
|
.env
|
||||||
@@ -88,7 +88,7 @@ flycheck_*.el
|
|||||||
.nfs*
|
.nfs*
|
||||||
|
|
||||||
### PyCharm+all ###
|
### PyCharm+all ###
|
||||||
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
|
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
|
||||||
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
|
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
|
||||||
|
|
||||||
# User-specific stuff
|
# User-specific stuff
|
||||||
@@ -98,9 +98,6 @@ flycheck_*.el
|
|||||||
.idea/**/dictionaries
|
.idea/**/dictionaries
|
||||||
.idea/**/shelf
|
.idea/**/shelf
|
||||||
|
|
||||||
# AWS User-specific
|
|
||||||
.idea/**/aws.xml
|
|
||||||
|
|
||||||
# Generated files
|
# Generated files
|
||||||
.idea/**/contentModel.xml
|
.idea/**/contentModel.xml
|
||||||
|
|
||||||
@@ -121,9 +118,6 @@ flycheck_*.el
|
|||||||
# When using Gradle or Maven with auto-import, you should exclude module files,
|
# When using Gradle or Maven with auto-import, you should exclude module files,
|
||||||
# since they will be recreated, and may cause churn. Uncomment if using
|
# since they will be recreated, and may cause churn. Uncomment if using
|
||||||
# auto-import.
|
# auto-import.
|
||||||
# .idea/artifacts
|
|
||||||
# .idea/compiler.xml
|
|
||||||
# .idea/jarRepositories.xml
|
|
||||||
# .idea/modules.xml
|
# .idea/modules.xml
|
||||||
# .idea/*.iml
|
# .idea/*.iml
|
||||||
# .idea/modules
|
# .idea/modules
|
||||||
@@ -204,6 +198,7 @@ parts/
|
|||||||
sdist/
|
sdist/
|
||||||
var/
|
var/
|
||||||
wheels/
|
wheels/
|
||||||
|
pip-wheel-metadata/
|
||||||
share/python-wheels/
|
share/python-wheels/
|
||||||
*.egg-info/
|
*.egg-info/
|
||||||
.installed.cfg
|
.installed.cfg
|
||||||
@@ -230,25 +225,13 @@ htmlcov/
|
|||||||
nosetests.xml
|
nosetests.xml
|
||||||
coverage.xml
|
coverage.xml
|
||||||
*.cover
|
*.cover
|
||||||
*.py,cover
|
|
||||||
.hypothesis/
|
.hypothesis/
|
||||||
.pytest_cache/
|
.pytest_cache/
|
||||||
cover/
|
|
||||||
|
|
||||||
# Translations
|
# Translations
|
||||||
*.mo
|
*.mo
|
||||||
*.pot
|
*.pot
|
||||||
|
|
||||||
# Django stuff:
|
|
||||||
*.log
|
|
||||||
local_settings.py
|
|
||||||
db.sqlite3
|
|
||||||
db.sqlite3-journal
|
|
||||||
|
|
||||||
# Flask stuff:
|
|
||||||
instance/
|
|
||||||
.webassets-cache
|
|
||||||
|
|
||||||
# Scrapy stuff:
|
# Scrapy stuff:
|
||||||
.scrapy
|
.scrapy
|
||||||
|
|
||||||
@@ -256,19 +239,9 @@ instance/
|
|||||||
docs/_build/
|
docs/_build/
|
||||||
|
|
||||||
# PyBuilder
|
# PyBuilder
|
||||||
.pybuilder/
|
|
||||||
target/
|
target/
|
||||||
|
|
||||||
# Jupyter Notebook
|
|
||||||
.ipynb_checkpoints
|
|
||||||
|
|
||||||
# IPython
|
|
||||||
profile_default/
|
|
||||||
ipython_config.py
|
|
||||||
|
|
||||||
# pyenv
|
# pyenv
|
||||||
# For a library or package, you might want to ignore these files since the code is
|
|
||||||
# intended to run in multiple environments; otherwise, check them in:
|
|
||||||
.python-version
|
.python-version
|
||||||
|
|
||||||
# pipenv
|
# pipenv
|
||||||
@@ -278,24 +251,12 @@ ipython_config.py
|
|||||||
# install all needed dependencies.
|
# install all needed dependencies.
|
||||||
#Pipfile.lock
|
#Pipfile.lock
|
||||||
|
|
||||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
# celery beat schedule file
|
||||||
__pypackages__/
|
|
||||||
|
|
||||||
# Celery stuff
|
|
||||||
celerybeat-schedule
|
celerybeat-schedule
|
||||||
celerybeat.pid
|
|
||||||
|
|
||||||
# SageMath parsed files
|
# SageMath parsed files
|
||||||
*.sage.py
|
*.sage.py
|
||||||
|
|
||||||
# Environments
|
|
||||||
.venv
|
|
||||||
env/
|
|
||||||
venv/
|
|
||||||
ENV/
|
|
||||||
env.bak/
|
|
||||||
venv.bak/
|
|
||||||
|
|
||||||
# Spyder project settings
|
# Spyder project settings
|
||||||
.spyderproject
|
.spyderproject
|
||||||
.spyproject
|
.spyproject
|
||||||
@@ -303,6 +264,10 @@ venv.bak/
|
|||||||
# Rope project settings
|
# Rope project settings
|
||||||
.ropeproject
|
.ropeproject
|
||||||
|
|
||||||
|
# Mr Developer
|
||||||
|
.mr.developer.cfg
|
||||||
|
.project
|
||||||
|
|
||||||
# mkdocs documentation
|
# mkdocs documentation
|
||||||
/site
|
/site
|
||||||
|
|
||||||
@@ -314,16 +279,9 @@ dmypy.json
|
|||||||
# Pyre type checker
|
# Pyre type checker
|
||||||
.pyre/
|
.pyre/
|
||||||
|
|
||||||
# pytype static type analyzer
|
|
||||||
.pytype/
|
|
||||||
|
|
||||||
# Cython debug symbols
|
|
||||||
cython_debug/
|
|
||||||
|
|
||||||
### Vim ###
|
### Vim ###
|
||||||
# Swap
|
# Swap
|
||||||
[._]*.s[a-v][a-z]
|
[._]*.s[a-v][a-z]
|
||||||
!*.svg # comment out if you don't need vector files
|
|
||||||
[._]*.sw[a-p]
|
[._]*.sw[a-p]
|
||||||
[._]s[a-rt-v][a-z]
|
[._]s[a-rt-v][a-z]
|
||||||
[._]ss[a-gi-z]
|
[._]ss[a-gi-z]
|
||||||
@@ -341,13 +299,11 @@ tags
|
|||||||
[._]*.un~
|
[._]*.un~
|
||||||
|
|
||||||
### WebStorm ###
|
### WebStorm ###
|
||||||
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
|
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
|
||||||
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
|
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
|
||||||
|
|
||||||
# User-specific stuff
|
# User-specific stuff
|
||||||
|
|
||||||
# AWS User-specific
|
|
||||||
|
|
||||||
# Generated files
|
# Generated files
|
||||||
|
|
||||||
# Sensitive or high-churn files
|
# Sensitive or high-churn files
|
||||||
@@ -358,9 +314,6 @@ tags
|
|||||||
# When using Gradle or Maven with auto-import, you should exclude module files,
|
# When using Gradle or Maven with auto-import, you should exclude module files,
|
||||||
# since they will be recreated, and may cause churn. Uncomment if using
|
# since they will be recreated, and may cause churn. Uncomment if using
|
||||||
# auto-import.
|
# auto-import.
|
||||||
# .idea/artifacts
|
|
||||||
# .idea/compiler.xml
|
|
||||||
# .idea/jarRepositories.xml
|
|
||||||
# .idea/modules.xml
|
# .idea/modules.xml
|
||||||
# .idea/*.iml
|
# .idea/*.iml
|
||||||
# .idea/modules
|
# .idea/modules
|
||||||
@@ -396,27 +349,15 @@ tags
|
|||||||
# *.ipr
|
# *.ipr
|
||||||
|
|
||||||
# Sonarlint plugin
|
# Sonarlint plugin
|
||||||
# https://plugins.jetbrains.com/plugin/7973-sonarlint
|
|
||||||
.idea/**/sonarlint/
|
.idea/**/sonarlint/
|
||||||
|
|
||||||
# SonarQube Plugin
|
# SonarQube Plugin
|
||||||
# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin
|
|
||||||
.idea/**/sonarIssues.xml
|
.idea/**/sonarIssues.xml
|
||||||
|
|
||||||
# Markdown Navigator plugin
|
# Markdown Navigator plugin
|
||||||
# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced
|
|
||||||
.idea/**/markdown-navigator.xml
|
.idea/**/markdown-navigator.xml
|
||||||
.idea/**/markdown-navigator-enh.xml
|
|
||||||
.idea/**/markdown-navigator/
|
.idea/**/markdown-navigator/
|
||||||
|
|
||||||
# Cache file creation bug
|
|
||||||
# See https://youtrack.jetbrains.com/issue/JBR-2257
|
|
||||||
.idea/$CACHE_FILE$
|
|
||||||
|
|
||||||
# CodeStream plugin
|
|
||||||
# https://plugins.jetbrains.com/plugin/12206-codestream
|
|
||||||
.idea/codestream.xml
|
|
||||||
|
|
||||||
### Windows ###
|
### Windows ###
|
||||||
# Windows thumbnail cache files
|
# Windows thumbnail cache files
|
||||||
Thumbs.db
|
Thumbs.db
|
||||||
@@ -443,4 +384,4 @@ $RECYCLE.BIN/
|
|||||||
# Windows shortcuts
|
# Windows shortcuts
|
||||||
*.lnk
|
*.lnk
|
||||||
|
|
||||||
# End of https://www.toptal.com/developers/gitignore/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
|
# End of https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
|
||||||
|
|||||||
1201
CHANGELOG.rst
1201
CHANGELOG.rst
File diff suppressed because it is too large
Load Diff
@@ -23,51 +23,10 @@ Note that reviewing does not only mean code review, but also offering comments o
|
|||||||
Also, consider taking up a valuable, reviewed, but abandoned pull request which you could politely ask the original authors to complete yourself.
|
Also, consider taking up a valuable, reviewed, but abandoned pull request which you could politely ask the original authors to complete yourself.
|
||||||
|
|
||||||
* Try committing your changes with an informative but short commit message.
|
* Try committing your changes with an informative but short commit message.
|
||||||
* Do not squash your commits and force-push to your branch if not needed. Reviews of your pull request are much easier with individual commits to comprehend the pull request history. All commits of your pull request branch will be squashed into one commit by GitHub upon merge.
|
* All commits of a pull request branch will be squashed into one commit at last. That does not mean you must have only one commit on your pull request, though!
|
||||||
|
* Please try not to force-push if it is not needed, so reviewers and other users looking at your pull request later can see the pull request commit history.
|
||||||
* Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the repository checkout.
|
* Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the repository checkout.
|
||||||
* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#changelogs-how-to). (You must not include a fragment for new modules or new plugins, except for test and filter plugins. Also you shouldn't include one for docs-only changes. If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) )
|
|
||||||
* Avoid reformatting unrelated parts of the codebase in your PR. These types of changes will likely be requested for reversion, create additional work for reviewers, and may cause approval to be delayed.
|
|
||||||
|
|
||||||
You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst).
|
You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst).
|
||||||
|
|
||||||
## Test pull requests
|
|
||||||
|
|
||||||
If you want to test a PR locally, refer to [our testing guide](https://github.com/ansible/community-docs/blob/main/test_pr_locally_guide.rst) for instructions on how to do it quickly.
|
|
||||||
|
|
||||||
If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it.
|
If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it.
|
||||||
|
|
||||||
## Creating new modules or plugins
|
|
||||||
|
|
||||||
Creating new modules and plugins requires a bit more work than other Pull Requests.
|
|
||||||
|
|
||||||
1. Please make sure that your new module or plugin is of interest to a larger audience. Very specialized modules or plugins that
|
|
||||||
can only be used by very few people should better be added to more specialized collections.
|
|
||||||
|
|
||||||
2. Please do not add more than one plugin/module in one PR, especially if it is the first plugin/module you are contributing.
|
|
||||||
That makes it easier for reviewers, and increases the chance that your PR will get merged. If you plan to contribute a group
|
|
||||||
of plugins/modules (say, more than a module and a corresponding ``_info`` module), please mention that in the first PR. In
|
|
||||||
such cases, you also have to think whether it is better to publish the group of plugins/modules in a new collection.
|
|
||||||
|
|
||||||
3. When creating a new module or plugin, please make sure that you follow various guidelines:
|
|
||||||
|
|
||||||
- Follow [development conventions](https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_best_practices.html);
|
|
||||||
- Follow [documentation standards](https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_documenting.html) and
|
|
||||||
the [Ansible style guide](https://docs.ansible.com/ansible/devel/dev_guide/style_guide/index.html#style-guide);
|
|
||||||
- Make sure your modules and plugins are [GPL-3.0-or-later](https://www.gnu.org/licenses/gpl-3.0-standalone.html) licensed
|
|
||||||
(new module_utils can also be [BSD-2-clause](https://opensource.org/licenses/BSD-2-Clause) licensed);
|
|
||||||
- Make sure that new plugins and modules have tests (unit tests, integration tests, or both); it is preferable to have some tests
|
|
||||||
which run in CI.
|
|
||||||
|
|
||||||
4. For modules and action plugins, make sure to create your module/plugin in the correct subdirectory, and create a symbolic link
|
|
||||||
from `plugins/modules/` respectively `plugins/action/` to the actual module/plugin code. (Other plugin types should not use
|
|
||||||
subdirectories.)
|
|
||||||
|
|
||||||
- Action plugins need to be accompanied by a module, even if the module file only contains documentation
|
|
||||||
(`DOCUMENTATION`, `EXAMPLES` and `RETURN`). The module must have the same name and directory path in `plugins/modules/`
|
|
||||||
as the action plugin has in `plugins/action/`.
|
|
||||||
|
|
||||||
5. Make sure to add a BOTMETA entry for your new module/plugin in `.github/BOTMETA.yml`. Search for other plugins/modules in the
|
|
||||||
same directory to see how entries could look. You should list all authors either as `maintainers` or under `ignore`. People
|
|
||||||
listed as `maintainers` will be pinged for new issues and PRs that modify the module/plugin or its tests.
|
|
||||||
|
|
||||||
When you add a new plugin/module, we expect that you perform maintainer duty for at least some time after contributing it.
|
|
||||||
|
|||||||
86
README.md
86
README.md
@@ -1,23 +1,17 @@
|
|||||||
# Community General Collection
|
# Community General Collection
|
||||||
|
|
||||||
[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
|
[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
|
||||||
[](https://codecov.io/gh/ansible-collections/community.general)
|
[](https://codecov.io/gh/ansible-collections/community.general)
|
||||||
|
|
||||||
This repository contains the `community.general` Ansible Collection. The collection is a part of the Ansible package and includes many modules and plugins supported by Ansible community which are not part of more specialized community collections.
|
This repo contains the `community.general` Ansible Collection. The collection includes many modules and plugins supported by Ansible community which are not part of more specialized community collections.
|
||||||
|
|
||||||
You can find [documentation for this collection on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
|
You can find [documentation for this collection on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
|
||||||
|
|
||||||
Please note that this collection does **not** support Windows targets. Only connection plugins included in this collection might support Windows targets, and will explicitly mention that in their documentation if they do so.
|
Please note that this collection does **not** support Windows targets. Only connection plugins included in this collection might support Windows targets, and will explicitly mention that in their documentation if they do so.
|
||||||
|
|
||||||
## Code of Conduct
|
|
||||||
|
|
||||||
We follow [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our interactions within this project.
|
|
||||||
|
|
||||||
If you encounter abusive behavior violating the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html), please refer to the [policy violations](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html#policy-violations) section of the Code of Conduct for information on how to raise a complaint.
|
|
||||||
|
|
||||||
## Tested with Ansible
|
## Tested with Ansible
|
||||||
|
|
||||||
Tested with the current Ansible 2.9, ansible-base 2.10, ansible-core 2.11, ansible-core 2.12 releases and the current development version of ansible-core. Ansible versions before 2.9.10 are not supported.
|
Tested with the current Ansible 2.9, ansible-base 2.10 and ansible-core 2.11 releases and the current development version of ansible-core. Ansible versions before 2.9.10 are not supported.
|
||||||
|
|
||||||
## External requirements
|
## External requirements
|
||||||
|
|
||||||
@@ -29,9 +23,7 @@ Please check the included content on the [Ansible Galaxy page for this collectio
|
|||||||
|
|
||||||
## Using this collection
|
## Using this collection
|
||||||
|
|
||||||
This collection is shipped with the Ansible package. So if you have it installed, no more action is required.
|
Before using the General community collection, you need to install the collection with the `ansible-galaxy` CLI:
|
||||||
|
|
||||||
If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/community/general) manually with the `ansible-galaxy` command-line tool:
|
|
||||||
|
|
||||||
ansible-galaxy collection install community.general
|
ansible-galaxy collection install community.general
|
||||||
|
|
||||||
@@ -42,79 +34,59 @@ collections:
|
|||||||
- name: community.general
|
- name: community.general
|
||||||
```
|
```
|
||||||
|
|
||||||
Note that if you install the collection manually, it will not be upgraded automatically when you upgrade the Ansible package. To upgrade the collection to the latest available version, run the following command:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
ansible-galaxy collection install community.general --upgrade
|
|
||||||
```
|
|
||||||
|
|
||||||
You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/community/general):
|
|
||||||
|
|
||||||
```bash
|
|
||||||
ansible-galaxy collection install community.general:==X.Y.Z
|
|
||||||
```
|
|
||||||
|
|
||||||
See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details.
|
See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details.
|
||||||
|
|
||||||
## Contributing to this collection
|
## Contributing to this collection
|
||||||
|
|
||||||
The content of this collection is made by good people just like you, a community of individuals collaborating on making the world better through developing automation software.
|
If you want to develop new content for this collection or improve what is already here, the easiest way to work on the collection is to clone it into one of the configured [`COLLECTIONS_PATH`](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#collections-paths), and work on it there.
|
||||||
|
|
||||||
We are actively accepting new contributors.
|
For example, if you are working in the `~/dev` directory:
|
||||||
|
|
||||||
All types of contributions are very welcome.
|
```
|
||||||
|
cd ~/dev
|
||||||
You don't know how to start? Refer to our [contribution guide](https://github.com/ansible-collections/community.general/blob/stable-4/CONTRIBUTING.md)!
|
git clone git@github.com:ansible-collections/community.general.git collections/ansible_collections/community/general
|
||||||
|
export COLLECTIONS_PATH=$(pwd)/collections:$COLLECTIONS_PATH
|
||||||
The current maintainers are listed in the [commit-rights.md](https://github.com/ansible-collections/community.general/blob/stable-4/commit-rights.md#people) file. If you have questions or need help, feel free to mention them in the proposals.
|
```
|
||||||
|
|
||||||
You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).
|
You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).
|
||||||
|
|
||||||
Also for some notes specific to this collection see [our CONTRIBUTING documentation](https://github.com/ansible-collections/community.general/blob/stable-4/CONTRIBUTING.md).
|
Also for some notes specific to this collection see [our CONTRIBUTING documentation](https://github.com/ansible-collections/community.general/blob/main/CONTRIBUTING.md).
|
||||||
|
|
||||||
### Running tests
|
### Running tests
|
||||||
|
|
||||||
See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#testing-collections).
|
See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#testing-collections).
|
||||||
|
|
||||||
## Collection maintenance
|
### Communication
|
||||||
|
|
||||||
To learn how to maintain / become a maintainer of this collection, refer to:
|
We have a dedicated Working Group for Ansible development.
|
||||||
|
|
||||||
* [Committer guidelines](https://github.com/ansible-collections/community.general/blob/stable-4/commit-rights.md).
|
You can find other people interested on the following [Libera.chat](https://libera.chat/) IRC channels -
|
||||||
* [Maintainer guidelines](https://github.com/ansible/community-docs/blob/main/maintaining.rst).
|
- `#ansible` - For general use questions and support.
|
||||||
|
- `#ansible-devel` - For discussions on developer topics and code related to features or bugs in ansible-core.
|
||||||
It is necessary for maintainers of this collection to be subscribed to:
|
- `#ansible-community` - For discussions on community topics and community meetings, and for general development questions for community collections.
|
||||||
|
|
||||||
* The collection itself (the `Watch` button → `All Activity` in the upper right corner of the repository's homepage).
|
|
||||||
* The "Changes Impacting Collection Contributors and Maintainers" [issue](https://github.com/ansible-collections/overview/issues/45).
|
|
||||||
|
|
||||||
They also should be subscribed to Ansible's [The Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn).
|
|
||||||
|
|
||||||
## Communication
|
|
||||||
|
|
||||||
We announce important development changes and releases through Ansible's [The Bullhorn newsletter](https://eepurl.com/gZmiEP). If you are a collection developer, be sure you are subscribed.
|
|
||||||
|
|
||||||
Join us in the `#ansible` (general use questions and support), `#ansible-community` (community and collection development questions), and other [IRC channels](https://docs.ansible.com/ansible/devel/community/communication.html#irc-channels) on [Libera.chat](https://libera.chat).
|
|
||||||
|
|
||||||
We take part in the global quarterly [Ansible Contributor Summit](https://github.com/ansible/community/wiki/Contributor-Summit) virtually or in-person. Track [The Bullhorn newsletter](https://eepurl.com/gZmiEP) and join us.
|
|
||||||
|
|
||||||
For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community).
|
For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community).
|
||||||
|
|
||||||
For more information about communication, refer to Ansible's the [Communication guide](https://docs.ansible.com/ansible/devel/community/communication.html).
|
For more information about [communication](https://docs.ansible.com/ansible/latest/community/communication.html)
|
||||||
|
|
||||||
## Publishing New Version
|
### Publishing New Version
|
||||||
|
|
||||||
See the [Releasing guidelines](https://github.com/ansible/community-docs/blob/main/releasing_collections.rst) to learn how to release this collection.
|
Basic instructions without release branches:
|
||||||
|
|
||||||
|
1. Create `changelogs/fragments/<version>.yml` with `release_summary:` section (which must be a string, not a list).
|
||||||
|
2. Run `antsibull-changelog release --collection-flatmap yes`
|
||||||
|
3. Make sure `CHANGELOG.rst` and `changelogs/changelog.yaml` are added to git, and the deleted fragments have been removed.
|
||||||
|
4. Tag the commit with `<version>`. Push changes and tag to the main repository.
|
||||||
|
|
||||||
## Release notes
|
## Release notes
|
||||||
|
|
||||||
See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-4/CHANGELOG.rst).
|
See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-3/CHANGELOG.rst).
|
||||||
|
|
||||||
## Roadmap
|
## Roadmap
|
||||||
|
|
||||||
In general, we plan to release a major version every six months, and minor versions every two months. Major versions can contain breaking changes, while minor versions only contain new features and bugfixes.
|
See [this issue](https://github.com/ansible-collections/community.general/issues/582) for information on releasing, versioning and deprecation.
|
||||||
|
|
||||||
See [this issue](https://github.com/ansible-collections/community.general/issues/582) for information on releasing, versioning, and deprecation.
|
In general, we plan to release a major version every six months, and minor versions every two months. Major versions can contain breaking changes, while minor versions only contain new features and bugfixes.
|
||||||
|
|
||||||
## More information
|
## More information
|
||||||
|
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -68,7 +68,7 @@ Individuals who have been asked to become a part of this group have generally be
|
|||||||
| Name | GitHub ID | IRC Nick | Other |
|
| Name | GitHub ID | IRC Nick | Other |
|
||||||
| ------------------- | -------------------- | ------------------ | -------------------- |
|
| ------------------- | -------------------- | ------------------ | -------------------- |
|
||||||
| Alexei Znamensky | russoz | russoz | |
|
| Alexei Znamensky | russoz | russoz | |
|
||||||
|
| Amin Vakil | aminvakil | aminvakil | |
|
||||||
| Andrew Klychkov | andersson007 | andersson007_ | |
|
| Andrew Klychkov | andersson007 | andersson007_ | |
|
||||||
| Andrew Pantuso | Ajpantuso | ajpantuso | |
|
|
||||||
| Felix Fontein | felixfontein | felixfontein | |
|
| Felix Fontein | felixfontein | felixfontein | |
|
||||||
| John R Barker | gundalow | gundalow | |
|
| John R Barker | gundalow | gundalow | |
|
||||||
|
|||||||
@@ -3,4 +3,3 @@ sections:
|
|||||||
- title: Guides
|
- title: Guides
|
||||||
toctree:
|
toctree:
|
||||||
- filter_guide
|
- filter_guide
|
||||||
- test_guide
|
|
||||||
|
|||||||
@@ -751,34 +751,3 @@ To extract ports from all clusters with name containing 'server1':
|
|||||||
server_name_query: "domain.server[?contains(name,'server1')].port"
|
server_name_query: "domain.server[?contains(name,'server1')].port"
|
||||||
|
|
||||||
.. note:: while using ``starts_with`` and ``contains``, you have to use `` to_json | from_json `` filter for correct parsing of data structure.
|
.. note:: while using ``starts_with`` and ``contains``, you have to use `` to_json | from_json `` filter for correct parsing of data structure.
|
||||||
|
|
||||||
Working with Unicode
|
|
||||||
---------------------
|
|
||||||
|
|
||||||
`Unicode <https://unicode.org/main.html>`_ makes it possible to produce two strings which may be visually equivalent, but are comprised of distinctly different characters/character sequences. To address this ``Unicode`` defines `normalization forms <https://unicode.org/reports/tr15/>`_ which avoid these distinctions by choosing a unique character sequence for a given visual representation.
|
|
||||||
|
|
||||||
You can use the ``community.general.unicode_normalize`` filter to normalize ``Unicode`` strings within your playbooks.
|
|
||||||
|
|
||||||
.. code-block:: yaml+jinja
|
|
||||||
|
|
||||||
- name: Compare Unicode representations
|
|
||||||
debug:
|
|
||||||
msg: "{{ with_combining_character | community.general.unicode_normalize == without_combining_character }}"
|
|
||||||
vars:
|
|
||||||
with_combining_character: "{{ 'Mayagu\u0308ez' }}"
|
|
||||||
without_combining_character: Mayagüez
|
|
||||||
|
|
||||||
This produces:
|
|
||||||
|
|
||||||
.. code-block:: ansible-output
|
|
||||||
|
|
||||||
TASK [Compare Unicode representations] ********************************************************
|
|
||||||
ok: [localhost] => {
|
|
||||||
"msg": true
|
|
||||||
}
|
|
||||||
|
|
||||||
The ``community.general.unicode_normalize`` filter accepts a keyword argument to select the ``Unicode`` form used to normalize the input string.
|
|
||||||
|
|
||||||
:form: One of ``'NFC'`` (default), ``'NFD'``, ``'NFKC'``, or ``'NFKD'``. See the `Unicode reference <https://unicode.org/reports/tr15/>`_ for more information.
|
|
||||||
|
|
||||||
.. versionadded:: 3.7.0
|
|
||||||
|
|||||||
@@ -1,28 +0,0 @@
|
|||||||
.. _ansible_collections.community.general.docsite.test_guide:
|
|
||||||
|
|
||||||
community.general Test (Plugin) Guide
|
|
||||||
=====================================
|
|
||||||
|
|
||||||
The :ref:`community.general collection <plugins_in_community.general>` offers currently one test plugin.
|
|
||||||
|
|
||||||
.. contents:: Topics
|
|
||||||
|
|
||||||
Feature Tests
|
|
||||||
-------------
|
|
||||||
|
|
||||||
The ``a_module`` test allows to check whether a given string refers to an existing module or action plugin. This can be useful in roles, which can use this to ensure that required modules are present ahead of time.
|
|
||||||
|
|
||||||
.. code-block:: yaml+jinja
|
|
||||||
|
|
||||||
- name: Make sure that community.aws.route53 is available
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- >
|
|
||||||
'community.aws.route53' is community.general.a_module
|
|
||||||
|
|
||||||
- name: Make sure that community.general.does_not_exist is not a module or action plugin
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- "'community.general.does_not_exist' is not community.general.a_module"
|
|
||||||
|
|
||||||
.. versionadded:: 4.0.0
|
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
namespace: community
|
namespace: community
|
||||||
name: general
|
name: general
|
||||||
version: 4.2.0
|
version: 3.2.0
|
||||||
readme: README.md
|
readme: README.md
|
||||||
authors:
|
authors:
|
||||||
- Ansible (https://github.com/ansible)
|
- Ansible (https://github.com/ansible)
|
||||||
|
|||||||
105
meta/runtime.yml
105
meta/runtime.yml
@@ -12,11 +12,20 @@ plugin_routing:
|
|||||||
hashi_vault:
|
hashi_vault:
|
||||||
redirect: community.hashi_vault.hashi_vault
|
redirect: community.hashi_vault.hashi_vault
|
||||||
nios:
|
nios:
|
||||||
redirect: infoblox.nios_modules.nios_lookup
|
deprecation:
|
||||||
|
removal_version: 5.0.0
|
||||||
|
warning_text: The community.general.nios lookup plugin has been deprecated.
|
||||||
|
Please use infoblox.nios_modules.nios_lookup instead.
|
||||||
nios_next_ip:
|
nios_next_ip:
|
||||||
redirect: infoblox.nios_modules.nios_next_ip
|
deprecation:
|
||||||
|
removal_version: 5.0.0
|
||||||
|
warning_text: The community.general.nios_next_ip lookup plugin has been deprecated.
|
||||||
|
Please use infoblox.nios_modules.nios_next_ip instead.
|
||||||
nios_next_network:
|
nios_next_network:
|
||||||
redirect: infoblox.nios_modules.nios_next_network
|
deprecation:
|
||||||
|
removal_version: 5.0.0
|
||||||
|
warning_text: The community.general.nios_next_network lookup plugin has been
|
||||||
|
deprecated. Please use infoblox.nios_modules.nios_next_network instead.
|
||||||
modules:
|
modules:
|
||||||
ali_instance_facts:
|
ali_instance_facts:
|
||||||
tombstone:
|
tombstone:
|
||||||
@@ -257,37 +266,85 @@ plugin_routing:
|
|||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
warning_text: Use community.general.nginx_status_info instead.
|
warning_text: Use community.general.nginx_status_info instead.
|
||||||
nios_a_record:
|
nios_a_record:
|
||||||
redirect: infoblox.nios_modules.nios_a_record
|
deprecation:
|
||||||
|
removal_version: 5.0.0
|
||||||
|
warning_text: The community.general.nios_a_record module has been deprecated.
|
||||||
|
Please use infoblox.nios_modules.nios_a_record instead.
|
||||||
nios_aaaa_record:
|
nios_aaaa_record:
|
||||||
redirect: infoblox.nios_modules.nios_aaaa_record
|
deprecation:
|
||||||
|
removal_version: 5.0.0
|
||||||
|
warning_text: The community.general.nios_aaaa_record module has been deprecated.
|
||||||
|
Please use infoblox.nios_modules.nios_aaaa_record instead.
|
||||||
nios_cname_record:
|
nios_cname_record:
|
||||||
redirect: infoblox.nios_modules.nios_cname_record
|
deprecation:
|
||||||
|
removal_version: 5.0.0
|
||||||
|
warning_text: The community.general.nios_cname_record module has been deprecated.
|
||||||
|
Please use infoblox.nios_modules.nios_cname_record instead.
|
||||||
nios_dns_view:
|
nios_dns_view:
|
||||||
redirect: infoblox.nios_modules.nios_dns_view
|
deprecation:
|
||||||
|
removal_version: 5.0.0
|
||||||
|
warning_text: The community.general.nios_dns_view module has been deprecated.
|
||||||
|
Please use infoblox.nios_modules.nios_dns_view instead.
|
||||||
nios_fixed_address:
|
nios_fixed_address:
|
||||||
redirect: infoblox.nios_modules.nios_fixed_address
|
deprecation:
|
||||||
|
removal_version: 5.0.0
|
||||||
|
warning_text: The community.general.nios_fixed_address module has been deprecated.
|
||||||
|
Please use infoblox.nios_modules.nios_fixed_address instead.
|
||||||
nios_host_record:
|
nios_host_record:
|
||||||
redirect: infoblox.nios_modules.nios_host_record
|
deprecation:
|
||||||
|
removal_version: 5.0.0
|
||||||
|
warning_text: The community.general.nios_host_record module has been deprecated.
|
||||||
|
Please use infoblox.nios_modules.nios_host_record instead.
|
||||||
nios_member:
|
nios_member:
|
||||||
redirect: infoblox.nios_modules.nios_member
|
deprecation:
|
||||||
|
removal_version: 5.0.0
|
||||||
|
warning_text: The community.general.nios_member module has been deprecated.
|
||||||
|
Please use infoblox.nios_modules.nios_member instead.
|
||||||
nios_mx_record:
|
nios_mx_record:
|
||||||
redirect: infoblox.nios_modules.nios_mx_record
|
deprecation:
|
||||||
|
removal_version: 5.0.0
|
||||||
|
warning_text: The community.general.nios_mx_record module has been deprecated.
|
||||||
|
Please use infoblox.nios_modules.nios_mx_record instead.
|
||||||
nios_naptr_record:
|
nios_naptr_record:
|
||||||
redirect: infoblox.nios_modules.nios_naptr_record
|
deprecation:
|
||||||
|
removal_version: 5.0.0
|
||||||
|
warning_text: The community.general.nios_naptr_record module has been deprecated.
|
||||||
|
Please use infoblox.nios_modules.nios_naptr_record instead.
|
||||||
nios_network:
|
nios_network:
|
||||||
redirect: infoblox.nios_modules.nios_network
|
deprecation:
|
||||||
|
removal_version: 5.0.0
|
||||||
|
warning_text: The community.general.nios_network module has been deprecated.
|
||||||
|
Please use infoblox.nios_modules.nios_network instead.
|
||||||
nios_network_view:
|
nios_network_view:
|
||||||
redirect: infoblox.nios_modules.nios_network_view
|
deprecation:
|
||||||
|
removal_version: 5.0.0
|
||||||
|
warning_text: The community.general.nios_network_view module has been deprecated.
|
||||||
|
Please use infoblox.nios_modules.nios_network_view instead.
|
||||||
nios_nsgroup:
|
nios_nsgroup:
|
||||||
redirect: infoblox.nios_modules.nios_nsgroup
|
deprecation:
|
||||||
|
removal_version: 5.0.0
|
||||||
|
warning_text: The community.general.nios_nsgroup module has been deprecated.
|
||||||
|
Please use infoblox.nios_modules.nios_nsgroup instead.
|
||||||
nios_ptr_record:
|
nios_ptr_record:
|
||||||
redirect: infoblox.nios_modules.nios_ptr_record
|
deprecation:
|
||||||
|
removal_version: 5.0.0
|
||||||
|
warning_text: The community.general.nios_ptr_record module has been deprecated.
|
||||||
|
Please use infoblox.nios_modules.nios_ptr_record instead.
|
||||||
nios_srv_record:
|
nios_srv_record:
|
||||||
redirect: infoblox.nios_modules.nios_srv_record
|
deprecation:
|
||||||
|
removal_version: 5.0.0
|
||||||
|
warning_text: The community.general.nios_srv_record module has been deprecated.
|
||||||
|
Please use infoblox.nios_modules.nios_srv_record instead.
|
||||||
nios_txt_record:
|
nios_txt_record:
|
||||||
redirect: infoblox.nios_modules.nios_txt_record
|
deprecation:
|
||||||
|
removal_version: 5.0.0
|
||||||
|
warning_text: The community.general.nios_txt_record module has been deprecated.
|
||||||
|
Please use infoblox.nios_modules.nios_txt_record instead.
|
||||||
nios_zone:
|
nios_zone:
|
||||||
redirect: infoblox.nios_modules.nios_zone
|
deprecation:
|
||||||
|
removal_version: 5.0.0
|
||||||
|
warning_text: The community.general.nios_zone module has been deprecated.
|
||||||
|
Please use infoblox.nios_modules.nios_zone instead.
|
||||||
ome_device_info:
|
ome_device_info:
|
||||||
redirect: dellemc.openmanage.ome_device_info
|
redirect: dellemc.openmanage.ome_device_info
|
||||||
one_image_facts:
|
one_image_facts:
|
||||||
@@ -571,7 +628,10 @@ plugin_routing:
|
|||||||
kubevirt_vm_options:
|
kubevirt_vm_options:
|
||||||
redirect: community.kubevirt.kubevirt_vm_options
|
redirect: community.kubevirt.kubevirt_vm_options
|
||||||
nios:
|
nios:
|
||||||
redirect: infoblox.nios_modules.nios
|
deprecation:
|
||||||
|
removal_version: 5.0.0
|
||||||
|
warning_text: The community.general.nios document fragment has been deprecated.
|
||||||
|
Please use infoblox.nios_modules.nios instead.
|
||||||
postgresql:
|
postgresql:
|
||||||
redirect: community.postgresql.postgresql
|
redirect: community.postgresql.postgresql
|
||||||
module_utils:
|
module_utils:
|
||||||
@@ -590,7 +650,10 @@ plugin_routing:
|
|||||||
kubevirt:
|
kubevirt:
|
||||||
redirect: community.kubevirt.kubevirt
|
redirect: community.kubevirt.kubevirt
|
||||||
net_tools.nios.api:
|
net_tools.nios.api:
|
||||||
redirect: infoblox.nios_modules.api
|
deprecation:
|
||||||
|
removal_version: 5.0.0
|
||||||
|
warning_text: The community.general.net_tools.nios.api module_utils has been
|
||||||
|
deprecated. Please use infoblox.nios_modules.api instead.
|
||||||
postgresql:
|
postgresql:
|
||||||
redirect: community.postgresql.postgresql
|
redirect: community.postgresql.postgresql
|
||||||
remote_management.dellemc.dellemc_idrac:
|
remote_management.dellemc.dellemc_idrac:
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright: (c) 2020, quidame <quidame@poivron.org>
|
# Copyright: (c) 2020, quidame <quidame@poivron.org>
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright: (c) 2020, Amin Vakil <info@aminvakil.com>
|
# Copyright: (c) 2020, Amin Vakil <info@aminvakil.com>
|
||||||
# Copyright: (c) 2016-2018, Matt Davis <mdavis@ansible.com>
|
# Copyright: (c) 2016-2018, Matt Davis <mdavis@ansible.com>
|
||||||
# Copyright: (c) 2018, Sam Doran <sdoran@redhat.com>
|
# Copyright: (c) 2018, Sam Doran <sdoran@redhat.com>
|
||||||
@@ -8,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
from ansible.errors import AnsibleError, AnsibleConnectionFailure
|
from ansible.errors import AnsibleError, AnsibleConnectionFailure
|
||||||
from ansible.module_utils.common.text.converters import to_native, to_text
|
from ansible.module_utils._text import to_native, to_text
|
||||||
from ansible.module_utils.common.collections import is_string
|
from ansible.module_utils.common.collections import is_string
|
||||||
from ansible.plugins.action import ActionBase
|
from ansible.plugins.action import ActionBase
|
||||||
from ansible.utils.display import Display
|
from ansible.utils.display import Display
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ DOCUMENTATION = '''
|
|||||||
short_description: Do As user
|
short_description: Do As user
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the doas utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the doas utility.
|
||||||
author: Ansible Core Team
|
author: ansible (@core)
|
||||||
options:
|
options:
|
||||||
become_user:
|
become_user:
|
||||||
description: User you 'become' to execute the task
|
description: User you 'become' to execute the task
|
||||||
@@ -81,7 +81,7 @@ DOCUMENTATION = '''
|
|||||||
|
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from ansible.module_utils.common.text.converters import to_bytes
|
from ansible.module_utils._text import to_bytes
|
||||||
from ansible.plugins.become import BecomeBase
|
from ansible.plugins.become import BecomeBase
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ DOCUMENTATION = '''
|
|||||||
short_description: Centrify's Direct Authorize
|
short_description: Centrify's Direct Authorize
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the dzdo utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the dzdo utility.
|
||||||
author: Ansible Core Team
|
author: ansible (@core)
|
||||||
options:
|
options:
|
||||||
become_user:
|
become_user:
|
||||||
description: User you 'become' to execute the task
|
description: User you 'become' to execute the task
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ DOCUMENTATION = '''
|
|||||||
short_description: Kerberos substitute user
|
short_description: Kerberos substitute user
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the ksu utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the ksu utility.
|
||||||
author: Ansible Core Team
|
author: ansible (@core)
|
||||||
options:
|
options:
|
||||||
become_user:
|
become_user:
|
||||||
description: User you 'become' to execute the task
|
description: User you 'become' to execute the task
|
||||||
@@ -82,7 +82,7 @@ DOCUMENTATION = '''
|
|||||||
|
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from ansible.module_utils.common.text.converters import to_bytes
|
from ansible.module_utils._text import to_bytes
|
||||||
from ansible.plugins.become import BecomeBase
|
from ansible.plugins.become import BecomeBase
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ DOCUMENTATION = '''
|
|||||||
short_description: Systemd's machinectl privilege escalation
|
short_description: Systemd's machinectl privilege escalation
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the machinectl utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the machinectl utility.
|
||||||
author: Ansible Core Team
|
author: ansible (@core)
|
||||||
options:
|
options:
|
||||||
become_user:
|
become_user:
|
||||||
description: User you 'become' to execute the task
|
description: User you 'become' to execute the task
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ DOCUMENTATION = '''
|
|||||||
short_description: PowerBroker run
|
short_description: PowerBroker run
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the pbrun utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the pbrun utility.
|
||||||
author: Ansible Core Team
|
author: ansible (@core)
|
||||||
options:
|
options:
|
||||||
become_user:
|
become_user:
|
||||||
description: User you 'become' to execute the task
|
description: User you 'become' to execute the task
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ DOCUMENTATION = '''
|
|||||||
short_description: profile based execution
|
short_description: profile based execution
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the pfexec utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the pfexec utility.
|
||||||
author: Ansible Core Team
|
author: ansible (@core)
|
||||||
options:
|
options:
|
||||||
become_user:
|
become_user:
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ DOCUMENTATION = '''
|
|||||||
short_description: Privilege Manager run
|
short_description: Privilege Manager run
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the pmrun utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the pmrun utility.
|
||||||
author: Ansible Core Team
|
author: ansible (@core)
|
||||||
options:
|
options:
|
||||||
become_exe:
|
become_exe:
|
||||||
description: Sudo executable
|
description: Sudo executable
|
||||||
|
|||||||
9
plugins/cache/memcached.py
vendored
9
plugins/cache/memcached.py
vendored
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (c) 2014, Brian Coca, Josh Drake, et al
|
# (c) 2014, Brian Coca, Josh Drake, et al
|
||||||
# (c) 2017 Ansible Project
|
# (c) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
@@ -154,12 +153,12 @@ class CacheModuleKeys(MutableSet):
|
|||||||
def __len__(self):
|
def __len__(self):
|
||||||
return len(self._keyset)
|
return len(self._keyset)
|
||||||
|
|
||||||
def add(self, value):
|
def add(self, key):
|
||||||
self._keyset[value] = time.time()
|
self._keyset[key] = time.time()
|
||||||
self._cache.set(self.PREFIX, self._keyset)
|
self._cache.set(self.PREFIX, self._keyset)
|
||||||
|
|
||||||
def discard(self, value):
|
def discard(self, key):
|
||||||
del self._keyset[value]
|
del self._keyset[key]
|
||||||
self._cache.set(self.PREFIX, self._keyset)
|
self._cache.set(self.PREFIX, self._keyset)
|
||||||
|
|
||||||
def remove_by_timerange(self, s_min, s_max):
|
def remove_by_timerange(self, s_min, s_max):
|
||||||
|
|||||||
1
plugins/cache/pickle.py
vendored
1
plugins/cache/pickle.py
vendored
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (c) 2017, Brian Coca
|
# (c) 2017, Brian Coca
|
||||||
# (c) 2017 Ansible Project
|
# (c) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|||||||
3
plugins/cache/redis.py
vendored
3
plugins/cache/redis.py
vendored
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (c) 2014, Brian Coca, Josh Drake, et al
|
# (c) 2014, Brian Coca, Josh Drake, et al
|
||||||
# (c) 2017 Ansible Project
|
# (c) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
@@ -68,7 +67,7 @@ import json
|
|||||||
|
|
||||||
from ansible import constants as C
|
from ansible import constants as C
|
||||||
from ansible.errors import AnsibleError
|
from ansible.errors import AnsibleError
|
||||||
from ansible.module_utils.common.text.converters import to_native
|
from ansible.module_utils._text import to_native
|
||||||
from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
|
from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
|
||||||
from ansible.plugins.cache import BaseCacheModule
|
from ansible.plugins.cache import BaseCacheModule
|
||||||
from ansible.release import __version__ as ansible_base_version
|
from ansible.release import __version__ as ansible_base_version
|
||||||
|
|||||||
1
plugins/cache/yaml.py
vendored
1
plugins/cache/yaml.py
vendored
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (c) 2017, Brian Coca
|
# (c) 2017, Brian Coca
|
||||||
# (c) 2017 Ansible Project
|
# (c) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
|
# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
|
||||||
# (c) 2017 Ansible Project
|
# (c) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (c) 2018, Ivan Aragones Muniesa <ivan.aragones.muniesa@gmail.com>
|
# (c) 2018, Ivan Aragones Muniesa <ivan.aragones.muniesa@gmail.com>
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
'''
|
'''
|
||||||
@@ -45,8 +44,6 @@ class CallbackModule(CallbackBase):
|
|||||||
_task_total = 0
|
_task_total = 0
|
||||||
_host_counter = 1
|
_host_counter = 1
|
||||||
_host_total = 0
|
_host_total = 0
|
||||||
_current_batch_total = 0
|
|
||||||
_previous_batch_total = 0
|
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(CallbackModule, self).__init__()
|
super(CallbackModule, self).__init__()
|
||||||
@@ -78,11 +75,8 @@ class CallbackModule(CallbackBase):
|
|||||||
self._display.banner(msg)
|
self._display.banner(msg)
|
||||||
self._play = play
|
self._play = play
|
||||||
|
|
||||||
self._previous_batch_total = self._current_batch_total
|
|
||||||
self._current_batch_total = self._previous_batch_total + len(self._all_vars()['vars']['ansible_play_batch'])
|
|
||||||
self._host_total = len(self._all_vars()['vars']['ansible_play_hosts_all'])
|
self._host_total = len(self._all_vars()['vars']['ansible_play_hosts_all'])
|
||||||
self._task_total = len(self._play.get_tasks()[0])
|
self._task_total = len(self._play.get_tasks()[0])
|
||||||
self._task_counter = 1
|
|
||||||
|
|
||||||
def v2_playbook_on_stats(self, stats):
|
def v2_playbook_on_stats(self, stats):
|
||||||
self._display.banner("PLAY RECAP")
|
self._display.banner("PLAY RECAP")
|
||||||
@@ -150,7 +144,7 @@ class CallbackModule(CallbackBase):
|
|||||||
path = task.get_path()
|
path = task.get_path()
|
||||||
if path:
|
if path:
|
||||||
self._display.display("task path: %s" % path, color=C.COLOR_DEBUG)
|
self._display.display("task path: %s" % path, color=C.COLOR_DEBUG)
|
||||||
self._host_counter = self._previous_batch_total
|
self._host_counter = 0
|
||||||
self._task_counter += 1
|
self._task_counter += 1
|
||||||
|
|
||||||
def v2_runner_on_ok(self, result):
|
def v2_runner_on_ok(self, result):
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (c) 2016, Dag Wieers <dag@wieers.com>
|
# (c) 2016, Dag Wieers <dag@wieers.com>
|
||||||
# (c) 2017 Ansible Project
|
# (c) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|||||||
@@ -792,7 +792,7 @@ from ansible.utils.color import colorize, hostcolor
|
|||||||
from ansible.template import Templar
|
from ansible.template import Templar
|
||||||
from ansible.vars.manager import VariableManager
|
from ansible.vars.manager import VariableManager
|
||||||
from ansible.plugins.callback.default import CallbackModule as Default
|
from ansible.plugins.callback.default import CallbackModule as Default
|
||||||
from ansible.module_utils.common.text.converters import to_text
|
from ansible.module_utils._text import to_text
|
||||||
|
|
||||||
|
|
||||||
class DummyStdout(object):
|
class DummyStdout(object):
|
||||||
|
|||||||
@@ -1,423 +0,0 @@
|
|||||||
# (C) 2021, Victor Martinez <VictorMartinezRubio@gmail.com>
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
author: Victor Martinez (@v1v) <VictorMartinezRubio@gmail.com>
|
|
||||||
name: elastic
|
|
||||||
type: notification
|
|
||||||
short_description: Create distributed traces for each Ansible task in Elastic APM
|
|
||||||
version_added: 3.8.0
|
|
||||||
description:
|
|
||||||
- This callback creates distributed traces for each Ansible task in Elastic APM.
|
|
||||||
- You can configure the plugin with environment variables.
|
|
||||||
- See U(https://www.elastic.co/guide/en/apm/agent/python/current/configuration.html).
|
|
||||||
options:
|
|
||||||
hide_task_arguments:
|
|
||||||
default: false
|
|
||||||
type: bool
|
|
||||||
description:
|
|
||||||
- Hide the arguments for a task.
|
|
||||||
env:
|
|
||||||
- name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS
|
|
||||||
apm_service_name:
|
|
||||||
default: ansible
|
|
||||||
type: str
|
|
||||||
description:
|
|
||||||
- The service name resource attribute.
|
|
||||||
env:
|
|
||||||
- name: ELASTIC_APM_SERVICE_NAME
|
|
||||||
apm_server_url:
|
|
||||||
type: str
|
|
||||||
description:
|
|
||||||
- Use the APM server and its environment variables.
|
|
||||||
env:
|
|
||||||
- name: ELASTIC_APM_SERVER_URL
|
|
||||||
apm_secret_token:
|
|
||||||
type: str
|
|
||||||
description:
|
|
||||||
- Use the APM server token
|
|
||||||
env:
|
|
||||||
- name: ELASTIC_APM_SECRET_TOKEN
|
|
||||||
apm_api_key:
|
|
||||||
type: str
|
|
||||||
description:
|
|
||||||
- Use the APM API key
|
|
||||||
env:
|
|
||||||
- name: ELASTIC_APM_API_KEY
|
|
||||||
apm_verify_server_cert:
|
|
||||||
default: true
|
|
||||||
type: bool
|
|
||||||
description:
|
|
||||||
- Verifies the SSL certificate if an HTTPS connection.
|
|
||||||
env:
|
|
||||||
- name: ELASTIC_APM_VERIFY_SERVER_CERT
|
|
||||||
traceparent:
|
|
||||||
type: str
|
|
||||||
description:
|
|
||||||
- The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header).
|
|
||||||
env:
|
|
||||||
- name: TRACEPARENT
|
|
||||||
requirements:
|
|
||||||
- elastic-apm (Python library)
|
|
||||||
'''
|
|
||||||
|
|
||||||
|
|
||||||
EXAMPLES = '''
|
|
||||||
examples: |
|
|
||||||
Enable the plugin in ansible.cfg:
|
|
||||||
[defaults]
|
|
||||||
callbacks_enabled = community.general.elastic
|
|
||||||
|
|
||||||
Set the environment variable:
|
|
||||||
export ELASTIC_APM_SERVER_URL=<your APM server URL)>
|
|
||||||
export ELASTIC_APM_SERVICE_NAME=your_service_name
|
|
||||||
export ELASTIC_APM_API_KEY=your_APM_API_KEY
|
|
||||||
'''
|
|
||||||
|
|
||||||
import getpass
|
|
||||||
import socket
|
|
||||||
import time
|
|
||||||
import uuid
|
|
||||||
|
|
||||||
from collections import OrderedDict
|
|
||||||
from os.path import basename
|
|
||||||
|
|
||||||
from ansible.errors import AnsibleError, AnsibleRuntimeError
|
|
||||||
from ansible.module_utils.six import raise_from
|
|
||||||
from ansible.plugins.callback import CallbackBase
|
|
||||||
|
|
||||||
try:
|
|
||||||
from elasticapm import Client, capture_span, trace_parent_from_string, instrument, label
|
|
||||||
except ImportError as imp_exc:
|
|
||||||
ELASTIC_LIBRARY_IMPORT_ERROR = imp_exc
|
|
||||||
else:
|
|
||||||
ELASTIC_LIBRARY_IMPORT_ERROR = None
|
|
||||||
|
|
||||||
|
|
||||||
class TaskData:
|
|
||||||
"""
|
|
||||||
Data about an individual task.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, uuid, name, path, play, action, args):
|
|
||||||
self.uuid = uuid
|
|
||||||
self.name = name
|
|
||||||
self.path = path
|
|
||||||
self.play = play
|
|
||||||
self.host_data = OrderedDict()
|
|
||||||
self.start = time.time()
|
|
||||||
self.action = action
|
|
||||||
self.args = args
|
|
||||||
|
|
||||||
def add_host(self, host):
|
|
||||||
if host.uuid in self.host_data:
|
|
||||||
if host.status == 'included':
|
|
||||||
# concatenate task include output from multiple items
|
|
||||||
host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result)
|
|
||||||
else:
|
|
||||||
return
|
|
||||||
|
|
||||||
self.host_data[host.uuid] = host
|
|
||||||
|
|
||||||
|
|
||||||
class HostData:
|
|
||||||
"""
|
|
||||||
Data about an individual host.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, uuid, name, status, result):
|
|
||||||
self.uuid = uuid
|
|
||||||
self.name = name
|
|
||||||
self.status = status
|
|
||||||
self.result = result
|
|
||||||
self.finish = time.time()
|
|
||||||
|
|
||||||
|
|
||||||
class ElasticSource(object):
    """Collects task data during a playbook run and exports it to Elastic APM.

    One instance lives for the whole run: the callback plugin feeds it
    start/finish events and finally asks it to emit the distributed trace.
    """

    def __init__(self, display):
        self.ansible_playbook = ""
        self.ansible_version = None
        # Unique id for this playbook run; attached to the trace as a label.
        self.session = str(uuid.uuid4())
        hostname = socket.gethostname()
        self.host = hostname
        try:
            self.ip_address = socket.gethostbyname(hostname)
        except Exception:
            # Name resolution can fail (hostname not in DNS/hosts); the trace
            # simply omits the IP label in that case.
            self.ip_address = None
        self.user = getpass.getuser()

        self._display = display

    def start_task(self, tasks_data, hide_task_arguments, play_name, task):
        """ record the start of a task for one or more hosts """

        uuid = task._uuid

        # The same task can be announced once per host batch; record it once.
        if uuid in tasks_data:
            return

        name = task.get_name().strip()
        path = task.get_path()
        action = task.action
        args = None

        # Respect both the per-task no_log flag and the plugin-wide option.
        if not task.no_log and not hide_task_arguments:
            args = ', '.join(('%s=%s' % a for a in task.args.items()))

        tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args)

    def finish_task(self, tasks_data, status, result):
        """ record the results of a task for a single host """

        task_uuid = result._task._uuid

        if hasattr(result, '_host') and result._host is not None:
            host_uuid = result._host._uuid
            host_name = result._host.name
        else:
            # Results from dynamic includes carry no host object.
            host_uuid = 'include'
            host_name = 'include'

        task = tasks_data[task_uuid]

        # The controller's Ansible version is injected into the task args.
        if self.ansible_version is None and result._task_fields['args'].get('_ansible_version'):
            self.ansible_version = result._task_fields['args'].get('_ansible_version')

        task.add_host(HostData(host_uuid, host_name, status, result))

    def generate_distributed_traces(self, tasks_data, status, end_time, traceparent, apm_service_name,
                                    apm_server_url, apm_verify_server_cert, apm_secret_token, apm_api_key):
        """ generate distributed traces from the collected TaskData and HostData """

        tasks = []
        # The first recorded task marks the start of the parent transaction.
        parent_start_time = None
        for task_uuid, task in tasks_data.items():
            if parent_start_time is None:
                parent_start_time = task.start
            tasks.append(task)

        apm_cli = self.init_apm_client(apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key)
        if apm_cli:
            instrument()  # Only call this once, as early as possible.
            if traceparent:
                # Link this run to an upstream trace (e.g. a CI pipeline).
                parent = trace_parent_from_string(traceparent)
                apm_cli.begin_transaction("Session", trace_parent=parent, start=parent_start_time)
            else:
                apm_cli.begin_transaction("Session", start=parent_start_time)
            # Populate trace metadata attributes
            if self.ansible_version is not None:
                label(ansible_version=self.ansible_version)
            label(ansible_session=self.session, ansible_host_name=self.host, ansible_host_user=self.user)
            if self.ip_address is not None:
                label(ansible_host_ip=self.ip_address)

            # One span per (task, host) pair.
            for task_data in tasks:
                for host_uuid, host_data in task_data.host_data.items():
                    self.create_span_data(apm_cli, task_data, host_data)

            apm_cli.end_transaction(name=__name__, result=status, duration=end_time - parent_start_time)

    def create_span_data(self, apm_cli, task_data, host_data):
        """ create the span with the given TaskData and HostData """

        name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)

        message = "success"
        status = "success"
        enriched_error_message = None
        if host_data.status == 'included':
            rc = 0
        else:
            res = host_data.result._result
            rc = res.get('rc', 0)
            if host_data.status == 'failed':
                message = self.get_error_message(res)
                enriched_error_message = self.enrich_error_message(res)
                status = "failure"
            elif host_data.status == 'skipped':
                if 'skip_reason' in res:
                    message = res['skip_reason']
                else:
                    message = 'skipped'
                status = "unknown"

        with capture_span(task_data.name,
                          start=task_data.start,
                          span_type="ansible.task.run",
                          duration=host_data.finish - task_data.start,
                          labels={"ansible.task.args": task_data.args,
                                  "ansible.task.message": message,
                                  "ansible.task.module": task_data.action,
                                  "ansible.task.name": name,
                                  "ansible.task.result": rc,
                                  "ansible.task.host.name": host_data.name,
                                  "ansible.task.host.status": host_data.status}) as span:
            span.outcome = status
            if 'failure' in status:
                # Attach a synthetic exception so APM surfaces the failure.
                exception = AnsibleRuntimeError(message="{0}: {1} failed with error message {2}".format(task_data.action, name, enriched_error_message))
                apm_cli.capture_exception(exc_info=(type(exception), exception, exception.__traceback__), handled=True)

    def init_apm_client(self, apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key):
        """Build an elastic-apm Client, or return None when no server URL is set."""
        if apm_server_url:
            return Client(service_name=apm_service_name,
                          server_url=apm_server_url,
                          # BUGFIX: honor the configured apm_verify_server_cert
                          # option; it was previously hard-coded to False, which
                          # silently disabled TLS certificate verification.
                          verify_server_cert=apm_verify_server_cert,
                          secret_token=apm_secret_token,
                          api_key=apm_api_key,
                          use_elastic_traceparent_header=True,
                          debug=True)

    @staticmethod
    def get_error_message(result):
        """Return a short error: last line of the exception if present, else 'msg'."""
        if result.get('exception') is not None:
            return ElasticSource._last_line(result['exception'])
        return result.get('msg', 'failed')

    @staticmethod
    def _last_line(text):
        # The last traceback line usually carries the exception type/message.
        lines = text.strip().split('\n')
        return lines[-1]

    @staticmethod
    def enrich_error_message(result):
        """Combine msg, exception and stderr into one multi-line description."""
        message = result.get('msg', 'failed')
        exception = result.get('exception')
        stderr = result.get('stderr')
        return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr)
|
|
||||||
|
|
||||||
|
|
||||||
class CallbackModule(CallbackBase):
|
|
||||||
"""
|
|
||||||
This callback creates distributed traces with Elastic APM.
|
|
||||||
"""
|
|
||||||
|
|
||||||
CALLBACK_VERSION = 2.0
|
|
||||||
CALLBACK_TYPE = 'notification'
|
|
||||||
CALLBACK_NAME = 'community.general.elastic'
|
|
||||||
CALLBACK_NEEDS_ENABLED = True
|
|
||||||
|
|
||||||
    def __init__(self, display=None):
        """Initialise callback state; abort at once if elastic-apm is not importable."""
        super(CallbackModule, self).__init__(display=display)
        # Option values; real values are resolved later in set_options().
        self.hide_task_arguments = None
        self.apm_service_name = None
        self.ansible_playbook = None
        self.traceparent = False
        self.play_name = None
        self.tasks_data = None
        self.errors = 0
        self.disabled = False

        if ELASTIC_LIBRARY_IMPORT_ERROR:
            # Chain the original ImportError as the cause for easier debugging.
            raise_from(
                AnsibleError('The `elastic-apm` must be installed to use this plugin'),
                ELASTIC_LIBRARY_IMPORT_ERROR)

        self.tasks_data = OrderedDict()

        self.elastic = ElasticSource(display=self._display)
|
|
||||||
|
|
||||||
def set_options(self, task_keys=None, var_options=None, direct=None):
|
|
||||||
super(CallbackModule, self).set_options(task_keys=task_keys,
|
|
||||||
var_options=var_options,
|
|
||||||
direct=direct)
|
|
||||||
|
|
||||||
self.hide_task_arguments = self.get_option('hide_task_arguments')
|
|
||||||
|
|
||||||
self.apm_service_name = self.get_option('apm_service_name')
|
|
||||||
if not self.apm_service_name:
|
|
||||||
self.apm_service_name = 'ansible'
|
|
||||||
|
|
||||||
self.apm_server_url = self.get_option('apm_server_url')
|
|
||||||
self.apm_secret_token = self.get_option('apm_secret_token')
|
|
||||||
self.apm_api_key = self.get_option('apm_api_key')
|
|
||||||
self.apm_verify_server_cert = self.get_option('apm_verify_server_cert')
|
|
||||||
self.traceparent = self.get_option('traceparent')
|
|
||||||
|
|
||||||
def v2_playbook_on_start(self, playbook):
|
|
||||||
self.ansible_playbook = basename(playbook._file_name)
|
|
||||||
|
|
||||||
def v2_playbook_on_play_start(self, play):
|
|
||||||
self.play_name = play.get_name()
|
|
||||||
|
|
||||||
def v2_runner_on_no_hosts(self, task):
|
|
||||||
self.elastic.start_task(
|
|
||||||
self.tasks_data,
|
|
||||||
self.hide_task_arguments,
|
|
||||||
self.play_name,
|
|
||||||
task
|
|
||||||
)
|
|
||||||
|
|
||||||
def v2_playbook_on_task_start(self, task, is_conditional):
|
|
||||||
self.elastic.start_task(
|
|
||||||
self.tasks_data,
|
|
||||||
self.hide_task_arguments,
|
|
||||||
self.play_name,
|
|
||||||
task
|
|
||||||
)
|
|
||||||
|
|
||||||
def v2_playbook_on_cleanup_task_start(self, task):
|
|
||||||
self.elastic.start_task(
|
|
||||||
self.tasks_data,
|
|
||||||
self.hide_task_arguments,
|
|
||||||
self.play_name,
|
|
||||||
task
|
|
||||||
)
|
|
||||||
|
|
||||||
def v2_playbook_on_handler_task_start(self, task):
|
|
||||||
self.elastic.start_task(
|
|
||||||
self.tasks_data,
|
|
||||||
self.hide_task_arguments,
|
|
||||||
self.play_name,
|
|
||||||
task
|
|
||||||
)
|
|
||||||
|
|
||||||
def v2_runner_on_failed(self, result, ignore_errors=False):
|
|
||||||
self.errors += 1
|
|
||||||
self.elastic.finish_task(
|
|
||||||
self.tasks_data,
|
|
||||||
'failed',
|
|
||||||
result
|
|
||||||
)
|
|
||||||
|
|
||||||
def v2_runner_on_ok(self, result):
|
|
||||||
self.elastic.finish_task(
|
|
||||||
self.tasks_data,
|
|
||||||
'ok',
|
|
||||||
result
|
|
||||||
)
|
|
||||||
|
|
||||||
def v2_runner_on_skipped(self, result):
|
|
||||||
self.elastic.finish_task(
|
|
||||||
self.tasks_data,
|
|
||||||
'skipped',
|
|
||||||
result
|
|
||||||
)
|
|
||||||
|
|
||||||
def v2_playbook_on_include(self, included_file):
|
|
||||||
self.elastic.finish_task(
|
|
||||||
self.tasks_data,
|
|
||||||
'included',
|
|
||||||
included_file
|
|
||||||
)
|
|
||||||
|
|
||||||
def v2_playbook_on_stats(self, stats):
|
|
||||||
if self.errors == 0:
|
|
||||||
status = "success"
|
|
||||||
else:
|
|
||||||
status = "failure"
|
|
||||||
self.elastic.generate_distributed_traces(
|
|
||||||
self.tasks_data,
|
|
||||||
status,
|
|
||||||
time.time(),
|
|
||||||
self.traceparent,
|
|
||||||
self.apm_service_name,
|
|
||||||
self.apm_server_url,
|
|
||||||
self.apm_verify_server_cert,
|
|
||||||
self.apm_secret_token,
|
|
||||||
self.apm_api_key
|
|
||||||
)
|
|
||||||
|
|
||||||
def v2_runner_on_async_failed(self, result, **kwargs):
|
|
||||||
self.errors += 1
|
|
||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (C) 2014, Matt Martz <matt@sivel.net>
|
# (C) 2014, Matt Martz <matt@sivel.net>
|
||||||
# (c) 2017 Ansible Project
|
# (c) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright (C) 2016 maxn nikolaev.makc@gmail.com
|
# Copyright (C) 2016 maxn nikolaev.makc@gmail.com
|
||||||
# Copyright (c) 2017 Ansible Project
|
# Copyright (c) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
|
# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
|
||||||
# (c) 2017 Ansible Project
|
# (c) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
@@ -32,7 +31,7 @@ import time
|
|||||||
import json
|
import json
|
||||||
|
|
||||||
from ansible.utils.path import makedirs_safe
|
from ansible.utils.path import makedirs_safe
|
||||||
from ansible.module_utils.common.text.converters import to_bytes
|
from ansible.module_utils._text import to_bytes
|
||||||
from ansible.module_utils.common._collections_compat import MutableMapping
|
from ansible.module_utils.common._collections_compat import MutableMapping
|
||||||
from ansible.parsing.ajson import AnsibleJSONEncoder
|
from ansible.parsing.ajson import AnsibleJSONEncoder
|
||||||
from ansible.plugins.callback import CallbackBase
|
from ansible.plugins.callback import CallbackBase
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (c) 2018, Samir Musali <samir.musali@logdna.com>
|
# (c) 2018, Samir Musali <samir.musali@logdna.com>
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
@@ -78,7 +77,7 @@ def get_mac():
|
|||||||
|
|
||||||
# Getting hostname of system:
|
# Getting hostname of system:
|
||||||
def get_hostname():
|
def get_hostname():
|
||||||
return str(socket.gethostname()).split('.local', 1)[0]
|
return str(socket.gethostname()).split('.local')[0]
|
||||||
|
|
||||||
|
|
||||||
# Getting IP of system:
|
# Getting IP of system:
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (c) 2015, Logentries.com, Jimmy Tang <jimmy.tang@logentries.com>
|
# (c) 2015, Logentries.com, Jimmy Tang <jimmy.tang@logentries.com>
|
||||||
# (c) 2017 Ansible Project
|
# (c) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
@@ -112,7 +111,7 @@ try:
|
|||||||
except ImportError:
|
except ImportError:
|
||||||
HAS_FLATDICT = False
|
HAS_FLATDICT = False
|
||||||
|
|
||||||
from ansible.module_utils.common.text.converters import to_bytes, to_text
|
from ansible.module_utils._text import to_bytes, to_text
|
||||||
from ansible.plugins.callback import CallbackBase
|
from ansible.plugins.callback import CallbackBase
|
||||||
|
|
||||||
# Todo:
|
# Todo:
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (C) 2020, Yevhen Khmelenko <ujenmr@gmail.com>
|
# (C) 2020, Yevhen Khmelenko <ujenmr@gmail.com>
|
||||||
# (C) 2017 Ansible Project
|
# (C) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
@@ -94,7 +93,6 @@ ansible.cfg: |
|
|||||||
|
|
||||||
import os
|
import os
|
||||||
import json
|
import json
|
||||||
from ansible import context
|
|
||||||
import socket
|
import socket
|
||||||
import uuid
|
import uuid
|
||||||
import logging
|
import logging
|
||||||
@@ -153,11 +151,11 @@ class CallbackModule(CallbackBase):
|
|||||||
self.base_data['ansible_pre_command_output'] = os.popen(
|
self.base_data['ansible_pre_command_output'] = os.popen(
|
||||||
self.ls_pre_command).read()
|
self.ls_pre_command).read()
|
||||||
|
|
||||||
if context.CLIARGS is not None:
|
if self._options is not None:
|
||||||
self.base_data['ansible_checkmode'] = context.CLIARGS.get('check')
|
self.base_data['ansible_checkmode'] = self._options.check
|
||||||
self.base_data['ansible_tags'] = context.CLIARGS.get('tags')
|
self.base_data['ansible_tags'] = self._options.tags
|
||||||
self.base_data['ansible_skip_tags'] = context.CLIARGS.get('skip_tags')
|
self.base_data['ansible_skip_tags'] = self._options.skip_tags
|
||||||
self.base_data['inventory'] = context.CLIARGS.get('inventory')
|
self.base_data['inventory'] = self._options.inventory
|
||||||
|
|
||||||
def set_options(self, task_keys=None, var_options=None, direct=None):
|
def set_options(self, task_keys=None, var_options=None, direct=None):
|
||||||
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
|
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
|
||||||
|
|||||||
@@ -62,7 +62,7 @@ import re
|
|||||||
import smtplib
|
import smtplib
|
||||||
|
|
||||||
from ansible.module_utils.six import string_types
|
from ansible.module_utils.six import string_types
|
||||||
from ansible.module_utils.common.text.converters import to_bytes
|
from ansible.module_utils._text import to_bytes
|
||||||
from ansible.parsing.ajson import AnsibleJSONEncoder
|
from ansible.parsing.ajson import AnsibleJSONEncoder
|
||||||
from ansible.plugins.callback import CallbackBase
|
from ansible.plugins.callback import CallbackBase
|
||||||
|
|
||||||
|
|||||||
@@ -10,23 +10,22 @@ DOCUMENTATION = '''
|
|||||||
name: nrdp
|
name: nrdp
|
||||||
type: notification
|
type: notification
|
||||||
author: "Remi VERCHERE (@rverchere)"
|
author: "Remi VERCHERE (@rverchere)"
|
||||||
short_description: Post task results to a Nagios server through nrdp
|
short_description: post task result to a nagios server through nrdp
|
||||||
description:
|
description:
|
||||||
- This callback sends playbook results to Nagios.
|
- this callback send playbook result to nagios
|
||||||
- Nagios shall use NRDP to receive passive events.
|
- nagios shall use NRDP to recive passive events
|
||||||
- The passive check is sent to a dedicated host/service for Ansible.
|
- the passive check is sent to a dedicated host/service for ansible
|
||||||
options:
|
options:
|
||||||
url:
|
url:
|
||||||
description: URL of the nrdp server.
|
description: url of the nrdp server
|
||||||
required: true
|
required: True
|
||||||
env:
|
env:
|
||||||
- name : NRDP_URL
|
- name : NRDP_URL
|
||||||
ini:
|
ini:
|
||||||
- section: callback_nrdp
|
- section: callback_nrdp
|
||||||
key: url
|
key: url
|
||||||
type: string
|
|
||||||
validate_certs:
|
validate_certs:
|
||||||
description: Validate the SSL certificate of the nrdp server. (Used for HTTPS URLs.)
|
description: (bool) validate the SSL certificate of the nrdp server. (For HTTPS url)
|
||||||
env:
|
env:
|
||||||
- name: NRDP_VALIDATE_CERTS
|
- name: NRDP_VALIDATE_CERTS
|
||||||
ini:
|
ini:
|
||||||
@@ -34,43 +33,38 @@ DOCUMENTATION = '''
|
|||||||
key: validate_nrdp_certs
|
key: validate_nrdp_certs
|
||||||
- section: callback_nrdp
|
- section: callback_nrdp
|
||||||
key: validate_certs
|
key: validate_certs
|
||||||
type: boolean
|
default: False
|
||||||
default: false
|
|
||||||
aliases: [ validate_nrdp_certs ]
|
aliases: [ validate_nrdp_certs ]
|
||||||
token:
|
token:
|
||||||
description: Token to be allowed to push nrdp events.
|
description: token to be allowed to push nrdp events
|
||||||
required: true
|
required: True
|
||||||
env:
|
env:
|
||||||
- name: NRDP_TOKEN
|
- name: NRDP_TOKEN
|
||||||
ini:
|
ini:
|
||||||
- section: callback_nrdp
|
- section: callback_nrdp
|
||||||
key: token
|
key: token
|
||||||
type: string
|
|
||||||
hostname:
|
hostname:
|
||||||
description: Hostname where the passive check is linked to.
|
description: hostname where the passive check is linked to
|
||||||
required: true
|
required: True
|
||||||
env:
|
env:
|
||||||
- name : NRDP_HOSTNAME
|
- name : NRDP_HOSTNAME
|
||||||
ini:
|
ini:
|
||||||
- section: callback_nrdp
|
- section: callback_nrdp
|
||||||
key: hostname
|
key: hostname
|
||||||
type: string
|
|
||||||
servicename:
|
servicename:
|
||||||
description: Service where the passive check is linked to.
|
description: service where the passive check is linked to
|
||||||
required: true
|
required: True
|
||||||
env:
|
env:
|
||||||
- name : NRDP_SERVICENAME
|
- name : NRDP_SERVICENAME
|
||||||
ini:
|
ini:
|
||||||
- section: callback_nrdp
|
- section: callback_nrdp
|
||||||
key: servicename
|
key: servicename
|
||||||
type: string
|
|
||||||
'''
|
'''
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import json
|
import json
|
||||||
|
|
||||||
from ansible.module_utils.six.moves.urllib.parse import urlencode
|
from ansible.module_utils.six.moves.urllib.parse import urlencode
|
||||||
from ansible.module_utils.common.text.converters import to_bytes
|
|
||||||
from ansible.module_utils.urls import open_url
|
from ansible.module_utils.urls import open_url
|
||||||
from ansible.plugins.callback import CallbackBase
|
from ansible.plugins.callback import CallbackBase
|
||||||
|
|
||||||
@@ -144,7 +138,7 @@ class CallbackModule(CallbackBase):
|
|||||||
body = {
|
body = {
|
||||||
'cmd': 'submitcheck',
|
'cmd': 'submitcheck',
|
||||||
'token': self.token,
|
'token': self.token,
|
||||||
'XMLDATA': to_bytes(xmldata)
|
'XMLDATA': bytes(xmldata)
|
||||||
}
|
}
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (c) 2017 Ansible Project
|
# (c) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
|||||||
@@ -1,515 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (C) 2021, Victor Martinez <VictorMartinezRubio@gmail.com>
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
author: Victor Martinez (@v1v) <VictorMartinezRubio@gmail.com>
|
|
||||||
name: opentelemetry
|
|
||||||
type: notification
|
|
||||||
short_description: Create distributed traces with OpenTelemetry
|
|
||||||
version_added: 3.7.0
|
|
||||||
description:
|
|
||||||
- This callback creates distributed traces for each Ansible task with OpenTelemetry.
|
|
||||||
- You can configure the OpenTelemetry exporter and SDK with environment variables.
|
|
||||||
- See U(https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html).
|
|
||||||
- See U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#opentelemetry-sdk-environment-variables).
|
|
||||||
options:
|
|
||||||
hide_task_arguments:
|
|
||||||
default: false
|
|
||||||
type: bool
|
|
||||||
description:
|
|
||||||
- Hide the arguments for a task.
|
|
||||||
env:
|
|
||||||
- name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS
|
|
||||||
enable_from_environment:
|
|
||||||
type: str
|
|
||||||
description:
|
|
||||||
- Whether to enable this callback only if the given environment variable exists and it is set to C(true).
|
|
||||||
- This is handy when you use Configuration as Code and want to send distributed traces
|
|
||||||
if running in the CI rather when running Ansible locally.
|
|
||||||
- For such, it evaluates the given I(enable_from_environment) value as environment variable
|
|
||||||
and if set to true this plugin will be enabled.
|
|
||||||
env:
|
|
||||||
- name: ANSIBLE_OPENTELEMETRY_ENABLE_FROM_ENVIRONMENT
|
|
||||||
version_added: 3.8.0
|
|
||||||
otel_service_name:
|
|
||||||
default: ansible
|
|
||||||
type: str
|
|
||||||
description:
|
|
||||||
- The service name resource attribute.
|
|
||||||
env:
|
|
||||||
- name: OTEL_SERVICE_NAME
|
|
||||||
traceparent:
|
|
||||||
default: None
|
|
||||||
type: str
|
|
||||||
description:
|
|
||||||
- The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header).
|
|
||||||
env:
|
|
||||||
- name: TRACEPARENT
|
|
||||||
requirements:
|
|
||||||
- opentelemetry-api (Python library)
|
|
||||||
- opentelemetry-exporter-otlp (Python library)
|
|
||||||
- opentelemetry-sdk (Python library)
|
|
||||||
'''
|
|
||||||
|
|
||||||
|
|
||||||
EXAMPLES = '''
|
|
||||||
examples: |
|
|
||||||
Enable the plugin in ansible.cfg:
|
|
||||||
[defaults]
|
|
||||||
callbacks_enabled = community.general.opentelemetry
|
|
||||||
|
|
||||||
Set the environment variable:
|
|
||||||
export OTEL_EXPORTER_OTLP_ENDPOINT=<your endpoint (OTLP/HTTP)>
|
|
||||||
export OTEL_EXPORTER_OTLP_HEADERS="authorization=Bearer your_otel_token"
|
|
||||||
export OTEL_SERVICE_NAME=your_service_name
|
|
||||||
'''
|
|
||||||
|
|
||||||
import getpass
|
|
||||||
import os
|
|
||||||
import socket
|
|
||||||
import sys
|
|
||||||
import time
|
|
||||||
import uuid
|
|
||||||
|
|
||||||
from collections import OrderedDict
|
|
||||||
from os.path import basename
|
|
||||||
|
|
||||||
from ansible.errors import AnsibleError
|
|
||||||
from ansible.module_utils.six import raise_from
|
|
||||||
from ansible.module_utils.six.moves.urllib.parse import urlparse
|
|
||||||
from ansible.plugins.callback import CallbackBase
|
|
||||||
|
|
||||||
try:
|
|
||||||
from opentelemetry import trace
|
|
||||||
from opentelemetry.trace import SpanKind
|
|
||||||
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
|
|
||||||
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
|
|
||||||
from opentelemetry.trace.status import Status, StatusCode
|
|
||||||
from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
|
|
||||||
from opentelemetry.sdk.trace import TracerProvider
|
|
||||||
from opentelemetry.sdk.trace.export import (
|
|
||||||
BatchSpanProcessor
|
|
||||||
)
|
|
||||||
from opentelemetry.util._time import _time_ns
|
|
||||||
except ImportError as imp_exc:
|
|
||||||
OTEL_LIBRARY_IMPORT_ERROR = imp_exc
|
|
||||||
else:
|
|
||||||
OTEL_LIBRARY_IMPORT_ERROR = None
|
|
||||||
|
|
||||||
|
|
||||||
class TaskData:
    """
    Data about an individual task.
    """

    def __init__(self, uuid, name, path, play, action, args):
        self.uuid = uuid
        self.name = name
        self.path = path
        self.play = play
        self.host_data = OrderedDict()
        # time.time_ns() only exists on Python >= 3.7; older interpreters use
        # the OpenTelemetry compatibility helper instead.
        self.start = time.time_ns() if sys.version_info >= (3, 7) else _time_ns()
        self.action = action
        self.args = args

    def add_host(self, host):
        """Store *host*'s result, concatenating output from repeated includes."""
        previous = self.host_data.get(host.uuid)
        if previous is not None:
            if host.status != 'included':
                # A terminal result for this host already exists; keep it.
                return
            # concatenate task include output from multiple items
            host.result = '%s\n%s' % (previous.result, host.result)
        self.host_data[host.uuid] = host
|
|
||||||
|
|
||||||
|
|
||||||
class HostData:
    """Result of one task on one host, stamped with a nanosecond finish time."""

    def __init__(self, uuid, name, status, result):
        self.uuid = uuid
        self.name = name
        self.status = status
        self.result = result
        # time.time_ns() requires Python >= 3.7; otherwise fall back to the
        # OpenTelemetry compatibility helper.
        self.finish = time.time_ns() if sys.version_info >= (3, 7) else _time_ns()
|
|
||||||
|
|
||||||
|
|
||||||
class OpenTelemetrySource(object):
|
|
||||||
def __init__(self, display):
|
|
||||||
self.ansible_playbook = ""
|
|
||||||
self.ansible_version = None
|
|
||||||
self.session = str(uuid.uuid4())
|
|
||||||
self.host = socket.gethostname()
|
|
||||||
try:
|
|
||||||
self.ip_address = socket.gethostbyname(socket.gethostname())
|
|
||||||
except Exception as e:
|
|
||||||
self.ip_address = None
|
|
||||||
self.user = getpass.getuser()
|
|
||||||
|
|
||||||
self._display = display
|
|
||||||
|
|
||||||
def traceparent_context(self, traceparent):
|
|
||||||
carrier = dict()
|
|
||||||
carrier['traceparent'] = traceparent
|
|
||||||
return TraceContextTextMapPropagator().extract(carrier=carrier)
|
|
||||||
|
|
||||||
def start_task(self, tasks_data, hide_task_arguments, play_name, task):
|
|
||||||
""" record the start of a task for one or more hosts """
|
|
||||||
|
|
||||||
uuid = task._uuid
|
|
||||||
|
|
||||||
if uuid in tasks_data:
|
|
||||||
return
|
|
||||||
|
|
||||||
name = task.get_name().strip()
|
|
||||||
path = task.get_path()
|
|
||||||
action = task.action
|
|
||||||
args = None
|
|
||||||
|
|
||||||
if not task.no_log and not hide_task_arguments:
|
|
||||||
args = task.args
|
|
||||||
|
|
||||||
tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args)
|
|
||||||
|
|
||||||
def finish_task(self, tasks_data, status, result):
|
|
||||||
""" record the results of a task for a single host """
|
|
||||||
|
|
||||||
task_uuid = result._task._uuid
|
|
||||||
|
|
||||||
if hasattr(result, '_host') and result._host is not None:
|
|
||||||
host_uuid = result._host._uuid
|
|
||||||
host_name = result._host.name
|
|
||||||
else:
|
|
||||||
host_uuid = 'include'
|
|
||||||
host_name = 'include'
|
|
||||||
|
|
||||||
task = tasks_data[task_uuid]
|
|
||||||
|
|
||||||
if self.ansible_version is None and result._task_fields['args'].get('_ansible_version'):
|
|
||||||
self.ansible_version = result._task_fields['args'].get('_ansible_version')
|
|
||||||
|
|
||||||
task.add_host(HostData(host_uuid, host_name, status, result))
|
|
||||||
|
|
||||||
    def generate_distributed_traces(self, otel_service_name, ansible_playbook, tasks_data, status, traceparent):
        """ generate distributed traces from the collected TaskData and HostData """

        tasks = []
        # The first recorded task marks the start of the parent (playbook) span.
        parent_start_time = None
        for task_uuid, task in tasks_data.items():
            if parent_start_time is None:
                parent_start_time = task.start
            tasks.append(task)

        # NOTE(review): installs a fresh global TracerProvider on every call;
        # acceptable here because this runs once, at the end of the playbook.
        trace.set_tracer_provider(
            TracerProvider(
                resource=Resource.create({SERVICE_NAME: otel_service_name})
            )
        )

        # Exporter configuration (endpoint, headers, ...) comes from the
        # standard OTEL_* environment variables.
        processor = BatchSpanProcessor(OTLPSpanExporter())

        trace.get_tracer_provider().add_span_processor(processor)

        tracer = trace.get_tracer(__name__)

        # The parent span covers the whole playbook; it is linked to an
        # upstream trace when a traceparent header value was provided.
        with tracer.start_as_current_span(ansible_playbook, context=self.traceparent_context(traceparent),
                                          start_time=parent_start_time, kind=SpanKind.SERVER) as parent:
            parent.set_status(status)
            # Populate trace metadata attributes
            if self.ansible_version is not None:
                parent.set_attribute("ansible.version", self.ansible_version)
            parent.set_attribute("ansible.session", self.session)
            parent.set_attribute("ansible.host.name", self.host)
            if self.ip_address is not None:
                parent.set_attribute("ansible.host.ip", self.ip_address)
            parent.set_attribute("ansible.host.user", self.user)
            # One child span per (task, host) pair; spans are ended manually
            # inside update_span_data with the host's recorded finish time.
            for task in tasks:
                for host_uuid, host_data in task.host_data.items():
                    with tracer.start_as_current_span(task.name, start_time=task.start, end_on_exit=False) as span:
                        self.update_span_data(task, host_data, span)
|
|
||||||
|
|
||||||
    def update_span_data(self, task_data, host_data, span):
        """ update the span with the given TaskData and HostData """

        name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)

        message = 'success'
        res = {}
        rc = 0
        status = Status(status_code=StatusCode.OK)
        if host_data.status != 'included':
            # Support loops
            if 'results' in host_data.result._result:
                if host_data.status == 'failed':
                    message = self.get_error_message_from_results(host_data.result._result['results'], task_data.action)
                    enriched_error_message = self.enrich_error_message_from_results(host_data.result._result['results'], task_data.action)
            else:
                res = host_data.result._result
                rc = res.get('rc', 0)
                # NOTE(review): message/enriched_error_message are computed
                # here regardless of status; they are only used below when the
                # task actually failed — confirm this is intended.
                message = self.get_error_message(res)
                enriched_error_message = self.enrich_error_message(res)

            if host_data.status == 'failed':
                status = Status(status_code=StatusCode.ERROR, description=message)
                # Record an exception with the task message
                span.record_exception(BaseException(enriched_error_message))
            elif host_data.status == 'skipped':
                message = res['skip_reason'] if 'skip_reason' in res else 'skipped'
                status = Status(status_code=StatusCode.UNSET)
            elif host_data.status == 'ignored':
                status = Status(status_code=StatusCode.UNSET)

        span.set_status(status)
        # Module arguments are exported as two parallel tuples, except for
        # fact gathering (whose args are noise).
        if isinstance(task_data.args, dict) and "gather_facts" not in task_data.action:
            names = tuple(self.transform_ansible_unicode_to_str(k) for k in task_data.args.keys())
            values = tuple(self.transform_ansible_unicode_to_str(k) for k in task_data.args.values())
            self.set_span_attribute(span, ("ansible.task.args.name"), names)
            self.set_span_attribute(span, ("ansible.task.args.value"), values)
        self.set_span_attribute(span, "ansible.task.module", task_data.action)
        self.set_span_attribute(span, "ansible.task.message", message)
        self.set_span_attribute(span, "ansible.task.name", name)
        self.set_span_attribute(span, "ansible.task.result", rc)
        self.set_span_attribute(span, "ansible.task.host.name", host_data.name)
        self.set_span_attribute(span, "ansible.task.host.status", host_data.status)
        # This will allow to enrich the service map
        self.add_attributes_for_service_map_if_possible(span, task_data)
        # The span was opened with end_on_exit=False; close it with the
        # host's recorded finish timestamp.
        span.end(end_time=host_data.finish)
|
|
||||||
|
|
||||||
def set_span_attribute(self, span, attributeName, attributeValue):
|
|
||||||
""" update the span attribute with the given attribute and value if not None """
|
|
||||||
|
|
||||||
if span is None and self._display is not None:
|
|
||||||
self._display.warning('span object is None. Please double check if that is expected.')
|
|
||||||
else:
|
|
||||||
if attributeValue is not None:
|
|
||||||
span.set_attribute(attributeName, attributeValue)
|
|
||||||
|
|
||||||
def add_attributes_for_service_map_if_possible(self, span, task_data):
|
|
||||||
"""Update the span attributes with the service that the task interacted with, if possible."""
|
|
||||||
|
|
||||||
redacted_url = self.parse_and_redact_url_if_possible(task_data.args)
|
|
||||||
if redacted_url:
|
|
||||||
self.set_span_attribute(span, "http.url", redacted_url.geturl())
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def parse_and_redact_url_if_possible(args):
|
|
||||||
"""Parse and redact the url, if possible."""
|
|
||||||
|
|
||||||
try:
|
|
||||||
parsed_url = urlparse(OpenTelemetrySource.url_from_args(args))
|
|
||||||
except ValueError:
|
|
||||||
return None
|
|
||||||
|
|
||||||
if OpenTelemetrySource.is_valid_url(parsed_url):
|
|
||||||
return OpenTelemetrySource.redact_user_password(parsed_url)
|
|
||||||
return None
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def url_from_args(args):
|
|
||||||
# the order matters
|
|
||||||
url_args = ("url", "api_url", "baseurl", "repo", "server_url", "chart_repo_url")
|
|
||||||
for arg in url_args:
|
|
||||||
if args.get(arg):
|
|
||||||
return args.get(arg)
|
|
||||||
return ""
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def redact_user_password(url):
|
|
||||||
return url._replace(netloc=url.hostname) if url.password else url
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def is_valid_url(url):
|
|
||||||
if all([url.scheme, url.netloc, url.hostname]):
|
|
||||||
return "{{" not in url.hostname
|
|
||||||
return False
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def transform_ansible_unicode_to_str(value):
|
|
||||||
parsed_url = urlparse(str(value))
|
|
||||||
if OpenTelemetrySource.is_valid_url(parsed_url):
|
|
||||||
return OpenTelemetrySource.redact_user_password(parsed_url).geturl()
|
|
||||||
return str(value)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def get_error_message(result):
|
|
||||||
if result.get('exception') is not None:
|
|
||||||
return OpenTelemetrySource._last_line(result['exception'])
|
|
||||||
return result.get('msg', 'failed')
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def get_error_message_from_results(results, action):
|
|
||||||
for result in results:
|
|
||||||
if result.get('failed', False):
|
|
||||||
return ('{0}({1}) - {2}').format(action, result.get('item', 'none'), OpenTelemetrySource.get_error_message(result))
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _last_line(text):
|
|
||||||
lines = text.strip().split('\n')
|
|
||||||
return lines[-1]
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def enrich_error_message(result):
|
|
||||||
message = result.get('msg', 'failed')
|
|
||||||
exception = result.get('exception')
|
|
||||||
stderr = result.get('stderr')
|
|
||||||
return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def enrich_error_message_from_results(results, action):
|
|
||||||
message = ""
|
|
||||||
for result in results:
|
|
||||||
if result.get('failed', False):
|
|
||||||
message = ('{0}({1}) - {2}\n{3}').format(action, result.get('item', 'none'), OpenTelemetrySource.enrich_error_message(result), message)
|
|
||||||
return message
|
|
||||||
|
|
||||||
|
|
||||||
class CallbackModule(CallbackBase):
|
|
||||||
"""
|
|
||||||
This callback creates distributed traces.
|
|
||||||
"""
|
|
||||||
|
|
||||||
CALLBACK_VERSION = 2.0
|
|
||||||
CALLBACK_TYPE = 'notification'
|
|
||||||
CALLBACK_NAME = 'community.general.opentelemetry'
|
|
||||||
CALLBACK_NEEDS_ENABLED = True
|
|
||||||
|
|
||||||
def __init__(self, display=None):
|
|
||||||
super(CallbackModule, self).__init__(display=display)
|
|
||||||
self.hide_task_arguments = None
|
|
||||||
self.otel_service_name = None
|
|
||||||
self.ansible_playbook = None
|
|
||||||
self.play_name = None
|
|
||||||
self.tasks_data = None
|
|
||||||
self.errors = 0
|
|
||||||
self.disabled = False
|
|
||||||
self.traceparent = False
|
|
||||||
|
|
||||||
if OTEL_LIBRARY_IMPORT_ERROR:
|
|
||||||
raise_from(
|
|
||||||
AnsibleError('The `opentelemetry-api`, `opentelemetry-exporter-otlp` or `opentelemetry-sdk` must be installed to use this plugin'),
|
|
||||||
OTEL_LIBRARY_IMPORT_ERROR)
|
|
||||||
|
|
||||||
self.tasks_data = OrderedDict()
|
|
||||||
|
|
||||||
self.opentelemetry = OpenTelemetrySource(display=self._display)
|
|
||||||
|
|
||||||
def set_options(self, task_keys=None, var_options=None, direct=None):
|
|
||||||
super(CallbackModule, self).set_options(task_keys=task_keys,
|
|
||||||
var_options=var_options,
|
|
||||||
direct=direct)
|
|
||||||
|
|
||||||
environment_variable = self.get_option('enable_from_environment')
|
|
||||||
if environment_variable is not None and os.environ.get(environment_variable, 'false').lower() != 'true':
|
|
||||||
self.disabled = True
|
|
||||||
self._display.warning("The `enable_from_environment` option has been set and {0} is not enabled. "
|
|
||||||
"Disabling the `opentelemetry` callback plugin.".format(environment_variable))
|
|
||||||
|
|
||||||
self.hide_task_arguments = self.get_option('hide_task_arguments')
|
|
||||||
|
|
||||||
self.otel_service_name = self.get_option('otel_service_name')
|
|
||||||
|
|
||||||
if not self.otel_service_name:
|
|
||||||
self.otel_service_name = 'ansible'
|
|
||||||
|
|
||||||
# See https://github.com/open-telemetry/opentelemetry-specification/issues/740
|
|
||||||
self.traceparent = self.get_option('traceparent')
|
|
||||||
|
|
||||||
def v2_playbook_on_start(self, playbook):
|
|
||||||
self.ansible_playbook = basename(playbook._file_name)
|
|
||||||
|
|
||||||
def v2_playbook_on_play_start(self, play):
|
|
||||||
self.play_name = play.get_name()
|
|
||||||
|
|
||||||
def v2_runner_on_no_hosts(self, task):
|
|
||||||
self.opentelemetry.start_task(
|
|
||||||
self.tasks_data,
|
|
||||||
self.hide_task_arguments,
|
|
||||||
self.play_name,
|
|
||||||
task
|
|
||||||
)
|
|
||||||
|
|
||||||
def v2_playbook_on_task_start(self, task, is_conditional):
|
|
||||||
self.opentelemetry.start_task(
|
|
||||||
self.tasks_data,
|
|
||||||
self.hide_task_arguments,
|
|
||||||
self.play_name,
|
|
||||||
task
|
|
||||||
)
|
|
||||||
|
|
||||||
def v2_playbook_on_cleanup_task_start(self, task):
|
|
||||||
self.opentelemetry.start_task(
|
|
||||||
self.tasks_data,
|
|
||||||
self.hide_task_arguments,
|
|
||||||
self.play_name,
|
|
||||||
task
|
|
||||||
)
|
|
||||||
|
|
||||||
def v2_playbook_on_handler_task_start(self, task):
|
|
||||||
self.opentelemetry.start_task(
|
|
||||||
self.tasks_data,
|
|
||||||
self.hide_task_arguments,
|
|
||||||
self.play_name,
|
|
||||||
task
|
|
||||||
)
|
|
||||||
|
|
||||||
def v2_runner_on_failed(self, result, ignore_errors=False):
|
|
||||||
if ignore_errors:
|
|
||||||
status = 'ignored'
|
|
||||||
else:
|
|
||||||
status = 'failed'
|
|
||||||
self.errors += 1
|
|
||||||
|
|
||||||
self.opentelemetry.finish_task(
|
|
||||||
self.tasks_data,
|
|
||||||
status,
|
|
||||||
result
|
|
||||||
)
|
|
||||||
|
|
||||||
def v2_runner_on_ok(self, result):
|
|
||||||
self.opentelemetry.finish_task(
|
|
||||||
self.tasks_data,
|
|
||||||
'ok',
|
|
||||||
result
|
|
||||||
)
|
|
||||||
|
|
||||||
def v2_runner_on_skipped(self, result):
|
|
||||||
self.opentelemetry.finish_task(
|
|
||||||
self.tasks_data,
|
|
||||||
'skipped',
|
|
||||||
result
|
|
||||||
)
|
|
||||||
|
|
||||||
def v2_playbook_on_include(self, included_file):
|
|
||||||
self.opentelemetry.finish_task(
|
|
||||||
self.tasks_data,
|
|
||||||
'included',
|
|
||||||
included_file
|
|
||||||
)
|
|
||||||
|
|
||||||
def v2_playbook_on_stats(self, stats):
|
|
||||||
if self.errors == 0:
|
|
||||||
status = Status(status_code=StatusCode.OK)
|
|
||||||
else:
|
|
||||||
status = Status(status_code=StatusCode.ERROR)
|
|
||||||
self.opentelemetry.generate_distributed_traces(
|
|
||||||
self.otel_service_name,
|
|
||||||
self.ansible_playbook,
|
|
||||||
self.tasks_data,
|
|
||||||
status,
|
|
||||||
self.traceparent
|
|
||||||
)
|
|
||||||
|
|
||||||
def v2_runner_on_async_failed(self, result, **kwargs):
|
|
||||||
self.errors += 1
|
|
||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (c) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
|
# (c) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
|
||||||
# (c) 2017 Ansible Project
|
# (c) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (c) Fastly, inc 2016
|
# (c) Fastly, inc 2016
|
||||||
# (c) 2017 Ansible Project
|
# (c) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
@@ -41,7 +40,7 @@ import difflib
|
|||||||
|
|
||||||
from ansible import constants as C
|
from ansible import constants as C
|
||||||
from ansible.plugins.callback import CallbackBase
|
from ansible.plugins.callback import CallbackBase
|
||||||
from ansible.module_utils.common.text.converters import to_text
|
from ansible.module_utils._text import to_text
|
||||||
|
|
||||||
try:
|
try:
|
||||||
codeCodes = C.COLOR_CODES
|
codeCodes = C.COLOR_CODES
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (C) 2014-2015, Matt Martz <matt@sivel.net>
|
# (C) 2014-2015, Matt Martz <matt@sivel.net>
|
||||||
# (C) 2017 Ansible Project
|
# (C) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
@@ -59,7 +58,7 @@ import os
|
|||||||
import uuid
|
import uuid
|
||||||
|
|
||||||
from ansible import context
|
from ansible import context
|
||||||
from ansible.module_utils.common.text.converters import to_text
|
from ansible.module_utils._text import to_text
|
||||||
from ansible.module_utils.urls import open_url
|
from ansible.module_utils.urls import open_url
|
||||||
from ansible.plugins.callback import CallbackBase
|
from ansible.plugins.callback import CallbackBase
|
||||||
|
|
||||||
|
|||||||
@@ -68,16 +68,6 @@ DOCUMENTATION = '''
|
|||||||
type: bool
|
type: bool
|
||||||
default: false
|
default: false
|
||||||
version_added: 2.0.0
|
version_added: 2.0.0
|
||||||
batch:
|
|
||||||
description:
|
|
||||||
- Correlation ID which can be set across multiple playbook executions.
|
|
||||||
env:
|
|
||||||
- name: SPLUNK_BATCH
|
|
||||||
ini:
|
|
||||||
- section: callback_splunk
|
|
||||||
key: batch
|
|
||||||
type: str
|
|
||||||
version_added: 3.3.0
|
|
||||||
'''
|
'''
|
||||||
|
|
||||||
EXAMPLES = '''
|
EXAMPLES = '''
|
||||||
@@ -117,7 +107,7 @@ class SplunkHTTPCollectorSource(object):
|
|||||||
self.ip_address = socket.gethostbyname(socket.gethostname())
|
self.ip_address = socket.gethostbyname(socket.gethostname())
|
||||||
self.user = getpass.getuser()
|
self.user = getpass.getuser()
|
||||||
|
|
||||||
def send_event(self, url, authtoken, validate_certs, include_milliseconds, batch, state, result, runtime):
|
def send_event(self, url, authtoken, validate_certs, include_milliseconds, state, result, runtime):
|
||||||
if result._task_fields['args'].get('_ansible_check_mode') is True:
|
if result._task_fields['args'].get('_ansible_check_mode') is True:
|
||||||
self.ansible_check_mode = True
|
self.ansible_check_mode = True
|
||||||
|
|
||||||
@@ -136,8 +126,6 @@ class SplunkHTTPCollectorSource(object):
|
|||||||
data = {}
|
data = {}
|
||||||
data['uuid'] = result._task._uuid
|
data['uuid'] = result._task._uuid
|
||||||
data['session'] = self.session
|
data['session'] = self.session
|
||||||
if batch is not None:
|
|
||||||
data['batch'] = batch
|
|
||||||
data['status'] = state
|
data['status'] = state
|
||||||
|
|
||||||
if include_milliseconds:
|
if include_milliseconds:
|
||||||
@@ -187,7 +175,6 @@ class CallbackModule(CallbackBase):
|
|||||||
self.authtoken = None
|
self.authtoken = None
|
||||||
self.validate_certs = None
|
self.validate_certs = None
|
||||||
self.include_milliseconds = None
|
self.include_milliseconds = None
|
||||||
self.batch = None
|
|
||||||
self.splunk = SplunkHTTPCollectorSource()
|
self.splunk = SplunkHTTPCollectorSource()
|
||||||
|
|
||||||
def _runtime(self, result):
|
def _runtime(self, result):
|
||||||
@@ -225,8 +212,6 @@ class CallbackModule(CallbackBase):
|
|||||||
|
|
||||||
self.include_milliseconds = self.get_option('include_milliseconds')
|
self.include_milliseconds = self.get_option('include_milliseconds')
|
||||||
|
|
||||||
self.batch = self.get_option('batch')
|
|
||||||
|
|
||||||
def v2_playbook_on_start(self, playbook):
|
def v2_playbook_on_start(self, playbook):
|
||||||
self.splunk.ansible_playbook = basename(playbook._file_name)
|
self.splunk.ansible_playbook = basename(playbook._file_name)
|
||||||
|
|
||||||
@@ -242,7 +227,6 @@ class CallbackModule(CallbackBase):
|
|||||||
self.authtoken,
|
self.authtoken,
|
||||||
self.validate_certs,
|
self.validate_certs,
|
||||||
self.include_milliseconds,
|
self.include_milliseconds,
|
||||||
self.batch,
|
|
||||||
'OK',
|
'OK',
|
||||||
result,
|
result,
|
||||||
self._runtime(result)
|
self._runtime(result)
|
||||||
@@ -254,7 +238,6 @@ class CallbackModule(CallbackBase):
|
|||||||
self.authtoken,
|
self.authtoken,
|
||||||
self.validate_certs,
|
self.validate_certs,
|
||||||
self.include_milliseconds,
|
self.include_milliseconds,
|
||||||
self.batch,
|
|
||||||
'SKIPPED',
|
'SKIPPED',
|
||||||
result,
|
result,
|
||||||
self._runtime(result)
|
self._runtime(result)
|
||||||
@@ -266,7 +249,6 @@ class CallbackModule(CallbackBase):
|
|||||||
self.authtoken,
|
self.authtoken,
|
||||||
self.validate_certs,
|
self.validate_certs,
|
||||||
self.include_milliseconds,
|
self.include_milliseconds,
|
||||||
self.batch,
|
|
||||||
'FAILED',
|
'FAILED',
|
||||||
result,
|
result,
|
||||||
self._runtime(result)
|
self._runtime(result)
|
||||||
@@ -278,7 +260,6 @@ class CallbackModule(CallbackBase):
|
|||||||
self.authtoken,
|
self.authtoken,
|
||||||
self.validate_certs,
|
self.validate_certs,
|
||||||
self.include_milliseconds,
|
self.include_milliseconds,
|
||||||
self.batch,
|
|
||||||
'FAILED',
|
'FAILED',
|
||||||
result,
|
result,
|
||||||
self._runtime(result)
|
self._runtime(result)
|
||||||
@@ -290,7 +271,6 @@ class CallbackModule(CallbackBase):
|
|||||||
self.authtoken,
|
self.authtoken,
|
||||||
self.validate_certs,
|
self.validate_certs,
|
||||||
self.include_milliseconds,
|
self.include_milliseconds,
|
||||||
self.batch,
|
|
||||||
'UNREACHABLE',
|
'UNREACHABLE',
|
||||||
result,
|
result,
|
||||||
self._runtime(result)
|
self._runtime(result)
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (c) 2017 Ansible Project
|
# (c) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright: (c) 2017, Allyson Bowles <@akatch>
|
# Copyright: (c) 2017, Allyson Bowles <@akatch>
|
||||||
# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
@@ -23,7 +22,7 @@ DOCUMENTATION = '''
|
|||||||
from os.path import basename
|
from os.path import basename
|
||||||
from ansible import constants as C
|
from ansible import constants as C
|
||||||
from ansible import context
|
from ansible import context
|
||||||
from ansible.module_utils.common.text.converters import to_text
|
from ansible.module_utils._text import to_text
|
||||||
from ansible.utils.color import colorize, hostcolor
|
from ansible.utils.color import colorize, hostcolor
|
||||||
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
|
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (c) 2017 Ansible Project
|
# (c) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
@@ -26,7 +25,7 @@ import re
|
|||||||
import string
|
import string
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
from ansible.module_utils.common.text.converters import to_bytes, to_text
|
from ansible.module_utils._text import to_bytes, to_text
|
||||||
from ansible.module_utils.six import string_types
|
from ansible.module_utils.six import string_types
|
||||||
from ansible.parsing.yaml.dumper import AnsibleDumper
|
from ansible.parsing.yaml.dumper import AnsibleDumper
|
||||||
from ansible.plugins.callback import CallbackBase, strip_internal_keys, module_response_deepcopy
|
from ansible.plugins.callback import CallbackBase, strip_internal_keys, module_response_deepcopy
|
||||||
@@ -42,29 +41,28 @@ def should_use_block(value):
|
|||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
class MyDumper(AnsibleDumper):
|
def my_represent_scalar(self, tag, value, style=None):
|
||||||
def represent_scalar(self, tag, value, style=None):
|
"""Uses block style for multi-line strings"""
|
||||||
"""Uses block style for multi-line strings"""
|
if style is None:
|
||||||
if style is None:
|
if should_use_block(value):
|
||||||
if should_use_block(value):
|
style = '|'
|
||||||
style = '|'
|
# we care more about readable than accuracy, so...
|
||||||
# we care more about readable than accuracy, so...
|
# ...no trailing space
|
||||||
# ...no trailing space
|
value = value.rstrip()
|
||||||
value = value.rstrip()
|
# ...and non-printable characters
|
||||||
# ...and non-printable characters
|
value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
|
||||||
value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
|
# ...tabs prevent blocks from expanding
|
||||||
# ...tabs prevent blocks from expanding
|
value = value.expandtabs()
|
||||||
value = value.expandtabs()
|
# ...and odd bits of whitespace
|
||||||
# ...and odd bits of whitespace
|
value = re.sub(r'[\x0b\x0c\r]', '', value)
|
||||||
value = re.sub(r'[\x0b\x0c\r]', '', value)
|
# ...as does trailing space
|
||||||
# ...as does trailing space
|
value = re.sub(r' +\n', '\n', value)
|
||||||
value = re.sub(r' +\n', '\n', value)
|
else:
|
||||||
else:
|
style = self.default_style
|
||||||
style = self.default_style
|
node = yaml.representer.ScalarNode(tag, value, style=style)
|
||||||
node = yaml.representer.ScalarNode(tag, value, style=style)
|
if self.alias_key is not None:
|
||||||
if self.alias_key is not None:
|
self.represented_objects[self.alias_key] = node
|
||||||
self.represented_objects[self.alias_key] = node
|
return node
|
||||||
return node
|
|
||||||
|
|
||||||
|
|
||||||
class CallbackModule(Default):
|
class CallbackModule(Default):
|
||||||
@@ -80,6 +78,7 @@ class CallbackModule(Default):
|
|||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(CallbackModule, self).__init__()
|
super(CallbackModule, self).__init__()
|
||||||
|
yaml.representer.BaseRepresenter.represent_scalar = my_represent_scalar
|
||||||
|
|
||||||
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
|
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
|
||||||
if result.get('_ansible_no_log', False):
|
if result.get('_ansible_no_log', False):
|
||||||
@@ -121,7 +120,7 @@ class CallbackModule(Default):
|
|||||||
|
|
||||||
if abridged_result:
|
if abridged_result:
|
||||||
dumped += '\n'
|
dumped += '\n'
|
||||||
dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=MyDumper, default_flow_style=False))
|
dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False))
|
||||||
|
|
||||||
# indent by a couple of spaces
|
# indent by a couple of spaces
|
||||||
dumped = '\n '.join(dumped.split('\n')).rstrip()
|
dumped = '\n '.join(dumped.split('\n')).rstrip()
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
||||||
#
|
#
|
||||||
# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
|
# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
|
||||||
@@ -55,7 +54,7 @@ from ansible.errors import AnsibleError
|
|||||||
from ansible.module_utils.basic import is_executable
|
from ansible.module_utils.basic import is_executable
|
||||||
from ansible.module_utils.common.process import get_bin_path
|
from ansible.module_utils.common.process import get_bin_path
|
||||||
from ansible.module_utils.six.moves import shlex_quote
|
from ansible.module_utils.six.moves import shlex_quote
|
||||||
from ansible.module_utils.common.text.converters import to_bytes, to_native
|
from ansible.module_utils._text import to_bytes, to_native
|
||||||
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
||||||
from ansible.utils.display import Display
|
from ansible.utils.display import Display
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
||||||
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
|
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
|
||||||
# Copyright (c) 2013, Michael Scherer <misc@zarb.org>
|
# Copyright (c) 2013, Michael Scherer <misc@zarb.org>
|
||||||
@@ -9,7 +8,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Michael Scherer (@mscherer) <misc@zarb.org>
|
author: Michael Scherer (@msherer) <misc@zarb.org>
|
||||||
name: funcd
|
name: funcd
|
||||||
short_description: Use funcd to connect to target
|
short_description: Use funcd to connect to target
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Based on jail.py
|
# Based on jail.py
|
||||||
# (c) 2013, Michael Scherer <misc@zarb.org>
|
# (c) 2013, Michael Scherer <misc@zarb.org>
|
||||||
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
|
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
|
||||||
@@ -33,7 +32,7 @@ DOCUMENTATION = '''
|
|||||||
import subprocess
|
import subprocess
|
||||||
|
|
||||||
from ansible_collections.community.general.plugins.connection.jail import Connection as Jail
|
from ansible_collections.community.general.plugins.connection.jail import Connection as Jail
|
||||||
from ansible.module_utils.common.text.converters import to_native
|
from ansible.module_utils._text import to_native
|
||||||
from ansible.errors import AnsibleError
|
from ansible.errors import AnsibleError
|
||||||
from ansible.utils.display import Display
|
from ansible.utils.display import Display
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Based on local.py by Michael DeHaan <michael.dehaan@gmail.com>
|
# Based on local.py by Michael DeHaan <michael.dehaan@gmail.com>
|
||||||
# and chroot.py by Maykel Moya <mmoya@speedyrails.com>
|
# and chroot.py by Maykel Moya <mmoya@speedyrails.com>
|
||||||
# Copyright (c) 2013, Michael Scherer <misc@zarb.org>
|
# Copyright (c) 2013, Michael Scherer <misc@zarb.org>
|
||||||
@@ -39,7 +38,7 @@ import traceback
|
|||||||
|
|
||||||
from ansible.errors import AnsibleError
|
from ansible.errors import AnsibleError
|
||||||
from ansible.module_utils.six.moves import shlex_quote
|
from ansible.module_utils.six.moves import shlex_quote
|
||||||
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
|
from ansible.module_utils._text import to_bytes, to_native, to_text
|
||||||
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
||||||
from ansible.utils.display import Display
|
from ansible.utils.display import Display
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (c) 2015, Joerg Thalheim <joerg@higgsboson.tk>
|
# (c) 2015, Joerg Thalheim <joerg@higgsboson.tk>
|
||||||
# Copyright (c) 2017 Ansible Project
|
# Copyright (c) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
@@ -44,7 +43,7 @@ except ImportError:
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
from ansible import errors
|
from ansible import errors
|
||||||
from ansible.module_utils.common.text.converters import to_bytes, to_native
|
from ansible.module_utils._text import to_bytes, to_native
|
||||||
from ansible.plugins.connection import ConnectionBase
|
from ansible.plugins.connection import ConnectionBase
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (c) 2016 Matt Clay <matt@mystile.com>
|
# (c) 2016 Matt Clay <matt@mystile.com>
|
||||||
# (c) 2017 Ansible Project
|
# (c) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
@@ -47,7 +46,7 @@ from distutils.spawn import find_executable
|
|||||||
from subprocess import Popen, PIPE
|
from subprocess import Popen, PIPE
|
||||||
|
|
||||||
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
|
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
|
||||||
from ansible.module_utils.common.text.converters import to_bytes, to_text
|
from ansible.module_utils._text import to_bytes, to_text
|
||||||
from ansible.plugins.connection import ConnectionBase
|
from ansible.plugins.connection import ConnectionBase
|
||||||
|
|
||||||
|
|
||||||
@@ -89,9 +88,9 @@ class Connection(ConnectionBase):
|
|||||||
local_cmd.extend(["--project", self.get_option("project")])
|
local_cmd.extend(["--project", self.get_option("project")])
|
||||||
local_cmd.extend([
|
local_cmd.extend([
|
||||||
"exec",
|
"exec",
|
||||||
"%s:%s" % (self.get_option("remote"), self.get_option("remote_addr")),
|
"%s:%s" % (self.get_option("remote"), self._host),
|
||||||
"--",
|
"--",
|
||||||
self.get_option("executable"), "-c", cmd
|
self._play_context.executable, "-c", cmd
|
||||||
])
|
])
|
||||||
|
|
||||||
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
||||||
@@ -126,7 +125,7 @@ class Connection(ConnectionBase):
|
|||||||
local_cmd.extend([
|
local_cmd.extend([
|
||||||
"file", "push",
|
"file", "push",
|
||||||
in_path,
|
in_path,
|
||||||
"%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), out_path)
|
"%s:%s/%s" % (self.get_option("remote"), self._host, out_path)
|
||||||
])
|
])
|
||||||
|
|
||||||
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
||||||
@@ -145,7 +144,7 @@ class Connection(ConnectionBase):
|
|||||||
local_cmd.extend(["--project", self.get_option("project")])
|
local_cmd.extend(["--project", self.get_option("project")])
|
||||||
local_cmd.extend([
|
local_cmd.extend([
|
||||||
"file", "pull",
|
"file", "pull",
|
||||||
"%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), in_path),
|
"%s:%s/%s" % (self.get_option("remote"), self._host, in_path),
|
||||||
out_path
|
out_path
|
||||||
])
|
])
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Based on the buildah connection plugin
|
# Based on the buildah connection plugin
|
||||||
# Copyright (c) 2017 Ansible Project
|
# Copyright (c) 2017 Ansible Project
|
||||||
# 2018 Kushal Das
|
# 2018 Kushal Das
|
||||||
@@ -40,7 +39,7 @@ DOCUMENTATION = '''
|
|||||||
|
|
||||||
import subprocess
|
import subprocess
|
||||||
|
|
||||||
from ansible.module_utils.common.text.converters import to_bytes
|
from ansible.module_utils._text import to_bytes
|
||||||
from ansible.plugins.connection import ConnectionBase, ensure_connect
|
from ansible.plugins.connection import ConnectionBase, ensure_connect
|
||||||
from ansible.errors import AnsibleConnectionFailure
|
from ansible.errors import AnsibleConnectionFailure
|
||||||
from ansible.utils.display import Display
|
from ansible.utils.display import Display
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
||||||
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
|
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
|
||||||
# Based on func.py
|
# Based on func.py
|
||||||
@@ -51,7 +50,7 @@ class Connection(ConnectionBase):
|
|||||||
self._connected = True
|
self._connected = True
|
||||||
return self
|
return self
|
||||||
|
|
||||||
def exec_command(self, cmd, in_data=None, sudoable=False):
|
def exec_command(self, cmd, sudoable=False, in_data=None):
|
||||||
""" run a command on the remote minion """
|
""" run a command on the remote minion """
|
||||||
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
|
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
||||||
# and chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
|
# and chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
|
||||||
# and jail.py (c) 2013, Michael Scherer <misc@zarb.org>
|
# and jail.py (c) 2013, Michael Scherer <misc@zarb.org>
|
||||||
@@ -34,7 +33,7 @@ import traceback
|
|||||||
|
|
||||||
from ansible.errors import AnsibleError
|
from ansible.errors import AnsibleError
|
||||||
from ansible.module_utils.six.moves import shlex_quote
|
from ansible.module_utils.six.moves import shlex_quote
|
||||||
from ansible.module_utils.common.text.converters import to_bytes
|
from ansible.module_utils._text import to_bytes
|
||||||
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
||||||
from ansible.utils.display import Display
|
from ansible.utils.display import Display
|
||||||
|
|
||||||
|
|||||||
138
plugins/doc_fragments/_netapp.py
Normal file
138
plugins/doc_fragments/_netapp.py
Normal file
@@ -0,0 +1,138 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright: (c) 2018, Sumit Kumar <sumit4@netapp.com>, chris Archibald <carchi@netapp.com>
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
|
||||||
|
class ModuleDocFragment(object):
|
||||||
|
|
||||||
|
DOCUMENTATION = r'''
|
||||||
|
options:
|
||||||
|
- See respective platform section for more details
|
||||||
|
requirements:
|
||||||
|
- See respective platform section for more details
|
||||||
|
notes:
|
||||||
|
- Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire
|
||||||
|
'''
|
||||||
|
|
||||||
|
# Documentation fragment for ONTAP (na_cdot)
|
||||||
|
ONTAP = r'''
|
||||||
|
options:
|
||||||
|
hostname:
|
||||||
|
required: true
|
||||||
|
description:
|
||||||
|
- The hostname or IP address of the ONTAP instance.
|
||||||
|
username:
|
||||||
|
required: true
|
||||||
|
description:
|
||||||
|
- This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
|
||||||
|
For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/).
|
||||||
|
aliases: ['user']
|
||||||
|
password:
|
||||||
|
required: true
|
||||||
|
description:
|
||||||
|
- Password for the specified user.
|
||||||
|
aliases: ['pass']
|
||||||
|
requirements:
|
||||||
|
- A physical or virtual clustered Data ONTAP system. The modules were developed with Clustered Data ONTAP 8.3
|
||||||
|
- Ansible 2.2
|
||||||
|
- netapp-lib (2015.9.25). Install using 'pip install netapp-lib'
|
||||||
|
|
||||||
|
notes:
|
||||||
|
- The modules prefixed with na\\_cdot are built to support the ONTAP storage platform.
|
||||||
|
|
||||||
|
'''
|
||||||
|
|
||||||
|
# Documentation fragment for SolidFire
|
||||||
|
SOLIDFIRE = r'''
|
||||||
|
options:
|
||||||
|
hostname:
|
||||||
|
required: true
|
||||||
|
description:
|
||||||
|
- The hostname or IP address of the SolidFire cluster.
|
||||||
|
username:
|
||||||
|
required: true
|
||||||
|
description:
|
||||||
|
- Please ensure that the user has the adequate permissions. For more information, please read the official documentation
|
||||||
|
U(https://mysupport.netapp.com/documentation/docweb/index.html?productID=62636&language=en-US).
|
||||||
|
aliases: ['user']
|
||||||
|
password:
|
||||||
|
required: true
|
||||||
|
description:
|
||||||
|
- Password for the specified user.
|
||||||
|
aliases: ['pass']
|
||||||
|
|
||||||
|
requirements:
|
||||||
|
- The modules were developed with SolidFire 10.1
|
||||||
|
- solidfire-sdk-python (1.1.0.92) or greater. Install using 'pip install solidfire-sdk-python'
|
||||||
|
|
||||||
|
notes:
|
||||||
|
- The modules prefixed with na\\_elementsw are built to support the SolidFire storage platform.
|
||||||
|
|
||||||
|
'''
|
||||||
|
|
||||||
|
# Documentation fragment for ONTAP (na_ontap)
|
||||||
|
NA_ONTAP = r'''
|
||||||
|
options:
|
||||||
|
hostname:
|
||||||
|
description:
|
||||||
|
- The hostname or IP address of the ONTAP instance.
|
||||||
|
type: str
|
||||||
|
required: true
|
||||||
|
username:
|
||||||
|
description:
|
||||||
|
- This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
|
||||||
|
For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/).
|
||||||
|
type: str
|
||||||
|
required: true
|
||||||
|
aliases: [ user ]
|
||||||
|
password:
|
||||||
|
description:
|
||||||
|
- Password for the specified user.
|
||||||
|
type: str
|
||||||
|
required: true
|
||||||
|
aliases: [ pass ]
|
||||||
|
https:
|
||||||
|
description:
|
||||||
|
- Enable and disable https
|
||||||
|
type: bool
|
||||||
|
default: no
|
||||||
|
validate_certs:
|
||||||
|
description:
|
||||||
|
- If set to C(no), the SSL certificates will not be validated.
|
||||||
|
- This should only set to C(False) used on personally controlled sites using self-signed certificates.
|
||||||
|
type: bool
|
||||||
|
default: yes
|
||||||
|
http_port:
|
||||||
|
description:
|
||||||
|
- Override the default port (80 or 443) with this port
|
||||||
|
type: int
|
||||||
|
ontapi:
|
||||||
|
description:
|
||||||
|
- The ontap api version to use
|
||||||
|
type: int
|
||||||
|
use_rest:
|
||||||
|
description:
|
||||||
|
- REST API if supported by the target system for all the resources and attributes the module requires. Otherwise will revert to ZAPI.
|
||||||
|
- Always -- will always use the REST API
|
||||||
|
- Never -- will always use the ZAPI
|
||||||
|
- Auto -- will try to use the REST Api
|
||||||
|
default: Auto
|
||||||
|
choices: ['Never', 'Always', 'Auto']
|
||||||
|
type: str
|
||||||
|
|
||||||
|
|
||||||
|
requirements:
|
||||||
|
- A physical or virtual clustered Data ONTAP system. The modules support Data ONTAP 9.1 and onward
|
||||||
|
- Ansible 2.6
|
||||||
|
- Python2 netapp-lib (2017.10.30) or later. Install using 'pip install netapp-lib'
|
||||||
|
- Python3 netapp-lib (2018.11.13) or later. Install using 'pip install netapp-lib'
|
||||||
|
- To enable http on the cluster you must run the following commands 'set -privilege advanced;' 'system services web modify -http-enabled true;'
|
||||||
|
|
||||||
|
notes:
|
||||||
|
- The modules prefixed with na\\_ontap are built to support the ONTAP storage platform.
|
||||||
|
|
||||||
|
'''
|
||||||
@@ -1,41 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright: (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
|
|
||||||
# Standard documentation fragment
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
client_id:
|
|
||||||
description:
|
|
||||||
- The OAuth consumer key.
|
|
||||||
- If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
|
|
||||||
type: str
|
|
||||||
client_secret:
|
|
||||||
description:
|
|
||||||
- The OAuth consumer secret.
|
|
||||||
- If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
|
|
||||||
type: str
|
|
||||||
user:
|
|
||||||
description:
|
|
||||||
- The username.
|
|
||||||
- If not set the environment variable C(BITBUCKET_USERNAME) will be used.
|
|
||||||
type: str
|
|
||||||
version_added: 4.0.0
|
|
||||||
password:
|
|
||||||
description:
|
|
||||||
- The App password.
|
|
||||||
- If not set the environment variable C(BITBUCKET_PASSWORD) will be used.
|
|
||||||
type: str
|
|
||||||
version_added: 4.0.0
|
|
||||||
notes:
|
|
||||||
- Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
|
|
||||||
- Bitbucket App password can be created from Bitbucket profile -> Personal Settings -> App passwords.
|
|
||||||
- If both OAuth and Basic Auth credentials are passed, OAuth credentials take precedence.
|
|
||||||
'''
|
|
||||||
@@ -1,31 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
|
|
||||||
# Standard files documentation fragment
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
requirements:
|
|
||||||
- requests (Python library U(https://pypi.org/project/requests/))
|
|
||||||
|
|
||||||
options:
|
|
||||||
api_token:
|
|
||||||
description:
|
|
||||||
- GitLab access token with API permissions.
|
|
||||||
type: str
|
|
||||||
api_oauth_token:
|
|
||||||
description:
|
|
||||||
- GitLab OAuth token for logging in.
|
|
||||||
type: str
|
|
||||||
version_added: 4.2.0
|
|
||||||
api_job_token:
|
|
||||||
description:
|
|
||||||
- GitLab CI job token for logging in.
|
|
||||||
type: str
|
|
||||||
version_added: 4.2.0
|
|
||||||
'''
|
|
||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP
|
# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP
|
||||||
# GNU General Public License v3.0+
|
# GNU General Public License v3.0+
|
||||||
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright: (c) 2018, Huawei Inc.
|
# Copyright: (c) 2018, Huawei Inc.
|
||||||
# GNU General Public License v3.0+
|
# GNU General Public License v3.0+
|
||||||
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|||||||
103
plugins/doc_fragments/nios.py
Normal file
103
plugins/doc_fragments/nios.py
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright: (c) 2015, Peter Sprygada <psprygada@ansible.com>
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
|
||||||
|
class ModuleDocFragment(object):
|
||||||
|
|
||||||
|
# Standard files documentation fragment
|
||||||
|
DOCUMENTATION = r'''
|
||||||
|
options:
|
||||||
|
provider:
|
||||||
|
description:
|
||||||
|
- A dict object containing connection details.
|
||||||
|
type: dict
|
||||||
|
suboptions:
|
||||||
|
host:
|
||||||
|
description:
|
||||||
|
- Specifies the DNS host name or address for connecting to the remote
|
||||||
|
instance of NIOS WAPI over REST
|
||||||
|
- Value can also be specified using C(INFOBLOX_HOST) environment
|
||||||
|
variable.
|
||||||
|
type: str
|
||||||
|
username:
|
||||||
|
description:
|
||||||
|
- Configures the username to use to authenticate the connection to
|
||||||
|
the remote instance of NIOS.
|
||||||
|
- Value can also be specified using C(INFOBLOX_USERNAME) environment
|
||||||
|
variable.
|
||||||
|
type: str
|
||||||
|
password:
|
||||||
|
description:
|
||||||
|
- Specifies the password to use to authenticate the connection to
|
||||||
|
the remote instance of NIOS.
|
||||||
|
- Value can also be specified using C(INFOBLOX_PASSWORD) environment
|
||||||
|
variable.
|
||||||
|
type: str
|
||||||
|
validate_certs:
|
||||||
|
description:
|
||||||
|
- Boolean value to enable or disable verifying SSL certificates
|
||||||
|
- Value can also be specified using C(INFOBLOX_SSL_VERIFY) environment
|
||||||
|
variable.
|
||||||
|
type: bool
|
||||||
|
default: no
|
||||||
|
aliases: [ ssl_verify ]
|
||||||
|
http_request_timeout:
|
||||||
|
description:
|
||||||
|
- The amount of time before to wait before receiving a response
|
||||||
|
- Value can also be specified using C(INFOBLOX_HTTP_REQUEST_TIMEOUT) environment
|
||||||
|
variable.
|
||||||
|
type: int
|
||||||
|
default: 10
|
||||||
|
max_retries:
|
||||||
|
description:
|
||||||
|
- Configures the number of attempted retries before the connection
|
||||||
|
is declared usable
|
||||||
|
- Value can also be specified using C(INFOBLOX_MAX_RETRIES) environment
|
||||||
|
variable.
|
||||||
|
type: int
|
||||||
|
default: 3
|
||||||
|
wapi_version:
|
||||||
|
description:
|
||||||
|
- Specifies the version of WAPI to use
|
||||||
|
- Value can also be specified using C(INFOBLOX_WAP_VERSION) environment
|
||||||
|
variable.
|
||||||
|
- Until ansible 2.8 the default WAPI was 1.4
|
||||||
|
type: str
|
||||||
|
default: '2.1'
|
||||||
|
max_results:
|
||||||
|
description:
|
||||||
|
- Specifies the maximum number of objects to be returned,
|
||||||
|
if set to a negative number the appliance will return an error when the
|
||||||
|
number of returned objects would exceed the setting.
|
||||||
|
- Value can also be specified using C(INFOBLOX_MAX_RESULTS) environment
|
||||||
|
variable.
|
||||||
|
type: int
|
||||||
|
default: 1000
|
||||||
|
http_pool_connections:
|
||||||
|
description:
|
||||||
|
- Number of pools to be used by the C(infoblox_client.Connector) object.
|
||||||
|
- This is passed as-is to the underlying C(requests.adapters.HTTPAdapter) class.
|
||||||
|
type: int
|
||||||
|
default: 10
|
||||||
|
http_pool_maxsize:
|
||||||
|
description:
|
||||||
|
- Maximum number of connections per pool to be used by the C(infoblox_client.Connector) object.
|
||||||
|
- This is passed as-is to the underlying C(requests.adapters.HTTPAdapter) class.
|
||||||
|
type: int
|
||||||
|
default: 10
|
||||||
|
silent_ssl_warnings:
|
||||||
|
description:
|
||||||
|
- Disable C(urllib3) SSL warnings in the C(infoblox_client.Connector) object.
|
||||||
|
- This is passed as-is to the underlying C(requests.adapters.HTTPAdapter) class.
|
||||||
|
type: bool
|
||||||
|
default: true
|
||||||
|
notes:
|
||||||
|
- "This module must be run locally, which can be achieved by specifying C(connection: local)."
|
||||||
|
- Please read the :ref:`nios_guide` for more detailed information on how to use Infoblox with Ansible.
|
||||||
|
|
||||||
|
'''
|
||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright (c) 2018, Oracle and/or its affiliates.
|
# Copyright (c) 2018, Oracle and/or its affiliates.
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright (c) 2018, Oracle and/or its affiliates.
|
# Copyright (c) 2018, Oracle and/or its affiliates.
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright (c) 2018, Oracle and/or its affiliates.
|
# Copyright (c) 2018, Oracle and/or its affiliates.
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright (c) 2018, Oracle and/or its affiliates.
|
# Copyright (c) 2018, Oracle and/or its affiliates.
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright (c) 2018, Oracle and/or its affiliates.
|
# Copyright (c) 2018, Oracle and/or its affiliates.
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright (c) 2018, Oracle and/or its affiliates.
|
# Copyright (c) 2018, Oracle and/or its affiliates.
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
|||||||
@@ -1,57 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright: (c) 2021, Andreas Botzner <andreas at botzner dot com>
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
# Common parameters for Redis modules
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
login_host:
|
|
||||||
description:
|
|
||||||
- Specify the target host running the database.
|
|
||||||
default: localhost
|
|
||||||
type: str
|
|
||||||
login_port:
|
|
||||||
description:
|
|
||||||
- Specify the port to connect to.
|
|
||||||
default: 6379
|
|
||||||
type: int
|
|
||||||
login_user:
|
|
||||||
description:
|
|
||||||
- Specify the user to authenticate with.
|
|
||||||
- Requires L(redis,https://pypi.org/project/redis) >= 3.4.0.
|
|
||||||
type: str
|
|
||||||
login_password:
|
|
||||||
description:
|
|
||||||
- Specify the password to authenticate with.
|
|
||||||
- Usually not used when target is localhost.
|
|
||||||
type: str
|
|
||||||
tls:
|
|
||||||
description:
|
|
||||||
- Specify whether or not to use TLS for the connection.
|
|
||||||
type: bool
|
|
||||||
default: true
|
|
||||||
validate_certs:
|
|
||||||
description:
|
|
||||||
- Specify whether or not to validate TLS certificates.
|
|
||||||
- This should only be turned off for personally controlled sites or with
|
|
||||||
C(localhost) as target.
|
|
||||||
type: bool
|
|
||||||
default: true
|
|
||||||
ca_certs:
|
|
||||||
description:
|
|
||||||
- Path to root certificates file. If not set and I(tls) is
|
|
||||||
set to C(true), certifi ca-certificates will be used.
|
|
||||||
type: str
|
|
||||||
requirements: [ "redis", "certifi" ]
|
|
||||||
|
|
||||||
notes:
|
|
||||||
- Requires the C(redis) Python package on the remote host. You can
|
|
||||||
install it with pip (C(pip install redis)) or with a package manager.
|
|
||||||
Information on the library can be found at U(https://github.com/andymccurdy/redis-py).
|
|
||||||
'''
|
|
||||||
@@ -1,31 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright: (c) 2021, Phillipe Smith <phsmithcc@gmail.com>
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
|
|
||||||
# Standard files documentation fragment
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
url:
|
|
||||||
type: str
|
|
||||||
description:
|
|
||||||
- Rundeck instance URL.
|
|
||||||
required: true
|
|
||||||
api_version:
|
|
||||||
type: int
|
|
||||||
description:
|
|
||||||
- Rundeck API version to be used.
|
|
||||||
- API version must be at least 14.
|
|
||||||
default: 39
|
|
||||||
api_token:
|
|
||||||
type: str
|
|
||||||
description:
|
|
||||||
- Rundeck User API Token.
|
|
||||||
required: true
|
|
||||||
'''
|
|
||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
#
|
||||||
# Copyright: (c) 2019, Sandeep Kasargod <sandeep@vexata.com>
|
# Copyright: (c) 2019, Sandeep Kasargod <sandeep@vexata.com>
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright (C) 2020 Stanislav German-Evtushenko (@giner) <ginermail@gmail.com>
|
# Copyright (C) 2020 Stanislav German-Evtushenko (@giner) <ginermail@gmail.com>
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ from __future__ import absolute_import, division, print_function
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
from ansible.errors import AnsibleFilterError
|
from ansible.errors import AnsibleFilterError
|
||||||
from ansible.module_utils.common.text.converters import to_native
|
from ansible.module_utils._text import to_native
|
||||||
|
|
||||||
from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError,
|
from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError,
|
||||||
DialectNotAvailableError,
|
DialectNotAvailableError,
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com>
|
# (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com>
|
||||||
#
|
#
|
||||||
# This file is part of Ansible
|
# This file is part of Ansible
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com>
|
# (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com>
|
||||||
#
|
#
|
||||||
# This file is part of Ansible
|
# This file is part of Ansible
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# (c) 2020 Ansible Project
|
# (c) 2020 Ansible Project
|
||||||
#
|
#
|
||||||
# This file is part of Ansible
|
# This file is part of Ansible
|
||||||
|
|||||||
@@ -1,40 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) <ajpantuso@gmail.com>
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import absolute_import, division, print_function
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
from unicodedata import normalize
|
|
||||||
|
|
||||||
from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError
|
|
||||||
from ansible.module_utils.six import text_type
|
|
||||||
|
|
||||||
|
|
||||||
def unicode_normalize(data, form='NFC'):
|
|
||||||
"""Applies normalization to 'unicode' strings.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
data: A unicode string piped into the Jinja filter
|
|
||||||
form: One of ('NFC', 'NFD', 'NFKC', 'NFKD').
|
|
||||||
See https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize for more information.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
A normalized unicode string of the specified 'form'.
|
|
||||||
"""
|
|
||||||
|
|
||||||
if not isinstance(data, text_type):
|
|
||||||
raise AnsibleFilterTypeError("%s is not a valid input type" % type(data))
|
|
||||||
|
|
||||||
if form not in ('NFC', 'NFD', 'NFKC', 'NFKD'):
|
|
||||||
raise AnsibleFilterError("%s is not a valid form" % form)
|
|
||||||
|
|
||||||
return normalize(form, data)
|
|
||||||
|
|
||||||
|
|
||||||
class FilterModule(object):
|
|
||||||
def filters(self):
|
|
||||||
return {
|
|
||||||
'unicode_normalize': unicode_normalize,
|
|
||||||
}
|
|
||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright (C) 2021 Eric Lavarde <elavarde@redhat.com>
|
# Copyright (C) 2021 Eric Lavarde <elavarde@redhat.com>
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
|||||||
@@ -72,7 +72,7 @@ from distutils.version import LooseVersion
|
|||||||
import socket
|
import socket
|
||||||
|
|
||||||
from ansible.errors import AnsibleError
|
from ansible.errors import AnsibleError
|
||||||
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
|
from ansible.module_utils._text import to_bytes, to_native, to_text
|
||||||
from ansible.module_utils.common._collections_compat import MutableMapping
|
from ansible.module_utils.common._collections_compat import MutableMapping
|
||||||
from ansible.module_utils.six import iteritems
|
from ansible.module_utils.six import iteritems
|
||||||
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name
|
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name
|
||||||
|
|||||||
@@ -82,7 +82,7 @@ keyed_groups:
|
|||||||
'''
|
'''
|
||||||
|
|
||||||
from ansible.errors import AnsibleError, AnsibleParserError
|
from ansible.errors import AnsibleError, AnsibleParserError
|
||||||
from ansible.module_utils.common.text.converters import to_native
|
from ansible.module_utils._text import to_native
|
||||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
|||||||
@@ -1,259 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright (c) 2021, Cliff Hults <cliff.hlts@gmail.com>
|
|
||||||
# Copyright (c) 2021 Ansible Project
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
name: icinga2
|
|
||||||
short_description: Icinga2 inventory source
|
|
||||||
version_added: 3.7.0
|
|
||||||
author:
|
|
||||||
- Cliff Hults (@BongoEADGC6) <cliff.hults@gmail.com>
|
|
||||||
description:
|
|
||||||
- Get inventory hosts from the Icinga2 API.
|
|
||||||
- "Uses a configuration file as an inventory source, it must end in
|
|
||||||
C(.icinga2.yml) or C(.icinga2.yaml)."
|
|
||||||
options:
|
|
||||||
plugin:
|
|
||||||
description: Name of the plugin.
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
choices: ['community.general.icinga2']
|
|
||||||
url:
|
|
||||||
description: Root URL of Icinga2 API.
|
|
||||||
type: string
|
|
||||||
required: true
|
|
||||||
user:
|
|
||||||
description: Username to query the API.
|
|
||||||
type: string
|
|
||||||
required: true
|
|
||||||
password:
|
|
||||||
description: Password to query the API.
|
|
||||||
type: string
|
|
||||||
required: true
|
|
||||||
host_filter:
|
|
||||||
description:
|
|
||||||
- An Icinga2 API valid host filter. Leave blank for no filtering
|
|
||||||
type: string
|
|
||||||
required: false
|
|
||||||
validate_certs:
|
|
||||||
description: Enables or disables SSL certificate verification.
|
|
||||||
type: boolean
|
|
||||||
default: true
|
|
||||||
inventory_attr:
|
|
||||||
description:
|
|
||||||
- Allows the override of the inventory name based on different attributes.
|
|
||||||
- This allows for changing the way limits are used.
|
|
||||||
- The current default, C(address), is sometimes not unique or present. We recommend to use C(name) instead.
|
|
||||||
type: string
|
|
||||||
default: address
|
|
||||||
choices: ['name', 'display_name', 'address']
|
|
||||||
version_added: 4.2.0
|
|
||||||
'''
|
|
||||||
|
|
||||||
EXAMPLES = r'''
|
|
||||||
# my.icinga2.yml
|
|
||||||
plugin: community.general.icinga2
|
|
||||||
url: http://localhost:5665
|
|
||||||
user: ansible
|
|
||||||
password: secure
|
|
||||||
host_filter: \"linux-servers\" in host.groups
|
|
||||||
validate_certs: false
|
|
||||||
inventory_attr: name
|
|
||||||
'''
|
|
||||||
|
|
||||||
import json
|
|
||||||
|
|
||||||
from ansible.errors import AnsibleParserError
|
|
||||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
|
||||||
from ansible.module_utils.urls import open_url
|
|
||||||
from ansible.module_utils.six.moves.urllib.error import HTTPError
|
|
||||||
|
|
||||||
|
|
||||||
class InventoryModule(BaseInventoryPlugin, Constructable):
|
|
||||||
''' Host inventory parser for ansible using Icinga2 as source. '''
|
|
||||||
|
|
||||||
NAME = 'community.general.icinga2'
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
|
|
||||||
super(InventoryModule, self).__init__()
|
|
||||||
|
|
||||||
# from config
|
|
||||||
self.icinga2_url = None
|
|
||||||
self.icinga2_user = None
|
|
||||||
self.icinga2_password = None
|
|
||||||
self.ssl_verify = None
|
|
||||||
self.host_filter = None
|
|
||||||
self.inventory_attr = None
|
|
||||||
|
|
||||||
self.cache_key = None
|
|
||||||
self.use_cache = None
|
|
||||||
|
|
||||||
def verify_file(self, path):
|
|
||||||
valid = False
|
|
||||||
if super(InventoryModule, self).verify_file(path):
|
|
||||||
if path.endswith(('icinga2.yaml', 'icinga2.yml')):
|
|
||||||
valid = True
|
|
||||||
else:
|
|
||||||
self.display.vvv('Skipping due to inventory source not ending in "icinga2.yaml" nor "icinga2.yml"')
|
|
||||||
return valid
|
|
||||||
|
|
||||||
def _api_connect(self):
|
|
||||||
self.headers = {
|
|
||||||
'User-Agent': "ansible-icinga2-inv",
|
|
||||||
'Accept': "application/json",
|
|
||||||
}
|
|
||||||
api_status_url = self.icinga2_url + "/status"
|
|
||||||
request_args = {
|
|
||||||
'headers': self.headers,
|
|
||||||
'url_username': self.icinga2_user,
|
|
||||||
'url_password': self.icinga2_password,
|
|
||||||
'validate_certs': self.ssl_verify
|
|
||||||
}
|
|
||||||
open_url(api_status_url, **request_args)
|
|
||||||
|
|
||||||
def _post_request(self, request_url, data=None):
|
|
||||||
self.display.vvv("Requested URL: %s" % request_url)
|
|
||||||
request_args = {
|
|
||||||
'headers': self.headers,
|
|
||||||
'url_username': self.icinga2_user,
|
|
||||||
'url_password': self.icinga2_password,
|
|
||||||
'validate_certs': self.ssl_verify
|
|
||||||
}
|
|
||||||
if data is not None:
|
|
||||||
request_args['data'] = json.dumps(data)
|
|
||||||
self.display.vvv("Request Args: %s" % request_args)
|
|
||||||
try:
|
|
||||||
response = open_url(request_url, **request_args)
|
|
||||||
except HTTPError as e:
|
|
||||||
try:
|
|
||||||
error_body = json.loads(e.read().decode())
|
|
||||||
self.display.vvv("Error returned: {0}".format(error_body))
|
|
||||||
except Exception:
|
|
||||||
error_body = {"status": None}
|
|
||||||
if e.code == 404 and error_body.get('status') == "No objects found.":
|
|
||||||
raise AnsibleParserError("Host filter returned no data. Please confirm your host_filter value is valid")
|
|
||||||
raise AnsibleParserError("Unexpected data returned: {0} -- {1}".format(e, error_body))
|
|
||||||
|
|
||||||
response_body = response.read()
|
|
||||||
json_data = json.loads(response_body.decode('utf-8'))
|
|
||||||
self.display.vvv("Returned Data: %s" % json.dumps(json_data, indent=4, sort_keys=True))
|
|
||||||
if 200 <= response.status <= 299:
|
|
||||||
return json_data
|
|
||||||
if response.status == 404 and json_data['status'] == "No objects found.":
|
|
||||||
raise AnsibleParserError(
|
|
||||||
"API returned no data -- Response: %s - %s"
|
|
||||||
% (response.status, json_data['status']))
|
|
||||||
if response.status == 401:
|
|
||||||
raise AnsibleParserError(
|
|
||||||
"API was unable to complete query -- Response: %s - %s"
|
|
||||||
% (response.status, json_data['status']))
|
|
||||||
if response.status == 500:
|
|
||||||
raise AnsibleParserError(
|
|
||||||
"API Response - %s - %s"
|
|
||||||
% (json_data['status'], json_data['errors']))
|
|
||||||
raise AnsibleParserError(
|
|
||||||
"Unexpected data returned - %s - %s"
|
|
||||||
% (json_data['status'], json_data['errors']))
|
|
||||||
|
|
||||||
def _query_hosts(self, hosts=None, attrs=None, joins=None, host_filter=None):
|
|
||||||
query_hosts_url = "{0}/objects/hosts".format(self.icinga2_url)
|
|
||||||
self.headers['X-HTTP-Method-Override'] = 'GET'
|
|
||||||
data_dict = dict()
|
|
||||||
if hosts:
|
|
||||||
data_dict['hosts'] = hosts
|
|
||||||
if attrs is not None:
|
|
||||||
data_dict['attrs'] = attrs
|
|
||||||
if joins is not None:
|
|
||||||
data_dict['joins'] = joins
|
|
||||||
if host_filter is not None:
|
|
||||||
data_dict['filter'] = host_filter.replace("\\\"", "\"")
|
|
||||||
self.display.vvv(host_filter)
|
|
||||||
host_dict = self._post_request(query_hosts_url, data_dict)
|
|
||||||
return host_dict['results']
|
|
||||||
|
|
||||||
def get_inventory_from_icinga(self):
    """Query for all hosts """
    self.display.vvv("Querying Icinga2 for inventory")
    # Attributes we always ask the API for.
    query_args = {
        "attrs": ["address", "display_name", "state_type", "state", "groups"],
    }
    # Only forward the host filter when the user configured one.
    if self.host_filter is not None:
        query_args['host_filter'] = self.host_filter
    # Icinga2 API Call
    results_json = self._query_hosts(**query_args)
    # Manipulate returned API data to Ansible inventory spec
    return self._convert_inv(results_json)
|
|
||||||
|
|
||||||
def _populate(self):
|
|
||||||
groups = self._to_json(self.get_inventory_from_icinga())
|
|
||||||
return groups
|
|
||||||
|
|
||||||
def _to_json(self, in_dict):
|
|
||||||
"""Convert dictionary to JSON"""
|
|
||||||
return json.dumps(in_dict, sort_keys=True, indent=2)
|
|
||||||
|
|
||||||
def _convert_inv(self, json_data):
|
|
||||||
"""Convert Icinga2 API data to JSON format for Ansible"""
|
|
||||||
groups_dict = {"_meta": {"hostvars": {}}}
|
|
||||||
for entry in json_data:
|
|
||||||
host_attrs = entry['attrs']
|
|
||||||
if self.inventory_attr == "name":
|
|
||||||
host_name = entry.get('name')
|
|
||||||
if self.inventory_attr == "address":
|
|
||||||
# When looking for address for inventory, if missing fallback to object name
|
|
||||||
if host_attrs.get('address', '') != '':
|
|
||||||
host_name = host_attrs.get('address')
|
|
||||||
else:
|
|
||||||
host_name = entry.get('name')
|
|
||||||
if self.inventory_attr == "display_name":
|
|
||||||
host_name = host_attrs.get('display_name')
|
|
||||||
if host_attrs['state'] == 0:
|
|
||||||
host_attrs['state'] = 'on'
|
|
||||||
else:
|
|
||||||
host_attrs['state'] = 'off'
|
|
||||||
host_groups = host_attrs.get('groups')
|
|
||||||
self.inventory.add_host(host_name)
|
|
||||||
for group in host_groups:
|
|
||||||
if group not in self.inventory.groups.keys():
|
|
||||||
self.inventory.add_group(group)
|
|
||||||
self.inventory.add_child(group, host_name)
|
|
||||||
# If the address attribute is populated, override ansible_host with the value
|
|
||||||
if host_attrs.get('address') != '':
|
|
||||||
self.inventory.set_variable(host_name, 'ansible_host', host_attrs.get('address'))
|
|
||||||
self.inventory.set_variable(host_name, 'hostname', entry.get('name'))
|
|
||||||
self.inventory.set_variable(host_name, 'display_name', host_attrs.get('display_name'))
|
|
||||||
self.inventory.set_variable(host_name, 'state',
|
|
||||||
host_attrs['state'])
|
|
||||||
self.inventory.set_variable(host_name, 'state_type',
|
|
||||||
host_attrs['state_type'])
|
|
||||||
return groups_dict
|
|
||||||
|
|
||||||
def parse(self, inventory, loader, path, cache=True):
    """Plugin entry point: read config, verify the API and build inventory."""
    super(InventoryModule, self).parse(inventory, loader, path)

    # read config from file, this sets 'options'
    self._read_config_data(path)

    # Copy the plugin options from the YAML file onto the instance.
    api_base = self.get_option('url')
    self.icinga2_url = api_base.rstrip('/') + '/v1'
    self.icinga2_user = self.get_option('user')
    self.icinga2_password = self.get_option('password')
    self.ssl_verify = self.get_option('validate_certs')
    self.host_filter = self.get_option('host_filter')
    self.inventory_attr = self.get_option('inventory_attr')

    # Caching is not currently enabled
    # self.cache_key = self.get_cache_key(path)
    # self.use_cache = cache and self.get_option('cache')

    # Test connection to API
    self._api_connect()

    # Call our internal helper to populate the dynamic inventory
    self._populate()
|
|
||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright (c) 2017 Ansible Project
|
# Copyright (c) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
@@ -23,17 +22,9 @@ DOCUMENTATION = r'''
|
|||||||
- constructed
|
- constructed
|
||||||
options:
|
options:
|
||||||
plugin:
|
plugin:
|
||||||
description: Marks this as an instance of the 'linode' plugin.
|
description: marks this as an instance of the 'linode' plugin
|
||||||
required: true
|
required: true
|
||||||
choices: ['linode', 'community.general.linode']
|
choices: ['linode', 'community.general.linode']
|
||||||
ip_style:
|
|
||||||
description: Populate hostvars with all information available from the Linode APIv4.
|
|
||||||
type: string
|
|
||||||
default: plain
|
|
||||||
choices:
|
|
||||||
- plain
|
|
||||||
- api
|
|
||||||
version_added: 3.6.0
|
|
||||||
access_token:
|
access_token:
|
||||||
description: The Linode account personal access token.
|
description: The Linode account personal access token.
|
||||||
required: true
|
required: true
|
||||||
@@ -86,18 +77,7 @@ groups:
|
|||||||
webservers: "'web' in (tags|list)"
|
webservers: "'web' in (tags|list)"
|
||||||
mailservers: "'mail' in (tags|list)"
|
mailservers: "'mail' in (tags|list)"
|
||||||
compose:
|
compose:
|
||||||
# By default, Ansible tries to connect to the label of the instance.
|
|
||||||
# Since that might not be a valid name to connect to, you can
|
|
||||||
# replace it with the first IPv4 address of the linode as follows:
|
|
||||||
ansible_ssh_host: ipv4[0]
|
|
||||||
ansible_port: 2222
|
ansible_port: 2222
|
||||||
|
|
||||||
# Example where control traffic limited to internal network
|
|
||||||
plugin: community.general.linode
|
|
||||||
access_token: foobar
|
|
||||||
ip_style: api
|
|
||||||
compose:
|
|
||||||
ansible_host: "ipv4 | community.general.json_query('[?public==`false`].address') | first"
|
|
||||||
'''
|
'''
|
||||||
|
|
||||||
import os
|
import os
|
||||||
@@ -185,44 +165,14 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
|||||||
|
|
||||||
def _add_hostvars_for_instances(self):
|
def _add_hostvars_for_instances(self):
|
||||||
"""Add hostvars for instances in the dynamic inventory."""
|
"""Add hostvars for instances in the dynamic inventory."""
|
||||||
ip_style = self.get_option('ip_style')
|
|
||||||
for instance in self.instances:
|
for instance in self.instances:
|
||||||
hostvars = instance._raw_json
|
hostvars = instance._raw_json
|
||||||
for hostvar_key in hostvars:
|
for hostvar_key in hostvars:
|
||||||
if ip_style == 'api' and hostvar_key in ['ipv4', 'ipv6']:
|
|
||||||
continue
|
|
||||||
self.inventory.set_variable(
|
self.inventory.set_variable(
|
||||||
instance.label,
|
instance.label,
|
||||||
hostvar_key,
|
hostvar_key,
|
||||||
hostvars[hostvar_key]
|
hostvars[hostvar_key]
|
||||||
)
|
)
|
||||||
if ip_style == 'api':
|
|
||||||
ips = instance.ips.ipv4.public + instance.ips.ipv4.private
|
|
||||||
ips += [instance.ips.ipv6.slaac, instance.ips.ipv6.link_local]
|
|
||||||
ips += instance.ips.ipv6.pools
|
|
||||||
|
|
||||||
for ip_type in set(ip.type for ip in ips):
|
|
||||||
self.inventory.set_variable(
|
|
||||||
instance.label,
|
|
||||||
ip_type,
|
|
||||||
self._ip_data([ip for ip in ips if ip.type == ip_type])
|
|
||||||
)
|
|
||||||
|
|
||||||
def _ip_data(self, ip_list):
|
|
||||||
data = []
|
|
||||||
for ip in list(ip_list):
|
|
||||||
data.append(
|
|
||||||
{
|
|
||||||
'address': ip.address,
|
|
||||||
'subnet_mask': ip.subnet_mask,
|
|
||||||
'gateway': ip.gateway,
|
|
||||||
'public': ip.public,
|
|
||||||
'prefix': ip.prefix,
|
|
||||||
'rdns': ip.rdns,
|
|
||||||
'type': ip.type
|
|
||||||
}
|
|
||||||
)
|
|
||||||
return data
|
|
||||||
|
|
||||||
def _validate_option(self, name, desired_type, option_value):
|
def _validate_option(self, name, desired_type, option_value):
|
||||||
"""Validate user specified configuration data against types."""
|
"""Validate user specified configuration data against types."""
|
||||||
|
|||||||
@@ -13,9 +13,6 @@ DOCUMENTATION = r'''
|
|||||||
- Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'.
|
- Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'.
|
||||||
version_added: "3.0.0"
|
version_added: "3.0.0"
|
||||||
author: "Frank Dornheim (@conloos)"
|
author: "Frank Dornheim (@conloos)"
|
||||||
requirements:
|
|
||||||
- ipaddress
|
|
||||||
- lxd >= 4.0
|
|
||||||
options:
|
options:
|
||||||
plugin:
|
plugin:
|
||||||
description: Token that ensures this is a source file for the 'lxd' plugin.
|
description: Token that ensures this is a source file for the 'lxd' plugin.
|
||||||
@@ -50,38 +47,26 @@ DOCUMENTATION = r'''
|
|||||||
- If I(trust_password) is set, this module send a request for authentication before sending any requests.
|
- If I(trust_password) is set, this module send a request for authentication before sending any requests.
|
||||||
type: str
|
type: str
|
||||||
state:
|
state:
|
||||||
description: Filter the instance according to the current status.
|
description: Filter the container according to the current status.
|
||||||
type: str
|
type: str
|
||||||
default: none
|
default: none
|
||||||
choices: [ 'STOPPED', 'STARTING', 'RUNNING', 'none' ]
|
choices: [ 'STOPPED', 'STARTING', 'RUNNING', 'none' ]
|
||||||
type_filter:
|
prefered_container_network_interface:
|
||||||
description:
|
description:
|
||||||
- Filter the instances by type C(virtual-machine), C(container) or C(both).
|
- If a container has multiple network interfaces, select which one is the prefered as pattern.
|
||||||
- The first version of the inventory only supported containers.
|
|
||||||
type: str
|
|
||||||
default: container
|
|
||||||
choices: [ 'virtual-machine', 'container', 'both' ]
|
|
||||||
version_added: 4.2.0
|
|
||||||
prefered_instance_network_interface:
|
|
||||||
description:
|
|
||||||
- If an instance has multiple network interfaces, select which one is the prefered as pattern.
|
|
||||||
- Combined with the first number that can be found e.g. 'eth' + 0.
|
- Combined with the first number that can be found e.g. 'eth' + 0.
|
||||||
- The option has been renamed from I(prefered_container_network_interface) to I(prefered_instance_network_interface) in community.general 3.8.0.
|
|
||||||
The old name still works as an alias.
|
|
||||||
type: str
|
type: str
|
||||||
default: eth
|
default: eth
|
||||||
aliases:
|
prefered_container_network_family:
|
||||||
- prefered_container_network_interface
|
|
||||||
prefered_instance_network_family:
|
|
||||||
description:
|
description:
|
||||||
- If an instance has multiple network interfaces, which one is the prefered by family.
|
- If a container has multiple network interfaces, which one is the prefered by family.
|
||||||
- Specify C(inet) for IPv4 and C(inet6) for IPv6.
|
- Specify C(inet) for IPv4 and C(inet6) for IPv6.
|
||||||
type: str
|
type: str
|
||||||
default: inet
|
default: inet
|
||||||
choices: [ 'inet', 'inet6' ]
|
choices: [ 'inet', 'inet6' ]
|
||||||
groupby:
|
groupby:
|
||||||
description:
|
description:
|
||||||
- Create groups by the following keywords C(location), C(network_range), C(os), C(pattern), C(profile), C(release), C(type), C(vlanid).
|
- Create groups by the following keywords C(location), C(pattern), C(network_range), C(os), C(release), C(profile), C(vlanid).
|
||||||
- See example for syntax.
|
- See example for syntax.
|
||||||
type: dict
|
type: dict
|
||||||
'''
|
'''
|
||||||
@@ -96,49 +81,38 @@ plugin: community.general.lxd
|
|||||||
url: unix:/var/snap/lxd/common/lxd/unix.socket
|
url: unix:/var/snap/lxd/common/lxd/unix.socket
|
||||||
state: RUNNING
|
state: RUNNING
|
||||||
|
|
||||||
# simple lxd.yml including virtual machines and containers
|
|
||||||
plugin: community.general.lxd
|
|
||||||
url: unix:/var/snap/lxd/common/lxd/unix.socket
|
|
||||||
type_filter: both
|
|
||||||
|
|
||||||
# grouping lxd.yml
|
# grouping lxd.yml
|
||||||
groupby:
|
groupby:
|
||||||
locationBerlin:
|
|
||||||
type: location
|
|
||||||
attribute: Berlin
|
|
||||||
netRangeIPv4:
|
|
||||||
type: network_range
|
|
||||||
attribute: 10.98.143.0/24
|
|
||||||
netRangeIPv6:
|
|
||||||
type: network_range
|
|
||||||
attribute: fd42:bd00:7b11:2167:216:3eff::/24
|
|
||||||
osUbuntu:
|
|
||||||
type: os
|
|
||||||
attribute: ubuntu
|
|
||||||
testpattern:
|
testpattern:
|
||||||
type: pattern
|
type: pattern
|
||||||
attribute: test
|
attribute: test
|
||||||
profileDefault:
|
vlan666:
|
||||||
type: profile
|
type: vlanid
|
||||||
attribute: default
|
attribute: 666
|
||||||
profileX11:
|
locationBerlin:
|
||||||
type: profile
|
type: location
|
||||||
attribute: x11
|
attribute: Berlin
|
||||||
|
osUbuntu:
|
||||||
|
type: os
|
||||||
|
attribute: ubuntu
|
||||||
releaseFocal:
|
releaseFocal:
|
||||||
type: release
|
type: release
|
||||||
attribute: focal
|
attribute: focal
|
||||||
releaseBionic:
|
releaseBionic:
|
||||||
type: release
|
type: release
|
||||||
attribute: bionic
|
attribute: bionic
|
||||||
typeVM:
|
profileDefault:
|
||||||
type: type
|
type: profile
|
||||||
attribute: virtual-machine
|
attribute: default
|
||||||
typeContainer:
|
profileX11:
|
||||||
type: type
|
type: profile
|
||||||
attribute: container
|
attribute: x11
|
||||||
vlan666:
|
netRangeIPv4:
|
||||||
type: vlanid
|
type: network_range
|
||||||
attribute: 666
|
attribute: 10.98.143.0/24
|
||||||
|
netRangeIPv6:
|
||||||
|
type: network_range
|
||||||
|
attribute: fd42:bd00:7b11:2167:216:3eff::/24
|
||||||
'''
|
'''
|
||||||
|
|
||||||
import binascii
|
import binascii
|
||||||
@@ -148,19 +122,12 @@ import time
|
|||||||
import os
|
import os
|
||||||
import socket
|
import socket
|
||||||
from ansible.plugins.inventory import BaseInventoryPlugin
|
from ansible.plugins.inventory import BaseInventoryPlugin
|
||||||
from ansible.module_utils.common.text.converters import to_native, to_text
|
from ansible.module_utils._text import to_native, to_text
|
||||||
from ansible.module_utils.common.dict_transformations import dict_merge
|
from ansible.module_utils.common.dict_transformations import dict_merge
|
||||||
from ansible.module_utils.six import raise_from
|
|
||||||
from ansible.errors import AnsibleError, AnsibleParserError
|
from ansible.errors import AnsibleError, AnsibleParserError
|
||||||
|
from ansible_collections.community.general.plugins.module_utils.compat import ipaddress
|
||||||
from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
|
from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
|
||||||
|
|
||||||
try:
|
|
||||||
import ipaddress
|
|
||||||
except ImportError as exc:
|
|
||||||
IPADDRESS_IMPORT_ERROR = exc
|
|
||||||
else:
|
|
||||||
IPADDRESS_IMPORT_ERROR = None
|
|
||||||
|
|
||||||
|
|
||||||
class InventoryModule(BaseInventoryPlugin):
|
class InventoryModule(BaseInventoryPlugin):
|
||||||
DEBUG = 4
|
DEBUG = 4
|
||||||
@@ -307,10 +274,10 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
network_configs = self.socket.do('GET', '/1.0/networks')
|
network_configs = self.socket.do('GET', '/1.0/networks')
|
||||||
return [m.split('/')[3] for m in network_configs['metadata']]
|
return [m.split('/')[3] for m in network_configs['metadata']]
|
||||||
|
|
||||||
def _get_instances(self):
|
def _get_containers(self):
|
||||||
"""Get instancenames
|
"""Get Containernames
|
||||||
|
|
||||||
Returns all instancenames
|
Returns all containernames
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
None
|
None
|
||||||
@@ -319,27 +286,25 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
Raises:
|
Raises:
|
||||||
None
|
None
|
||||||
Returns:
|
Returns:
|
||||||
list(names): names of all instances"""
|
list(names): names of all containers"""
|
||||||
# e.g. {
|
# e.g. {'type': 'sync',
|
||||||
# "metadata": [
|
# 'status': 'Success',
|
||||||
# "/1.0/instances/foo",
|
# 'status_code': 200,
|
||||||
# "/1.0/instances/bar"
|
# 'operation': '',
|
||||||
# ],
|
# 'error_code': 0,
|
||||||
# "status": "Success",
|
# 'error': '',
|
||||||
# "status_code": 200,
|
# 'metadata': ['/1.0/containers/udemy-ansible-ubuntu-2004']}
|
||||||
# "type": "sync"
|
containers = self.socket.do('GET', '/1.0/containers')
|
||||||
# }
|
return [m.split('/')[3] for m in containers['metadata']]
|
||||||
instances = self.socket.do('GET', '/1.0/instances')
|
|
||||||
return [m.split('/')[3] for m in instances['metadata']]
|
|
||||||
|
|
||||||
def _get_config(self, branch, name):
|
def _get_config(self, branch, name):
|
||||||
"""Get inventory of instance
|
"""Get inventory of container
|
||||||
|
|
||||||
Get config of instance
|
Get config of container
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
str(branch): Name oft the API-Branch
|
str(branch): Name oft the API-Branch
|
||||||
str(name): Name of instance
|
str(name): Name of Container
|
||||||
Kwargs:
|
Kwargs:
|
||||||
None
|
None
|
||||||
Source:
|
Source:
|
||||||
@@ -347,7 +312,7 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
Raises:
|
Raises:
|
||||||
None
|
None
|
||||||
Returns:
|
Returns:
|
||||||
dict(config): Config of the instance"""
|
dict(config): Config of the container"""
|
||||||
config = {}
|
config = {}
|
||||||
if isinstance(branch, (tuple, list)):
|
if isinstance(branch, (tuple, list)):
|
||||||
config[name] = {branch[1]: self.socket.do('GET', '/1.0/{0}/{1}/{2}'.format(to_native(branch[0]), to_native(name), to_native(branch[1])))}
|
config[name] = {branch[1]: self.socket.do('GET', '/1.0/{0}/{1}/{2}'.format(to_native(branch[0]), to_native(name), to_native(branch[1])))}
|
||||||
@@ -355,13 +320,13 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
config[name] = {branch: self.socket.do('GET', '/1.0/{0}/{1}'.format(to_native(branch), to_native(name)))}
|
config[name] = {branch: self.socket.do('GET', '/1.0/{0}/{1}'.format(to_native(branch), to_native(name)))}
|
||||||
return config
|
return config
|
||||||
|
|
||||||
def get_instance_data(self, names):
|
def get_container_data(self, names):
|
||||||
"""Create Inventory of the instance
|
"""Create Inventory of the container
|
||||||
|
|
||||||
Iterate through the different branches of the instances and collect Informations.
|
Iterate through the different branches of the containers and collect Informations.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
list(names): List of instance names
|
list(names): List of container names
|
||||||
Kwargs:
|
Kwargs:
|
||||||
None
|
None
|
||||||
Raises:
|
Raises:
|
||||||
@@ -370,20 +335,20 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
None"""
|
None"""
|
||||||
# tuple(('instances','metadata/templates')) to get section in branch
|
# tuple(('instances','metadata/templates')) to get section in branch
|
||||||
# e.g. /1.0/instances/<name>/metadata/templates
|
# e.g. /1.0/instances/<name>/metadata/templates
|
||||||
branches = ['instances', ('instances', 'state')]
|
branches = ['containers', ('instances', 'state')]
|
||||||
instance_config = {}
|
container_config = {}
|
||||||
for branch in branches:
|
for branch in branches:
|
||||||
for name in names:
|
for name in names:
|
||||||
instance_config['instances'] = self._get_config(branch, name)
|
container_config['containers'] = self._get_config(branch, name)
|
||||||
self.data = dict_merge(instance_config, self.data)
|
self.data = dict_merge(container_config, self.data)
|
||||||
|
|
||||||
def get_network_data(self, names):
|
def get_network_data(self, names):
|
||||||
"""Create Inventory of the instance
|
"""Create Inventory of the container
|
||||||
|
|
||||||
Iterate through the different branches of the instances and collect Informations.
|
Iterate through the different branches of the containers and collect Informations.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
list(names): List of instance names
|
list(names): List of container names
|
||||||
Kwargs:
|
Kwargs:
|
||||||
None
|
None
|
||||||
Raises:
|
Raises:
|
||||||
@@ -402,26 +367,26 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
network_config['networks'] = {name: None}
|
network_config['networks'] = {name: None}
|
||||||
self.data = dict_merge(network_config, self.data)
|
self.data = dict_merge(network_config, self.data)
|
||||||
|
|
||||||
def extract_network_information_from_instance_config(self, instance_name):
|
def extract_network_information_from_container_config(self, container_name):
|
||||||
"""Returns the network interface configuration
|
"""Returns the network interface configuration
|
||||||
|
|
||||||
Returns the network ipv4 and ipv6 config of the instance without local-link
|
Returns the network ipv4 and ipv6 config of the container without local-link
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
str(instance_name): Name oft he instance
|
str(container_name): Name oft he container
|
||||||
Kwargs:
|
Kwargs:
|
||||||
None
|
None
|
||||||
Raises:
|
Raises:
|
||||||
None
|
None
|
||||||
Returns:
|
Returns:
|
||||||
dict(network_configuration): network config"""
|
dict(network_configuration): network config"""
|
||||||
instance_network_interfaces = self._get_data_entry('instances/{0}/state/metadata/network'.format(instance_name))
|
container_network_interfaces = self._get_data_entry('containers/{0}/state/metadata/network'.format(container_name))
|
||||||
network_configuration = None
|
network_configuration = None
|
||||||
if instance_network_interfaces:
|
if container_network_interfaces:
|
||||||
network_configuration = {}
|
network_configuration = {}
|
||||||
gen_interface_names = [interface_name for interface_name in instance_network_interfaces if interface_name != 'lo']
|
gen_interface_names = [interface_name for interface_name in container_network_interfaces if interface_name != 'lo']
|
||||||
for interface_name in gen_interface_names:
|
for interface_name in gen_interface_names:
|
||||||
gen_address = [address for address in instance_network_interfaces[interface_name]['addresses'] if address.get('scope') != 'link']
|
gen_address = [address for address in container_network_interfaces[interface_name]['addresses'] if address.get('scope') != 'link']
|
||||||
network_configuration[interface_name] = []
|
network_configuration[interface_name] = []
|
||||||
for address in gen_address:
|
for address in gen_address:
|
||||||
address_set = {}
|
address_set = {}
|
||||||
@@ -432,24 +397,24 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
network_configuration[interface_name].append(address_set)
|
network_configuration[interface_name].append(address_set)
|
||||||
return network_configuration
|
return network_configuration
|
||||||
|
|
||||||
def get_prefered_instance_network_interface(self, instance_name):
|
def get_prefered_container_network_interface(self, container_name):
|
||||||
"""Helper to get the prefered interface of thr instance
|
"""Helper to get the prefered interface of thr container
|
||||||
|
|
||||||
Helper to get the prefered interface provide by neme pattern from 'prefered_instance_network_interface'.
|
Helper to get the prefered interface provide by neme pattern from 'prefered_container_network_interface'.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
str(containe_name): name of instance
|
str(containe_name): name of container
|
||||||
Kwargs:
|
Kwargs:
|
||||||
None
|
None
|
||||||
Raises:
|
Raises:
|
||||||
None
|
None
|
||||||
Returns:
|
Returns:
|
||||||
str(prefered_interface): None or interface name"""
|
str(prefered_interface): None or interface name"""
|
||||||
instance_network_interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name))
|
container_network_interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name))
|
||||||
prefered_interface = None # init
|
prefered_interface = None # init
|
||||||
if instance_network_interfaces: # instance have network interfaces
|
if container_network_interfaces: # container have network interfaces
|
||||||
# generator if interfaces which start with the desired pattern
|
# generator if interfaces which start with the desired pattern
|
||||||
net_generator = [interface for interface in instance_network_interfaces if interface.startswith(self.prefered_instance_network_interface)]
|
net_generator = [interface for interface in container_network_interfaces if interface.startswith(self.prefered_container_network_interface)]
|
||||||
selected_interfaces = [] # init
|
selected_interfaces = [] # init
|
||||||
for interface in net_generator:
|
for interface in net_generator:
|
||||||
selected_interfaces.append(interface)
|
selected_interfaces.append(interface)
|
||||||
@@ -457,13 +422,13 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
prefered_interface = sorted(selected_interfaces)[0]
|
prefered_interface = sorted(selected_interfaces)[0]
|
||||||
return prefered_interface
|
return prefered_interface
|
||||||
|
|
||||||
def get_instance_vlans(self, instance_name):
|
def get_container_vlans(self, container_name):
|
||||||
"""Get VLAN(s) from instance
|
"""Get VLAN(s) from container
|
||||||
|
|
||||||
Helper to get the VLAN_ID from the instance
|
Helper to get the VLAN_ID from the container
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
str(containe_name): name of instance
|
str(containe_name): name of container
|
||||||
Kwargs:
|
Kwargs:
|
||||||
None
|
None
|
||||||
Raises:
|
Raises:
|
||||||
@@ -476,13 +441,13 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
if self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network)):
|
if self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network)):
|
||||||
network_vlans[network] = self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network))
|
network_vlans[network] = self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network))
|
||||||
|
|
||||||
# get networkdevices of instance and return
|
# get networkdevices of container and return
|
||||||
# e.g.
|
# e.g.
|
||||||
# "eth0":{ "name":"eth0",
|
# "eth0":{ "name":"eth0",
|
||||||
# "network":"lxdbr0",
|
# "network":"lxdbr0",
|
||||||
# "type":"nic"},
|
# "type":"nic"},
|
||||||
vlan_ids = {}
|
vlan_ids = {}
|
||||||
devices = self._get_data_entry('instances/{0}/instances/metadata/expanded_devices'.format(to_native(instance_name)))
|
devices = self._get_data_entry('containers/{0}/containers/metadata/expanded_devices'.format(to_native(container_name)))
|
||||||
for device in devices:
|
for device in devices:
|
||||||
if 'network' in devices[device]:
|
if 'network' in devices[device]:
|
||||||
if devices[device]['network'] in network_vlans:
|
if devices[device]['network'] in network_vlans:
|
||||||
@@ -518,14 +483,14 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
except KeyError:
|
except KeyError:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def _set_data_entry(self, instance_name, key, value, path=None):
|
def _set_data_entry(self, container_name, key, value, path=None):
|
||||||
"""Helper to save data
|
"""Helper to save data
|
||||||
|
|
||||||
Helper to save the data in self.data
|
Helper to save the data in self.data
|
||||||
Detect if data is allready in branch and use dict_merge() to prevent that branch is overwritten.
|
Detect if data is allready in branch and use dict_merge() to prevent that branch is overwritten.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
str(instance_name): name of instance
|
str(container_name): name of container
|
||||||
str(key): same as dict
|
str(key): same as dict
|
||||||
*(value): same as dict
|
*(value): same as dict
|
||||||
Kwargs:
|
Kwargs:
|
||||||
@@ -536,24 +501,24 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
None"""
|
None"""
|
||||||
if not path:
|
if not path:
|
||||||
path = self.data['inventory']
|
path = self.data['inventory']
|
||||||
if instance_name not in path:
|
if container_name not in path:
|
||||||
path[instance_name] = {}
|
path[container_name] = {}
|
||||||
|
|
||||||
try:
|
try:
|
||||||
if isinstance(value, dict) and key in path[instance_name]:
|
if isinstance(value, dict) and key in path[container_name]:
|
||||||
path[instance_name] = dict_merge(value, path[instance_name][key])
|
path[container_name] = dict_merge(value, path[container_name][key])
|
||||||
else:
|
else:
|
||||||
path[instance_name][key] = value
|
path[container_name][key] = value
|
||||||
except KeyError as err:
|
except KeyError as err:
|
||||||
raise AnsibleParserError("Unable to store Informations: {0}".format(to_native(err)))
|
raise AnsibleParserError("Unable to store Informations: {0}".format(to_native(err)))
|
||||||
|
|
||||||
def extract_information_from_instance_configs(self):
|
def extract_information_from_container_configs(self):
|
||||||
"""Process configuration information
|
"""Process configuration information
|
||||||
|
|
||||||
Preparation of the data
|
Preparation of the data
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
dict(configs): instance configurations
|
dict(configs): Container configurations
|
||||||
Kwargs:
|
Kwargs:
|
||||||
None
|
None
|
||||||
Raises:
|
Raises:
|
||||||
@@ -564,35 +529,33 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
if 'inventory' not in self.data:
|
if 'inventory' not in self.data:
|
||||||
self.data['inventory'] = {}
|
self.data['inventory'] = {}
|
||||||
|
|
||||||
for instance_name in self.data['instances']:
|
for container_name in self.data['containers']:
|
||||||
self._set_data_entry(instance_name, 'os', self._get_data_entry(
|
self._set_data_entry(container_name, 'os', self._get_data_entry(
|
||||||
'instances/{0}/instances/metadata/config/image.os'.format(instance_name)))
|
'containers/{0}/containers/metadata/config/image.os'.format(container_name)))
|
||||||
self._set_data_entry(instance_name, 'release', self._get_data_entry(
|
self._set_data_entry(container_name, 'release', self._get_data_entry(
|
||||||
'instances/{0}/instances/metadata/config/image.release'.format(instance_name)))
|
'containers/{0}/containers/metadata/config/image.release'.format(container_name)))
|
||||||
self._set_data_entry(instance_name, 'version', self._get_data_entry(
|
self._set_data_entry(container_name, 'version', self._get_data_entry(
|
||||||
'instances/{0}/instances/metadata/config/image.version'.format(instance_name)))
|
'containers/{0}/containers/metadata/config/image.version'.format(container_name)))
|
||||||
self._set_data_entry(instance_name, 'profile', self._get_data_entry(
|
self._set_data_entry(container_name, 'profile', self._get_data_entry(
|
||||||
'instances/{0}/instances/metadata/profiles'.format(instance_name)))
|
'containers/{0}/containers/metadata/profiles'.format(container_name)))
|
||||||
self._set_data_entry(instance_name, 'location', self._get_data_entry(
|
self._set_data_entry(container_name, 'location', self._get_data_entry(
|
||||||
'instances/{0}/instances/metadata/location'.format(instance_name)))
|
'containers/{0}/containers/metadata/location'.format(container_name)))
|
||||||
self._set_data_entry(instance_name, 'state', self._get_data_entry(
|
self._set_data_entry(container_name, 'state', self._get_data_entry(
|
||||||
'instances/{0}/instances/metadata/config/volatile.last_state.power'.format(instance_name)))
|
'containers/{0}/containers/metadata/config/volatile.last_state.power'.format(container_name)))
|
||||||
self._set_data_entry(instance_name, 'type', self._get_data_entry(
|
self._set_data_entry(container_name, 'network_interfaces', self.extract_network_information_from_container_config(container_name))
|
||||||
'instances/{0}/instances/metadata/type'.format(instance_name)))
|
self._set_data_entry(container_name, 'preferred_interface', self.get_prefered_container_network_interface(container_name))
|
||||||
self._set_data_entry(instance_name, 'network_interfaces', self.extract_network_information_from_instance_config(instance_name))
|
self._set_data_entry(container_name, 'vlan_ids', self.get_container_vlans(container_name))
|
||||||
self._set_data_entry(instance_name, 'preferred_interface', self.get_prefered_instance_network_interface(instance_name))
|
|
||||||
self._set_data_entry(instance_name, 'vlan_ids', self.get_instance_vlans(instance_name))
|
|
||||||
|
|
||||||
def build_inventory_network(self, instance_name):
|
def build_inventory_network(self, container_name):
|
||||||
"""Add the network interfaces of the instance to the inventory
|
"""Add the network interfaces of the container to the inventory
|
||||||
|
|
||||||
Logic:
|
Logic:
|
||||||
- if the instance have no interface -> 'ansible_connection: local'
|
- if the container have no interface -> 'ansible_connection: local'
|
||||||
- get preferred_interface & prefered_instance_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
|
- get preferred_interface & prefered_container_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
|
||||||
- first Interface from: network_interfaces prefered_instance_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
|
- first Interface from: network_interfaces prefered_container_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
str(instance_name): name of instance
|
str(container_name): name of container
|
||||||
Kwargs:
|
Kwargs:
|
||||||
None
|
None
|
||||||
Raises:
|
Raises:
|
||||||
@@ -600,45 +563,45 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
Returns:
|
Returns:
|
||||||
None"""
|
None"""
|
||||||
|
|
||||||
def interface_selection(instance_name):
|
def interface_selection(container_name):
|
||||||
"""Select instance Interface for inventory
|
"""Select container Interface for inventory
|
||||||
|
|
||||||
Logic:
|
Logic:
|
||||||
- get preferred_interface & prefered_instance_network_family -> str(IP)
|
- get preferred_interface & prefered_container_network_family -> str(IP)
|
||||||
- first Interface from: network_interfaces prefered_instance_network_family -> str(IP)
|
- first Interface from: network_interfaces prefered_container_network_family -> str(IP)
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
str(instance_name): name of instance
|
str(container_name): name of container
|
||||||
Kwargs:
|
Kwargs:
|
||||||
None
|
None
|
||||||
Raises:
|
Raises:
|
||||||
None
|
None
|
||||||
Returns:
|
Returns:
|
||||||
dict(interface_name: ip)"""
|
dict(interface_name: ip)"""
|
||||||
prefered_interface = self._get_data_entry('inventory/{0}/preferred_interface'.format(instance_name)) # name or None
|
prefered_interface = self._get_data_entry('inventory/{0}/preferred_interface'.format(container_name)) # name or None
|
||||||
prefered_instance_network_family = self.prefered_instance_network_family
|
prefered_container_network_family = self.prefered_container_network_family
|
||||||
|
|
||||||
ip_address = ''
|
ip_address = ''
|
||||||
if prefered_interface:
|
if prefered_interface:
|
||||||
interface = self._get_data_entry('inventory/{0}/network_interfaces/{1}'.format(instance_name, prefered_interface))
|
interface = self._get_data_entry('inventory/{0}/network_interfaces/{1}'.format(container_name, prefered_interface))
|
||||||
for config in interface:
|
for config in interface:
|
||||||
if config['family'] == prefered_instance_network_family:
|
if config['family'] == prefered_container_network_family:
|
||||||
ip_address = config['address']
|
ip_address = config['address']
|
||||||
break
|
break
|
||||||
else:
|
else:
|
||||||
interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name))
|
interface = self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name))
|
||||||
for interface in interfaces.values():
|
for config in interface:
|
||||||
for config in interface:
|
if config['family'] == prefered_container_network_family:
|
||||||
if config['family'] == prefered_instance_network_family:
|
ip_address = config['address']
|
||||||
ip_address = config['address']
|
break
|
||||||
break
|
|
||||||
return ip_address
|
return ip_address
|
||||||
|
|
||||||
if self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)): # instance have network interfaces
|
if self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name)): # container have network interfaces
|
||||||
self.inventory.set_variable(instance_name, 'ansible_connection', 'ssh')
|
if self._get_data_entry('inventory/{0}/preferred_interface'.format(container_name)): # container have a preferred interface
|
||||||
self.inventory.set_variable(instance_name, 'ansible_host', interface_selection(instance_name))
|
self.inventory.set_variable(container_name, 'ansible_connection', 'ssh')
|
||||||
|
self.inventory.set_variable(container_name, 'ansible_host', interface_selection(container_name))
|
||||||
else:
|
else:
|
||||||
self.inventory.set_variable(instance_name, 'ansible_connection', 'local')
|
self.inventory.set_variable(container_name, 'ansible_connection', 'local')
|
||||||
|
|
||||||
def build_inventory_hosts(self):
|
def build_inventory_hosts(self):
|
||||||
"""Build host-part dynamic inventory
|
"""Build host-part dynamic inventory
|
||||||
@@ -654,33 +617,29 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
None
|
None
|
||||||
Returns:
|
Returns:
|
||||||
None"""
|
None"""
|
||||||
for instance_name in self.data['inventory']:
|
for container_name in self.data['inventory']:
|
||||||
instance_state = str(self._get_data_entry('inventory/{0}/state'.format(instance_name)) or "STOPPED").lower()
|
# Only consider containers that match the "state" filter, if self.state is not None
|
||||||
|
|
||||||
# Only consider instances that match the "state" filter, if self.state is not None
|
|
||||||
if self.filter:
|
if self.filter:
|
||||||
if self.filter.lower() != instance_state:
|
if self.filter.lower() != self._get_data_entry('inventory/{0}/state'.format(container_name)).lower():
|
||||||
continue
|
continue
|
||||||
# add instance
|
# add container
|
||||||
self.inventory.add_host(instance_name)
|
self.inventory.add_host(container_name)
|
||||||
# add network informations
|
# add network informations
|
||||||
self.build_inventory_network(instance_name)
|
self.build_inventory_network(container_name)
|
||||||
# add os
|
# add os
|
||||||
self.inventory.set_variable(instance_name, 'ansible_lxd_os', self._get_data_entry('inventory/{0}/os'.format(instance_name)).lower())
|
self.inventory.set_variable(container_name, 'ansible_lxd_os', self._get_data_entry('inventory/{0}/os'.format(container_name)).lower())
|
||||||
# add release
|
# add release
|
||||||
self.inventory.set_variable(instance_name, 'ansible_lxd_release', self._get_data_entry('inventory/{0}/release'.format(instance_name)).lower())
|
self.inventory.set_variable(container_name, 'ansible_lxd_release', self._get_data_entry('inventory/{0}/release'.format(container_name)).lower())
|
||||||
# add profile
|
# add profile
|
||||||
self.inventory.set_variable(instance_name, 'ansible_lxd_profile', self._get_data_entry('inventory/{0}/profile'.format(instance_name)))
|
self.inventory.set_variable(container_name, 'ansible_lxd_profile', self._get_data_entry('inventory/{0}/profile'.format(container_name)))
|
||||||
# add state
|
# add state
|
||||||
self.inventory.set_variable(instance_name, 'ansible_lxd_state', instance_state)
|
self.inventory.set_variable(container_name, 'ansible_lxd_state', self._get_data_entry('inventory/{0}/state'.format(container_name)).lower())
|
||||||
# add type
|
|
||||||
self.inventory.set_variable(instance_name, 'ansible_lxd_type', self._get_data_entry('inventory/{0}/type'.format(instance_name)))
|
|
||||||
# add location information
|
# add location information
|
||||||
if self._get_data_entry('inventory/{0}/location'.format(instance_name)) != "none": # wrong type by lxd 'none' != 'None'
|
if self._get_data_entry('inventory/{0}/location'.format(container_name)) != "none": # wrong type by lxd 'none' != 'None'
|
||||||
self.inventory.set_variable(instance_name, 'ansible_lxd_location', self._get_data_entry('inventory/{0}/location'.format(instance_name)))
|
self.inventory.set_variable(container_name, 'ansible_lxd_location', self._get_data_entry('inventory/{0}/location'.format(container_name)))
|
||||||
# add VLAN_ID information
|
# add VLAN_ID information
|
||||||
if self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)):
|
if self._get_data_entry('inventory/{0}/vlan_ids'.format(container_name)):
|
||||||
self.inventory.set_variable(instance_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)))
|
self.inventory.set_variable(container_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(container_name)))
|
||||||
|
|
||||||
def build_inventory_groups_location(self, group_name):
|
def build_inventory_groups_location(self, group_name):
|
||||||
"""create group by attribute: location
|
"""create group by attribute: location
|
||||||
@@ -697,9 +656,9 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
if group_name not in self.inventory.groups:
|
if group_name not in self.inventory.groups:
|
||||||
self.inventory.add_group(group_name)
|
self.inventory.add_group(group_name)
|
||||||
|
|
||||||
for instance_name in self.inventory.hosts:
|
for container_name in self.inventory.hosts:
|
||||||
if 'ansible_lxd_location' in self.inventory.get_host(instance_name).get_vars():
|
if 'ansible_lxd_location' in self.inventory.get_host(container_name).get_vars():
|
||||||
self.inventory.add_child(group_name, instance_name)
|
self.inventory.add_child(group_name, container_name)
|
||||||
|
|
||||||
def build_inventory_groups_pattern(self, group_name):
|
def build_inventory_groups_pattern(self, group_name):
|
||||||
"""create group by name pattern
|
"""create group by name pattern
|
||||||
@@ -718,10 +677,10 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
|
|
||||||
regex_pattern = self.groupby[group_name].get('attribute')
|
regex_pattern = self.groupby[group_name].get('attribute')
|
||||||
|
|
||||||
for instance_name in self.inventory.hosts:
|
for container_name in self.inventory.hosts:
|
||||||
result = re.search(regex_pattern, instance_name)
|
result = re.search(regex_pattern, container_name)
|
||||||
if result:
|
if result:
|
||||||
self.inventory.add_child(group_name, instance_name)
|
self.inventory.add_child(group_name, container_name)
|
||||||
|
|
||||||
def build_inventory_groups_network_range(self, group_name):
|
def build_inventory_groups_network_range(self, group_name):
|
||||||
"""check if IP is in network-class
|
"""check if IP is in network-class
|
||||||
@@ -744,14 +703,14 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
raise AnsibleParserError(
|
raise AnsibleParserError(
|
||||||
'Error while parsing network range {0}: {1}'.format(self.groupby[group_name].get('attribute'), to_native(err)))
|
'Error while parsing network range {0}: {1}'.format(self.groupby[group_name].get('attribute'), to_native(err)))
|
||||||
|
|
||||||
for instance_name in self.inventory.hosts:
|
for container_name in self.inventory.hosts:
|
||||||
if self.data['inventory'][instance_name].get('network_interfaces') is not None:
|
if self.data['inventory'][container_name].get('network_interfaces') is not None:
|
||||||
for interface in self.data['inventory'][instance_name].get('network_interfaces'):
|
for interface in self.data['inventory'][container_name].get('network_interfaces'):
|
||||||
for interface_family in self.data['inventory'][instance_name].get('network_interfaces')[interface]:
|
for interface_family in self.data['inventory'][container_name].get('network_interfaces')[interface]:
|
||||||
try:
|
try:
|
||||||
address = ipaddress.ip_address(to_text(interface_family['address']))
|
address = ipaddress.ip_address(to_text(interface_family['address']))
|
||||||
if address.version == network.version and address in network:
|
if address.version == network.version and address in network:
|
||||||
self.inventory.add_child(group_name, instance_name)
|
self.inventory.add_child(group_name, container_name)
|
||||||
except ValueError:
|
except ValueError:
|
||||||
# Ignore invalid IP addresses returned by lxd
|
# Ignore invalid IP addresses returned by lxd
|
||||||
pass
|
pass
|
||||||
@@ -762,7 +721,7 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
Args:
|
Args:
|
||||||
str(group_name): Group name
|
str(group_name): Group name
|
||||||
Kwargs:
|
Kwargs:
|
||||||
None
|
Noneself.data['inventory'][container_name][interface]
|
||||||
Raises:
|
Raises:
|
||||||
None
|
None
|
||||||
Returns:
|
Returns:
|
||||||
@@ -771,12 +730,12 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
if group_name not in self.inventory.groups:
|
if group_name not in self.inventory.groups:
|
||||||
self.inventory.add_group(group_name)
|
self.inventory.add_group(group_name)
|
||||||
|
|
||||||
gen_instances = [
|
gen_containers = [
|
||||||
instance_name for instance_name in self.inventory.hosts
|
container_name for container_name in self.inventory.hosts
|
||||||
if 'ansible_lxd_os' in self.inventory.get_host(instance_name).get_vars()]
|
if 'ansible_lxd_os' in self.inventory.get_host(container_name).get_vars()]
|
||||||
for instance_name in gen_instances:
|
for container_name in gen_containers:
|
||||||
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_os'):
|
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(container_name).get_vars().get('ansible_lxd_os'):
|
||||||
self.inventory.add_child(group_name, instance_name)
|
self.inventory.add_child(group_name, container_name)
|
||||||
|
|
||||||
def build_inventory_groups_release(self, group_name):
|
def build_inventory_groups_release(self, group_name):
|
||||||
"""create group by attribute: release
|
"""create group by attribute: release
|
||||||
@@ -793,12 +752,12 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
if group_name not in self.inventory.groups:
|
if group_name not in self.inventory.groups:
|
||||||
self.inventory.add_group(group_name)
|
self.inventory.add_group(group_name)
|
||||||
|
|
||||||
gen_instances = [
|
gen_containers = [
|
||||||
instance_name for instance_name in self.inventory.hosts
|
container_name for container_name in self.inventory.hosts
|
||||||
if 'ansible_lxd_release' in self.inventory.get_host(instance_name).get_vars()]
|
if 'ansible_lxd_release' in self.inventory.get_host(container_name).get_vars()]
|
||||||
for instance_name in gen_instances:
|
for container_name in gen_containers:
|
||||||
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_release'):
|
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(container_name).get_vars().get('ansible_lxd_release'):
|
||||||
self.inventory.add_child(group_name, instance_name)
|
self.inventory.add_child(group_name, container_name)
|
||||||
|
|
||||||
def build_inventory_groups_profile(self, group_name):
|
def build_inventory_groups_profile(self, group_name):
|
||||||
"""create group by attribute: profile
|
"""create group by attribute: profile
|
||||||
@@ -815,12 +774,12 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
if group_name not in self.inventory.groups:
|
if group_name not in self.inventory.groups:
|
||||||
self.inventory.add_group(group_name)
|
self.inventory.add_group(group_name)
|
||||||
|
|
||||||
gen_instances = [
|
gen_containers = [
|
||||||
instance_name for instance_name in self.inventory.hosts.keys()
|
container_name for container_name in self.inventory.hosts.keys()
|
||||||
if 'ansible_lxd_profile' in self.inventory.get_host(instance_name).get_vars().keys()]
|
if 'ansible_lxd_profile' in self.inventory.get_host(container_name).get_vars().keys()]
|
||||||
for instance_name in gen_instances:
|
for container_name in gen_containers:
|
||||||
if self.groupby[group_name].get('attribute').lower() in self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_profile'):
|
if self.groupby[group_name].get('attribute').lower() in self.inventory.get_host(container_name).get_vars().get('ansible_lxd_profile'):
|
||||||
self.inventory.add_child(group_name, instance_name)
|
self.inventory.add_child(group_name, container_name)
|
||||||
|
|
||||||
def build_inventory_groups_vlanid(self, group_name):
|
def build_inventory_groups_vlanid(self, group_name):
|
||||||
"""create group by attribute: vlanid
|
"""create group by attribute: vlanid
|
||||||
@@ -837,34 +796,12 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
if group_name not in self.inventory.groups:
|
if group_name not in self.inventory.groups:
|
||||||
self.inventory.add_group(group_name)
|
self.inventory.add_group(group_name)
|
||||||
|
|
||||||
gen_instances = [
|
gen_containers = [
|
||||||
instance_name for instance_name in self.inventory.hosts.keys()
|
container_name for container_name in self.inventory.hosts.keys()
|
||||||
if 'ansible_lxd_vlan_ids' in self.inventory.get_host(instance_name).get_vars().keys()]
|
if 'ansible_lxd_vlan_ids' in self.inventory.get_host(container_name).get_vars().keys()]
|
||||||
for instance_name in gen_instances:
|
for container_name in gen_containers:
|
||||||
if self.groupby[group_name].get('attribute') in self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_vlan_ids').values():
|
if self.groupby[group_name].get('attribute') in self.inventory.get_host(container_name).get_vars().get('ansible_lxd_vlan_ids').values():
|
||||||
self.inventory.add_child(group_name, instance_name)
|
self.inventory.add_child(group_name, container_name)
|
||||||
|
|
||||||
def build_inventory_groups_type(self, group_name):
|
|
||||||
"""create group by attribute: type
|
|
||||||
|
|
||||||
Args:
|
|
||||||
str(group_name): Group name
|
|
||||||
Kwargs:
|
|
||||||
None
|
|
||||||
Raises:
|
|
||||||
None
|
|
||||||
Returns:
|
|
||||||
None"""
|
|
||||||
# maybe we just want to expand one group
|
|
||||||
if group_name not in self.inventory.groups:
|
|
||||||
self.inventory.add_group(group_name)
|
|
||||||
|
|
||||||
gen_instances = [
|
|
||||||
instance_name for instance_name in self.inventory.hosts
|
|
||||||
if 'ansible_lxd_type' in self.inventory.get_host(instance_name).get_vars()]
|
|
||||||
for instance_name in gen_instances:
|
|
||||||
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_type'):
|
|
||||||
self.inventory.add_child(group_name, instance_name)
|
|
||||||
|
|
||||||
def build_inventory_groups(self):
|
def build_inventory_groups(self):
|
||||||
"""Build group-part dynamic inventory
|
"""Build group-part dynamic inventory
|
||||||
@@ -893,7 +830,6 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
* 'release'
|
* 'release'
|
||||||
* 'profile'
|
* 'profile'
|
||||||
* 'vlanid'
|
* 'vlanid'
|
||||||
* 'type'
|
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
str(group_name): Group name
|
str(group_name): Group name
|
||||||
@@ -919,8 +855,6 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
self.build_inventory_groups_profile(group_name)
|
self.build_inventory_groups_profile(group_name)
|
||||||
elif self.groupby[group_name].get('type') == 'vlanid':
|
elif self.groupby[group_name].get('type') == 'vlanid':
|
||||||
self.build_inventory_groups_vlanid(group_name)
|
self.build_inventory_groups_vlanid(group_name)
|
||||||
elif self.groupby[group_name].get('type') == 'type':
|
|
||||||
self.build_inventory_groups_type(group_name)
|
|
||||||
else:
|
else:
|
||||||
raise AnsibleParserError('Unknown group type: {0}'.format(to_native(group_name)))
|
raise AnsibleParserError('Unknown group type: {0}'.format(to_native(group_name)))
|
||||||
|
|
||||||
@@ -947,30 +881,10 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
self.build_inventory_hosts()
|
self.build_inventory_hosts()
|
||||||
self.build_inventory_groups()
|
self.build_inventory_groups()
|
||||||
|
|
||||||
def cleandata(self):
|
|
||||||
"""Clean the dynamic inventory
|
|
||||||
|
|
||||||
The first version of the inventory only supported container.
|
|
||||||
This will change in the future.
|
|
||||||
The following function cleans up the data and remove the all items with the wrong type.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
None
|
|
||||||
Kwargs:
|
|
||||||
None
|
|
||||||
Raises:
|
|
||||||
None
|
|
||||||
Returns:
|
|
||||||
None"""
|
|
||||||
iter_keys = list(self.data['instances'].keys())
|
|
||||||
for instance_name in iter_keys:
|
|
||||||
if self._get_data_entry('instances/{0}/instances/metadata/type'.format(instance_name)) != self.type_filter:
|
|
||||||
del self.data['instances'][instance_name]
|
|
||||||
|
|
||||||
def _populate(self):
|
def _populate(self):
|
||||||
"""Return the hosts and groups
|
"""Return the hosts and groups
|
||||||
|
|
||||||
Returns the processed instance configurations from the lxd import
|
Returns the processed container configurations from the lxd import
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
None
|
None
|
||||||
@@ -983,16 +897,10 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
|
|
||||||
if len(self.data) == 0: # If no data is injected by unittests open socket
|
if len(self.data) == 0: # If no data is injected by unittests open socket
|
||||||
self.socket = self._connect_to_socket()
|
self.socket = self._connect_to_socket()
|
||||||
self.get_instance_data(self._get_instances())
|
self.get_container_data(self._get_containers())
|
||||||
self.get_network_data(self._get_networks())
|
self.get_network_data(self._get_networks())
|
||||||
|
|
||||||
# The first version of the inventory only supported containers.
|
self.extract_information_from_container_configs()
|
||||||
# This will change in the future.
|
|
||||||
# The following function cleans up the data.
|
|
||||||
if self.type_filter != 'both':
|
|
||||||
self.cleandata()
|
|
||||||
|
|
||||||
self.extract_information_from_instance_configs()
|
|
||||||
|
|
||||||
# self.display.vvv(self.save_json_data([os.path.abspath(__file__)]))
|
# self.display.vvv(self.save_json_data([os.path.abspath(__file__)]))
|
||||||
|
|
||||||
@@ -1016,10 +924,6 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
AnsibleParserError
|
AnsibleParserError
|
||||||
Returns:
|
Returns:
|
||||||
None"""
|
None"""
|
||||||
if IPADDRESS_IMPORT_ERROR:
|
|
||||||
raise_from(
|
|
||||||
AnsibleError('another_library must be installed to use this plugin'),
|
|
||||||
IPADDRESS_IMPORT_ERROR)
|
|
||||||
|
|
||||||
super(InventoryModule, self).parse(inventory, loader, path, cache=False)
|
super(InventoryModule, self).parse(inventory, loader, path, cache=False)
|
||||||
# Read the inventory YAML file
|
# Read the inventory YAML file
|
||||||
@@ -1031,9 +935,8 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
self.data = {} # store for inventory-data
|
self.data = {} # store for inventory-data
|
||||||
self.groupby = self.get_option('groupby')
|
self.groupby = self.get_option('groupby')
|
||||||
self.plugin = self.get_option('plugin')
|
self.plugin = self.get_option('plugin')
|
||||||
self.prefered_instance_network_family = self.get_option('prefered_instance_network_family')
|
self.prefered_container_network_family = self.get_option('prefered_container_network_family')
|
||||||
self.prefered_instance_network_interface = self.get_option('prefered_instance_network_interface')
|
self.prefered_container_network_interface = self.get_option('prefered_container_network_interface')
|
||||||
self.type_filter = self.get_option('type_filter')
|
|
||||||
if self.get_option('state').lower() == 'none': # none in config is str()
|
if self.get_option('state').lower() == 'none': # none in config is str()
|
||||||
self.filter = None
|
self.filter = None
|
||||||
else:
|
else:
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright (c) 2017 Ansible Project
|
# Copyright (c) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
@@ -57,7 +56,7 @@ from subprocess import Popen, PIPE
|
|||||||
|
|
||||||
from ansible import constants as C
|
from ansible import constants as C
|
||||||
from ansible.errors import AnsibleParserError
|
from ansible.errors import AnsibleParserError
|
||||||
from ansible.module_utils.common.text.converters import to_native, to_text
|
from ansible.module_utils._text import to_native, to_text
|
||||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
|
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
|
||||||
from ansible.module_utils.common.process import get_bin_path
|
from ansible.module_utils.common.process import get_bin_path
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright (c) 2018 Ansible Project
|
# Copyright (c) 2018 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
@@ -8,7 +7,7 @@ __metaclass__ = type
|
|||||||
DOCUMENTATION = r'''
|
DOCUMENTATION = r'''
|
||||||
name: online
|
name: online
|
||||||
author:
|
author:
|
||||||
- Remy Leone (@remyleone)
|
- Remy Leone (@sieben)
|
||||||
short_description: Scaleway (previously Online SAS or Online.net) inventory source
|
short_description: Scaleway (previously Online SAS or Online.net) inventory source
|
||||||
description:
|
description:
|
||||||
- Get inventory hosts from Scaleway (previously Online SAS or Online.net).
|
- Get inventory hosts from Scaleway (previously Online SAS or Online.net).
|
||||||
@@ -62,7 +61,7 @@ from sys import version as python_version
|
|||||||
from ansible.errors import AnsibleError
|
from ansible.errors import AnsibleError
|
||||||
from ansible.module_utils.urls import open_url
|
from ansible.module_utils.urls import open_url
|
||||||
from ansible.plugins.inventory import BaseInventoryPlugin
|
from ansible.plugins.inventory import BaseInventoryPlugin
|
||||||
from ansible.module_utils.common.text.converters import to_native, to_text
|
from ansible.module_utils._text import to_native, to_text
|
||||||
from ansible.module_utils.ansible_release import __version__ as ansible_version
|
from ansible.module_utils.ansible_release import __version__ as ansible_version
|
||||||
from ansible.module_utils.six.moves.urllib.parse import urljoin
|
from ansible.module_utils.six.moves.urllib.parse import urljoin
|
||||||
|
|
||||||
@@ -235,7 +234,7 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
|
|
||||||
self.headers = {
|
self.headers = {
|
||||||
'Authorization': "Bearer %s" % token,
|
'Authorization': "Bearer %s" % token,
|
||||||
'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ', 1)[0]),
|
'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ')[0]),
|
||||||
'Content-type': 'application/json'
|
'Content-type': 'application/json'
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,239 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright (c) 2020, FELDSAM s.r.o. - FeldHost™ <support@feldhost.cz>
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
name: opennebula
|
|
||||||
author:
|
|
||||||
- Kristian Feldsam (@feldsam)
|
|
||||||
short_description: OpenNebula inventory source
|
|
||||||
version_added: "3.8.0"
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- constructed
|
|
||||||
description:
|
|
||||||
- Get inventory hosts from OpenNebula cloud.
|
|
||||||
- Uses an YAML configuration file ending with either I(opennebula.yml) or I(opennebula.yaml)
|
|
||||||
to set parameter values.
|
|
||||||
- Uses I(api_authfile), C(~/.one/one_auth), or C(ONE_AUTH) pointing to a OpenNebula credentials file.
|
|
||||||
options:
|
|
||||||
plugin:
|
|
||||||
description: Token that ensures this is a source file for the 'opennebula' plugin.
|
|
||||||
type: string
|
|
||||||
required: true
|
|
||||||
choices: [ community.general.opennebula ]
|
|
||||||
api_url:
|
|
||||||
description:
|
|
||||||
- URL of the OpenNebula RPC server.
|
|
||||||
- It is recommended to use HTTPS so that the username/password are not
|
|
||||||
transferred over the network unencrypted.
|
|
||||||
- If not set then the value of the C(ONE_URL) environment variable is used.
|
|
||||||
env:
|
|
||||||
- name: ONE_URL
|
|
||||||
required: True
|
|
||||||
type: string
|
|
||||||
api_username:
|
|
||||||
description:
|
|
||||||
- Name of the user to login into the OpenNebula RPC server. If not set
|
|
||||||
then the value of the C(ONE_USERNAME) environment variable is used.
|
|
||||||
env:
|
|
||||||
- name: ONE_USERNAME
|
|
||||||
type: string
|
|
||||||
api_password:
|
|
||||||
description:
|
|
||||||
- Password or a token of the user to login into OpenNebula RPC server.
|
|
||||||
- If not set, the value of the C(ONE_PASSWORD) environment variable is used.
|
|
||||||
env:
|
|
||||||
- name: ONE_PASSWORD
|
|
||||||
required: False
|
|
||||||
type: string
|
|
||||||
api_authfile:
|
|
||||||
description:
|
|
||||||
- If both I(api_username) or I(api_password) are not set, then it will try
|
|
||||||
authenticate with ONE auth file. Default path is C(~/.one/one_auth).
|
|
||||||
- Set environment variable C(ONE_AUTH) to override this path.
|
|
||||||
env:
|
|
||||||
- name: ONE_AUTH
|
|
||||||
required: False
|
|
||||||
type: string
|
|
||||||
hostname:
|
|
||||||
description: Field to match the hostname. Note C(v4_first_ip) corresponds to the first IPv4 found on VM.
|
|
||||||
type: string
|
|
||||||
default: v4_first_ip
|
|
||||||
choices:
|
|
||||||
- v4_first_ip
|
|
||||||
- v6_first_ip
|
|
||||||
- name
|
|
||||||
filter_by_label:
|
|
||||||
description: Only return servers filtered by this label.
|
|
||||||
type: string
|
|
||||||
group_by_labels:
|
|
||||||
description: Create host groups by vm labels
|
|
||||||
type: bool
|
|
||||||
default: True
|
|
||||||
'''
|
|
||||||
|
|
||||||
EXAMPLES = r'''
|
|
||||||
# inventory_opennebula.yml file in YAML format
|
|
||||||
# Example command line: ansible-inventory --list -i inventory_opennebula.yml
|
|
||||||
|
|
||||||
# Pass a label filter to the API
|
|
||||||
plugin: community.general.opennebula
|
|
||||||
api_url: https://opennebula:2633/RPC2
|
|
||||||
filter_by_label: Cache
|
|
||||||
'''
|
|
||||||
|
|
||||||
try:
|
|
||||||
import pyone
|
|
||||||
|
|
||||||
HAS_PYONE = True
|
|
||||||
except ImportError:
|
|
||||||
HAS_PYONE = False
|
|
||||||
|
|
||||||
from ansible.errors import AnsibleError
|
|
||||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
|
||||||
from ansible.module_utils._text import to_native
|
|
||||||
|
|
||||||
from collections import namedtuple
|
|
||||||
import os
|
|
||||||
|
|
||||||
|
|
||||||
class InventoryModule(BaseInventoryPlugin, Constructable):
    """Dynamic inventory plugin that sources hosts (VMs) from an OpenNebula frontend.

    Connects to the OpenNebula XML-RPC endpoint (via pyone), retrieves the
    VM pool, optionally filters/groups by the VMs' USER_TEMPLATE labels, and
    exposes each VM's template attributes as host variables.
    """

    NAME = 'community.general.opennebula'

    def verify_file(self, path):
        """Return True only for config files named ``opennebula.yml``/``opennebula.yaml``."""
        valid = False
        if super(InventoryModule, self).verify_file(path):
            if path.endswith(('opennebula.yaml', 'opennebula.yml')):
                valid = True
        return valid

    def _get_connection_info(self):
        """Resolve the XML-RPC URL and credentials.

        Credentials come from the inventory options; when neither username nor
        password is configured, fall back to an ONE_AUTH file (default
        ``~/.one/one_auth``) containing a single ``user:password`` line.

        :returns: namedtuple ``auth(url, username, password)``
        :raises AnsibleError: if the ONE_AUTH file is missing or unreadable.
        """
        url = self.get_option('api_url')
        username = self.get_option('api_username')
        password = self.get_option('api_password')
        authfile = self.get_option('api_authfile')

        if not username and not password:
            if authfile is None:
                # expanduser("~") is safe even when $HOME is unset, unlike
                # os.environ.get("HOME") which would be None and make join() fail.
                authfile = os.path.join(os.path.expanduser("~"), ".one", "one_auth")
            try:
                with open(authfile, "r") as fp:
                    authstring = fp.read().rstrip()
                username, password = authstring.split(":")
            except (OSError, IOError):
                raise AnsibleError("Could not find or read ONE_AUTH file at '{e}'".format(e=authfile))
            except Exception:
                raise AnsibleError("Error occurs when reading ONE_AUTH file at '{e}'".format(e=authfile))

        auth_params = namedtuple('auth', ('url', 'username', 'password'))

        return auth_params(url=url, username=username, password=password)

    def _get_vm_ipv4(self, vm):
        """Return the first IPv4 address found on the VM's NICs, or False if none."""
        nic = vm.TEMPLATE.get('NIC')

        # A single NIC is a dict; several NICs come as a list. Normalize.
        if isinstance(nic, dict):
            nic = [nic]

        # 'nic or []' guards VMs without any NIC (nic is None).
        for net in nic or []:
            # A NIC may carry no IPv4 address (e.g. IPv6-only); skip instead
            # of raising KeyError and aborting the whole inventory run.
            if net.get('IP'):
                return net['IP']

        return False

    def _get_vm_ipv6(self, vm):
        """Return the first global IPv6 address found on the VM's NICs, or False if none."""
        nic = vm.TEMPLATE.get('NIC')

        if isinstance(nic, dict):
            nic = [nic]

        # 'nic or []' guards VMs without any NIC (nic is None).
        for net in nic or []:
            if net.get('IP6_GLOBAL'):
                return net['IP6_GLOBAL']

        return False

    def _get_vm_pool(self):
        """Fetch the extended VM pool over XML-RPC.

        :raises AnsibleError: if credentials are missing or the RPC call fails.
        """
        auth = self._get_connection_info()

        if not (auth.username and auth.password):
            raise AnsibleError('API Credentials missing. Check OpenNebula inventory file.')
        else:
            one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)

        # get hosts (VMs); arguments select the visible VM range and a state
        # filter (presumably ACTIVE VMs only) — see the OpenNebula
        # one.vmpool.infoextended XML-RPC documentation.
        try:
            vm_pool = one_client.vmpool.infoextended(-2, -1, -1, 3)
        except Exception as e:
            raise AnsibleError("Something happened during XML-RPC call: {e}".format(e=to_native(e)))

        return vm_pool

    def _retrieve_servers(self, label_filter=None):
        """Build a list of server dicts from the VM pool.

        Each entry is the VM's USER_TEMPLATE augmented with ``name``,
        ``LABELS`` (sanitized list), ``v4_first_ip`` and ``v6_first_ip``.
        When *label_filter* is given, only VMs carrying that label are kept.
        """
        vm_pool = self._get_vm_pool()

        result = []

        # iterate over hosts
        for vm in vm_pool.VM:
            server = vm.USER_TEMPLATE

            labels = []
            if vm.USER_TEMPLATE.get('LABELS'):
                # Keep only safe characters, then normalize separators so each
                # label becomes a valid Ansible group name.
                labels = [s for s in vm.USER_TEMPLATE.get('LABELS') if s == ',' or s == '-' or s.isalnum() or s.isspace()]
                labels = ''.join(labels)
                labels = labels.replace(' ', '_')
                labels = labels.replace('-', '_')
                labels = labels.split(',')

            # filter by label
            if label_filter is not None:
                if label_filter not in labels:
                    continue

            server['name'] = vm.NAME
            server['LABELS'] = labels
            server['v4_first_ip'] = self._get_vm_ipv4(vm)
            server['v6_first_ip'] = self._get_vm_ipv6(vm)

            result.append(server)

        return result

    def _populate(self):
        """Fill the Ansible inventory with hosts, groups and host variables."""
        hostname_preference = self.get_option('hostname')
        group_by_labels = self.get_option('group_by_labels')

        # Ensure the top-level 'all' group exists so every host lands in it.
        self.inventory.add_group(group='all')

        filter_by_label = self.get_option('filter_by_label')
        for server in self._retrieve_servers(filter_by_label):
            # optionally create one group per VM label
            if group_by_labels and server['LABELS']:
                for label in server['LABELS']:
                    self.inventory.add_group(group=label)
                    self.inventory.add_host(host=server['name'], group=label)

            self.inventory.add_host(host=server['name'], group='all')

            # expose every USER_TEMPLATE attribute as a host variable
            for attribute, value in server.items():
                self.inventory.set_variable(server['name'], attribute, value)

            # honor the configured hostname preference (e.g. v4_first_ip)
            if hostname_preference != 'name':
                self.inventory.set_variable(server['name'], 'ansible_host', server[hostname_preference])

            if server.get('SSH_PORT'):
                self.inventory.set_variable(server['name'], 'ansible_port', server['SSH_PORT'])

    def parse(self, inventory, loader, path, cache=True):
        """Entry point called by Ansible to parse the inventory source file.

        :raises AnsibleError: if the required ``pyone`` library is unavailable.
        """
        if not HAS_PYONE:
            raise AnsibleError('OpenNebula Inventory plugin requires pyone to work!')

        super(InventoryModule, self).parse(inventory, loader, path)
        self._read_config_data(path=path)

        self._populate()
@@ -88,24 +88,13 @@ DOCUMENTATION = '''
|
|||||||
'''
|
'''
|
||||||
|
|
||||||
EXAMPLES = '''
|
EXAMPLES = '''
|
||||||
# Minimal example which will not gather additional facts for QEMU/LXC guests
|
|
||||||
# By not specifying a URL the plugin will attempt to connect to the controller host on port 8006
|
|
||||||
# my.proxmox.yml
|
# my.proxmox.yml
|
||||||
plugin: community.general.proxmox
|
plugin: community.general.proxmox
|
||||||
|
url: http://localhost:8006
|
||||||
user: ansible@pve
|
user: ansible@pve
|
||||||
password: secure
|
password: secure
|
||||||
|
validate_certs: no
|
||||||
# More complete example demonstrating the use of 'want_facts' and the constructed options
|
|
||||||
# Note that using facts returned by 'want_facts' in constructed options requires 'want_facts=true'
|
|
||||||
# my.proxmox.yml
|
|
||||||
plugin: community.general.proxmox
|
|
||||||
url: http://pve.domain.com:8006
|
|
||||||
user: ansible@pve
|
|
||||||
password: secure
|
|
||||||
validate_certs: false
|
|
||||||
want_facts: true
|
|
||||||
keyed_groups:
|
keyed_groups:
|
||||||
# proxmox_tags_parsed is an example of a fact only returned when 'want_facts=true'
|
|
||||||
- key: proxmox_tags_parsed
|
- key: proxmox_tags_parsed
|
||||||
separator: ""
|
separator: ""
|
||||||
prefix: group
|
prefix: group
|
||||||
@@ -336,15 +325,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
|||||||
status_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), status_key.lower()))
|
status_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), status_key.lower()))
|
||||||
self.inventory.set_variable(name, status_key, status)
|
self.inventory.set_variable(name, status_key, status)
|
||||||
|
|
||||||
def _get_vm_snapshots(self, node, vmid, vmtype, name):
|
|
||||||
ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/snapshot" % (self.proxmox_url, node, vmtype, vmid))
|
|
||||||
|
|
||||||
snapshots_key = 'snapshots'
|
|
||||||
snapshots_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), snapshots_key.lower()))
|
|
||||||
|
|
||||||
snapshots = [snapshot['name'] for snapshot in ret if snapshot['name'] != 'current']
|
|
||||||
self.inventory.set_variable(name, snapshots_key, snapshots)
|
|
||||||
|
|
||||||
def to_safe(self, word):
|
def to_safe(self, word):
|
||||||
'''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
|
'''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
|
||||||
#> ProxmoxInventory.to_safe("foo-bar baz")
|
#> ProxmoxInventory.to_safe("foo-bar baz")
|
||||||
@@ -389,9 +369,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
|||||||
if node['type'] == 'node':
|
if node['type'] == 'node':
|
||||||
self.inventory.add_child(nodes_group, node['node'])
|
self.inventory.add_child(nodes_group, node['node'])
|
||||||
|
|
||||||
if node['status'] == 'offline':
|
|
||||||
continue
|
|
||||||
|
|
||||||
# get node IP address
|
# get node IP address
|
||||||
if self.get_option("want_proxmox_nodes_ansible_host"):
|
if self.get_option("want_proxmox_nodes_ansible_host"):
|
||||||
ip = self._get_node_ip(node['node'])
|
ip = self._get_node_ip(node['node'])
|
||||||
@@ -413,10 +390,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
|||||||
elif lxc['status'] == 'running':
|
elif lxc['status'] == 'running':
|
||||||
self.inventory.add_child(running_group, lxc['name'])
|
self.inventory.add_child(running_group, lxc['name'])
|
||||||
|
|
||||||
# get LXC config and snapshots for facts
|
# get LXC config for facts
|
||||||
if self.get_option('want_facts'):
|
if self.get_option('want_facts'):
|
||||||
self._get_vm_config(node['node'], lxc['vmid'], 'lxc', lxc['name'])
|
self._get_vm_config(node['node'], lxc['vmid'], 'lxc', lxc['name'])
|
||||||
self._get_vm_snapshots(node['node'], lxc['vmid'], 'lxc', lxc['name'])
|
|
||||||
|
|
||||||
self._apply_constructable(lxc["name"], self.inventory.get_host(lxc['name']).get_vars())
|
self._apply_constructable(lxc["name"], self.inventory.get_host(lxc['name']).get_vars())
|
||||||
|
|
||||||
@@ -424,7 +400,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
|||||||
node_qemu_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_qemu' % node['node']).lower()))
|
node_qemu_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_qemu' % node['node']).lower()))
|
||||||
self.inventory.add_group(node_qemu_group)
|
self.inventory.add_group(node_qemu_group)
|
||||||
for qemu in self._get_qemu_per_node(node['node']):
|
for qemu in self._get_qemu_per_node(node['node']):
|
||||||
if qemu.get('template'):
|
if qemu['template']:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
self.inventory.add_host(qemu['name'])
|
self.inventory.add_host(qemu['name'])
|
||||||
@@ -438,10 +414,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
|||||||
elif qemu['status'] == 'running':
|
elif qemu['status'] == 'running':
|
||||||
self.inventory.add_child(running_group, qemu['name'])
|
self.inventory.add_child(running_group, qemu['name'])
|
||||||
|
|
||||||
# get QEMU config and snapshots for facts
|
# get QEMU config for facts
|
||||||
if self.get_option('want_facts'):
|
if self.get_option('want_facts'):
|
||||||
self._get_vm_config(node['node'], qemu['vmid'], 'qemu', qemu['name'])
|
self._get_vm_config(node['node'], qemu['vmid'], 'qemu', qemu['name'])
|
||||||
self._get_vm_snapshots(node['node'], qemu['vmid'], 'qemu', qemu['name'])
|
|
||||||
|
|
||||||
self._apply_constructable(qemu["name"], self.inventory.get_host(qemu['name']).get_vars())
|
self._apply_constructable(qemu["name"], self.inventory.get_host(qemu['name']).get_vars())
|
||||||
|
|
||||||
|
|||||||
@@ -1,27 +1,24 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# Copyright (c) 2017 Ansible Project
|
||||||
# Copyright: (c) 2017 Ansible Project
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
|
||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = r'''
|
DOCUMENTATION = '''
|
||||||
name: scaleway
|
name: scaleway
|
||||||
author:
|
author:
|
||||||
- Remy Leone (@remyleone)
|
- Remy Leone (@sieben)
|
||||||
short_description: Scaleway inventory source
|
short_description: Scaleway inventory source
|
||||||
description:
|
description:
|
||||||
- Get inventory hosts from Scaleway.
|
- Get inventory hosts from Scaleway
|
||||||
requirements:
|
|
||||||
- PyYAML
|
|
||||||
options:
|
options:
|
||||||
plugin:
|
plugin:
|
||||||
description: Token that ensures this is a source file for the 'scaleway' plugin.
|
description: token that ensures this is a source file for the 'scaleway' plugin.
|
||||||
required: True
|
required: True
|
||||||
choices: ['scaleway', 'community.general.scaleway']
|
choices: ['scaleway', 'community.general.scaleway']
|
||||||
regions:
|
regions:
|
||||||
description: Filter results on a specific Scaleway region.
|
description: Filter results on a specific Scaleway region
|
||||||
type: list
|
type: list
|
||||||
default:
|
default:
|
||||||
- ams1
|
- ams1
|
||||||
@@ -29,14 +26,11 @@ DOCUMENTATION = r'''
|
|||||||
- par2
|
- par2
|
||||||
- waw1
|
- waw1
|
||||||
tags:
|
tags:
|
||||||
description: Filter results on a specific tag.
|
description: Filter results on a specific tag
|
||||||
type: list
|
type: list
|
||||||
oauth_token:
|
oauth_token:
|
||||||
description:
|
required: True
|
||||||
- Scaleway OAuth token.
|
description: Scaleway OAuth token.
|
||||||
- If not explicitly defined or in environment variables, it will try to lookup in the scaleway-cli configuration file
|
|
||||||
(C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)).
|
|
||||||
- More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/).
|
|
||||||
env:
|
env:
|
||||||
# in order of precedence
|
# in order of precedence
|
||||||
- name: SCW_TOKEN
|
- name: SCW_TOKEN
|
||||||
@@ -54,14 +48,14 @@ DOCUMENTATION = r'''
|
|||||||
- hostname
|
- hostname
|
||||||
- id
|
- id
|
||||||
variables:
|
variables:
|
||||||
description: 'Set individual variables: keys are variable names and
|
description: 'set individual variables: keys are variable names and
|
||||||
values are templates. Any value returned by the
|
values are templates. Any value returned by the
|
||||||
L(Scaleway API, https://developer.scaleway.com/#servers-server-get)
|
L(Scaleway API, https://developer.scaleway.com/#servers-server-get)
|
||||||
can be used.'
|
can be used.'
|
||||||
type: dict
|
type: dict
|
||||||
'''
|
'''
|
||||||
|
|
||||||
EXAMPLES = r'''
|
EXAMPLES = '''
|
||||||
# scaleway_inventory.yml file in YAML format
|
# scaleway_inventory.yml file in YAML format
|
||||||
# Example command line: ansible-inventory --list -i scaleway_inventory.yml
|
# Example command line: ansible-inventory --list -i scaleway_inventory.yml
|
||||||
|
|
||||||
@@ -87,33 +81,15 @@ regions:
|
|||||||
- par1
|
- par1
|
||||||
variables:
|
variables:
|
||||||
ansible_host: public_ip.address
|
ansible_host: public_ip.address
|
||||||
|
|
||||||
# Using static strings as variables
|
|
||||||
plugin: community.general.scaleway
|
|
||||||
hostnames:
|
|
||||||
- hostname
|
|
||||||
variables:
|
|
||||||
ansible_host: public_ip.address
|
|
||||||
ansible_connection: "'ssh'"
|
|
||||||
ansible_user: "'admin'"
|
|
||||||
'''
|
'''
|
||||||
|
|
||||||
import os
|
|
||||||
import json
|
import json
|
||||||
|
|
||||||
try:
|
|
||||||
import yaml
|
|
||||||
except ImportError as exc:
|
|
||||||
YAML_IMPORT_ERROR = exc
|
|
||||||
else:
|
|
||||||
YAML_IMPORT_ERROR = None
|
|
||||||
|
|
||||||
from ansible.errors import AnsibleError
|
from ansible.errors import AnsibleError
|
||||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||||
from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link
|
from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link
|
||||||
from ansible.module_utils.urls import open_url
|
from ansible.module_utils.urls import open_url
|
||||||
from ansible.module_utils.common.text.converters import to_native, to_text
|
from ansible.module_utils._text import to_native
|
||||||
from ansible.module_utils.six import raise_from
|
|
||||||
|
|
||||||
import ansible.module_utils.six.moves.urllib.parse as urllib_parse
|
import ansible.module_utils.six.moves.urllib.parse as urllib_parse
|
||||||
|
|
||||||
@@ -129,7 +105,7 @@ def _fetch_information(token, url):
|
|||||||
except Exception as e:
|
except Exception as e:
|
||||||
raise AnsibleError("Error while fetching %s: %s" % (url, to_native(e)))
|
raise AnsibleError("Error while fetching %s: %s" % (url, to_native(e)))
|
||||||
try:
|
try:
|
||||||
raw_json = json.loads(to_text(response.read()))
|
raw_json = json.loads(response.read())
|
||||||
except ValueError:
|
except ValueError:
|
||||||
raise AnsibleError("Incorrect JSON payload")
|
raise AnsibleError("Incorrect JSON payload")
|
||||||
|
|
||||||
@@ -254,7 +230,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
|||||||
|
|
||||||
if not matching_tags:
|
if not matching_tags:
|
||||||
return set()
|
return set()
|
||||||
return matching_tags.union((server_zone,))
|
else:
|
||||||
|
return matching_tags.union((server_zone,))
|
||||||
|
|
||||||
def _filter_host(self, host_infos, hostname_preferences):
|
def _filter_host(self, host_infos, hostname_preferences):
|
||||||
|
|
||||||
@@ -290,38 +267,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
|||||||
# Composed variables
|
# Composed variables
|
||||||
self._set_composite_vars(self.get_option('variables'), host_infos, hostname, strict=False)
|
self._set_composite_vars(self.get_option('variables'), host_infos, hostname, strict=False)
|
||||||
|
|
||||||
def get_oauth_token(self):
|
|
||||||
oauth_token = self.get_option('oauth_token')
|
|
||||||
|
|
||||||
if 'SCW_CONFIG_PATH' in os.environ:
|
|
||||||
scw_config_path = os.getenv('SCW_CONFIG_PATH')
|
|
||||||
elif 'XDG_CONFIG_HOME' in os.environ:
|
|
||||||
scw_config_path = os.path.join(os.getenv('XDG_CONFIG_HOME'), 'scw', 'config.yaml')
|
|
||||||
else:
|
|
||||||
scw_config_path = os.path.join(os.path.expanduser('~'), '.config', 'scw', 'config.yaml')
|
|
||||||
|
|
||||||
if not oauth_token and os.path.exists(scw_config_path):
|
|
||||||
with open(scw_config_path) as fh:
|
|
||||||
scw_config = yaml.safe_load(fh)
|
|
||||||
active_profile = scw_config.get('active_profile', 'default')
|
|
||||||
if active_profile == 'default':
|
|
||||||
oauth_token = scw_config.get('secret_key')
|
|
||||||
else:
|
|
||||||
oauth_token = scw_config['profiles'][active_profile].get('secret_key')
|
|
||||||
|
|
||||||
return oauth_token
|
|
||||||
|
|
||||||
def parse(self, inventory, loader, path, cache=True):
|
def parse(self, inventory, loader, path, cache=True):
|
||||||
if YAML_IMPORT_ERROR:
|
|
||||||
raise_from(AnsibleError('PyYAML is probably missing'), YAML_IMPORT_ERROR)
|
|
||||||
super(InventoryModule, self).parse(inventory, loader, path)
|
super(InventoryModule, self).parse(inventory, loader, path)
|
||||||
self._read_config_data(path=path)
|
self._read_config_data(path=path)
|
||||||
|
|
||||||
config_zones = self.get_option("regions")
|
config_zones = self.get_option("regions")
|
||||||
tags = self.get_option("tags")
|
tags = self.get_option("tags")
|
||||||
token = self.get_oauth_token()
|
token = self.get_option("oauth_token")
|
||||||
if not token:
|
|
||||||
raise AnsibleError("'oauth_token' value is null, you must configure it either in inventory, envvars or scaleway-cli config.")
|
|
||||||
hostname_preference = self.get_option("hostnames")
|
hostname_preference = self.get_option("hostnames")
|
||||||
|
|
||||||
for zone in self._get_zones(config_zones):
|
for zone in self._get_zones(config_zones):
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright (c) 2020 Shay Rybak <shay.rybak@stackpath.com>
|
# Copyright (c) 2020 Shay Rybak <shay.rybak@stackpath.com>
|
||||||
# Copyright (c) 2020 Ansible Project
|
# Copyright (c) 2020 Ansible Project
|
||||||
# GNU General Public License v3.0+
|
# GNU General Public License v3.0+
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user