Mirror of https://github.com/ansible-collections/community.general.git (synced 2026-04-29 09:56:53 +00:00)
Compare commits
119 Commits
SHA1:
16ffb4ba10 31c3865251 53e0bf8297 9b80b14956 be763e6ed2 4375280497 ebda14ba41 c16a5f3780 f6c1566924 bffed2fda5
440804fd62 a915a4b7c5 ed69bde7a9 77700e7110 91d445ab35 19c2af03b7 58a5463ddb 84941d0a7f 87880da6da 7acc0b897a
5174fc98d2 d9ad386a13 739719a3b1 311b618016 70820cab5d a75a12227f 6959847701 ad93c40d40 5bfbd65115 71de1ee1d5
ad4efaeb31 786ea68016 dd878f931f 8f03511d9c 004e6d06c3 25f46caefb 0a733c60ca f006aa4cf6 72e0d8c310 b96aaffeae
5bd5de4281 4aebefcf9e 62f9a5b0a9 3d03eda99e c01ce10b4b 16aa776c93 d7d1659e34 5b9b99384f f898279c8c 2215c6d360
ca3948858a f14e566cc7 a2c93f5e99 67a2abcab2 2e4864db7f 1f0b2a5173 25482000f0 c0f3aa14cf 1ef104be61 773df88a41
d77e256088 2917389779 59af80235b aec52198e3 cbe4490c9e 9de059b44d c72a23a5f1 0b9d9c0fdb 67eaf9405f 5de05a6243
46b4b9a6de 10146aae1c d2ec7053c5 51fcacae08 29211b970c 5c1fa53558 2348f3d439 46a051d168 b2212bc8ef e05e3aed67
a13541299e 221067e708 db6458bd93 f342243fb0 37f2b06c3c 0a8a41966d 263c5ba9de cfc28a3f6a b495035923 d4637e9b1c
7842dc0dea 314a0bc553 34b7876e4f 65c10de630 a86f31ac0f bc82fe36be 7c810a6186 9d468fb078 2c79d42eb4 d95c3a738f
839880d711 bc0edf7d55 68458fd8aa 4aa70ab48f 739210c6b9 8c23d0e345 cde4a1a099 10b3381f21 0ccd52b63a c76e598d61
ad3efa9719 b9c8d2bee5 2c167547f6 73de447489 134f6132ce f229c800da 52a0970ef8 11e0797650 e6bbbac6a0
@@ -68,6 +68,19 @@ stages:
- test: 3
- test: 4
- test: extra
- stage: Sanity_2_12
displayName: Sanity 2.12
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
nameFormat: Test {0}
testFormat: 2.12/sanity/{0}
targets:
- test: 1
- test: 2
- test: 3
- test: 4
- stage: Sanity_2_11
displayName: Sanity 2.11
dependsOn: []
@@ -117,7 +130,6 @@ stages:
nameFormat: Python {0}
testFormat: devel/units/{0}/1
targets:
- test: 2.6
- test: 2.7
- test: 3.5
- test: 3.6
@@ -125,6 +137,22 @@ stages:
- test: 3.8
- test: 3.9
- test: '3.10'
- stage: Units_2_12
displayName: Units 2.12
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
nameFormat: Python {0}
testFormat: 2.12/units/{0}/1
targets:
- test: 2.6
- test: 2.7
- test: 3.5
- test: 3.6
- test: 3.7
- test: 3.8
- test: '3.10'
- stage: Units_2_11
displayName: Units 2.11
dependsOn: []
@@ -150,13 +178,8 @@ stages:
nameFormat: Python {0}
testFormat: 2.10/units/{0}/1
targets:
- test: 2.6
- test: 2.7
- test: 3.5
- test: 3.6
- test: 3.7
- test: 3.8
- test: 3.9
- stage: Units_2_9
displayName: Units 2.9
dependsOn: []
@@ -186,8 +209,8 @@ stages:
test: macos/11.1
- name: RHEL 7.9
test: rhel/7.9
- name: RHEL 8.4
test: rhel/8.4
- name: RHEL 8.5
test: rhel/8.5
- name: FreeBSD 12.2
test: freebsd/12.2
- name: FreeBSD 13.0
@@ -196,6 +219,23 @@ stages:
- 1
- 2
- 3
- stage: Remote_2_12
displayName: Remote 2.12
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
testFormat: 2.12/{0}
targets:
- name: macOS 11.1
test: macos/11.1
- name: RHEL 8.4
test: rhel/8.4
- name: FreeBSD 13.0
test: freebsd/13.0
groups:
- 1
- 2
- stage: Remote_2_11
displayName: Remote 2.11
dependsOn: []
@@ -204,8 +244,6 @@ stages:
parameters:
testFormat: 2.11/{0}
targets:
- name: macOS 11.1
test: macos/11.1
- name: RHEL 7.9
test: rhel/7.9
- name: RHEL 8.3
@@ -227,14 +265,6 @@ stages:
test: osx/10.11
- name: macOS 10.15
test: macos/10.15
- name: macOS 11.1
test: macos/11.1
- name: RHEL 7.8
test: rhel/7.8
- name: RHEL 8.2
test: rhel/8.2
- name: FreeBSD 12.1
test: freebsd/12.1
groups:
- 1
- 2
@@ -248,6 +278,8 @@ stages:
targets:
- name: RHEL 8.2
test: rhel/8.2
- name: RHEL 7.8
test: rhel/7.8
- name: FreeBSD 12.0
test: freebsd/12.0
groups:
@@ -263,16 +295,12 @@ stages:
parameters:
testFormat: devel/linux/{0}
targets:
- name: CentOS 6
test: centos6
- name: CentOS 7
test: centos7
- name: CentOS 8
test: centos8
- name: Fedora 33
test: fedora33
- name: Fedora 34
test: fedora34
- name: Fedora 35
test: fedora35
- name: openSUSE 15 py2
test: opensuse15py2
- name: openSUSE 15 py3
@@ -285,6 +313,28 @@ stages:
- 1
- 2
- 3
- stage: Docker_2_12
displayName: Docker 2.12
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
testFormat: 2.12/linux/{0}
targets:
- name: CentOS 6
test: centos6
- name: CentOS 8
test: centos8
- name: Fedora 34
test: fedora34
- name: openSUSE 15 py3
test: opensuse15
- name: Ubuntu 20.04
test: ubuntu2004
groups:
- 1
- 2
- 3
- stage: Docker_2_11
displayName: Docker 2.11
dependsOn: []
@@ -293,14 +343,12 @@ stages:
parameters:
testFormat: 2.11/linux/{0}
targets:
- name: CentOS 8
test: centos8
- name: CentOS 7
test: centos7
- name: Fedora 33
test: fedora33
- name: openSUSE 15 py3
test: opensuse15
- name: Ubuntu 20.04
test: ubuntu2004
- name: openSUSE 15 py2
test: opensuse15py2
groups:
- 2
- 3
@@ -312,12 +360,8 @@ stages:
parameters:
testFormat: 2.10/linux/{0}
targets:
- name: CentOS 8
test: centos8
- name: Fedora 32
test: fedora32
- name: openSUSE 15 py3
test: opensuse15
- name: Ubuntu 16.04
test: ubuntu1604
groups:
@@ -331,8 +375,6 @@ stages:
parameters:
testFormat: 2.9/linux/{0}
targets:
- name: CentOS 8
test: centos8
- name: Fedora 31
test: fedora31
- name: openSUSE 15 py3
@@ -350,6 +392,17 @@ stages:
parameters:
nameFormat: Python {0}
testFormat: devel/cloud/{0}/1
targets:
- test: 2.7
- test: 3.9
- stage: Cloud_2_12
displayName: Cloud 2.12
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
nameFormat: Python {0}
testFormat: 2.12/cloud/{0}/1
targets:
- test: 3.8
- stage: Cloud_2_11
@@ -361,7 +414,6 @@ stages:
nameFormat: Python {0}
testFormat: 2.11/cloud/{0}/1
targets:
- test: 2.7
- test: 3.6
- stage: Cloud_2_10
displayName: Cloud 2.10
@@ -372,7 +424,7 @@ stages:
nameFormat: Python {0}
testFormat: 2.10/cloud/{0}/1
targets:
- test: 3.6
- test: 3.5
- stage: Cloud_2_9
displayName: Cloud 2.9
dependsOn: []
@@ -382,7 +434,7 @@ stages:
nameFormat: Python {0}
testFormat: 2.9/cloud/{0}/1
targets:
- test: 3.6
- test: 2.7
- stage: Summary
condition: succeededOrFailed()
dependsOn:
@@ -390,21 +442,26 @@ stages:
- Sanity_2_9
- Sanity_2_10
- Sanity_2_11
- Sanity_2_12
- Units_devel
- Units_2_9
- Units_2_10
- Units_2_11
- Units_2_12
- Remote_devel
- Remote_2_9
- Remote_2_10
- Remote_2_11
- Remote_2_12
- Docker_devel
- Docker_2_9
- Docker_2_10
- Docker_2_11
- Docker_2_12
- Cloud_devel
- Cloud_2_9
- Cloud_2_10
- Cloud_2_11
- Cloud_2_12
jobs:
- template: templates/coverage.yml
@@ -11,7 +11,7 @@ mkdir "${agent_temp_directory}/coverage/"

options=(--venv --venv-system-site-packages --color -v)

ansible-test coverage combine --export "${agent_temp_directory}/coverage/" "${options[@]}"
ansible-test coverage combine --group-by command --export "${agent_temp_directory}/coverage/" "${options[@]}"

if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then
    # Only analyze coverage if the installed version of ansible-test supports it.
101 .azure-pipelines/scripts/publish-codecov.py (Executable file)
@@ -0,0 +1,101 @@
#!/usr/bin/env python
"""
Upload code coverage reports to codecov.io.
Multiple coverage files from multiple languages are accepted and aggregated after upload.
Python coverage, as well as PowerShell and Python stubs can all be uploaded.
"""

import argparse
import dataclasses
import pathlib
import shutil
import subprocess
import tempfile
import typing as t
import urllib.request


@dataclasses.dataclass(frozen=True)
class CoverageFile:
    name: str
    path: pathlib.Path
    flags: t.List[str]


@dataclasses.dataclass(frozen=True)
class Args:
    dry_run: bool
    path: pathlib.Path


def parse_args() -> Args:
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--dry-run', action='store_true')
    parser.add_argument('path', type=pathlib.Path)

    args = parser.parse_args()

    # Store arguments in a typed dataclass
    fields = dataclasses.fields(Args)
    kwargs = {field.name: getattr(args, field.name) for field in fields}

    return Args(**kwargs)


def process_files(directory: pathlib.Path) -> t.Tuple[CoverageFile, ...]:
    processed = []
    for file in directory.joinpath('reports').glob('coverage*.xml'):
        name = file.stem.replace('coverage=', '')

        # Get flags from name
        flags = name.replace('-powershell', '').split('=')  # Drop '-powershell' suffix
        flags = [flag if not flag.startswith('stub') else flag.split('-')[0] for flag in flags]  # Remove "-01" from stub files

        processed.append(CoverageFile(name, file, flags))

    return tuple(processed)


def upload_files(codecov_bin: pathlib.Path, files: t.Tuple[CoverageFile, ...], dry_run: bool = False) -> None:
    for file in files:
        cmd = [
            str(codecov_bin),
            '--name', file.name,
            '--file', str(file.path),
        ]
        for flag in file.flags:
            cmd.extend(['--flags', flag])

        if dry_run:
            print(f'DRY-RUN: Would run command: {cmd}')
            continue

        subprocess.run(cmd, check=True)


def download_file(url: str, dest: pathlib.Path, flags: int, dry_run: bool = False) -> None:
    if dry_run:
        print(f'DRY-RUN: Would download {url} to {dest} and set mode to {flags:o}')
        return

    with urllib.request.urlopen(url) as resp:
        with dest.open('w+b') as f:
            # Read data in chunks rather than all at once
            shutil.copyfileobj(resp, f, 64 * 1024)

    dest.chmod(flags)


def main():
    args = parse_args()
    url = 'https://ansible-ci-files.s3.amazonaws.com/codecov/linux/codecov'
    with tempfile.TemporaryDirectory(prefix='codecov-') as tmpdir:
        codecov_bin = pathlib.Path(tmpdir) / 'codecov'
        download_file(url, codecov_bin, 0o755, args.dry_run)

        files = process_files(args.path)
        upload_files(codecov_bin, files, args.dry_run)


if __name__ == '__main__':
    main()
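The pipeline invokes this new uploader from templates/coverage.yml as `.azure-pipelines/scripts/publish-codecov.py "$(outputPath)"` (see the coverage.yml hunk further below). As a minimal local sketch, assuming coverage XML reports already sit in a reports/ subdirectory of the given path (the path shown here is only illustrative), a dry run prints the codecov commands without downloading or uploading anything:

    # Preview what would be uploaded; nothing is downloaded or sent in --dry-run mode
    python .azure-pipelines/scripts/publish-codecov.py --dry-run /tmp/coverage-output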
@@ -1,27 +0,0 @@
#!/usr/bin/env bash
# Upload code coverage reports to codecov.io.
# Multiple coverage files from multiple languages are accepted and aggregated after upload.
# Python coverage, as well as PowerShell and Python stubs can all be uploaded.

set -o pipefail -eu

output_path="$1"

curl --silent --show-error https://ansible-ci-files.s3.us-east-1.amazonaws.com/codecov/codecov.sh > codecov.sh

for file in "${output_path}"/reports/coverage*.xml; do
    name="${file}"
    name="${name##*/}"  # remove path
    name="${name##coverage=}"  # remove 'coverage=' prefix if present
    name="${name%.xml}"  # remove '.xml' suffix

    bash codecov.sh \
        -f "${file}" \
        -n "${name}" \
        -X coveragepy \
        -X gcov \
        -X fix \
        -X search \
        -X xcode \
        || echo "Failed to upload code coverage report to codecov.io: ${file}"
done
@@ -12,4 +12,4 @@ if ! ansible-test --help >/dev/null 2>&1; then
    pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
fi

ansible-test coverage xml --stub --venv --venv-system-site-packages --color -v
ansible-test coverage xml --group-by command --stub --venv --venv-system-site-packages --color -v
@@ -33,7 +33,7 @@ jobs:
summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml"
displayName: Publish to Azure Pipelines
condition: gt(variables.coverageFileCount, 0)
- bash: .azure-pipelines/scripts/publish-codecov.sh "$(outputPath)"
- bash: .azure-pipelines/scripts/publish-codecov.py "$(outputPath)"
displayName: Publish to codecov.io
condition: gt(variables.coverageFileCount, 0)
continueOnError: true
35 .github/BOTMETA.yml (vendored)
@@ -1,7 +1,8 @@
notifications: true
automerge: true
files:
plugins/:
supershipit: quidame Ajpantuso
supershipit: quidame
changelogs/: {}
changelogs/fragments/:
support: community
@@ -48,6 +49,9 @@ files:
maintainers: dagwieers
$callbacks/diy.py:
maintainers: theque5t
$callbacks/elastic.py:
maintainers: v1v
keywords: apm observability
$callbacks/hipchat.py: {}
$callbacks/jabber.py: {}
$callbacks/loganalytics.py:
@@ -62,6 +66,9 @@ files:
$callbacks/nrdp.py:
maintainers: rverchere
$callbacks/null.py: {}
$callbacks/opentelemetry.py:
maintainers: v1v
keywords: opentelemetry observability
$callbacks/say.py:
notify: chris-short
maintainers: $team_macos
@@ -131,6 +138,8 @@ files:
$filters/random_mac.py: {}
$filters/time.py:
maintainers: resmo
$filters/unicode_normalize.py:
maintainers: Ajpantuso
$filters/version_sort.py:
maintainers: ericzolf
$inventories/:
@@ -148,8 +157,14 @@ files:
$inventories/nmap.py: {}
$inventories/online.py:
maintainers: sieben
$inventories/opennebula.py:
maintainers: feldsam
labels: cloud opennebula
keywords: opennebula dynamic inventory script
$inventories/proxmox.py:
maintainers: $team_virt ilijamt
$inventories/icinga2.py:
maintainers: bongoeadgc6
$inventories/scaleway.py:
maintainers: $team_scaleway
labels: cloud scaleway
@@ -173,7 +188,7 @@ files:
$lookups/dnstxt.py:
maintainers: jpmens
$lookups/dsv.py:
maintainers: amigus
maintainers: amigus endlesstrax
$lookups/etcd3.py:
maintainers: eric-belhomme
$lookups/etcd.py:
@@ -209,7 +224,7 @@ files:
maintainers: $team_ansible_core jpmens
$lookups/shelvefile.py: {}
$lookups/tss.py:
maintainers: amigus
maintainers: amigus endlesstrax
$module_utils/:
labels: module_utils
$module_utils/gitlab.py:
@@ -458,6 +473,10 @@ files:
maintainers: slok
$modules/database/misc/redis_info.py:
maintainers: levonet
$modules/database/misc/redis_data_info.py:
maintainers: paginabianca
$modules/database/misc/redis_data.py:
maintainers: paginabianca
$modules/database/misc/riak.py:
maintainers: drewkerrigan jsmartin
$modules/database/mssql/mssql_db.py:
@@ -512,6 +531,8 @@ files:
maintainers: kris2kris
$modules/identity/keycloak/keycloak_role.py:
maintainers: laurpaum
$modules/identity/keycloak/keycloak_user_federation.py:
maintainers: laurpaum
$modules/identity/onepassword_info.py:
maintainers: Rylon
$modules/identity/opendj/opendj_backendprop.py:
@@ -740,6 +761,8 @@ files:
ignore: jle64
$modules/packaging/language/pip_package_info.py:
maintainers: bcoca matburt maxamillion
$modules/packaging/language/pipx.py:
maintainers: russoz
$modules/packaging/language/yarn.py:
maintainers: chrishoffman verkaufer
$modules/packaging/os/apk.py:
@@ -1140,6 +1163,10 @@ files:
maintainers: nerzhul
$modules/web_infrastructure/rundeck_project.py:
maintainers: nerzhul
$modules/web_infrastructure/rundeck_job_run.py:
maintainers: phsmith
$modules/web_infrastructure/rundeck_job_executions_info.py:
maintainers: phsmith
$modules/web_infrastructure/sophos_utm/:
maintainers: $team_e_spirit
keywords: sophos utm
@@ -1194,7 +1221,7 @@ macros:
team_cyberark_conjur: jvanderhoof ryanprior
team_e_spirit: MatrixCrawler getjack
team_flatpak: JayKayy oolongbrothers
team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman metanovii
team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman metanovii sh0shin
team_hpux: bcoca davx8342
team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2
team_ipa: Akasurde Nosmoht fxfitz justchris1
194 CHANGELOG.rst
@@ -6,6 +6,198 @@ Community General Release Notes
|
||||
|
||||
This changelog describes changes after version 2.0.0.
|
||||
|
||||
v3.8.2
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular bugfix release.
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- counter_enabled callback plugin - fix output to correctly display host and task counters in serial mode (https://github.com/ansible-collections/community.general/pull/3709).
|
||||
- ldap_search - allow it to be used even in check mode (https://github.com/ansible-collections/community.general/issues/3619).
|
||||
- lvol - allows logical volumes to be created with certain size arguments prefixed with ``+`` to preserve behavior of older versions of this module (https://github.com/ansible-collections/community.general/issues/3665).
|
||||
- nmcli - fixed falsely reported changed status when ``mtu`` is omitted with ``dummy`` connections (https://github.com/ansible-collections/community.general/issues/3612, https://github.com/ansible-collections/community.general/pull/3625).
|
||||
- terraform - fix command options being ignored during planned/plan in function ``build_plan`` such as ``lock`` or ``lock_timeout`` (https://github.com/ansible-collections/community.general/issues/3707, https://github.com/ansible-collections/community.general/pull/3726).
|
||||
- xattr - fix exception caused by ``_run_xattr()`` raising a ``ValueError`` due to a mishandling of base64-encoded value (https://github.com/ansible-collections/community.general/issues/3673).
|
||||
|
||||
v3.8.1
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular bugfix release.
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- gitlab_deploy_key - fix the SSH Deploy Key being deleted accidentally while running task in check mode (https://github.com/ansible-collections/community.general/issues/3621, https://github.com/ansible-collections/community.general/pull/3622).
|
||||
- gitlab_project_members - ``get_project_id`` return the project id by matching ``full_path`` or ``name`` (https://github.com/ansible-collections/community.general/pull/3602).
|
||||
- ipa_* modules - fix environment fallback for ``ipa_host`` option (https://github.com/ansible-collections/community.general/issues/3560).
|
||||
- nmcli - fixed ``dns6`` option handling so that it is treated as a list internally (https://github.com/ansible-collections/community.general/pull/3563).
|
||||
- nmcli - fixed ``ipv4.route-metric`` being in properties of type list (https://github.com/ansible-collections/community.general/pull/3563).
|
||||
- one_image - fix error message when renaming an image (https://github.com/ansible-collections/community.general/pull/3626).
|
||||
- pipx - ``state=inject`` was failing to parse the list of injected packages (https://github.com/ansible-collections/community.general/pull/3611).
|
||||
- pipx - set environment variable ``USE_EMOJI=0`` to prevent errors in platforms that do not support ``UTF-8`` (https://github.com/ansible-collections/community.general/pull/3611).
|
||||
- pkgin - Fix exception encountered when all packages are already installed (https://github.com/ansible-collections/community.general/pull/3583).
|
||||
- proxmox_group_info - fix module crash if a ``group`` parameter is used (https://github.com/ansible-collections/community.general/pull/3649).
|
||||
- redfish_utils module utils - do not attempt to change the boot source override mode if not specified by the user (https://github.com/ansible-collections/community.general/issues/3509/).
|
||||
- redfish_utils module utils - if a manager network property is not specified in the service, attempt to change the requested settings (https://github.com/ansible-collections/community.general/issues/3404/).
|
||||
|
||||
v3.8.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular feature and bugfix release. Please note that this is the last minor 3.x.0 release; afterwards there will only be bugfix releases 3.8.y.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- mail - added the ``ehlohost`` parameter which allows for manual override of the host used in SMTP EHLO (https://github.com/ansible-collections/community.general/pull/3425).
|
||||
- nmcli - the option ``routing_rules4`` can now be specified as a list of strings, instead of as a single string (https://github.com/ansible-collections/community.general/issues/3401).
|
||||
- open-iscsi - adding support for mutual authentication between target and initiator (https://github.com/ansible-collections/community.general/pull/3422).
|
||||
- opentelemetry callback plugin - added option ``enable_from_environment`` to support enabling the plugin only if the given environment variable exists and it is set to true (https://github.com/ansible-collections/community.general/pull/3498).
|
||||
- opentelemetry callback plugin - enriched the stacktrace information with the ``message``, ``exception`` and ``stderr`` fields from the failed task (https://github.com/ansible-collections/community.general/pull/3496).
|
||||
- pkgng - packages being installed (or upgraded) are acted on in one command (per action) (https://github.com/ansible-collections/community.general/issues/2265).
|
||||
- pkgng - status message specifies number of packages installed and/or upgraded separately. Previously, all changes were reported as one count of packages "added" (https://github.com/ansible-collections/community.general/pull/3393).
|
||||
- terraform - add ``parallelism`` parameter (https://github.com/ansible-collections/community.general/pull/3540).
|
||||
- ufw - if ``delete=true`` and ``insert`` option is present, then ``insert`` is now ignored rather than failing with a syntax error (https://github.com/ansible-collections/community.general/pull/3514).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- gitlab_deploy_key - fix idempotency on projects with multiple deploy keys (https://github.com/ansible-collections/community.general/pull/3473).
|
||||
- gitlab_group - avoid passing wrong value for ``require_two_factor_authentication`` on creation when the option has not been specified (https://github.com/ansible-collections/community.general/pull/3453).
|
||||
- gitlab_group_members - ``get_group_id`` return the group ID by matching ``full_path``, ``path`` or ``name`` (https://github.com/ansible-collections/community.general/pull/3400).
|
||||
- jboss - fix the deployment file permission issue when Jboss server is running under non-root user. The deployment file is copied with file content only. The file permission is set to ``440`` and belongs to root user. When the JBoss ``WildFly`` server is running under non-root user, it is unable to read the deployment file (https://github.com/ansible-collections/community.general/pull/3426).
|
||||
- keycloak_authentication - fix bug, the requirement was always on ``DISABLED`` when creating a new authentication flow (https://github.com/ansible-collections/community.general/pull/3330).
|
||||
- keycloak_identity_provider - fix change detection when updating identity provider mappers (https://github.com/ansible-collections/community.general/pull/3538, https://github.com/ansible-collections/community.general/issues/3537).
|
||||
- keycloak_role - quote role name when used in URL path to avoid errors when role names contain special characters (https://github.com/ansible-collections/community.general/issues/3535, https://github.com/ansible-collections/community.general/pull/3536).
|
||||
- logstash callback plugin - replace ``_option`` with ``context.CLIARGS`` to fix the plugin on ansible-base and ansible-core (https://github.com/ansible-collections/community.general/issues/2692).
|
||||
- macports - add ``stdout`` and ``stderr`` to return values (https://github.com/ansible-collections/community.general/issues/3499).
|
||||
- opentelemetry callback plugin - validated the task result exception without crashing. Also simplifying code a bit (https://github.com/ansible-collections/community.general/pull/3450, https://github.com/ansible/ansible/issues/75726).
|
||||
- yaml callback plugin - avoid modifying PyYAML so that other plugins using it on the controller, like the ``to_yaml`` filter, do not produce different output (https://github.com/ansible-collections/community.general/issues/3471, https://github.com/ansible-collections/community.general/pull/3478).
|
||||
- zypper_repository - when an URL to a .repo file was provided in option ``repo=`` and ``state=present`` only the first run was successful, future runs failed due to missing checks prior starting zypper. Usage of ``state=absent`` in combination with a .repo file was not working either (https://github.com/ansible-collections/community.general/issues/1791, https://github.com/ansible-collections/community.general/issues/3466).
|
||||
|
||||
New Plugins
|
||||
-----------
|
||||
|
||||
Callback
|
||||
~~~~~~~~
|
||||
|
||||
- elastic - Create distributed traces for each Ansible task in Elastic APM
|
||||
|
||||
Inventory
|
||||
~~~~~~~~~
|
||||
|
||||
- opennebula - OpenNebula inventory source
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
Cloud
|
||||
~~~~~
|
||||
|
||||
misc
|
||||
^^^^
|
||||
|
||||
- proxmox_tasks_info - Retrieve information about one or more Proxmox VE tasks
|
||||
|
||||
Packaging
|
||||
~~~~~~~~~
|
||||
|
||||
language
|
||||
^^^^^^^^
|
||||
|
||||
- pipx - Manages applications installed with pipx
|
||||
|
||||
Web Infrastructure
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
- rundeck_job_executions_info - Query executions for a Rundeck job
|
||||
- rundeck_job_run - Run a Rundeck job
|
||||
|
||||
v3.7.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular feature and bugfix release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- gitlab_group - add new options ``project_creation_level``, ``auto_devops_enabled``, ``subgroup_creation_level`` (https://github.com/ansible-collections/community.general/pull/3248).
|
||||
- gitlab_group - add new property ``require_two_factor_authentication`` (https://github.com/ansible-collections/community.general/pull/3367).
|
||||
- gitlab_project - add new properties ``ci_config_path`` and ``shared_runners_enabled`` (https://github.com/ansible-collections/community.general/pull/3379).
|
||||
- gitlab_project_members - ``gitlab_user`` can now also be a list of users (https://github.com/ansible-collections/community.general/pull/3319).
|
||||
- gitlab_project_members - added functionality to set all members exactly as given (https://github.com/ansible-collections/community.general/pull/3319).
|
||||
- gitlab_runner - support project-scoped gitlab.com runners registration (https://github.com/ansible-collections/community.general/pull/634).
|
||||
- interfaces_file - minor refactor (https://github.com/ansible-collections/community.general/pull/3328).
|
||||
- ipa_config - add ``ipaselinuxusermaporder`` option to set the SELinux user map order (https://github.com/ansible-collections/community.general/pull/3178).
|
||||
- kernel_blacklist - revamped the module using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/3329).
|
||||
- lxd_container - add ``ignore_volatile_options`` option which allows to disable the behavior that the module ignores options starting with ``volatile.`` (https://github.com/ansible-collections/community.general/pull/3331).
|
||||
- nmcli - add ``gsm`` support (https://github.com/ansible-collections/community.general/pull/3313).
|
||||
- pids - refactor to add support for older ``psutil`` versions to the ``pattern`` option (https://github.com/ansible-collections/community.general/pull/3315).
|
||||
- redfish_command and redfish_config and redfish_utils module utils - add parameter to strip etag of quotes before patch, since some vendors do not properly ``If-Match`` etag with quotes (https://github.com/ansible-collections/community.general/pull/3296).
|
||||
- tss lookup plugin - added ``token`` parameter for token authorization; ``username`` and ``password`` are optional when ``token`` is provided (https://github.com/ansible-collections/community.general/pull/3327).
|
||||
- zpool_facts - minor refactoring (https://github.com/ansible-collections/community.general/pull/3332).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- copr - fix chroot naming issues, ``centos-stream`` changed naming to ``centos-stream-<number>`` (for exmaple ``centos-stream-8``) (https://github.com/ansible-collections/community.general/issues/2084, https://github.com/ansible-collections/community.general/pull/3237).
|
||||
- django_manage - parameters ``apps`` and ``fixtures`` are now splitted instead of being used as a single argument (https://github.com/ansible-collections/community.general/issues/3333).
|
||||
- interfaces_file - no longer reporting change when none happened (https://github.com/ansible-collections/community.general/pull/3328).
|
||||
- linode inventory plugin - fix default value of new option ``ip_style`` (https://github.com/ansible-collections/community.general/issues/3337).
|
||||
- openbsd_pkg - fix crash from ``KeyError`` exception when package installs, but ``pkg_add`` returns with a non-zero exit code (https://github.com/ansible-collections/community.general/pull/3336).
|
||||
- redfish_utils module utils - if given, add account ID of user that should be created to HTTP request (https://github.com/ansible-collections/community.general/pull/3343/).
|
||||
|
||||
New Plugins
|
||||
-----------
|
||||
|
||||
Callback
|
||||
~~~~~~~~
|
||||
|
||||
- opentelemetry - Create distributed traces with OpenTelemetry
|
||||
|
||||
Filter
|
||||
~~~~~~
|
||||
|
||||
- unicode_normalize - Normalizes unicode strings to facilitate comparison of characters with normalized forms
|
||||
|
||||
Inventory
|
||||
~~~~~~~~~
|
||||
|
||||
- icinga2 - Icinga2 inventory source
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
Database
|
||||
~~~~~~~~
|
||||
|
||||
misc
|
||||
^^^^
|
||||
|
||||
- redis_data - Set key value pairs in Redis
|
||||
- redis_data_info - Get value of key in Redis database
|
||||
|
||||
Identity
|
||||
~~~~~~~~
|
||||
|
||||
keycloak
|
||||
^^^^^^^^
|
||||
|
||||
- keycloak_user_federation - Allows administration of Keycloak user federations via Keycloak API
|
||||
|
||||
v3.6.0
|
||||
======
|
||||
|
||||
@@ -725,7 +917,7 @@ Deprecated Features
|
||||
- puppet - deprecated undocumented parameter ``show_diff``, will be removed in 7.0.0. (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- runit - unused parameter ``dist`` marked for deprecation (https://github.com/ansible-collections/community.general/pull/1830).
|
||||
- slackpkg - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- urmpi - deprecated invalid parameter aliases ``update-cache`` and ``no-recommends``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- urpmi - deprecated invalid parameter aliases ``update-cache`` and ``no-recommends``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- xbps - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- xfconf - returning output as facts is deprecated, this will be removed in community.general 4.0.0. Please register the task output in a variable and use it instead. You can already switch to the new behavior now by using the new ``disable_facts`` option (https://github.com/ansible-collections/community.general/pull/1747).
|
||||
|
||||
|
||||
@@ -26,6 +26,7 @@ Also, consider taking up a valuable, reviewed, but abandoned pull request which
* Do not squash your commits and force-push to your branch if not needed. Reviews of your pull request are much easier with individual commits to comprehend the pull request history. All commits of your pull request branch will be squashed into one commit by GitHub upon merge.
* Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the respository checkout.
* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#changelogs-how-to). (You must not include a fragment for new modules or new plugins, except for test and filter plugins. Also you shouldn't include one for docs-only changes. If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) )
* Avoid reformatting unrelated parts of the codebase in your PR. These types of changes will likely be requested for reversion, create additional work for reviewers, and may cause approval to be delayed.

You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst).

@@ -42,7 +43,12 @@ Creating new modules and plugins requires a bit more work than other Pull Reques
1. Please make sure that your new module or plugin is of interest to a larger audience. Very specialized modules or plugins that
can only be used by very few people should better be added to more specialized collections.

2. When creating a new module or plugin, please make sure that you follow various guidelines:
2. Please do not add more than one plugin/module in one PR, especially if it is the first plugin/module you are contributing.
That makes it easier for reviewers, and increases the chance that your PR will get merged. If you plan to contribute a group
of plugins/modules (say, more than a module and a corresponding ``_info`` module), please mention that in the first PR. In
such cases, you also have to think whether it is better to publish the group of plugins/modules in a new collection.

3. When creating a new module or plugin, please make sure that you follow various guidelines:

- Follow [development conventions](https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_best_practices.html);
- Follow [documentation standards](https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_documenting.html) and
@@ -52,7 +58,7 @@ Creating new modules and plugins requires a bit more work than other Pull Reques
- Make sure that new plugins and modules have tests (unit tests, integration tests, or both); it is preferable to have some tests
which run in CI.

3. For modules and action plugins, make sure to create your module/plugin in the correct subdirectory, and create a symbolic link
4. For modules and action plugins, make sure to create your module/plugin in the correct subdirectory, and create a symbolic link
from `plugins/modules/` respectively `plugins/action/` to the actual module/plugin code. (Other plugin types should not use
subdirectories.)

@@ -60,7 +66,7 @@ Creating new modules and plugins requires a bit more work than other Pull Reques
(`DOCUMENTATION`, `EXAMPLES` and `RETURN`). The module must have the same name and directory path in `plugins/modules/`
than the action plugin has in `plugins/action/`.

4. Make sure to add a BOTMETA entry for your new module/plugin in `.github/BOTMETA.yml`. Search for other plugins/modules in the
5. Make sure to add a BOTMETA entry for your new module/plugin in `.github/BOTMETA.yml`. Search for other plugins/modules in the
same directory to see how entries could look. You should list all authors either as `maintainers` or under `ignore`. People
listed as `maintainers` will be pinged for new issues and PRs that modify the module/plugin or its tests.
33 README.md
@@ -17,7 +17,7 @@ If you encounter abusive behavior violating the [Ansible Code of Conduct](https:

## Tested with Ansible

Tested with the current Ansible 2.9, ansible-base 2.10 and ansible-core 2.11 releases and the current development version of ansible-core. Ansible versions before 2.9.10 are not supported.
Tested with the current Ansible 2.9, ansible-base 2.10, ansible-core 2.11, ansible-core 2.12 releases and the current development version of ansible-core. Ansible versions before 2.9.10 are not supported.

## External requirements

@@ -76,7 +76,21 @@ Also for some notes specific to this collection see [our CONTRIBUTING documentat

See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#testing-collections).

### Communication
## Collection maintenance

To learn how to maintain / become a maintainer of this collection, refer to:

* [Committer guidelines](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md).
* [Maintainer guidelines](https://github.com/ansible/community-docs/blob/main/maintaining.rst).

It is necessary for maintainers of this collection to be subscribed to:

* The collection itself (the `Watch` button → `All Activity` in the upper right corner of the repository's homepage).
* The "Changes Impacting Collection Contributors and Maintainers" [issue](https://github.com/ansible-collections/overview/issues/45).

They also should be subscribed to Ansible's [The Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn).

## Communication

We announce important development changes and releases through Ansible's [The Bullhorn newsletter](https://eepurl.com/gZmiEP). If you are a collection developer, be sure you are subscribed.

@@ -86,16 +100,11 @@ We take part in the global quarterly [Ansible Contributor Summit](https://github

For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community).

For more information about communication, refer to the [Ansible communication guide](https://docs.ansible.com/ansible/devel/community/communication.html).
For more information about communication, refer to Ansible's the [Communication guide](https://docs.ansible.com/ansible/devel/community/communication.html).

### Publishing New Version
## Publishing New Version

Basic instructions without release branches:

1. Create `changelogs/fragments/<version>.yml` with `release_summary:` section (which must be a string, not a list).
2. Run `antsibull-changelog release --collection-flatmap yes`
3. Make sure `CHANGELOG.rst` and `changelogs/changelog.yaml` are added to git, and the deleted fragments have been removed.
4. Tag the commit with `<version>`. Push changes and tag to the main repository.
See the [Releasing guidelines](https://github.com/ansible/community-docs/blob/main/releasing_collections.rst) to learn how to release this collection.

## Release notes

@@ -103,10 +112,10 @@ See the [changelog](https://github.com/ansible-collections/community.general/blo

## Roadmap

See [this issue](https://github.com/ansible-collections/community.general/issues/582) for information on releasing, versioning and deprecation.

In general, we plan to release a major version every six months, and minor versions every two months. Major versions can contain breaking changes, while minor versions only contain new features and bugfixes.

See [this issue](https://github.com/ansible-collections/community.general/issues/582) for information on releasing, versioning, and deprecation.

## More information

- [Ansible Collection overview](https://github.com/ansible-collections/overview)
@@ -286,7 +286,7 @@ releases:
|
||||
- runit - unused parameter ``dist`` marked for deprecation (https://github.com/ansible-collections/community.general/pull/1830).
|
||||
- slackpkg - deprecated invalid parameter alias ``update-cache``, will be removed
|
||||
in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- urmpi - deprecated invalid parameter aliases ``update-cache`` and ``no-recommends``,
|
||||
- urpmi - deprecated invalid parameter aliases ``update-cache`` and ``no-recommends``,
|
||||
will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- xbps - deprecated invalid parameter alias ``update-cache``, will be removed
|
||||
in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
@@ -1747,3 +1747,260 @@ releases:
|
||||
name: keycloak_identity_provider
|
||||
namespace: identity.keycloak
|
||||
release_date: '2021-08-31'
|
||||
3.7.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- copr - fix chroot naming issues, ``centos-stream`` changed naming to ``centos-stream-<number>``
|
||||
(for exmaple ``centos-stream-8``) (https://github.com/ansible-collections/community.general/issues/2084,
|
||||
https://github.com/ansible-collections/community.general/pull/3237).
|
||||
- django_manage - parameters ``apps`` and ``fixtures`` are now splitted instead
|
||||
of being used as a single argument (https://github.com/ansible-collections/community.general/issues/3333).
|
||||
- interfaces_file - no longer reporting change when none happened (https://github.com/ansible-collections/community.general/pull/3328).
|
||||
- linode inventory plugin - fix default value of new option ``ip_style`` (https://github.com/ansible-collections/community.general/issues/3337).
|
||||
- openbsd_pkg - fix crash from ``KeyError`` exception when package installs,
|
||||
but ``pkg_add`` returns with a non-zero exit code (https://github.com/ansible-collections/community.general/pull/3336).
|
||||
- redfish_utils module utils - if given, add account ID of user that should
|
||||
be created to HTTP request (https://github.com/ansible-collections/community.general/pull/3343/).
|
||||
minor_changes:
|
||||
- gitlab_group - add new options ``project_creation_level``, ``auto_devops_enabled``,
|
||||
``subgroup_creation_level`` (https://github.com/ansible-collections/community.general/pull/3248).
|
||||
- gitlab_group - add new property ``require_two_factor_authentication`` (https://github.com/ansible-collections/community.general/pull/3367).
|
||||
- gitlab_project - add new properties ``ci_config_path`` and ``shared_runners_enabled``
|
||||
(https://github.com/ansible-collections/community.general/pull/3379).
|
||||
- gitlab_project_members - ``gitlab_user`` can now also be a list of users (https://github.com/ansible-collections/community.general/pull/3319).
|
||||
- gitlab_project_members - added functionality to set all members exactly as
|
||||
given (https://github.com/ansible-collections/community.general/pull/3319).
|
||||
- gitlab_runner - support project-scoped gitlab.com runners registration (https://github.com/ansible-collections/community.general/pull/634).
|
||||
- interfaces_file - minor refactor (https://github.com/ansible-collections/community.general/pull/3328).
|
||||
- ipa_config - add ``ipaselinuxusermaporder`` option to set the SELinux user
|
||||
map order (https://github.com/ansible-collections/community.general/pull/3178).
|
||||
- kernel_blacklist - revamped the module using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/3329).
|
||||
- lxd_container - add ``ignore_volatile_options`` option which allows to disable
|
||||
the behavior that the module ignores options starting with ``volatile.`` (https://github.com/ansible-collections/community.general/pull/3331).
|
||||
- nmcli - add ``gsm`` support (https://github.com/ansible-collections/community.general/pull/3313).
|
||||
- pids - refactor to add support for older ``psutil`` versions to the ``pattern``
|
||||
option (https://github.com/ansible-collections/community.general/pull/3315).
|
||||
- redfish_command and redfish_config and redfish_utils module utils - add parameter
|
||||
to strip etag of quotes before patch, since some vendors do not properly ``If-Match``
|
||||
etag with quotes (https://github.com/ansible-collections/community.general/pull/3296).
|
||||
- tss lookup plugin - added ``token`` parameter for token authorization; ``username``
|
||||
and ``password`` are optional when ``token`` is provided (https://github.com/ansible-collections/community.general/pull/3327).
|
||||
- zpool_facts - minor refactoring (https://github.com/ansible-collections/community.general/pull/3332).
|
||||
release_summary: Regular feature and bugfix release.
|
||||
fragments:
|
||||
- 3.7.0.yml
|
||||
- 3178-add-ipaselinuxusermaporder-to-ipa-config-module.yml
|
||||
- 3237-copr-fix_chroot_naming.yml
|
||||
- 3248-adds-few-more-gitlab-group-options.yml
|
||||
- 3296-clean-etag.yaml
|
||||
- 3313-nmcli-add_gsm_support.yml
|
||||
- 3315-pids-refactor.yml
|
||||
- 3319-gitlab_project_members_enhancement.yml
|
||||
- 3327-tss-token-authorization.yml
|
||||
- 3328-interfaces_file-improvements.yaml
|
||||
- 3329-kernel_blacklist-improvements.yaml
|
||||
- 3331-do_not_ignore_volatile_configs_by_option.yml
|
||||
- 3332-zpool_facts-pythonify.yaml
|
||||
- 3334-django_manage-split-params.yaml
|
||||
- 3336-openbsd_pkg-fix-KeyError.yml
|
||||
- 3337-linode-fix.yml
|
||||
- 3343-redfish_utils-addUser-userId.yml
|
||||
- 3359-add-unicode_normalize-filter.yml
|
||||
- 3367-add-require_two_factor_authentication-property-to-gitlab-group.yml
|
||||
- 3379-gitlab_project-ci_cd_properties.yml
|
||||
- 634-gitlab_project_runners.yaml
|
||||
modules:
|
||||
- description: Allows administration of Keycloak user federations via Keycloak
|
||||
API
|
||||
name: keycloak_user_federation
|
||||
namespace: identity.keycloak
|
||||
- description: Set key value pairs in Redis
|
||||
name: redis_data
|
||||
namespace: database.misc
|
||||
- description: Get value of key in Redis database
|
||||
name: redis_data_info
|
||||
namespace: database.misc
|
||||
plugins:
|
||||
callback:
|
||||
- description: Create distributed traces with OpenTelemetry
|
||||
name: opentelemetry
|
||||
namespace: null
|
||||
filter:
|
||||
- description: Normalizes unicode strings to facilitate comparison of characters
|
||||
with normalized forms
|
||||
name: unicode_normalize
|
||||
namespace: null
|
||||
inventory:
|
||||
- description: Icinga2 inventory source
|
||||
name: icinga2
|
||||
namespace: null
|
||||
release_date: '2021-09-21'
|
||||
3.8.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- gitlab_deploy_key - fix idempotency on projects with multiple deploy keys
|
||||
(https://github.com/ansible-collections/community.general/pull/3473).
|
||||
- gitlab_group - avoid passing wrong value for ``require_two_factor_authentication``
|
||||
on creation when the option has not been specified (https://github.com/ansible-collections/community.general/pull/3453).
|
||||
- gitlab_group_members - ``get_group_id`` return the group ID by matching ``full_path``,
|
||||
``path`` or ``name`` (https://github.com/ansible-collections/community.general/pull/3400).
|
||||
- jboss - fix the deployment file permission issue when Jboss server is running
|
||||
under non-root user. The deployment file is copied with file content only.
|
||||
The file permission is set to ``440`` and belongs to root user. When the JBoss
|
||||
``WildFly`` server is running under non-root user, it is unable to read the
|
||||
deployment file (https://github.com/ansible-collections/community.general/pull/3426).
|
||||
- keycloak_authentication - fix bug, the requirement was always on ``DISABLED``
|
||||
when creating a new authentication flow (https://github.com/ansible-collections/community.general/pull/3330).
|
||||
- keycloak_identity_provider - fix change detection when updating identity provider
|
||||
mappers (https://github.com/ansible-collections/community.general/pull/3538,
|
||||
https://github.com/ansible-collections/community.general/issues/3537).
|
||||
- keycloak_role - quote role name when used in URL path to avoid errors when
|
||||
role names contain special characters (https://github.com/ansible-collections/community.general/issues/3535,
|
||||
https://github.com/ansible-collections/community.general/pull/3536).
|
||||
- logstash callback plugin - replace ``_option`` with ``context.CLIARGS`` to
|
||||
fix the plugin on ansible-base and ansible-core (https://github.com/ansible-collections/community.general/issues/2692).
|
||||
- macports - add ``stdout`` and ``stderr`` to return values (https://github.com/ansible-collections/community.general/issues/3499).
|
||||
- opentelemetry callback plugin - validated the task result exception without
|
||||
crashing. Also simplifying code a bit (https://github.com/ansible-collections/community.general/pull/3450,
|
||||
https://github.com/ansible/ansible/issues/75726).
|
||||
- yaml callback plugin - avoid modifying PyYAML so that other plugins using
|
||||
it on the controller, like the ``to_yaml`` filter, do not produce different
|
||||
output (https://github.com/ansible-collections/community.general/issues/3471,
|
||||
https://github.com/ansible-collections/community.general/pull/3478).
|
||||
- zypper_repository - when an URL to a .repo file was provided in option ``repo=``
|
||||
and ``state=present`` only the first run was successful, future runs failed
|
||||
due to missing checks prior starting zypper. Usage of ``state=absent`` in
|
||||
combination with a .repo file was not working either (https://github.com/ansible-collections/community.general/issues/1791,
|
||||
https://github.com/ansible-collections/community.general/issues/3466).
|
||||
minor_changes:
|
||||
- mail - added the ``ehlohost`` parameter which allows for manual override of
|
||||
the host used in SMTP EHLO (https://github.com/ansible-collections/community.general/pull/3425).
|
||||
- nmcli - the option ``routing_rules4`` can now be specified as a list of strings,
|
||||
instead of as a single string (https://github.com/ansible-collections/community.general/issues/3401).
|
||||
- open-iscsi - adding support for mutual authentication between target and initiator
|
||||
(https://github.com/ansible-collections/community.general/pull/3422).
|
||||
- opentelemetry callback plugin - added option ``enable_from_environment`` to
|
||||
support enabling the plugin only if the given environment variable exists
|
||||
and it is set to true (https://github.com/ansible-collections/community.general/pull/3498).
|
||||
- opentelemetry callback plugin - enriched the stacktrace information with the
|
||||
``message``, ``exception`` and ``stderr`` fields from the failed task (https://github.com/ansible-collections/community.general/pull/3496).
|
||||
- pkgng - packages being installed (or upgraded) are acted on in one command
|
||||
(per action) (https://github.com/ansible-collections/community.general/issues/2265).
|
||||
- pkgng - status message specifies number of packages installed and/or upgraded
|
||||
separately. Previously, all changes were reported as one count of packages
|
||||
"added" (https://github.com/ansible-collections/community.general/pull/3393).
|
||||
- terraform - add ``parallelism`` parameter (https://github.com/ansible-collections/community.general/pull/3540).
|
||||
- ufw - if ``delete=true`` and ``insert`` option is present, then ``insert``
|
||||
is now ignored rather than failing with a syntax error (https://github.com/ansible-collections/community.general/pull/3514).
|
||||
release_summary: Regular feature and bugfix release. Please note that this is
|
||||
the last minor 3.x.0 release; afterwards there will only be bugfix releases
|
||||
3.8.y.
|
||||
  fragments:
  - 2692-logstash-callback-plugin-replacing_options.yml
  - 3.8.0.yml
  - 3330-bugfix-keycloak-authentication-flow-requirements-not-set-correctly.yml.yml
  - 3393-pkgng-many_packages_one_command.yml
  - 3400-fix-gitLab-api-searches-always-return-first-found-match-3386.yml
  - 3401-nmcli-needs-type.yml
  - 3422-open-iscsi-mutual-authentication-support.yaml
  - 3425-mail_add_configurable_ehlo_hostname.yml
  - 3426-copy-permissions-along-with-file-for-jboss-module.yml
  - 3450-callback_opentelemetry-exception_handling.yml
  - 3453-fix-gitlab_group-require_two_factor_authentication-cant_be_null.yml
  - 3473-gitlab_deploy_key-fix_idempotency.yml
  - 3474-zypper_repository_improve_repo_file_idempotency.yml
  - 3478-yaml-callback.yml
  - 3496-callback_opentelemetry-enrich_stacktraces.yml
  - 3498-callback_opentelemetry-only_in_ci.yml
  - 3500-macports-add-stdout-and-stderr-to-status.yaml
  - 3514-ufw_insert_or_delete_biased_when_deletion_enabled.yml
  - 3536-quote-role-name-in-url.yml
  - 3538-fix-keycloak-idp-mappers-change-detection.yml
  - 3540-terraform_add_parallelism_parameter.yml
  modules:
  - description: Manages applications installed with pipx
    name: pipx
    namespace: packaging.language
  - description: Retrieve information about one or more Proxmox VE tasks
    name: proxmox_tasks_info
    namespace: cloud.misc
  - description: Query executions for a Rundeck job
    name: rundeck_job_executions_info
    namespace: web_infrastructure
  - description: Run a Rundeck job
    name: rundeck_job_run
    namespace: web_infrastructure
  plugins:
    callback:
    - description: Create distributed traces for each Ansible task in Elastic APM
      name: elastic
      namespace: null
    inventory:
    - description: OpenNebula inventory source
      name: opennebula
      namespace: null
  release_date: '2021-10-12'
3.8.1:
  changes:
    bugfixes:
    - gitlab_deploy_key - fix the SSH Deploy Key being deleted accidentally while
      running the task in check mode (https://github.com/ansible-collections/community.general/issues/3621,
      https://github.com/ansible-collections/community.general/pull/3622).
    - gitlab_project_members - ``get_project_id`` returns the project id by matching
      ``full_path`` or ``name`` (https://github.com/ansible-collections/community.general/pull/3602).
    - ipa_* modules - fix environment fallback for the ``ipa_host`` option (https://github.com/ansible-collections/community.general/issues/3560).
    - nmcli - fixed ``dns6`` option handling so that it is treated as a list internally
      (https://github.com/ansible-collections/community.general/pull/3563).
    - nmcli - fixed ``ipv4.route-metric`` being in properties of type list (https://github.com/ansible-collections/community.general/pull/3563).
    - one_image - fix error message when renaming an image (https://github.com/ansible-collections/community.general/pull/3626).
    - pipx - ``state=inject`` was failing to parse the list of injected packages
      (https://github.com/ansible-collections/community.general/pull/3611).
    - pipx - set environment variable ``USE_EMOJI=0`` to prevent errors on platforms
      that do not support ``UTF-8`` (https://github.com/ansible-collections/community.general/pull/3611).
    - pkgin - fix exception encountered when all packages are already installed
      (https://github.com/ansible-collections/community.general/pull/3583).
    - proxmox_group_info - fix module crash if a ``group`` parameter is used (https://github.com/ansible-collections/community.general/pull/3649).
    - redfish_utils module utils - do not attempt to change the boot source override
      mode if not specified by the user (https://github.com/ansible-collections/community.general/issues/3509/).
    - redfish_utils module utils - if a manager network property is not specified
      in the service, attempt to change the requested settings (https://github.com/ansible-collections/community.general/issues/3404/).
    release_summary: Regular bugfix release.
  fragments:
  - 3.8.1.yml
  - 3404-redfish_utils-skip-manager-network-check.yml
  - 3509-redfish_utils-SetOneTimeBoot-mode-fix.yml
  - 3561-fix-ipa-host-var-detection.yml
  - 3563-nmcli-ipv6_dns.yaml
  - 3583-fix-pkgin-exception.yml
  - 3602-fix-gitlab_project_members-improve-search-method.yml
  - 3611-pipx-fix-inject.yml
  - 3622-fix-gitlab-deploy-key-check-mode.yml
  - 3626-fix-one_image-error.yml
  - 3649-proxmox_group_info_TypeError.yml
  release_date: '2021-11-02'
3.8.2:
  changes:
    bugfixes:
    - counter_enabled callback plugin - fix output to correctly display host and
      task counters in serial mode (https://github.com/ansible-collections/community.general/pull/3709).
    - ldap_search - allow it to be used even in check mode (https://github.com/ansible-collections/community.general/issues/3619).
    - lvol - allows logical volumes to be created with certain size arguments prefixed
      with ``+`` to preserve behavior of older versions of this module (https://github.com/ansible-collections/community.general/issues/3665).
    - nmcli - fixed falsely reported changed status when ``mtu`` is omitted with
      ``dummy`` connections (https://github.com/ansible-collections/community.general/issues/3612,
      https://github.com/ansible-collections/community.general/pull/3625).
    - terraform - fix command options being ignored during planned/plan in function
      ``build_plan`` such as ``lock`` or ``lock_timeout`` (https://github.com/ansible-collections/community.general/issues/3707,
      https://github.com/ansible-collections/community.general/pull/3726).
    - xattr - fix exception caused by ``_run_xattr()`` raising a ``ValueError``
      due to a mishandling of base64-encoded value (https://github.com/ansible-collections/community.general/issues/3673).
    release_summary: Regular bugfix release.
  fragments:
  - 3.8.2.yml
  - 3625-nmcli_false_changed_mtu_fix.yml
  - 3667-ldap_search.yml
  - 3675-xattr-handle-base64-values.yml
  - 3681-lvol-fix-create.yml
  - 3709-support-batch-mode.yml
  - 3726-terraform-missing-parameters-planned-fix.yml
  release_date: '2021-11-23'

@@ -69,5 +69,6 @@ Individuals who have been asked to become a part of this group have generally be
| ------------------- | -------------------- | ------------------ | -------------------- |
| Alexei Znamensky    | russoz               | russoz             |                      |
| Andrew Klychkov     | andersson007         | andersson007_      |                      |
| Andrew Pantuso      | Ajpantuso            | ajpantuso          |                      |
| Felix Fontein       | felixfontein         | felixfontein       |                      |
| John R Barker       | gundalow             | gundalow           |                      |

@@ -751,3 +751,34 @@ To extract ports from all clusters with name containing 'server1':

    server_name_query: "domain.server[?contains(name,'server1')].port"

.. note:: When using ``starts_with`` and ``contains``, you have to use the ``to_json | from_json`` filter for correct parsing of the data structure.
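
A task combining that query with the ``to_json | from_json`` workaround could look like the following sketch (``domain_definition`` is an illustrative variable holding the data structure):

.. code-block:: yaml+jinja

    - name: Display ports from all clusters with name containing 'server1'
      ansible.builtin.debug:
        var: item
      loop: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}"
      vars:
        server_name_query: "domain.server[?contains(name,'server1')].port"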

Working with Unicode
--------------------

`Unicode <https://unicode.org/main.html>`_ makes it possible to produce two strings which may be visually equivalent, but are composed of distinctly different characters/character sequences. To address this, ``Unicode`` defines `normalization forms <https://unicode.org/reports/tr15/>`_ which avoid these distinctions by choosing a unique character sequence for a given visual representation.

You can use the ``community.general.unicode_normalize`` filter to normalize ``Unicode`` strings within your playbooks.

.. code-block:: yaml+jinja

    - name: Compare Unicode representations
      debug:
        msg: "{{ with_combining_character | community.general.unicode_normalize == without_combining_character }}"
      vars:
        with_combining_character: "{{ 'Mayagu\u0308ez' }}"
        without_combining_character: Mayagüez

This produces:

.. code-block:: ansible-output

    TASK [Compare Unicode representations] ********************************************************
    ok: [localhost] => {
        "msg": true
    }

The ``community.general.unicode_normalize`` filter accepts a keyword argument to select the ``Unicode`` form used to normalize the input string.

:form: One of ``'NFC'`` (default), ``'NFD'``, ``'NFKC'``, or ``'NFKD'``. See the `Unicode reference <https://unicode.org/reports/tr15/>`_ for more information.

.. versionadded:: 3.7.0
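
For example, the compatibility forms also fold compatibility characters such as ligatures; the following sketch (the input string is illustrative) renders the ligature ``ﬁ`` as the two characters ``fi``:

.. code-block:: yaml+jinja

    - name: Normalize using the NFKC form
      debug:
        msg: "{{ 'ﬁle' | community.general.unicode_normalize(form='NFKC') }}"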
@@ -1,6 +1,6 @@
namespace: community
name: general
version: 3.6.0
version: 3.8.2
readme: README.md
authors:
- Ansible (https://github.com/ansible)

@@ -45,6 +45,8 @@ class CallbackModule(CallbackBase):
    _task_total = 0
    _host_counter = 1
    _host_total = 0
    _current_batch_total = 0
    _previous_batch_total = 0

    def __init__(self):
        super(CallbackModule, self).__init__()
@@ -76,8 +78,11 @@ class CallbackModule(CallbackBase):
        self._display.banner(msg)
        self._play = play

        self._previous_batch_total = self._current_batch_total
        self._current_batch_total = self._previous_batch_total + len(self._all_vars()['vars']['ansible_play_batch'])
        self._host_total = len(self._all_vars()['vars']['ansible_play_hosts_all'])
        self._task_total = len(self._play.get_tasks()[0])
        self._task_counter = 1

    def v2_playbook_on_stats(self, stats):
        self._display.banner("PLAY RECAP")
@@ -145,7 +150,7 @@ class CallbackModule(CallbackBase):
        path = task.get_path()
        if path:
            self._display.display("task path: %s" % path, color=C.COLOR_DEBUG)
        self._host_counter = 0
        self._host_counter = self._previous_batch_total
        self._task_counter += 1

    def v2_runner_on_ok(self, result):

408 plugins/callback/elastic.py Normal file
@@ -0,0 +1,408 @@
|
||||
# (C) 2021, Victor Martinez <VictorMartinezRubio@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Victor Martinez (@v1v) <VictorMartinezRubio@gmail.com>
|
||||
name: elastic
|
||||
type: notification
|
||||
short_description: Create distributed traces for each Ansible task in Elastic APM
|
||||
version_added: 3.8.0
|
||||
description:
|
||||
- This callback creates distributed traces for each Ansible task in Elastic APM.
|
||||
- You can configure the plugin with environment variables.
|
||||
- See U(https://www.elastic.co/guide/en/apm/agent/python/current/configuration.html).
|
||||
options:
|
||||
hide_task_arguments:
|
||||
default: false
|
||||
type: bool
|
||||
description:
|
||||
- Hide the arguments for a task.
|
||||
env:
|
||||
- name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS
|
||||
apm_service_name:
|
||||
default: ansible
|
||||
type: str
|
||||
description:
|
||||
- The service name resource attribute.
|
||||
env:
|
||||
- name: ELASTIC_APM_SERVICE_NAME
|
||||
apm_server_url:
|
||||
type: str
|
||||
description:
|
||||
- Use the APM server and its environment variables.
|
||||
env:
|
||||
- name: ELASTIC_APM_SERVER_URL
|
||||
apm_secret_token:
|
||||
type: str
|
||||
description:
|
||||
- Use the APM server token
|
||||
env:
|
||||
- name: ELASTIC_APM_SECRET_TOKEN
|
||||
apm_api_key:
|
||||
type: str
|
||||
description:
|
||||
- Use the APM API key
|
||||
env:
|
||||
- name: ELASTIC_APM_API_KEY
|
||||
apm_verify_server_cert:
|
||||
default: true
|
||||
type: bool
|
||||
description:
|
||||
- Verifies the SSL certificate when using an HTTPS connection.
|
||||
env:
|
||||
- name: ELASTIC_APM_VERIFY_SERVER_CERT
|
||||
traceparent:
|
||||
type: str
|
||||
description:
|
||||
- The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header).
|
||||
env:
|
||||
- name: TRACEPARENT
|
||||
requirements:
|
||||
- elastic-apm (Python library)
|
||||
'''
|
||||
|
||||
|
||||
EXAMPLES = '''
|
||||
examples: |
|
||||
Enable the plugin in ansible.cfg:
|
||||
[defaults]
|
||||
callbacks_enabled = community.general.elastic
|
||||
|
||||
Set the environment variable:
|
||||
export ELASTIC_APM_SERVER_URL=<your APM server URL>
|
||||
export ELASTIC_APM_SERVICE_NAME=your_service_name
|
||||
export ELASTIC_APM_API_KEY=your_APM_API_KEY
|
||||
'''
|
||||
|
||||
import getpass
|
||||
import socket
|
||||
import time
|
||||
import uuid
|
||||
|
||||
from collections import OrderedDict
|
||||
from os.path import basename
|
||||
|
||||
from ansible.errors import AnsibleError, AnsibleRuntimeError
|
||||
from ansible.module_utils.six import raise_from
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
|
||||
try:
|
||||
from elasticapm import Client, capture_span, trace_parent_from_string, instrument, label
|
||||
except ImportError as imp_exc:
|
||||
ELASTIC_LIBRARY_IMPORT_ERROR = imp_exc
|
||||
else:
|
||||
ELASTIC_LIBRARY_IMPORT_ERROR = None
|
||||
|
||||
|
||||
class TaskData:
|
||||
"""
|
||||
Data about an individual task.
|
||||
"""
|
||||
|
||||
def __init__(self, uuid, name, path, play, action, args):
|
||||
self.uuid = uuid
|
||||
self.name = name
|
||||
self.path = path
|
||||
self.play = play
|
||||
self.host_data = OrderedDict()
|
||||
self.start = time.time()
|
||||
self.action = action
|
||||
self.args = args
|
||||
|
||||
def add_host(self, host):
|
||||
if host.uuid in self.host_data:
|
||||
if host.status == 'included':
|
||||
# concatenate task include output from multiple items
|
||||
host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result)
|
||||
else:
|
||||
return
|
||||
|
||||
self.host_data[host.uuid] = host
|
||||
|
||||
|
||||
class HostData:
|
||||
"""
|
||||
Data about an individual host.
|
||||
"""
|
||||
|
||||
def __init__(self, uuid, name, status, result):
|
||||
self.uuid = uuid
|
||||
self.name = name
|
||||
self.status = status
|
||||
self.result = result
|
||||
self.finish = time.time()
|
||||
|
||||
|
||||
class ElasticSource(object):
|
||||
def __init__(self, display):
|
||||
self.ansible_playbook = ""
|
||||
self.ansible_version = None
|
||||
self.session = str(uuid.uuid4())
|
||||
self.host = socket.gethostname()
|
||||
try:
|
||||
self.ip_address = socket.gethostbyname(socket.gethostname())
|
||||
except Exception as e:
|
||||
self.ip_address = None
|
||||
self.user = getpass.getuser()
|
||||
|
||||
self._display = display
|
||||
|
||||
def start_task(self, tasks_data, hide_task_arguments, play_name, task):
|
||||
""" record the start of a task for one or more hosts """
|
||||
|
||||
uuid = task._uuid
|
||||
|
||||
if uuid in tasks_data:
|
||||
return
|
||||
|
||||
name = task.get_name().strip()
|
||||
path = task.get_path()
|
||||
action = task.action
|
||||
args = None
|
||||
|
||||
if not task.no_log and not hide_task_arguments:
|
||||
args = ', '.join(('%s=%s' % a for a in task.args.items()))
|
||||
|
||||
tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args)
|
||||
|
||||
def finish_task(self, tasks_data, status, result):
|
||||
""" record the results of a task for a single host """
|
||||
|
||||
task_uuid = result._task._uuid
|
||||
|
||||
if hasattr(result, '_host') and result._host is not None:
|
||||
host_uuid = result._host._uuid
|
||||
host_name = result._host.name
|
||||
else:
|
||||
host_uuid = 'include'
|
||||
host_name = 'include'
|
||||
|
||||
task = tasks_data[task_uuid]
|
||||
|
||||
if self.ansible_version is None and result._task_fields['args'].get('_ansible_version'):
|
||||
self.ansible_version = result._task_fields['args'].get('_ansible_version')
|
||||
|
||||
task.add_host(HostData(host_uuid, host_name, status, result))
|
||||
|
||||
def generate_distributed_traces(self, tasks_data, status, end_time, traceparent, apm_service_name,
|
||||
apm_server_url, apm_verify_server_cert, apm_secret_token, apm_api_key):
|
||||
""" generate distributed traces from the collected TaskData and HostData """
|
||||
|
||||
tasks = []
|
||||
parent_start_time = None
|
||||
for task_uuid, task in tasks_data.items():
|
||||
if parent_start_time is None:
|
||||
parent_start_time = task.start
|
||||
tasks.append(task)
|
||||
|
||||
apm_cli = self.init_apm_client(apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key)
|
||||
if apm_cli:
|
||||
instrument() # Only call this once, as early as possible.
|
||||
if traceparent:
|
||||
parent = trace_parent_from_string(traceparent)
|
||||
apm_cli.begin_transaction("Session", trace_parent=parent, start=parent_start_time)
|
||||
else:
|
||||
apm_cli.begin_transaction("Session", start=parent_start_time)
|
||||
# Populate trace metadata attributes
|
||||
if self.ansible_version is not None:
|
||||
label(ansible_version=self.ansible_version)
|
||||
label(ansible_session=self.session, ansible_host_name=self.host, ansible_host_user=self.user)
|
||||
if self.ip_address is not None:
|
||||
label(ansible_host_ip=self.ip_address)
|
||||
|
||||
for task_data in tasks:
|
||||
for host_uuid, host_data in task_data.host_data.items():
|
||||
self.create_span_data(apm_cli, task_data, host_data)
|
||||
|
||||
apm_cli.end_transaction(name=__name__, result=status, duration=end_time - parent_start_time)
|
||||
|
||||
def create_span_data(self, apm_cli, task_data, host_data):
|
||||
""" create the span with the given TaskData and HostData """
|
||||
|
||||
name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
|
||||
|
||||
message = "success"
|
||||
status = "success"
|
||||
if host_data.status == 'included':
|
||||
rc = 0
|
||||
else:
|
||||
res = host_data.result._result
|
||||
rc = res.get('rc', 0)
|
||||
if host_data.status == 'failed':
|
||||
if res.get('exception') is not None:
|
||||
message = res['exception'].strip().split('\n')[-1]
|
||||
elif 'msg' in res:
|
||||
message = res['msg']
|
||||
else:
|
||||
message = 'failed'
|
||||
status = "failure"
|
||||
elif host_data.status == 'skipped':
|
||||
if 'skip_reason' in res:
|
||||
message = res['skip_reason']
|
||||
else:
|
||||
message = 'skipped'
|
||||
status = "unknown"
|
||||
|
||||
with capture_span(task_data.name,
|
||||
start=task_data.start,
|
||||
span_type="ansible.task.run",
|
||||
duration=host_data.finish - task_data.start,
|
||||
labels={"ansible.task.args": task_data.args,
|
||||
"ansible.task.message": message,
|
||||
"ansible.task.module": task_data.action,
|
||||
"ansible.task.name": name,
|
||||
"ansible.task.result": rc,
|
||||
"ansible.task.host.name": host_data.name,
|
||||
"ansible.task.host.status": host_data.status}) as span:
|
||||
span.outcome = status
|
||||
if 'failure' in status:
|
||||
exception = AnsibleRuntimeError(message="{0}: {1} failed with error message {2}".format(task_data.action, name, message))
|
||||
apm_cli.capture_exception(exc_info=(type(exception), exception, exception.__traceback__), handled=True)
|
||||
|
||||
def init_apm_client(self, apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key):
|
||||
if apm_server_url:
|
||||
return Client(service_name=apm_service_name,
|
||||
server_url=apm_server_url,
|
||||
verify_server_cert=apm_verify_server_cert,
|
||||
secret_token=apm_secret_token,
|
||||
api_key=apm_api_key,
|
||||
use_elastic_traceparent_header=True,
|
||||
debug=True)
|
||||
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
"""
|
||||
This callback creates distributed traces with Elastic APM.
|
||||
"""
|
||||
|
||||
CALLBACK_VERSION = 2.0
|
||||
CALLBACK_TYPE = 'notification'
|
||||
CALLBACK_NAME = 'community.general.elastic'
|
||||
CALLBACK_NEEDS_ENABLED = True
|
||||
|
||||
def __init__(self, display=None):
|
||||
super(CallbackModule, self).__init__(display=display)
|
||||
self.hide_task_arguments = None
|
||||
self.apm_service_name = None
|
||||
self.ansible_playbook = None
|
||||
self.traceparent = False
|
||||
self.play_name = None
|
||||
self.tasks_data = None
|
||||
self.errors = 0
|
||||
self.disabled = False
|
||||
|
||||
if ELASTIC_LIBRARY_IMPORT_ERROR:
|
||||
raise_from(
|
||||
AnsibleError('The `elastic-apm` Python library must be installed to use this plugin'),
|
||||
ELASTIC_LIBRARY_IMPORT_ERROR)
|
||||
|
||||
self.tasks_data = OrderedDict()
|
||||
|
||||
self.elastic = ElasticSource(display=self._display)
|
||||
|
||||
def set_options(self, task_keys=None, var_options=None, direct=None):
|
||||
super(CallbackModule, self).set_options(task_keys=task_keys,
|
||||
var_options=var_options,
|
||||
direct=direct)
|
||||
|
||||
self.hide_task_arguments = self.get_option('hide_task_arguments')
|
||||
|
||||
self.apm_service_name = self.get_option('apm_service_name')
|
||||
if not self.apm_service_name:
|
||||
self.apm_service_name = 'ansible'
|
||||
|
||||
self.apm_server_url = self.get_option('apm_server_url')
|
||||
self.apm_secret_token = self.get_option('apm_secret_token')
|
||||
self.apm_api_key = self.get_option('apm_api_key')
|
||||
self.apm_verify_server_cert = self.get_option('apm_verify_server_cert')
|
||||
self.traceparent = self.get_option('traceparent')
|
||||
|
||||
def v2_playbook_on_start(self, playbook):
|
||||
self.ansible_playbook = basename(playbook._file_name)
|
||||
|
||||
def v2_playbook_on_play_start(self, play):
|
||||
self.play_name = play.get_name()
|
||||
|
||||
def v2_runner_on_no_hosts(self, task):
|
||||
self.elastic.start_task(
|
||||
self.tasks_data,
|
||||
self.hide_task_arguments,
|
||||
self.play_name,
|
||||
task
|
||||
)
|
||||
|
||||
def v2_playbook_on_task_start(self, task, is_conditional):
|
||||
self.elastic.start_task(
|
||||
self.tasks_data,
|
||||
self.hide_task_arguments,
|
||||
self.play_name,
|
||||
task
|
||||
)
|
||||
|
||||
def v2_playbook_on_cleanup_task_start(self, task):
|
||||
self.elastic.start_task(
|
||||
self.tasks_data,
|
||||
self.hide_task_arguments,
|
||||
self.play_name,
|
||||
task
|
||||
)
|
||||
|
||||
def v2_playbook_on_handler_task_start(self, task):
|
||||
self.elastic.start_task(
|
||||
self.tasks_data,
|
||||
self.hide_task_arguments,
|
||||
self.play_name,
|
||||
task
|
||||
)
|
||||
|
||||
def v2_runner_on_failed(self, result, ignore_errors=False):
|
||||
self.errors += 1
|
||||
self.elastic.finish_task(
|
||||
self.tasks_data,
|
||||
'failed',
|
||||
result
|
||||
)
|
||||
|
||||
def v2_runner_on_ok(self, result):
|
||||
self.elastic.finish_task(
|
||||
self.tasks_data,
|
||||
'ok',
|
||||
result
|
||||
)
|
||||
|
||||
def v2_runner_on_skipped(self, result):
|
||||
self.elastic.finish_task(
|
||||
self.tasks_data,
|
||||
'skipped',
|
||||
result
|
||||
)
|
||||
|
||||
def v2_playbook_on_include(self, included_file):
|
||||
self.elastic.finish_task(
|
||||
self.tasks_data,
|
||||
'included',
|
||||
included_file
|
||||
)
|
||||
|
||||
def v2_playbook_on_stats(self, stats):
|
||||
if self.errors == 0:
|
||||
status = "success"
|
||||
else:
|
||||
status = "failure"
|
||||
self.elastic.generate_distributed_traces(
|
||||
self.tasks_data,
|
||||
status,
|
||||
time.time(),
|
||||
self.traceparent,
|
||||
self.apm_service_name,
|
||||
self.apm_server_url,
|
||||
self.apm_verify_server_cert,
|
||||
self.apm_secret_token,
|
||||
self.apm_api_key
|
||||
)
|
||||
|
||||
def v2_runner_on_async_failed(self, result, **kwargs):
|
||||
self.errors += 1
|
||||
@@ -94,6 +94,7 @@ ansible.cfg: |
|
||||
|
||||
import os
|
||||
import json
|
||||
from ansible import context
|
||||
import socket
|
||||
import uuid
|
||||
import logging
|
||||
@@ -152,11 +153,11 @@ class CallbackModule(CallbackBase):
|
||||
self.base_data['ansible_pre_command_output'] = os.popen(
|
||||
self.ls_pre_command).read()
|
||||
|
||||
if self._options is not None:
|
||||
self.base_data['ansible_checkmode'] = self._options.check
|
||||
self.base_data['ansible_tags'] = self._options.tags
|
||||
self.base_data['ansible_skip_tags'] = self._options.skip_tags
|
||||
self.base_data['inventory'] = self._options.inventory
|
||||
if context.CLIARGS is not None:
|
||||
self.base_data['ansible_checkmode'] = context.CLIARGS.get('check')
|
||||
self.base_data['ansible_tags'] = context.CLIARGS.get('tags')
|
||||
self.base_data['ansible_skip_tags'] = context.CLIARGS.get('skip_tags')
|
||||
self.base_data['inventory'] = context.CLIARGS.get('inventory')
|
||||
|
||||
def set_options(self, task_keys=None, var_options=None, direct=None):
|
||||
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
|
||||
|
||||
437 plugins/callback/opentelemetry.py Normal file
@@ -0,0 +1,437 @@
|
||||
# (C) 2021, Victor Martinez <VictorMartinezRubio@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Victor Martinez (@v1v) <VictorMartinezRubio@gmail.com>
|
||||
name: opentelemetry
|
||||
type: notification
|
||||
short_description: Create distributed traces with OpenTelemetry
|
||||
version_added: 3.7.0
|
||||
description:
|
||||
- This callback creates distributed traces for each Ansible task with OpenTelemetry.
|
||||
- You can configure the OpenTelemetry exporter and SDK with environment variables.
|
||||
- See U(https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html).
|
||||
- See U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#opentelemetry-sdk-environment-variables).
|
||||
options:
|
||||
hide_task_arguments:
|
||||
default: false
|
||||
type: bool
|
||||
description:
|
||||
- Hide the arguments for a task.
|
||||
env:
|
||||
- name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS
|
||||
enable_from_environment:
|
||||
type: str
|
||||
description:
|
||||
- Whether to enable this callback only if the given environment variable exists and it is set to C(true).
|
||||
- This is handy when you use Configuration as Code and want to send distributed traces
|
||||
if running in the CI rather than when running Ansible locally.
|
||||
- To do so, it evaluates the given I(enable_from_environment) value as the name of an environment variable,
and the plugin is only enabled if that variable is set to C(true).
|
||||
env:
|
||||
- name: ANSIBLE_OPENTELEMETRY_ENABLE_FROM_ENVIRONMENT
|
||||
version_added: 3.8.0
|
||||
otel_service_name:
|
||||
default: ansible
|
||||
type: str
|
||||
description:
|
||||
- The service name resource attribute.
|
||||
env:
|
||||
- name: OTEL_SERVICE_NAME
|
||||
traceparent:
|
||||
default: None
|
||||
type: str
|
||||
description:
|
||||
- The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header).
|
||||
env:
|
||||
- name: TRACEPARENT
|
||||
requirements:
|
||||
- opentelemetry-api (Python library)
|
||||
- opentelemetry-exporter-otlp (Python library)
|
||||
- opentelemetry-sdk (Python library)
|
||||
'''
|
||||
|
||||
|
||||
EXAMPLES = '''
|
||||
examples: |
|
||||
Enable the plugin in ansible.cfg:
|
||||
[defaults]
|
||||
callbacks_enabled = community.general.opentelemetry
|
||||
|
||||
Set the environment variable:
|
||||
export OTEL_EXPORTER_OTLP_ENDPOINT=<your endpoint (OTLP/HTTP)>
|
||||
export OTEL_EXPORTER_OTLP_HEADERS="authorization=Bearer your_otel_token"
|
||||
export OTEL_SERVICE_NAME=your_service_name
|
||||
'''
|
||||
|
||||
import getpass
|
||||
import os
|
||||
import socket
|
||||
import sys
|
||||
import time
|
||||
import uuid
|
||||
|
||||
from collections import OrderedDict
|
||||
from os.path import basename
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils.six import raise_from
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
|
||||
try:
|
||||
from opentelemetry import trace
|
||||
from opentelemetry.trace import SpanKind
|
||||
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
|
||||
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
|
||||
from opentelemetry.trace.status import Status, StatusCode
|
||||
from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
|
||||
from opentelemetry.sdk.trace import TracerProvider
|
||||
from opentelemetry.sdk.trace.export import (
|
||||
ConsoleSpanExporter,
|
||||
SimpleSpanProcessor,
|
||||
BatchSpanProcessor
|
||||
)
|
||||
from opentelemetry.util._time import _time_ns
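# Note: _time_ns is used below as a fallback on Python < 3.7, where time.time_ns() is not available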
|
||||
except ImportError as imp_exc:
|
||||
OTEL_LIBRARY_IMPORT_ERROR = imp_exc
|
||||
else:
|
||||
OTEL_LIBRARY_IMPORT_ERROR = None
|
||||
|
||||
|
||||
class TaskData:
|
||||
"""
|
||||
Data about an individual task.
|
||||
"""
|
||||
|
||||
def __init__(self, uuid, name, path, play, action, args):
|
||||
self.uuid = uuid
|
||||
self.name = name
|
||||
self.path = path
|
||||
self.play = play
|
||||
self.host_data = OrderedDict()
|
||||
if sys.version_info >= (3, 7):
|
||||
self.start = time.time_ns()
|
||||
else:
|
||||
self.start = _time_ns()
|
||||
self.action = action
|
||||
self.args = args
|
||||
|
||||
def add_host(self, host):
|
||||
if host.uuid in self.host_data:
|
||||
if host.status == 'included':
|
||||
# concatenate task include output from multiple items
|
||||
host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result)
|
||||
else:
|
||||
return
|
||||
|
||||
self.host_data[host.uuid] = host
|
||||
|
||||
|
||||
class HostData:
|
||||
"""
|
||||
Data about an individual host.
|
||||
"""
|
||||
|
||||
def __init__(self, uuid, name, status, result):
|
||||
self.uuid = uuid
|
||||
self.name = name
|
||||
self.status = status
|
||||
self.result = result
|
||||
if sys.version_info >= (3, 7):
|
||||
self.finish = time.time_ns()
|
||||
else:
|
||||
self.finish = _time_ns()
|
||||
|
||||
|
||||
class OpenTelemetrySource(object):
|
||||
def __init__(self, display):
|
||||
self.ansible_playbook = ""
|
||||
self.ansible_version = None
|
||||
self.session = str(uuid.uuid4())
|
||||
self.host = socket.gethostname()
|
||||
try:
|
||||
self.ip_address = socket.gethostbyname(socket.gethostname())
|
||||
except Exception as e:
|
||||
self.ip_address = None
|
||||
self.user = getpass.getuser()
|
||||
|
||||
self._display = display
|
||||
|
||||
def traceparent_context(self, traceparent):
|
||||
carrier = dict()
|
||||
carrier['traceparent'] = traceparent
|
||||
return TraceContextTextMapPropagator().extract(carrier=carrier)
|
||||
|
||||
def start_task(self, tasks_data, hide_task_arguments, play_name, task):
|
||||
""" record the start of a task for one or more hosts """
|
||||
|
||||
uuid = task._uuid
|
||||
|
||||
if uuid in tasks_data:
|
||||
return
|
||||
|
||||
name = task.get_name().strip()
|
||||
path = task.get_path()
|
||||
action = task.action
|
||||
args = None
|
||||
|
||||
if not task.no_log and not hide_task_arguments:
|
||||
args = ', '.join(('%s=%s' % a for a in task.args.items()))
|
||||
|
||||
tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args)
|
||||
|
||||
def finish_task(self, tasks_data, status, result):
|
||||
""" record the results of a task for a single host """
|
||||
|
||||
task_uuid = result._task._uuid
|
||||
|
||||
if hasattr(result, '_host') and result._host is not None:
|
||||
host_uuid = result._host._uuid
|
||||
host_name = result._host.name
|
||||
else:
|
||||
host_uuid = 'include'
|
||||
host_name = 'include'
|
||||
|
||||
task = tasks_data[task_uuid]
|
||||
|
||||
if self.ansible_version is None and result._task_fields['args'].get('_ansible_version'):
|
||||
self.ansible_version = result._task_fields['args'].get('_ansible_version')
|
||||
|
||||
task.add_host(HostData(host_uuid, host_name, status, result))
|
||||
|
||||
def generate_distributed_traces(self, otel_service_name, ansible_playbook, tasks_data, status, traceparent):
|
||||
""" generate distributed traces from the collected TaskData and HostData """
|
||||
|
||||
tasks = []
|
||||
parent_start_time = None
|
||||
for task_uuid, task in tasks_data.items():
|
||||
if parent_start_time is None:
|
||||
parent_start_time = task.start
|
||||
tasks.append(task)
|
||||
|
||||
trace.set_tracer_provider(
|
||||
TracerProvider(
|
||||
resource=Resource.create({SERVICE_NAME: otel_service_name})
|
||||
)
|
||||
)
|
||||
|
||||
processor = BatchSpanProcessor(OTLPSpanExporter())
|
||||
|
||||
trace.get_tracer_provider().add_span_processor(processor)
|
||||
|
||||
tracer = trace.get_tracer(__name__)
|
||||
|
||||
with tracer.start_as_current_span(ansible_playbook, context=self.traceparent_context(traceparent),
|
||||
start_time=parent_start_time, kind=SpanKind.SERVER) as parent:
|
||||
parent.set_status(status)
|
||||
# Populate trace metadata attributes
|
||||
if self.ansible_version is not None:
|
||||
parent.set_attribute("ansible.version", self.ansible_version)
|
||||
parent.set_attribute("ansible.session", self.session)
|
||||
parent.set_attribute("ansible.host.name", self.host)
|
||||
if self.ip_address is not None:
|
||||
parent.set_attribute("ansible.host.ip", self.ip_address)
|
||||
parent.set_attribute("ansible.host.user", self.user)
|
||||
for task in tasks:
|
||||
for host_uuid, host_data in task.host_data.items():
|
||||
with tracer.start_as_current_span(task.name, start_time=task.start, end_on_exit=False) as span:
|
||||
self.update_span_data(task, host_data, span)
|
||||
|
||||
def update_span_data(self, task_data, host_data, span):
|
||||
""" update the span with the given TaskData and HostData """
|
||||
|
||||
name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
|
||||
|
||||
message = 'success'
|
||||
status = Status(status_code=StatusCode.OK)
|
||||
if host_data.status == 'included':
|
||||
rc = 0
|
||||
else:
|
||||
res = host_data.result._result
|
||||
rc = res.get('rc', 0)
|
||||
if host_data.status == 'failed':
|
||||
message = self.get_error_message(res)
|
||||
status = Status(status_code=StatusCode.ERROR, description=message)
|
||||
# Record an exception with the task message
|
||||
span.record_exception(BaseException(self.enrich_error_message(res)))
|
||||
elif host_data.status == 'skipped':
|
||||
if 'skip_reason' in res:
|
||||
message = res['skip_reason']
|
||||
else:
|
||||
message = 'skipped'
|
||||
status = Status(status_code=StatusCode.UNSET)
|
||||
|
||||
span.set_status(status)
|
||||
self.set_span_attribute(span, "ansible.task.args", task_data.args)
|
||||
self.set_span_attribute(span, "ansible.task.module", task_data.action)
|
||||
self.set_span_attribute(span, "ansible.task.message", message)
|
||||
self.set_span_attribute(span, "ansible.task.name", name)
|
||||
self.set_span_attribute(span, "ansible.task.result", rc)
|
||||
self.set_span_attribute(span, "ansible.task.host.name", host_data.name)
|
||||
self.set_span_attribute(span, "ansible.task.host.status", host_data.status)
|
||||
span.end(end_time=host_data.finish)
|
||||
|
||||
def set_span_attribute(self, span, attributeName, attributeValue):
|
||||
""" update the span attribute with the given attribute and value if not None """
|
||||
|
||||
if span is None and self._display is not None:
|
||||
self._display.warning('span object is None. Please double check if that is expected.')
|
||||
else:
|
||||
if attributeValue is not None:
|
||||
span.set_attribute(attributeName, attributeValue)
|
||||
|
||||
@staticmethod
|
||||
def get_error_message(result):
|
||||
if result.get('exception') is not None:
|
||||
return OpenTelemetrySource._last_line(result['exception'])
|
||||
return result.get('msg', 'failed')
|
||||
|
||||
@staticmethod
|
||||
def _last_line(text):
|
||||
lines = text.strip().split('\n')
|
||||
return lines[-1]
|
||||
|
||||
@staticmethod
|
||||
def enrich_error_message(result):
|
||||
message = result.get('msg', 'failed')
|
||||
exception = result.get('exception')
|
||||
stderr = result.get('stderr')
|
||||
return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr)
|
||||
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
"""
|
||||
This callback creates distributed traces.
|
||||
"""
|
||||
|
||||
CALLBACK_VERSION = 2.0
|
||||
CALLBACK_TYPE = 'notification'
|
||||
CALLBACK_NAME = 'community.general.opentelemetry'
|
||||
CALLBACK_NEEDS_ENABLED = True
|
||||
|
||||
def __init__(self, display=None):
|
||||
super(CallbackModule, self).__init__(display=display)
|
||||
self.hide_task_arguments = None
|
||||
self.otel_service_name = None
|
||||
self.ansible_playbook = None
|
||||
self.play_name = None
|
||||
self.tasks_data = None
|
||||
self.errors = 0
|
||||
self.disabled = False
|
||||
self.traceparent = False
|
||||
|
||||
if OTEL_LIBRARY_IMPORT_ERROR:
|
||||
raise_from(
|
||||
AnsibleError('The `opentelemetry-api`, `opentelemetry-exporter-otlp` and `opentelemetry-sdk` Python libraries must be installed to use this plugin'),
|
||||
OTEL_LIBRARY_IMPORT_ERROR)
|
||||
|
||||
self.tasks_data = OrderedDict()
|
||||
|
||||
self.opentelemetry = OpenTelemetrySource(display=self._display)
|
||||
|
||||
def set_options(self, task_keys=None, var_options=None, direct=None):
|
||||
super(CallbackModule, self).set_options(task_keys=task_keys,
|
||||
var_options=var_options,
|
||||
direct=direct)
|
||||
|
||||
environment_variable = self.get_option('enable_from_environment')
|
||||
if environment_variable is not None and os.environ.get(environment_variable, 'false').lower() != 'true':
|
||||
self.disabled = True
|
||||
self._display.warning("The `enable_from_environment` option has been set and {0} is not enabled. "
|
||||
"Disabling the `opentelemetry` callback plugin.".format(environment_variable))
|
||||
|
||||
self.hide_task_arguments = self.get_option('hide_task_arguments')
|
||||
|
||||
self.otel_service_name = self.get_option('otel_service_name')
|
||||
|
||||
if not self.otel_service_name:
|
||||
self.otel_service_name = 'ansible'
|
||||
|
||||
# See https://github.com/open-telemetry/opentelemetry-specification/issues/740
|
||||
self.traceparent = self.get_option('traceparent')
|
||||
|
||||
def v2_playbook_on_start(self, playbook):
|
||||
self.ansible_playbook = basename(playbook._file_name)
|
||||
|
||||
def v2_playbook_on_play_start(self, play):
|
||||
self.play_name = play.get_name()
|
||||
|
||||
def v2_runner_on_no_hosts(self, task):
|
||||
self.opentelemetry.start_task(
|
||||
self.tasks_data,
|
||||
self.hide_task_arguments,
|
||||
self.play_name,
|
||||
task
|
||||
)
|
||||
|
||||
def v2_playbook_on_task_start(self, task, is_conditional):
|
||||
self.opentelemetry.start_task(
|
||||
self.tasks_data,
|
||||
self.hide_task_arguments,
|
||||
self.play_name,
|
||||
task
|
||||
)
|
||||
|
||||
def v2_playbook_on_cleanup_task_start(self, task):
|
||||
self.opentelemetry.start_task(
|
||||
self.tasks_data,
|
||||
self.hide_task_arguments,
|
||||
self.play_name,
|
||||
task
|
||||
)
|
||||
|
||||
def v2_playbook_on_handler_task_start(self, task):
|
||||
self.opentelemetry.start_task(
|
||||
self.tasks_data,
|
||||
self.hide_task_arguments,
|
||||
self.play_name,
|
||||
task
|
||||
)
|
||||
|
||||
def v2_runner_on_failed(self, result, ignore_errors=False):
|
||||
self.errors += 1
|
||||
self.opentelemetry.finish_task(
|
||||
self.tasks_data,
|
||||
'failed',
|
||||
result
|
||||
)
|
||||
|
||||
def v2_runner_on_ok(self, result):
|
||||
self.opentelemetry.finish_task(
|
||||
self.tasks_data,
|
||||
'ok',
|
||||
result
|
||||
)
|
||||
|
||||
def v2_runner_on_skipped(self, result):
|
||||
self.opentelemetry.finish_task(
|
||||
self.tasks_data,
|
||||
'skipped',
|
||||
result
|
||||
)
|
||||
|
||||
def v2_playbook_on_include(self, included_file):
|
||||
self.opentelemetry.finish_task(
|
||||
self.tasks_data,
|
||||
'included',
|
||||
included_file
|
||||
)
|
||||
|
||||
def v2_playbook_on_stats(self, stats):
|
||||
if self.errors == 0:
|
||||
status = Status(status_code=StatusCode.OK)
|
||||
else:
|
||||
status = Status(status_code=StatusCode.ERROR)
|
||||
self.opentelemetry.generate_distributed_traces(
|
||||
self.otel_service_name,
|
||||
self.ansible_playbook,
|
||||
self.tasks_data,
|
||||
status,
|
||||
self.traceparent
|
||||
)
|
||||
|
||||
def v2_runner_on_async_failed(self, result, **kwargs):
|
||||
self.errors += 1
|
||||
@@ -42,28 +42,29 @@ def should_use_block(value):
|
||||
return False
|
||||
|
||||
|
||||
def my_represent_scalar(self, tag, value, style=None):
|
||||
"""Uses block style for multi-line strings"""
|
||||
if style is None:
|
||||
if should_use_block(value):
|
||||
style = '|'
|
||||
# we care more about readable than accuracy, so...
|
||||
# ...no trailing space
|
||||
value = value.rstrip()
|
||||
# ...and non-printable characters
|
||||
value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
|
||||
# ...tabs prevent blocks from expanding
|
||||
value = value.expandtabs()
|
||||
# ...and odd bits of whitespace
|
||||
value = re.sub(r'[\x0b\x0c\r]', '', value)
|
||||
# ...as does trailing space
|
||||
value = re.sub(r' +\n', '\n', value)
|
||||
else:
|
||||
style = self.default_style
|
||||
node = yaml.representer.ScalarNode(tag, value, style=style)
|
||||
if self.alias_key is not None:
|
||||
self.represented_objects[self.alias_key] = node
|
||||
return node
|
||||
class MyDumper(AnsibleDumper):
|
||||
def represent_scalar(self, tag, value, style=None):
|
||||
"""Uses block style for multi-line strings"""
|
||||
if style is None:
|
||||
if should_use_block(value):
|
||||
style = '|'
|
||||
# we care more about readable than accuracy, so...
|
||||
# ...no trailing space
|
||||
value = value.rstrip()
|
||||
# ...and non-printable characters
|
||||
value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
|
||||
# ...tabs prevent blocks from expanding
|
||||
value = value.expandtabs()
|
||||
# ...and odd bits of whitespace
|
||||
value = re.sub(r'[\x0b\x0c\r]', '', value)
|
||||
# ...as does trailing space
|
||||
value = re.sub(r' +\n', '\n', value)
|
||||
else:
|
||||
style = self.default_style
|
||||
node = yaml.representer.ScalarNode(tag, value, style=style)
|
||||
if self.alias_key is not None:
|
||||
self.represented_objects[self.alias_key] = node
|
||||
return node
|
||||
|
||||
|
||||
class CallbackModule(Default):
|
||||
@@ -79,7 +80,6 @@ class CallbackModule(Default):
|
||||
|
||||
def __init__(self):
|
||||
super(CallbackModule, self).__init__()
|
||||
yaml.representer.BaseRepresenter.represent_scalar = my_represent_scalar
|
||||
|
||||
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
|
||||
if result.get('_ansible_no_log', False):
|
||||
@@ -121,7 +121,7 @@ class CallbackModule(Default):
|
||||
|
||||
if abridged_result:
|
||||
dumped += '\n'
|
||||
dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False))
|
||||
dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=MyDumper, default_flow_style=False))
|
||||
|
||||
# indent by a couple of spaces
|
||||
dumped = '\n '.join(dumped.split('\n')).rstrip()
|
||||
|
||||
57 plugins/doc_fragments/redis.py Normal file
@@ -0,0 +1,57 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Andreas Botzner <andreas at botzner dot com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
class ModuleDocFragment(object):
|
||||
# Common parameters for Redis modules
|
||||
DOCUMENTATION = r'''
|
||||
options:
|
||||
login_host:
|
||||
description:
|
||||
- Specify the target host running the database.
|
||||
default: localhost
|
||||
type: str
|
||||
login_port:
|
||||
description:
|
||||
- Specify the port to connect to.
|
||||
default: 6379
|
||||
type: int
|
||||
login_user:
|
||||
description:
|
||||
- Specify the user to authenticate with.
|
||||
- Requires L(redis,https://pypi.org/project/redis) >= 3.4.0.
|
||||
type: str
|
||||
login_password:
|
||||
description:
|
||||
- Specify the password to authenticate with.
|
||||
- Usually not used when target is localhost.
|
||||
type: str
|
||||
tls:
|
||||
description:
|
||||
- Specify whether or not to use TLS for the connection.
|
||||
type: bool
|
||||
default: true
|
||||
validate_certs:
|
||||
description:
|
||||
- Specify whether or not to validate TLS certificates.
|
||||
- This should only be turned off for personally controlled sites or with
|
||||
C(localhost) as target.
|
||||
type: bool
|
||||
default: true
|
||||
ca_certs:
|
||||
description:
|
||||
- Path to root certificates file. If not set and I(tls) is
|
||||
set to C(true), certifi ca-certificates will be used.
|
||||
type: str
|
||||
requirements: [ "redis", "certifi" ]
|
||||
|
||||
notes:
|
||||
- Requires the C(redis) Python package on the remote host. You can
|
||||
install it with pip (C(pip install redis)) or with a package manager.
|
||||
Information on the library can be found at U(https://github.com/andymccurdy/redis-py).
|
||||
'''
|
||||
31 plugins/doc_fragments/rundeck.py Normal file
@@ -0,0 +1,31 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Phillipe Smith <phsmithcc@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
class ModuleDocFragment(object):
|
||||
|
||||
# Standard files documentation fragment
|
||||
DOCUMENTATION = r'''
|
||||
options:
|
||||
url:
|
||||
type: str
|
||||
description:
|
||||
- Rundeck instance URL.
|
||||
required: true
|
||||
api_version:
|
||||
type: int
|
||||
description:
|
||||
- Rundeck API version to be used.
|
||||
- API version must be at least 14.
|
||||
default: 39
|
||||
api_token:
|
||||
type: str
|
||||
description:
|
||||
- Rundeck User API Token.
|
||||
required: true
|
||||
'''
|
||||
40 plugins/filter/unicode_normalize.py Normal file
@@ -0,0 +1,40 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) <ajpantuso@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
from unicodedata import normalize
|
||||
|
||||
from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError
|
||||
from ansible.module_utils.six import text_type
|
||||
|
||||
|
||||
def unicode_normalize(data, form='NFC'):
|
||||
"""Applies normalization to 'unicode' strings.
|
||||
|
||||
Args:
|
||||
data: A unicode string piped into the Jinja filter
|
||||
form: One of ('NFC', 'NFD', 'NFKC', 'NFKD').
|
||||
See https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize for more information.
|
||||
|
||||
Returns:
|
||||
A normalized unicode string of the specified 'form'.
|
||||
"""
|
||||
|
||||
if not isinstance(data, text_type):
|
||||
raise AnsibleFilterTypeError("%s is not a valid input type" % type(data))
|
||||
|
||||
if form not in ('NFC', 'NFD', 'NFKC', 'NFKD'):
|
||||
raise AnsibleFilterError("%s is not a valid form" % form)
|
||||
|
||||
return normalize(form, data)
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {
|
||||
'unicode_normalize': unicode_normalize,
|
||||
}
|
||||
222 plugins/inventory/icinga2.py Normal file
@@ -0,0 +1,222 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2021, Cliff Hults <cliff.hlts@gmail.com>
|
||||
# Copyright (c) 2021 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: icinga2
|
||||
short_description: Icinga2 inventory source
|
||||
version_added: 3.7.0
|
||||
author:
|
||||
- Cliff Hults (@BongoEADGC6) <cliff.hults@gmail.com>
|
||||
description:
|
||||
- Get inventory hosts from the Icinga2 API.
|
||||
- "Uses a configuration file as an inventory source, it must end in
|
||||
C(.icinga2.yml) or C(.icinga2.yaml)."
|
||||
options:
|
||||
plugin:
|
||||
description: Name of the plugin.
|
||||
required: true
|
||||
type: string
|
||||
choices: ['community.general.icinga2']
|
||||
url:
|
||||
description: Root URL of Icinga2 API.
|
||||
type: string
|
||||
required: true
|
||||
user:
|
||||
description: Username to query the API.
|
||||
type: string
|
||||
required: true
|
||||
password:
|
||||
description: Password to query the API.
|
||||
type: string
|
||||
required: true
|
||||
host_filter:
|
||||
description: An Icinga2 API valid host filter.
|
||||
type: string
|
||||
required: false
|
||||
validate_certs:
|
||||
description: Enables or disables SSL certificate verification.
|
||||
type: boolean
|
||||
default: true
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
# my.icinga2.yml
|
||||
plugin: community.general.icinga2
|
||||
url: http://localhost:5665
|
||||
user: ansible
|
||||
password: secure
|
||||
host_filter: \"linux-servers\" in host.groups
|
||||
validate_certs: false
|
||||
'''
|
||||
|
||||
import json
|
||||
|
||||
from ansible.errors import AnsibleParserError
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||
from ansible.module_utils.urls import open_url
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
''' Host inventory parser for ansible using Icinga2 as source. '''
|
||||
|
||||
NAME = 'community.general.icinga2'
|
||||
|
||||
def __init__(self):
|
||||
|
||||
super(InventoryModule, self).__init__()
|
||||
|
||||
# from config
|
||||
self.icinga2_url = None
|
||||
self.icinga2_user = None
|
||||
self.icinga2_password = None
|
||||
self.ssl_verify = None
|
||||
self.host_filter = None
|
||||
|
||||
self.cache_key = None
|
||||
self.use_cache = None
|
||||
|
||||
def verify_file(self, path):
|
||||
valid = False
|
||||
if super(InventoryModule, self).verify_file(path):
|
||||
if path.endswith(('icinga2.yaml', 'icinga2.yml')):
|
||||
valid = True
|
||||
else:
|
||||
self.display.vvv('Skipping due to inventory source not ending in "icinga2.yaml" nor "icinga2.yml"')
|
||||
return valid
|
||||
|
||||
def _api_connect(self):
|
||||
self.headers = {
|
||||
'User-Agent': "ansible-icinga2-inv",
|
||||
'Accept': "application/json",
|
||||
}
|
||||
api_status_url = self.icinga2_url + "/status"
|
||||
request_args = {
|
||||
'headers': self.headers,
|
||||
'url_username': self.icinga2_user,
|
||||
'url_password': self.icinga2_password,
|
||||
'validate_certs': self.ssl_verify
|
||||
}
|
||||
open_url(api_status_url, **request_args)
|
||||
|
||||
def _post_request(self, request_url, data=None):
|
||||
self.display.vvv("Requested URL: %s" % request_url)
|
||||
request_args = {
|
||||
'headers': self.headers,
|
||||
'url_username': self.icinga2_user,
|
||||
'url_password': self.icinga2_password,
|
||||
'validate_certs': self.ssl_verify
|
||||
}
|
||||
if data is not None:
|
||||
request_args['data'] = json.dumps(data)
|
||||
self.display.vvv("Request Args: %s" % request_args)
|
||||
response = open_url(request_url, **request_args)
|
||||
response_body = response.read()
|
||||
json_data = json.loads(response_body.decode('utf-8'))
|
||||
if 200 <= response.status <= 299:
|
||||
return json_data
|
||||
if response.status == 404 and json_data['status'] == "No objects found.":
|
||||
raise AnsibleParserError(
|
||||
"API returned no data -- Response: %s - %s"
|
||||
% (response.status, json_data['status']))
|
||||
if response.status == 401:
|
||||
raise AnsibleParserError(
|
||||
"API was unable to complete query -- Response: %s - %s"
|
||||
% (response.status, json_data['status']))
|
||||
if response.status == 500:
|
||||
raise AnsibleParserError(
|
||||
"API Response - %s - %s"
|
||||
% (json_data['status'], json_data['errors']))
|
||||
raise AnsibleParserError(
|
||||
"Unexpected data returned - %s - %s"
|
||||
% (json_data['status'], json_data['errors']))
|
||||
|
||||
def _query_hosts(self, hosts=None, attrs=None, joins=None, host_filter=None):
|
||||
query_hosts_url = "{0}/objects/hosts".format(self.icinga2_url)
|
||||
self.headers['X-HTTP-Method-Override'] = 'GET'
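# The Icinga2 API expects the host filter in the request body, so the query is sent
# as a POST with the method overridden to GET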
|
||||
data_dict = dict()
|
||||
if hosts:
|
||||
data_dict['hosts'] = hosts
|
||||
if attrs is not None:
|
||||
data_dict['attrs'] = attrs
|
||||
if joins is not None:
|
||||
data_dict['joins'] = joins
|
||||
if host_filter is not None:
|
||||
data_dict['filter'] = host_filter.replace("\\\"", "\"")
|
||||
self.display.vvv(host_filter)
|
||||
host_dict = self._post_request(query_hosts_url, data_dict)
|
||||
return host_dict['results']
|
||||
|
||||
def get_inventory_from_icinga(self):
|
||||
"""Query for all hosts """
|
||||
self.display.vvv("Querying Icinga2 for inventory")
|
||||
query_args = {
|
||||
"attrs": ["address", "state_type", "state", "groups"],
|
||||
}
|
||||
if self.host_filter is not None:
|
||||
query_args['host_filter'] = self.host_filter
|
||||
# Icinga2 API Call
|
||||
results_json = self._query_hosts(**query_args)
|
||||
# Manipulate returned API data to Ansible inventory spec
|
||||
ansible_inv = self._convert_inv(results_json)
|
||||
return ansible_inv
|
||||
|
||||
def _populate(self):
|
||||
groups = self._to_json(self.get_inventory_from_icinga())
|
||||
return groups
|
||||
|
||||
def _to_json(self, in_dict):
|
||||
"""Convert dictionary to JSON"""
|
||||
return json.dumps(in_dict, sort_keys=True, indent=2)
|
||||
|
||||
def _convert_inv(self, json_data):
|
||||
"""Convert Icinga2 API data to JSON format for Ansible"""
|
||||
groups_dict = {"_meta": {"hostvars": {}}}
|
||||
for entry in json_data:
|
||||
host_name = entry['name']
|
||||
host_attrs = entry['attrs']
|
||||
if host_attrs['state'] == 0:
|
||||
host_attrs['state'] = 'on'
|
||||
else:
|
||||
host_attrs['state'] = 'off'
|
||||
host_groups = host_attrs['groups']
|
||||
host_addr = host_attrs['address']
|
||||
self.inventory.add_host(host_addr)
|
||||
for group in host_groups:
|
||||
if group not in self.inventory.groups.keys():
|
||||
self.inventory.add_group(group)
|
||||
self.inventory.add_child(group, host_addr)
|
||||
self.inventory.set_variable(host_addr, 'address', host_addr)
|
||||
self.inventory.set_variable(host_addr, 'hostname', host_name)
|
||||
self.inventory.set_variable(host_addr, 'state',
|
||||
host_attrs['state'])
|
||||
self.inventory.set_variable(host_addr, 'state_type',
|
||||
host_attrs['state_type'])
|
||||
return groups_dict
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
|
||||
|
||||
super(InventoryModule, self).parse(inventory, loader, path)
|
||||
|
||||
# read config from file, this sets 'options'
|
||||
self._read_config_data(path)
|
||||
|
||||
# Store the options from the YAML file
|
||||
self.icinga2_url = self.get_option('url').rstrip('/') + '/v1'
|
||||
self.icinga2_user = self.get_option('user')
|
||||
self.icinga2_password = self.get_option('password')
|
||||
self.ssl_verify = self.get_option('validate_certs')
|
||||
self.host_filter = self.get_option('host_filter')
|
||||
# Not currently enabled
|
||||
# self.cache_key = self.get_cache_key(path)
|
||||
# self.use_cache = cache and self.get_option('cache')
|
||||
|
||||
# Test connection to API
|
||||
self._api_connect()
|
||||
|
||||
# Call our internal helper to populate the dynamic inventory
|
||||
self._populate()
|
||||
@@ -29,8 +29,7 @@ DOCUMENTATION = r'''
    ip_style:
        description: Populate hostvars with all information available from the Linode APIv4.
        type: string
        default:
          - plain
        default: plain
        choices:
            - plain
            - api

239 plugins/inventory/opennebula.py Normal file
@@ -0,0 +1,239 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2020, FELDSAM s.r.o. - FeldHost™ <support@feldhost.cz>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
name: opennebula
|
||||
author:
|
||||
- Kristian Feldsam (@feldsam)
|
||||
short_description: OpenNebula inventory source
|
||||
version_added: "3.8.0"
|
||||
extends_documentation_fragment:
|
||||
- constructed
|
||||
description:
|
||||
- Get inventory hosts from OpenNebula cloud.
|
||||
- Uses a YAML configuration file ending with either I(opennebula.yml) or I(opennebula.yaml)
|
||||
to set parameter values.
|
||||
- Uses I(api_authfile), C(~/.one/one_auth), or C(ONE_AUTH) pointing to an OpenNebula credentials file.
|
||||
options:
|
||||
plugin:
|
||||
description: Token that ensures this is a source file for the 'opennebula' plugin.
|
||||
type: string
|
||||
required: true
|
||||
choices: [ community.general.opennebula ]
|
||||
api_url:
|
||||
description:
|
||||
- URL of the OpenNebula RPC server.
|
||||
- It is recommended to use HTTPS so that the username/password are not
|
||||
transferred over the network unencrypted.
|
||||
- If not set then the value of the C(ONE_URL) environment variable is used.
|
||||
env:
|
||||
- name: ONE_URL
|
||||
required: True
|
||||
type: string
|
||||
api_username:
|
||||
description:
|
||||
- Name of the user to login into the OpenNebula RPC server. If not set
|
||||
then the value of the C(ONE_USERNAME) environment variable is used.
|
||||
env:
|
||||
- name: ONE_USERNAME
|
||||
type: string
|
||||
api_password:
|
||||
description:
|
||||
- Password or a token of the user to login into OpenNebula RPC server.
|
||||
- If not set, the value of the C(ONE_PASSWORD) environment variable is used.
|
||||
env:
|
||||
- name: ONE_PASSWORD
|
||||
required: False
|
||||
type: string
|
||||
api_authfile:
|
||||
description:
|
||||
- If I(api_username) and I(api_password) are not set, then it will try to
|
||||
authenticate with the ONE auth file. Default path is C(~/.one/one_auth).
|
||||
- Set environment variable C(ONE_AUTH) to override this path.
|
||||
env:
|
||||
- name: ONE_AUTH
|
||||
required: False
|
||||
type: string
|
||||
hostname:
|
||||
description: Field to match the hostname. Note that C(v4_first_ip) corresponds to the first IPv4 address found on the VM.
|
||||
type: string
|
||||
default: v4_first_ip
|
||||
choices:
|
||||
- v4_first_ip
|
||||
- v6_first_ip
|
||||
- name
|
||||
filter_by_label:
|
||||
description: Only return servers filtered by this label.
|
||||
type: string
|
||||
group_by_labels:
|
||||
description: Create host groups from VM labels.
|
||||
type: bool
|
||||
default: True
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
# inventory_opennebula.yml file in YAML format
|
||||
# Example command line: ansible-inventory --list -i inventory_opennebula.yml
|
||||
|
||||
# Pass a label filter to the API
|
||||
plugin: community.general.opennebula
|
||||
api_url: https://opennebula:2633/RPC2
|
||||
filter_by_label: Cache
|
||||
'''
|
||||
|
||||
try:
|
||||
import pyone
|
||||
|
||||
HAS_PYONE = True
|
||||
except ImportError:
|
||||
HAS_PYONE = False
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
from collections import namedtuple
|
||||
import os
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
NAME = 'community.general.opennebula'
|
||||
|
||||
def verify_file(self, path):
|
||||
valid = False
|
||||
if super(InventoryModule, self).verify_file(path):
|
||||
if path.endswith(('opennebula.yaml', 'opennebula.yml')):
|
||||
valid = True
|
||||
return valid
|
||||
|
||||
def _get_connection_info(self):
|
||||
url = self.get_option('api_url')
|
||||
username = self.get_option('api_username')
|
||||
password = self.get_option('api_password')
|
||||
authfile = self.get_option('api_authfile')
|
||||
|
||||
if not username and not password:
|
||||
if authfile is None:
|
||||
authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth")
|
||||
try:
|
||||
with open(authfile, "r") as fp:
|
||||
authstring = fp.read().rstrip()
|
||||
username, password = authstring.split(":")
|
||||
except (OSError, IOError):
|
||||
raise AnsibleError("Could not find or read ONE_AUTH file at '{e}'".format(e=authfile))
|
||||
except Exception:
|
||||
raise AnsibleError("Error occurs when reading ONE_AUTH file at '{e}'".format(e=authfile))
|
||||
|
||||
auth_params = namedtuple('auth', ('url', 'username', 'password'))
|
||||
|
||||
return auth_params(url=url, username=username, password=password)
|
||||
|
||||
def _get_vm_ipv4(self, vm):
|
||||
nic = vm.TEMPLATE.get('NIC')
|
||||
|
||||
if isinstance(nic, dict):
|
||||
nic = [nic]
|
||||
|
||||
for net in nic:
|
||||
return net['IP']
|
||||
|
||||
return False
|
||||
|
||||
def _get_vm_ipv6(self, vm):
|
||||
nic = vm.TEMPLATE.get('NIC')
|
||||
|
||||
if isinstance(nic, dict):
|
||||
nic = [nic]
|
||||
|
||||
for net in nic:
|
||||
if net.get('IP6_GLOBAL'):
|
||||
return net['IP6_GLOBAL']
|
||||
|
||||
return False
|
||||
|
||||
def _get_vm_pool(self):
|
||||
auth = self._get_connection_info()
|
||||
|
||||
if not (auth.username and auth.password):
|
||||
raise AnsibleError('API Credentials missing. Check OpenNebula inventory file.')
|
||||
else:
|
||||
one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
|
||||
|
||||
# get hosts (VMs)
|
||||
try:
|
||||
vm_pool = one_client.vmpool.infoextended(-2, -1, -1, 3)
|
||||
except Exception as e:
|
||||
raise AnsibleError("Something happened during XML-RPC call: {e}".format(e=to_native(e)))
|
||||
|
||||
return vm_pool
|
||||
|
||||
def _retrieve_servers(self, label_filter=None):
|
||||
vm_pool = self._get_vm_pool()
|
||||
|
||||
result = []
|
||||
|
||||
# iterate over hosts
|
||||
for vm in vm_pool.VM:
|
||||
server = vm.USER_TEMPLATE
|
||||
|
||||
labels = []
|
||||
if vm.USER_TEMPLATE.get('LABELS'):
|
||||
labels = [s for s in vm.USER_TEMPLATE.get('LABELS') if s == ',' or s == '-' or s.isalnum() or s.isspace()]
|
||||
labels = ''.join(labels)
|
||||
labels = labels.replace(' ', '_')
|
||||
labels = labels.replace('-', '_')
|
||||
labels = labels.split(',')
|
||||
|
||||
# filter by label
|
||||
if label_filter is not None:
|
||||
if label_filter not in labels:
|
||||
continue
|
||||
|
||||
server['name'] = vm.NAME
|
||||
server['LABELS'] = labels
|
||||
server['v4_first_ip'] = self._get_vm_ipv4(vm)
|
||||
server['v6_first_ip'] = self._get_vm_ipv6(vm)
|
||||
|
||||
result.append(server)
|
||||
|
||||
return result
|
||||
|
||||
def _populate(self):
|
||||
hostname_preference = self.get_option('hostname')
|
||||
group_by_labels = self.get_option('group_by_labels')
|
||||
|
||||
# Add a top-level group 'all'
|
||||
self.inventory.add_group(group='all')
|
||||
|
||||
filter_by_label = self.get_option('filter_by_label')
|
||||
for server in self._retrieve_servers(filter_by_label):
|
||||
# check for labels
|
||||
if group_by_labels and server['LABELS']:
|
||||
for label in server['LABELS']:
|
||||
self.inventory.add_group(group=label)
|
||||
self.inventory.add_host(host=server['name'], group=label)
|
||||
|
||||
self.inventory.add_host(host=server['name'], group='all')
|
||||
|
||||
for attribute, value in server.items():
|
||||
self.inventory.set_variable(server['name'], attribute, value)
|
||||
|
||||
if hostname_preference != 'name':
|
||||
self.inventory.set_variable(server['name'], 'ansible_host', server[hostname_preference])
|
||||
|
||||
if server.get('SSH_PORT'):
|
||||
self.inventory.set_variable(server['name'], 'ansible_port', server['SSH_PORT'])
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
|
||||
if not HAS_PYONE:
|
||||
raise AnsibleError('OpenNebula Inventory plugin requires pyone to work!')
|
||||
|
||||
super(InventoryModule, self).parse(inventory, loader, path)
|
||||
self._read_config_data(path=path)
|
||||
|
||||
self._populate()
|
||||
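The label handling in `_retrieve_servers` above is terse; here is a standalone sketch of the same transformation on a hypothetical C(LABELS) string, together with the C(hostname) preference applied in `_populate`. The example values are assumptions for illustration only.

```python
# Standalone sketch of the LABELS sanitisation done in _retrieve_servers above.
raw_labels = 'Web Servers,qa-env,Cache'   # hypothetical USER_TEMPLATE/LABELS value

labels = [s for s in raw_labels if s == ',' or s == '-' or s.isalnum() or s.isspace()]
labels = ''.join(labels).replace(' ', '_').replace('-', '_').split(',')
print(labels)   # ['Web_Servers', 'qa_env', 'Cache'] -> one Ansible group per label

# Hostname preference as applied in _populate: anything other than 'name'
# becomes ansible_host (here the first IPv4 address of the VM).
server = {'name': 'vm-42', 'v4_first_ip': '192.0.2.42', 'v6_first_ip': False}
hostname_preference = 'v4_first_ip'
ansible_host = server[hostname_preference] if hostname_preference != 'name' else server['name']
print(ansible_host)  # 192.0.2.42
```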
@@ -36,19 +36,20 @@ options:
|
||||
ini:
|
||||
- section: tss_lookup
|
||||
key: username
|
||||
required: true
|
||||
password:
|
||||
description: The password associated with the supplied username.
|
||||
description:
|
||||
- The password associated with the supplied username.
|
||||
- Required when I(token) is not provided.
|
||||
env:
|
||||
- name: TSS_PASSWORD
|
||||
ini:
|
||||
- section: tss_lookup
|
||||
key: password
|
||||
required: true
|
||||
domain:
|
||||
default: ""
|
||||
description:
|
||||
- The domain with which to request the OAuth2 Access Grant.
|
||||
- Optional when I(token) is not provided.
|
||||
- Requires C(python-tss-sdk) version 1.0.0 or greater.
|
||||
env:
|
||||
- name: TSS_DOMAIN
|
||||
@@ -57,6 +58,17 @@ options:
|
||||
key: domain
|
||||
required: false
|
||||
version_added: 3.6.0
|
||||
token:
|
||||
description:
|
||||
- Existing token for Thycotic authorizer.
|
||||
- If provided, I(username) and I(password) are not needed.
|
||||
- Requires C(python-tss-sdk) version 1.0.0 or greater.
|
||||
env:
|
||||
- name: TSS_TOKEN
|
||||
ini:
|
||||
- section: tss_lookup
|
||||
key: token
|
||||
version_added: 3.7.0
|
||||
api_path_uri:
|
||||
default: /api/v1
|
||||
description: The path to append to the base URL to form a valid REST
|
||||
@@ -83,18 +95,6 @@ _list:
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
- hosts: localhost
|
||||
vars:
|
||||
secret: "{{ lookup('community.general.tss', 1) }}"
|
||||
tasks:
|
||||
- ansible.builtin.debug:
|
||||
msg: >
|
||||
the password is {{
|
||||
(secret['items']
|
||||
| items2dict(key_name='slug',
|
||||
value_name='itemValue'))['password']
|
||||
}}
|
||||
|
||||
- hosts: localhost
|
||||
vars:
|
||||
secret: >-
|
||||
@@ -116,10 +116,39 @@ EXAMPLES = r"""
|
||||
value_name='itemValue'))['password']
|
||||
}}
|
||||
|
||||
- hosts: localhost
|
||||
vars:
|
||||
secret: >-
|
||||
{{
|
||||
lookup(
|
||||
'community.general.tss',
|
||||
102,
|
||||
base_url='https://secretserver.domain.com/SecretServer/',
|
||||
username='user.name',
|
||||
password='password',
|
||||
domain='domain'
|
||||
)
|
||||
}}
|
||||
tasks:
|
||||
- ansible.builtin.debug:
|
||||
msg: >
|
||||
the password is {{
|
||||
(secret['items']
|
||||
| items2dict(key_name='slug',
|
||||
value_name='itemValue'))['password']
|
||||
}}
|
||||
|
||||
- hosts: localhost
|
||||
vars:
|
||||
secret_password: >-
|
||||
{{ ((lookup('community.general.tss', 1) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password'] }}"
|
||||
{{
|
||||
((lookup(
|
||||
'community.general.tss',
|
||||
102,
|
||||
base_url='https://secretserver.domain.com/SecretServer/',
|
||||
token='thycotic_access_token',
|
||||
) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password']
|
||||
}}
|
||||
tasks:
|
||||
- ansible.builtin.debug:
|
||||
msg: the password is {{ secret_password }}
|
||||
@@ -142,12 +171,13 @@ except ImportError:
|
||||
HAS_TSS_SDK = False
|
||||
|
||||
try:
|
||||
from thycotic.secrets.server import PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer
|
||||
from thycotic.secrets.server import PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer
|
||||
|
||||
HAS_TSS_AUTHORIZER = True
|
||||
except ImportError:
|
||||
PasswordGrantAuthorizer = None
|
||||
DomainPasswordGrantAuthorizer = None
|
||||
AccessTokenAuthorizer = None
|
||||
HAS_TSS_AUTHORIZER = False
|
||||
|
||||
|
||||
@@ -209,6 +239,11 @@ class TSSClientV1(TSSClient):
|
||||
|
||||
@staticmethod
|
||||
def _get_authorizer(**server_parameters):
|
||||
if server_parameters.get("token"):
|
||||
return AccessTokenAuthorizer(
|
||||
server_parameters["token"],
|
||||
)
|
||||
|
||||
if server_parameters.get("domain"):
|
||||
return DomainPasswordGrantAuthorizer(
|
||||
server_parameters["base_url"],
|
||||
@@ -238,6 +273,7 @@ class LookupModule(LookupBase):
|
||||
username=self.get_option("username"),
|
||||
password=self.get_option("password"),
|
||||
domain=self.get_option("domain"),
|
||||
token=self.get_option("token"),
|
||||
api_path_uri=self.get_option("api_path_uri"),
|
||||
token_path_uri=self.get_option("token_path_uri"),
|
||||
)
|
||||
|
||||
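The hunk above adds C(AccessTokenAuthorizer) support to the tss lookup; the selection order implemented in `_get_authorizer` can be summarised in a short sketch. The function below is illustrative only and returns descriptive tuples rather than constructing the python-tss-sdk classes; the parameter values are hypothetical.

```python
# Sketch of the authorizer precedence in TSSClientV1._get_authorizer above:
# token wins over domain credentials, which win over plain username/password.
def pick_authorizer(params):
    if params.get("token"):
        return ("AccessTokenAuthorizer", params["token"])
    if params.get("domain"):
        return ("DomainPasswordGrantAuthorizer", params["base_url"], params["domain"])
    return ("PasswordGrantAuthorizer", params["base_url"], params.get("username"))

print(pick_authorizer({"token": "thycotic_access_token"}))
print(pick_authorizer({"base_url": "https://tss.example.com", "domain": "corp", "username": "svc"}))
```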
@@ -83,6 +83,9 @@ URL_IDENTITY_PROVIDER = "{url}/admin/realms/{realm}/identity-provider/instances/
|
||||
URL_IDENTITY_PROVIDER_MAPPERS = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}/mappers"
|
||||
URL_IDENTITY_PROVIDER_MAPPER = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}/mappers/{id}"
|
||||
|
||||
URL_COMPONENTS = "{url}/admin/realms/{realm}/components"
|
||||
URL_COMPONENT = "{url}/admin/realms/{realm}/components/{id}"
|
||||
|
||||
|
||||
def keycloak_argument_spec():
|
||||
"""
|
||||
@@ -1028,7 +1031,7 @@ class KeycloakAPI(object):
|
||||
:param name: Name of the role to fetch.
|
||||
:param realm: Realm in which the role resides; default 'master'.
|
||||
"""
|
||||
role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=name)
|
||||
role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name))
|
||||
try:
|
||||
return json.loads(to_native(open_url(role_url, method="GET", headers=self.restheaders,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
@@ -1062,7 +1065,7 @@ class KeycloakAPI(object):
|
||||
:param rolerep: A RoleRepresentation of the updated role.
|
||||
:return HTTPResponse object on success
|
||||
"""
|
||||
role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=rolerep['name'])
|
||||
role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(rolerep['name']))
|
||||
try:
|
||||
return open_url(role_url, method='PUT', headers=self.restheaders,
|
||||
data=json.dumps(rolerep), validate_certs=self.validate_certs)
|
||||
@@ -1076,7 +1079,7 @@ class KeycloakAPI(object):
|
||||
:param name: The name of the role.
|
||||
:param realm: The realm in which this role resides, default "master".
|
||||
"""
|
||||
role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=name)
|
||||
role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name))
|
||||
try:
|
||||
return open_url(role_url, method='DELETE', headers=self.restheaders,
|
||||
validate_certs=self.validate_certs)
|
||||
@@ -1119,7 +1122,7 @@ class KeycloakAPI(object):
|
||||
if cid is None:
|
||||
self.module.fail_json(msg='Could not find client %s in realm %s'
|
||||
% (clientid, realm))
|
||||
role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=name)
|
||||
role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name))
|
||||
try:
|
||||
return json.loads(to_native(open_url(role_url, method="GET", headers=self.restheaders,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
@@ -1165,7 +1168,7 @@ class KeycloakAPI(object):
|
||||
if cid is None:
|
||||
self.module.fail_json(msg='Could not find client %s in realm %s'
|
||||
% (clientid, realm))
|
||||
role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=rolerep['name'])
|
||||
role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep['name']))
|
||||
try:
|
||||
return open_url(role_url, method='PUT', headers=self.restheaders,
|
||||
data=json.dumps(rolerep), validate_certs=self.validate_certs)
|
||||
@@ -1184,7 +1187,7 @@ class KeycloakAPI(object):
|
||||
if cid is None:
|
||||
self.module.fail_json(msg='Could not find client %s in realm %s'
|
||||
% (clientid, realm))
|
||||
role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=name)
|
||||
role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name))
|
||||
try:
|
||||
return open_url(role_url, method='DELETE', headers=self.restheaders,
|
||||
validate_certs=self.validate_certs)
|
||||
@@ -1601,3 +1604,93 @@ class KeycloakAPI(object):
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Unable to delete mapper %s for identity provider %s in realm %s: %s'
|
||||
% (mid, alias, realm, str(e)))
|
||||
|
||||
def get_components(self, filter=None, realm='master'):
|
||||
""" Fetch representations for components in a realm
|
||||
:param realm: realm to be queried
|
||||
:param filter: search filter
|
||||
:return: list of representations for components
|
||||
"""
|
||||
comps_url = URL_COMPONENTS.format(url=self.baseurl, realm=realm)
|
||||
if filter is not None:
|
||||
comps_url += '?%s' % filter
|
||||
|
||||
try:
|
||||
return json.loads(to_native(open_url(comps_url, method='GET', headers=self.restheaders,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except ValueError as e:
|
||||
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of components for realm %s: %s'
|
||||
% (realm, str(e)))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Could not obtain list of components for realm %s: %s'
|
||||
% (realm, str(e)))
|
||||
|
||||
def get_component(self, cid, realm='master'):
|
||||
""" Fetch component representation from a realm using its cid.
|
||||
If the component does not exist, None is returned.
|
||||
:param cid: Unique ID of the component to fetch.
|
||||
:param realm: Realm in which the component resides; default 'master'.
|
||||
"""
|
||||
comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid)
|
||||
try:
|
||||
return json.loads(to_native(open_url(comp_url, method="GET", headers=self.restheaders,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except HTTPError as e:
|
||||
if e.code == 404:
|
||||
return None
|
||||
else:
|
||||
self.module.fail_json(msg='Could not fetch component %s in realm %s: %s'
|
||||
% (cid, realm, str(e)))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Could not fetch component %s in realm %s: %s'
|
||||
% (cid, realm, str(e)))
|
||||
|
||||
def create_component(self, comprep, realm='master'):
|
||||
""" Create an component.
|
||||
:param comprep: Component representation of the component to be created.
|
||||
:param realm: Realm in which this component resides, default "master".
|
||||
:return: Component representation of the created component
|
||||
"""
|
||||
comps_url = URL_COMPONENTS.format(url=self.baseurl, realm=realm)
|
||||
try:
|
||||
resp = open_url(comps_url, method='POST', headers=self.restheaders,
|
||||
data=json.dumps(comprep), validate_certs=self.validate_certs)
|
||||
comp_url = resp.getheader('Location')
|
||||
if comp_url is None:
|
||||
self.module.fail_json(msg='Could not create component in realm %s: %s'
|
||||
% (realm, 'unexpected response'))
|
||||
return json.loads(to_native(open_url(comp_url, method="GET", headers=self.restheaders,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Could not create component in realm %s: %s'
|
||||
% (realm, str(e)))
|
||||
|
||||
def update_component(self, comprep, realm='master'):
|
||||
""" Update an existing component.
|
||||
:param comprep: Component representation of the component to be updated.
|
||||
:param realm: Realm in which this component resides, default "master".
|
||||
:return HTTPResponse object on success
|
||||
"""
|
||||
cid = comprep.get('id')
|
||||
if cid is None:
|
||||
self.module.fail_json(msg='Cannot update component without id')
|
||||
comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid)
|
||||
try:
|
||||
return open_url(comp_url, method='PUT', headers=self.restheaders,
|
||||
data=json.dumps(comprep), validate_certs=self.validate_certs)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Could not update component %s in realm %s: %s'
|
||||
% (cid, realm, str(e)))
|
||||
|
||||
def delete_component(self, cid, realm='master'):
|
||||
""" Delete an component.
|
||||
:param cid: Unique ID of the component.
|
||||
:param realm: Realm in which this component resides, default "master".
|
||||
"""
|
||||
comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid)
|
||||
try:
|
||||
return open_url(comp_url, method='DELETE', headers=self.restheaders,
|
||||
validate_certs=self.validate_certs)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Unable to delete component %s in realm %s: %s'
|
||||
% (cid, realm, str(e)))
|
||||
|
||||
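A hedged sketch of how a module might drive the new component helpers above to get create-or-update behaviour. The C(kc) object and the filter string are assumptions for illustration; the filter is passed through verbatim as the query string of the C(GET .../components) request shown above.

```python
# Hypothetical usage of the component helpers added above (kc is a KeycloakAPI
# instance created elsewhere by a module).
def ensure_component(kc, realm, desired):
    found = kc.get_components(
        filter='type=org.keycloak.storage.UserStorageProvider&name=%s' % desired['name'],
        realm=realm,
    )
    if not found:
        return kc.create_component(desired, realm=realm)   # returns the created representation
    desired['id'] = found[0]['id']
    return kc.update_component(desired, realm=realm)        # returns the HTTP response
```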
@@ -31,6 +31,7 @@ def _env_then_dns_fallback(*args, **kwargs):
|
||||
result = env_fallback(*args, **kwargs)
|
||||
if result == '':
|
||||
raise AnsibleFallbackNotFound
|
||||
return result
|
||||
except AnsibleFallbackNotFound:
|
||||
# If no host was given, we try to guess it from IPA.
|
||||
# The ipa-ca entry is a standard entry that IPA will have set for
|
||||
|
||||
@@ -29,7 +29,7 @@ FAIL_MSG = 'Issuing a data modification command without specifying the '\
|
||||
class RedfishUtils(object):
|
||||
|
||||
def __init__(self, creds, root_uri, timeout, module, resource_id=None,
|
||||
data_modification=False):
|
||||
data_modification=False, strip_etag_quotes=False):
|
||||
self.root_uri = root_uri
|
||||
self.creds = creds
|
||||
self.timeout = timeout
|
||||
@@ -37,6 +37,7 @@ class RedfishUtils(object):
|
||||
self.service_root = '/redfish/v1/'
|
||||
self.resource_id = resource_id
|
||||
self.data_modification = data_modification
|
||||
self.strip_etag_quotes = strip_etag_quotes
|
||||
self._init_session()
|
||||
|
||||
def _auth_params(self, headers):
|
||||
@@ -121,6 +122,8 @@ class RedfishUtils(object):
|
||||
if not etag:
|
||||
etag = r['data'].get('@odata.etag')
|
||||
if etag:
|
||||
if self.strip_etag_quotes:
|
||||
etag = etag.strip('"')
|
||||
req_headers['If-Match'] = etag
|
||||
username, password, basic_auth = self._auth_params(req_headers)
|
||||
try:
|
||||
@@ -976,6 +979,8 @@ class RedfishUtils(object):
|
||||
payload['Password'] = user.get('account_password')
|
||||
if user.get('account_roleid'):
|
||||
payload['RoleId'] = user.get('account_roleid')
|
||||
if user.get('account_id'):
|
||||
payload['Id'] = user.get('account_id')
|
||||
|
||||
response = self.post_request(self.root_uri + self.accounts_uri, payload)
|
||||
if not response['ret']:
|
||||
@@ -1600,9 +1605,6 @@ class RedfishUtils(object):
|
||||
cur_boot_next = boot.get('BootNext')
|
||||
cur_override_mode = boot.get('BootSourceOverrideMode')
|
||||
|
||||
if not boot_override_mode:
|
||||
boot_override_mode = cur_override_mode
|
||||
|
||||
if override_enabled == 'Disabled':
|
||||
payload = {
|
||||
'Boot': {
|
||||
@@ -1638,16 +1640,18 @@ class RedfishUtils(object):
|
||||
}
|
||||
}
|
||||
else:
|
||||
if cur_enabled == override_enabled and target == bootdevice and cur_override_mode == boot_override_mode:
|
||||
if (cur_enabled == override_enabled and target == bootdevice and
|
||||
(cur_override_mode == boot_override_mode or not boot_override_mode)):
|
||||
# If properties are already set, no changes needed
|
||||
return {'ret': True, 'changed': False}
|
||||
payload = {
|
||||
'Boot': {
|
||||
'BootSourceOverrideEnabled': override_enabled,
|
||||
'BootSourceOverrideMode': boot_override_mode,
|
||||
'BootSourceOverrideTarget': bootdevice
|
||||
}
|
||||
}
|
||||
if boot_override_mode:
|
||||
payload['Boot']['BootSourceOverrideMode'] = boot_override_mode
|
||||
|
||||
response = self.patch_request(self.root_uri + self.systems_uri, payload)
|
||||
if response['ret'] is False:
|
||||
@@ -2757,7 +2761,9 @@ class RedfishUtils(object):
|
||||
if isinstance(set_value, dict):
|
||||
for subprop in payload[property].keys():
|
||||
if subprop not in target_ethernet_current_setting[property]:
|
||||
return {'ret': False, 'msg': "Sub-property %s in nic_config is invalid" % subprop}
|
||||
# Not configured already; need to apply the request
|
||||
need_change = True
|
||||
break
|
||||
sub_set_value = payload[property][subprop]
|
||||
sub_cur_value = target_ethernet_current_setting[property][subprop]
|
||||
if sub_set_value != sub_cur_value:
|
||||
@@ -2771,7 +2777,9 @@ class RedfishUtils(object):
|
||||
for i in range(len(set_value)):
|
||||
for subprop in payload[property][i].keys():
|
||||
if subprop not in target_ethernet_current_setting[property][i]:
|
||||
return {'ret': False, 'msg': "Sub-property %s in nic_config is invalid" % subprop}
|
||||
# Not configured already; need to apply the request
|
||||
need_change = True
|
||||
break
|
||||
sub_set_value = payload[property][i][subprop]
|
||||
sub_cur_value = target_ethernet_current_setting[property][i][subprop]
|
||||
if sub_set_value != sub_cur_value:
|
||||
|
||||
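The C(strip_etag_quotes) flag added to RedfishUtils above exists because some Redfish services return a quoted ETag but only accept the unquoted form in the C(If-Match) header; a two-line sketch of the effect (the ETag value is hypothetical):

```python
# Effect of the strip_etag_quotes option added above.
etag = '"3117289834"'            # hypothetical @odata.etag value
print(etag.strip('"'))           # 3117289834 - sent as If-Match when stripping is enabled
```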
plugins/module_utils/redis.py (new file, 93 lines)
@@ -0,0 +1,93 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2021, Andreas Botzner <andreas at botzner dot com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
from ansible.module_utils.basic import missing_required_lib
|
||||
__metaclass__ = type
|
||||
|
||||
import traceback
|
||||
|
||||
REDIS_IMP_ERR = None
|
||||
try:
|
||||
from redis import Redis
|
||||
from redis import __version__ as redis_version
|
||||
HAS_REDIS_PACKAGE = True
|
||||
except ImportError:
|
||||
REDIS_IMP_ERR = traceback.format_exc()
|
||||
HAS_REDIS_PACKAGE = False
|
||||
|
||||
try:
|
||||
import certifi
|
||||
HAS_CERTIFI_PACKAGE = True
|
||||
except ImportError:
|
||||
CERTIFI_IMPORT_ERROR = traceback.format_exc()
|
||||
HAS_CERTIFI_PACKAGE = False
|
||||
|
||||
|
||||
def fail_imports(module):
|
||||
errors = []
|
||||
traceback = []
|
||||
if not HAS_REDIS_PACKAGE:
|
||||
errors.append(missing_required_lib('redis'))
|
||||
traceback.append(REDIS_IMP_ERR)
|
||||
if not HAS_CERTIFI_PACKAGE:
|
||||
errors.append(missing_required_lib('certifi'))
|
||||
traceback.append(CERTIFI_IMPORT_ERROR)
|
||||
if errors:
|
||||
module.fail_json(errors=errors, traceback='\n'.join(traceback))
|
||||
|
||||
|
||||
def redis_auth_argument_spec():
|
||||
return dict(
|
||||
login_host=dict(type='str',
|
||||
default='localhost',),
|
||||
login_user=dict(type='str'),
|
||||
login_password=dict(type='str',
|
||||
no_log=True
|
||||
),
|
||||
login_port=dict(type='int', default=6379),
|
||||
tls=dict(type='bool',
|
||||
default=True),
|
||||
validate_certs=dict(type='bool',
|
||||
default=True
|
||||
),
|
||||
ca_certs=dict(type='str')
|
||||
)
|
||||
|
||||
|
||||
class RedisAnsible(object):
|
||||
'''Base class for Redis module'''
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
self.connection = self._connect()
|
||||
|
||||
def _connect(self):
|
||||
login_host = self.module.params['login_host']
|
||||
login_user = self.module.params['login_user']
|
||||
login_password = self.module.params['login_password']
|
||||
login_port = self.module.params['login_port']
|
||||
tls = self.module.params['tls']
|
||||
validate_certs = 'required' if self.module.params['validate_certs'] else None
|
||||
ca_certs = self.module.params['ca_certs']
|
||||
if tls and ca_certs is None:
|
||||
ca_certs = str(certifi.where())
|
||||
if tuple(map(int, redis_version.split('.'))) < (3, 4, 0) and login_user is not None:
|
||||
self.module.fail_json(
|
||||
msg='The option `username` is only supported with redis >= 3.4.0.')
|
||||
params = {'host': login_host,
|
||||
'port': login_port,
|
||||
'password': login_password,
|
||||
'ssl_ca_certs': ca_certs,
|
||||
'ssl_cert_reqs': validate_certs,
|
||||
'ssl': tls}
|
||||
if login_user is not None:
|
||||
params['username'] = login_user
|
||||
try:
|
||||
return Redis(**params)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='{0}'.format(str(e)))
|
||||
return None
|
||||
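A minimal sketch of how a module is expected to consume this new helper; it mirrors what redis_data below does, and the final C(ping()) call is an arbitrary example command against the connection.

```python
# Hypothetical module skeleton using the redis module_utils added above.
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.redis import (
    fail_imports, redis_auth_argument_spec, RedisAnsible)


def main():
    module = AnsibleModule(argument_spec=redis_auth_argument_spec(),
                           supports_check_mode=True)
    fail_imports(module)             # fails early if redis/certifi are missing
    redis = RedisAnsible(module)     # builds the Redis() connection from the auth options
    module.exit_json(changed=False, ping=redis.connection.ping())


if __name__ == '__main__':
    main()
```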
plugins/module_utils/rundeck.py (new file, 94 lines)
@@ -0,0 +1,94 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Phillipe Smith <phsmithcc@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
|
||||
from ansible.module_utils.urls import fetch_url, url_argument_spec
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
|
||||
def api_argument_spec():
|
||||
'''
|
||||
Creates an argument spec that can be used with any module
|
||||
that will be requesting content via Rundeck API
|
||||
'''
|
||||
api_argument_spec = url_argument_spec()
|
||||
api_argument_spec.update(dict(
|
||||
url=dict(required=True, type="str"),
|
||||
api_version=dict(type="int", default=39),
|
||||
api_token=dict(required=True, type="str", no_log=True)
|
||||
))
|
||||
|
||||
return api_argument_spec
|
||||
|
||||
|
||||
def api_request(module, endpoint, data=None, method="GET"):
|
||||
"""Manages Rundeck API requests via HTTP(S)
|
||||
|
||||
:arg module: The AnsibleModule (used to get url, api_version, api_token, etc).
|
||||
:arg endpoint: The API endpoint to be used.
|
||||
:kwarg data: The data to be sent (in case of POST/PUT).
|
||||
:kwarg method: "POST", "PUT", etc.
|
||||
|
||||
:returns: A tuple of (**response**, **info**). Use ``response.read()`` to read the data.
|
||||
The **info** contains the 'status' and other metadata. When an HttpError (status >= 400)
|
||||
occurs, then ``info['body']`` contains the error response data::
|
||||
|
||||
Example::
|
||||
|
||||
data={...}
|
||||
resp, info = fetch_url(module,
|
||||
"http://rundeck.example.org",
|
||||
data=module.jsonify(data),
|
||||
method="POST")
|
||||
status_code = info["status"]
|
||||
body = resp.read()
|
||||
if status_code >= 400 :
|
||||
body = info['body']
|
||||
"""
|
||||
|
||||
response, info = fetch_url(
|
||||
module=module,
|
||||
url="%s/api/%s/%s" % (
|
||||
module.params["url"],
|
||||
module.params["api_version"],
|
||||
endpoint
|
||||
),
|
||||
data=json.dumps(data),
|
||||
method=method,
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"Accept": "application/json",
|
||||
"X-Rundeck-Auth-Token": module.params["api_token"]
|
||||
}
|
||||
)
|
||||
|
||||
if info["status"] == 403:
|
||||
module.fail_json(msg="Token authorization failed",
|
||||
execution_info=json.loads(info["body"]))
|
||||
if info["status"] == 409:
|
||||
module.fail_json(msg="Job executions limit reached",
|
||||
execution_info=json.loads(info["body"]))
|
||||
elif info["status"] >= 500:
|
||||
module.fail_json(msg="Rundeck API error",
|
||||
execution_info=json.loads(info["body"]))
|
||||
|
||||
try:
|
||||
content = response.read()
|
||||
json_response = json.loads(content)
|
||||
return json_response, info
|
||||
except AttributeError as error:
|
||||
module.fail_json(msg="Rundeck API request error",
|
||||
exception=to_native(error),
|
||||
execution_info=info)
|
||||
except ValueError as error:
|
||||
module.fail_json(
|
||||
msg="No valid JSON response",
|
||||
exception=to_native(error),
|
||||
execution_info=content
|
||||
)
|
||||
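A short sketch of how a Rundeck module would combine the two helpers above. The C(project/.../jobs) endpoint is a hypothetical example, and real modules add their own options on top of C(api_argument_spec()).

```python
# Hypothetical module skeleton using the Rundeck module_utils added above.
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.rundeck import (
    api_argument_spec, api_request)


def main():
    argument_spec = api_argument_spec()
    argument_spec.update(project=dict(type="str", required=True))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # GET <url>/api/<api_version>/project/<project>/jobs with the token header set
    jobs, info = api_request(module, endpoint="project/%s/jobs" % module.params["project"])
    module.exit_json(changed=False, jobs=jobs)


if __name__ == '__main__':
    main()
```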
@@ -23,40 +23,48 @@ options:
|
||||
required: true
|
||||
architecture:
|
||||
description:
|
||||
- The architecture for the container (e.g. "x86_64" or "i686").
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
|
||||
- 'The architecture for the container (for example C(x86_64) or C(i686)).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
|
||||
type: str
|
||||
required: false
|
||||
config:
|
||||
description:
|
||||
- 'The config for the container (e.g. {"limits.cpu": "2"}).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
|
||||
- If the container already exists and its "config" value in metadata
|
||||
obtained from
|
||||
GET /1.0/containers/<name>
|
||||
- 'The config for the container (for example C({"limits.cpu": "2"})).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
|
||||
- If the container already exists and its "config" values in metadata
|
||||
obtained from GET /1.0/containers/<name>
|
||||
U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersname)
|
||||
are different, they this module tries to apply the configurations.
|
||||
- The key starts with 'volatile.' are ignored for this comparison.
|
||||
- Not all config values are supported to apply the existing container.
|
||||
Maybe you need to delete and recreate a container.
|
||||
are different, this module tries to apply the configurations.
|
||||
- The keys starting with C(volatile.) are ignored for this comparison when I(ignore_volatile_options=true).
|
||||
type: dict
|
||||
required: false
|
||||
ignore_volatile_options:
|
||||
description:
|
||||
- If set to C(true), options starting with C(volatile.) are ignored. As a result,
|
||||
they are reapplied for each execution.
|
||||
- This default behavior can be changed by setting this option to C(false).
|
||||
- The default value C(true) will be deprecated in community.general 4.0.0,
|
||||
and will change to C(false) in community.general 5.0.0.
|
||||
type: bool
|
||||
default: true
|
||||
required: false
|
||||
version_added: 3.7.0
|
||||
profiles:
|
||||
description:
|
||||
- Profile to be used by the container
|
||||
- Profile to be used by the container.
|
||||
type: list
|
||||
elements: str
|
||||
devices:
|
||||
description:
|
||||
- 'The devices for the container
|
||||
(e.g. { "rootfs": { "path": "/dev/kvm", "type": "unix-char" }).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
|
||||
(for example C({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
|
||||
type: dict
|
||||
required: false
|
||||
ephemeral:
|
||||
description:
|
||||
- Whether or not the container is ephemeral (e.g. true or false).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
|
||||
- Whether or not the container is ephemeral (for example C(true) or C(false)).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).
|
||||
required: false
|
||||
type: bool
|
||||
source:
|
||||
@@ -68,7 +76,7 @@ options:
|
||||
"protocol": "lxd",
|
||||
"alias": "ubuntu/xenial/amd64" }).'
|
||||
- 'See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1) for complete API documentation.'
|
||||
- 'Note that C(protocol) accepts two choices: C(lxd) or C(simplestreams)'
|
||||
- 'Note that C(protocol) accepts two choices: C(lxd) or C(simplestreams).'
|
||||
required: false
|
||||
type: dict
|
||||
state:
|
||||
@@ -144,10 +152,10 @@ options:
|
||||
trust_password:
|
||||
description:
|
||||
- The client trusted password.
|
||||
- You need to set this password on the LXD server before
|
||||
running this module using the following command.
|
||||
lxc config set core.trust_password <some random password>
|
||||
See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
|
||||
- 'You need to set this password on the LXD server before
|
||||
running this module using the following command:
|
||||
C(lxc config set core.trust_password <some random password>).
|
||||
See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).'
|
||||
- If I(trust_password) is set, this module sends a request for
|
||||
authentication before sending any requests.
|
||||
required: false
|
||||
@@ -176,6 +184,7 @@ EXAMPLES = '''
|
||||
- name: Create a started container
|
||||
community.general.lxd_container:
|
||||
name: mycontainer
|
||||
ignore_volatile_options: true
|
||||
state: started
|
||||
source:
|
||||
type: image
|
||||
@@ -209,6 +218,7 @@ EXAMPLES = '''
|
||||
- name: Create a started container
|
||||
community.general.lxd_container:
|
||||
name: mycontainer
|
||||
ignore_volatile_options: true
|
||||
state: started
|
||||
source:
|
||||
type: image
|
||||
@@ -279,6 +289,7 @@ EXAMPLES = '''
|
||||
- name: Create LXD container
|
||||
community.general.lxd_container:
|
||||
name: new-container-1
|
||||
ignore_volatile_options: true
|
||||
state: started
|
||||
source:
|
||||
type: image
|
||||
@@ -289,6 +300,7 @@ EXAMPLES = '''
|
||||
- name: Create container on another node
|
||||
community.general.lxd_container:
|
||||
name: new-container-2
|
||||
ignore_volatile_options: true
|
||||
state: started
|
||||
source:
|
||||
type: image
|
||||
@@ -557,7 +569,7 @@ class LXDContainerManagement(object):
|
||||
def _needs_to_change_container_config(self, key):
|
||||
if key not in self.config:
|
||||
return False
|
||||
if key == 'config':
|
||||
if key == 'config' and self.ignore_volatile_options: # the old behavior is to ignore configurations by keyword "volatile"
|
||||
old_configs = dict((k, v) for k, v in self.old_container_json['metadata'][key].items() if not k.startswith('volatile.'))
|
||||
for k, v in self.config['config'].items():
|
||||
if k not in old_configs:
|
||||
@@ -565,6 +577,14 @@ class LXDContainerManagement(object):
|
||||
if old_configs[k] != v:
|
||||
return True
|
||||
return False
|
||||
elif key == 'config': # next default behavior
|
||||
old_configs = dict((k, v) for k, v in self.old_container_json['metadata'][key].items())
|
||||
for k, v in self.config['config'].items():
|
||||
if k not in old_configs:
|
||||
return True
|
||||
if old_configs[k] != v:
|
||||
return True
|
||||
return False
|
||||
else:
|
||||
old_configs = self.old_container_json['metadata'][key]
|
||||
return self.config[key] != old_configs
|
||||
@@ -606,6 +626,7 @@ class LXDContainerManagement(object):
|
||||
try:
|
||||
if self.trust_password is not None:
|
||||
self.client.authenticate(self.trust_password)
|
||||
self.ignore_volatile_options = self.module.params.get('ignore_volatile_options')
|
||||
|
||||
self.old_container_json = self._get_container_json()
|
||||
self.old_state = self._container_json_to_module_state(self.old_container_json)
|
||||
@@ -651,6 +672,10 @@ def main():
|
||||
config=dict(
|
||||
type='dict',
|
||||
),
|
||||
ignore_volatile_options=dict(
|
||||
type='bool',
|
||||
default=True
|
||||
),
|
||||
devices=dict(
|
||||
type='dict',
|
||||
),
|
||||
@@ -703,7 +728,13 @@ def main():
|
||||
),
|
||||
supports_check_mode=False,
|
||||
)
|
||||
|
||||
# if module.params['ignore_volatile_options'] is None:
|
||||
# module.params['ignore_volatile_options'] = True
|
||||
# module.deprecate(
|
||||
# 'If the keyword "volatile" is used in a playbook in the config section, a
|
||||
# "changed" message will appear with every run, even without a change to the playbook.
|
||||
# This will change in the future.
|
||||
# Please test your scripts by "ignore_volatile_options: false"', version='5.0.0', collection_name='community.general')
|
||||
lxd_manage = LXDContainerManagement(module=module)
|
||||
lxd_manage.run()
|
||||
|
||||
|
||||
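The comparison change above hinges on dropping C(volatile.*) keys before diffing the desired config against the container metadata; a standalone sketch with hypothetical metadata:

```python
# Sketch of the volatile-key filtering that _needs_to_change_container_config
# performs above when ignore_volatile_options is true (metadata is hypothetical).
old_metadata_config = {
    'limits.cpu': '2',
    'volatile.base_image': 'abc123',
    'volatile.eth0.hwaddr': '00:16:3e:00:00:01',
}
desired_config = {'limits.cpu': '2'}

old_configs = {k: v for k, v in old_metadata_config.items() if not k.startswith('volatile.')}
needs_change = any(k not in old_configs or old_configs[k] != v for k, v in desired_config.items())
print(needs_change)   # False: only volatile.* keys differ, so the container is left untouched
```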
@@ -131,7 +131,7 @@ def main():
|
||||
group = module.params['group']
|
||||
|
||||
if group:
|
||||
groups = [proxmox.get_group(group=group)]
|
||||
groups = [proxmox.get_group(groupid=group)]
|
||||
else:
|
||||
groups = proxmox.get_groups()
|
||||
result['proxmox_groups'] = [group.group for group in groups]
|
||||
|
||||
plugins/modules/cloud/misc/proxmox_tasks_info.py (new file, 186 lines)
@@ -0,0 +1,186 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Andreas Botzner (@paginabianca) <andreas at botzner dot com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: proxmox_tasks_info
|
||||
short_description: Retrieve information about one or more Proxmox VE tasks
|
||||
version_added: 3.8.0
|
||||
description:
|
||||
- Retrieve information about one or more Proxmox VE tasks.
|
||||
author: 'Andreas Botzner (@paginabianca) <andreas at botzner dot com>'
|
||||
options:
|
||||
node:
|
||||
description:
|
||||
- Node where to get tasks.
|
||||
required: true
|
||||
type: str
|
||||
task:
|
||||
description:
|
||||
- Return specific task.
|
||||
aliases: ['upid', 'name']
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- community.general.proxmox.documentation
|
||||
'''
|
||||
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: List tasks on node01
|
||||
community.general.proxmox_tasks_info:
|
||||
api_host: proxmoxhost
|
||||
api_user: root@pam
|
||||
api_password: '{{ password | default(omit) }}'
|
||||
api_token_id: '{{ token_id | default(omit) }}'
|
||||
api_token_secret: '{{ token_secret | default(omit) }}'
|
||||
node: node01
|
||||
register: result
|
||||
|
||||
- name: Retrieve information about specific tasks on node01
|
||||
community.general.proxmox_tasks_info:
|
||||
api_host: proxmoxhost
|
||||
api_user: root@pam
|
||||
api_password: '{{ password | default(omit) }}'
|
||||
api_token_id: '{{ token_id | default(omit) }}'
|
||||
api_token_secret: '{{ token_secret | default(omit) }}'
|
||||
task: 'UPID:node01:00003263:16167ACE:621EE230:srvreload:networking:root@pam:'
|
||||
node: node01
|
||||
register: proxmox_tasks
|
||||
'''
|
||||
|
||||
|
||||
RETURN = '''
|
||||
proxmox_tasks:
|
||||
description: List of tasks.
|
||||
returned: on success
|
||||
type: list
|
||||
elements: dict
|
||||
contains:
|
||||
id:
|
||||
description: ID of the task.
|
||||
returned: on success
|
||||
type: str
|
||||
node:
|
||||
description: Node name.
|
||||
returned: on success
|
||||
type: str
|
||||
pid:
|
||||
description: PID of the task.
|
||||
returned: on success
|
||||
type: int
|
||||
pstart:
|
||||
description: pstart of the task.
|
||||
returned: on success
|
||||
type: int
|
||||
starttime:
|
||||
description: Starting time of the task.
|
||||
returned: on success
|
||||
type: int
|
||||
type:
|
||||
description: Type of the task.
|
||||
returned: on success
|
||||
type: str
|
||||
upid:
|
||||
description: UPID of the task.
|
||||
returned: on success
|
||||
type: str
|
||||
user:
|
||||
description: User that owns the task.
|
||||
returned: on success
|
||||
type: str
|
||||
endtime:
|
||||
description: Endtime of the task.
|
||||
returned: on success, can be absent
|
||||
type: int
|
||||
status:
|
||||
description: Status of the task.
|
||||
returned: on success, can be absent
|
||||
type: str
|
||||
failed:
|
||||
description: If the task failed.
|
||||
returned: when status is defined
|
||||
type: bool
|
||||
msg:
|
||||
description: Short message.
|
||||
returned: on failure
|
||||
type: str
|
||||
sample: 'Task: UPID:xyz:xyz does not exist on node: proxmoxnode'
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible_collections.community.general.plugins.module_utils.proxmox import (
|
||||
proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
|
||||
|
||||
|
||||
class ProxmoxTaskInfoAnsible(ProxmoxAnsible):
|
||||
def get_task(self, upid, node):
|
||||
tasks = self.get_tasks(node)
|
||||
for task in tasks:
|
||||
if task.info['upid'] == upid:
|
||||
return [task]
|
||||
|
||||
def get_tasks(self, node):
|
||||
tasks = self.proxmox_api.nodes(node).tasks.get()
|
||||
return [ProxmoxTask(task) for task in tasks]
|
||||
|
||||
|
||||
class ProxmoxTask:
|
||||
def __init__(self, task):
|
||||
self.info = dict()
|
||||
for k, v in task.items():
|
||||
if k == 'status' and isinstance(v, str):
|
||||
self.info[k] = v
|
||||
if v != 'OK':
|
||||
self.info['failed'] = True
|
||||
else:
|
||||
self.info[k] = v
|
||||
|
||||
|
||||
def proxmox_task_info_argument_spec():
|
||||
return dict(
|
||||
task=dict(type='str', aliases=['upid', 'name'], required=False),
|
||||
node=dict(type='str', required=True),
|
||||
)
|
||||
|
||||
|
||||
def main():
|
||||
module_args = proxmox_auth_argument_spec()
|
||||
task_info_args = proxmox_task_info_argument_spec()
|
||||
module_args.update(task_info_args)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=module_args,
|
||||
required_together=[('api_token_id', 'api_token_secret'),
|
||||
('api_user', 'api_password')],
|
||||
required_one_of=[('api_password', 'api_token_id')],
|
||||
supports_check_mode=True)
|
||||
result = dict(changed=False)
|
||||
|
||||
if not HAS_PROXMOXER:
|
||||
module.fail_json(msg=missing_required_lib(
|
||||
'proxmoxer'), exception=PROXMOXER_IMP_ERR)
|
||||
proxmox = ProxmoxTaskInfoAnsible(module)
|
||||
upid = module.params['task']
|
||||
node = module.params['node']
|
||||
if upid:
|
||||
tasks = proxmox.get_task(upid=upid, node=node)
|
||||
else:
|
||||
tasks = proxmox.get_tasks(node=node)
|
||||
if tasks is not None:
|
||||
result['proxmox_tasks'] = [task.info for task in tasks]
|
||||
module.exit_json(**result)
|
||||
else:
|
||||
result['msg'] = 'Task: {0} does not exist on node: {1}.'.format(
|
||||
upid, node)
|
||||
module.fail_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
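A small sketch of the status handling in C(ProxmoxTask) above, using a hypothetical task dict of the shape returned by the Proxmox API:

```python
# Sketch of the failed-flag derivation done by ProxmoxTask above
# (the task dict is a hypothetical API response, trimmed).
task = {
    'upid': 'UPID:node01:00003263:16167ACE:621EE230:srvreload:networking:root@pam:',
    'type': 'srvreload',
    'status': 'command failed with exit code 1',
}

info = dict(task)
if isinstance(task.get('status'), str) and task['status'] != 'OK':
    info['failed'] = True
print(info['failed'])   # True: any status other than 'OK' marks the task as failed
```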
@@ -137,6 +137,11 @@ options:
|
||||
type: bool
|
||||
default: false
|
||||
version_added: '3.3.0'
|
||||
parallelism:
|
||||
description:
|
||||
- Restrict concurrent operations when Terraform applies the plan.
|
||||
type: int
|
||||
version_added: '3.8.0'
|
||||
notes:
|
||||
- To just run a `terraform plan`, use check mode.
|
||||
requirements: [ "terraform" ]
|
||||
@@ -314,11 +319,25 @@ def remove_workspace(bin_path, project_path, workspace):
|
||||
_workspace_cmd(bin_path, project_path, 'delete', workspace)
|
||||
|
||||
|
||||
def build_plan(command, project_path, variables_args, state_file, targets, state, plan_path=None):
|
||||
def build_plan(command, project_path, variables_args, state_file, targets, state, apply_args, plan_path=None):
|
||||
if plan_path is None:
|
||||
f, plan_path = tempfile.mkstemp(suffix='.tfplan')
|
||||
|
||||
plan_command = [command[0], 'plan', '-input=false', '-no-color', '-detailed-exitcode', '-out', plan_path]
|
||||
local_command = command.copy()
|
||||
|
||||
plan_command = [command[0], 'plan']
|
||||
|
||||
if state == "planned":
|
||||
for c in local_command[1:]:
|
||||
plan_command.append(c)
|
||||
|
||||
if state == "present":
|
||||
for a in apply_args:
|
||||
local_command.remove(a)
|
||||
for c in local_command[1:]:
|
||||
plan_command.append(c)
|
||||
|
||||
plan_command.extend(['-input=false', '-no-color', '-detailed-exitcode', '-out', plan_path])
|
||||
|
||||
for t in targets:
|
||||
plan_command.extend(['-target', t])
|
||||
@@ -363,6 +382,7 @@ def main():
|
||||
init_reconfigure=dict(type='bool', default=False),
|
||||
overwrite_init=dict(type='bool', default=True),
|
||||
check_destroy=dict(type='bool', default=False),
|
||||
parallelism=dict(type='int'),
|
||||
),
|
||||
required_if=[('state', 'planned', ['plan_file'])],
|
||||
supports_check_mode=True,
|
||||
@@ -415,6 +435,9 @@ def main():
|
||||
elif state == 'absent':
|
||||
command.extend(DESTROY_ARGS)
|
||||
|
||||
if state == 'present' and module.params.get('parallelism') is not None:
|
||||
command.append('-parallelism=%d' % module.params.get('parallelism'))
|
||||
|
||||
variables_args = []
|
||||
for k, v in variables.items():
|
||||
variables_args.extend([
|
||||
@@ -452,7 +475,7 @@ def main():
|
||||
module.fail_json(msg='Could not find plan_file "{0}", check the path and try again.'.format(plan_file))
|
||||
else:
|
||||
plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file,
|
||||
module.params.get('targets'), state, plan_file)
|
||||
module.params.get('targets'), state, APPLY_ARGS, plan_file)
|
||||
if state == 'present' and check_destroy and '- destroy' in out:
|
||||
module.fail_json(msg="Aborting command because it would destroy some resources. "
|
||||
"Consider switching the 'check_destroy' to false to suppress this error")
|
||||
|
||||
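The reworked C(build_plan) above now reuses the module's full command when building the plan command, stripping the apply-only arguments for C(state=present) so that options such as C(-parallelism) carry over. A sketch of that filtering; the C(APPLY_ARGS) list here is an abbreviated assumption, not the exact constant from the module.

```python
# Sketch of the plan-command composition in build_plan above for state=present:
# apply-only arguments are stripped, everything else (e.g. -parallelism) is kept.
APPLY_ARGS = ['apply', '-no-color', '-input=false', '-auto-approve=true']   # abbreviated assumption

command = ['terraform', 'apply', '-no-color', '-input=false', '-auto-approve=true', '-parallelism=4']
local_command = command.copy()
for a in APPLY_ARGS:
    local_command.remove(a)

plan_command = [command[0], 'plan'] + local_command[1:]
plan_command.extend(['-input=false', '-no-color', '-detailed-exitcode', '-out', '/tmp/example.tfplan'])
print(plan_command)
# ['terraform', 'plan', '-parallelism=4', '-input=false', '-no-color', '-detailed-exitcode', '-out', '/tmp/example.tfplan']
```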
@@ -306,7 +306,7 @@ def rename_image(module, client, image, new_name):
|
||||
|
||||
tmp_image = get_image_by_name(module, client, new_name)
|
||||
if tmp_image:
|
||||
module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.id))
|
||||
module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.ID))
|
||||
|
||||
if not module.check_mode:
|
||||
client.image.rename(image.ID, new_name)
|
||||
|
||||
@@ -84,7 +84,7 @@ options:
|
||||
description:
|
||||
- the address to advertise that the service will be listening on.
|
||||
This value will be passed as the I(address) parameter to Consul's
|
||||
U(/v1/agent/service/register) API method, so refer to the Consul API
|
||||
C(/v1/agent/service/register) API method, so refer to the Consul API
|
||||
documentation for further details.
|
||||
tags:
|
||||
type: list
|
||||
|
||||
plugins/modules/database/misc/redis_data.py (new file, 249 lines)
@@ -0,0 +1,249 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Andreas Botzner <andreas at botzner dot com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: redis_data
|
||||
short_description: Set key value pairs in Redis
|
||||
version_added: 3.7.0
|
||||
description:
|
||||
- Set key value pairs in Redis database.
|
||||
author: "Andreas Botzner (@paginabianca)"
|
||||
options:
|
||||
key:
|
||||
description:
|
||||
- Database key.
|
||||
required: true
|
||||
type: str
|
||||
value:
|
||||
description:
|
||||
- Value that key should be set to.
|
||||
required: false
|
||||
type: str
|
||||
expiration:
|
||||
description:
|
||||
- Expiration time in milliseconds.
|
||||
Setting this flag will always result in a change in the database.
|
||||
required: false
|
||||
type: int
|
||||
non_existing:
|
||||
description:
|
||||
- Only set key if it does not already exist.
|
||||
required: false
|
||||
type: bool
|
||||
existing:
|
||||
description:
|
||||
- Only set key if it already exists.
|
||||
required: false
|
||||
type: bool
|
||||
keep_ttl:
|
||||
description:
|
||||
- Retain the time to live associated with the key.
|
||||
required: false
|
||||
type: bool
|
||||
state:
|
||||
description:
|
||||
- State of the key.
|
||||
default: present
|
||||
type: str
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.redis.documentation
|
||||
|
||||
seealso:
|
||||
- module: community.general.redis_data_info
|
||||
- module: community.general.redis
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Set key foo=bar on localhost with no username
|
||||
community.general.redis_data:
|
||||
login_host: localhost
|
||||
login_password: supersecret
|
||||
key: foo
|
||||
value: bar
|
||||
state: present
|
||||
|
||||
- name: Set key foo=bar if non existing with expiration of 30s
|
||||
community.general.redis_data:
|
||||
login_host: localhost
|
||||
login_password: supersecret
|
||||
key: foo
|
||||
value: bar
|
||||
non_existing: true
|
||||
expiration: 30000
|
||||
state: present
|
||||
|
||||
- name: Set key foo=bar if existing and keep current TTL
|
||||
community.general.redis_data:
|
||||
login_host: localhost
|
||||
login_password: supersecret
|
||||
key: foo
|
||||
value: bar
|
||||
existing: true
|
||||
keep_ttl: true
|
||||
|
||||
- name: Set key foo=bar on redishost with custom ca-cert file
|
||||
community.general.redis_data:
|
||||
login_host: redishost
|
||||
login_password: supersecret
|
||||
login_user: someuser
|
||||
validate_certs: true
|
||||
ssl_ca_certs: /path/to/ca/certs
|
||||
key: foo
|
||||
value: bar
|
||||
|
||||
- name: Delete key foo on localhost with no username
|
||||
community.general.redis_data:
|
||||
login_host: localhost
|
||||
login_password: supersecret
|
||||
key: foo
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
old_value:
|
||||
description: Value of key before setting.
|
||||
returned: on success if state is C(present) and key exists in database.
|
||||
type: str
|
||||
sample: 'old_value_of_key'
|
||||
value:
|
||||
description: Value key was set to.
|
||||
returned: on success if state is C(present).
|
||||
type: str
|
||||
sample: 'new_value_of_key'
|
||||
msg:
|
||||
description: A short message.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'Set key: foo to bar'
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.redis import (
|
||||
fail_imports, redis_auth_argument_spec, RedisAnsible)
|
||||
|
||||
|
||||
def main():
|
||||
redis_auth_args = redis_auth_argument_spec()
|
||||
module_args = dict(
|
||||
key=dict(type='str', required=True, no_log=False),
|
||||
value=dict(type='str', required=False),
|
||||
expiration=dict(type='int', required=False),
|
||||
non_existing=dict(type='bool', required=False),
|
||||
existing=dict(type='bool', required=False),
|
||||
keep_ttl=dict(type='bool', required=False),
|
||||
state=dict(type='str', default='present',
|
||||
choices=['present', 'absent']),
|
||||
)
|
||||
module_args.update(redis_auth_args)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=module_args,
|
||||
supports_check_mode=True,
|
||||
required_if=[('state', 'present', ('value',))],
|
||||
mutually_exclusive=[['non_existing', 'existing'],
|
||||
['keep_ttl', 'expiration']],)
|
||||
fail_imports(module)
|
||||
|
||||
redis = RedisAnsible(module)
|
||||
|
||||
key = module.params['key']
|
||||
value = module.params['value']
|
||||
px = module.params['expiration']
|
||||
nx = module.params['non_existing']
|
||||
xx = module.params['existing']
|
||||
keepttl = module.params['keep_ttl']
|
||||
state = module.params['state']
|
||||
set_args = {'name': key, 'value': value, 'px': px,
|
||||
'nx': nx, 'xx': xx, 'keepttl': keepttl}
|
||||
|
||||
result = {'changed': False}
|
||||
|
||||
old_value = None
|
||||
try:
|
||||
old_value = redis.connection.get(key)
|
||||
except Exception as e:
|
||||
msg = 'Failed to get value of key: {0} with exception: {1}'.format(
|
||||
key, str(e))
|
||||
result['msg'] = msg
|
||||
module.fail_json(**result)
|
||||
|
||||
if state == 'absent':
|
||||
if module.check_mode:
|
||||
if old_value is None:
|
||||
msg = 'Key: {0} not present'.format(key)
|
||||
result['msg'] = msg
|
||||
module.exit_json(**result)
|
||||
else:
|
||||
msg = 'Deleted key: {0}'.format(key)
|
||||
result['msg'] = msg
|
||||
module.exit_json(**result)
|
||||
try:
|
||||
ret = redis.connection.delete(key)
|
||||
if ret == 0:
|
||||
msg = 'Key: {0} not present'.format(key)
|
||||
result['msg'] = msg
|
||||
module.exit_json(**result)
|
||||
else:
|
||||
msg = 'Deleted key: {0}'.format(key)
|
||||
result['msg'] = msg
|
||||
result['changed'] = True
|
||||
module.exit_json(**result)
|
||||
except Exception as e:
|
||||
msg = 'Failed to delete key: {0} with exception: {1}'.format(
|
||||
key, str(e))
|
||||
result['msg'] = msg
|
||||
module.fail_json(**result)
|
||||
|
||||
old_value = None
|
||||
try:
|
||||
old_value = redis.connection.get(key)
|
||||
except Exception as e:
|
||||
msg = 'Failed to get value of key: {0} with exception: {1}'.format(
|
||||
key, str(e))
|
||||
result['msg'] = msg
|
||||
module.fail_json(**result)
|
||||
|
||||
result['old_value'] = old_value
|
||||
if old_value == value and keepttl is not False and px is None:
|
||||
msg = 'Key {0} already has desired value'.format(key)
|
||||
result['msg'] = msg
|
||||
result['value'] = value
|
||||
module.exit_json(**result)
|
||||
if module.check_mode:
|
||||
result['msg'] = 'Set key: {0}'.format(key)
|
||||
result['value'] = value
|
||||
module.exit_json(**result)
|
||||
try:
|
||||
ret = redis.connection.set(**set_args)
|
||||
if ret is None:
|
||||
if nx:
|
||||
msg = 'Could not set key: {0}. Key already present.'.format(
|
||||
key)
|
||||
else:
|
||||
msg = 'Could not set key: {0}. Key not present.'.format(key)
|
||||
result['msg'] = msg
|
||||
module.fail_json(**result)
|
||||
msg = 'Set key: {0}'.format(key)
|
||||
result['msg'] = msg
|
||||
result['changed'] = True
|
||||
result['value'] = value
|
||||
module.exit_json(**result)
|
||||
except Exception as e:
|
||||
msg = 'Failed to set key: {0} with exception: {1}'.format(key, str(e))
|
||||
result['msg'] = msg
|
||||
module.fail_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
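The module above forwards C(nx), C(xx), C(px), and C(keepttl) straight to redis-py's C(set()); the return-value semantics it relies on can be shown in a short sketch. This assumes a reachable local Redis instance, which is an assumption made purely for illustration.

```python
# Sketch of the redis-py set() semantics redis_data above relies on:
# set() returns True when the value was written, and None when an nx/xx
# condition was not met (which the module reports as a failure).
from redis import Redis

r = Redis(host='localhost', port=6379)              # assumed local test instance
r.delete('foo')                                      # start from a clean key
print(r.set('foo', 'bar', nx=True))                  # True - key did not exist, value written
print(r.set('foo', 'baz', nx=True))                  # None - nx blocks overwriting an existing key
print(r.set('foo', 'baz', xx=True, keepttl=True))    # True - xx only writes existing keys
```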
plugins/modules/database/misc/redis_data_info.py (new file, 111 lines)
@@ -0,0 +1,111 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Andreas Botzner <andreas at botzner dot com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: redis_data_info
short_description: Get value of key in Redis database
version_added: 3.7.0
description:
  - Get value of keys in Redis database.
author: "Andreas Botzner (@paginabianca)"
options:
  key:
    description:
      - Database key.
    type: str
    required: true

extends_documentation_fragment:
  - community.general.redis

seealso:
  - module: community.general.redis_info
  - module: community.general.redis
'''

EXAMPLES = '''
- name: Get key foo=bar from localhost with no username
  community.general.redis_data_info:
    login_host: localhost
    login_password: supersecret
    key: foo

- name: Get key foo=bar on redishost with custom ca-cert file
  community.general.redis_data_info:
    login_host: redishost
    login_password: supersecret
    login_user: someuser
    validate_certs: true
    ssl_ca_certs: /path/to/ca/certs
    key: foo
'''

RETURN = '''
exists:
  description: If the key exists in the database.
  returned: on success
  type: bool
value:
  description: Value key was set to.
  returned: if existing
  type: str
  sample: 'value_of_some_key'
msg:
  description: A short message.
  returned: always
  type: str
  sample: 'Got key: foo with value: bar'
'''

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.redis import (
    fail_imports, redis_auth_argument_spec, RedisAnsible)


def main():
    redis_auth_args = redis_auth_argument_spec()
    module_args = dict(
        key=dict(type='str', required=True, no_log=False),
    )
    module_args.update(redis_auth_args)

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True,
    )
    fail_imports(module)

    redis = RedisAnsible(module)

    key = module.params['key']
    result = {'changed': False}

    value = None
    try:
        value = redis.connection.get(key)
    except Exception as e:
        msg = 'Failed to get value of key "{0}" with exception: {1}'.format(
            key, str(e))
        result['msg'] = msg
        module.fail_json(**result)

    if value is None:
        msg = 'Key "{0}" does not exist in database'.format(key)
        result['exists'] = False
    else:
        msg = 'Got key "{0}"'.format(key)
        result['value'] = value
        result['exists'] = True
    result['msg'] = msg
    module.exit_json(**result)


if __name__ == '__main__':
    main()
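The lookup the info module performs is a single GET; a minimal sketch with plain redis-py (host name and key are placeholders) shows the same exists/value mapping:

    # Illustrative only: mirrors redis_data_info's result shape.
    import redis

    conn = redis.Redis(host='localhost', decode_responses=True)
    value = conn.get('foo')
    if value is None:
        print({'exists': False})
    else:
        print({'exists': True, 'value': value})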
@@ -158,7 +158,7 @@ def _run_xattr(module, cmd, check_rc=True):
        if line.startswith('#') or line == '':
            pass
        elif '=' in line:
            (key, val) = line.split('=')
            (key, val) = line.split('=', 1)
            result[key] = val.strip('"')
        else:
            result[line] = ''

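The one-character change above matters whenever an extended attribute value itself contains an equals sign; a quick sketch of the difference (plain Python, the attribute name and value are made up):

    line = 'user.note="a=b=c"'
    print(line.split('='))      # ['user.note', '"a', 'b', 'c"'] -> unpacking into (key, val) raises ValueError
    print(line.split('=', 1))   # ['user.note', '"a=b=c"']       -> exactly two fields, as the code expects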
@@ -72,6 +72,12 @@ options:
|
||||
aliases: ["searchtimelimit"]
|
||||
type: int
|
||||
version_added: '2.5.0'
|
||||
ipaselinuxusermaporder:
|
||||
description: The SELinux user map order (order in increasing priority of SELinux users).
|
||||
aliases: ["selinuxusermaporder"]
|
||||
type: list
|
||||
elements: str
|
||||
version_added: '3.7.0'
|
||||
ipauserauthtype:
|
||||
description: The authentication type to use by default.
|
||||
aliases: ["userauthtype"]
|
||||
@@ -181,6 +187,18 @@ EXAMPLES = r'''
|
||||
ipa_host: localhost
|
||||
ipa_user: admin
|
||||
ipa_pass: supersecret
|
||||
|
||||
- name: Ensure the SELinux user map order is set
|
||||
community.general.ipa_config:
|
||||
ipaselinuxusermaporder:
|
||||
- "guest_u:s0"
|
||||
- "xguest_u:s0"
|
||||
- "user_u:s0"
|
||||
- "staff_u:s0-s0:c0.c1023"
|
||||
- "unconfined_u:s0-s0:c0.c1023"
|
||||
ipa_host: localhost
|
||||
ipa_user: admin
|
||||
ipa_pass: supersecret
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
@@ -213,8 +231,8 @@ def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None,
|
||||
ipagroupsearchfields=None, ipahomesrootdir=None,
|
||||
ipakrbauthzdata=None, ipamaxusernamelength=None,
|
||||
ipapwdexpadvnotify=None, ipasearchrecordslimit=None,
|
||||
ipasearchtimelimit=None, ipauserauthtype=None,
|
||||
ipausersearchfields=None):
|
||||
ipasearchtimelimit=None, ipaselinuxusermaporder=None,
|
||||
ipauserauthtype=None, ipausersearchfields=None):
|
||||
config = {}
|
||||
if ipaconfigstring is not None:
|
||||
config['ipaconfigstring'] = ipaconfigstring
|
||||
@@ -238,6 +256,8 @@ def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None,
|
||||
config['ipasearchrecordslimit'] = str(ipasearchrecordslimit)
|
||||
if ipasearchtimelimit is not None:
|
||||
config['ipasearchtimelimit'] = str(ipasearchtimelimit)
|
||||
if ipaselinuxusermaporder is not None:
|
||||
config['ipaselinuxusermaporder'] = '$'.join(ipaselinuxusermaporder)
|
||||
if ipauserauthtype is not None:
|
||||
config['ipauserauthtype'] = ipauserauthtype
|
||||
if ipausersearchfields is not None:
|
||||
@@ -263,6 +283,7 @@ def ensure(module, client):
|
||||
ipapwdexpadvnotify=module.params.get('ipapwdexpadvnotify'),
|
||||
ipasearchrecordslimit=module.params.get('ipasearchrecordslimit'),
|
||||
ipasearchtimelimit=module.params.get('ipasearchtimelimit'),
|
||||
ipaselinuxusermaporder=module.params.get('ipaselinuxusermaporder'),
|
||||
ipauserauthtype=module.params.get('ipauserauthtype'),
|
||||
ipausersearchfields=module.params.get('ipausersearchfields'),
|
||||
)
|
||||
@@ -304,6 +325,8 @@ def main():
|
||||
ipapwdexpadvnotify=dict(type='int', aliases=['pwdexpadvnotify']),
|
||||
ipasearchrecordslimit=dict(type='int', aliases=['searchrecordslimit']),
|
||||
ipasearchtimelimit=dict(type='int', aliases=['searchtimelimit']),
|
||||
ipaselinuxusermaporder=dict(type='list', elements='str',
|
||||
aliases=['selinuxusermaporder']),
|
||||
ipauserauthtype=dict(type='list', elements='str',
|
||||
aliases=['userauthtype'],
|
||||
choices=["password", "radius", "otp", "pkinit",
|
||||
|
||||
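The new option is a list in the module interface, but FreeIPA stores the SELinux user map order as a single '$'-separated string, which is exactly what the '$'.join(...) in get_config_dict produces. For example, with the values from the EXAMPLES section:

    order = ["guest_u:s0", "xguest_u:s0", "user_u:s0",
             "staff_u:s0-s0:c0.c1023", "unconfined_u:s0-s0:c0.c1023"]
    print('$'.join(order))
    # guest_u:s0$xguest_u:s0$user_u:s0$staff_u:s0-s0:c0.c1023$unconfined_u:s0-s0:c0.c1023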
@@ -195,7 +195,6 @@ def create_or_update_executions(kc, config, realm='master'):
|
||||
:param kc: Keycloak API access.
|
||||
:param config: Representation of the authentication flow including its executions.
|
||||
:param realm: Realm
|
||||
:return: True if executions have been modified. False otherwise.
|
||||
:return: tuple (changed, dict(before, after))
|
||||
WHERE
|
||||
bool changed indicates if changes have been made
|
||||
@@ -235,10 +234,14 @@ def create_or_update_executions(kc, config, realm='master'):
|
||||
elif new_exec["providerId"] is not None:
|
||||
kc.create_execution(new_exec, flowAlias=flow_alias_parent, realm=realm)
|
||||
exec_found = True
|
||||
exec_index = new_exec_index
|
||||
id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"]
|
||||
after += str(new_exec) + '\n'
|
||||
elif new_exec["displayName"] is not None:
|
||||
kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm)
|
||||
exec_found = True
|
||||
exec_index = new_exec_index
|
||||
id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"]
|
||||
after += str(new_exec) + '\n'
|
||||
if exec_found:
|
||||
changed = True
|
||||
|
||||
@@ -295,6 +295,20 @@ EXAMPLES = '''
|
||||
clientAuthMethod: client_secret_post
|
||||
clientId: my-client
|
||||
clientSecret: secret
|
||||
syncMode: FORCE
|
||||
mappers:
|
||||
- name: first_name
|
||||
identityProviderMapper: oidc-user-attribute-idp-mapper
|
||||
config:
|
||||
claim: first_name
|
||||
user.attribute: first_name
|
||||
syncMode: INHERIT
|
||||
- name: last_name
|
||||
identityProviderMapper: oidc-user-attribute-idp-mapper
|
||||
config:
|
||||
claim: last_name
|
||||
user.attribute: last_name
|
||||
syncMode: INHERIT
|
||||
|
||||
- name: Create SAML identity provider, authentication with credentials
|
||||
community.general.keycloak_identity_provider:
|
||||
@@ -313,6 +327,14 @@ EXAMPLES = '''
|
||||
singleSignOnServiceUrl: https://idp.example.com/login
|
||||
wantAuthnRequestsSigned: true
|
||||
wantAssertionsSigned: true
|
||||
mappers:
|
||||
- name: roles
|
||||
identityProviderMapper: saml-user-attribute-idp-mapper
|
||||
config:
|
||||
user.attribute: roles
|
||||
attribute.friendly.name: User Roles
|
||||
attribute.name: roles
|
||||
syncMode: INHERIT
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
@@ -400,15 +422,15 @@ end_state:
|
||||
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
|
||||
keycloak_argument_spec, get_token, KeycloakError
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from copy import deepcopy
|
||||
|
||||
|
||||
def sanitize(idp):
|
||||
result = idp.copy()
|
||||
if 'config' in result:
|
||||
result['config'] = sanitize(result['config'])
|
||||
if 'clientSecret' in result:
|
||||
result['clientSecret'] = '**********'
|
||||
return result
|
||||
idpcopy = deepcopy(idp)
|
||||
if 'config' in idpcopy:
|
||||
if 'clientSecret' in idpcopy['config']:
|
||||
idpcopy['config']['clientSecret'] = '**********'
|
||||
return idpcopy
|
||||
|
||||
|
||||
def get_identity_provider_with_mappers(kc, alias, realm):
|
||||
@@ -493,18 +515,29 @@ def main():
|
||||
changeset[camel(param)] = new_param_value
|
||||
|
||||
# special handling of mappers list to allow change detection
|
||||
changeset['mappers'] = before_idp.get('mappers', list())
|
||||
if module.params.get('mappers') is not None:
|
||||
for new_mapper in module.params.get('mappers'):
|
||||
old_mapper = next((x for x in changeset['mappers'] if x['name'] == new_mapper['name']), None)
|
||||
new_mapper = dict((k, v) for k, v in new_mapper.items() if new_mapper[k] is not None)
|
||||
if old_mapper is not None:
|
||||
old_mapper.update(new_mapper)
|
||||
for change in module.params['mappers']:
|
||||
change = dict((k, v) for k, v in change.items() if change[k] is not None)
|
||||
if change.get('id') is None and change.get('name') is None:
|
||||
module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.')
|
||||
if before_idp == dict():
|
||||
old_mapper = dict()
|
||||
elif change.get('id') is not None:
|
||||
old_mapper = kc.get_identity_provider_mapper(change['id'], alias, realm)
|
||||
if old_mapper is None:
|
||||
old_mapper = dict()
|
||||
else:
|
||||
found = [x for x in kc.get_identity_provider_mappers(alias, realm) if x['name'] == change['name']]
|
||||
if len(found) == 1:
|
||||
old_mapper = found[0]
|
||||
else:
|
||||
old_mapper = dict()
|
||||
new_mapper = old_mapper.copy()
|
||||
new_mapper.update(change)
|
||||
if new_mapper != old_mapper:
|
||||
if changeset.get('mappers') is None:
|
||||
changeset['mappers'] = list()
|
||||
changeset['mappers'].append(new_mapper)
|
||||
# remove mappers if not present in module params
|
||||
changeset['mappers'] = [x for x in changeset['mappers']
|
||||
if [y for y in module.params.get('mappers', []) if y['name'] == x['name']] != []]
|
||||
|
||||
# prepare the new representation
|
||||
updated_idp = before_idp.copy()
|
||||
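The mapper handling above follows a copy-update-compare pattern: fields the user did not set are dropped from the desired mapper, the remainder is merged onto the existing mapper, and the result is only added to the changeset when the merge actually changes something. Stripped of the Keycloak API calls, the idea looks like this (illustrative sketch, not the module code):

    def merge_if_changed(old_mapper, change):
        # drop keys the user did not set so they do not overwrite existing values
        change = dict((k, v) for k, v in change.items() if v is not None)
        new_mapper = dict(old_mapper)
        new_mapper.update(change)
        return new_mapper if new_mapper != old_mapper else None

    print(merge_if_changed({'name': 'first_name', 'config': {'syncMode': 'IMPORT'}},
                           {'name': 'first_name', 'config': {'syncMode': 'INHERIT'}, 'id': None}))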
@@ -538,6 +571,8 @@ def main():
|
||||
mappers = updated_idp.pop('mappers', [])
|
||||
kc.create_identity_provider(updated_idp, realm)
|
||||
for mapper in mappers:
|
||||
if mapper.get('identityProviderAlias') is None:
|
||||
mapper['identityProviderAlias'] = alias
|
||||
kc.create_identity_provider_mapper(mapper, alias, realm)
|
||||
after_idp = get_identity_provider_with_mappers(kc, alias, realm)
|
||||
|
||||
@@ -572,6 +607,8 @@ def main():
|
||||
if mapper.get('id') is not None:
|
||||
kc.update_identity_provider_mapper(mapper, alias, realm)
|
||||
else:
|
||||
if mapper.get('identityProviderAlias') is None:
|
||||
mapper['identityProviderAlias'] = alias
|
||||
kc.create_identity_provider_mapper(mapper, alias, realm)
|
||||
for mapper in [x for x in before_idp['mappers']
|
||||
if [y for y in updated_mappers if y["name"] == x['name']] == []]:
|
||||
|
||||
979
plugins/modules/identity/keycloak/keycloak_user_federation.py
Normal file
@@ -0,0 +1,979 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: keycloak_user_federation
|
||||
|
||||
short_description: Allows administration of Keycloak user federations via Keycloak API
|
||||
|
||||
version_added: 3.7.0
|
||||
|
||||
description:
|
||||
- This module allows you to add, remove or modify Keycloak user federations via the Keycloak REST API.
|
||||
It requires access to the REST API via OpenID Connect; the user connecting and the client being
|
||||
used must have the requisite access rights. In a default Keycloak installation, admin-cli
|
||||
and an admin user would work, as would a separate client definition with the scope tailored
|
||||
to your needs and a user having the expected roles.
|
||||
|
||||
- The names of module options are snake_cased versions of the camelCase ones found in the
|
||||
Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/15.0/rest-api/index.html).
|
||||
|
||||
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- State of the user federation.
|
||||
- On C(present), the user federation will be created if it does not yet exist, or updated with
|
||||
the parameters you provide.
|
||||
- On C(absent), the user federation will be removed if it exists.
|
||||
default: 'present'
|
||||
type: str
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
|
||||
realm:
|
||||
description:
|
||||
- The Keycloak realm under which this user federation resides.
|
||||
default: 'master'
|
||||
type: str
|
||||
|
||||
id:
|
||||
description:
|
||||
- The unique ID for this user federation. If left empty, the user federation will be searched
|
||||
by its I(name).
|
||||
type: str
|
||||
|
||||
name:
|
||||
description:
|
||||
- Display name of provider when linked in admin console.
|
||||
type: str
|
||||
|
||||
provider_id:
|
||||
description:
|
||||
- Provider for this user federation.
|
||||
aliases:
|
||||
- providerId
|
||||
type: str
|
||||
choices:
|
||||
- ldap
|
||||
- kerberos
|
||||
|
||||
provider_type:
|
||||
description:
|
||||
- Component type for user federation (only supported value is C(org.keycloak.storage.UserStorageProvider)).
|
||||
aliases:
|
||||
- providerType
|
||||
default: org.keycloak.storage.UserStorageProvider
|
||||
type: str
|
||||
|
||||
parent_id:
|
||||
description:
|
||||
- Unique ID for the parent of this user federation. Realm ID will be automatically used if left blank.
|
||||
aliases:
|
||||
- parentId
|
||||
type: str
|
||||
|
||||
config:
|
||||
description:
|
||||
- Dict specifying the configuration options for the provider; the contents differ depending on
|
||||
the value of I(provider_id). Examples are given below for C(ldap) and C(kerberos). It is easiest
|
||||
to obtain valid config values by dumping an already-existing user federation configuration
|
||||
through check-mode in the I(existing) field.
|
||||
type: dict
|
||||
suboptions:
|
||||
enabled:
|
||||
description:
|
||||
- Enable/disable this user federation.
|
||||
default: true
|
||||
type: bool
|
||||
|
||||
priority:
|
||||
description:
|
||||
- Priority of provider when doing a user lookup. Lowest first.
|
||||
default: 0
|
||||
type: int
|
||||
|
||||
importEnabled:
|
||||
description:
|
||||
- If C(true), LDAP users will be imported into Keycloak DB and synced by the configured
|
||||
sync policies.
|
||||
default: true
|
||||
type: bool
|
||||
|
||||
editMode:
|
||||
description:
|
||||
- C(READ_ONLY) is a read-only LDAP store. C(WRITABLE) means data will be synced back to LDAP
|
||||
on demand. C(UNSYNCED) means user data will be imported, but not synced back to LDAP.
|
||||
type: str
|
||||
choices:
|
||||
- READ_ONLY
|
||||
- WRITABLE
|
||||
- UNSYNCED
|
||||
|
||||
syncRegistrations:
|
||||
description:
|
||||
- Should newly created users be created within LDAP store? Priority affects which
|
||||
provider is chosen to sync the new user.
|
||||
default: false
|
||||
type: bool
|
||||
|
||||
vendor:
|
||||
description:
|
||||
- LDAP vendor (provider).
|
||||
type: str
|
||||
|
||||
usernameLDAPAttribute:
|
||||
description:
|
||||
- Name of LDAP attribute, which is mapped as Keycloak username. For many LDAP server
|
||||
vendors it can be C(uid). For Active directory it can be C(sAMAccountName) or C(cn).
|
||||
The attribute should be filled for all LDAP user records you want to import from
|
||||
LDAP to Keycloak.
|
||||
type: str
|
||||
|
||||
rdnLDAPAttribute:
|
||||
description:
|
||||
- Name of LDAP attribute, which is used as RDN (top attribute) of typical user DN.
|
||||
Usually it's the same as Username LDAP attribute, however it is not required. For
|
||||
example for Active directory, it is common to use C(cn) as RDN attribute when
|
||||
username attribute might be C(sAMAccountName).
|
||||
type: str
|
||||
|
||||
uuidLDAPAttribute:
|
||||
description:
|
||||
- Name of LDAP attribute, which is used as unique object identifier (UUID) for objects
|
||||
in LDAP. For many LDAP server vendors, it is C(entryUUID); however some are different.
|
||||
For example for Active directory it should be C(objectGUID). If your LDAP server does
|
||||
not support the notion of UUID, you can use any other attribute that is supposed to
|
||||
be unique among LDAP users in tree.
|
||||
type: str
|
||||
|
||||
userObjectClasses:
|
||||
description:
|
||||
- All values of LDAP objectClass attribute for users in LDAP divided by comma.
|
||||
For example C(inetOrgPerson, organizationalPerson). Newly created Keycloak users
|
||||
will be written to LDAP with all those object classes and existing LDAP user records
|
||||
are found just if they contain all those object classes.
|
||||
type: str
|
||||
|
||||
connectionUrl:
|
||||
description:
|
||||
- Connection URL to your LDAP server.
|
||||
type: str
|
||||
|
||||
usersDn:
|
||||
description:
|
||||
- Full DN of LDAP tree where your users are. This DN is the parent of LDAP users.
|
||||
type: str
|
||||
|
||||
customUserSearchFilter:
|
||||
description:
|
||||
- Additional LDAP Filter for filtering searched users. Leave this empty if you don't
|
||||
need additional filter.
|
||||
type: str
|
||||
|
||||
searchScope:
|
||||
description:
|
||||
- For one level, the search applies only for users in the DNs specified by User DNs.
|
||||
For subtree, the search applies to the whole subtree. See LDAP documentation for
|
||||
more details
|
||||
default: '1'
|
||||
type: str
|
||||
choices:
|
||||
- '1'
|
||||
- '2'
|
||||
|
||||
authType:
|
||||
description:
|
||||
- Type of the Authentication method used during LDAP Bind operation. It is used in
|
||||
most of the requests sent to the LDAP server.
|
||||
default: 'none'
|
||||
type: str
|
||||
choices:
|
||||
- none
|
||||
- simple
|
||||
|
||||
bindDn:
|
||||
description:
|
||||
- DN of LDAP user which will be used by Keycloak to access LDAP server.
|
||||
type: str
|
||||
|
||||
bindCredential:
|
||||
description:
|
||||
- Password of LDAP admin.
|
||||
type: str
|
||||
|
||||
startTls:
|
||||
description:
|
||||
- Encrypts the connection to LDAP using STARTTLS, which will disable connection pooling.
|
||||
default: false
|
||||
type: bool
|
||||
|
||||
usePasswordModifyExtendedOp:
|
||||
description:
|
||||
- Use the LDAPv3 Password Modify Extended Operation (RFC-3062). The password modify
|
||||
extended operation usually requires that LDAP user already has password in the LDAP
|
||||
server. So when this is used with 'Sync Registrations', it can be good to add also
|
||||
'Hardcoded LDAP attribute mapper' with randomly generated initial password.
|
||||
default: false
|
||||
type: bool
|
||||
|
||||
validatePasswordPolicy:
|
||||
description:
|
||||
- Determines if Keycloak should validate the password with the realm password policy
|
||||
before updating it.
|
||||
default: false
|
||||
type: bool
|
||||
|
||||
trustEmail:
|
||||
description:
|
||||
- If enabled, email provided by this provider is not verified even if verification is
|
||||
enabled for the realm.
|
||||
default: false
|
||||
type: bool
|
||||
|
||||
useTruststoreSpi:
|
||||
description:
|
||||
- Specifies whether LDAP connection will use the truststore SPI with the truststore
|
||||
configured in standalone.xml/domain.xml. C(Always) means that it will always use it.
|
||||
C(Never) means that it will not use it. C(Only for ldaps) means that it will use it if
|
||||
your connection URL uses ldaps. Note that even if standalone.xml/domain.xml is not
|
||||
configured, the default Java cacerts or certificate specified by
|
||||
C(javax.net.ssl.trustStore) property will be used.
|
||||
default: ldapsOnly
|
||||
type: str
|
||||
choices:
|
||||
- always
|
||||
- ldapsOnly
|
||||
- never
|
||||
|
||||
connectionTimeout:
|
||||
description:
|
||||
- LDAP Connection Timeout in milliseconds.
|
||||
type: int
|
||||
|
||||
readTimeout:
|
||||
description:
|
||||
- LDAP Read Timeout in milliseconds. This timeout applies for LDAP read operations.
|
||||
type: int
|
||||
|
||||
pagination:
|
||||
description:
|
||||
- Does the LDAP server support pagination.
|
||||
default: true
|
||||
type: bool
|
||||
|
||||
connectionPooling:
|
||||
description:
|
||||
- Determines if Keycloak should use connection pooling for accessing LDAP server.
|
||||
default: true
|
||||
type: bool
|
||||
|
||||
connectionPoolingAuthentication:
|
||||
description:
|
||||
- A list of space-separated authentication types of connections that may be pooled.
|
||||
type: str
|
||||
choices:
|
||||
- none
|
||||
- simple
|
||||
- DIGEST-MD5
|
||||
|
||||
connectionPoolingDebug:
|
||||
description:
|
||||
- A string that indicates the level of debug output to produce. Example valid values are
|
||||
C(fine) (trace connection creation and removal) and C(all) (all debugging information).
|
||||
type: str
|
||||
|
||||
connectionPoolingInitSize:
|
||||
description:
|
||||
- The number of connections per connection identity to create when initially creating a
|
||||
connection for the identity.
|
||||
type: int
|
||||
|
||||
connectionPoolingMaxSize:
|
||||
description:
|
||||
- The maximum number of connections per connection identity that can be maintained
|
||||
concurrently.
|
||||
type: int
|
||||
|
||||
connectionPoolingPrefSize:
|
||||
description:
|
||||
- The preferred number of connections per connection identity that should be maintained
|
||||
concurrently.
|
||||
type: int
|
||||
|
||||
connectionPoolingProtocol:
|
||||
description:
|
||||
- A list of space-separated protocol types of connections that may be pooled.
|
||||
Valid types are C(plain) and C(ssl).
|
||||
type: str
|
||||
|
||||
connectionPoolingTimeout:
|
||||
description:
|
||||
- The number of milliseconds that an idle connection may remain in the pool without
|
||||
being closed and removed from the pool.
|
||||
type: int
|
||||
|
||||
allowKerberosAuthentication:
|
||||
description:
|
||||
- Enable/disable HTTP authentication of users with SPNEGO/Kerberos tokens. The data
|
||||
about authenticated users will be provisioned from this LDAP server.
|
||||
default: false
|
||||
type: bool
|
||||
|
||||
kerberosRealm:
|
||||
description:
|
||||
- Name of kerberos realm.
|
||||
type: str
|
||||
|
||||
serverPrincipal:
|
||||
description:
|
||||
- Full name of server principal for HTTP service including server and domain name. For
|
||||
example C(HTTP/host.foo.org@FOO.ORG). Use C(*) to accept any service principal in the
|
||||
KeyTab file.
|
||||
type: str
|
||||
|
||||
keyTab:
|
||||
description:
|
||||
- Location of Kerberos KeyTab file containing the credentials of server principal. For
|
||||
example C(/etc/krb5.keytab).
|
||||
type: str
|
||||
|
||||
debug:
|
||||
description:
|
||||
- Enable/disable debug logging to standard output for Krb5LoginModule.
|
||||
type: bool
|
||||
|
||||
useKerberosForPasswordAuthentication:
|
||||
description:
|
||||
- Use Kerberos login module to authenticate username/password against the Kerberos server
|
||||
instead of authenticating against LDAP server with Directory Service API.
|
||||
default: false
|
||||
type: bool
|
||||
|
||||
allowPasswordAuthentication:
|
||||
description:
|
||||
- Enable/disable possibility of username/password authentication against Kerberos database.
|
||||
type: bool
|
||||
|
||||
batchSizeForSync:
|
||||
description:
|
||||
- Count of LDAP users to be imported from LDAP to Keycloak within a single transaction.
|
||||
default: 1000
|
||||
type: int
|
||||
|
||||
fullSyncPeriod:
|
||||
description:
|
||||
- Period for full synchronization in seconds.
|
||||
default: -1
|
||||
type: int
|
||||
|
||||
changedSyncPeriod:
|
||||
description:
|
||||
- Period for synchronization of changed or newly created LDAP users in seconds.
|
||||
default: -1
|
||||
type: int
|
||||
|
||||
updateProfileFirstLogin:
|
||||
description:
|
||||
- Update profile on first login.
|
||||
type: bool
|
||||
|
||||
cachePolicy:
|
||||
description:
|
||||
- Cache Policy for this storage provider.
|
||||
type: str
|
||||
default: 'DEFAULT'
|
||||
choices:
|
||||
- DEFAULT
|
||||
- EVICT_DAILY
|
||||
- EVICT_WEEKLY
|
||||
- MAX_LIFESPAN
|
||||
- NO_CACHE
|
||||
|
||||
evictionDay:
|
||||
description:
|
||||
- Day of the week the entry will become invalid on.
|
||||
type: str
|
||||
|
||||
evictionHour:
|
||||
description:
|
||||
- Hour of day the entry will become invalid on.
|
||||
type: str
|
||||
|
||||
evictionMinute:
|
||||
description:
|
||||
- Minute of day the entry will become invalid on.
|
||||
type: str
|
||||
|
||||
maxLifespan:
|
||||
description:
|
||||
- Max lifespan of cache entry in milliseconds.
|
||||
type: int
|
||||
|
||||
mappers:
|
||||
description:
|
||||
- A list of dicts defining mappers associated with this Identity Provider.
|
||||
type: list
|
||||
elements: dict
|
||||
suboptions:
|
||||
id:
|
||||
description:
|
||||
- Unique ID of this mapper.
|
||||
type: str
|
||||
|
||||
name:
|
||||
description:
|
||||
- Name of the mapper. If no ID is given, the mapper will be searched by name.
|
||||
type: str
|
||||
|
||||
parentId:
|
||||
description:
|
||||
- Unique ID for the parent of this mapper. ID of the user federation will automatically
|
||||
be used if left blank.
|
||||
type: str
|
||||
|
||||
providerId:
|
||||
description:
|
||||
- The mapper type for this mapper (for instance C(user-attribute-ldap-mapper)).
|
||||
type: str
|
||||
|
||||
providerType:
|
||||
description:
|
||||
- Component type for this mapper (only supported value is C(org.keycloak.storage.ldap.mappers.LDAPStorageMapper)).
|
||||
type: str
|
||||
|
||||
config:
|
||||
description:
|
||||
- Dict specifying the configuration options for the mapper; the contents differ
|
||||
depending on the value of I(identityProviderMapper).
|
||||
type: dict
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.keycloak
|
||||
|
||||
author:
|
||||
- Laurent Paumier (@laurpaum)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create LDAP user federation
|
||||
community.general.keycloak_user_federation:
|
||||
auth_keycloak_url: https://keycloak.example.com/auth
|
||||
auth_realm: master
|
||||
auth_username: admin
|
||||
auth_password: password
|
||||
realm: my-realm
|
||||
name: my-ldap
|
||||
state: present
|
||||
provider_id: ldap
|
||||
provider_type: org.keycloak.storage.UserStorageProvider
|
||||
config:
|
||||
priority: 0
|
||||
enabled: true
|
||||
cachePolicy: DEFAULT
|
||||
batchSizeForSync: 1000
|
||||
editMode: READ_ONLY
|
||||
importEnabled: true
|
||||
syncRegistrations: false
|
||||
vendor: other
|
||||
usernameLDAPAttribute: uid
|
||||
rdnLDAPAttribute: uid
|
||||
uuidLDAPAttribute: entryUUID
|
||||
userObjectClasses: inetOrgPerson, organizationalPerson
|
||||
connectionUrl: ldaps://ldap.example.com:636
|
||||
usersDn: ou=Users,dc=example,dc=com
|
||||
authType: simple
|
||||
bindDn: cn=directory reader
|
||||
bindCredential: password
|
||||
searchScope: 1
|
||||
validatePasswordPolicy: false
|
||||
trustEmail: false
|
||||
useTruststoreSpi: ldapsOnly
|
||||
connectionPooling: true
|
||||
pagination: true
|
||||
allowKerberosAuthentication: false
|
||||
debug: false
|
||||
useKerberosForPasswordAuthentication: false
|
||||
mappers:
|
||||
- name: "full name"
|
||||
providerId: "full-name-ldap-mapper"
|
||||
providerType: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
|
||||
config:
|
||||
ldap.full.name.attribute: cn
|
||||
read.only: true
|
||||
write.only: false
|
||||
|
||||
- name: Create Kerberos user federation
|
||||
community.general.keycloak_user_federation:
|
||||
auth_keycloak_url: https://keycloak.example.com/auth
|
||||
auth_realm: master
|
||||
auth_username: admin
|
||||
auth_password: password
|
||||
realm: my-realm
|
||||
name: my-kerberos
|
||||
state: present
|
||||
provider_id: kerberos
|
||||
provider_type: org.keycloak.storage.UserStorageProvider
|
||||
config:
|
||||
priority: 0
|
||||
enabled: true
|
||||
cachePolicy: DEFAULT
|
||||
kerberosRealm: EXAMPLE.COM
|
||||
serverPrincipal: HTTP/host.example.com@EXAMPLE.COM
|
||||
keyTab: keytab
|
||||
allowPasswordAuthentication: false
|
||||
updateProfileFirstLogin: false
|
||||
|
||||
- name: Delete user federation
|
||||
community.general.keycloak_user_federation:
|
||||
auth_keycloak_url: https://keycloak.example.com/auth
|
||||
auth_realm: master
|
||||
auth_username: admin
|
||||
auth_password: password
|
||||
realm: my-realm
|
||||
name: my-federation
|
||||
state: absent
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
msg:
|
||||
description: Message as to what action was taken.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "No changes required to user federation 164bb483-c613-482e-80fe-7f1431308799."
|
||||
|
||||
proposed:
|
||||
description: Representation of proposed changes to user federation.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"config": {
|
||||
"allowKerberosAuthentication": "false",
|
||||
"authType": "simple",
|
||||
"batchSizeForSync": "1000",
|
||||
"bindCredential": "**********",
|
||||
"bindDn": "cn=directory reader",
|
||||
"cachePolicy": "DEFAULT",
|
||||
"connectionPooling": "true",
|
||||
"connectionUrl": "ldaps://ldap.example.com:636",
|
||||
"debug": "false",
|
||||
"editMode": "READ_ONLY",
|
||||
"enabled": "true",
|
||||
"importEnabled": "true",
|
||||
"pagination": "true",
|
||||
"priority": "0",
|
||||
"rdnLDAPAttribute": "uid",
|
||||
"searchScope": "1",
|
||||
"syncRegistrations": "false",
|
||||
"trustEmail": "false",
|
||||
"useKerberosForPasswordAuthentication": "false",
|
||||
"useTruststoreSpi": "ldapsOnly",
|
||||
"userObjectClasses": "inetOrgPerson, organizationalPerson",
|
||||
"usernameLDAPAttribute": "uid",
|
||||
"usersDn": "ou=Users,dc=example,dc=com",
|
||||
"uuidLDAPAttribute": "entryUUID",
|
||||
"validatePasswordPolicy": "false",
|
||||
"vendor": "other"
|
||||
},
|
||||
"name": "ldap",
|
||||
"providerId": "ldap",
|
||||
"providerType": "org.keycloak.storage.UserStorageProvider"
|
||||
}
|
||||
|
||||
existing:
|
||||
description: Representation of existing user federation.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"config": {
|
||||
"allowKerberosAuthentication": "false",
|
||||
"authType": "simple",
|
||||
"batchSizeForSync": "1000",
|
||||
"bindCredential": "**********",
|
||||
"bindDn": "cn=directory reader",
|
||||
"cachePolicy": "DEFAULT",
|
||||
"changedSyncPeriod": "-1",
|
||||
"connectionPooling": "true",
|
||||
"connectionUrl": "ldaps://ldap.example.com:636",
|
||||
"debug": "false",
|
||||
"editMode": "READ_ONLY",
|
||||
"enabled": "true",
|
||||
"fullSyncPeriod": "-1",
|
||||
"importEnabled": "true",
|
||||
"pagination": "true",
|
||||
"priority": "0",
|
||||
"rdnLDAPAttribute": "uid",
|
||||
"searchScope": "1",
|
||||
"syncRegistrations": "false",
|
||||
"trustEmail": "false",
|
||||
"useKerberosForPasswordAuthentication": "false",
|
||||
"useTruststoreSpi": "ldapsOnly",
|
||||
"userObjectClasses": "inetOrgPerson, organizationalPerson",
|
||||
"usernameLDAPAttribute": "uid",
|
||||
"usersDn": "ou=Users,dc=example,dc=com",
|
||||
"uuidLDAPAttribute": "entryUUID",
|
||||
"validatePasswordPolicy": "false",
|
||||
"vendor": "other"
|
||||
},
|
||||
"id": "01122837-9047-4ae4-8ca0-6e2e891a765f",
|
||||
"mappers": [
|
||||
{
|
||||
"config": {
|
||||
"always.read.value.from.ldap": "false",
|
||||
"is.mandatory.in.ldap": "false",
|
||||
"ldap.attribute": "mail",
|
||||
"read.only": "true",
|
||||
"user.model.attribute": "email"
|
||||
},
|
||||
"id": "17d60ce2-2d44-4c2c-8b1f-1fba601b9a9f",
|
||||
"name": "email",
|
||||
"parentId": "01122837-9047-4ae4-8ca0-6e2e891a765f",
|
||||
"providerId": "user-attribute-ldap-mapper",
|
||||
"providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
|
||||
}
|
||||
],
|
||||
"name": "myfed",
|
||||
"parentId": "myrealm",
|
||||
"providerId": "ldap",
|
||||
"providerType": "org.keycloak.storage.UserStorageProvider"
|
||||
}
|
||||
|
||||
end_state:
|
||||
description: Representation of user federation after module execution.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"config": {
|
||||
"allowPasswordAuthentication": "false",
|
||||
"cachePolicy": "DEFAULT",
|
||||
"enabled": "true",
|
||||
"kerberosRealm": "EXAMPLE.COM",
|
||||
"keyTab": "/etc/krb5.keytab",
|
||||
"priority": "0",
|
||||
"serverPrincipal": "HTTP/host.example.com@EXAMPLE.COM",
|
||||
"updateProfileFirstLogin": "false"
|
||||
},
|
||||
"id": "cf52ae4f-4471-4435-a0cf-bb620cadc122",
|
||||
"mappers": [],
|
||||
"name": "kerberos",
|
||||
"parentId": "myrealm",
|
||||
"providerId": "kerberos",
|
||||
"providerType": "org.keycloak.storage.UserStorageProvider"
|
||||
}
|
||||
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
|
||||
keycloak_argument_spec, get_token, KeycloakError
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlencode
|
||||
from copy import deepcopy
|
||||
|
||||
|
||||
def sanitize(comp):
|
||||
compcopy = deepcopy(comp)
|
||||
if 'config' in compcopy:
|
||||
compcopy['config'] = dict((k, v[0]) for k, v in compcopy['config'].items())
|
||||
if 'bindCredential' in compcopy['config']:
|
||||
compcopy['config']['bindCredential'] = '**********'
|
||||
if 'mappers' in compcopy:
|
||||
for mapper in compcopy['mappers']:
|
||||
if 'config' in mapper:
|
||||
mapper['config'] = dict((k, v[0]) for k, v in mapper['config'].items())
|
||||
return compcopy
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
Module execution
|
||||
|
||||
:return:
|
||||
"""
|
||||
argument_spec = keycloak_argument_spec()
|
||||
|
||||
config_spec = dict(
|
||||
allowKerberosAuthentication=dict(type='bool', default=False),
|
||||
allowPasswordAuthentication=dict(type='bool'),
|
||||
authType=dict(type='str', choices=['none', 'simple'], default='none'),
|
||||
batchSizeForSync=dict(type='int', default=1000),
|
||||
bindCredential=dict(type='str', no_log=True),
|
||||
bindDn=dict(type='str'),
|
||||
cachePolicy=dict(type='str', choices=['DEFAULT', 'EVICT_DAILY', 'EVICT_WEEKLY', 'MAX_LIFESPAN', 'NO_CACHE'], default='DEFAULT'),
|
||||
changedSyncPeriod=dict(type='int', default=-1),
|
||||
connectionPooling=dict(type='bool', default=True),
|
||||
connectionPoolingAuthentication=dict(type='str', choices=['none', 'simple', 'DIGEST-MD5']),
|
||||
connectionPoolingDebug=dict(type='str'),
|
||||
connectionPoolingInitSize=dict(type='int'),
|
||||
connectionPoolingMaxSize=dict(type='int'),
|
||||
connectionPoolingPrefSize=dict(type='int'),
|
||||
connectionPoolingProtocol=dict(type='str'),
|
||||
connectionPoolingTimeout=dict(type='int'),
|
||||
connectionTimeout=dict(type='int'),
|
||||
connectionUrl=dict(type='str'),
|
||||
customUserSearchFilter=dict(type='str'),
|
||||
debug=dict(type='bool'),
|
||||
editMode=dict(type='str', choices=['READ_ONLY', 'WRITABLE', 'UNSYNCED']),
|
||||
enabled=dict(type='bool', default=True),
|
||||
evictionDay=dict(type='str'),
|
||||
evictionHour=dict(type='str'),
|
||||
evictionMinute=dict(type='str'),
|
||||
fullSyncPeriod=dict(type='int', default=-1),
|
||||
importEnabled=dict(type='bool', default=True),
|
||||
kerberosRealm=dict(type='str'),
|
||||
keyTab=dict(type='str', no_log=False),
|
||||
maxLifespan=dict(type='int'),
|
||||
pagination=dict(type='bool', default=True),
|
||||
priority=dict(type='int', default=0),
|
||||
rdnLDAPAttribute=dict(type='str'),
|
||||
readTimeout=dict(type='int'),
|
||||
searchScope=dict(type='str', choices=['1', '2'], default='1'),
|
||||
serverPrincipal=dict(type='str'),
|
||||
startTls=dict(type='bool', default=False),
|
||||
syncRegistrations=dict(type='bool', default=False),
|
||||
trustEmail=dict(type='bool', default=False),
|
||||
updateProfileFirstLogin=dict(type='bool'),
|
||||
useKerberosForPasswordAuthentication=dict(type='bool', default=False),
|
||||
usePasswordModifyExtendedOp=dict(type='bool', default=False, no_log=False),
|
||||
useTruststoreSpi=dict(type='str', choices=['always', 'ldapsOnly', 'never'], default='ldapsOnly'),
|
||||
userObjectClasses=dict(type='str'),
|
||||
usernameLDAPAttribute=dict(type='str'),
|
||||
usersDn=dict(type='str'),
|
||||
uuidLDAPAttribute=dict(type='str'),
|
||||
validatePasswordPolicy=dict(type='bool', default=False),
|
||||
vendor=dict(type='str'),
|
||||
)
|
||||
|
||||
mapper_spec = dict(
|
||||
id=dict(type='str'),
|
||||
name=dict(type='str'),
|
||||
parentId=dict(type='str'),
|
||||
providerId=dict(type='str'),
|
||||
providerType=dict(type='str'),
|
||||
config=dict(type='dict'),
|
||||
)
|
||||
|
||||
meta_args = dict(
|
||||
config=dict(type='dict', options=config_spec),
|
||||
state=dict(type='str', default='present', choices=['present', 'absent']),
|
||||
realm=dict(type='str', default='master'),
|
||||
id=dict(type='str'),
|
||||
name=dict(type='str'),
|
||||
provider_id=dict(type='str', aliases=['providerId'], choices=['ldap', 'kerberos']),
|
||||
provider_type=dict(type='str', aliases=['providerType'], default='org.keycloak.storage.UserStorageProvider'),
|
||||
parent_id=dict(type='str', aliases=['parentId']),
|
||||
mappers=dict(type='list', elements='dict', options=mapper_spec),
|
||||
)
|
||||
|
||||
argument_spec.update(meta_args)
|
||||
|
||||
module = AnsibleModule(argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
required_one_of=([['id', 'name'],
|
||||
['token', 'auth_realm', 'auth_username', 'auth_password']]),
|
||||
required_together=([['auth_realm', 'auth_username', 'auth_password']]))
|
||||
|
||||
result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
|
||||
|
||||
# Obtain access token, initialize API
|
||||
try:
|
||||
connection_header = get_token(module.params)
|
||||
except KeycloakError as e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
kc = KeycloakAPI(module, connection_header)
|
||||
|
||||
realm = module.params.get('realm')
|
||||
state = module.params.get('state')
|
||||
config = module.params.get('config')
|
||||
mappers = module.params.get('mappers')
|
||||
cid = module.params.get('id')
|
||||
name = module.params.get('name')
|
||||
|
||||
# Keycloak API expects config parameters to be arrays containing a single string element
|
||||
if config is not None:
|
||||
module.params['config'] = dict((k, [str(v).lower() if not isinstance(v, str) else v])
|
||||
for k, v in config.items() if config[k] is not None)
|
||||
|
||||
if mappers is not None:
|
||||
for mapper in mappers:
|
||||
if mapper.get('config') is not None:
|
||||
mapper['config'] = dict((k, [str(v).lower() if not isinstance(v, str) else v])
|
||||
for k, v in mapper['config'].items() if mapper['config'][k] is not None)
|
||||
|
||||
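As the comment above notes, the Keycloak component API represents every config value as a list containing a single string, with non-string values stringified and lower-cased. The conversion is easier to see on a small example (illustrative only, run outside the module):

    config = {'enabled': True, 'priority': 0, 'bindDn': 'cn=directory reader', 'vendor': None}
    converted = dict((k, [str(v).lower() if not isinstance(v, str) else v])
                     for k, v in config.items() if v is not None)
    print(converted)
    # {'enabled': ['true'], 'priority': ['0'], 'bindDn': ['cn=directory reader']}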
# convert module parameters to client representation parameters (if they belong in there)
|
||||
comp_params = [x for x in module.params
|
||||
if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers'] and
|
||||
module.params.get(x) is not None]
|
||||
|
||||
# does the user federation already exist?
|
||||
if cid is None:
|
||||
found = kc.get_components(urlencode(dict(type='org.keycloak.storage.UserStorageProvider', parent=realm, name=name)), realm)
|
||||
if len(found) > 1:
|
||||
module.fail_json(msg='No ID given and found multiple user federations with name `{name}`. Cannot continue.'.format(name=name))
|
||||
before_comp = next(iter(found), None)
|
||||
if before_comp is not None:
|
||||
cid = before_comp['id']
|
||||
else:
|
||||
before_comp = kc.get_component(cid, realm)
|
||||
|
||||
if before_comp is None:
|
||||
before_comp = dict()
|
||||
|
||||
# if user federation exists, get associated mappers
|
||||
if cid is not None:
|
||||
before_comp['mappers'] = sorted(kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get('name'))
|
||||
|
||||
# build a changeset
|
||||
changeset = dict()
|
||||
|
||||
for param in comp_params:
|
||||
new_param_value = module.params.get(param)
|
||||
old_value = before_comp[camel(param)] if camel(param) in before_comp else None
|
||||
if param == 'mappers':
|
||||
new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value]
|
||||
if new_param_value != old_value:
|
||||
changeset[camel(param)] = new_param_value
|
||||
|
||||
# special handling of mappers list to allow change detection
|
||||
if module.params.get('mappers') is not None:
|
||||
if module.params['provider_id'] == 'kerberos':
|
||||
module.fail_json(msg='Cannot configure mappers for Kerberos federations.')
|
||||
for change in module.params['mappers']:
|
||||
change = dict((k, v) for k, v in change.items() if change[k] is not None)
|
||||
if change.get('id') is None and change.get('name') is None:
|
||||
module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.')
|
||||
if cid is None:
|
||||
old_mapper = dict()
|
||||
elif change.get('id') is not None:
|
||||
old_mapper = kc.get_component(change['id'], realm)
|
||||
if old_mapper is None:
|
||||
old_mapper = dict()
|
||||
else:
|
||||
found = kc.get_components(urlencode(dict(parent=cid, name=change['name'])), realm)
|
||||
if len(found) > 1:
|
||||
module.fail_json(msg='Found multiple mappers with name `{name}`. Cannot continue.'.format(name=change['name']))
|
||||
if len(found) == 1:
|
||||
old_mapper = found[0]
|
||||
else:
|
||||
old_mapper = dict()
|
||||
new_mapper = old_mapper.copy()
|
||||
new_mapper.update(change)
|
||||
if new_mapper != old_mapper:
|
||||
if changeset.get('mappers') is None:
|
||||
changeset['mappers'] = list()
|
||||
changeset['mappers'].append(new_mapper)
|
||||
|
||||
# prepare the new representation
|
||||
updated_comp = before_comp.copy()
|
||||
updated_comp.update(changeset)
|
||||
|
||||
result['proposed'] = sanitize(changeset)
|
||||
result['existing'] = sanitize(before_comp)
|
||||
|
||||
# if before_comp is none, the user federation doesn't exist.
|
||||
if before_comp == dict():
|
||||
if state == 'absent':
|
||||
# nothing to do.
|
||||
if module._diff:
|
||||
result['diff'] = dict(before='', after='')
|
||||
result['changed'] = False
|
||||
result['end_state'] = dict()
|
||||
result['msg'] = 'User federation does not exist; doing nothing.'
|
||||
module.exit_json(**result)
|
||||
|
||||
# for 'present', create a new user federation.
|
||||
result['changed'] = True
|
||||
|
||||
if module._diff:
|
||||
result['diff'] = dict(before='', after=sanitize(updated_comp))
|
||||
|
||||
if module.check_mode:
|
||||
module.exit_json(**result)
|
||||
|
||||
# do it for real!
|
||||
updated_comp = updated_comp.copy()
|
||||
updated_mappers = updated_comp.pop('mappers', [])
|
||||
after_comp = kc.create_component(updated_comp, realm)
|
||||
|
||||
for mapper in updated_mappers:
|
||||
if mapper.get('id') is not None:
|
||||
kc.update_component(mapper, realm)
|
||||
else:
|
||||
if mapper.get('parentId') is None:
|
||||
mapper['parentId'] = after_comp['id']
|
||||
mapper = kc.create_component(mapper, realm)
|
||||
|
||||
after_comp['mappers'] = updated_mappers
|
||||
result['end_state'] = sanitize(after_comp)
|
||||
|
||||
result['msg'] = "User federation {id} has been created".format(id=after_comp['id'])
|
||||
module.exit_json(**result)
|
||||
|
||||
else:
|
||||
if state == 'present':
|
||||
# no changes
|
||||
if updated_comp == before_comp:
|
||||
result['changed'] = False
|
||||
result['end_state'] = sanitize(updated_comp)
|
||||
result['msg'] = "No changes required to user federation {id}.".format(id=cid)
|
||||
module.exit_json(**result)
|
||||
|
||||
# update the existing role
|
||||
result['changed'] = True
|
||||
|
||||
if module._diff:
|
||||
result['diff'] = dict(before=sanitize(before_comp), after=sanitize(updated_comp))
|
||||
|
||||
if module.check_mode:
|
||||
module.exit_json(**result)
|
||||
|
||||
# do the update
|
||||
updated_comp = updated_comp.copy()
|
||||
updated_mappers = updated_comp.pop('mappers', [])
|
||||
kc.update_component(updated_comp, realm)
|
||||
after_comp = kc.get_component(cid, realm)
|
||||
|
||||
for mapper in updated_mappers:
|
||||
if mapper.get('id') is not None:
|
||||
kc.update_component(mapper, realm)
|
||||
else:
|
||||
if mapper.get('parentId') is None:
|
||||
mapper['parentId'] = updated_comp['id']
|
||||
mapper = kc.create_component(mapper, realm)
|
||||
|
||||
after_comp['mappers'] = updated_mappers
|
||||
result['end_state'] = sanitize(after_comp)
|
||||
|
||||
result['msg'] = "User federation {id} has been updated".format(id=cid)
|
||||
module.exit_json(**result)
|
||||
|
||||
elif state == 'absent':
|
||||
result['changed'] = True
|
||||
|
||||
if module._diff:
|
||||
result['diff'] = dict(before=sanitize(before_comp), after='')
|
||||
|
||||
if module.check_mode:
|
||||
module.exit_json(**result)
|
||||
|
||||
# delete for real
|
||||
kc.delete_component(cid, realm)
|
||||
|
||||
result['end_state'] = dict()
|
||||
|
||||
result['msg'] = "User federation {id} has been deleted".format(id=cid)
|
||||
module.exit_json(**result)
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
1
plugins/modules/keycloak_user_federation.py
Symbolic link
@@ -0,0 +1 @@
|
||||
identity/keycloak/keycloak_user_federation.py
|
||||
@@ -92,13 +92,6 @@ EXAMPLES = '''
|
||||
account_api_token: dummyapitoken
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Fetch my.com domain records
|
||||
community.general.dnsimple:
|
||||
domain: my.com
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
register: records
|
||||
|
||||
- name: Delete a domain
|
||||
community.general.dnsimple:
|
||||
domain: my.com
|
||||
|
||||
@@ -106,11 +106,10 @@ def main():
        module.fail_json(msg=missing_required_lib('python-ldap'),
                         exception=LDAP_IMP_ERR)

    if not module.check_mode:
        try:
            LdapSearch(module).main()
        except Exception as exception:
            module.fail_json(msg="Attribute action failed.", details=to_native(exception))
    try:
        LdapSearch(module).main()
    except Exception as exception:
        module.fail_json(msg="Attribute action failed.", details=to_native(exception))

    module.exit_json(changed=False)


@@ -54,8 +54,9 @@ options:
|
||||
- Type C(dummy) is added in community.general 3.5.0.
|
||||
- Type C(generic) is added in Ansible 2.5.
|
||||
- Type C(infiniband) is added in community.general 2.0.0.
|
||||
- Type C(gsm) is added in community.general 3.7.0.
|
||||
type: str
|
||||
choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi ]
|
||||
choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi, gsm ]
|
||||
mode:
|
||||
description:
|
||||
- This is the type of device or network connection that you wish to create for a bond or bridge.
|
||||
@@ -99,7 +100,8 @@ options:
|
||||
routing_rules4:
|
||||
description:
|
||||
- Is the same as in an C(ip route add) command, except always requires specifying a priority.
|
||||
type: str
|
||||
type: list
|
||||
elements: str
|
||||
version_added: 3.3.0
|
||||
never_default4:
|
||||
description:
|
||||
@@ -183,7 +185,7 @@ options:
|
||||
mtu:
|
||||
description:
|
||||
- The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created.
|
||||
- Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, pppoe, infiniband)
|
||||
- Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, gsm, pppoe, infiniband)
|
||||
- This parameter defaults to C(1500) when unset.
|
||||
type: int
|
||||
dhcp_client_id:
|
||||
@@ -643,6 +645,101 @@ options:
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 3.6.0
|
||||
gsm:
|
||||
description:
|
||||
- The configuration of the GSM connection.
|
||||
- Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host.
|
||||
- 'An up-to-date list of supported attributes can be found here:
|
||||
U(https://networkmanager.dev/docs/api/latest/settings-gsm.html).'
|
||||
- 'For instance to use apn, pin, username and password:
|
||||
C({apn: provider.apn, pin: 1234, username: apn.username, password: apn.password}).'
|
||||
type: dict
|
||||
version_added: 3.7.0
|
||||
suboptions:
|
||||
apn:
|
||||
description:
|
||||
- The GPRS Access Point Name specifying the APN used when establishing a data session with the GSM-based network.
|
||||
- The APN often determines how the user will be billed for their network usage and whether the user has access to the Internet or
|
||||
just a provider-specific walled-garden, so it is important to use the correct APN for the user's mobile broadband plan.
|
||||
- The APN may only be composed of the characters a-z, 0-9, ., and - per GSM 03.60 Section 14.9.
|
||||
type: str
|
||||
auto-config:
|
||||
description: When C(true), the settings such as I(gsm.apn), I(gsm.username), or I(gsm.password) will default to values that match the network
|
||||
the modem will register to in the Mobile Broadband Provider database.
|
||||
type: bool
|
||||
default: false
|
||||
device-id:
|
||||
description:
|
||||
- The device unique identifier (as given by the C(WWAN) management service) which this connection applies to.
|
||||
- If given, the connection will only apply to the specified device.
|
||||
type: str
|
||||
home-only:
|
||||
description:
|
||||
- When C(true), only connections to the home network will be allowed.
|
||||
- Connections to roaming networks will not be made.
|
||||
type: bool
|
||||
default: false
|
||||
mtu:
|
||||
description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple Ethernet frames.
|
||||
type: int
|
||||
default: 0
|
||||
network-id:
|
||||
description:
|
||||
- The Network ID (GSM LAI format, i.e. MCC-MNC) to force specific network registration.
|
||||
- If the Network ID is specified, NetworkManager will attempt to force the device to register only on the specified network.
|
||||
- This can be used to ensure that the device does not roam when direct roaming control of the device is not otherwise possible.
|
||||
type: str
|
||||
number:
|
||||
description: Legacy setting that used to help establishing PPP data sessions for GSM-based modems.
|
||||
type: str
|
||||
password:
|
||||
description:
|
||||
- The password used to authenticate with the network, if required.
|
||||
- Many providers do not require a password, or accept any password.
|
||||
- But if a password is required, it is specified here.
|
||||
type: str
|
||||
password-flags:
|
||||
description:
|
||||
- NMSettingSecretFlags indicating how to handle the I(password) property.
|
||||
- 'Following choices are allowed:
|
||||
C(0) B(NONE): The system is responsible for providing and storing this secret (default),
|
||||
C(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when it is required agents will be
|
||||
asked to retrieve it
|
||||
C(2) B(NOT_SAVED): This secret should not be saved, but should be requested from the user each time it is needed
|
||||
C(4) B(NOT_REQUIRED): In situations where it cannot be automatically determined that the secret is required
|
||||
(some VPNs and PPP providers do not require all secrets) this flag indicates that the specific secret is not required.'
|
||||
type: int
|
||||
choices: [ 0, 1, 2 , 4 ]
|
||||
default: 0
|
||||
pin:
|
||||
description:
|
||||
- If the SIM is locked with a PIN it must be unlocked before any other operations are requested.
|
||||
- Specify the PIN here to allow operation of the device.
|
||||
type: str
|
||||
pin-flags:
|
||||
description:
|
||||
- NMSettingSecretFlags indicating how to handle the I(gsm.pin) property.
|
||||
- See I(gsm.password-flags) for NMSettingSecretFlags choices.
|
||||
type: int
|
||||
choices: [ 0, 1, 2 , 4 ]
|
||||
default: 0
|
||||
sim-id:
|
||||
description:
|
||||
- The SIM card unique identifier (as given by the C(WWAN) management service) which this connection applies to.
|
||||
- 'If given, the connection will apply to any device also allowed by I(gsm.device-id) which contains a SIM card matching
|
||||
the given identifier.'
|
||||
type: str
|
||||
sim-operator-id:
|
||||
description:
|
||||
- A MCC/MNC string like C(310260) or C(21601I) identifying the specific mobile network operator which this connection applies to.
|
||||
- 'If given, the connection will apply to any device also allowed by I(gsm.device-id) and I(gsm.sim-id) which contains a SIM card
|
||||
provisioned by the given operator.'
|
||||
type: str
|
||||
username:
|
||||
description:
|
||||
- The username used to authenticate with the network, if required.
|
||||
- Many providers do not require a username, or accept any username.
|
||||
- But if a username is required, it is specified here.
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
@@ -979,6 +1076,19 @@ EXAMPLES = r'''
|
||||
autoconnect: true
|
||||
state: present
|
||||
|
||||
- name: Create a gsm connection
|
||||
community.general.nmcli:
|
||||
type: gsm
|
||||
conn_name: my-gsm-provider
|
||||
ifname: cdc-wdm0
|
||||
gsm:
|
||||
apn: my.provider.apn
|
||||
username: my-provider-username
|
||||
password: my-provider-password
|
||||
pin: my-sim-pin
|
||||
autoconnect: true
|
||||
state: present
|
||||
|
||||
'''
|
||||
|
||||
RETURN = r"""#
|
||||
@@ -1086,6 +1196,7 @@ class Nmcli(object):
|
||||
self.ssid = module.params['ssid']
|
||||
self.wifi = module.params['wifi']
|
||||
self.wifi_sec = module.params['wifi_sec']
|
||||
self.gsm = module.params['gsm']
|
||||
|
||||
if self.method4:
|
||||
self.ipv4_method = self.method4
|
||||
@@ -1243,6 +1354,12 @@ class Nmcli(object):
|
||||
options.update({
|
||||
'802-11-wireless-security.%s' % name: value
|
||||
})
|
||||
elif self.type == 'gsm':
|
||||
if self.gsm:
|
||||
for name, value in self.gsm.items():
|
||||
options.update({
|
||||
'gsm.%s' % name: value,
|
||||
})
|
||||
# Convert settings values based on the situation.
|
||||
for setting, value in options.items():
|
||||
setting_type = self.settings_type(setting)
|
||||
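The new gsm branch above simply prefixes every key of the user-supplied dict with gsm. so that nmcli receives the familiar dotted setting names. For instance (illustrative values, outside the module):

    gsm = {'apn': 'my.provider.apn', 'pin': '1234', 'username': 'user', 'password': 'secret'}
    options = {}
    for name, value in gsm.items():
        options['gsm.%s' % name] = value
    print(options)
    # {'gsm.apn': 'my.provider.apn', 'gsm.pin': '1234', 'gsm.username': 'user', 'gsm.password': 'secret'}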
@@ -1280,7 +1397,8 @@ class Nmcli(object):
|
||||
'sit',
|
||||
'team',
|
||||
'vlan',
|
||||
'wifi'
|
||||
'wifi',
|
||||
'gsm',
|
||||
)
|
||||
|
||||
@property
|
||||
@@ -1353,7 +1471,7 @@ class Nmcli(object):
|
||||
elif setting in ('ipv4.dns',
|
||||
'ipv4.dns-search',
|
||||
'ipv4.routes',
|
||||
'ipv4.route-metric'
|
||||
'ipv4.routing-rules',
|
||||
'ipv6.dns',
|
||||
'ipv6.dns-search',
|
||||
'802-11-wireless-security.group',
|
||||
@@ -1573,6 +1691,12 @@ class Nmcli(object):
|
||||
value = value.upper()
|
||||
# ensure current_value is also converted to uppercase in case nmcli changes behaviour
|
||||
current_value = current_value.upper()
|
||||
if key == 'gsm.apn':
|
||||
# Depending on the version, nmcli adds double-quotes to gsm.apn
|
||||
# Need to strip them in order to compare both
|
||||
current_value = current_value.strip('"')
|
||||
if key == self.mtu_setting and self.mtu is None:
|
||||
self.mtu = 0
|
||||
else:
|
||||
# parameter does not exist
|
||||
current_value = None
|
||||
@@ -1581,6 +1705,8 @@ class Nmcli(object):
|
||||
# compare values between two lists
|
||||
if sorted(current_value) != sorted(value):
|
||||
changed = True
|
||||
elif all([key == self.mtu_setting, self.type == 'dummy', current_value is None, value == 'auto', self.mtu is None]):
|
||||
value = None
|
||||
else:
|
||||
if current_value != to_text(value):
|
||||
changed = True
|
||||
@@ -1630,13 +1756,14 @@ def main():
|
||||
'vlan',
|
||||
'vxlan',
|
||||
'wifi',
|
||||
'gsm',
|
||||
]),
|
||||
ip4=dict(type='str'),
|
||||
gw4=dict(type='str'),
|
||||
gw4_ignore_auto=dict(type='bool', default=False),
|
||||
routes4=dict(type='list', elements='str'),
|
||||
route_metric4=dict(type='int'),
|
||||
routing_rules4=dict(type='str'),
|
||||
routing_rules4=dict(type='list', elements='str'),
|
||||
never_default4=dict(type='bool', default=False),
|
||||
dns4=dict(type='list', elements='str'),
|
||||
dns4_search=dict(type='list', elements='str'),
|
||||
@@ -1700,6 +1827,7 @@ def main():
|
||||
ssid=dict(type='str'),
|
||||
wifi=dict(type='dict'),
|
||||
wifi_sec=dict(type='dict', no_log=True),
|
||||
gsm=dict(type='dict'),
|
||||
),
|
||||
mutually_exclusive=[['never_default4', 'gw4']],
|
||||
required_if=[("type", "wifi", [("ssid")])],
|
||||
|
||||
@@ -125,6 +125,11 @@ options:
|
||||
- Sets the timeout in seconds for connection attempts.
|
||||
type: int
|
||||
default: 20
|
||||
ehlohost:
|
||||
description:
|
||||
- Allows for manual specification of host for EHLO.
|
||||
type: str
|
||||
version_added: 3.8.0
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
@@ -189,6 +194,16 @@ EXAMPLES = r'''
|
||||
subject: Ansible-report
|
||||
body: System {{ ansible_hostname }} has been successfully provisioned.
|
||||
secure: starttls
|
||||
|
||||
- name: Sending an e-mail using StartTLS, remote server, custom EHLO
|
||||
community.general.mail:
|
||||
host: some.smtp.host.tld
|
||||
port: 25
|
||||
ehlohost: my-resolvable-hostname.tld
|
||||
to: John Smith <john.smith@example.com>
|
||||
subject: Ansible-report
|
||||
body: System {{ ansible_hostname }} has been successfully provisioned.
|
||||
secure: starttls
|
||||
'''
|
||||
|
||||
import os
|
||||
@@ -215,6 +230,7 @@ def main():
|
||||
password=dict(type='str', no_log=True),
|
||||
host=dict(type='str', default='localhost'),
|
||||
port=dict(type='int', default=25),
|
||||
ehlohost=dict(type='str', default=None),
|
||||
sender=dict(type='str', default='root', aliases=['from']),
|
||||
to=dict(type='list', elements='str', default=['root'], aliases=['recipients']),
|
||||
cc=dict(type='list', elements='str', default=[]),
|
||||
@@ -235,6 +251,7 @@ def main():
|
||||
password = module.params.get('password')
|
||||
host = module.params.get('host')
|
||||
port = module.params.get('port')
|
||||
local_hostname = module.params.get('ehlohost')
|
||||
sender = module.params.get('sender')
|
||||
recipients = module.params.get('to')
|
||||
copies = module.params.get('cc')
|
||||
@@ -259,9 +276,9 @@ def main():
|
||||
if secure != 'never':
|
||||
try:
|
||||
if PY3:
|
||||
smtp = smtplib.SMTP_SSL(host=host, port=port, timeout=timeout)
|
||||
smtp = smtplib.SMTP_SSL(host=host, port=port, local_hostname=local_hostname, timeout=timeout)
|
||||
else:
|
||||
smtp = smtplib.SMTP_SSL(timeout=timeout)
|
||||
smtp = smtplib.SMTP_SSL(local_hostname=local_hostname, timeout=timeout)
|
||||
code, smtpmessage = smtp.connect(host, port)
|
||||
secure_state = True
|
||||
except ssl.SSLError as e:
|
||||
@@ -273,9 +290,9 @@ def main():
|
||||
|
||||
if not secure_state:
|
||||
if PY3:
|
||||
smtp = smtplib.SMTP(host=host, port=port, timeout=timeout)
|
||||
smtp = smtplib.SMTP(host=host, port=port, local_hostname=local_hostname, timeout=timeout)
|
||||
else:
|
||||
smtp = smtplib.SMTP(timeout=timeout)
|
||||
smtp = smtplib.SMTP(local_hostname=local_hostname, timeout=timeout)
|
||||
code, smtpmessage = smtp.connect(host, port)
|
||||
|
||||
except smtplib.SMTPException as e:
|
||||
|
||||
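The ehlohost change above simply threads the new option through to smtplib as local_hostname, which controls the name announced in the EHLO/HELO greeting. A minimal sketch of that mapping (hypothetical helper, not part of the module):

    import smtplib

    def connect_smtp(host, port=25, ehlohost=None, timeout=20):
        # ehlohost maps to smtplib's local_hostname; when it is None,
        # smtplib falls back to the local FQDN, which is the old behaviour.
        return smtplib.SMTP(host=host, port=port, local_hostname=ehlohost, timeout=timeout)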
285
plugins/modules/packaging/language/pipx.py
Normal file
@@ -0,0 +1,285 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2021, Alexei Znamensky <russoz@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: pipx
|
||||
short_description: Manages applications installed with pipx
|
||||
version_added: 3.8.0
|
||||
description:
|
||||
- Manage Python applications installed in isolated virtualenvs using pipx.
|
||||
options:
|
||||
state:
|
||||
type: str
|
||||
choices: [present, absent, install, uninstall, uninstall_all, inject, upgrade, upgrade_all, reinstall, reinstall_all]
|
||||
default: install
|
||||
description:
|
||||
- Desired state for the application.
|
||||
- The states C(present) and C(absent) are aliases to C(install) and C(uninstall), respectively.
|
||||
name:
|
||||
type: str
|
||||
description:
|
||||
- >
|
||||
The name of the application to be installed. It must be a simple package name.
|
||||
For passing package specifications or installing from URLs or directories,
|
||||
please use the I(source) option.
|
||||
source:
|
||||
type: str
|
||||
description:
|
||||
- >
|
||||
The source for the application, such as a package with a version specifier, a URL,
a directory, or any other specification accepted by C(pipx). See the C(pipx) documentation for more details.
|
||||
- When specified, the C(pipx) command will use I(source) instead of I(name).
|
||||
install_deps:
|
||||
description:
|
||||
- Include applications of dependent packages.
|
||||
- Only used when I(state=install) or I(state=upgrade).
|
||||
type: bool
|
||||
default: false
|
||||
inject_packages:
|
||||
description:
|
||||
- Packages to be injected into an existing virtual environment.
|
||||
- Only used when I(state=inject).
|
||||
type: list
|
||||
elements: str
|
||||
force:
|
||||
description:
|
||||
- Force modification of the application's virtual environment. See C(pipx) for details.
|
||||
- Only used when I(state=install), I(state=upgrade), I(state=upgrade_all), or I(state=inject).
|
||||
type: bool
|
||||
default: false
|
||||
include_injected:
|
||||
description:
|
||||
- Upgrade the injected packages along with the application.
|
||||
- Only used when I(state=upgrade) or I(state=upgrade_all).
|
||||
type: bool
|
||||
default: false
|
||||
index_url:
|
||||
description:
|
||||
- Base URL of Python Package Index.
|
||||
- Only used when I(state=install), I(state=upgrade), or I(state=inject).
|
||||
type: str
|
||||
python:
|
||||
description:
|
||||
- Python version to be used when creating the application virtual environment. Must be 3.6+.
|
||||
- Only used when I(state=install), I(state=reinstall), or I(state=reinstall_all).
|
||||
type: str
|
||||
executable:
|
||||
description:
|
||||
- Path to the C(pipx) installed in the system.
|
||||
- >
|
||||
If not specified, the module will use C(python -m pipx) to run the tool,
|
||||
using the same Python interpreter as Ansible itself.
|
||||
type: path
|
||||
notes:
|
||||
- This module does not install the C(pipx) Python package; however, that can easily be done with the module M(ansible.builtin.pip).
|
||||
- This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module.
|
||||
- Please note that C(pipx) requires Python 3.6 or above.
|
||||
- >
|
||||
This first implementation does not verify whether a specified version constraint has been installed or not.
|
||||
Hence, when using version operators, the C(pipx) module will always try to execute the operation,
|
||||
even when the application was previously installed.
|
||||
This feature will be added in the future.
|
||||
- See also the C(pipx) documentation at U(https://pypa.github.io/pipx/).
|
||||
author:
|
||||
- "Alexei Znamensky (@russoz)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Install tox
|
||||
community.general.pipx:
|
||||
name: tox
|
||||
|
||||
- name: Install tox from git repository
|
||||
community.general.pipx:
|
||||
name: tox
|
||||
source: git+https://github.com/tox-dev/tox.git
|
||||
|
||||
- name: Upgrade tox
|
||||
community.general.pipx:
|
||||
name: tox
|
||||
state: upgrade
|
||||
|
||||
- name: Reinstall black with specific Python version
|
||||
community.general.pipx:
|
||||
name: black
|
||||
state: reinstall
|
||||
python: 3.7
|
||||
|
||||
- name: Uninstall pycowsay
|
||||
community.general.pipx:
|
||||
name: pycowsay
|
||||
state: absent
|
||||
'''
|
||||
|
||||
|
||||
import json
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.module_helper import (
|
||||
CmdStateModuleHelper, ArgFormat, ModuleHelperException
|
||||
)
|
||||
from ansible.module_utils.facts.compat import ansible_facts
|
||||
|
||||
|
||||
_state_map = dict(
|
||||
present='install',
|
||||
absent='uninstall',
|
||||
uninstall_all='uninstall-all',
|
||||
upgrade_all='upgrade-all',
|
||||
reinstall_all='reinstall-all',
|
||||
)
|
||||
|
||||
|
||||
class PipX(CmdStateModuleHelper):
|
||||
output_params = ['name', 'source', 'index_url', 'force', 'installdeps']
|
||||
module = dict(
|
||||
argument_spec=dict(
|
||||
state=dict(type='str', default='install',
|
||||
choices=[
|
||||
'present', 'absent', 'install', 'uninstall', 'uninstall_all',
|
||||
'inject', 'upgrade', 'upgrade_all', 'reinstall', 'reinstall_all']),
|
||||
name=dict(type='str'),
|
||||
source=dict(type='str'),
|
||||
install_deps=dict(type='bool', default=False),
|
||||
inject_packages=dict(type='list', elements='str'),
|
||||
force=dict(type='bool', default=False),
|
||||
include_injected=dict(type='bool', default=False),
|
||||
index_url=dict(type='str'),
|
||||
python=dict(type='str'),
|
||||
executable=dict(type='path')
|
||||
),
|
||||
required_if=[
|
||||
('state', 'present', ['name']),
|
||||
('state', 'install', ['name']),
|
||||
('state', 'absent', ['name']),
|
||||
('state', 'uninstall', ['name']),
|
||||
('state', 'inject', ['name', 'inject_packages']),
|
||||
],
|
||||
supports_check_mode=True,
|
||||
)
|
||||
command_args_formats = dict(
|
||||
state=dict(fmt=lambda v: [_state_map.get(v, v)]),
|
||||
name_source=dict(fmt=lambda n, s: [s] if s else [n], stars=1),
|
||||
install_deps=dict(fmt="--install-deps", style=ArgFormat.BOOLEAN),
|
||||
inject_packages=dict(fmt=lambda v: v),
|
||||
force=dict(fmt="--force", style=ArgFormat.BOOLEAN),
|
||||
include_injected=dict(fmt="--include-injected", style=ArgFormat.BOOLEAN),
|
||||
index_url=dict(fmt=('--index-url', '{0}'),),
|
||||
python=dict(fmt=('--python', '{0}'),),
|
||||
_list=dict(fmt=('list', '--include-injected', '--json'), style=ArgFormat.BOOLEAN),
|
||||
)
|
||||
check_rc = True
|
||||
run_command_fixed_options = dict(
|
||||
environ_update={'USE_EMOJI': '0'}
|
||||
)
|
||||
|
||||
def _retrieve_installed(self):
|
||||
def process_list(rc, out, err):
|
||||
if not out:
|
||||
return {}
|
||||
|
||||
results = {}
|
||||
raw_data = json.loads(out)
|
||||
for venv_name, venv in raw_data['venvs'].items():
|
||||
results[venv_name] = {
|
||||
'version': venv['metadata']['main_package']['package_version'],
|
||||
'injected': dict(
|
||||
(k, v['package_version']) for k, v in venv['metadata']['injected_packages'].items()
|
||||
),
|
||||
}
|
||||
return results
|
||||
|
||||
installed = self.run_command(params=[{'_list': True}], process_output=process_list,
|
||||
publish_rc=False, publish_out=False, publish_err=False)
|
||||
|
||||
if self.vars.name is not None:
|
||||
app_list = installed.get(self.vars.name)
|
||||
if app_list:
|
||||
return {self.vars.name: app_list}
|
||||
else:
|
||||
return {}
|
||||
|
||||
return installed
|
||||
|
||||
def __init_module__(self):
|
||||
if self.vars.executable:
|
||||
self.command = [self.vars.executable]
|
||||
else:
|
||||
facts = ansible_facts(self.module, gather_subset=['python'])
|
||||
self.command = [facts['python']['executable'], '-m', 'pipx']
|
||||
|
||||
self.vars.set('will_change', False, output=False, change=True)
|
||||
self.vars.set('application', self._retrieve_installed(), change=True, diff=True)
|
||||
|
||||
def __quit_module__(self):
|
||||
self.vars.application = self._retrieve_installed()
|
||||
|
||||
def state_install(self):
|
||||
if not self.vars.application or self.vars.force:
|
||||
self.vars.will_change = True
|
||||
if not self.module.check_mode:
|
||||
self.run_command(params=['state', 'index_url', 'install_deps', 'force', 'python',
|
||||
{'name_source': [self.vars.name, self.vars.source]}])
|
||||
|
||||
state_present = state_install
|
||||
|
||||
def state_upgrade(self):
|
||||
if not self.vars.application:
|
||||
raise ModuleHelperException(
|
||||
"Trying to upgrade a non-existent application: {0}".format(self.vars.name))
|
||||
if self.vars.force:
|
||||
self.vars.will_change = True
|
||||
if not self.module.check_mode:
|
||||
self.run_command(params=['state', 'index_url', 'install_deps', 'force', 'name'])
|
||||
|
||||
def state_uninstall(self):
|
||||
if self.vars.application and not self.module.check_mode:
|
||||
self.run_command(params=['state', 'name'])
|
||||
|
||||
state_absent = state_uninstall
|
||||
|
||||
def state_reinstall(self):
|
||||
if not self.vars.application:
|
||||
raise ModuleHelperException(
|
||||
"Trying to reinstall a non-existent application: {0}".format(self.vars.name))
|
||||
self.vars.will_change = True
|
||||
if not self.module.check_mode:
|
||||
self.run_command(params=['state', 'name', 'python'])
|
||||
|
||||
def state_inject(self):
|
||||
if not self.vars.application:
|
||||
raise ModuleHelperException(
|
||||
"Trying to inject packages into a non-existent application: {0}".format(self.vars.name))
|
||||
if self.vars.force:
|
||||
self.vars.will_change = True
|
||||
if not self.module.check_mode:
|
||||
self.run_command(params=['state', 'index_url', 'force', 'name', 'inject_packages'])
|
||||
|
||||
def state_uninstall_all(self):
|
||||
if not self.module.check_mode:
|
||||
self.run_command(params=['state'])
|
||||
|
||||
def state_reinstall_all(self):
|
||||
if not self.module.check_mode:
|
||||
self.run_command(params=['state', 'python'])
|
||||
|
||||
def state_upgrade_all(self):
|
||||
if self.vars.force:
|
||||
self.vars.will_change = True
|
||||
if not self.module.check_mode:
|
||||
self.run_command(params=['state', 'include_injected', 'force'])
|
||||
|
||||
|
||||
def main():
|
||||
PipX.execute()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
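The pipx module above discovers what is already installed by running pipx with C(list --include-injected --json) and reshaping the JSON. A standalone sketch of that parsing step, assuming the same JSON layout that _retrieve_installed() expects (a top-level 'venvs' mapping with metadata.main_package.package_version and metadata.injected_packages):

    import json
    import subprocess

    def list_pipx_apps(pipx_cmd=("pipx",)):
        # Hypothetical helper mirroring _retrieve_installed() above.
        out = subprocess.run(
            list(pipx_cmd) + ["list", "--include-injected", "--json"],
            capture_output=True, text=True, check=True,
        ).stdout
        if not out:
            return {}
        results = {}
        for venv_name, venv in json.loads(out)["venvs"].items():
            results[venv_name] = {
                "version": venv["metadata"]["main_package"]["package_version"],
                "injected": {
                    k: v["package_version"]
                    for k, v in venv["metadata"]["injected_packages"].items()
                },
            }
        return results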
@@ -120,8 +120,7 @@ class CoprModule(object):
|
||||
@property
|
||||
def short_chroot(self):
|
||||
"""str: Chroot (distribution-version-architecture) shorten to distribution-version."""
|
||||
chroot_parts = self.chroot.split("-")
|
||||
return "{0}-{1}".format(chroot_parts[0], chroot_parts[1])
|
||||
return self.chroot.rsplit('-', 1)[0]
|
||||
|
||||
@property
|
||||
def arch(self):
|
||||
@@ -193,18 +192,20 @@ class CoprModule(object):
|
||||
Returns:
|
||||
Information about the repository.
|
||||
"""
|
||||
distribution, version = self.short_chroot.split("-")
|
||||
distribution, version = self.short_chroot.split('-', 1)
|
||||
chroot = self.short_chroot
|
||||
while True:
|
||||
repo_info, status_code = self._get(chroot)
|
||||
if repo_info:
|
||||
return repo_info
|
||||
if distribution == "rhel":
|
||||
chroot = "centos-stream"
|
||||
chroot = "centos-stream-8"
|
||||
distribution = "centos"
|
||||
elif distribution == "centos":
|
||||
if version == "stream":
|
||||
if version == "stream-8":
|
||||
version = "8"
|
||||
elif version == "stream-9":
|
||||
version = "9"
|
||||
chroot = "epel-{0}".format(version)
|
||||
distribution = "epel"
|
||||
else:
|
||||
|
||||
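The copr change above rewrites the fallback chain used when a repository does not exist for the requested chroot: a rhel chroot now falls back to centos-stream-8, and centos-stream-8/-9 fall back to epel-8/epel-9. A small sketch of just that chain, assuming chroots follow Copr's distribution-version-architecture naming:

    def fallback_chroots(chroot):
        # Yield candidate chroots in the order the module tries them, e.g.
        # "rhel-8-x86_64" -> "rhel-8" -> "centos-stream-8" -> "epel-8".
        short = chroot.rsplit("-", 1)[0]           # drop the architecture
        distribution, version = short.split("-", 1)
        while True:
            yield short
            if distribution == "rhel":
                short = "centos-stream-8"
                distribution = "centos"
            elif distribution == "centos":
                if version == "stream-8":
                    version = "8"
                elif version == "stream-9":
                    version = "9"
                short = "epel-{0}".format(version)
                distribution = "epel"
            else:
                return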
@@ -132,10 +132,10 @@ EXAMPLES = '''
|
||||
name: homebrew/cask/foo
|
||||
state: present
|
||||
|
||||
- name: Use ignored-pinned option while upgrading all
|
||||
- name: Use ignore-pinned option while upgrading all
|
||||
community.general.homebrew:
|
||||
upgrade_all: yes
|
||||
upgrade_options: ignored-pinned
|
||||
upgrade_options: ignore-pinned
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
|
||||
@@ -120,7 +120,7 @@ def selfupdate(module, port_path):
|
||||
changed = False
|
||||
msg = "Macports already up-to-date"
|
||||
|
||||
return (changed, msg)
|
||||
return (changed, msg, out, err)
|
||||
else:
|
||||
module.fail_json(msg="Failed to update Macports", stdout=out, stderr=err)
|
||||
|
||||
@@ -134,11 +134,11 @@ def upgrade(module, port_path):
|
||||
if out.strip() == "Nothing to upgrade.":
|
||||
changed = False
|
||||
msg = "Ports already upgraded"
|
||||
return (changed, msg)
|
||||
return (changed, msg, out, err)
|
||||
elif rc == 0:
|
||||
changed = True
|
||||
msg = "Outdated ports upgraded successfully"
|
||||
return (changed, msg)
|
||||
return (changed, msg, out, err)
|
||||
else:
|
||||
module.fail_json(msg="Failed to upgrade outdated ports", stdout=out, stderr=err)
|
||||
|
||||
@@ -165,7 +165,7 @@ def query_port(module, port_path, name, state="present"):
|
||||
return False
|
||||
|
||||
|
||||
def remove_ports(module, port_path, ports):
|
||||
def remove_ports(module, port_path, ports, stdout, stderr):
|
||||
""" Uninstalls one or more ports if installed. """
|
||||
|
||||
remove_c = 0
|
||||
@@ -176,20 +176,21 @@ def remove_ports(module, port_path, ports):
|
||||
continue
|
||||
|
||||
rc, out, err = module.run_command("%s uninstall %s" % (port_path, port))
|
||||
|
||||
stdout += out
|
||||
stderr += err
|
||||
if query_port(module, port_path, port):
|
||||
module.fail_json(msg="Failed to remove %s: %s" % (port, err))
|
||||
module.fail_json(msg="Failed to remove %s: %s" % (port, err), stdout=stdout, stderr=stderr)
|
||||
|
||||
remove_c += 1
|
||||
|
||||
if remove_c > 0:
|
||||
|
||||
module.exit_json(changed=True, msg="Removed %s port(s)" % remove_c)
|
||||
module.exit_json(changed=True, msg="Removed %s port(s)" % remove_c, stdout=stdout, stderr=stderr)
|
||||
|
||||
module.exit_json(changed=False, msg="Port(s) already absent")
|
||||
module.exit_json(changed=False, msg="Port(s) already absent", stdout=stdout, stderr=stderr)
|
||||
|
||||
|
||||
def install_ports(module, port_path, ports, variant):
|
||||
def install_ports(module, port_path, ports, variant, stdout, stderr):
|
||||
""" Installs one or more ports if not already installed. """
|
||||
|
||||
install_c = 0
|
||||
@@ -199,66 +200,70 @@ def install_ports(module, port_path, ports, variant):
|
||||
continue
|
||||
|
||||
rc, out, err = module.run_command("%s install %s %s" % (port_path, port, variant))
|
||||
|
||||
stdout += out
|
||||
stderr += err
|
||||
if not query_port(module, port_path, port):
|
||||
module.fail_json(msg="Failed to install %s: %s" % (port, err))
|
||||
module.fail_json(msg="Failed to install %s: %s" % (port, err), stdout=stdout, stderr=stderr)
|
||||
|
||||
install_c += 1
|
||||
|
||||
if install_c > 0:
|
||||
module.exit_json(changed=True, msg="Installed %s port(s)" % (install_c))
|
||||
module.exit_json(changed=True, msg="Installed %s port(s)" % (install_c), stdout=stdout, stderr=stderr)
|
||||
|
||||
module.exit_json(changed=False, msg="Port(s) already present")
|
||||
module.exit_json(changed=False, msg="Port(s) already present", stdout=stdout, stderr=stderr)
|
||||
|
||||
|
||||
def activate_ports(module, port_path, ports):
|
||||
def activate_ports(module, port_path, ports, stdout, stderr):
|
||||
""" Activate a port if it's inactive. """
|
||||
|
||||
activate_c = 0
|
||||
|
||||
for port in ports:
|
||||
if not query_port(module, port_path, port):
|
||||
module.fail_json(msg="Failed to activate %s, port(s) not present" % (port))
|
||||
module.fail_json(msg="Failed to activate %s, port(s) not present" % (port), stdout=stdout, stderr=stderr)
|
||||
|
||||
if query_port(module, port_path, port, state="active"):
|
||||
continue
|
||||
|
||||
rc, out, err = module.run_command("%s activate %s" % (port_path, port))
|
||||
stdout += out
|
||||
stderr += err
|
||||
|
||||
if not query_port(module, port_path, port, state="active"):
|
||||
module.fail_json(msg="Failed to activate %s: %s" % (port, err))
|
||||
module.fail_json(msg="Failed to activate %s: %s" % (port, err), stdout=stdout, stderr=stderr)
|
||||
|
||||
activate_c += 1
|
||||
|
||||
if activate_c > 0:
|
||||
module.exit_json(changed=True, msg="Activated %s port(s)" % (activate_c))
|
||||
module.exit_json(changed=True, msg="Activated %s port(s)" % (activate_c), stdout=stdout, stderr=stderr)
|
||||
|
||||
module.exit_json(changed=False, msg="Port(s) already active")
|
||||
module.exit_json(changed=False, msg="Port(s) already active", stdout=stdout, stderr=stderr)
|
||||
|
||||
|
||||
def deactivate_ports(module, port_path, ports):
|
||||
def deactivate_ports(module, port_path, ports, stdout, stderr):
|
||||
""" Deactivate a port if it's active. """
|
||||
|
||||
deactivated_c = 0
|
||||
|
||||
for port in ports:
|
||||
if not query_port(module, port_path, port):
|
||||
module.fail_json(msg="Failed to deactivate %s, port(s) not present" % (port))
|
||||
module.fail_json(msg="Failed to deactivate %s, port(s) not present" % (port), stdout=stdout, stderr=stderr)
|
||||
|
||||
if not query_port(module, port_path, port, state="active"):
|
||||
continue
|
||||
|
||||
rc, out, err = module.run_command("%s deactivate %s" % (port_path, port))
|
||||
|
||||
stdout += out
|
||||
stderr += err
|
||||
if query_port(module, port_path, port, state="active"):
|
||||
module.fail_json(msg="Failed to deactivate %s: %s" % (port, err))
|
||||
module.fail_json(msg="Failed to deactivate %s: %s" % (port, err), stdout=stdout, stderr=stderr)
|
||||
|
||||
deactivated_c += 1
|
||||
|
||||
if deactivated_c > 0:
|
||||
module.exit_json(changed=True, msg="Deactivated %s port(s)" % (deactivated_c))
|
||||
module.exit_json(changed=True, msg="Deactivated %s port(s)" % (deactivated_c), stdout=stdout, stderr=stderr)
|
||||
|
||||
module.exit_json(changed=False, msg="Port(s) already inactive")
|
||||
module.exit_json(changed=False, msg="Port(s) already inactive", stdout=stdout, stderr=stderr)
|
||||
|
||||
|
||||
def main():
|
||||
@@ -272,35 +277,42 @@ def main():
|
||||
)
|
||||
)
|
||||
|
||||
stdout = ""
|
||||
stderr = ""
|
||||
|
||||
port_path = module.get_bin_path('port', True, ['/opt/local/bin'])
|
||||
|
||||
p = module.params
|
||||
|
||||
if p["selfupdate"]:
|
||||
(changed, msg) = selfupdate(module, port_path)
|
||||
(changed, msg, out, err) = selfupdate(module, port_path)
|
||||
stdout += out
|
||||
stderr += err
|
||||
if not (p["name"] or p["upgrade"]):
|
||||
module.exit_json(changed=changed, msg=msg)
|
||||
module.exit_json(changed=changed, msg=msg, stdout=stdout, stderr=stderr)
|
||||
|
||||
if p["upgrade"]:
|
||||
(changed, msg) = upgrade(module, port_path)
|
||||
(changed, msg, out, err) = upgrade(module, port_path)
|
||||
stdout += out
|
||||
stderr += err
|
||||
if not p["name"]:
|
||||
module.exit_json(changed=changed, msg=msg)
|
||||
module.exit_json(changed=changed, msg=msg, stdout=stdout, stderr=stderr)
|
||||
|
||||
pkgs = p["name"]
|
||||
|
||||
variant = p["variant"]
|
||||
|
||||
if p["state"] in ["present", "installed"]:
|
||||
install_ports(module, port_path, pkgs, variant)
|
||||
install_ports(module, port_path, pkgs, variant, stdout, stderr)
|
||||
|
||||
elif p["state"] in ["absent", "removed"]:
|
||||
remove_ports(module, port_path, pkgs)
|
||||
remove_ports(module, port_path, pkgs, stdout, stderr)
|
||||
|
||||
elif p["state"] == "active":
|
||||
activate_ports(module, port_path, pkgs)
|
||||
activate_ports(module, port_path, pkgs, stdout, stderr)
|
||||
|
||||
elif p["state"] == "inactive":
|
||||
deactivate_ports(module, port_path, pkgs)
|
||||
deactivate_ports(module, port_path, pkgs, stdout, stderr)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
@@ -246,6 +246,7 @@ def package_present(names, pkg_spec, module):
|
||||
if match:
|
||||
# It turns out we were able to install the package.
|
||||
module.debug("package_present(): we were able to install package for name '%s'" % name)
|
||||
pkg_spec[name]['changed'] = True
|
||||
else:
|
||||
# We really did fail, fake the return code.
|
||||
module.debug("package_present(): we really did fail for name '%s'" % name)
|
||||
|
||||
@@ -281,9 +281,9 @@ def install_packages(module, packages):
|
||||
install_c += 1
|
||||
|
||||
if install_c > 0:
|
||||
module.exit_json(changed=True, msg=format_action_message(module, "installed", install_c))
|
||||
module.exit_json(changed=True, msg=format_action_message(module, "installed", install_c), stdout=out, stderr=err)
|
||||
|
||||
module.exit_json(changed=False, msg="package(s) already present", stdout=out, stderr=err)
|
||||
module.exit_json(changed=False, msg="package(s) already present")
|
||||
|
||||
|
||||
def update_package_db(module):
|
||||
|
||||
@@ -134,6 +134,7 @@ EXAMPLES = '''
|
||||
'''
|
||||
|
||||
|
||||
from collections import defaultdict
|
||||
import re
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
@@ -226,7 +227,8 @@ def remove_packages(module, pkgng_path, packages, dir_arg):
|
||||
|
||||
|
||||
def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg, state, ignoreosver):
|
||||
install_c = 0
|
||||
action_queue = defaultdict(list)
|
||||
action_count = defaultdict(int)
|
||||
stdout = ""
|
||||
stderr = ""
|
||||
|
||||
@@ -263,29 +265,48 @@ def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg, sta
|
||||
if already_installed and state == "present":
|
||||
continue
|
||||
|
||||
update_available = query_update(module, pkgng_path, package, dir_arg, old_pkgng, pkgsite)
|
||||
if not update_available and already_installed and state == "latest":
|
||||
if (
|
||||
already_installed and state == "latest"
|
||||
and not query_update(module, pkgng_path, package, dir_arg, old_pkgng, pkgsite)
|
||||
):
|
||||
continue
|
||||
|
||||
if not module.check_mode:
|
||||
if already_installed:
|
||||
action = "upgrade"
|
||||
else:
|
||||
action = "install"
|
||||
if already_installed:
|
||||
action_queue["upgrade"].append(package)
|
||||
else:
|
||||
action_queue["install"].append(package)
|
||||
|
||||
if not module.check_mode:
|
||||
# install/upgrade all named packages with one pkg command
|
||||
for (action, package_list) in action_queue.items():
|
||||
packages = ' '.join(package_list)
|
||||
if old_pkgng:
|
||||
rc, out, err = module.run_command("%s %s %s %s -g -U -y %s" % (batch_var, pkgsite, pkgng_path, action, package))
|
||||
rc, out, err = module.run_command("%s %s %s %s -g -U -y %s" % (batch_var, pkgsite, pkgng_path, action, packages))
|
||||
else:
|
||||
rc, out, err = module.run_command("%s %s %s %s %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, action, pkgsite, package))
|
||||
rc, out, err = module.run_command("%s %s %s %s %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, action, pkgsite, packages))
|
||||
stdout += out
|
||||
stderr += err
|
||||
|
||||
if not module.check_mode and not query_package(module, pkgng_path, package, dir_arg):
|
||||
module.fail_json(msg="failed to %s %s: %s" % (action, package, out), stdout=stdout, stderr=stderr)
|
||||
# individually verify packages are in requested state
|
||||
for package in package_list:
|
||||
verified = False
|
||||
if action == 'install':
|
||||
verified = query_package(module, pkgng_path, package, dir_arg)
|
||||
elif action == 'upgrade':
|
||||
verified = not query_update(module, pkgng_path, package, dir_arg, old_pkgng, pkgsite)
|
||||
|
||||
install_c += 1
|
||||
if verified:
|
||||
action_count[action] += 1
|
||||
else:
|
||||
module.fail_json(msg="failed to %s %s" % (action, package), stdout=stdout, stderr=stderr)
|
||||
|
||||
if install_c > 0:
|
||||
return (True, "added %s package(s)" % (install_c), stdout, stderr)
|
||||
if sum(action_count.values()) > 0:
|
||||
past_tense = {'install': 'installed', 'upgrade': 'upgraded'}
|
||||
messages = []
|
||||
for (action, count) in action_count.items():
|
||||
messages.append("%s %s package%s" % (past_tense.get(action, action), count, "s" if count != 1 else ""))
|
||||
|
||||
return (True, '; '.join(messages), stdout, stderr)
|
||||
|
||||
return (False, "package(s) already %s" % (state), stdout, stderr)
|
||||
|
||||
|
||||
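The pkgng change above batches packages per action so that one pkg invocation installs (or upgrades) everything queued for that action, and then assembles the result message from per-action counts. A small sketch of that message construction, using the same past-tense mapping:

    def summarize(action_count):
        # {'install': 2, 'upgrade': 1} -> 'installed 2 packages; upgraded 1 package'
        past_tense = {'install': 'installed', 'upgrade': 'upgraded'}
        messages = []
        for action, count in action_count.items():
            messages.append("%s %s package%s" % (past_tense.get(action, action),
                                                 count, "s" if count != 1 else ""))
        return "; ".join(messages)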
@@ -137,6 +137,10 @@ from distutils.version import LooseVersion
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
|
||||
from ansible.module_utils.urls import fetch_url
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
from ansible.module_utils.six.moves import configparser, StringIO
|
||||
from io import open
|
||||
|
||||
REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck']
|
||||
|
||||
@@ -382,12 +386,62 @@ def main():
|
||||
if not alias and state == "present":
|
||||
module.fail_json(msg='Name required when adding non-repo files.')
|
||||
|
||||
# Download / Open and parse .repo file to ensure idempotency
|
||||
if repo and repo.endswith('.repo'):
|
||||
if repo.startswith(('http://', 'https://')):
|
||||
response, info = fetch_url(module=module, url=repo, force=True)
|
||||
if not response or info['status'] != 200:
|
||||
module.fail_json(msg='Error downloading .repo file from provided URL')
|
||||
repofile_text = to_text(response.read(), errors='surrogate_or_strict')
|
||||
else:
|
||||
try:
|
||||
with open(repo, encoding='utf-8') as file:
|
||||
repofile_text = file.read()
|
||||
except IOError:
|
||||
module.fail_json(msg='Error opening .repo file from provided path')
|
||||
|
||||
repofile = configparser.ConfigParser()
|
||||
try:
|
||||
repofile.readfp(StringIO(repofile_text))
|
||||
except configparser.Error:
|
||||
module.fail_json(msg='Invalid format, .repo file could not be parsed')
|
||||
|
||||
# No support for .repo file with zero or more than one repository
|
||||
if len(repofile.sections()) != 1:
|
||||
err = "Invalid format, .repo file contains %s repositories, expected 1" % len(repofile.sections())
|
||||
module.fail_json(msg=err)
|
||||
|
||||
section = repofile.sections()[0]
|
||||
repofile_items = dict(repofile.items(section))
|
||||
# Only proceed if at least baseurl is available
|
||||
if 'baseurl' not in repofile_items:
|
||||
module.fail_json(msg='No baseurl found in .repo file')
|
||||
|
||||
# Set alias (name) and url based on values from .repo file
|
||||
alias = section
|
||||
repodata['alias'] = section
|
||||
repodata['url'] = repofile_items['baseurl']
|
||||
|
||||
# If gpgkey is part of the .repo file, auto import key
|
||||
if 'gpgkey' in repofile_items:
|
||||
auto_import_keys = True
|
||||
|
||||
# Map additional values, if available
|
||||
if 'name' in repofile_items:
|
||||
repodata['name'] = repofile_items['name']
|
||||
if 'enabled' in repofile_items:
|
||||
repodata['enabled'] = repofile_items['enabled']
|
||||
if 'autorefresh' in repofile_items:
|
||||
repodata['autorefresh'] = repofile_items['autorefresh']
|
||||
if 'gpgcheck' in repofile_items:
|
||||
repodata['gpgcheck'] = repofile_items['gpgcheck']
|
||||
|
||||
exists, mod, old_repos = repo_exists(module, repodata, overwrite_multiple)
|
||||
|
||||
if repo:
|
||||
shortname = repo
|
||||
else:
|
||||
if alias:
|
||||
shortname = alias
|
||||
else:
|
||||
shortname = repo
|
||||
|
||||
if state == 'present':
|
||||
if exists and not mod:
|
||||
|
||||
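The zypper_repository change above makes adding a .repo file idempotent by downloading or opening the file and parsing it, insisting on exactly one section and a baseurl before deriving the alias, URL, and other repository settings. A minimal sketch of that validation, assuming the file is already available as text (the module also fetches http/https URLs via fetch_url):

    import configparser  # the module itself goes through six.moves for Python 2

    def parse_repo_file(text):
        # Mirrors the checks added above: exactly one repository section,
        # and 'baseurl' must be present in it.
        parser = configparser.ConfigParser()
        parser.read_string(text)
        sections = parser.sections()
        if len(sections) != 1:
            raise ValueError("expected exactly 1 repository, found %d" % len(sections))
        items = dict(parser.items(sections[0]))
        if "baseurl" not in items:
            raise ValueError("no baseurl found in .repo file")
        return sections[0], items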
1
plugins/modules/pipx.py
Symbolic link
@@ -0,0 +1 @@
|
||||
./packaging/language/pipx.py
|
||||
1
plugins/modules/proxmox_tasks_info.py
Symbolic link
@@ -0,0 +1 @@
|
||||
cloud/misc/proxmox_tasks_info.py
|
||||
1
plugins/modules/redis_data.py
Symbolic link
@@ -0,0 +1 @@
|
||||
./database/misc/redis_data.py
|
||||
1
plugins/modules/redis_data_info.py
Symbolic link
@@ -0,0 +1 @@
|
||||
database/misc/redis_data_info.py
|
||||
@@ -168,7 +168,9 @@ EXAMPLES = '''
|
||||
password: "{{ password }}"
|
||||
resource_uri: "/redfish/v1/Managers/1/NetworkProtocol/Oem/Lenovo/DNS"
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.redfish_facts.data }}"
|
||||
|
||||
- name: Get Lenovo FoD key collection resource via GetCollectionResource command
|
||||
@@ -180,7 +182,9 @@ EXAMPLES = '''
|
||||
password: "{{ password }}"
|
||||
resource_uri: "/redfish/v1/Managers/1/Oem/Lenovo/FoD/Keys"
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.redfish_facts.data_list }}"
|
||||
|
||||
- name: Update ComputeSystem property AssetTag via PatchResource command
|
||||
|
||||
@@ -47,7 +47,9 @@ EXAMPLES = '''
|
||||
api_version: 500
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information about Data Centers
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.datacenters }}"
|
||||
|
||||
- name: Gather paginated, filtered and sorted information about Data Centers
|
||||
@@ -62,7 +64,9 @@ EXAMPLES = '''
|
||||
sort: 'name:descending'
|
||||
filter: 'state=Unmanaged'
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information about paginated, filtered and sorted list of Data Centers
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.datacenters }}"
|
||||
|
||||
- name: Gather information about a Data Center by name
|
||||
@@ -74,7 +78,9 @@ EXAMPLES = '''
|
||||
name: "My Data Center"
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information about Data Center found by name
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.datacenters }}"
|
||||
|
||||
- name: Gather information about the Data Center Visual Content
|
||||
@@ -88,9 +94,13 @@ EXAMPLES = '''
|
||||
- visualContent
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information about Data Center found by name
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.datacenters }}"
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information about Data Center Visual Content
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.datacenter_visual_content }}"
|
||||
'''
|
||||
|
||||
|
||||
@@ -50,7 +50,9 @@ EXAMPLES = '''
|
||||
no_log: true
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information about Enclosures
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.enclosures }}"
|
||||
|
||||
- name: Gather paginated, filtered and sorted information about Enclosures
|
||||
@@ -67,7 +69,9 @@ EXAMPLES = '''
|
||||
no_log: true
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information about paginated, filtered and sorted list of Enclosures
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.enclosures }}"
|
||||
|
||||
- name: Gather information about an Enclosure by name
|
||||
@@ -80,7 +84,9 @@ EXAMPLES = '''
|
||||
no_log: true
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information about Enclosure found by name
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.enclosures }}"
|
||||
|
||||
- name: Gather information about an Enclosure by name with options
|
||||
@@ -97,13 +103,21 @@ EXAMPLES = '''
|
||||
no_log: true
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information about Enclosure found by name
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.enclosures }}"
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information about Enclosure Script
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.enclosure_script }}"
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information about Enclosure Environmental Configuration
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.enclosure_environmental_configuration }}"
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information about Enclosure Utilization
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.enclosure_utilization }}"
|
||||
|
||||
- name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two
|
||||
@@ -125,9 +139,13 @@ EXAMPLES = '''
|
||||
no_log: true
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information about Enclosure found by name
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.enclosures }}"
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information about Enclosure Utilization
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.enclosure_utilization }}"
|
||||
'''
|
||||
|
||||
|
||||
@@ -44,7 +44,8 @@ EXAMPLES = '''
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information about Ethernet Networks
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.ethernet_networks }}"
|
||||
|
||||
- name: Gather paginated and filtered information about Ethernet Networks
|
||||
@@ -58,7 +59,8 @@ EXAMPLES = '''
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information about paginated and filtered list of Ethernet Networks
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.ethernet_networks }}"
|
||||
|
||||
- name: Gather information about an Ethernet Network by name
|
||||
@@ -68,7 +70,8 @@ EXAMPLES = '''
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information about Ethernet Network found by name
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.ethernet_networks }}"
|
||||
|
||||
- name: Gather information about an Ethernet Network by name with options
|
||||
@@ -81,9 +84,12 @@ EXAMPLES = '''
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information about Ethernet Network Associated Profiles
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.enet_associated_profiles }}"
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information about Ethernet Network Associated Uplink Groups
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.enet_associated_uplink_groups }}"
|
||||
'''
|
||||
|
||||
|
||||
@@ -39,7 +39,8 @@ EXAMPLES = '''
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information about Fibre Channel Networks
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.fc_networks }}"
|
||||
|
||||
- name: Gather paginated, filtered and sorted information about Fibre Channel Networks
|
||||
@@ -52,7 +53,9 @@ EXAMPLES = '''
|
||||
filter: 'fabricType=FabricAttach'
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information about paginated, filtered and sorted list of Fibre Channel Networks
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.fc_networks }}"
|
||||
|
||||
- name: Gather information about a Fibre Channel Network by name
|
||||
@@ -62,7 +65,8 @@ EXAMPLES = '''
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information about Fibre Channel Network found by name
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.fc_networks }}"
|
||||
'''
|
||||
|
||||
|
||||
@@ -38,7 +38,8 @@ EXAMPLES = '''
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information about FCoE Networks
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.fcoe_networks }}"
|
||||
|
||||
- name: Gather paginated, filtered and sorted information about FCoE Networks
|
||||
@@ -52,7 +53,8 @@ EXAMPLES = '''
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information about paginated, filtered and sorted list of FCoE Networks
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.fcoe_networks }}"
|
||||
|
||||
- name: Gather information about a FCoE Network by name
|
||||
@@ -62,7 +64,8 @@ EXAMPLES = '''
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information about FCoE Network found by name
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.fcoe_networks }}"
|
||||
'''
|
||||
|
||||
|
||||
@@ -43,7 +43,8 @@ EXAMPLES = '''
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information about Logical Interconnect Groups
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.logical_interconnect_groups }}"
|
||||
|
||||
- name: Gather paginated, filtered and sorted information about Logical Interconnect Groups
|
||||
@@ -61,7 +62,8 @@ EXAMPLES = '''
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information about paginated, filtered and sorted list of Logical Interconnect Groups
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.logical_interconnect_groups }}"
|
||||
|
||||
- name: Gather information about a Logical Interconnect Group by name
|
||||
@@ -75,7 +77,8 @@ EXAMPLES = '''
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information about Logical Interconnect Group found by name
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.logical_interconnect_groups }}"
|
||||
'''
|
||||
|
||||
|
||||
@@ -51,10 +51,11 @@ EXAMPLES = '''
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information about Network Sets
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.network_sets }}"
|
||||
|
||||
- name: Gather paginated, filtered, and sorted information about Network Sets
|
||||
- name: Gather paginated, filtered and sorted information about Network Sets
|
||||
community.general.oneview_network_set_info:
|
||||
hostname: 172.16.101.48
|
||||
username: administrator
|
||||
@@ -69,7 +70,8 @@ EXAMPLES = '''
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information about paginated, filtered and sorted list of Network Sets
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.network_sets }}"
|
||||
|
||||
- name: Gather information about all Network Sets, excluding Ethernet networks
|
||||
@@ -84,7 +86,8 @@ EXAMPLES = '''
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information about Network Sets, excluding Ethernet networks
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.network_sets }}"
|
||||
|
||||
- name: Gather information about a Network Set by name
|
||||
@@ -98,7 +101,8 @@ EXAMPLES = '''
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information about Network Set found by name
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.network_sets }}"
|
||||
|
||||
- name: Gather information about a Network Set by name, excluding Ethernet networks
|
||||
@@ -114,7 +118,8 @@ EXAMPLES = '''
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information about Network Set found by name, excluding Ethernet networks
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.network_sets }}"
|
||||
'''
|
||||
|
||||
|
||||
@@ -46,7 +46,8 @@ EXAMPLES = '''
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information about SAN Managers
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.san_managers }}"
|
||||
|
||||
- name: Gather paginated, filtered and sorted information about SAN Managers
|
||||
@@ -60,7 +61,8 @@ EXAMPLES = '''
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information about paginated, filtered and sorted list of SAN Managers
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.san_managers }}"
|
||||
|
||||
- name: Gather information about a SAN Manager by provider display name
|
||||
@@ -70,7 +72,8 @@ EXAMPLES = '''
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information about SAN Manager found by provider display name
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.san_managers }}"
|
||||
'''
|
||||
|
||||
|
||||
@@ -56,7 +56,8 @@ options:
|
||||
required: false
|
||||
aliases: [ account_id ]
|
||||
description:
|
||||
- ID of account to delete/modify
|
||||
- ID of account to delete/modify.
|
||||
- Can also be used in account creation to work around vendor issues where the ID of the new user is required in the POST request.
|
||||
type: str
|
||||
new_username:
|
||||
required: false
|
||||
@@ -207,6 +208,15 @@ options:
|
||||
description:
|
||||
- The transfer method to use with the image
|
||||
type: str
|
||||
strip_etag_quotes:
|
||||
description:
|
||||
- Removes surrounding quotes of etag used in C(If-Match) header
|
||||
of C(PATCH) requests.
|
||||
- Only use this option to resolve bad vendor implementation where
|
||||
C(If-Match) only matches the unquoted etag string.
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 3.7.0
|
||||
|
||||
author: "Jose Delarosa (@jose-delarosa)"
|
||||
'''
|
||||
@@ -297,7 +307,7 @@ EXAMPLES = '''
|
||||
community.general.redfish_command:
|
||||
category: Systems
|
||||
command: SetOneTimeBoot
|
||||
bootnext: BiosSetup
|
||||
boot_next: BiosSetup
|
||||
boot_override_mode: Legacy
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
@@ -631,7 +641,8 @@ def main():
|
||||
transfer_protocol_type=dict(),
|
||||
transfer_method=dict(),
|
||||
)
|
||||
)
|
||||
),
|
||||
strip_etag_quotes=dict(type='bool', default=False),
|
||||
),
|
||||
required_together=[
|
||||
('username', 'password'),
|
||||
@@ -686,10 +697,13 @@ def main():
|
||||
# VirtualMedia options
|
||||
virtual_media = module.params['virtual_media']
|
||||
|
||||
# Etag options
|
||||
strip_etag_quotes = module.params['strip_etag_quotes']
|
||||
|
||||
# Build root URI
|
||||
root_uri = "https://" + module.params['baseuri']
|
||||
rf_utils = RedfishUtils(creds, root_uri, timeout, module,
|
||||
resource_id=resource_id, data_modification=True)
|
||||
resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes)
|
||||
|
||||
# Check that Category is valid
|
||||
if category not in CATEGORY_COMMANDS_ALL:
|
||||
|
||||
@@ -91,6 +91,15 @@ options:
|
||||
- setting dict of EthernetInterface on OOB controller
|
||||
type: dict
|
||||
version_added: '0.2.0'
|
||||
strip_etag_quotes:
|
||||
description:
|
||||
- Removes surrounding quotes of etag used in C(If-Match) header
|
||||
of C(PATCH) requests.
|
||||
- Only use this option to resolve bad vendor implementation where
|
||||
C(If-Match) only matches the unquoted etag string.
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 3.7.0
|
||||
|
||||
author: "Jose Delarosa (@jose-delarosa)"
|
||||
'''
|
||||
@@ -237,7 +246,8 @@ def main():
|
||||
nic_config=dict(
|
||||
type='dict',
|
||||
default={}
|
||||
)
|
||||
),
|
||||
strip_etag_quotes=dict(type='bool', default=False),
|
||||
),
|
||||
required_together=[
|
||||
('username', 'password'),
|
||||
@@ -275,10 +285,13 @@ def main():
|
||||
nic_addr = module.params['nic_addr']
|
||||
nic_config = module.params['nic_config']
|
||||
|
||||
# Etag options
|
||||
strip_etag_quotes = module.params['strip_etag_quotes']
|
||||
|
||||
# Build root URI
|
||||
root_uri = "https://" + module.params['baseuri']
|
||||
rf_utils = RedfishUtils(creds, root_uri, timeout, module,
|
||||
resource_id=resource_id, data_modification=True)
|
||||
resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes)
|
||||
|
||||
# Check that Category is valid
|
||||
if category not in CATEGORY_COMMANDS_ALL:
|
||||
|
||||
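Both redfish_command and redfish_config above gain a strip_etag_quotes option that is handed to RedfishUtils, so a quoted ETag can be unquoted before being sent in the If-Match header of a PATCH request. A tiny illustration of the intended transformation (hypothetical helper, not the actual RedfishUtils code):

    def if_match_header(etag, strip_etag_quotes=False):
        # Some BMC firmware only matches the unquoted ETag string; strip the
        # surrounding quotes only when the user opts in, otherwise send as-is.
        if strip_etag_quotes:
            etag = etag.strip('"')
        return {"If-Match": etag}

    # if_match_header('"1234"', strip_etag_quotes=True) -> {'If-Match': '1234'}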
@@ -67,7 +67,9 @@ EXAMPLES = '''
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}"
|
||||
|
||||
- name: Get CPU model
|
||||
@@ -78,7 +80,9 @@ EXAMPLES = '''
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.redfish_facts.cpu.entries.0.Model }}"
|
||||
|
||||
- name: Get memory inventory
|
||||
@@ -108,7 +112,9 @@ EXAMPLES = '''
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}"
|
||||
|
||||
- name: Get Volume Inventory
|
||||
@@ -119,7 +125,8 @@ EXAMPLES = '''
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}"
|
||||
|
||||
- name: Get Session information
|
||||
@@ -130,7 +137,9 @@ EXAMPLES = '''
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.redfish_facts.session.entries | to_nice_json }}"
|
||||
|
||||
- name: Get default inventory information
|
||||
@@ -139,7 +148,8 @@ EXAMPLES = '''
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.redfish_facts | to_nice_json }}"
|
||||
|
||||
- name: Get several inventories
|
||||
|
||||
1
plugins/modules/rundeck_job_executions_info.py
Symbolic link
@@ -0,0 +1 @@
|
||||
./web_infrastructure/rundeck_job_executions_info.py
|
||||
1
plugins/modules/rundeck_job_run.py
Symbolic link
@@ -0,0 +1 @@
|
||||
./web_infrastructure/rundeck_job_run.py
|
||||
@@ -97,7 +97,7 @@ author:
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: create a new webhook that triggers on push (password auth)
|
||||
- name: Create a new webhook that triggers on push (password auth)
|
||||
community.general.github_webhook:
|
||||
repository: ansible/ansible
|
||||
url: https://www.example.com/hooks/
|
||||
|
||||
@@ -149,7 +149,8 @@ class GitLabDeployKey(object):
|
||||
# GitLab REST API, so for that case we need to delete and
|
||||
# then recreate the key
|
||||
if self.deployKeyObject and self.deployKeyObject.key != key_key:
|
||||
self.deployKeyObject.delete()
|
||||
if not self._module.check_mode:
|
||||
self.deployKeyObject.delete()
|
||||
self.deployKeyObject = None
|
||||
|
||||
# Because we have already call existsDeployKey in main()
|
||||
@@ -211,7 +212,7 @@ class GitLabDeployKey(object):
|
||||
@param key_title Title of the key
|
||||
'''
|
||||
def findDeployKey(self, project, key_title):
|
||||
deployKeys = project.keys.list()
|
||||
deployKeys = project.keys.list(all=True)
|
||||
for deployKey in deployKeys:
|
||||
if (deployKey.title == key_title):
|
||||
return deployKey
|
||||
|
||||
@@ -61,6 +61,28 @@ options:
|
||||
choices: ["private", "internal", "public"]
|
||||
default: private
|
||||
type: str
|
||||
project_creation_level:
|
||||
description:
|
||||
- Determine if developers can create projects in the group.
|
||||
choices: ["developer", "maintainer", "noone"]
|
||||
type: str
|
||||
version_added: 3.7.0
|
||||
auto_devops_enabled:
|
||||
description:
|
||||
- Default to Auto DevOps pipeline for all projects within this group.
|
||||
type: bool
|
||||
version_added: 3.7.0
|
||||
subgroup_creation_level:
|
||||
description:
|
||||
- Allowed to create subgroups.
|
||||
choices: ["maintainer", "owner"]
|
||||
type: str
|
||||
version_added: 3.7.0
|
||||
require_two_factor_authentication:
|
||||
description:
|
||||
- Require all users in this group to set up two-factor authentication.
|
||||
type: bool
|
||||
version_added: 3.7.0
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
@@ -93,6 +115,20 @@ EXAMPLES = '''
|
||||
path: my_first_group
|
||||
state: present
|
||||
parent: "super_parent/parent"
|
||||
|
||||
# Other group which only allows sub-groups - no projects
|
||||
- name: "Create GitLab Group for SubGroups only"
|
||||
community.general.gitlab_group:
|
||||
api_url: https://gitlab.example.com/
|
||||
validate_certs: True
|
||||
api_username: dj-wasabi
|
||||
api_password: "MySecretPassword"
|
||||
name: my_main_group
|
||||
path: my_main_group
|
||||
state: present
|
||||
project_creation_level: noone
|
||||
auto_devops_enabled: false
|
||||
subgroup_creation_level: maintainer
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
@@ -166,17 +202,27 @@ class GitLabGroup(object):
|
||||
'name': name,
|
||||
'path': options['path'],
|
||||
'parent_id': parent_id,
|
||||
'visibility': options['visibility']
|
||||
'visibility': options['visibility'],
|
||||
'project_creation_level': options['project_creation_level'],
|
||||
'auto_devops_enabled': options['auto_devops_enabled'],
|
||||
'subgroup_creation_level': options['subgroup_creation_level'],
|
||||
}
|
||||
if options.get('description'):
|
||||
payload['description'] = options['description']
|
||||
if options.get('require_two_factor_authentication'):
|
||||
payload['require_two_factor_authentication'] = options['require_two_factor_authentication']
|
||||
group = self.createGroup(payload)
|
||||
changed = True
|
||||
else:
|
||||
changed, group = self.updateGroup(self.groupObject, {
|
||||
'name': name,
|
||||
'description': options['description'],
|
||||
'visibility': options['visibility']})
|
||||
'visibility': options['visibility'],
|
||||
'project_creation_level': options['project_creation_level'],
|
||||
'auto_devops_enabled': options['auto_devops_enabled'],
|
||||
'subgroup_creation_level': options['subgroup_creation_level'],
|
||||
'require_two_factor_authentication': options['require_two_factor_authentication'],
|
||||
})
|
||||
|
||||
self.groupObject = group
|
||||
if changed:
|
||||
@@ -258,6 +304,10 @@ def main():
|
||||
state=dict(type='str', default="present", choices=["absent", "present"]),
|
||||
parent=dict(type='str'),
|
||||
visibility=dict(type='str', default="private", choices=["internal", "private", "public"]),
|
||||
project_creation_level=dict(type='str', choices=['developer', 'maintainer', 'noone']),
|
||||
auto_devops_enabled=dict(type='bool'),
|
||||
subgroup_creation_level=dict(type='str', choices=['maintainer', 'owner']),
|
||||
require_two_factor_authentication=dict(type='bool'),
|
||||
))
|
||||
|
||||
module = AnsibleModule(
|
||||
@@ -281,6 +331,10 @@ def main():
|
||||
state = module.params['state']
|
||||
parent_identifier = module.params['parent']
|
||||
group_visibility = module.params['visibility']
|
||||
project_creation_level = module.params['project_creation_level']
|
||||
auto_devops_enabled = module.params['auto_devops_enabled']
|
||||
subgroup_creation_level = module.params['subgroup_creation_level']
|
||||
require_two_factor_authentication = module.params['require_two_factor_authentication']
|
||||
|
||||
if not HAS_GITLAB_PACKAGE:
|
||||
module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
|
||||
@@ -314,7 +368,12 @@ def main():
|
||||
if gitlab_group.createOrUpdateGroup(group_name, parent_group, {
|
||||
"path": group_path,
|
||||
"description": description,
|
||||
"visibility": group_visibility}):
|
||||
"visibility": group_visibility,
|
||||
"project_creation_level": project_creation_level,
|
||||
"auto_devops_enabled": auto_devops_enabled,
|
||||
"subgroup_creation_level": subgroup_creation_level,
|
||||
"require_two_factor_authentication": require_two_factor_authentication,
|
||||
}):
|
||||
module.exit_json(changed=True, msg="Successfully created or updated the group %s" % group_name, group=gitlab_group.groupObject._attrs)
|
||||
else:
|
||||
module.exit_json(changed=False, msg="No need to update the group %s" % group_name, group=gitlab_group.groupObject._attrs)
|
||||
|
||||
@@ -74,7 +74,7 @@ options:
|
||||
type: str
|
||||
purge_users:
|
||||
description:
|
||||
- Adds/removes users of the given access_level to match the given gitlab_user/gitlab_users_access list.
- Adds/removes users of the given access_level to match the given I(gitlab_user)/I(gitlab_users_access) list.
If omitted, do not purge orphaned members.
|
||||
- Is only used when I(state=present).
|
||||
type: list
|
||||
@@ -104,7 +104,7 @@ EXAMPLES = r'''
|
||||
state: absent
|
||||
|
||||
- name: Add a list of Users to A GitLab Group
|
||||
gitlab_group_members:
|
||||
community.general.gitlab_group_members:
|
||||
api_url: 'https://gitlab.example.com'
|
||||
api_token: 'Your-Private-Token'
|
||||
gitlab_group: groupname
|
||||
@@ -115,7 +115,7 @@ EXAMPLES = r'''
|
||||
state: present
|
||||
|
||||
- name: Add a list of Users with Dedicated Access Levels to A GitLab Group
|
||||
gitlab_group_members:
|
||||
community.general.gitlab_group_members:
|
||||
api_url: 'https://gitlab.example.com'
|
||||
api_token: 'Your-Private-Token'
|
||||
gitlab_group: groupname
|
||||
@@ -127,7 +127,7 @@ EXAMPLES = r'''
|
||||
state: present
|
||||
|
||||
- name: Add a user, remove all others which might be on this access level
|
||||
gitlab_group_members:
|
||||
community.general.gitlab_group_members:
|
||||
api_url: 'https://gitlab.example.com'
|
||||
api_token: 'Your-Private-Token'
|
||||
gitlab_group: groupname
|
||||
@@ -137,7 +137,7 @@ EXAMPLES = r'''
|
||||
state: present
|
||||
|
||||
- name: Remove a list of Users with Dedicated Access Levels to A GitLab Group
|
||||
gitlab_group_members:
|
||||
community.general.gitlab_group_members:
|
||||
api_url: 'https://gitlab.example.com'
|
||||
api_token: 'Your-Private-Token'
|
||||
gitlab_group: groupname
|
||||
@@ -179,9 +179,13 @@ class GitLabGroup(object):
|
||||
|
||||
# get group id if group exists
|
||||
def get_group_id(self, gitlab_group):
|
||||
group_exists = self._gitlab.groups.list(search=gitlab_group)
|
||||
if group_exists:
|
||||
return group_exists[0].id
|
||||
groups = self._gitlab.groups.list(search=gitlab_group)
|
||||
for group in groups:
|
||||
if group.full_path == gitlab_group:
|
||||
return group.id
|
||||
for group in groups:
|
||||
if group.path == gitlab_group or group.name == gitlab_group:
|
||||
return group.id
|
||||
|
||||
# get all members in a group
|
||||
def get_members_in_a_group(self, gitlab_group_id):
|
||||
|
||||
@@ -145,7 +145,16 @@ options:
|
||||
type: str
|
||||
choices: ["never", "always", "default_off", "default_on"]
|
||||
version_added: "3.4.0"
|
||||
|
||||
ci_config_path:
|
||||
description:
|
||||
- Custom path to the CI configuration file for this project.
|
||||
type: str
|
||||
version_added: "3.7.0"
|
||||
shared_runners_enabled:
|
||||
description:
|
||||
- Enable shared runners for this project.
|
||||
type: bool
|
||||
version_added: "3.7.0"
|
||||
'''
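Both new options correspond one-to-one to project attributes in the GitLab API, so outside of Ansible the same change could be made roughly like this (the client gl, project path and CI file path are illustrative assumptions):

# Sketch: set a custom CI config path and turn off shared runners with python-gitlab.
project = gl.projects.get('mygroup/myproject')
project.ci_config_path = 'ci/.gitlab-ci.yml'
project.shared_runners_enabled = False
project.save()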
|
||||
|
||||
EXAMPLES = r'''
|
||||
@@ -252,6 +261,8 @@ class GitLabProject(object):
|
||||
'packages_enabled': options['packages_enabled'],
|
||||
'remove_source_branch_after_merge': options['remove_source_branch_after_merge'],
|
||||
'squash_option': options['squash_option'],
|
||||
'ci_config_path': options['ci_config_path'],
|
||||
'shared_runners_enabled': options['shared_runners_enabled'],
|
||||
}
|
||||
# Because we have already called userExists in main()
|
||||
if self.projectObject is None:
|
||||
@@ -364,6 +375,8 @@ def main():
|
||||
packages_enabled=dict(type='bool'),
|
||||
remove_source_branch_after_merge=dict(type='bool'),
|
||||
squash_option=dict(type='str', choices=['never', 'always', 'default_off', 'default_on']),
|
||||
ci_config_path=dict(type='str'),
|
||||
shared_runners_enabled=dict(type='bool'),
|
||||
))
|
||||
|
||||
module = AnsibleModule(
|
||||
@@ -402,6 +415,8 @@ def main():
|
||||
packages_enabled = module.params['packages_enabled']
|
||||
remove_source_branch_after_merge = module.params['remove_source_branch_after_merge']
|
||||
squash_option = module.params['squash_option']
|
||||
ci_config_path = module.params['ci_config_path']
|
||||
shared_runners_enabled = module.params['shared_runners_enabled']
|
||||
|
||||
if not HAS_GITLAB_PACKAGE:
|
||||
module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
|
||||
@@ -466,6 +481,8 @@ def main():
|
||||
"packages_enabled": packages_enabled,
|
||||
"remove_source_branch_after_merge": remove_source_branch_after_merge,
|
||||
"squash_option": squash_option,
|
||||
"ci_config_path": ci_config_path,
|
||||
"shared_runners_enabled": shared_runners_enabled,
|
||||
}):
|
||||
|
||||
module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.projectObject._attrs)
|
||||
|
||||
@@ -48,20 +48,42 @@ options:
|
||||
type: str
|
||||
project:
|
||||
description:
|
||||
- The name of the GitLab project the member is added to/removed from.
|
||||
- The name (or full path) of the GitLab project the member is added to/removed from.
|
||||
required: true
|
||||
type: str
|
||||
gitlab_user:
|
||||
description:
|
||||
- The username of the member to add to/remove from the GitLab project.
|
||||
required: true
|
||||
type: str
|
||||
- A username or a list of usernames to add to/remove from the GitLab project.
|
||||
- Mutually exclusive with I(gitlab_users_access).
|
||||
type: list
|
||||
elements: str
|
||||
access_level:
|
||||
description:
|
||||
- The access level for the user.
|
||||
- Required if I(state=present), that is, when the user state is set to present.
|
||||
type: str
|
||||
choices: ['guest', 'reporter', 'developer', 'maintainer']
|
||||
gitlab_users_access:
|
||||
description:
|
||||
- Provide a list of user-to-access-level mappings.
|
||||
- Every dictionary in this list specifies a user (by username) and the access level the user should have.
|
||||
- Mutually exclusive with I(gitlab_user) and I(access_level).
|
||||
- Use together with I(purge_users) to remove all users not specified here from the project.
|
||||
type: list
|
||||
elements: dict
|
||||
suboptions:
|
||||
name:
|
||||
description: The username of the user to add to/remove from the GitLab project.
|
||||
type: str
|
||||
required: true
|
||||
access_level:
|
||||
description:
|
||||
- The access level for the user.
|
||||
- Required if I(state=present), that is, when the user state is set to present.
|
||||
type: str
|
||||
choices: ['guest', 'reporter', 'developer', 'maintainer']
|
||||
required: true
|
||||
version_added: 3.7.0
|
||||
state:
|
||||
description:
|
||||
- State of the member in the project.
|
||||
@@ -70,6 +92,15 @@ options:
|
||||
choices: ['present', 'absent']
|
||||
default: 'present'
|
||||
type: str
|
||||
purge_users:
|
||||
description:
|
||||
- Adds/removes users of the given access_level to match the given I(gitlab_user)/I(gitlab_users_access) list.
If omitted, do not purge orphaned members.
|
||||
- Is only used when I(state=present).
|
||||
type: list
|
||||
elements: str
|
||||
choices: ['guest', 'reporter', 'developer', 'maintainer']
|
||||
version_added: 3.7.0
|
||||
notes:
|
||||
- Supports C(check_mode).
|
||||
'''
|
||||
@@ -93,6 +124,51 @@ EXAMPLES = r'''
|
||||
project: projectname
|
||||
gitlab_user: username
|
||||
state: absent
|
||||
|
||||
- name: Add a list of Users to A GitLab project
|
||||
community.general.gitlab_project_members:
|
||||
api_url: 'https://gitlab.example.com'
|
||||
api_token: 'Your-Private-Token'
|
||||
gitlab_project: projectname
|
||||
gitlab_user:
|
||||
- user1
|
||||
- user2
|
||||
access_level: developer
|
||||
state: present
|
||||
|
||||
- name: Add a list of Users with Dedicated Access Levels to A GitLab project
|
||||
community.general.gitlab_project_members:
|
||||
api_url: 'https://gitlab.example.com'
|
||||
api_token: 'Your-Private-Token'
|
||||
project: projectname
|
||||
gitlab_users_access:
|
||||
- name: user1
|
||||
access_level: developer
|
||||
- name: user2
|
||||
access_level: maintainer
|
||||
state: present
|
||||
|
||||
- name: Add a user, remove all others which might be on this access level
|
||||
community.general.gitlab_project_members:
|
||||
api_url: 'https://gitlab.example.com'
|
||||
api_token: 'Your-Private-Token'
|
||||
project: projectname
|
||||
gitlab_user: username
|
||||
access_level: developer
|
||||
purge_users: developer
|
||||
state: present
|
||||
|
||||
- name: Remove a list of Users with Dedicated Access Levels to A GitLab project
|
||||
community.general.gitlab_project_members:
|
||||
api_url: 'https://gitlab.example.com'
|
||||
api_token: 'Your-Private-Token'
|
||||
project: projectname
|
||||
gitlab_users_access:
|
||||
- name: user1
|
||||
access_level: developer
|
||||
- name: user2
|
||||
access_level: maintainer
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = r''' # '''
|
||||
@@ -118,9 +194,13 @@ class GitLabProjectMembers(object):
|
||||
self._gitlab = gl
|
||||
|
||||
def get_project(self, project_name):
|
||||
project_exists = self._gitlab.projects.list(search=project_name)
|
||||
if project_exists:
|
||||
return project_exists[0].id
|
||||
try:
|
||||
project_exists = self._gitlab.projects.get(project_name)
|
||||
return project_exists.id
|
||||
except gitlab.exceptions.GitlabGetError as e:
|
||||
project_exists = self._gitlab.projects.list(search=project_name)
|
||||
if project_exists:
|
||||
return project_exists[0].id
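Trying projects.get() first resolves an exact ID or full path such as group/project, and the previous search-based lookup is kept only as a fallback for bare names. Condensed:

# Sketch of the lookup order introduced above.
def find_project_id(gl, name_or_path):
    try:
        return gl.projects.get(name_or_path).id          # exact ID or full path
    except gitlab.exceptions.GitlabGetError:
        candidates = gl.projects.list(search=name_or_path)
        return candidates[0].id if candidates else None  # first search hit, if any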
|
||||
|
||||
def get_user_id(self, gitlab_user):
|
||||
user_exists = self._gitlab.users.list(username=gitlab_user)
|
||||
@@ -132,6 +212,17 @@ class GitLabProjectMembers(object):
|
||||
project = self._gitlab.projects.get(gitlab_project_id)
|
||||
return project.members.list(all=True)
|
||||
|
||||
# get single member in a project by user name
|
||||
def get_member_in_a_project(self, gitlab_project_id, gitlab_user_id):
|
||||
member = None
|
||||
project = self._gitlab.projects.get(gitlab_project_id)
|
||||
try:
|
||||
member = project.members.get(gitlab_user_id)
|
||||
if member:
|
||||
return member
|
||||
except gitlab.exceptions.GitlabGetError as e:
|
||||
return None
|
||||
|
||||
# check if the user is a member of the project
|
||||
def is_user_a_member(self, members, gitlab_user_id):
|
||||
for member in members:
|
||||
@@ -141,27 +232,14 @@ class GitLabProjectMembers(object):
|
||||
|
||||
# add user to a project
|
||||
def add_member_to_project(self, gitlab_user_id, gitlab_project_id, access_level):
|
||||
try:
|
||||
project = self._gitlab.projects.get(gitlab_project_id)
|
||||
add_member = project.members.create(
|
||||
{'user_id': gitlab_user_id, 'access_level': access_level})
|
||||
|
||||
if add_member:
|
||||
return add_member.username
|
||||
|
||||
except (gitlab.exceptions.GitlabCreateError) as e:
|
||||
self._module.fail_json(
|
||||
msg="Failed to add member to the project, project ID %s: %s" % (gitlab_project_id, e))
|
||||
project = self._gitlab.projects.get(gitlab_project_id)
|
||||
add_member = project.members.create(
|
||||
{'user_id': gitlab_user_id, 'access_level': access_level})
|
||||
|
||||
# remove user from a project
|
||||
def remove_user_from_project(self, gitlab_user_id, gitlab_project_id):
|
||||
try:
|
||||
project = self._gitlab.projects.get(gitlab_project_id)
|
||||
project.members.delete(gitlab_user_id)
|
||||
|
||||
except (gitlab.exceptions.GitlabDeleteError) as e:
|
||||
self._module.fail_json(
|
||||
msg="Failed to remove member from GitLab project, ID %s: %s" % (gitlab_project_id, e))
|
||||
project = self._gitlab.projects.get(gitlab_project_id)
|
||||
project.members.delete(gitlab_user_id)
|
||||
|
||||
# get user's access level
|
||||
def get_user_access_level(self, members, gitlab_user_id):
|
||||
@@ -173,12 +251,8 @@ class GitLabProjectMembers(object):
|
||||
def update_user_access_level(self, members, gitlab_user_id, access_level):
|
||||
for member in members:
|
||||
if member.id == gitlab_user_id:
|
||||
try:
|
||||
member.access_level = access_level
|
||||
member.save()
|
||||
except (gitlab.exceptions.GitlabCreateError) as e:
|
||||
self._module.fail_json(
|
||||
msg="Failed to update the access level for the member, %s: %s" % (gitlab_user_id, e))
|
||||
member.access_level = access_level
|
||||
member.save()
|
||||
|
||||
|
||||
def main():
|
||||
@@ -186,9 +260,20 @@ def main():
|
||||
argument_spec.update(dict(
|
||||
api_token=dict(type='str', required=True, no_log=True),
|
||||
project=dict(type='str', required=True),
|
||||
gitlab_user=dict(type='str', required=True),
|
||||
gitlab_user=dict(type='list', elements='str'),
|
||||
state=dict(type='str', default='present', choices=['present', 'absent']),
|
||||
access_level=dict(type='str', required=False, choices=['guest', 'reporter', 'developer', 'maintainer'])
|
||||
access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer']),
|
||||
purge_users=dict(type='list', elements='str', choices=[
|
||||
'guest', 'reporter', 'developer', 'maintainer']),
|
||||
gitlab_users_access=dict(
|
||||
type='list',
|
||||
elements='dict',
|
||||
options=dict(
|
||||
name=dict(type='str', required=True),
|
||||
access_level=dict(type='str', choices=[
|
||||
'guest', 'reporter', 'developer', 'maintainer'], required=True),
|
||||
)
|
||||
),
|
||||
))
|
||||
|
||||
module = AnsibleModule(
|
||||
@@ -196,15 +281,19 @@ def main():
|
||||
mutually_exclusive=[
|
||||
['api_username', 'api_token'],
|
||||
['api_password', 'api_token'],
|
||||
['gitlab_user', 'gitlab_users_access'],
|
||||
['access_level', 'gitlab_users_access'],
|
||||
],
|
||||
required_together=[
|
||||
['api_username', 'api_password'],
|
||||
['gitlab_user', 'access_level'],
|
||||
],
|
||||
required_one_of=[
|
||||
['api_username', 'api_token'],
|
||||
['gitlab_user', 'gitlab_users_access'],
|
||||
],
|
||||
required_if=[
|
||||
['state', 'present', ['access_level']],
|
||||
['state', 'present', ['access_level', 'gitlab_users_access'], True],
|
||||
],
|
||||
supports_check_mode=True,
|
||||
)
|
||||
@@ -212,71 +301,168 @@ def main():
|
||||
if not HAS_PY_GITLAB:
|
||||
module.fail_json(msg=missing_required_lib('python-gitlab', url='https://python-gitlab.readthedocs.io/en/stable/'), exception=GITLAB_IMP_ERR)
|
||||
|
||||
access_level_int = {
|
||||
'guest': gitlab.GUEST_ACCESS,
|
||||
'reporter': gitlab.REPORTER_ACCESS,
|
||||
'developer': gitlab.DEVELOPER_ACCESS,
|
||||
'maintainer': gitlab.MAINTAINER_ACCESS,
|
||||
}
|
||||
|
||||
gitlab_project = module.params['project']
|
||||
gitlab_user = module.params['gitlab_user']
|
||||
state = module.params['state']
|
||||
access_level = module.params['access_level']
|
||||
purge_users = module.params['purge_users']
|
||||
|
||||
# convert access level string input to int
|
||||
if access_level:
|
||||
access_level_int = {
|
||||
'guest': gitlab.GUEST_ACCESS,
|
||||
'reporter': gitlab.REPORTER_ACCESS,
|
||||
'developer': gitlab.DEVELOPER_ACCESS,
|
||||
'maintainer': gitlab.MAINTAINER_ACCESS
|
||||
}
|
||||
|
||||
access_level = access_level_int[access_level]
|
||||
if purge_users:
|
||||
purge_users = [access_level_int[level] for level in purge_users]
|
||||
|
||||
# connect to gitlab server
|
||||
gl = gitlabAuthentication(module)
|
||||
|
||||
project = GitLabProjectMembers(module, gl)
|
||||
|
||||
gitlab_user_id = project.get_user_id(gitlab_user)
|
||||
gitlab_project_id = project.get_project(gitlab_project)
|
||||
|
||||
# project doesn't exist
|
||||
if not gitlab_project_id:
|
||||
module.fail_json(msg="project '%s' not found." % gitlab_project)
|
||||
|
||||
# user doesn't exist
|
||||
if not gitlab_user_id:
|
||||
if state == 'absent':
|
||||
module.exit_json(changed=False, result="user '%s' not found, and thus also not part of the project" % gitlab_user)
|
||||
else:
|
||||
module.fail_json(msg="user '%s' not found." % gitlab_user)
|
||||
members = []
|
||||
if module.params['gitlab_user'] is not None:
|
||||
gitlab_users_access = []
|
||||
gitlab_users = module.params['gitlab_user']
|
||||
for gl_user in gitlab_users:
|
||||
gitlab_users_access.append(
|
||||
{'name': gl_user, 'access_level': access_level_int[access_level] if access_level else None})
|
||||
elif module.params['gitlab_users_access'] is not None:
|
||||
gitlab_users_access = module.params['gitlab_users_access']
|
||||
for user_level in gitlab_users_access:
|
||||
user_level['access_level'] = access_level_int[user_level['access_level']]
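Whichever of the two input styles is used, it ends up normalized into one list of name/access_level dictionaries with numeric GitLab access levels, which the rest of main() iterates over. Illustratively (the usernames are placeholders):

# Both parameter styles normalize to the same structure.
# gitlab_user: [user1, user2] together with access_level: developer
normalized = [{'name': 'user1', 'access_level': gitlab.DEVELOPER_ACCESS},
              {'name': 'user2', 'access_level': gitlab.DEVELOPER_ACCESS}]
# gitlab_users_access with explicit per-user levels
normalized = [{'name': 'user1', 'access_level': gitlab.DEVELOPER_ACCESS},
              {'name': 'user2', 'access_level': gitlab.MAINTAINER_ACCESS}]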
|
||||
|
||||
members = project.get_members_in_a_project(gitlab_project_id)
|
||||
is_user_a_member = project.is_user_a_member(members, gitlab_user_id)
|
||||
|
||||
# check if the user is a member in the project
|
||||
if not is_user_a_member:
|
||||
if state == 'present':
|
||||
# add user to the project
|
||||
if not module.check_mode:
|
||||
project.add_member_to_project(gitlab_user_id, gitlab_project_id, access_level)
|
||||
module.exit_json(changed=True, result="Successfully added user '%s' to the project." % gitlab_user)
|
||||
# state as absent
|
||||
else:
|
||||
module.exit_json(changed=False, result="User, '%s', is not a member in the project. No change to report" % gitlab_user)
|
||||
# in case that a user is a member
|
||||
if len(gitlab_users_access) == 1 and not purge_users:
|
||||
# only single user given
|
||||
members = [project.get_member_in_a_project(
|
||||
gitlab_project_id, project.get_user_id(gitlab_users_access[0]['name']))]
|
||||
if members[0] is None:
|
||||
members = []
|
||||
elif len(gitlab_users_access) > 1 or purge_users:
|
||||
# list of users given
|
||||
members = project.get_members_in_a_project(gitlab_project_id)
|
||||
else:
|
||||
if state == 'present':
|
||||
# compare the access level
|
||||
user_access_level = project.get_user_access_level(members, gitlab_user_id)
|
||||
if user_access_level == access_level:
|
||||
module.exit_json(changed=False, result="User, '%s', is already a member in the project. No change to report" % gitlab_user)
|
||||
module.exit_json(changed=False, result="Nothing to do, please give at least one user or set purge_users true.",
result_data=[])
|
||||
|
||||
changed = False
|
||||
error = False
|
||||
changed_users = []
|
||||
changed_data = []
|
||||
|
||||
for gitlab_user in gitlab_users_access:
|
||||
gitlab_user_id = project.get_user_id(gitlab_user['name'])
|
||||
|
||||
# user doesn't exist
|
||||
if not gitlab_user_id:
|
||||
if state == 'absent':
|
||||
changed_users.append("user '%s' not found, and thus also not part of the project" % gitlab_user['name'])
|
||||
changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
|
||||
'msg': "user '%s' not found, and thus also not part of the project" % gitlab_user['name']})
|
||||
else:
|
||||
# update the access level for the user
|
||||
if not module.check_mode:
|
||||
project.update_user_access_level(members, gitlab_user_id, access_level)
|
||||
module.exit_json(changed=True, result="Successfully updated the access level for the user, '%s'" % gitlab_user)
|
||||
error = True
|
||||
changed_users.append("user '%s' not found." % gitlab_user['name'])
|
||||
changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
|
||||
'msg': "user '%s' not found." % gitlab_user['name']})
|
||||
continue
|
||||
|
||||
is_user_a_member = project.is_user_a_member(members, gitlab_user_id)
|
||||
|
||||
# check if the user is a member in the project
|
||||
if not is_user_a_member:
|
||||
if state == 'present':
|
||||
# add user to the project
|
||||
try:
|
||||
if not module.check_mode:
|
||||
project.add_member_to_project(gitlab_user_id, gitlab_project_id, gitlab_user['access_level'])
|
||||
changed = True
|
||||
changed_users.append("Successfully added user '%s' to project" % gitlab_user['name'])
|
||||
changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
|
||||
'msg': "Successfully added user '%s' to project" % gitlab_user['name']})
|
||||
except (gitlab.exceptions.GitlabCreateError) as e:
|
||||
error = True
|
||||
changed_users.append("Failed to updated the access level for the user, '%s'" % gitlab_user['name'])
|
||||
changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
|
||||
'msg': "Not allowed to add the access level for the member, %s: %s" % (gitlab_user['name'], e)})
|
||||
# state as absent
|
||||
else:
|
||||
changed_users.append("User, '%s', is not a member in the project. No change to report" % gitlab_user['name'])
|
||||
changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
|
||||
'msg': "User, '%s', is not a member in the project. No change to report" % gitlab_user['name']})
|
||||
# in case that a user is a member
|
||||
else:
|
||||
# remove the user from the project
|
||||
if not module.check_mode:
|
||||
project.remove_user_from_project(gitlab_user_id, gitlab_project_id)
|
||||
module.exit_json(changed=True, result="Successfully removed user, '%s', from the project" % gitlab_user)
|
||||
if state == 'present':
|
||||
# compare the access level
|
||||
user_access_level = project.get_user_access_level(members, gitlab_user_id)
|
||||
if user_access_level == gitlab_user['access_level']:
|
||||
changed_users.append("User, '%s', is already a member in the project. No change to report" % gitlab_user['name'])
|
||||
changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
|
||||
'msg': "User, '%s', is already a member in the project. No change to report" % gitlab_user['name']})
|
||||
else:
|
||||
# update the access level for the user
|
||||
try:
|
||||
if not module.check_mode:
|
||||
project.update_user_access_level(members, gitlab_user_id, gitlab_user['access_level'])
|
||||
changed = True
|
||||
changed_users.append("Successfully updated the access level for the user, '%s'" % gitlab_user['name'])
|
||||
changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
|
||||
'msg': "Successfully updated the access level for the user, '%s'" % gitlab_user['name']})
|
||||
except (gitlab.exceptions.GitlabUpdateError) as e:
|
||||
error = True
|
||||
changed_users.append("Failed to updated the access level for the user, '%s'" % gitlab_user['name'])
|
||||
changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
|
||||
'msg': "Not allowed to update the access level for the member, %s: %s" % (gitlab_user['name'], e)})
|
||||
else:
|
||||
# remove the user from the project
|
||||
try:
|
||||
if not module.check_mode:
|
||||
project.remove_user_from_project(gitlab_user_id, gitlab_project_id)
|
||||
changed = True
|
||||
changed_users.append("Successfully removed user, '%s', from the project" % gitlab_user['name'])
|
||||
changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
|
||||
'msg': "Successfully removed user, '%s', from the project" % gitlab_user['name']})
|
||||
except (gitlab.exceptions.GitlabDeleteError) as e:
|
||||
error = True
|
||||
changed_users.append("Failed to removed user, '%s', from the project" % gitlab_user['name'])
|
||||
changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
|
||||
'msg': "Failed to remove user, '%s' from the project: %s" % (gitlab_user['name'], e)})
|
||||
|
||||
# if state is present and purge_users is set, delete members that hold one of the given access levels but are not in gitlab_users
|
||||
if state == 'present' and purge_users:
|
||||
uppercase_names_in_gitlab_users_access = []
|
||||
for name in gitlab_users_access:
|
||||
uppercase_names_in_gitlab_users_access.append(name['name'].upper())
|
||||
|
||||
for member in members:
|
||||
if member.access_level in purge_users and member.username.upper() not in uppercase_names_in_gitlab_users_access:
|
||||
try:
|
||||
if not module.check_mode:
|
||||
project.remove_user_from_project(member.id, gitlab_project_id)
|
||||
changed = True
|
||||
changed_users.append("Successfully removed user '%s', from project. Was not in given list" % member.username)
|
||||
changed_data.append({'gitlab_user': member.username, 'result': 'CHANGED',
|
||||
'msg': "Successfully removed user '%s', from project. Was not in given list" % member.username})
|
||||
except (gitlab.exceptions.GitlabDeleteError) as e:
|
||||
error = True
|
||||
changed_users.append("Failed to removed user, '%s', from the project" % gitlab_user['name'])
|
||||
changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
|
||||
'msg': "Failed to remove user, '%s' from the project: %s" % (gitlab_user['name'], e)})
|
||||
|
||||
if len(gitlab_users_access) == 1 and error:
|
||||
# if a single user was given and an error occurred, return that error; for a list, errors are reported per user
|
||||
module.fail_json(msg="FAILED: '%s '" % changed_users[0], result_data=changed_data)
|
||||
elif error:
|
||||
module.fail_json(
|
||||
msg='FAILED: At least one given user/permission could not be set', result_data=changed_data)
|
||||
|
||||
module.exit_json(changed=changed, msg='Successfully set memberships', result="\n".join(changed_users), result_data=changed_data)
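Every per-user action lands in changed_users/changed_data, and failures are only raised at the very end, so one bad entry in a list does not stop the remaining users from being processed. The purge decision itself boils down to a membership test along these lines (a hedged restatement, not extra module code):

# Sketch: an existing member is purged when it holds a purge-listed access level
# and is not named in the requested user list.
def should_purge(member, purge_levels, requested_names_upper):
    return (member.access_level in purge_levels
            and member.username.upper() not in requested_names_upper)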
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Raphaël Droz (raphael.droz@gmail.com)
|
||||
# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
|
||||
# Copyright: (c) 2018, Samy Coenen <samy.coenen@nubera.be>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
@@ -38,6 +39,11 @@ options:
|
||||
description:
|
||||
- Your private token to interact with the GitLab API.
|
||||
type: str
|
||||
project:
|
||||
description:
|
||||
- ID or full path of the project in the form of group/name.
|
||||
type: str
|
||||
version_added: '3.7.0'
|
||||
description:
|
||||
description:
|
||||
- The unique name of the runner.
|
||||
@@ -86,7 +92,7 @@ options:
|
||||
type: str
|
||||
maximum_timeout:
|
||||
description:
|
||||
- The maximum timeout that a runner has to pick up a specific job.
|
||||
- The maximum time that a runner has to complete a specific job.
|
||||
required: False
|
||||
default: 3600
|
||||
type: int
|
||||
@@ -131,6 +137,15 @@ EXAMPLES = '''
|
||||
description: Docker Machine t1
|
||||
owned: yes
|
||||
state: absent
|
||||
|
||||
- name: Register runner for a specific project
|
||||
community.general.gitlab_runner:
|
||||
api_url: https://gitlab.example.com/
|
||||
api_token: "{{ access_token }}"
|
||||
registration_token: 4gfdsg345
|
||||
description: MyProject runner
|
||||
state: present
|
||||
project: mygroup/mysubgroup/myproject
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
@@ -181,9 +196,13 @@ except NameError:
|
||||
|
||||
|
||||
class GitLabRunner(object):
|
||||
def __init__(self, module, gitlab_instance):
|
||||
def __init__(self, module, gitlab_instance, project=None):
|
||||
self._module = module
|
||||
self._gitlab = gitlab_instance
|
||||
# Whether to operate on GitLab-instance-wide or project-wide runners
|
||||
# See https://gitlab.com/gitlab-org/gitlab-ce/issues/60774
|
||||
# for group runner token access
|
||||
self._runners_endpoint = project.runners if project else gitlab_instance.runners
|
||||
self.runnerObject = None
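Choosing the endpoint once in the constructor means every later list/create/get call transparently targets either the instance-wide or the project-scoped runner API. Outside the module that selection looks roughly like this (the client gl, project_path and project path value are assumptions):

# Sketch: pick the runner endpoint once, then reuse it for all runner operations.
project = gl.projects.get('mygroup/mysubgroup/myproject') if project_path else None
runners_endpoint = project.runners if project else gl.runners
existing_runners = runners_endpoint.list(as_list=False)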
|
||||
|
||||
def createOrUpdateRunner(self, description, options):
|
||||
@@ -230,7 +249,7 @@ class GitLabRunner(object):
|
||||
return True
|
||||
|
||||
try:
|
||||
runner = self._gitlab.runners.create(arguments)
|
||||
runner = self._runners_endpoint.create(arguments)
|
||||
except (gitlab.exceptions.GitlabCreateError) as e:
|
||||
self._module.fail_json(msg="Failed to create runner: %s " % to_native(e))
|
||||
|
||||
@@ -265,19 +284,19 @@ class GitLabRunner(object):
|
||||
'''
|
||||
def findRunner(self, description, owned=False):
|
||||
if owned:
|
||||
runners = self._gitlab.runners.list(as_list=False)
|
||||
runners = self._runners_endpoint.list(as_list=False)
|
||||
else:
|
||||
runners = self._gitlab.runners.all(as_list=False)
|
||||
runners = self._runners_endpoint.all(as_list=False)
|
||||
|
||||
for runner in runners:
|
||||
# python-gitlab 2.2 through at least 2.5 returns a list of dicts for list() instead of a Runner
|
||||
# object, so we need to handle both
|
||||
if hasattr(runner, "description"):
|
||||
if (runner.description == description):
|
||||
return self._gitlab.runners.get(runner.id)
|
||||
return self._runners_endpoint.get(runner.id)
|
||||
else:
|
||||
if (runner['description'] == description):
|
||||
return self._gitlab.runners.get(runner['id'])
|
||||
return self._runners_endpoint.get(runner['id'])
|
||||
|
||||
'''
|
||||
@param description Description of the runner
|
||||
@@ -313,6 +332,7 @@ def main():
|
||||
access_level=dict(type='str', default='ref_protected', choices=["not_protected", "ref_protected"]),
|
||||
maximum_timeout=dict(type='int', default=3600),
|
||||
registration_token=dict(type='str', no_log=True),
|
||||
project=dict(type='str'),
|
||||
state=dict(type='str', default="present", choices=["absent", "present"]),
|
||||
))
|
||||
|
||||
@@ -344,13 +364,20 @@ def main():
|
||||
access_level = module.params['access_level']
|
||||
maximum_timeout = module.params['maximum_timeout']
|
||||
registration_token = module.params['registration_token']
|
||||
project = module.params['project']
|
||||
|
||||
if not HAS_GITLAB_PACKAGE:
|
||||
module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
|
||||
|
||||
gitlab_instance = gitlabAuthentication(module)
|
||||
gitlab_project = None
|
||||
if project:
|
||||
try:
|
||||
gitlab_project = gitlab_instance.projects.get(project)
|
||||
except gitlab.exceptions.GitlabGetError as e:
|
||||
module.fail_json(msg='No such project %s' % project, exception=to_native(e))
|
||||
|
||||
gitlab_runner = GitLabRunner(module, gitlab_instance)
|
||||
gitlab_runner = GitLabRunner(module, gitlab_instance, gitlab_project)
|
||||
runner_exists = gitlab_runner.existsRunner(runner_description, owned)
|
||||
|
||||
if state == 'absent':
|
||||
|
||||
@@ -125,23 +125,16 @@ class ZPoolFacts(object):
|
||||
def __init__(self, module):
|
||||
|
||||
self.module = module
|
||||
|
||||
self.name = module.params['name']
|
||||
self.parsable = module.params['parsable']
|
||||
self.properties = module.params['properties']
|
||||
|
||||
self._pools = defaultdict(dict)
|
||||
self.facts = []
|
||||
|
||||
def pool_exists(self):
|
||||
cmd = [self.module.get_bin_path('zpool'), 'list', self.name]
|
||||
|
||||
(rc, out, err) = self.module.run_command(cmd)
|
||||
|
||||
if rc == 0:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
rc, dummy, dummy = self.module.run_command(cmd)
|
||||
return rc == 0
|
||||
|
||||
def get_facts(self):
|
||||
cmd = [self.module.get_bin_path('zpool'), 'get', '-H']
|
||||
@@ -153,41 +146,36 @@ class ZPoolFacts(object):
|
||||
if self.name:
|
||||
cmd.append(self.name)
|
||||
|
||||
(rc, out, err) = self.module.run_command(cmd)
|
||||
rc, out, err = self.module.run_command(cmd, check_rc=True)
|
||||
|
||||
if rc == 0:
|
||||
for line in out.splitlines():
|
||||
pool, property, value = line.split('\t')
|
||||
for line in out.splitlines():
|
||||
pool, prop, value = line.split('\t')
|
||||
|
||||
self._pools[pool].update({property: value})
|
||||
self._pools[pool].update({prop: value})
|
||||
|
||||
for k, v in iteritems(self._pools):
|
||||
v.update({'name': k})
|
||||
self.facts.append(v)
|
||||
for k, v in iteritems(self._pools):
|
||||
v.update({'name': k})
|
||||
self.facts.append(v)
|
||||
|
||||
return {'ansible_zfs_pools': self.facts}
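The zpool get -H output consumed here is a series of tab-separated rows, and the parser simply folds them into one dictionary per pool. With an illustrative two-row output:

# Sketch: fold tab-separated `zpool get -H` rows into per-pool fact dicts.
from collections import defaultdict

out = "rpool\tsize\t460G\nrpool\thealth\tONLINE\n"  # illustrative output, not real data
pools = defaultdict(dict)
for line in out.splitlines():
    pool, prop, value = line.split('\t')
    pools[pool][prop] = value
# pools == {'rpool': {'size': '460G', 'health': 'ONLINE'}}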
|
||||
else:
|
||||
self.module.fail_json(msg='Error while trying to get facts about ZFS pool: %s' % self.name,
|
||||
stderr=err,
|
||||
rc=rc)
|
||||
return {'ansible_zfs_pools': self.facts}
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
name=dict(required=False, aliases=['pool', 'zpool'], type='str'),
|
||||
parsable=dict(required=False, default=False, type='bool'),
|
||||
properties=dict(required=False, default='all', type='str'),
|
||||
name=dict(aliases=['pool', 'zpool'], type='str'),
|
||||
parsable=dict(default=False, type='bool'),
|
||||
properties=dict(default='all', type='str'),
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
zpool_facts = ZPoolFacts(module)
|
||||
|
||||
result = {}
|
||||
result['changed'] = False
|
||||
result['name'] = zpool_facts.name
|
||||
|
||||
result = {
|
||||
'changed': False,
|
||||
'name': zpool_facts.name,
|
||||
}
|
||||
if zpool_facts.parsable:
|
||||
result['parsable'] = zpool_facts.parsable
|
||||
|
||||
|
||||
@@ -148,57 +148,48 @@ from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.common.text.converters import to_bytes
|
||||
|
||||
|
||||
def lineDict(line):
|
||||
def line_dict(line):
|
||||
return {'line': line, 'line_type': 'unknown'}
|
||||
|
||||
|
||||
def optionDict(line, iface, option, value, address_family):
|
||||
def make_option_dict(line, iface, option, value, address_family):
|
||||
return {'line': line, 'iface': iface, 'option': option, 'value': value, 'line_type': 'option', 'address_family': address_family}
|
||||
|
||||
|
||||
def getValueFromLine(s):
|
||||
spaceRe = re.compile(r'\s+')
|
||||
for m in spaceRe.finditer(s):
|
||||
pass
|
||||
valueEnd = m.start()
|
||||
option = s.split()[0]
|
||||
optionStart = s.find(option)
|
||||
optionLen = len(option)
|
||||
valueStart = re.search(r'\s', s[optionLen + optionStart:]).end() + optionLen + optionStart
|
||||
return s[valueStart:valueEnd]
|
||||
def get_option_value(line):
|
||||
patt = re.compile(r'^\s+(?P<option>\S+)\s+(?P<value>\S?.*\S)\s*$')
|
||||
match = patt.match(line)
|
||||
if not match:
|
||||
return None, None
|
||||
return match.group("option"), match.group("value")
|
||||
|
||||
|
||||
def read_interfaces_file(module, filename):
|
||||
f = open(filename, 'r')
|
||||
return read_interfaces_lines(module, f)
|
||||
with open(filename, 'r') as f:
|
||||
return read_interfaces_lines(module, f)
|
||||
|
||||
|
||||
def _is_line_processing_none(first_word):
|
||||
return first_word in ("source", "source-dir", "source-directory", "auto", "no-auto-down", "no-scripts") or first_word.startswith("auto-")
|
||||
|
||||
|
||||
def read_interfaces_lines(module, line_strings):
|
||||
lines = []
|
||||
ifaces = {}
|
||||
iface_name = None
|
||||
address_family = None
|
||||
currif = {}
|
||||
currently_processing = None
|
||||
i = 0
|
||||
for line in line_strings:
|
||||
i += 1
|
||||
for i, line in enumerate(line_strings):
|
||||
words = line.split()
|
||||
if len(words) < 1:
|
||||
lines.append(lineDict(line))
|
||||
continue
|
||||
if words[0][0] == "#":
|
||||
lines.append(lineDict(line))
|
||||
if not words or words[0].startswith("#"):
|
||||
lines.append(line_dict(line))
|
||||
continue
|
||||
if words[0] == "mapping":
|
||||
# currmap = calloc(1, sizeof *currmap);
|
||||
lines.append(lineDict(line))
|
||||
lines.append(line_dict(line))
|
||||
currently_processing = "MAPPING"
|
||||
elif words[0] == "source":
|
||||
lines.append(lineDict(line))
|
||||
currently_processing = "NONE"
|
||||
elif words[0] == "source-dir":
|
||||
lines.append(lineDict(line))
|
||||
currently_processing = "NONE"
|
||||
elif words[0] == "source-directory":
|
||||
lines.append(lineDict(line))
|
||||
elif _is_line_processing_none(words[0]):
|
||||
lines.append(line_dict(line))
|
||||
currently_processing = "NONE"
|
||||
elif words[0] == "iface":
|
||||
currif = {
|
||||
@@ -221,39 +212,26 @@ def read_interfaces_lines(module, line_strings):
|
||||
ifaces[iface_name] = currif
|
||||
lines.append({'line': line, 'iface': iface_name, 'line_type': 'iface', 'params': currif, 'address_family': address_family})
|
||||
currently_processing = "IFACE"
|
||||
elif words[0] == "auto":
|
||||
lines.append(lineDict(line))
|
||||
currently_processing = "NONE"
|
||||
elif words[0].startswith("allow-"):
|
||||
lines.append(lineDict(line))
|
||||
currently_processing = "NONE"
|
||||
elif words[0] == "no-auto-down":
|
||||
lines.append(lineDict(line))
|
||||
currently_processing = "NONE"
|
||||
elif words[0] == "no-scripts":
|
||||
lines.append(lineDict(line))
|
||||
currently_processing = "NONE"
|
||||
else:
|
||||
if currently_processing == "IFACE":
|
||||
option_name = words[0]
|
||||
option_name, value = get_option_value(line)
|
||||
# TODO: if option_name in currif.options
|
||||
value = getValueFromLine(line)
|
||||
lines.append(optionDict(line, iface_name, option_name, value, address_family))
|
||||
lines.append(make_option_dict(line, iface_name, option_name, value, address_family))
|
||||
if option_name in ["pre-up", "up", "down", "post-up"]:
|
||||
currif[option_name].append(value)
|
||||
else:
|
||||
currif[option_name] = value
|
||||
elif currently_processing == "MAPPING":
|
||||
lines.append(lineDict(line))
|
||||
lines.append(line_dict(line))
|
||||
elif currently_processing == "NONE":
|
||||
lines.append(lineDict(line))
|
||||
lines.append(line_dict(line))
|
||||
else:
|
||||
module.fail_json(msg="misplaced option %s in line %d" % (line, i))
|
||||
return None, None
|
||||
module.fail_json(msg="misplaced option %s in line %d" % (line, i + 1))
|
||||
|
||||
return lines, ifaces
|
||||
|
||||
|
||||
def setInterfaceOption(module, lines, iface, option, raw_value, state, address_family=None):
|
||||
def set_interface_option(module, lines, iface, option, raw_value, state, address_family=None):
|
||||
value = str(raw_value)
|
||||
changed = False
|
||||
|
||||
@@ -262,57 +240,54 @@ def setInterfaceOption(module, lines, iface, option, raw_value, state, address_f
|
||||
iface_lines = [item for item in iface_lines
|
||||
if "address_family" in item and item["address_family"] == address_family]
|
||||
|
||||
if len(iface_lines) < 1:
|
||||
if not iface_lines:
|
||||
# interface not found
|
||||
module.fail_json(msg="Error: interface %s not found" % iface)
|
||||
return changed, None
|
||||
|
||||
iface_options = list(filter(lambda i: i['line_type'] == 'option', iface_lines))
|
||||
target_options = list(filter(lambda i: i['option'] == option, iface_options))
|
||||
iface_options = [il for il in iface_lines if il['line_type'] == 'option']
|
||||
target_options = [io for io in iface_options if io['option'] == option]
|
||||
|
||||
if state == "present":
|
||||
if len(target_options) < 1:
|
||||
changed = True
|
||||
if not target_options:
|
||||
# add new option
|
||||
last_line_dict = iface_lines[-1]
|
||||
changed, lines = addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family)
|
||||
else:
|
||||
if option in ["pre-up", "up", "down", "post-up"]:
|
||||
if len(list(filter(lambda i: i['value'] == value, target_options))) < 1:
|
||||
changed, lines = addOptionAfterLine(option, value, iface, lines, target_options[-1], iface_options, address_family)
|
||||
else:
|
||||
# if more than one option found edit the last one
|
||||
if target_options[-1]['value'] != value:
|
||||
changed = True
|
||||
target_option = target_options[-1]
|
||||
old_line = target_option['line']
|
||||
old_value = target_option['value']
|
||||
address_family = target_option['address_family']
|
||||
prefix_start = old_line.find(option)
|
||||
optionLen = len(option)
|
||||
old_value_position = re.search(r"\s+".join(map(re.escape, old_value.split())), old_line[prefix_start + optionLen:])
|
||||
start = old_value_position.start() + prefix_start + optionLen
|
||||
end = old_value_position.end() + prefix_start + optionLen
|
||||
line = old_line[:start] + value + old_line[end:]
|
||||
index = len(lines) - lines[::-1].index(target_option) - 1
|
||||
lines[index] = optionDict(line, iface, option, value, address_family)
|
||||
elif state == "absent":
|
||||
if len(target_options) >= 1:
|
||||
return add_option_after_line(option, value, iface, lines, last_line_dict, iface_options, address_family)
|
||||
|
||||
if option in ["pre-up", "up", "down", "post-up"] and all(ito for ito in target_options if ito['value'] != value):
|
||||
return add_option_after_line(option, value, iface, lines, target_options[-1], iface_options, address_family)
|
||||
|
||||
# if more than one option found edit the last one
|
||||
if target_options[-1]['value'] != value:
|
||||
changed = True
|
||||
target_option = target_options[-1]
|
||||
old_line = target_option['line']
|
||||
old_value = target_option['value']
|
||||
address_family = target_option['address_family']
|
||||
prefix_start = old_line.find(option)
|
||||
option_len = len(option)
|
||||
old_value_position = re.search(r"\s+".join(map(re.escape, old_value.split())), old_line[prefix_start + option_len:])
|
||||
start = old_value_position.start() + prefix_start + option_len
|
||||
end = old_value_position.end() + prefix_start + option_len
|
||||
line = old_line[:start] + value + old_line[end:]
|
||||
index = len(lines) - lines[::-1].index(target_option) - 1
|
||||
lines[index] = make_option_dict(line, iface, option, value, address_family)
|
||||
return changed, lines
|
||||
|
||||
if state == "absent":
|
||||
if target_options:
|
||||
if option in ["pre-up", "up", "down", "post-up"] and value is not None and value != "None":
|
||||
for target_option in filter(lambda i: i['value'] == value, target_options):
|
||||
for target_option in [ito for ito in target_options if ito['value'] == value]:
|
||||
changed = True
|
||||
lines = list(filter(lambda ln: ln != target_option, lines))
|
||||
lines = [ln for ln in lines if ln != target_option]
|
||||
else:
|
||||
changed = True
|
||||
for target_option in target_options:
|
||||
lines = list(filter(lambda ln: ln != target_option, lines))
|
||||
else:
|
||||
module.fail_json(msg="Error: unsupported state %s, has to be either present or absent" % state)
|
||||
lines = [ln for ln in lines if ln != target_option]
|
||||
|
||||
return changed, lines
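When the option already exists with a different value, only the value portion of the original line is spliced out, so the surrounding indentation and the option keyword are preserved exactly. A small worked example of that splice:

# Illustrative splice of a new value into an existing option line.
import re

old_line = "    address 192.168.0.42/24\n"
option, old_value, value = "address", "192.168.0.42/24", "10.0.0.5/24"
prefix_start = old_line.find(option)
pos = re.search(r"\s+".join(map(re.escape, old_value.split())), old_line[prefix_start + len(option):])
start = pos.start() + prefix_start + len(option)
end = pos.end() + prefix_start + len(option)
new_line = old_line[:start] + value + old_line[end:]
# new_line == "    address 10.0.0.5/24\n"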
|
||||
|
||||
|
||||
def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family):
|
||||
def add_option_after_line(option, value, iface, lines, last_line_dict, iface_options, address_family):
|
||||
# Changing method of interface is not an addition
|
||||
if option == 'method':
|
||||
changed = False
|
||||
@@ -328,23 +303,21 @@ def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_option
|
||||
suffix_start = last_line.rfind(last_line.split()[-1]) + len(last_line.split()[-1])
|
||||
prefix = last_line[:prefix_start]
|
||||
|
||||
if len(iface_options) < 1:
|
||||
if not iface_options:
|
||||
# interface has no options, indent
|
||||
prefix += " "
|
||||
|
||||
line = prefix + "%s %s" % (option, value) + last_line[suffix_start:]
|
||||
option_dict = optionDict(line, iface, option, value, address_family)
|
||||
option_dict = make_option_dict(line, iface, option, value, address_family)
|
||||
index = len(lines) - lines[::-1].index(last_line_dict)
|
||||
lines.insert(index, option_dict)
|
||||
return True, lines
|
||||
|
||||
|
||||
def write_changes(module, lines, dest):
|
||||
|
||||
tmpfd, tmpfile = tempfile.mkstemp()
|
||||
f = os.fdopen(tmpfd, 'wb')
|
||||
f.write(to_bytes(''.join(lines), errors='surrogate_or_strict'))
|
||||
f.close()
|
||||
with os.fdopen(tmpfd, 'wb') as f:
|
||||
f.write(to_bytes(''.join(lines), errors='surrogate_or_strict'))
|
||||
module.atomic_move(tmpfile, os.path.realpath(dest))
|
||||
|
||||
|
||||
@@ -382,7 +355,7 @@ def main():
|
||||
changed = False
|
||||
|
||||
if option is not None:
|
||||
changed, lines = setInterfaceOption(module, lines, iface, option, value, state, address_family)
|
||||
changed, lines = set_interface_option(module, lines, iface, option, value, state, address_family)
|
||||
|
||||
if changed:
|
||||
dummy, ifaces = read_interfaces_lines(module, [d['line'] for d in lines if 'line' in d])
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Alexei Znamensky (@russoz) <russoz@gmail.com>
|
||||
# Copyright: (c) 2013, Matthias Vogelgesang <matthias.vogelgesang@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
@@ -32,6 +33,7 @@ options:
|
||||
description:
|
||||
- If specified, use this blacklist file instead of
|
||||
C(/etc/modprobe.d/blacklist-ansible.conf).
|
||||
default: /etc/modprobe.d/blacklist-ansible.conf
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
@@ -43,110 +45,73 @@ EXAMPLES = '''
|
||||
|
||||
import os
|
||||
import re
|
||||
import tempfile
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
|
||||
|
||||
|
||||
class Blacklist(object):
|
||||
def __init__(self, module, filename, checkmode):
|
||||
self.filename = filename
|
||||
self.module = module
|
||||
self.checkmode = checkmode
|
||||
|
||||
def create_file(self):
|
||||
if not self.checkmode and not os.path.exists(self.filename):
|
||||
open(self.filename, 'a').close()
|
||||
return True
|
||||
elif self.checkmode and not os.path.exists(self.filename):
|
||||
self.filename = os.devnull
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def get_pattern(self):
|
||||
return r'^blacklist\s*' + self.module + '$'
|
||||
|
||||
def readlines(self):
|
||||
f = open(self.filename, 'r')
|
||||
lines = f.readlines()
|
||||
f.close()
|
||||
return lines
|
||||
|
||||
def module_listed(self):
|
||||
lines = self.readlines()
|
||||
pattern = self.get_pattern()
|
||||
|
||||
for line in lines:
|
||||
stripped = line.strip()
|
||||
if stripped.startswith('#'):
|
||||
continue
|
||||
|
||||
if re.match(pattern, stripped):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def remove_module(self):
|
||||
lines = self.readlines()
|
||||
pattern = self.get_pattern()
|
||||
|
||||
if self.checkmode:
|
||||
f = open(os.devnull, 'w')
|
||||
else:
|
||||
f = open(self.filename, 'w')
|
||||
|
||||
for line in lines:
|
||||
if not re.match(pattern, line.strip()):
|
||||
f.write(line)
|
||||
|
||||
f.close()
|
||||
|
||||
def add_module(self):
|
||||
if self.checkmode:
|
||||
f = open(os.devnull, 'a')
|
||||
else:
|
||||
f = open(self.filename, 'a')
|
||||
|
||||
f.write('blacklist %s\n' % self.module)
|
||||
|
||||
f.close()
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
class Blacklist(StateModuleHelper):
|
||||
output_params = ('name', 'state')
|
||||
module = dict(
|
||||
argument_spec=dict(
|
||||
name=dict(type='str', required=True),
|
||||
state=dict(type='str', default='present', choices=['absent', 'present']),
|
||||
blacklist_file=dict(type='str')
|
||||
blacklist_file=dict(type='str', default='/etc/modprobe.d/blacklist-ansible.conf'),
|
||||
),
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
args = dict(changed=False, failed=False,
|
||||
name=module.params['name'], state=module.params['state'])
|
||||
def __init_module__(self):
|
||||
self.pattern = re.compile(r'^blacklist\s+{0}$'.format(re.escape(self.vars.name)))
|
||||
self.vars.filename = self.vars.blacklist_file
|
||||
self.vars.set('file_exists', os.path.exists(self.vars.filename), output=False, change=True)
|
||||
if not self.vars.file_exists:
|
||||
with open(self.vars.filename, 'a'):
|
||||
pass
|
||||
self.vars.file_exists = True
|
||||
self.vars.set('lines', [], change=True, diff=True)
|
||||
else:
|
||||
with open(self.vars.filename) as fd:
|
||||
self.vars.set('lines', [x.rstrip() for x in fd.readlines()], change=True, diff=True)
|
||||
self.vars.set('is_blacklisted', self._is_module_blocked(), change=True)
|
||||
|
||||
filename = '/etc/modprobe.d/blacklist-ansible.conf'
|
||||
def _is_module_blocked(self):
|
||||
for line in self.vars.lines:
|
||||
stripped = line.strip()
|
||||
if stripped.startswith('#'):
|
||||
continue
|
||||
if self.pattern.match(stripped):
|
||||
return True
|
||||
return False
|
||||
|
||||
if module.params['blacklist_file']:
|
||||
filename = module.params['blacklist_file']
|
||||
def state_absent(self):
|
||||
if not self.vars.is_blacklisted:
|
||||
return
|
||||
self.vars.is_blacklisted = False
|
||||
self.vars.lines = [line for line in self.vars.lines if not self.pattern.match(line.strip())]
|
||||
|
||||
blacklist = Blacklist(args['name'], filename, module.check_mode)
|
||||
def state_present(self):
|
||||
if self.vars.is_blacklisted:
|
||||
return
|
||||
self.vars.is_blacklisted = True
|
||||
self.vars.lines = self.vars.lines + ['blacklist %s' % self.vars.name]
|
||||
|
||||
if blacklist.create_file():
|
||||
args['changed'] = True
|
||||
else:
|
||||
args['changed'] = False
|
||||
def __quit_module__(self):
|
||||
if self.has_changed() and not self.module.check_mode:
|
||||
dummy, tmpfile = tempfile.mkstemp()
|
||||
try:
|
||||
os.remove(tmpfile)
|
||||
self.module.preserved_copy(self.vars.filename, tmpfile) # ensure right perms/ownership
|
||||
with open(tmpfile, 'w') as fd:
|
||||
fd.writelines(["{0}\n".format(x) for x in self.vars.lines])
|
||||
self.module.atomic_move(tmpfile, self.vars.filename)
|
||||
finally:
|
||||
if os.path.exists(tmpfile):
|
||||
os.remove(tmpfile)
|
||||
|
||||
if blacklist.module_listed():
|
||||
if args['state'] == 'absent':
|
||||
blacklist.remove_module()
|
||||
args['changed'] = True
|
||||
else:
|
||||
if args['state'] == 'present':
|
||||
blacklist.add_module()
|
||||
args['changed'] = True
|
||||
|
||||
module.exit_json(**args)
|
||||
def main():
|
||||
Blacklist.execute()
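The rewrite moves the module onto StateModuleHelper: parameters live in the class-level module dict, __init_module__ loads state into self.vars, one state_<value>() method per state choice mutates it, and __quit_module__ persists the result only when something actually changed. A minimal sketch of that pattern, using only the helper API visible in this diff (the module name and marker format are made up):

# Hypothetical minimal StateModuleHelper module, modeled on the usage above.
from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper

class Marker(StateModuleHelper):
    output_params = ('name', 'state')
    module = dict(
        argument_spec=dict(
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['absent', 'present']),
        ),
        supports_check_mode=True,
    )

    def __init_module__(self):
        # vars registered with change=True are compared before/after to drive `changed`
        self.vars.set('lines', [], change=True, diff=True)

    def state_present(self):
        self.vars.lines = self.vars.lines + ['marker %s' % self.vars.name]

    def state_absent(self):
        self.vars.lines = [line for line in self.vars.lines if line != 'marker %s' % self.vars.name]

def main():
    Marker.execute()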
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
@@ -451,7 +451,8 @@ def main():
|
||||
if this_lv is None:
|
||||
if state == 'present':
|
||||
if size_operator is not None:
|
||||
module.fail_json(msg="Bad size specification of '%s%s' for creating LV" % (size_operator, size))
|
||||
if size_operator == "-" or (size_whole not in ["VG", "PVS", "FREE", "ORIGIN", None]):
|
||||
module.fail_json(msg="Bad size specification of '%s%s' for creating LV" % (size_operator, size))
|
||||
# Require size argument except for snapshot of thin volumes
|
||||
if (lv or thinpool) and not size:
|
||||
for test_lv in lvs:
|
||||
|
||||
@@ -41,17 +41,27 @@ options:
|
||||
aliases: [ state ]
|
||||
node_auth:
|
||||
description:
|
||||
- The value for C(discovery.sendtargets.auth.authmethod).
|
||||
- The value for C(node.session.auth.authmethod).
|
||||
type: str
|
||||
default: CHAP
|
||||
node_user:
|
||||
description:
|
||||
- The value for C(discovery.sendtargets.auth.username).
|
||||
- The value for C(node.session.auth.username).
|
||||
type: str
|
||||
node_pass:
|
||||
description:
|
||||
- The value for C(discovery.sendtargets.auth.password).
|
||||
- The value for C(node.session.auth.password).
|
||||
type: str
|
||||
node_user_in:
|
||||
description:
|
||||
- The value for C(node.session.auth.username_in).
|
||||
type: str
|
||||
version_added: 3.8.0
|
||||
node_pass_in:
|
||||
description:
|
||||
- The value for C(node.session.auth.password_in).
|
||||
type: str
|
||||
version_added: 3.8.0
|
||||
auto_node_startup:
|
||||
description:
|
||||
- Whether the target node should be automatically connected at startup.
|
||||
@@ -191,6 +201,8 @@ def target_login(module, target, portal=None, port=None):
|
||||
node_auth = module.params['node_auth']
|
||||
node_user = module.params['node_user']
|
||||
node_pass = module.params['node_pass']
|
||||
node_user_in = module.params['node_user_in']
|
||||
node_pass_in = module.params['node_pass_in']
|
||||
|
||||
if node_user:
|
||||
params = [('node.session.auth.authmethod', node_auth),
|
||||
@@ -200,6 +212,13 @@ def target_login(module, target, portal=None, port=None):
|
||||
cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', name, '--value', value]
|
||||
module.run_command(cmd, check_rc=True)
|
||||
|
||||
if node_user_in:
|
||||
params = [('node.session.auth.username_in', node_user_in),
|
||||
('node.session.auth.password_in', node_pass_in)]
|
||||
for (name, value) in params:
|
||||
cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', name, '--value', value]
|
||||
module.run_command(cmd, check_rc=True)
|
||||
|
||||
cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--login']
|
||||
if portal is not None and port is not None:
|
||||
cmd.append('--portal')
|
||||
@@ -277,6 +296,8 @@ def main():
|
||||
node_auth=dict(type='str', default='CHAP'),
|
||||
node_user=dict(type='str'),
|
||||
node_pass=dict(type='str', no_log=True),
|
||||
node_user_in=dict(type='str'),
|
||||
node_pass_in=dict(type='str', no_log=True),
|
||||
|
||||
# actions
|
||||
login=dict(type='bool', aliases=['state']),
|
||||
@@ -286,7 +307,7 @@ def main():
|
||||
show_nodes=dict(type='bool', default=False),
|
||||
),
|
||||
|
||||
required_together=[['node_user', 'node_pass']],
|
||||
required_together=[['node_user', 'node_pass'], ['node_user_in', 'node_pass_in']],
|
||||
required_if=[('discover', True, ['portal'])],
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
@@ -54,9 +54,12 @@ pids:
|
||||
sample: [100,200]
|
||||
'''
|
||||
|
||||
import abc
|
||||
import re
|
||||
from distutils.version import LooseVersion
|
||||
from os.path import basename
|
||||
|
||||
from ansible.module_utils import six
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
@@ -68,6 +71,100 @@ except ImportError:
|
||||
HAS_PSUTIL = False
|
||||
|
||||
|
||||
class PSAdapterError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class PSAdapter(object):
|
||||
NAME_ATTRS = ('name', 'cmdline')
|
||||
PATTERN_ATTRS = ('name', 'exe', 'cmdline')
|
||||
|
||||
def __init__(self, psutil):
|
||||
self._psutil = psutil
|
||||
|
||||
@staticmethod
|
||||
def from_package(psutil):
|
||||
version = LooseVersion(psutil.__version__)
|
||||
if version < LooseVersion('2.0.0'):
|
||||
return PSAdapter100(psutil)
|
||||
elif version < LooseVersion('5.3.0'):
|
||||
return PSAdapter200(psutil)
|
||||
else:
|
||||
return PSAdapter530(psutil)
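psutil changed its interface twice across the versions the module supports: before 2.0.0 process attributes are plain properties, from 2.0.0 they are methods, and from 5.3.0 process_iter(attrs=...) pre-fetches them into proc.info. Dispatching on the installed version hides all of that behind one adapter, so callers stay version-agnostic:

# Illustrative use of the version dispatch above.
import psutil

adapter = PSAdapter.from_package(psutil)   # returns PSAdapter100, PSAdapter200 or PSAdapter530
pids = adapter.get_pids_by_name('sshd')    # identical call on every supported psutil version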
|
||||
|
||||
def get_pids_by_name(self, name):
|
||||
return [p.pid for p in self._process_iter(*self.NAME_ATTRS) if self._has_name(p, name)]
|
||||
|
||||
def _process_iter(self, *attrs):
|
||||
return self._psutil.process_iter()
|
||||
|
||||
def _has_name(self, proc, name):
|
||||
attributes = self._get_proc_attributes(proc, *self.NAME_ATTRS)
|
||||
return (compare_lower(attributes['name'], name) or
|
||||
attributes['cmdline'] and compare_lower(attributes['cmdline'][0], name))
|
||||
|
||||
def _get_proc_attributes(self, proc, *attributes):
|
||||
return dict((attribute, self._get_attribute_from_proc(proc, attribute)) for attribute in attributes)
|
||||
|
||||
@staticmethod
|
||||
@abc.abstractmethod
|
||||
def _get_attribute_from_proc(proc, attribute):
|
||||
pass
|
||||
|
||||
def get_pids_by_pattern(self, pattern, ignore_case):
|
||||
flags = 0
|
||||
if ignore_case:
|
||||
flags |= re.I
|
||||
|
||||
try:
|
||||
regex = re.compile(pattern, flags)
|
||||
except re.error as e:
|
||||
raise PSAdapterError("'%s' is not a valid regular expression: %s" % (pattern, to_native(e)))
|
||||
|
||||
return [p.pid for p in self._process_iter(*self.PATTERN_ATTRS) if self._matches_regex(p, regex)]
|
||||
|
||||
def _matches_regex(self, proc, regex):
|
||||
# See https://psutil.readthedocs.io/en/latest/#find-process-by-name for more information
|
||||
attributes = self._get_proc_attributes(proc, *self.PATTERN_ATTRS)
|
||||
matches_name = regex.search(to_native(attributes['name']))
|
||||
matches_exe = attributes['exe'] and regex.search(basename(to_native(attributes['exe'])))
|
||||
matches_cmd = attributes['cmdline'] and regex.search(to_native(' '.join(attributes['cmdline'])))
|
||||
|
||||
return any([matches_name, matches_exe, matches_cmd])
|
||||
|
||||
|
||||
class PSAdapter100(PSAdapter):
|
||||
def __init__(self, psutil):
|
||||
super(PSAdapter100, self).__init__(psutil)
|
||||
|
||||
@staticmethod
|
||||
def _get_attribute_from_proc(proc, attribute):
|
||||
return getattr(proc, attribute)
|
||||
|
||||
|
||||
class PSAdapter200(PSAdapter):
|
||||
def __init__(self, psutil):
|
||||
super(PSAdapter200, self).__init__(psutil)
|
||||
|
||||
@staticmethod
|
||||
def _get_attribute_from_proc(proc, attribute):
|
||||
method = getattr(proc, attribute)
|
||||
return method()
|
||||
|
||||
|
||||
class PSAdapter530(PSAdapter):
|
||||
def __init__(self, psutil):
|
||||
super(PSAdapter530, self).__init__(psutil)
|
||||
|
||||
def _process_iter(self, *attrs):
|
||||
return self._psutil.process_iter(attrs=attrs)
|
||||
|
||||
@staticmethod
|
||||
def _get_attribute_from_proc(proc, attribute):
|
||||
return proc.info[attribute]
|
||||
|
||||
|
||||
def compare_lower(a, b):
|
||||
if a is None or b is None:
|
||||
# this could just be "return False" but would lead to surprising behavior if both a and b are None
|
||||
@@ -76,38 +173,36 @@ def compare_lower(a, b):
    return a.lower() == b.lower()


def get_pid(name):
    pids = []
class Pids(object):
    def __init__(self, module):
        if not HAS_PSUTIL:
            module.fail_json(msg=missing_required_lib('psutil'))

    try:
        for proc in psutil.process_iter(attrs=['name', 'cmdline']):
            if compare_lower(proc.info['name'], name) or \
                    proc.info['cmdline'] and compare_lower(proc.info['cmdline'][0], name):
                pids.append(proc.pid)
    except TypeError:  # EL6, EL7: process_iter() takes no arguments (1 given)
        for proc in psutil.process_iter():
            try:  # EL7
                proc_name, proc_cmdline = proc.name(), proc.cmdline()
            except TypeError:  # EL6: 'str' object is not callable
                proc_name, proc_cmdline = proc.name, proc.cmdline
            if compare_lower(proc_name, name) or \
                    proc_cmdline and compare_lower(proc_cmdline[0], name):
                pids.append(proc.pid)
    return pids
        self._ps = PSAdapter.from_package(psutil)

        self._module = module
        self._name = module.params['name']
        self._pattern = module.params['pattern']
        self._ignore_case = module.params['ignore_case']

def get_matching_command_pids(pattern, ignore_case):
    flags = 0
    if ignore_case:
        flags |= re.I
        self._pids = []

    regex = re.compile(pattern, flags)
    # See https://psutil.readthedocs.io/en/latest/#find-process-by-name for more information
    return [p.pid for p in psutil.process_iter(["name", "exe", "cmdline"])
            if regex.search(to_native(p.info["name"]))
            or (p.info["exe"] and regex.search(basename(to_native(p.info["exe"]))))
            or (p.info["cmdline"] and regex.search(to_native(' '.join(p.cmdline()))))
            ]
    def execute(self):
        if self._name:
            self._pids = self._ps.get_pids_by_name(self._name)
        else:
            try:
                self._pids = self._ps.get_pids_by_pattern(self._pattern, self._ignore_case)
            except PSAdapterError as e:
                self._module.fail_json(msg=to_native(e))

        return self._module.exit_json(**self.result)

    @property
    def result(self):
        return {
            'pids': self._pids,
        }


def main():
@@ -126,22 +221,7 @@ def main():
        supports_check_mode=True,
    )

    if not HAS_PSUTIL:
        module.fail_json(msg=missing_required_lib('psutil'))

    name = module.params["name"]
    pattern = module.params["pattern"]
    ignore_case = module.params["ignore_case"]

    if name:
        response = dict(pids=get_pid(name))
    else:
        try:
            response = dict(pids=get_matching_command_pids(pattern, ignore_case))
        except re.error as e:
            module.fail_json(msg="'%s' is not a valid regular expression: %s" % (pattern, to_native(e)))

    module.exit_json(**response)
    Pids(module).execute()


if __name__ == '__main__':
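
The three hunks above replace the module-level get_pid and get_matching_command_pids helpers with a PSAdapter hierarchy selected by psutil version, plus a Pids driver class. A minimal sketch of how the new pieces fit together, assuming psutil is importable; the process name and pattern are only placeholders:

    import psutil

    adapter = PSAdapter.from_package(psutil)    # picks PSAdapter100/200/530 from psutil.__version__
    name_pids = adapter.get_pids_by_name('sshd')                            # matches name or argv[0], case-insensitively
    pattern_pids = adapter.get_pids_by_pattern('ssh.*d', ignore_case=True)  # regex over name, exe and cmdline
    print(name_pids, pattern_pids)
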
@@ -54,6 +54,8 @@ options:
    description:
      - Insert the corresponding rule as rule number NUM.
      - Note that ufw numbers rules starting with 1.
      - If I(delete=true) and a value is provided for I(insert),
        then I(insert) is ignored.
    type: int
  insert_relative_to:
    description:
@@ -120,6 +122,8 @@ options:
  delete:
    description:
      - Delete rule.
      - If I(delete=true) and a value is provided for I(insert),
        then I(insert) is ignored.
    type: bool
    default: false
  interface:
@@ -511,12 +515,12 @@ def main():
                         'interface_in and interface_out')
    # Rules are constructed according to the long format
    #
    # ufw [--dry-run] [route] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
    # ufw [--dry-run] [route] [delete | insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
    #     [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
    #     [proto protocol] [app application] [comment COMMENT]
    cmd.append([module.boolean(params['route']), 'route'])
    cmd.append([module.boolean(params['delete']), 'delete'])
    if params['insert'] is not None:
    if params['insert'] is not None and not params['delete']:
        relative_to_cmd = params['insert_relative_to']
        if relative_to_cmd == 'zero':
            insert_to = params['insert']
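
The functional change in the ufw main() hunk is that an insert rule number is now skipped whenever delete=true, matching the documentation added above. A standalone sketch of that precedence, using only the two parameters visible in the hunk (the helper name is made up):

    def rule_prefix(params):
        # delete wins over insert; an 'insert NUM' is only emitted for non-delete rules
        prefix = []
        if params['delete']:
            prefix.append('delete')
        elif params['insert'] is not None:
            prefix.extend(['insert', str(params['insert'])])
        return prefix

    print(rule_prefix({'delete': True, 'insert': 3}))    # ['delete']
    print(rule_prefix({'delete': False, 'insert': 3}))   # ['insert', '3']
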
@@ -62,7 +62,7 @@ options:
  clear:
    description:
      - Clear the existing files before trying to copy or link the original file.
      - Used only with the 'collectstatic' command. The C(--noinput) argument will be added automatically.
      - Used only with the C(collectstatic) command. The C(--noinput) argument will be added automatically.
    required: false
    default: no
    type: bool
@@ -109,9 +109,9 @@ options:
    required: false
    aliases: [test_runner]
notes:
  - C(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter
  - C(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the I(virtualenv) parameter
    is specified.
  - This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already
  - This module will create a virtualenv if the I(virtualenv) parameter is specified and a virtual environment does not already
    exist at the given location.
  - This module assumes English error messages for the C(createcachetable) command to detect table existence,
    unfortunately.
@@ -306,7 +306,10 @@ def main():
    # these params always get tacked on the end of the command
    for param in end_of_command_params:
        if module.params[param]:
            run_cmd_args.append(module.params[param])
            if param in ('fixtures', 'apps'):
                run_cmd_args.extend(shlex.split(module.params[param]))
            else:
                run_cmd_args.append(module.params[param])

    rc, out, err = module.run_command(run_cmd_args, cwd=project_path)
    if rc != 0:
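
The django_manage hunk above starts splitting the space-separated fixtures and apps parameters into individual command arguments instead of passing the whole string as one argument. A small sketch of the difference, with made-up fixture names:

    import shlex

    fixtures = "initial_data.json other_data.json"
    old_args = ["loaddata", fixtures]                    # one argument: 'initial_data.json other_data.json'
    new_args = ["loaddata"] + shlex.split(fixtures)      # two arguments: 'initial_data.json', 'other_data.json'
    print(old_args)
    print(new_args)
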
@@ -142,7 +142,7 @@ def main():
            # Clean up old failed deployment
            os.remove(os.path.join(deploy_path, "%s.failed" % deployment))

        shutil.copyfile(src, os.path.join(deploy_path, deployment))
        module.preserved_copy(src, os.path.join(deploy_path, deployment))
        while not deployed:
            deployed = is_deployed(deploy_path, deployment)
            if is_failed(deploy_path, deployment):
@@ -153,7 +153,7 @@ def main():
    if state == 'present' and deployed:
        if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
            os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
            shutil.copyfile(src, os.path.join(deploy_path, deployment))
            module.preserved_copy(src, os.path.join(deploy_path, deployment))
            deployed = False
            while not deployed:
                deployed = is_deployed(deploy_path, deployment)
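
The jboss change swaps shutil.copyfile for module.preserved_copy, the AnsibleModule helper that, as I understand it, also carries file metadata such as ownership and permissions over to the copy rather than writing a bare new file. A rough contrast in plain Python, shown only for illustration; the paths are placeholders:

    import shutil

    src = "/tmp/app.war"
    dest = "/srv/jboss/standalone/deployments/app.war"
    shutil.copyfile(src, dest)   # contents only; mode and owner of dest depend on the copying process
    shutil.copy2(src, dest)      # contents plus mode and timestamps, closer to what preserved_copy aims for
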
@@ -0,0 +1,193 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Phillipe Smith <phsmithcc@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: rundeck_job_executions_info
short_description: Query executions for a Rundeck job
description:
  - This module gets the list of executions for a specified Rundeck job.
author: "Phillipe Smith (@phsmith)"
version_added: 3.8.0
options:
  job_id:
    type: str
    description:
      - The job unique ID.
    required: true
  status:
    type: str
    description:
      - The job execution status to filter by.
    choices: [succeeded, failed, aborted, running]
  max:
    type: int
    description:
      - Maximum number of results to return.
    default: 20
  offset:
    type: int
    description:
      - The start point (offset) for the returned results.
    default: 0
extends_documentation_fragment:
  - community.general.rundeck
  - url
'''
EXAMPLES = '''
- name: Get Rundeck job executions info
  community.general.rundeck_job_executions_info:
    url: "https://rundeck.example.org"
    api_version: 39
    api_token: "mytoken"
    job_id: "xxxxxxxxxxxxxxxxx"
  register: rundeck_job_executions_info

- name: Show Rundeck job executions info
  ansible.builtin.debug:
    var: rundeck_job_executions_info.executions
'''
RETURN = '''
paging:
  description: Results pagination info.
  returned: success
  type: dict
  contains:
    count:
      description: Number of results in the response.
      type: int
      returned: success
    total:
      description: Total number of results.
      type: int
      returned: success
    offset:
      description: Offset from first of all results.
      type: int
      returned: success
    max:
      description: Maximum number of results per page.
      type: int
      returned: success
  sample: {
    "count": 20,
    "total": 100,
    "offset": 0,
    "max": 20
  }
executions:
  description: Job executions list.
  returned: always
  type: list
  elements: dict
  sample: [
    {
      "id": 1,
      "href": "https://rundeck.example.org/api/39/execution/1",
      "permalink": "https://rundeck.example.org/project/myproject/execution/show/1",
      "status": "succeeded",
      "project": "myproject",
      "executionType": "user",
      "user": "admin",
      "date-started": {
        "unixtime": 1633525515026,
        "date": "2021-10-06T13:05:15Z"
      },
      "date-ended": {
        "unixtime": 1633525518386,
        "date": "2021-10-06T13:05:18Z"
      },
      "job": {
        "id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
        "averageDuration": 6381,
        "name": "Test",
        "group": "",
        "project": "myproject",
        "description": "",
        "options": {
          "exit_code": "0"
        },
        "href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
        "permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a"
      },
      "description": "Plugin[com.batix.rundeck.plugins.AnsiblePlaybookInlineWorkflowStep, nodeStep: false]",
      "argstring": "-exit_code 0",
      "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068"
    }
  ]
'''
# Modules import
import json

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six.moves.urllib.parse import quote
from ansible_collections.community.general.plugins.module_utils.rundeck import (
    api_argument_spec,
    api_request
)


class RundeckJobExecutionsInfo(object):
    def __init__(self, module):
        self.module = module
        self.url = self.module.params["url"]
        self.api_version = self.module.params["api_version"]
        self.job_id = self.module.params["job_id"]
        self.offset = self.module.params["offset"]
        self.max = self.module.params["max"]
        self.status = self.module.params["status"] or ""

    def job_executions(self):
        response, info = api_request(
            module=self.module,
            endpoint="job/%s/executions?offset=%s&max=%s&status=%s"
                     % (quote(self.job_id), self.offset, self.max, self.status),
            method="GET"
        )

        if info["status"] != 200:
            self.module.fail_json(
                msg=info["msg"],
                executions=response
            )

        self.module.exit_json(msg="Executions info result", **response)


def main():
    argument_spec = api_argument_spec()
    argument_spec.update(dict(
        job_id=dict(required=True, type="str"),
        offset=dict(type="int", default=0),
        max=dict(type="int", default=20),
        status=dict(
            type="str",
            choices=["succeeded", "failed", "aborted", "running"]
        )
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    if module.params["api_version"] < 14:
        module.fail_json(msg="API version should be at least 14")

    rundeck = RundeckJobExecutionsInfo(module)
    rundeck.job_executions()


if __name__ == "__main__":
    main()
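
rundeck_job_executions_info returns at most max executions per call together with the paging block documented in RETURN, so callers wanting everything have to advance offset themselves. A rough sketch of that loop against the same REST endpoint; it uses requests as a stand-in for the module's api_request helper, and the URL, token and job ID are placeholders:

    import requests

    base = "https://rundeck.example.org/api/39"
    headers = {"X-Rundeck-Auth-Token": "mytoken", "Accept": "application/json"}
    job_id, offset, page_size = "xxxxxxxxxxxxxxxxx", 0, 20

    executions = []
    while True:
        resp = requests.get("%s/job/%s/executions" % (base, job_id),
                            params={"offset": offset, "max": page_size},
                            headers=headers).json()
        executions.extend(resp["executions"])
        if offset + page_size >= resp["paging"]["total"]:
            break
        offset += page_size
    print(len(executions))
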
plugins/modules/web_infrastructure/rundeck_job_run.py (new file, 317 lines)
@@ -0,0 +1,317 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Phillipe Smith <phsmithcc@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: rundeck_job_run
short_description: Run a Rundeck job
description:
  - This module runs a Rundeck job specified by ID.
author: "Phillipe Smith (@phsmith)"
version_added: 3.8.0
options:
  job_id:
    type: str
    description:
      - The job unique ID.
    required: true
  job_options:
    type: dict
    description:
      - The job options for the steps.
      - Numeric values must be quoted.
  filter_nodes:
    type: str
    description:
      - Filter the nodes where the job must run.
      - See U(https://docs.rundeck.com/docs/manual/11-node-filters.html#node-filter-syntax).
  run_at_time:
    type: str
    description:
      - Schedule the job execution to run at a specific date and time.
      - ISO-8601 date and time format like C(2021-10-05T15:45:00-03:00).
  loglevel:
    type: str
    description:
      - Log level configuration.
    choices: [debug, verbose, info, warn, error]
    default: info
  wait_execution:
    type: bool
    description:
      - Wait until the job execution finishes.
    default: true
  wait_execution_delay:
    type: int
    description:
      - Delay, in seconds, between job execution status check requests.
    default: 5
  wait_execution_timeout:
    type: int
    description:
      - Job execution wait timeout in seconds.
      - If the timeout is reached, the job will be aborted.
      - Keep in mind that there is a sleep based on I(wait_execution_delay) after each job status check.
    default: 120
  abort_on_timeout:
    type: bool
    description:
      - Send a job abort request if the specified I(wait_execution_timeout) is exceeded.
    default: false
extends_documentation_fragment:
  - community.general.rundeck
  - url
'''
EXAMPLES = '''
- name: Run a Rundeck job
  community.general.rundeck_job_run:
    url: "https://rundeck.example.org"
    api_version: 39
    api_token: "mytoken"
    job_id: "xxxxxxxxxxxxxxxxx"
  register: rundeck_job_run

- name: Show execution info
  ansible.builtin.debug:
    var: rundeck_job_run.execution_info

- name: Run a Rundeck job with options
  community.general.rundeck_job_run:
    url: "https://rundeck.example.org"
    api_version: 39
    api_token: "mytoken"
    job_id: "xxxxxxxxxxxxxxxxx"
    job_options:
      option_1: "value_1"
      option_2: "value_2"
      option_3: "value_3"
  register: rundeck_job_run

- name: Run a Rundeck job with timeout, delay between status check and abort on timeout
  community.general.rundeck_job_run:
    url: "https://rundeck.example.org"
    api_version: 39
    api_token: "mytoken"
    job_id: "xxxxxxxxxxxxxxxxx"
    wait_execution_timeout: 30
    wait_execution_delay: 10
    abort_on_timeout: true
  register: rundeck_job_run

- name: Schedule a Rundeck job
  community.general.rundeck_job_run:
    url: "https://rundeck.example.org"
    api_version: 39
    api_token: "mytoken"
    job_id: "xxxxxxxxxxxxxxxxx"
    run_at_time: "2021-10-05T15:45:00-03:00"
  register: rundeck_job_schedule

- name: Fire-and-forget a Rundeck job
  community.general.rundeck_job_run:
    url: "https://rundeck.example.org"
    api_version: 39
    api_token: "mytoken"
    job_id: "xxxxxxxxxxxxxxxxx"
    wait_execution: false
  register: rundeck_job_run
'''
RETURN = '''
execution_info:
  description: Rundeck job execution metadata.
  returned: always
  type: dict
  sample: {
    "msg": "Job execution succeeded!",
    "execution_info": {
      "id": 1,
      "href": "https://rundeck.example.org/api/39/execution/1",
      "permalink": "https://rundeck.example.org/project/myproject/execution/show/1",
      "status": "succeeded",
      "project": "myproject",
      "executionType": "user",
      "user": "admin",
      "date-started": {
        "unixtime": 1633449020784,
        "date": "2021-10-05T15:50:20Z"
      },
      "date-ended": {
        "unixtime": 1633449026358,
        "date": "2021-10-05T15:50:26Z"
      },
      "job": {
        "id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
        "averageDuration": 4917,
        "name": "Test",
        "group": "",
        "project": "myproject",
        "description": "",
        "options": {
          "exit_code": "0"
        },
        "href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
        "permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a"
      },
      "description": "sleep 5 && echo 'Test!' && exit ${option.exit_code}",
      "argstring": "-exit_code 0",
      "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068",
      "successfulNodes": [
        "localhost"
      ],
      "output": "Test!"
    }
  }
'''
# Modules import
import json
from datetime import datetime, timedelta
from time import sleep

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six.moves.urllib.parse import quote
from ansible_collections.community.general.plugins.module_utils.rundeck import (
    api_argument_spec,
    api_request
)


class RundeckJobRun(object):
    def __init__(self, module):
        self.module = module
        self.url = self.module.params["url"]
        self.api_version = self.module.params["api_version"]
        self.job_id = self.module.params["job_id"]
        self.job_options = self.module.params["job_options"] or {}
        self.filter_nodes = self.module.params["filter_nodes"] or ""
        self.run_at_time = self.module.params["run_at_time"] or ""
        self.loglevel = self.module.params["loglevel"].upper()
        self.wait_execution = self.module.params['wait_execution']
        self.wait_execution_delay = self.module.params['wait_execution_delay']
        self.wait_execution_timeout = self.module.params['wait_execution_timeout']
        self.abort_on_timeout = self.module.params['abort_on_timeout']

        for k, v in self.job_options.items():
            if not isinstance(v, str):
                self.module.exit_json(
                    msg="Job option '%s' value must be a string" % k,
                    execution_info={}
                )

    def job_status_check(self, execution_id):
        response = dict()
        timeout = False
        due = datetime.now() + timedelta(seconds=self.wait_execution_timeout)

        while not timeout:
            endpoint = "execution/%d" % execution_id
            response = api_request(module=self.module, endpoint=endpoint)[0]
            output = api_request(module=self.module,
                                 endpoint="execution/%d/output" % execution_id)
            log_output = "\n".join([x["log"] for x in output[0]["entries"]])
            response.update({"output": log_output})

            if response["status"] == "aborted":
                break
            elif response["status"] == "scheduled":
                self.module.exit_json(msg="Job scheduled to run at %s" % self.run_at_time,
                                      execution_info=response,
                                      changed=True)
            elif response["status"] == "failed":
                self.module.fail_json(msg="Job execution failed",
                                      execution_info=response)
            elif response["status"] == "succeeded":
                self.module.exit_json(msg="Job execution succeeded!",
                                      execution_info=response)

            if datetime.now() >= due:
                timeout = True
                break

            # Wait for the configured delay before the next status check
            sleep(self.wait_execution_delay)

        response.update({"timed_out": timeout})
        return response
    def job_run(self):
        response, info = api_request(
            module=self.module,
            endpoint="job/%s/run" % quote(self.job_id),
            method="POST",
            data={
                "loglevel": self.loglevel,
                "options": self.job_options,
                "runAtTime": self.run_at_time,
                "filter": self.filter_nodes
            }
        )

        if info["status"] != 200:
            self.module.fail_json(msg=info["msg"])

        if not self.wait_execution:
            self.module.exit_json(msg="Job run sent successfully!",
                                  execution_info=response)

        job_status = self.job_status_check(response["id"])

        if job_status["timed_out"]:
            if self.abort_on_timeout:
                api_request(
                    module=self.module,
                    endpoint="execution/%s/abort" % response['id'],
                    method="GET"
                )

                abort_status = self.job_status_check(response["id"])

                self.module.fail_json(msg="Job execution aborted due to the specified timeout",
                                      execution_info=abort_status)

            self.module.fail_json(msg="Job execution timed out",
                                  execution_info=job_status)


def main():
    argument_spec = api_argument_spec()
    argument_spec.update(dict(
        job_id=dict(required=True, type="str"),
        job_options=dict(type="dict"),
        filter_nodes=dict(type="str"),
        run_at_time=dict(type="str"),
        wait_execution=dict(type="bool", default=True),
        wait_execution_delay=dict(type="int", default=5),
        wait_execution_timeout=dict(type="int", default=120),
        abort_on_timeout=dict(type="bool", default=False),
        loglevel=dict(
            type="str",
            choices=["debug", "verbose", "info", "warn", "error"],
            default="info"
        )
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False
    )

    if module.params["api_version"] < 14:
        module.fail_json(msg="API version should be at least 14")

    rundeck = RundeckJobRun(module)
    rundeck.job_run()


if __name__ == "__main__":
    main()
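
job_status_check above is a poll-until-deadline loop: it re-reads the execution every wait_execution_delay seconds and gives up once the deadline derived from wait_execution_timeout has passed, after which job_run optionally sends an abort request. The same pattern in isolation, with a stand-in check function:

    from datetime import datetime, timedelta
    from time import sleep

    def wait_for(check, timeout=120, delay=5):
        # returns (last_status, timed_out)
        due = datetime.now() + timedelta(seconds=timeout)
        while True:
            status = check()
            if status in ("succeeded", "failed", "aborted"):
                return status, False
            if datetime.now() >= due:
                return status, True
            sleep(delay)
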
@@ -1,3 +1,4 @@
destructive
shippable/posix/group3
skip/python2.6
context/controller  # While this is not really true, this module is mainly run on the controller *and* needs access to the ansible-galaxy CLI tool
@@ -0,0 +1,2 @@
dependencies:
  - setup_remote_tmp_dir
@@ -50,7 +50,7 @@
###################################################
- name:
  set_fact:
    reqs_file: '{{ output_dir }}/reqs.yaml'
    reqs_file: '{{ remote_tmp_dir }}/reqs.yaml'

- name: Copy requirements file
  copy:
@@ -2,3 +2,4 @@ needs/root
shippable/posix/group2
destructive
skip/aix
skip/osx  # FIXME
@@ -1,2 +1,3 @@
dependencies:
  - setup_pkg_mgr
  - setup_remote_tmp_dir
@@ -75,7 +75,7 @@
  register: backports_lzma_pip

- name: prep our files
  copy: src={{ item }} dest={{output_dir}}/{{ item }}
  copy: src={{ item }} dest={{remote_tmp_dir}}/{{ item }}
  with_items:
    - foo.txt
    - bar.txt
@@ -3,29 +3,29 @@
- name: Create link - broken link ({{ format }})
  file:
    src: /nowhere
    dest: "{{ output_dir }}/nowhere.txt"
    dest: "{{ remote_tmp_dir }}/nowhere.txt"
    state: link
    force: yes

- name: Archive - broken link ({{ format }})
  archive:
    path: "{{ output_dir }}/*.txt"
    dest: "{{ output_dir }}/archive_broken_link.{{ format }}"
    path: "{{ remote_tmp_dir }}/*.txt"
    dest: "{{ remote_tmp_dir }}/archive_broken_link.{{ format }}"
    format: "{{ format }}"

- name: Verify archive exists - broken link ({{ format }})
  file:
    path: "{{ output_dir }}/archive_broken_link.{{ format }}"
    path: "{{ remote_tmp_dir }}/archive_broken_link.{{ format }}"
    state: file

- name: Remove archive - broken link ({{ format }})
  file:
    path: "{{ output_dir }}/archive_broken_link.{{ format }}"
    path: "{{ remote_tmp_dir }}/archive_broken_link.{{ format }}"
    state: absent

- name: Remove link - broken link ({{ format }})
  file:
    path: "{{ output_dir }}/nowhere.txt"
    path: "{{ remote_tmp_dir }}/nowhere.txt"
    state: absent
  # 'zip' does not support symlinks
  when: format != 'zip'
Some files were not shown because too many files have changed in this diff.