Mirror of https://github.com/ansible-collections/community.general.git
Synced 2026-04-28 17:36:49 +00:00

Compare commits (98 commits)
| SHA1 |
|---|
| 5d3a2a3bd4 |
| 686cdf2a6b |
| 4928810dda |
| 4dc2e14039 |
| 6ec769b051 |
| e4d3d24b26 |
| 572e3f0814 |
| e03ade818a |
| 54725bea77 |
| db24f9857a |
| c00147e532 |
| 0baceda7f6 |
| c563813e4e |
| 1dbd7d4d00 |
| 41b72c0055 |
| 96a8390b5e |
| 25474f657a |
| d7c4849473 |
| 0d459e5662 |
| 01bbab6b2c |
| 59a7064392 |
| 8e7b779ec9 |
| 1ba5344258 |
| 58e9454379 |
| af3dec9b97 |
| 99a161bd06 |
| feabad39f4 |
| 4a5276b589 |
| e342dfb467 |
| 5b425fc297 |
| d8328312a1 |
| 2ce326ca5b |
| 90ed2fa5c3 |
| 407d776610 |
| 951806c888 |
| 0fe7ea63a8 |
| 3a95a84963 |
| 2c3e93cc4d |
| 656b25a4a1 |
| 1863694297 |
| c0f753dd21 |
| 369cde2320 |
| e90872b486 |
| b52d3504cb |
| 1e150cda01 |
| db135b83dc |
| ad4866bb3b |
| 83339c44b3 |
| 71633249c4 |
| fdf244d488 |
| 5575d454ab |
| d4633cfcd5 |
| 11315c8c69 |
| 6c387f87dd |
| 33cf4877f5 |
| 6e2fee77a7 |
| 502e5ceb79 |
| 4685a53f29 |
| 79616f47cb |
| 496218b6e6 |
| 8bd8ccd974 |
| c802de865a |
| 1dfd6e395c |
| 25eabb39a6 |
| 869e0e60c2 |
| cae5823685 |
| 3d0dbc1fb0 |
| 912583026f |
| 748304dadd |
| 253c2179de |
| fcc72e5af1 |
| d472953e10 |
| c78d6c95d6 |
| c9cb987eb7 |
| 099a99d288 |
| 26ea01d5b4 |
| a9afbe59e5 |
| dc9cab36ac |
| 99265c5126 |
| 57aede6b95 |
| e51e41203a |
| 54644179ea |
| 7d6a1a4483 |
| 2715e4456c |
| a335d1cc56 |
| a89b43b110 |
| 1b599bde37 |
| 7bd987e2b9 |
| 8b0896a43d |
| 402bb01501 |
| 75afd83508 |
| b25f0f3cd2 |
| 9226c4b0d5 |
| fe3e262209 |
| b9fac26dcd |
| 343e5a03a7 |
| acea082a7c |
| 0cff1f116f |
@@ -1,3 +0,0 @@

```markdown
## Azure Pipelines Configuration

Please see the [Documentation](https://github.com/ansible/community/wiki/Testing:-Azure-Pipelines) for more information.
```
@@ -1,315 +0,0 @@

```yaml
trigger:
  batch: true
  branches:
    include:
      - main
      - stable-*

pr:
  autoCancel: true
  branches:
    include:
      - main
      - stable-*

schedules:
  - cron: 0 9 * * *
    displayName: Nightly
    always: true
    branches:
      include:
        - main
        - stable-*

variables:
  - name: checkoutPath
    value: ansible_collections/community/general
  - name: coverageBranches
    value: main
  - name: pipelinesCoverage
    value: coverage
  - name: entryPoint
    value: tests/utils/shippable/shippable.sh
  - name: fetchDepth
    value: 0

resources:
  containers:
    - container: default
      image: quay.io/ansible/azure-pipelines-test-container:1.9.0

pool: Standard

stages:
### Sanity
  - stage: Sanity_devel
    displayName: Sanity devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: devel/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
            - test: extra
  - stage: Sanity_2_10
    displayName: Sanity 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: 2.10/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
  - stage: Sanity_2_9
    displayName: Sanity 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: 2.9/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
### Units
  - stage: Units_devel
    displayName: Units devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: devel/units/{0}/1
          targets:
            - test: 2.6
            - test: 2.7
            - test: 3.5
            - test: 3.6
            - test: 3.7
            - test: 3.8
            - test: 3.9
  - stage: Units_2_10
    displayName: Units 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.10/units/{0}/1
          targets:
            - test: 2.6
            - test: 2.7
            - test: 3.5
            - test: 3.6
            - test: 3.7
            - test: 3.8
            - test: 3.9
  - stage: Units_2_9
    displayName: Units 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.9/units/{0}/1
          targets:
            - test: 2.6
            - test: 2.7
            - test: 3.5
            - test: 3.6
            - test: 3.7
            - test: 3.8

### Remote
  - stage: Remote_devel
    displayName: Remote devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: devel/{0}
          targets:
            - name: macOS 11.1
              test: macos/11.1
            - name: RHEL 7.9
              test: rhel/7.9
            - name: RHEL 8.3
              test: rhel/8.3
            - name: FreeBSD 11.4
              test: freebsd/11.4
            - name: FreeBSD 12.2
              test: freebsd/12.2
          groups:
            - 1
            - 2
            - 3
  - stage: Remote_2_10
    displayName: Remote 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.10/{0}
          targets:
            - name: OS X 10.11
              test: osx/10.11
            - name: macOS 10.15
              test: macos/10.15
            - name: macOS 11.1
              test: macos/11.1
            - name: RHEL 7.8
              test: rhel/7.8
            - name: RHEL 8.2
              test: rhel/8.2
            - name: FreeBSD 12.1
              test: freebsd/12.1
          groups:
            - 1
            - 2
  - stage: Remote_2_9
    displayName: Remote 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.9/{0}
          targets:
            - name: RHEL 8.2
              test: rhel/8.2
            - name: FreeBSD 12.0
              test: freebsd/12.0
          groups:
            - 1
            - 2

### Docker
  - stage: Docker_devel
    displayName: Docker devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: devel/linux/{0}
          targets:
            - name: CentOS 6
              test: centos6
            - name: CentOS 7
              test: centos7
            - name: CentOS 8
              test: centos8
            - name: Fedora 32
              test: fedora32
            - name: Fedora 33
              test: fedora33
            - name: openSUSE 15 py2
              test: opensuse15py2
            - name: openSUSE 15 py3
              test: opensuse15
            - name: Ubuntu 18.04
              test: ubuntu1804
            - name: Ubuntu 20.04
              test: ubuntu2004
          groups:
            - 1
            - 2
            - 3
  - stage: Docker_2_10
    displayName: Docker 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.10/linux/{0}
          targets:
            - name: CentOS 8
              test: centos8
            - name: Fedora 32
              test: fedora32
            - name: openSUSE 15 py3
              test: opensuse15
            - name: Ubuntu 16.04
              test: ubuntu1604
          groups:
            - 2
            - 3
  - stage: Docker_2_9
    displayName: Docker 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.9/linux/{0}
          targets:
            - name: CentOS 8
              test: centos8
            - name: Fedora 31
              test: fedora31
            - name: openSUSE 15 py3
              test: opensuse15
          groups:
            - 2
            - 3

### Cloud
  - stage: Cloud_devel
    displayName: Cloud devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: devel/cloud/{0}/1
          targets:
            - test: 2.7
            - test: 3.6
  - stage: Cloud_2_10
    displayName: Cloud 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.10/cloud/{0}/1
          targets:
            - test: 3.6
  - stage: Cloud_2_9
    displayName: Cloud 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.9/cloud/{0}/1
          targets:
            - test: 3.6

  - stage: Summary
    condition: succeededOrFailed()
    dependsOn:
      - Sanity_devel
      - Sanity_2_9
      - Sanity_2_10
      - Units_devel
      - Units_2_9
      - Units_2_10
      - Remote_devel
      - Remote_2_9
      - Remote_2_10
      - Docker_devel
      - Docker_2_9
      - Docker_2_10
      - Cloud_devel
      - Cloud_2_9
      - Cloud_2_10
    jobs:
      - template: templates/coverage.yml
```
@@ -1,20 +0,0 @@

```bash
#!/usr/bin/env bash
# Aggregate code coverage results for later processing.

set -o pipefail -eu

agent_temp_directory="$1"

PATH="${PWD}/bin:${PATH}"

mkdir "${agent_temp_directory}/coverage/"

options=(--venv --venv-system-site-packages --color -v)

ansible-test coverage combine --export "${agent_temp_directory}/coverage/" "${options[@]}"

if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then
    # Only analyze coverage if the installed version of ansible-test supports it.
    # Doing so allows this script to work unmodified for multiple Ansible versions.
    ansible-test coverage analyze targets generate "${agent_temp_directory}/coverage/coverage-analyze-targets.json" "${options[@]}"
fi
```
@@ -1,60 +0,0 @@

```python
#!/usr/bin/env python
"""
Combine coverage data from multiple jobs, keeping the data only from the most recent attempt from each job.
Coverage artifacts must be named using the format: "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}"
The recommended coverage artifact name format is: Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)
Keep in mind that Azure Pipelines does not enforce unique job display names (only names).
It is up to pipeline authors to avoid name collisions when deviating from the recommended format.
"""

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import re
import shutil
import sys


def main():
    """Main program entry point."""
    source_directory = sys.argv[1]

    if '/ansible_collections/' in os.getcwd():
        output_path = "tests/output"
    else:
        output_path = "test/results"

    destination_directory = os.path.join(output_path, 'coverage')

    if not os.path.exists(destination_directory):
        os.makedirs(destination_directory)

    jobs = {}
    count = 0

    for name in os.listdir(source_directory):
        match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)
        label = match.group('label')
        attempt = int(match.group('attempt'))
        jobs[label] = max(attempt, jobs.get(label, 0))

    for label, attempt in jobs.items():
        name = 'Coverage {attempt} {label}'.format(label=label, attempt=attempt)
        source = os.path.join(source_directory, name)
        source_files = os.listdir(source)

        for source_file in source_files:
            source_path = os.path.join(source, source_file)
            destination_path = os.path.join(destination_directory, source_file + '.' + label)
            print('"%s" -> "%s"' % (source_path, destination_path))
            shutil.copyfile(source_path, destination_path)
            count += 1

    print('Coverage file count: %d' % count)
    print('##vso[task.setVariable variable=coverageFileCount]%d' % count)
    print('##vso[task.setVariable variable=outputPath]%s' % output_path)


if __name__ == '__main__':
    main()
```
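The core of this script is the attempt-deduplication pass: artifact names encode the job attempt number, and only the highest attempt per job label survives. A minimal sketch of that pass, using made-up artifact names that follow the documented "Coverage <attempt> <label>" convention (the names below are illustrative, not from a real pipeline run):

```python
import re

# Hypothetical artifact names; real ones come from downloaded pipeline artifacts.
names = [
    'Coverage 1 Units devel Python 3.8',
    'Coverage 2 Units devel Python 3.8',  # a retry of the same job: attempt 2 wins
    'Coverage 1 Sanity devel Test 1',
]

jobs = {}
for name in names:
    match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)
    label = match.group('label')
    attempt = int(match.group('attempt'))
    # Keep only the most recent attempt seen for each job label.
    jobs[label] = max(attempt, jobs.get(label, 0))

print(jobs)  # {'Units devel Python 3.8': 2, 'Sanity devel Test 1': 1}
```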
@@ -1,24 +0,0 @@

```bash
#!/usr/bin/env bash
# Check the test results and set variables for use in later steps.

set -o pipefail -eu

if [[ "$PWD" =~ /ansible_collections/ ]]; then
    output_path="tests/output"
else
    output_path="test/results"
fi

echo "##vso[task.setVariable variable=outputPath]${output_path}"

if compgen -G "${output_path}"'/junit/*.xml' > /dev/null; then
    echo "##vso[task.setVariable variable=haveTestResults]true"
fi

if compgen -G "${output_path}"'/bot/ansible-test-*' > /dev/null; then
    echo "##vso[task.setVariable variable=haveBotResults]true"
fi

if compgen -G "${output_path}"'/coverage/*' > /dev/null; then
    echo "##vso[task.setVariable variable=haveCoverageData]true"
fi
```
@@ -1,27 +0,0 @@

```bash
#!/usr/bin/env bash
# Upload code coverage reports to codecov.io.
# Multiple coverage files from multiple languages are accepted and aggregated after upload.
# Python coverage, as well as PowerShell and Python stubs, can all be uploaded.

set -o pipefail -eu

output_path="$1"

curl --silent --show-error https://codecov.io/bash > codecov.sh

for file in "${output_path}"/reports/coverage*.xml; do
    name="${file}"
    name="${name##*/}"  # remove path
    name="${name##coverage=}"  # remove 'coverage=' prefix if present
    name="${name%.xml}"  # remove '.xml' suffix

    bash codecov.sh \
        -f "${file}" \
        -n "${name}" \
        -X coveragepy \
        -X gcov \
        -X fix \
        -X search \
        -X xcode \
        || echo "Failed to upload code coverage report to codecov.io: ${file}"
done
```
@@ -1,15 +0,0 @@

```bash
#!/usr/bin/env bash
# Generate code coverage reports for uploading to Azure Pipelines and codecov.io.

set -o pipefail -eu

PATH="${PWD}/bin:${PATH}"

if ! ansible-test --help >/dev/null 2>&1; then
    # Install the devel version of ansible-test for generating code coverage reports.
    # This is only used by Ansible Collections, which are typically tested against multiple Ansible versions (in separate jobs).
    # Since a version of ansible-test is required that can work with the output from multiple older releases, the devel version is used.
    pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
fi

ansible-test coverage xml --stub --venv --venv-system-site-packages --color -v
```
@@ -1,34 +0,0 @@

```bash
#!/usr/bin/env bash
# Configure the test environment and run the tests.

set -o pipefail -eu

entry_point="$1"
test="$2"
read -r -a coverage_branches <<< "$3"  # space separated list of branches to run code coverage on for scheduled builds

export COMMIT_MESSAGE
export COMPLETE
export COVERAGE
export IS_PULL_REQUEST

if [ "${SYSTEM_PULLREQUEST_TARGETBRANCH:-}" ]; then
    IS_PULL_REQUEST=true
    COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD^2)
else
    IS_PULL_REQUEST=
    COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD)
fi

COMPLETE=
COVERAGE=

if [ "${BUILD_REASON}" = "Schedule" ]; then
    COMPLETE=yes

    if printf '%s\n' "${coverage_branches[@]}" | grep -q "^${BUILD_SOURCEBRANCHNAME}$"; then
        COVERAGE=yes
    fi
fi

"${entry_point}" "${test}" 2>&1 | "$(dirname "$0")/time-command.py"
```
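The `printf | grep` pipeline above is a whole-word membership test: it checks whether the build's source branch appears verbatim in the space-separated `coverageBranches` list. The same check sketched in Python (branch names below are illustrative):

```python
coverage_branches = "main stable-1".split()  # from the "$(coverageBranches)" variable
build_source_branch_name = "main"            # from Azure's BUILD_SOURCEBRANCHNAME

# grep -q "^branch$" over one-branch-per-line output is exact membership.
print(build_source_branch_name in coverage_branches)  # True
```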
@@ -1,25 +0,0 @@

```python
#!/usr/bin/env python
"""Prepends a relative timestamp to each input line from stdin and writes it to stdout."""

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import sys
import time


def main():
    """Main program entry point."""
    start = time.time()

    sys.stdin.reconfigure(errors='surrogateescape')
    sys.stdout.reconfigure(errors='surrogateescape')

    for line in sys.stdin:
        seconds = time.time() - start
        sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line))
        sys.stdout.flush()


if __name__ == '__main__':
    main()
```
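The `%02d:%02d` format turns the elapsed seconds into a zero-padded MM:SS prefix. A quick sketch of the same arithmetic on a few hypothetical elapsed times (values chosen for illustration):

```python
# Illustrative elapsed times in seconds; the script derives these from time.time().
for seconds in (5, 65, 3599):
    print('%02d:%02d' % (seconds // 60, seconds % 60))
# 00:05
# 01:05
# 59:59
```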
@@ -1,39 +0,0 @@

```yaml
# This template adds a job for processing code coverage data.
# It will upload results to Azure Pipelines and codecov.io.
# Use it from a job stage that completes after all other jobs have completed.
# This can be done by placing it in a separate summary stage that runs after the test stage(s) have completed.

jobs:
  - job: Coverage
    displayName: Code Coverage
    container: default
    workspace:
      clean: all
    steps:
      - checkout: self
        fetchDepth: $(fetchDepth)
        path: $(checkoutPath)
      - task: DownloadPipelineArtifact@2
        displayName: Download Coverage Data
        inputs:
          path: coverage/
          patterns: "Coverage */*=coverage.combined"
      - bash: .azure-pipelines/scripts/combine-coverage.py coverage/
        displayName: Combine Coverage Data
      - bash: .azure-pipelines/scripts/report-coverage.sh
        displayName: Generate Coverage Report
        condition: gt(variables.coverageFileCount, 0)
      - task: PublishCodeCoverageResults@1
        inputs:
          codeCoverageTool: Cobertura
          # Azure Pipelines only accepts a single coverage data file.
          # That means only Python or PowerShell coverage can be uploaded, but not both.
          # Set the "pipelinesCoverage" variable to determine which type is uploaded.
          # Use "coverage" for Python and "coverage-powershell" for PowerShell.
          summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml"
        displayName: Publish to Azure Pipelines
        condition: gt(variables.coverageFileCount, 0)
      - bash: .azure-pipelines/scripts/publish-codecov.sh "$(outputPath)"
        displayName: Publish to codecov.io
        condition: gt(variables.coverageFileCount, 0)
        continueOnError: true
```
@@ -1,55 +0,0 @@

```yaml
# This template uses the provided targets and optional groups to generate a matrix which is then passed to the test template.
# If this matrix template does not provide the required functionality, consider using the test template directly instead.

parameters:
  # A required list of dictionaries, one per test target.
  # Each item in the list must contain a "test" or "name" key.
  # Both may be provided. If one is omitted, the other will be used.
  - name: targets
    type: object

  # An optional list of values which will be used to multiply the targets list into a matrix.
  # Values can be strings or numbers.
  - name: groups
    type: object
    default: []

  # An optional format string used to generate the job name.
  # - {0} is the name of an item in the targets list.
  - name: nameFormat
    type: string
    default: "{0}"

  # An optional format string used to generate the test name.
  # - {0} is the name of an item in the targets list.
  - name: testFormat
    type: string
    default: "{0}"

  # An optional format string used to add the group to the job name.
  # {0} is the formatted name of an item in the targets list.
  # {{1}} is the group -- be sure to include the double "{{" and "}}".
  - name: nameGroupFormat
    type: string
    default: "{0} - {{1}}"

  # An optional format string used to add the group to the test name.
  # {0} is the formatted test of an item in the targets list.
  # {{1}} is the group -- be sure to include the double "{{" and "}}".
  - name: testGroupFormat
    type: string
    default: "{0}/{{1}}"

jobs:
  - template: test.yml
    parameters:
      jobs:
        - ${{ if eq(length(parameters.groups), 0) }}:
          - ${{ each target in parameters.targets }}:
            - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }}
              test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }}
        - ${{ if not(eq(length(parameters.groups), 0)) }}:
          - ${{ each group in parameters.groups }}:
            - ${{ each target in parameters.targets }}:
              - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }}
                test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }}
```
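The two-stage `format(format(...))` call is the subtle part: the group format string is applied to the name format first, and the `{{1}}` escaping ensures the group placeholder survives that first pass. Azure's `format()` expression shares Python's brace-escaping rules, so the expansion can be sketched in Python (target and group values below are illustrative):

```python
name_format = "Python {0}"
name_group_format = "{0} - {{1}}"  # "{{1}}" survives the first format() as "{1}"

# First pass: embed the name format, keeping the group placeholder intact.
combined = name_group_format.format(name_format)  # -> "Python {0} - {1}"
# Second pass: fill in the target and the group.
print(combined.format("3.8", 2))                  # -> "Python 3.8 - 2"
```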
@@ -1,45 +0,0 @@

```yaml
# This template uses the provided list of jobs to create one or more test jobs.
# It can be used directly if needed, or through the matrix template.

parameters:
  # A required list of dictionaries, one per test job.
  # Each item in the list must contain a "test" and "name" key.
  - name: jobs
    type: object

jobs:
  - ${{ each job in parameters.jobs }}:
    - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }}
      displayName: ${{ job.name }}
      container: default
      workspace:
        clean: all
      steps:
        - checkout: self
          fetchDepth: $(fetchDepth)
          path: $(checkoutPath)
        - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)"
          displayName: Run Tests
        - bash: .azure-pipelines/scripts/process-results.sh
          condition: succeededOrFailed()
          displayName: Process Results
        - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)"
          condition: eq(variables.haveCoverageData, 'true')
          displayName: Aggregate Coverage Data
        - task: PublishTestResults@2
          condition: eq(variables.haveTestResults, 'true')
          inputs:
            testResultsFiles: "$(outputPath)/junit/*.xml"
          displayName: Publish Test Results
        - task: PublishPipelineArtifact@1
          condition: eq(variables.haveBotResults, 'true')
          displayName: Publish Bot Results
          inputs:
            targetPath: "$(outputPath)/bot/"
            artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
        - task: PublishPipelineArtifact@1
          condition: eq(variables.haveCoverageData, 'true')
          displayName: Publish Coverage Data
          inputs:
            targetPath: "$(Agent.TempDirectory)/coverage/"
            artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
```
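The triple `replace()` turns a test path into a valid Azure job identifier by mapping `/`, `.`, and `-` to underscores. The same transformation in Python, applied to one of the test values this pipeline actually uses:

```python
test = 'devel/sanity/1'
# Mirror of: replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_')
job_id = 'test_' + test.replace('/', '_').replace('.', '_').replace('-', '_')
print(job_id)  # test_devel_sanity_1
```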
.github/BOTMETA.yml (vendored, 1112): file diff suppressed because it is too large.

CHANGELOG.rst (1495): file diff suppressed because it is too large.
README.md (17):

````diff
@@ -1,12 +1,9 @@
 # Community General Collection
 
-[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
-[](https://codecov.io/gh/ansible-collections/community.general)
+[](https://app.shippable.com/github/ansible-collections/community.general/dashboard) [](https://codecov.io/gh/ansible-collections/community.general)
 
 This repo contains the `community.general` Ansible Collection. The collection includes many modules and plugins supported by Ansible community which are not part of more specialized community collections.
 
-You can find [documentation for this collection on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
-
 ## Tested with Ansible
 
 Tested with the current Ansible 2.9 and 2.10 releases and the current development version of Ansible. Ansible versions before 2.9.10 are not supported.
@@ -17,7 +14,7 @@ Some modules and plugins require external libraries. Please check the requiremen
 
 ## Included content
 
-Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/community/general) or the [documentation on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
+Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/community/general).
 
 ## Using this collection
 
@@ -38,14 +35,6 @@ See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_gui
 
 If you want to develop new content for this collection or improve what is already here, the easiest way to work on the collection is to clone it into one of the configured [`COLLECTIONS_PATH`](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#collections-paths), and work on it there.
 
-For example, if you are working in the `~/dev` directory:
-
-```
-cd ~/dev
-git clone git@github.com:ansible-collections/community.general.git collections/ansible_collections/community/general
-export COLLECTIONS_PATH=$(pwd)/collections:$COLLECTIONS_PATH
-```
-
 You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).
 
 ### Running tests
@@ -76,7 +65,7 @@ Basic instructions without release branches:
 
 ## Release notes
 
-See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-2/CHANGELOG.rst).
+See the [changelog](https://github.com/ansible-collections/community.general/blob/main/CHANGELOG.rst).
 
 ## Roadmap
````
File diff suppressed because it is too large.

galaxy.yml (12):
```diff
@@ -1,16 +1,20 @@
 namespace: community
 name: general
-version: 2.4.0
+version: 1.2.0
 readme: README.md
 authors:
   - Ansible (https://github.com/ansible)
 description: null
 license_file: COPYING
 tags: [community]
-# NOTE: No dependencies are expected to be added here
-# dependencies:
+# NOTE: No more dependencies can be added to this list
+dependencies:
+  ansible.netcommon: '>=1.0.0'
+  ansible.posix: '>=1.0.0'
+  community.kubernetes: '>=1.0.0'
+  google.cloud: '>=1.0.0'
 repository: https://github.com/ansible-collections/community.general
-documentation: https://docs.ansible.com/ansible/latest/collections/community/general/
+#documentation: https://github.com/ansible-collection-migration/community.general/tree/main/docs
 homepage: https://github.com/ansible-collections/community.general
 issues: https://github.com/ansible-collections/community.general/issues
 #type: flatmap
```
meta/runtime.yml (782): file diff suppressed because it is too large.
```diff
@@ -98,9 +98,25 @@ class ActionModule(ActionBase):
                     task_async,
                     max_timeout))
 
-        # inject the async directory based on the shell option into the
-        # module args
-        async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
+        # BEGIN snippet from async_status action plugin
+        env_async_dir = [e for e in self._task.environment if
+                         "ANSIBLE_ASYNC_DIR" in e]
+        if len(env_async_dir) > 0:
+            # for backwards compatibility we need to get the dir from
+            # ANSIBLE_ASYNC_DIR that is defined in the environment. This is
+            # deprecated and will be removed in favour of shell options
+            async_dir = env_async_dir[0]['ANSIBLE_ASYNC_DIR']
+
+            msg = "Setting the async dir from the environment keyword " \
+                  "ANSIBLE_ASYNC_DIR is deprecated. Set the async_dir " \
+                  "shell option instead"
+            display.deprecated(msg, version='2.0.0',
+                               collection_name='community.general')  # was Ansible 2.12
+        else:
+            # inject the async directory based on the shell option into the
+            # module args
+            async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
+        # END snippet from async_status action plugin
 
         # Bind the loop max duration to consistent values on both
         # remote and local sides (if not the same, make the loop
@@ -109,18 +125,15 @@ class ActionModule(ActionBase):
         module_args['_back'] = '%s/iptables.state' % async_dir
         async_status_args = dict(_async_dir=async_dir)
         confirm_cmd = 'rm -f %s' % module_args['_back']
         starter_cmd = 'touch %s.starter' % module_args['_back']
-        remaining_time = max(task_async, max_timeout)
 
         # do work!
         result = merge_hash(result, self._execute_module(module_args=module_args, task_vars=task_vars, wrap_async=wrap_async))
 
         # Then the 3-steps "go ahead or rollback":
-        # 1. Catch early errors of the module (in asynchronous task) if any.
-        #    Touch a file on the target to signal the module to process now.
-        # 2. Reset connection to ensure a persistent one will not be reused.
-        # 3. Confirm the restored state by removing the backup on the remote.
-        #    Retrieve the results of the asynchronous task to return them.
+        # - reset connection to ensure a persistent one will not be reused
+        # - confirm the restored state by removing the backup on the remote
+        # - retrieve the results of the asynchronous task to return them
         if '_back' in module_args:
             async_status_args['jid'] = result.get('ansible_job_id', None)
             if async_status_args['jid'] is None:
@@ -130,18 +143,12 @@ class ActionModule(ActionBase):
             # option type/value, missing required system command, etc.
             result = merge_hash(result, self._async_result(async_status_args, task_vars, 0))
 
-            # The module is aware to not process the main iptables-restore
-            # command before finding (and deleting) the 'starter' cookie on
-            # the host, so the previous query will not reach ssh timeout.
-            garbage = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE)
-
-            # As the main command is not yet executed on the target, here
-            # 'finished' means 'failed before main command be executed'.
             if not result['finished']:
                 try:
                     self._connection.reset()
-                    display.v("%s: reset connection" % (module_name))
                 except AttributeError:
-                    pass
+                    display.warning("Connection plugin does not allow to reset the connection.")
 
                 for x in range(max_timeout):
                     time.sleep(1)
```
```diff
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
 DOCUMENTATION = '''
-    name: doas
+    become: doas
     short_description: Do As user
     description:
         - This become plugin allows your remote/login user to execute commands as another user via the doas utility.
```
```diff
@@ -4,7 +4,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
 DOCUMENTATION = '''
-    name: dzdo
+    become: dzdo
    short_description: Centrify's Direct Authorize
    description:
        - This become plugin allows your remote/login user to execute commands as another user via the dzdo utility.
```
```diff
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
 DOCUMENTATION = '''
-    name: ksu
+    become: ksu
    short_description: Kerberos substitute user
    description:
        - This become plugin allows your remote/login user to execute commands as another user via the ksu utility.
```
```diff
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
 DOCUMENTATION = '''
-    name: machinectl
+    become: machinectl
    short_description: Systemd's machinectl privilege escalation
    description:
        - This become plugin allows your remote/login user to execute commands as another user via the machinectl utility.
```
```diff
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
 DOCUMENTATION = '''
-    name: pbrun
+    become: pbrun
    short_description: PowerBroker run
    description:
        - This become plugin allows your remote/login user to execute commands as another user via the pbrun utility.
```
```diff
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
 DOCUMENTATION = '''
-    name: pfexec
+    become: pfexec
    short_description: profile based execution
    description:
        - This become plugin allows your remote/login user to execute commands as another user via the pfexec utility.
```
```diff
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
 DOCUMENTATION = '''
-    name: pmrun
+    become: pmrun
    short_description: Privilege Manager run
    description:
        - This become plugin allows your remote/login user to execute commands as another user via the pmrun utility.
```
```diff
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
 DOCUMENTATION = '''
-    name: sesu
+    become: sesu
    short_description: CA Privileged Access Manager
    description:
        - This become plugin allows your remote/login user to execute commands as another user via the sesu utility.
```
@@ -1,91 +0,0 @@

```python
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = """
    become: sudosu
    short_description: Run tasks using sudo su -
    description:
        - This become plugin allows your remote/login user to execute commands as another user via the C(sudo) and C(su) utilities combined.
    author:
        - Dag Wieers (@dagwieers)
    version_added: 2.4.0
    options:
        become_user:
            description: User you 'become' to execute the task.
            default: root
            ini:
              - section: privilege_escalation
                key: become_user
              - section: sudo_become_plugin
                key: user
            vars:
              - name: ansible_become_user
              - name: ansible_sudo_user
            env:
              - name: ANSIBLE_BECOME_USER
              - name: ANSIBLE_SUDO_USER
        become_flags:
            description: Options to pass to C(sudo).
            default: -H -S -n
            ini:
              - section: privilege_escalation
                key: become_flags
              - section: sudo_become_plugin
                key: flags
            vars:
              - name: ansible_become_flags
              - name: ansible_sudo_flags
            env:
              - name: ANSIBLE_BECOME_FLAGS
              - name: ANSIBLE_SUDO_FLAGS
        become_pass:
            description: Password to pass to C(sudo).
            required: false
            vars:
              - name: ansible_become_password
              - name: ansible_become_pass
              - name: ansible_sudo_pass
            env:
              - name: ANSIBLE_BECOME_PASS
              - name: ANSIBLE_SUDO_PASS
            ini:
              - section: sudo_become_plugin
                key: password
"""


from ansible.plugins.become import BecomeBase


class BecomeModule(BecomeBase):

    name = 'community.general.sudosu'

    # messages for detecting prompted password issues
    fail = ('Sorry, try again.',)
    missing = ('Sorry, a password is required to run sudo', 'sudo: a password is required')

    def build_become_command(self, cmd, shell):
        super(BecomeModule, self).build_become_command(cmd, shell)

        if not cmd:
            return cmd

        becomecmd = 'sudo'

        flags = self.get_option('become_flags') or ''
        prompt = ''
        if self.get_option('become_pass'):
            self.prompt = '[sudo via ansible, key=%s] password:' % self._id
            if flags:  # this could be simplified, but kept as is for now for backwards string matching
                flags = flags.replace('-n', '')
            prompt = '-p "%s"' % (self.prompt)

        user = self.get_option('become_user') or ''
        if user:
            user = '%s' % (user)

        return ' '.join([becomecmd, flags, prompt, 'su -l', user, self._build_success_command(cmd, shell)])
```
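The returned command is plain string assembly: sudo flags, an optional custom prompt, then `su -l <user>` wrapping the success command. A standalone sketch of the same assembly outside the plugin machinery (the helper below is a simplified stand-in for illustration, not the collection's API):

```python
def build_sudosu_command(cmd, user='root', flags='-H -S -n', prompt=''):
    # Mirrors the final join in BecomeModule.build_become_command:
    # sudo <flags> [<prompt>] su -l <user> <success command>
    return ' '.join(['sudo', flags, prompt, 'su -l', user, cmd])

print(build_sudosu_command("/bin/sh -c 'echo hi'"))
# sudo -H -S -n  su -l root /bin/sh -c 'echo hi'
# (the doubled space comes from the empty prompt, which the join also produces)
```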
plugins/cache/memcached.py (vendored, 17):
```diff
@@ -7,7 +7,7 @@ __metaclass__ = type
 
 DOCUMENTATION = '''
-    author: Unknown (!UNKNOWN)
-    name: memcached
+    cache: memcached
     short_description: Use memcached DB for cache
     description:
         - This cache uses JSON formatted, per host records saved in memcached.
@@ -53,14 +53,12 @@ from ansible import constants as C
 from ansible.errors import AnsibleError
 from ansible.module_utils.common._collections_compat import MutableSet
 from ansible.plugins.cache import BaseCacheModule
-from ansible.release import __version__ as ansible_base_version
 from ansible.utils.display import Display
 
 try:
     import memcache
-    HAS_MEMCACHE = True
 except ImportError:
-    HAS_MEMCACHE = False
+    raise AnsibleError("python-memcached is required for the memcached fact cache")
 
 display = Display()
 
@@ -162,7 +160,7 @@ class CacheModuleKeys(MutableSet):
         self._cache.set(self.PREFIX, self._keyset)
 
     def remove_by_timerange(self, s_min, s_max):
-        for k in list(self._keyset.keys()):
+        for k in self._keyset.keys():
             t = self._keyset[k]
             if s_min < t < s_max:
                 del self._keyset[k]
@@ -181,17 +179,14 @@ class CacheModule(BaseCacheModule):
             self._timeout = self.get_option('_timeout')
             self._prefix = self.get_option('_prefix')
         except KeyError:
-            # TODO: remove once we no longer support Ansible 2.9
-            if not ansible_base_version.startswith('2.9.'):
-                raise AnsibleError("Do not import CacheModules directly. Use ansible.plugins.loader.cache_loader instead.")
             display.deprecated('Rather than importing CacheModules directly, '
                                'use ansible.plugins.loader.cache_loader',
                                version='2.0.0', collection_name='community.general')  # was Ansible 2.12
             if C.CACHE_PLUGIN_CONNECTION:
                 connection = C.CACHE_PLUGIN_CONNECTION.split(',')
             self._timeout = C.CACHE_PLUGIN_TIMEOUT
             self._prefix = C.CACHE_PLUGIN_PREFIX
 
-        if not HAS_MEMCACHE:
-            raise AnsibleError("python-memcached is required for the memcached fact cache")
-
         self._cache = {}
         self._db = ProxyClientPool(connection, debug=0)
         self._keys = CacheModuleKeys(self._db, self._db.get(CacheModuleKeys.PREFIX) or [])
```
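The `list(...)` copy in `remove_by_timerange` is not cosmetic: deleting keys from a dict while iterating over its live `keys()` view raises a RuntimeError on Python 3, whereas iterating over a snapshot is safe. A minimal reproduction of the difference:

```python
keyset = {'host-a': 10, 'host-b': 20, 'host-c': 30}

# Iterating over a snapshot allows deletion mid-loop.
for k in list(keyset.keys()):
    if 15 < keyset[k] < 35:
        del keyset[k]
print(keyset)  # {'host-a': 10}

# Iterating the live view while deleting fails on Python 3.
keyset = {'host-a': 10, 'host-b': 20, 'host-c': 30}
try:
    for k in keyset.keys():
        if 15 < keyset[k] < 35:
            del keyset[k]
except RuntimeError as exc:
    print(exc)  # dictionary changed size during iteration
```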
plugins/cache/pickle.py (vendored, 2):
```diff
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
 DOCUMENTATION = '''
-    name: pickle
+    cache: pickle
     short_description: Pickle formatted files.
     description:
         - This cache uses Python's pickle serialization format, in per host files, saved to the filesystem.
```
plugins/cache/redis.py (vendored, 90):
```diff
@@ -6,7 +6,7 @@ __metaclass__ = type
 
 DOCUMENTATION = '''
-    author: Unknown (!UNKNOWN)
-    name: redis
+    cache: redis
     short_description: Use Redis DB for cache
     description:
         - This cache uses JSON formatted, per host records saved in Redis.
@@ -18,7 +18,6 @@ DOCUMENTATION = '''
           - A colon separated string of connection information for Redis.
           - The format is C(host:port:db:password), for example C(localhost:6379:0:changeme).
           - To use encryption in transit, prefix the connection with C(tls://), as in C(tls://localhost:6379:0:changeme).
-          - To use redis sentinel, use separator C(;), for example C(localhost:26379;localhost:26379;0:changeme). Requires redis>=2.9.0.
         required: True
         env:
           - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
@@ -33,23 +32,6 @@ DOCUMENTATION = '''
         ini:
           - key: fact_caching_prefix
             section: defaults
-      _keyset_name:
-        description: User defined name for cache keyset name.
-        default: ansible_cache_keys
-        env:
-          - name: ANSIBLE_CACHE_REDIS_KEYSET_NAME
-        ini:
-          - key: fact_caching_redis_keyset_name
-            section: defaults
-        version_added: 1.3.0
-      _sentinel_service_name:
-        description: The redis sentinel service name (or referenced as cluster name).
-        env:
-          - name: ANSIBLE_CACHE_REDIS_SENTINEL
-        ini:
-          - key: fact_caching_redis_sentinel
-            section: defaults
-        version_added: 1.3.0
       _timeout:
         default: 86400
         description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
@@ -66,17 +48,14 @@ import json
 
 from ansible import constants as C
 from ansible.errors import AnsibleError
-from ansible.module_utils._text import to_native
 from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
 from ansible.plugins.cache import BaseCacheModule
-from ansible.release import __version__ as ansible_base_version
 from ansible.utils.display import Display
 
 try:
     from redis import StrictRedis, VERSION
-    HAS_REDIS = True
 except ImportError:
-    HAS_REDIS = False
+    raise AnsibleError("The 'redis' python module (version 2.4.5 or newer) is required for the redis fact cache, 'pip install redis'")
 
 display = Display()
 
@@ -90,8 +69,6 @@ class CacheModule(BaseCacheModule):
     to expire keys. This mechanism is used or a pattern matched 'scan' for
     performance.
     """
-    _sentinel_service_name = None
-
     def __init__(self, *args, **kwargs):
         uri = ''
 
@@ -101,70 +78,25 @@ class CacheModule(BaseCacheModule):
             uri = self.get_option('_uri')
             self._timeout = float(self.get_option('_timeout'))
             self._prefix = self.get_option('_prefix')
-            self._keys_set = self.get_option('_keyset_name')
-            self._sentinel_service_name = self.get_option('_sentinel_service_name')
         except KeyError:
-            # TODO: remove once we no longer support Ansible 2.9
-            if not ansible_base_version.startswith('2.9.'):
-                raise AnsibleError("Do not import CacheModules directly. Use ansible.plugins.loader.cache_loader instead.")
             display.deprecated('Rather than importing CacheModules directly, '
                                'use ansible.plugins.loader.cache_loader',
                                version='2.0.0', collection_name='community.general')  # was Ansible 2.12
             if C.CACHE_PLUGIN_CONNECTION:
                 uri = C.CACHE_PLUGIN_CONNECTION
             self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
             self._prefix = C.CACHE_PLUGIN_PREFIX
-            self._keys_set = 'ansible_cache_keys'
 
-        if not HAS_REDIS:
-            raise AnsibleError("The 'redis' python module (version 2.4.5 or newer) is required for the redis fact cache, 'pip install redis'")
-
         self._cache = {}
         kw = {}
 
         # tls connection
         tlsprefix = 'tls://'
         if uri.startswith(tlsprefix):
             kw['ssl'] = True
             uri = uri[len(tlsprefix):]
 
-        # redis sentinel connection
-        if self._sentinel_service_name:
-            self._db = self._get_sentinel_connection(uri, kw)
-        # normal connection
-        else:
-            connection = uri.split(':')
-            self._db = StrictRedis(*connection, **kw)
-
-        display.vv('Redis connection: %s' % self._db)
-
-    def _get_sentinel_connection(self, uri, kw):
-        """
-        get sentinel connection details from _uri
-        """
-        try:
-            from redis.sentinel import Sentinel
-        except ImportError:
-            raise AnsibleError("The 'redis' python module (version 2.9.0 or newer) is required to use redis sentinel.")
-
-        if ';' not in uri:
-            raise AnsibleError('_uri does not have sentinel syntax.')
-
-        # format: "localhost:26379;localhost2:26379;0:changeme"
-        connections = uri.split(';')
-        connection_args = connections.pop(-1)
-        if len(connection_args) > 0:  # handle if no db nr is given
-            connection_args = connection_args.split(':')
-            kw['db'] = connection_args.pop(0)
-            try:
-                kw['password'] = connection_args.pop(0)
-            except IndexError:
-                pass  # password is optional
-
-        sentinels = [tuple(shost.split(':')) for shost in connections]
-        display.vv('\nUsing redis sentinels: %s' % sentinels)
-        scon = Sentinel(sentinels, **kw)
-        try:
-            return scon.master_for(self._sentinel_service_name, socket_timeout=0.2)
-        except Exception as exc:
-            raise AnsibleError('Could not connect to redis sentinel: %s' % to_native(exc))
+        connection = uri.split(':')
+        self._db = StrictRedis(*connection, **kw)
+        self._keys_set = 'ansible_cache_keys'
 
     def _make_key(self, key):
         return self._prefix + key
@@ -217,12 +149,14 @@ class CacheModule(BaseCacheModule):
         self._db.zrem(self._keys_set, key)
 
     def flush(self):
-        for key in list(self.keys()):
+        for key in self.keys():
             self.delete(key)
 
     def copy(self):
         # TODO: there is probably a better way to do this in redis
-        ret = dict([(k, self.get(k)) for k in self.keys()])
+        ret = dict()
+        for key in self.keys():
+            ret[key] = self.get(key)
         return ret
 
     def __getstate__(self):
```
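The sentinel URI format packs the sentinel hosts and a trailing db/password segment into one string split on `;`. A sketch of the same parsing logic outside the plugin, on an illustrative URI:

```python
uri = 'localhost:26379;localhost2:26379;0:changeme'  # illustrative sentinel _uri

connections = uri.split(';')
connection_args = connections.pop(-1)  # trailing "db[:password]" segment

kw = {}
if connection_args:
    parts = connection_args.split(':')
    kw['db'] = parts.pop(0)
    if parts:  # password is optional
        kw['password'] = parts.pop(0)

sentinels = [tuple(shost.split(':')) for shost in connections]
print(sentinels, kw)
# [('localhost', '26379'), ('localhost2', '26379')] {'db': '0', 'password': 'changeme'}
```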
plugins/cache/yaml.py (vendored, 2):
```diff
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
 DOCUMENTATION = '''
-    name: yaml
+    cache: yaml
     short_description: YAML formatted files.
     description:
         - This cache uses YAML formatted, per host, files saved to the filesystem.
```
plugins/callback/actionable.py (new file, 61):
@@ -0,0 +1,61 @@

```python
# (c) 2015, Andrew Gaffney <andrew@agaffney.org>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    author: Unknown (!UNKNOWN)
    callback: actionable
    type: stdout
    short_description: shows only items that need attention
    description:
      - Use this callback when you don't care about OK nor Skipped.
      - This callback suppresses any non Failed or Changed status.
    deprecated:
        why: The 'default' callback plugin now supports this functionality
        removed_in: '2.0.0'  # was Ansible 2.11
        alternative: "'default' callback plugin with 'display_skipped_hosts = no' and 'display_ok_hosts = no' options"
    extends_documentation_fragment:
      - default_callback
    requirements:
      - set as stdout callback in configuration
    # Override defaults from 'default' callback plugin
    options:
      display_skipped_hosts:
        name: Show skipped hosts
        description: "Toggle to control displaying skipped task/host results in a task"
        type: bool
        default: no
        env:
          - name: DISPLAY_SKIPPED_HOSTS
            deprecated:
                why: environment variables without "ANSIBLE_" prefix are deprecated
                version: "2.0.0"  # was Ansible 2.12
                alternatives: the "ANSIBLE_DISPLAY_SKIPPED_HOSTS" environment variable
          - name: ANSIBLE_DISPLAY_SKIPPED_HOSTS
        ini:
          - key: display_skipped_hosts
            section: defaults
      display_ok_hosts:
        name: Show 'ok' hosts
        description: "Toggle to control displaying 'ok' task/host results in a task"
        type: bool
        default: no
        env:
          - name: ANSIBLE_DISPLAY_OK_HOSTS
        ini:
          - key: display_ok_hosts
            section: defaults
'''

from ansible.plugins.callback.default import CallbackModule as CallbackModule_default


class CallbackModule(CallbackModule_default):

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'community.general.actionable'
```
```diff
@@ -8,7 +8,7 @@ __metaclass__ = type
 
 DOCUMENTATION = '''
-    author: Unknown (!UNKNOWN)
-    name: cgroup_memory_recap
+    callback: cgroup_memory_recap
     type: aggregate
     requirements:
       - whitelist in configuration
```
```diff
@@ -7,7 +7,7 @@ __metaclass__ = type
 
 DOCUMENTATION = '''
-    author: Unknown (!UNKNOWN)
-    name: context_demo
+    callback: context_demo
     type: aggregate
     short_description: demo callback that adds play/task context
     description:
```
```diff
@@ -9,7 +9,7 @@ __metaclass__ = type
 
 DOCUMENTATION = '''
-    author: Unknown (!UNKNOWN)
-    name: counter_enabled
+    callback: counter_enabled
     type: stdout
     short_description: adds counters to the output items (tasks and hosts/task)
     description:
```
```diff
@@ -6,7 +6,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
 DOCUMENTATION = '''
-    name: dense
+    callback: dense
     type: stdout
     short_description: minimal stdout output
     extends_documentation_fragment:
```
```diff
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
 DOCUMENTATION = r'''
-    name: diy
+    callback: diy
     type: stdout
     short_description: Customize the output
     version_added: 0.2.0
@@ -1013,7 +1013,7 @@ class CallbackModule(Default):
         for attr in _stats_attributes:
             _ret[self.DIY_NS]['stats'].update({attr: _get_value(obj=stats, attr=attr)})
 
-        _ret[self.DIY_NS].update({'top_level_var_names': list(_ret.keys())})
+        _ret[self.DIY_NS].update({'top_level_var_names': _ret.keys()})
 
         return _ret
```
plugins/callback/full_skip.py (new file, 76):
@@ -0,0 +1,76 @@

```python
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    author: Unknown (!UNKNOWN)
    callback: full_skip
    type: stdout
    short_description: suppresses tasks if all hosts skipped
    description:
      - Use this plugin when you do not care about any output for tasks that were completely skipped
    deprecated:
        why: The 'default' callback plugin now supports this functionality
        removed_in: '2.0.0'  # was Ansible 2.11
        alternative: "'default' callback plugin with 'display_skipped_hosts = no' option"
    extends_documentation_fragment:
      - default_callback
    requirements:
      - set as stdout in configuration
'''

from ansible.plugins.callback.default import CallbackModule as CallbackModule_default


class CallbackModule(CallbackModule_default):

    '''
    This is the default callback interface, which simply prints messages
    to stdout when new callback events are received.
    '''

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'community.general.full_skip'

    def v2_runner_on_skipped(self, result):
        self.outlines = []

    def v2_playbook_item_on_skipped(self, result):
        self.outlines = []

    def v2_runner_item_on_skipped(self, result):
        self.outlines = []

    def v2_runner_on_failed(self, result, ignore_errors=False):
        self.display()
        super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)

    def v2_playbook_on_task_start(self, task, is_conditional):
        self.outlines = []
        self.outlines.append("TASK [%s]" % task.get_name().strip())
        if self._display.verbosity >= 2:
            path = task.get_path()
            if path:
                self.outlines.append("task path: %s" % path)

    def v2_playbook_item_on_ok(self, result):
        self.display()
        super(CallbackModule, self).v2_playbook_item_on_ok(result)

    def v2_runner_on_ok(self, result):
        self.display()
        super(CallbackModule, self).v2_runner_on_ok(result)

    def display(self):
        if len(self.outlines) == 0:
            return
        (first, rest) = self.outlines[0], self.outlines[1:]
        self._display.banner(first)
        for line in rest:
            self._display.display(line)
        self.outlines = []
```
```diff
@@ -7,7 +7,7 @@ __metaclass__ = type
 
 DOCUMENTATION = '''
-    author: Unknown (!UNKNOWN)
-    name: hipchat
+    callback: hipchat
     type: notification
     requirements:
      - whitelist in configuration.
@@ -173,7 +173,8 @@ class CallbackModule(CallbackBase):
         # Displays info about playbook being started by a person on an
         # inventory, as well as Tags, Skip Tags and Limits
         if not self.printed_playbook:
-            self.playbook_name, dummy = os.path.splitext(os.path.basename(self.play.playbook.filename))
+            self.playbook_name, _ = os.path.splitext(
+                os.path.basename(self.play.playbook.filename))
             host_list = self.play.playbook.inventory.host_list
             inventory = os.path.basename(os.path.realpath(host_list))
             self.send_msg("%s: Playbook initiated by %s against %s" %
```
```diff
@@ -7,7 +7,7 @@ __metaclass__ = type
 
 DOCUMENTATION = '''
-    author: Unknown (!UNKNOWN)
-    name: jabber
+    callback: jabber
     type: notification
     short_description: post task events to a jabber server
     description:
```
```diff
@@ -7,7 +7,7 @@ __metaclass__ = type
 
 DOCUMENTATION = '''
-    author: Unknown (!UNKNOWN)
-    name: log_plays
+    callback: log_plays
     type: notification
     short_description: write playbook output to log file
     description:
```
@@ -1,234 +0,0 @@
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    callback: loganalytics
    type: aggregate
    short_description: Posts task results to Azure Log Analytics
    author: "Cyrus Li (@zhcli) <cyrus1006@gmail.com>"
    description:
      - This callback plugin will post task results in JSON format to an Azure Log Analytics workspace.
      - Credits to the authors of the splunk callback plugin.
    version_added: "2.4.0"
    requirements:
      - Whitelisting this callback plugin.
      - An Azure Log Analytics workspace has been established.
    options:
      workspace_id:
        description: Workspace ID of the Azure log analytics workspace.
        required: true
        env:
          - name: WORKSPACE_ID
        ini:
          - section: callback_loganalytics
            key: workspace_id
      shared_key:
        description: Shared key to connect to Azure log analytics workspace.
        required: true
        env:
          - name: WORKSPACE_SHARED_KEY
        ini:
          - section: callback_loganalytics
            key: shared_key
'''
EXAMPLES = '''
examples: |
  Whitelist the plugin in ansible.cfg:
    [defaults]
    callback_whitelist = community.general.loganalytics
  Set the environment variables:
    export WORKSPACE_ID=01234567-0123-0123-0123-01234567890a
    export WORKSPACE_SHARED_KEY=dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA==
  Or configure the plugin in ansible.cfg in the callback_loganalytics block:
    [callback_loganalytics]
    workspace_id = 01234567-0123-0123-0123-01234567890a
    shared_key = dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA==
'''

import hashlib
import hmac
import base64
import logging
import json
import uuid
import socket
import getpass

from datetime import datetime
from os.path import basename

from ansible.module_utils.urls import open_url
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
class AzureLogAnalyticsSource(object):
    def __init__(self):
        self.ansible_check_mode = False
        self.ansible_playbook = ""
        self.ansible_version = ""
        self.session = str(uuid.uuid4())
        self.host = socket.gethostname()
        self.user = getpass.getuser()
        self.extra_vars = ""

    def __build_signature(self, date, workspace_id, shared_key, content_length):
        # Build the authorisation signature for the Azure Log Analytics API call
        sigs = "POST\n{0}\napplication/json\nx-ms-date:{1}\n/api/logs".format(
            str(content_length), date)
        utf8_sigs = sigs.encode('utf-8')
        decoded_shared_key = base64.b64decode(shared_key)
        hmac_sha256_sigs = hmac.new(
            decoded_shared_key, utf8_sigs, digestmod=hashlib.sha256).digest()
        encoded_hash = base64.b64encode(hmac_sha256_sigs).decode('utf-8')
        signature = "SharedKey {0}:{1}".format(workspace_id, encoded_hash)
        return signature

    def __build_workspace_url(self, workspace_id):
        return "https://{0}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01".format(workspace_id)

    def __rfc1123date(self):
        return datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')

    def send_event(self, workspace_id, shared_key, state, result, runtime):
        if result._task_fields['args'].get('_ansible_check_mode') is True:
            self.ansible_check_mode = True

        if result._task_fields['args'].get('_ansible_version'):
            self.ansible_version = \
                result._task_fields['args'].get('_ansible_version')

        if result._task._role:
            ansible_role = str(result._task._role)
        else:
            ansible_role = None

        data = {}
        data['uuid'] = result._task._uuid
        data['session'] = self.session
        data['status'] = state
        data['timestamp'] = self.__rfc1123date()
        data['host'] = self.host
        data['user'] = self.user
        data['runtime'] = runtime
        data['ansible_version'] = self.ansible_version
        data['ansible_check_mode'] = self.ansible_check_mode
        data['ansible_host'] = result._host.name
        data['ansible_playbook'] = self.ansible_playbook
        data['ansible_role'] = ansible_role
        data['ansible_task'] = result._task_fields
        # Remove args since they can contain sensitive data
        if 'args' in data['ansible_task']:
            data['ansible_task'].pop('args')
        data['ansible_result'] = result._result
        if 'content' in data['ansible_result']:
            data['ansible_result'].pop('content')

        # Add extra vars info
        data['extra_vars'] = self.extra_vars

        # Prepare the playbook logs as JSON and send them to Azure Log Analytics
        jsondata = json.dumps({'event': data}, cls=AnsibleJSONEncoder, sort_keys=True)
        content_length = len(jsondata)
        rfc1123date = self.__rfc1123date()
        signature = self.__build_signature(rfc1123date, workspace_id, shared_key, content_length)
        workspace_url = self.__build_workspace_url(workspace_id)

        open_url(
            workspace_url,
            jsondata,
            headers={
                'content-type': 'application/json',
                'Authorization': signature,
                'Log-Type': 'ansible_playbook',
                'x-ms-date': rfc1123date
            },
            method='POST'
        )


class CallbackModule(CallbackBase):
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'loganalytics'
    CALLBACK_NEEDS_WHITELIST = True

    def __init__(self, display=None):
        super(CallbackModule, self).__init__(display=display)
        self.start_datetimes = {}  # Collect task start times
        self.workspace_id = None
        self.shared_key = None
        self.loganalytics = AzureLogAnalyticsSource()

    def _seconds_since_start(self, result):
        return (
            datetime.utcnow() -
            self.start_datetimes[result._task._uuid]
        ).total_seconds()

    def set_options(self, task_keys=None, var_options=None, direct=None):
        super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
        self.workspace_id = self.get_option('workspace_id')
        self.shared_key = self.get_option('shared_key')

    def v2_playbook_on_play_start(self, play):
        vm = play.get_variable_manager()
        extra_vars = vm.extra_vars
        self.loganalytics.extra_vars = extra_vars

    def v2_playbook_on_start(self, playbook):
        self.loganalytics.ansible_playbook = basename(playbook._file_name)

    def v2_playbook_on_task_start(self, task, is_conditional):
        self.start_datetimes[task._uuid] = datetime.utcnow()

    def v2_playbook_on_handler_task_start(self, task):
        self.start_datetimes[task._uuid] = datetime.utcnow()

    def v2_runner_on_ok(self, result, **kwargs):
        self.loganalytics.send_event(
            self.workspace_id,
            self.shared_key,
            'OK',
            result,
            self._seconds_since_start(result)
        )

    def v2_runner_on_skipped(self, result, **kwargs):
        self.loganalytics.send_event(
            self.workspace_id,
            self.shared_key,
            'SKIPPED',
            result,
            self._seconds_since_start(result)
        )

    def v2_runner_on_failed(self, result, **kwargs):
        self.loganalytics.send_event(
            self.workspace_id,
            self.shared_key,
            'FAILED',
            result,
            self._seconds_since_start(result)
        )

    def runner_on_async_failed(self, result, **kwargs):
        self.loganalytics.send_event(
            self.workspace_id,
            self.shared_key,
            'FAILED',
            result,
            self._seconds_since_start(result)
        )

    def v2_runner_on_unreachable(self, result, **kwargs):
        self.loganalytics.send_event(
            self.workspace_id,
            self.shared_key,
            'UNREACHABLE',
            result,
            self._seconds_since_start(result)
        )
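The shared-key signing in __build_signature can be exercised standalone. A minimal sketch, using dummy credentials, of the HMAC-SHA256 scheme the Azure Log Analytics data collector API expects:

import base64
import hashlib
import hmac
from datetime import datetime

def build_signature(date, workspace_id, shared_key, content_length):
    # String to sign: method, body length, content type, x-ms-date and resource, newline-separated.
    sigs = "POST\n{0}\napplication/json\nx-ms-date:{1}\n/api/logs".format(
        str(content_length), date)
    digest = hmac.new(base64.b64decode(shared_key), sigs.encode('utf-8'),
                      digestmod=hashlib.sha256).digest()
    return "SharedKey {0}:{1}".format(workspace_id, base64.b64encode(digest).decode('utf-8'))

# Dummy workspace ID and key, for illustration only.
rfc1123date = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
key = base64.b64encode(b'not-a-real-key').decode('utf-8')
print(build_signature(rfc1123date, '01234567-0123-0123-0123-01234567890a', key, 42))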
@@ -6,7 +6,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Unknown (!UNKNOWN)
    name: logdna
    callback: logdna
    type: aggregate
    short_description: Sends playbook logs to LogDNA
    description:

@@ -6,7 +6,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Unknown (!UNKNOWN)
    name: logentries
    callback: logentries
    type: notification
    short_description: Sends events to Logentries
    description:
@@ -1,13 +1,13 @@
# (C) 2020, Yevhen Khmelenko <ujenmr@gmail.com>
# (C) 2016, Ievgen Khmelenko <ujenmr@gmail.com>
# (C) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = r'''
    author: Yevhen Khmelenko (@ujenmr)
    name: logstash
DOCUMENTATION = '''
    author: Unknown (!UNKNOWN)
    callback: logstash
    type: notification
    short_description: Sends events to Logstash
    description:

@@ -43,61 +43,16 @@ DOCUMENTATION = r'''
        key: type
    version_added: 1.0.0
    default: ansible
  pre_command:
    description: Executes a command before the run and stores its output in the ansible_pre_command_output field.
    version_added: 2.0.0
    ini:
      - section: callback_logstash
        key: pre_command
    env:
      - name: LOGSTASH_PRE_COMMAND
  format_version:
    description: Logging format.
    type: str
    version_added: 2.0.0
    ini:
      - section: callback_logstash
        key: format_version
    env:
      - name: LOGSTASH_FORMAT_VERSION
    default: v1
    choices:
      - v1
      - v2

'''

EXAMPLES = r'''
ansible.cfg: |
  # Enable Callback plugin
  [defaults]
    callback_whitelist = community.general.logstash

  [callback_logstash]
    server = logstash.example.com
    port = 5000
    pre_command = git rev-parse HEAD
    type = ansible

11-input-tcp.conf: |
  # Enable Logstash TCP Input
  input {
      tcp {
          port => 5000
          codec => json
          add_field => { "[@metadata][beat]" => "notify" }
          add_field => { "[@metadata][type]" => "ansible" }
      }
  }
'''

import os
import json
import socket
import uuid
import logging
from datetime import datetime

import logging

try:
    import logstash
    HAS_LOGSTASH = True
@@ -108,78 +63,76 @@ from ansible.plugins.callback import CallbackBase


class CallbackModule(CallbackBase):
    """
    ansible logstash callback plugin
    ansible.cfg:
        callback_plugins = <path_to_callback_plugins_folder>
        callback_whitelist = logstash
    and put the plugin in <path_to_callback_plugins_folder>

    logstash config:
        input {
            tcp {
                port => 5000
                codec => json
            }
        }

    Requires:
        python-logstash

    This plugin makes use of the following environment variables or ini config:
        LOGSTASH_SERVER (optional): defaults to localhost
        LOGSTASH_PORT (optional): defaults to 5000
        LOGSTASH_TYPE (optional): defaults to ansible
    """

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'community.general.logstash'
    CALLBACK_NEEDS_WHITELIST = True

    def __init__(self):
        super(CallbackModule, self).__init__()
    def __init__(self, display=None):
        super(CallbackModule, self).__init__(display=display)

        if not HAS_LOGSTASH:
            self.disabled = True
            self._display.warning("The required python-logstash/python3-logstash is not installed. "
                                  "pip install python-logstash for Python 2, "
                                  "pip install python3-logstash for Python 3")
            self._display.warning("The required python-logstash is not installed. "
                                  "pip install python-logstash")

        self.start_time = datetime.utcnow()

    def _init_plugin(self):
        if not self.disabled:
            self.logger = logging.getLogger('python-logstash-logger')
            self.logger.setLevel(logging.DEBUG)

            self.handler = logstash.TCPLogstashHandler(
                self.ls_server,
                self.ls_port,
                version=1,
                message_type=self.ls_type
            )

            self.logger.addHandler(self.handler)
            self.hostname = socket.gethostname()
            self.session = str(uuid.uuid4())
            self.errors = 0

            self.base_data = {
                'session': self.session,
                'host': self.hostname
            }

            if self.ls_pre_command is not None:
                self.base_data['ansible_pre_command_output'] = os.popen(
                    self.ls_pre_command).read()

            if self._options is not None:
                self.base_data['ansible_checkmode'] = self._options.check
                self.base_data['ansible_tags'] = self._options.tags
                self.base_data['ansible_skip_tags'] = self._options.skip_tags
                self.base_data['inventory'] = self._options.inventory

    def set_options(self, task_keys=None, var_options=None, direct=None):

        super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)

        self.ls_server = self.get_option('server')
        self.ls_port = int(self.get_option('port'))
        self.ls_type = self.get_option('type')
        self.ls_pre_command = self.get_option('pre_command')
        self.ls_format_version = self.get_option('format_version')
        self.logger = logging.getLogger('python-logstash-logger')
        self.logger.setLevel(logging.DEBUG)

        self._init_plugin()
        self.logstash_server = self.get_option('server')
        self.logstash_port = self.get_option('port')
        self.logstash_type = self.get_option('type')
        self.handler = logstash.TCPLogstashHandler(
            self.logstash_server,
            int(self.logstash_port),
            version=1,
            message_type=self.logstash_type
        )
        self.logger.addHandler(self.handler)
        self.hostname = socket.gethostname()
        self.session = str(uuid.uuid1())
        self.errors = 0

    def v2_playbook_on_start(self, playbook):
        data = self.base_data.copy()
        data['ansible_type'] = "start"
        data['status'] = "OK"
        data['ansible_playbook'] = playbook._file_name

        if (self.ls_format_version == "v2"):
            self.logger.info(
                "START PLAYBOOK | %s", data['ansible_playbook'], extra=data
            )
        else:
            self.logger.info("ansible start", extra=data)
        self.playbook = playbook._file_name
        data = {
            'status': "OK",
            'host': self.hostname,
            'session': self.session,
            'ansible_type': "start",
            'ansible_playbook': self.playbook,
        }
        self.logger.info("ansible start", extra=data)

    def v2_playbook_on_stats(self, stats):
        end_time = datetime.utcnow()
@@ -193,201 +146,103 @@ class CallbackModule(CallbackBase):
        else:
            status = "FAILED"

        data = self.base_data.copy()
        data['ansible_type'] = "finish"
        data['status'] = status
        data['ansible_playbook_duration'] = runtime.total_seconds()
        data['ansible_result'] = json.dumps(summarize_stat)  # deprecated field

        if (self.ls_format_version == "v2"):
            self.logger.info(
                "FINISH PLAYBOOK | %s", json.dumps(summarize_stat), extra=data
            )
        else:
            self.logger.info("ansible stats", extra=data)

    def v2_playbook_on_play_start(self, play):
        self.play_id = str(play._uuid)

        if play.name:
            self.play_name = play.name

        data = self.base_data.copy()
        data['ansible_type'] = "start"
        data['status'] = "OK"
        data['ansible_play_id'] = self.play_id
        data['ansible_play_name'] = self.play_name

        if (self.ls_format_version == "v2"):
            self.logger.info("START PLAY | %s", self.play_name, extra=data)
        else:
            self.logger.info("ansible play", extra=data)

    def v2_playbook_on_task_start(self, task, is_conditional):
        self.task_id = str(task._uuid)

        '''
        Tasks and handler tasks are dealt with here
        '''
        data = {
            'status': status,
            'host': self.hostname,
            'session': self.session,
            'ansible_type': "finish",
            'ansible_playbook': self.playbook,
            'ansible_playbook_duration': runtime.total_seconds(),
            'ansible_result': json.dumps(summarize_stat),
        }
        self.logger.info("ansible stats", extra=data)

    def v2_runner_on_ok(self, result, **kwargs):
        task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')

        data = self.base_data.copy()
        if task_name == 'setup':
            data['ansible_type'] = "setup"
            data['status'] = "OK"
            data['ansible_host'] = result._host.name
            data['ansible_play_id'] = self.play_id
            data['ansible_play_name'] = self.play_name
            data['ansible_task'] = task_name
            data['ansible_facts'] = self._dump_results(result._result)

            if (self.ls_format_version == "v2"):
                self.logger.info(
                    "SETUP FACTS | %s", self._dump_results(result._result), extra=data
                )
            else:
                self.logger.info("ansible facts", extra=data)
        else:
            if 'changed' in result._result.keys():
                data['ansible_changed'] = result._result['changed']
            else:
                data['ansible_changed'] = False

            data['ansible_type'] = "task"
            data['status'] = "OK"
            data['ansible_host'] = result._host.name
            data['ansible_play_id'] = self.play_id
            data['ansible_play_name'] = self.play_name
            data['ansible_task'] = task_name
            data['ansible_task_id'] = self.task_id
            data['ansible_result'] = self._dump_results(result._result)

            if (self.ls_format_version == "v2"):
                self.logger.info(
                    "TASK OK | %s | RESULT | %s",
                    task_name, self._dump_results(result._result), extra=data
                )
            else:
                self.logger.info("ansible ok", extra=data)
        data = {
            'status': "OK",
            'host': self.hostname,
            'session': self.session,
            'ansible_type': "task",
            'ansible_playbook': self.playbook,
            'ansible_host': result._host.name,
            'ansible_task': result._task,
            'ansible_result': self._dump_results(result._result)
        }
        self.logger.info("ansible ok", extra=data)

    def v2_runner_on_skipped(self, result, **kwargs):
        task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')

        data = self.base_data.copy()
        data['ansible_type'] = "task"
        data['status'] = "SKIPPED"
        data['ansible_host'] = result._host.name
        data['ansible_play_id'] = self.play_id
        data['ansible_play_name'] = self.play_name
        data['ansible_task'] = task_name
        data['ansible_task_id'] = self.task_id
        data['ansible_result'] = self._dump_results(result._result)

        if (self.ls_format_version == "v2"):
            self.logger.info("TASK SKIPPED | %s", task_name, extra=data)
        else:
            self.logger.info("ansible skipped", extra=data)
        data = {
            'status': "SKIPPED",
            'host': self.hostname,
            'session': self.session,
            'ansible_type': "task",
            'ansible_playbook': self.playbook,
            'ansible_task': result._task,
            'ansible_host': result._host.name
        }
        self.logger.info("ansible skipped", extra=data)

    def v2_playbook_on_import_for_host(self, result, imported_file):
        data = self.base_data.copy()
        data['ansible_type'] = "import"
        data['status'] = "IMPORTED"
        data['ansible_host'] = result._host.name
        data['ansible_play_id'] = self.play_id
        data['ansible_play_name'] = self.play_name
        data['imported_file'] = imported_file

        if (self.ls_format_version == "v2"):
            self.logger.info("IMPORT | %s", imported_file, extra=data)
        else:
            self.logger.info("ansible import", extra=data)
        data = {
            'status': "IMPORTED",
            'host': self.hostname,
            'session': self.session,
            'ansible_type': "import",
            'ansible_playbook': self.playbook,
            'ansible_host': result._host.name,
            'imported_file': imported_file
        }
        self.logger.info("ansible import", extra=data)

    def v2_playbook_on_not_import_for_host(self, result, missing_file):
        data = self.base_data.copy()
        data['ansible_type'] = "import"
        data['status'] = "NOT IMPORTED"
        data['ansible_host'] = result._host.name
        data['ansible_play_id'] = self.play_id
        data['ansible_play_name'] = self.play_name
        data['imported_file'] = missing_file

        if (self.ls_format_version == "v2"):
            self.logger.info("NOT IMPORTED | %s", missing_file, extra=data)
        else:
            self.logger.info("ansible import", extra=data)
        data = {
            'status': "NOT IMPORTED",
            'host': self.hostname,
            'session': self.session,
            'ansible_type': "import",
            'ansible_playbook': self.playbook,
            'ansible_host': result._host.name,
            'missing_file': missing_file
        }
        self.logger.info("ansible import", extra=data)

    def v2_runner_on_failed(self, result, **kwargs):
        task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')

        data = self.base_data.copy()
        if 'changed' in result._result.keys():
            data['ansible_changed'] = result._result['changed']
        else:
            data['ansible_changed'] = False

        data['ansible_type'] = "task"
        data['status'] = "FAILED"
        data['ansible_host'] = result._host.name
        data['ansible_play_id'] = self.play_id
        data['ansible_play_name'] = self.play_name
        data['ansible_task'] = task_name
        data['ansible_task_id'] = self.task_id
        data['ansible_result'] = self._dump_results(result._result)

        data = {
            'status': "FAILED",
            'host': self.hostname,
            'session': self.session,
            'ansible_type': "task",
            'ansible_playbook': self.playbook,
            'ansible_host': result._host.name,
            'ansible_task': result._task,
            'ansible_result': self._dump_results(result._result)
        }
        self.errors += 1
        if (self.ls_format_version == "v2"):
            self.logger.error(
                "TASK FAILED | %s | HOST | %s | RESULT | %s",
                task_name, self.hostname,
                self._dump_results(result._result), extra=data
            )
        else:
            self.logger.error("ansible failed", extra=data)
        self.logger.error("ansible failed", extra=data)

    def v2_runner_on_unreachable(self, result, **kwargs):
        task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')

        data = self.base_data.copy()
        data['ansible_type'] = "task"
        data['status'] = "UNREACHABLE"
        data['ansible_host'] = result._host.name
        data['ansible_play_id'] = self.play_id
        data['ansible_play_name'] = self.play_name
        data['ansible_task'] = task_name
        data['ansible_task_id'] = self.task_id
        data['ansible_result'] = self._dump_results(result._result)

        self.errors += 1
        if (self.ls_format_version == "v2"):
            self.logger.error(
                "UNREACHABLE | %s | HOST | %s | RESULT | %s",
                task_name, self.hostname,
                self._dump_results(result._result), extra=data
            )
        else:
            self.logger.error("ansible unreachable", extra=data)
        data = {
            'status': "UNREACHABLE",
            'host': self.hostname,
            'session': self.session,
            'ansible_type': "task",
            'ansible_playbook': self.playbook,
            'ansible_host': result._host.name,
            'ansible_task': result._task,
            'ansible_result': self._dump_results(result._result)
        }
        self.logger.error("ansible unreachable", extra=data)

    def v2_runner_on_async_failed(self, result, **kwargs):
        task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')

        data = self.base_data.copy()
        data['ansible_type'] = "task"
        data['status'] = "FAILED"
        data['ansible_host'] = result._host.name
        data['ansible_play_id'] = self.play_id
        data['ansible_play_name'] = self.play_name
        data['ansible_task'] = task_name
        data['ansible_task_id'] = self.task_id
        data['ansible_result'] = self._dump_results(result._result)

        data = {
            'status': "FAILED",
            'host': self.hostname,
            'session': self.session,
            'ansible_type': "task",
            'ansible_playbook': self.playbook,
            'ansible_host': result._host.name,
            'ansible_task': result._task,
            'ansible_result': self._dump_results(result._result)
        }
        self.errors += 1
        if (self.ls_format_version == "v2"):
            self.logger.error(
                "ASYNC FAILED | %s | HOST | %s | RESULT | %s",
                task_name, self.hostname,
                self._dump_results(result._result), extra=data
            )
        else:
            self.logger.error("ansible async", extra=data)
        self.logger.error("ansible async", extra=data)
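The class docstring above describes the wiring: a standard logging.Logger with a logstash.TCPLogstashHandler attached, with per-event fields passed through extra. A minimal standalone sketch, assuming the python-logstash package and a Logstash TCP input listening on localhost:5000:

import logging
import logstash  # pip install python-logstash

logger = logging.getLogger('python-logstash-logger')
logger.setLevel(logging.DEBUG)
logger.addHandler(logstash.TCPLogstashHandler('localhost', 5000, version=1))

# Keys in extra are merged into the JSON document that Logstash receives.
logger.info("ansible start", extra={'ansible_type': 'start', 'status': 'OK'})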
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    name: mail
    callback: mail
    type: notification
    short_description: Sends failure events via email
    description:

@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    name: nrdp
    callback: nrdp
    type: notification
    author: "Remi VERCHERE (@rverchere)"
    short_description: post task result to a nagios server through nrdp

@@ -7,7 +7,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Unknown (!UNKNOWN)
    name: 'null'
    callback: 'null'
    type: stdout
    requirements:
      - set as main display callback

@@ -8,7 +8,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Unknown (!UNKNOWN)
    name: say
    callback: say
    type: notification
    requirements:
      - whitelisting in configuration
@@ -7,7 +7,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Unknown (!UNKNOWN)
    name: selective
    callback: selective
    type: stdout
    requirements:
      - set as main display callback

@@ -41,16 +41,7 @@ import difflib

from ansible import constants as C
from ansible.plugins.callback import CallbackBase
from ansible.module_utils._text import to_text

try:
    codeCodes = C.COLOR_CODES
except AttributeError:
    # This constant was moved to ansible.constants in
    # https://github.com/ansible/ansible/commit/1202dd000f10b0e8959019484f1c3b3f9628fc67
    # (will be included in ansible-core 2.11.0). For older Ansible/ansible-base versions,
    # we import it from the original location.
    from ansible.utils.color import codeCodes

from ansible.utils.color import codeCodes

DONT_COLORIZE = False
COLORS = {

@@ -67,7 +58,7 @@ COLORS = {

def dict_diff(prv, nxt):
    """Return a dict of keys that differ with another config object."""
    keys = set(list(prv.keys()) + list(nxt.keys()))
    keys = set(prv.keys() + nxt.keys())
    result = {}
    for k in keys:
        if prv.get(k) != nxt.get(k):
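The dict_diff hunk is a Python 3 compatibility fix: dict.keys() returns a view there, and views do not support +, so prv.keys() + nxt.keys() raises TypeError; materializing with list() first works on both interpreters. A quick illustration with made-up configs:

prv = {'mtu': 1500, 'speed': 'auto'}
nxt = {'mtu': 9000, 'duplex': 'full'}

keys = set(list(prv.keys()) + list(nxt.keys()))  # works on Python 2 and 3
diff = {k: nxt.get(k) for k in keys if prv.get(k) != nxt.get(k)}
print(diff)  # {'mtu': 9000, 'speed': None, 'duplex': 'full'} (key order may vary)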
@@ -8,7 +8,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Unknown (!UNKNOWN)
    name: slack
    callback: slack
    type: notification
    requirements:
      - whitelist in configuration
@@ -18,7 +18,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    name: splunk
    callback: splunk
    type: aggregate
    short_description: Sends task result events to Splunk HTTP Event Collector
    author: "Stuart Hirst (!UNKNOWN) <support@convergingdata.com>"

@@ -57,17 +57,6 @@ DOCUMENTATION = '''
        type: bool
        default: true
        version_added: '1.0.0'
    include_milliseconds:
        description: Whether to include milliseconds as part of the generated timestamp field in the event
            sent to the Splunk HTTP collector
        env:
          - name: SPLUNK_INCLUDE_MILLISECONDS
        ini:
          - section: callback_splunk
            key: include_milliseconds
        type: bool
        default: false
        version_added: 2.0.0
'''

EXAMPLES = '''

@@ -107,7 +96,7 @@ class SplunkHTTPCollectorSource(object):
        self.ip_address = socket.gethostbyname(socket.gethostname())
        self.user = getpass.getuser()

    def send_event(self, url, authtoken, validate_certs, include_milliseconds, state, result, runtime):
    def send_event(self, url, authtoken, validate_certs, state, result, runtime):
        if result._task_fields['args'].get('_ansible_check_mode') is True:
            self.ansible_check_mode = True

@@ -127,13 +116,8 @@ class SplunkHTTPCollectorSource(object):
        data['uuid'] = result._task._uuid
        data['session'] = self.session
        data['status'] = state

        if include_milliseconds:
            time_format = '%Y-%m-%d %H:%M:%S.%f +0000'
        else:
            time_format = '%Y-%m-%d %H:%M:%S +0000'

        data['timestamp'] = datetime.utcnow().strftime(time_format)
        data['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S '
                                                       '+0000')
        data['host'] = self.host
        data['ip_address'] = self.ip_address
        data['user'] = self.user

@@ -174,7 +158,6 @@ class CallbackModule(CallbackBase):
        self.url = None
        self.authtoken = None
        self.validate_certs = None
        self.include_milliseconds = None
        self.splunk = SplunkHTTPCollectorSource()

    def _runtime(self, result):

@@ -210,8 +193,6 @@ class CallbackModule(CallbackBase):

        self.validate_certs = self.get_option('validate_certs')

        self.include_milliseconds = self.get_option('include_milliseconds')

    def v2_playbook_on_start(self, playbook):
        self.splunk.ansible_playbook = basename(playbook._file_name)

@@ -226,7 +207,6 @@ class CallbackModule(CallbackBase):
            self.url,
            self.authtoken,
            self.validate_certs,
            self.include_milliseconds,
            'OK',
            result,
            self._runtime(result)

@@ -237,7 +217,6 @@ class CallbackModule(CallbackBase):
            self.url,
            self.authtoken,
            self.validate_certs,
            self.include_milliseconds,
            'SKIPPED',
            result,
            self._runtime(result)

@@ -248,7 +227,6 @@ class CallbackModule(CallbackBase):
            self.url,
            self.authtoken,
            self.validate_certs,
            self.include_milliseconds,
            'FAILED',
            result,
            self._runtime(result)

@@ -259,7 +237,6 @@ class CallbackModule(CallbackBase):
            self.url,
            self.authtoken,
            self.validate_certs,
            self.include_milliseconds,
            'FAILED',
            result,
            self._runtime(result)

@@ -270,7 +247,6 @@ class CallbackModule(CallbackBase):
            self.url,
            self.authtoken,
            self.validate_certs,
            self.include_milliseconds,
            'UNREACHABLE',
            result,
            self._runtime(result)
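The two timestamp layouts that include_milliseconds toggles between differ only in the %f field; note that strftime's %f actually emits six microsecond digits, despite the option's name. A quick sketch:

from datetime import datetime

now = datetime.utcnow()
print(now.strftime('%Y-%m-%d %H:%M:%S +0000'))     # e.g. 2021-03-01 12:00:00 +0000
print(now.strftime('%Y-%m-%d %H:%M:%S.%f +0000'))  # e.g. 2021-03-01 12:00:00.123456 +0000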
plugins/callback/stderr.py
@@ -0,0 +1,71 @@
# (c) 2017, Frederic Van Espen <github@freh.be>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    author: Unknown (!UNKNOWN)
    callback: stderr
    type: stdout
    requirements:
      - set as main display callback
    short_description: Splits output, sending failed tasks to stderr
    deprecated:
        why: The 'default' callback plugin now supports this functionality
        removed_in: '2.0.0'  # was Ansible 2.11
        alternative: "'default' callback plugin with 'display_failed_stderr = yes' option"
    extends_documentation_fragment:
      - default_callback
    description:
        - This is the stderr callback plugin; it behaves like the default callback plugin but sends error output to stderr.
        - It also does not output skipped host/task/item status.
'''

from ansible import constants as C
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default


class CallbackModule(CallbackModule_default):

    '''
    This is the stderr callback plugin, which reuses the default
    callback plugin but sends error output to stderr.
    '''

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'community.general.stderr'

    def __init__(self):

        self.super_ref = super(CallbackModule, self)
        self.super_ref.__init__()

    def v2_runner_on_failed(self, result, ignore_errors=False):

        delegated_vars = result._result.get('_ansible_delegated_vars', None)
        self._clean_results(result._result, result._task.action)

        if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
            self._print_task_banner(result._task)

        self._handle_exception(result._result, use_stderr=True)
        self._handle_warnings(result._result)

        if result._task.loop and 'results' in result._result:
            self._process_items(result)

        else:
            if delegated_vars:
                self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
                                                                            self._dump_results(result._result)), color=C.COLOR_ERROR,
                                      stderr=True)
            else:
                self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)),
                                      color=C.COLOR_ERROR, stderr=True)

        if ignore_errors:
            self._display.display("...ignoring", color=C.COLOR_SKIP)
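Per the deprecation note in the plugin's DOCUMENTATION, the same behaviour is available from the built-in default callback. A minimal ansible.cfg sketch, using the option names given in the deprecation notice:

[defaults]
stdout_callback = default
display_failed_stderr = yes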
@@ -18,7 +18,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    name: sumologic
    callback: sumologic
    type: aggregate
    short_description: Sends task result events to Sumologic
    author: "Ryan Currah (@ryancurrah)"

@@ -7,7 +7,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Unknown (!UNKNOWN)
    name: syslog_json
    callback: syslog_json
    type: notification
    requirements:
      - whitelist in configuration

@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    name: unixy
    callback: unixy
    type: stdout
    author: Allyson Bowles (@akatch)
    short_description: condensed Ansible output

@@ -7,7 +7,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Unknown (!UNKNOWN)
    name: yaml
    callback: yaml
    type: stdout
    short_description: yaml-ized Ansible screen output
    description:

@@ -50,7 +50,7 @@ def my_represent_scalar(self, tag, value, style=None):
        # ...no trailing space
        value = value.rstrip()
        # ...and non-printable characters
        value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
        value = ''.join(x for x in value if x in string.printable)
        # ...tabs prevent blocks from expanding
        value = value.expandtabs()
        # ...and odd bits of whitespace
@@ -10,7 +10,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Maykel Moya (!UNKNOWN) <mmoya@speedyrails.com>
    name: chroot
    connection: chroot
    short_description: Interact with local chroot
    description:
      - Run commands or put/fetch files to an existing chroot on the Ansible controller.
plugins/connection/docker.py
@@ -0,0 +1,364 @@
# Based on the chroot connection plugin by Maykel Moya
#
# (c) 2014, Lorin Hochstein
# (c) 2015, Leendert Brouwer (https://github.com/objectified)
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    author:
        - Lorin Hochstein (!UNKNOWN)
        - Leendert Brouwer (!UNKNOWN)
    connection: docker
    short_description: Run tasks in docker containers
    description:
        - Run commands or put/fetch files to an existing docker container.
    options:
      remote_user:
        description:
            - The user to execute as inside the container.
        vars:
            - name: ansible_user
            - name: ansible_docker_user
      docker_extra_args:
        description:
            - Extra arguments to pass to the docker command line.
        default: ''
      remote_addr:
        description:
            - The name of the container you want to access.
        default: inventory_hostname
        vars:
            - name: ansible_host
            - name: ansible_docker_host
'''

import distutils.spawn
import fcntl
import os
import os.path
import subprocess
import re

from distutils.version import LooseVersion

import ansible.constants as C
from ansible.compat import selectors
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase, BUFSIZE
from ansible.utils.display import Display

display = Display()


class Connection(ConnectionBase):
    ''' Local docker based connections '''

    transport = 'community.general.docker'
    has_pipelining = True

    def __init__(self, play_context, new_stdin, *args, **kwargs):
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)

        # Note: docker supports running as non-root in some configurations.
        # (For instance, setting the UNIX socket file to be readable and
        # writable by a specific UNIX group and then putting users into that
        # group). Therefore we don't check that the user is root when using
        # this connection. But if the user is getting a permission denied
        # error it probably means that docker on their system is only
        # configured to be connected to by root and they are not running as
        # root.

        # Windows uses Powershell modules
        if getattr(self._shell, "_IS_WINDOWS", False):
            self.module_implementation_preferences = ('.ps1', '.exe', '')

        if 'docker_command' in kwargs:
            self.docker_cmd = kwargs['docker_command']
        else:
            self.docker_cmd = distutils.spawn.find_executable('docker')
            if not self.docker_cmd:
                raise AnsibleError("docker command not found in PATH")

        docker_version = self._get_docker_version()
        if docker_version == u'dev':
            display.warning(u'Docker version number is "dev". Will assume latest version.')
        if docker_version != u'dev' and LooseVersion(docker_version) < LooseVersion(u'1.3'):
            raise AnsibleError('docker connection type requires docker 1.3 or higher')

        # The remote user we will request from docker (if supported)
        self.remote_user = None
        # The actual user which will execute commands in docker (if known)
        self.actual_user = None

        if self._play_context.remote_user is not None:
            if docker_version == u'dev' or LooseVersion(docker_version) >= LooseVersion(u'1.7'):
                # Support for specifying the exec user was added in docker 1.7
                self.remote_user = self._play_context.remote_user
                self.actual_user = self.remote_user
            else:
                self.actual_user = self._get_docker_remote_user()

                if self.actual_user != self._play_context.remote_user:
                    display.warning(u'docker {0} does not support remote_user, using container default: {1}'
                                    .format(docker_version, self.actual_user or u'?'))
        elif self._display.verbosity > 2:
            # Since we're not setting the actual_user, look it up so we have it for logging later
            # Only do this if display verbosity is high enough that we'll need the value
            # This saves overhead from calling into docker when we don't need to
            self.actual_user = self._get_docker_remote_user()

    @staticmethod
    def _sanitize_version(version):
        return re.sub(u'[^0-9a-zA-Z.]', u'', version)

    def _old_docker_version(self):
        cmd_args = []
        if self._play_context.docker_extra_args:
            cmd_args += self._play_context.docker_extra_args.split(' ')

        old_version_subcommand = ['version']

        old_docker_cmd = [self.docker_cmd] + cmd_args + old_version_subcommand
        p = subprocess.Popen(old_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        cmd_output, err = p.communicate()

        return old_docker_cmd, to_native(cmd_output), err, p.returncode

    def _new_docker_version(self):
        # no result yet, must be newer Docker version
        cmd_args = []
        if self._play_context.docker_extra_args:
            cmd_args += self._play_context.docker_extra_args.split(' ')

        new_version_subcommand = ['version', '--format', "'{{.Server.Version}}'"]

        new_docker_cmd = [self.docker_cmd] + cmd_args + new_version_subcommand
        p = subprocess.Popen(new_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        cmd_output, err = p.communicate()
        return new_docker_cmd, to_native(cmd_output), err, p.returncode

    def _get_docker_version(self):

        cmd, cmd_output, err, returncode = self._old_docker_version()
        if returncode == 0:
            for line in to_text(cmd_output, errors='surrogate_or_strict').split(u'\n'):
                if line.startswith(u'Server version:'):  # old docker versions
                    return self._sanitize_version(line.split()[2])

        cmd, cmd_output, err, returncode = self._new_docker_version()
        if returncode:
            raise AnsibleError('Docker version check (%s) failed: %s' % (to_native(cmd), to_native(err)))

        return self._sanitize_version(to_text(cmd_output, errors='surrogate_or_strict'))

    def _get_docker_remote_user(self):
        """ Get the default user configured in the docker container """
        p = subprocess.Popen([self.docker_cmd, 'inspect', '--format', '{{.Config.User}}', self._play_context.remote_addr],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        out, err = p.communicate()
        out = to_text(out, errors='surrogate_or_strict')

        if p.returncode != 0:
            display.warning(u'unable to retrieve default user from docker container: %s %s' % (out, to_text(err)))
            return None

        # The default exec user is root, unless it was changed in the Dockerfile with USER
        return out.strip() or u'root'

    def _build_exec_cmd(self, cmd):
        """ Build the local docker exec command to run cmd on remote_host

        If remote_user is available and is supported by the docker
        version we are using, it will be provided to docker exec.
        """

        local_cmd = [self.docker_cmd]

        if self._play_context.docker_extra_args:
            local_cmd += self._play_context.docker_extra_args.split(' ')

        local_cmd += [b'exec']

        if self.remote_user is not None:
            local_cmd += [b'-u', self.remote_user]

        # -i is needed to keep stdin open which allows pipelining to work
        local_cmd += [b'-i', self._play_context.remote_addr] + cmd

        return local_cmd

    def _connect(self, port=None):
        """ Connect to the container. Nothing to do """
        super(Connection, self)._connect()
        if not self._connected:
            display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
                self.actual_user or u'?'), host=self._play_context.remote_addr
            )
            self._connected = True

    def exec_command(self, cmd, in_data=None, sudoable=False):
        """ Run a command on the docker host """
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd])

        display.vvv(u"EXEC {0}".format(to_text(local_cmd)), host=self._play_context.remote_addr)
        display.debug("opening command with Popen()")

        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]

        p = subprocess.Popen(
            local_cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        display.debug("done running command with Popen()")

        if self.become and self.become.expect_prompt() and sudoable:
            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
            selector = selectors.DefaultSelector()
            selector.register(p.stdout, selectors.EVENT_READ)
            selector.register(p.stderr, selectors.EVENT_READ)

            become_output = b''
            try:
                while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
                    events = selector.select(self._play_context.timeout)
                    if not events:
                        stdout, stderr = p.communicate()
                        raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))

                    for key, event in events:
                        if key.fileobj == p.stdout:
                            chunk = p.stdout.read()
                        elif key.fileobj == p.stderr:
                            chunk = p.stderr.read()

                    if not chunk:
                        stdout, stderr = p.communicate()
                        raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
                    become_output += chunk
            finally:
                selector.close()

            if not self.become.check_success(become_output):
                become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
                p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)

        display.debug("getting output with communicate()")
        stdout, stderr = p.communicate(in_data)
        display.debug("done communicating")

        display.debug("done with docker.exec_command()")
        return (p.returncode, stdout, stderr)

    def _prefix_login_path(self, remote_path):
        ''' Make sure that we put files into a standard path

            If a path is relative, then we need to choose where to put it.
            ssh chooses $HOME but we aren't guaranteed that a home dir will
            exist in any given chroot. So for now we're choosing "/" instead.
            This also happens to be the former default.

            Can revisit using $HOME instead if it's a problem
        '''
        if getattr(self._shell, "_IS_WINDOWS", False):
            import ntpath
            return ntpath.normpath(remote_path)
        else:
            if not remote_path.startswith(os.path.sep):
                remote_path = os.path.join(os.path.sep, remote_path)
            return os.path.normpath(remote_path)

    def put_file(self, in_path, out_path):
        """ Transfer a file from local to docker container """
        super(Connection, self).put_file(in_path, out_path)
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)

        out_path = self._prefix_login_path(out_path)
        if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
            raise AnsibleFileNotFound(
                "file or module does not exist: %s" % to_native(in_path))

        out_path = shlex_quote(out_path)
        # Older docker doesn't have native support for copying files into
        # running containers, so we use docker exec to implement this
        # Although docker version 1.8 and later provide support, the
        # owner and group of the files are always set to root
        with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
            if not os.fstat(in_file.fileno()).st_size:
                count = ' count=0'
            else:
                count = ''
            args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)])
            args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
            try:
                p = subprocess.Popen(args, stdin=in_file,
                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            except OSError:
                raise AnsibleError("docker connection requires dd command in the container to put files")
            stdout, stderr = p.communicate()

            if p.returncode != 0:
                raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" %
                                   (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))

    def fetch_file(self, in_path, out_path):
        """ Fetch a file from container to local. """
        super(Connection, self).fetch_file(in_path, out_path)
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)

        in_path = self._prefix_login_path(in_path)
        # out_path is the final file path, but docker takes a directory, not a
        # file path
        out_dir = os.path.dirname(out_path)

        args = [self.docker_cmd, "cp", "%s:%s" % (self._play_context.remote_addr, in_path), out_dir]
        args = [to_bytes(i, errors='surrogate_or_strict') for i in args]

        p = subprocess.Popen(args, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.communicate()

        if getattr(self._shell, "_IS_WINDOWS", False):
            import ntpath
            actual_out_path = ntpath.join(out_dir, ntpath.basename(in_path))
        else:
            actual_out_path = os.path.join(out_dir, os.path.basename(in_path))

        if p.returncode != 0:
            # Older docker doesn't have native support for fetching files command `cp`
            # If `cp` fails, try to use `dd` instead
            args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
            args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
            with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file:
                try:
                    p = subprocess.Popen(args, stdin=subprocess.PIPE,
                                         stdout=out_file, stderr=subprocess.PIPE)
                except OSError:
                    raise AnsibleError("docker connection requires dd command in the container to fetch files")
                stdout, stderr = p.communicate()

                if p.returncode != 0:
                    raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))

        # Rename if needed
        if actual_out_path != out_path:
            os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict'))

    def close(self):
        """ Terminate the connection. Nothing to do for Docker """
        super(Connection, self).close()
        self._connected = False
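The argv that _build_exec_cmd assembles can be previewed without Ansible. A simplified sketch with a hypothetical container name and user (the real method mixes in bytes literals; plain strings are used here for readability):

def build_exec_cmd(docker_cmd, cmd, container, remote_user=None, extra_args=None):
    # docker [extra args] exec [-u USER] -i CONTAINER CMD...
    local_cmd = [docker_cmd]
    if extra_args:
        local_cmd += extra_args.split(' ')
    local_cmd += ['exec']
    if remote_user is not None:
        local_cmd += ['-u', remote_user]
    # -i keeps stdin open so module payloads can be piped in (pipelining).
    local_cmd += ['-i', container] + cmd
    return local_cmd

print(build_exec_cmd('docker', ['/bin/sh', '-c', 'echo hi'], 'web1', remote_user='app'))
# ['docker', 'exec', '-u', 'app', '-i', 'web1', '/bin/sh', '-c', 'echo hi']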
@@ -9,7 +9,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Michael Scherer (@msherer) <misc@zarb.org>
    name: funcd
    connection: funcd
    short_description: Use funcd to connect to target
    description:
      - This transport permits you to use Ansible over Func.

@@ -10,7 +10,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Stephan Lohse (!UNKNOWN) <dev-github@ploek.org>
    name: iocage
    connection: iocage
    short_description: Run tasks in iocage jails
    description:
      - Run commands or put/fetch files to an existing iocage jail

@@ -10,7 +10,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Ansible Core Team
    name: jail
    connection: jail
    short_description: Run tasks in jails
    description:
      - Run commands or put/fetch files to an existing jail
@@ -7,7 +7,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Joerg Thalheim (!UNKNOWN) <joerg@higgsboson.tk>
    name: lxc
    connection: lxc
    short_description: Run tasks in lxc containers via lxc python library
    description:
      - Run commands or put/fetch files to an existing lxc container using lxc python library

@@ -86,7 +86,7 @@ class Connection(ConnectionBase):
        write_fds = []
        while len(read_fds) > 0 or len(write_fds) > 0:
            try:
                ready_reads, ready_writes, dummy = select.select(read_fds, write_fds, [])
                ready_reads, ready_writes, _ = select.select(read_fds, write_fds, [])
            except select.error as e:
                if e.args[0] == errno.EINTR:
                    continue
@@ -7,14 +7,14 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Matt Clay (@mattclay) <matt@mystile.com>
|
||||
name: lxd
|
||||
connection: lxd
|
||||
short_description: Run tasks in lxc containers via lxc CLI
|
||||
description:
|
||||
- Run commands or put/fetch files to an existing lxc container using lxc CLI
|
||||
options:
|
||||
remote_addr:
|
||||
description:
|
||||
- Container identifier.
|
||||
- Container identifier
|
||||
default: inventory_hostname
|
||||
vars:
|
||||
- name: ansible_host
|
||||
@@ -26,19 +26,6 @@ DOCUMENTATION = '''
|
||||
vars:
|
||||
- name: ansible_executable
|
||||
- name: ansible_lxd_executable
|
||||
remote:
|
||||
description:
|
||||
- Name of the LXD remote to use.
|
||||
default: local
|
||||
vars:
|
||||
- name: ansible_lxd_remote
|
||||
version_added: 2.0.0
|
||||
project:
|
||||
description:
|
||||
- Name of the LXD project to use.
|
||||
vars:
|
||||
- name: ansible_lxd_project
|
||||
version_added: 2.0.0
|
||||
'''
|
||||
|
||||
import os
|
||||
@@ -83,15 +70,7 @@ class Connection(ConnectionBase):
|
||||
|
||||
self._display.vvv(u"EXEC {0}".format(cmd), host=self._host)
|
||||
|
||||
local_cmd = [self._lxc_cmd]
|
||||
if self.get_option("project"):
|
||||
local_cmd.extend(["--project", self.get_option("project")])
|
||||
local_cmd.extend([
|
||||
"exec",
|
||||
"%s:%s" % (self.get_option("remote"), self._host),
|
||||
"--",
|
||||
self._play_context.executable, "-c", cmd
|
||||
])
|
||||
local_cmd = [self._lxc_cmd, "exec", self._host, "--", self._play_context.executable, "-c", cmd]
|
||||
|
||||
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
||||
in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')
|
||||
@@ -119,14 +98,7 @@ class Connection(ConnectionBase):
|
||||
if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
|
||||
raise AnsibleFileNotFound("input path is not a file: %s" % in_path)
|
||||
|
||||
local_cmd = [self._lxc_cmd]
|
||||
if self.get_option("project"):
|
||||
local_cmd.extend(["--project", self.get_option("project")])
|
||||
local_cmd.extend([
|
||||
"file", "push",
|
||||
in_path,
|
||||
"%s:%s/%s" % (self.get_option("remote"), self._host, out_path)
|
||||
])
|
||||
local_cmd = [self._lxc_cmd, "file", "push", in_path, self._host + "/" + out_path]
|
||||
|
||||
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
||||
|
||||
@@ -139,14 +111,7 @@ class Connection(ConnectionBase):
|
||||
|
||||
self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host)
|
||||
|
||||
local_cmd = [self._lxc_cmd]
|
||||
if self.get_option("project"):
|
||||
local_cmd.extend(["--project", self.get_option("project")])
|
||||
local_cmd.extend([
|
||||
"file", "pull",
|
||||
"%s:%s/%s" % (self.get_option("remote"), self._host, in_path),
|
||||
out_path
|
||||
])
|
||||
local_cmd = [self._lxc_cmd, "file", "pull", self._host + "/" + in_path, out_path]
|
||||
|
||||
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
||||
|
||||
|
||||
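
For orientation, a minimal inventory sketch (not part of the diff) showing how the remote and project options added above could be set; the container, remote, and project names are hypothetical:

    all:
      hosts:
        mycontainer:
          ansible_connection: community.general.lxd
          ansible_lxd_remote: local      # maps to the new "remote" option
          ansible_lxd_project: default   # maps to the new "project" option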
173  plugins/connection/oc.py  Normal file
@@ -0,0 +1,173 @@
# Based on the docker connection plugin
#
# Connection plugin for configuring kubernetes containers with kubectl
# (c) 2017, XuXinkun <xuxinkun@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
author:
- xuxinkun (!UNKNOWN)

connection: oc

short_description: Execute tasks in pods running on OpenShift.

description:
- Use the oc exec command to run tasks in, or put/fetch files to, pods running on the OpenShift
  container platform.

requirements:
- oc (go binary)

options:
  oc_pod:
    description:
    - Pod name. Required when the host name does not match pod name.
    default: ''
    vars:
    - name: ansible_oc_pod
    env:
    - name: K8S_AUTH_POD
  oc_container:
    description:
    - Container name. Required when a pod contains more than one container.
    default: ''
    vars:
    - name: ansible_oc_container
    env:
    - name: K8S_AUTH_CONTAINER
  oc_namespace:
    description:
    - The namespace of the pod
    default: ''
    vars:
    - name: ansible_oc_namespace
    env:
    - name: K8S_AUTH_NAMESPACE
  oc_extra_args:
    description:
    - Extra arguments to pass to the oc command line.
    default: ''
    vars:
    - name: ansible_oc_extra_args
    env:
    - name: K8S_AUTH_EXTRA_ARGS
  oc_kubeconfig:
    description:
    - Path to an oc config file. Defaults to I(~/.kube/config)
    default: ''
    vars:
    - name: ansible_oc_kubeconfig
    - name: ansible_oc_config
    env:
    - name: K8S_AUTH_KUBECONFIG
  oc_context:
    description:
    - The name of a context found in the K8s config file.
    default: ''
    vars:
    - name: ansible_oc_context
    env:
    - name: K8S_AUTH_CONTEXT
  oc_host:
    description:
    - URL for accessing the API.
    default: ''
    vars:
    - name: ansible_oc_host
    - name: ansible_oc_server
    env:
    - name: K8S_AUTH_HOST
    - name: K8S_AUTH_SERVER
  oc_token:
    description:
    - API authentication bearer token.
    vars:
    - name: ansible_oc_token
    - name: ansible_oc_api_key
    env:
    - name: K8S_AUTH_TOKEN
    - name: K8S_AUTH_API_KEY
  client_cert:
    description:
    - Path to a certificate used to authenticate with the API.
    default: ''
    vars:
    - name: ansible_oc_cert_file
    - name: ansible_oc_client_cert
    env:
    - name: K8S_AUTH_CERT_FILE
    aliases: [ oc_cert_file ]
  client_key:
    description:
    - Path to a key file used to authenticate with the API.
    default: ''
    vars:
    - name: ansible_oc_key_file
    - name: ansible_oc_client_key
    env:
    - name: K8S_AUTH_KEY_FILE
    aliases: [ oc_key_file ]
  ca_cert:
    description:
    - Path to a CA certificate used to authenticate with the API.
    default: ''
    vars:
    - name: ansible_oc_ssl_ca_cert
    - name: ansible_oc_ca_cert
    env:
    - name: K8S_AUTH_SSL_CA_CERT
    aliases: [ oc_ssl_ca_cert ]
  validate_certs:
    description:
    - Whether or not to verify the API server's SSL certificate. Defaults to I(true).
    default: ''
    vars:
    - name: ansible_oc_verify_ssl
    - name: ansible_oc_validate_certs
    env:
    - name: K8S_AUTH_VERIFY_SSL
    aliases: [ oc_verify_ssl ]
'''

from ansible_collections.community.kubernetes.plugins.connection.kubectl import Connection as KubectlConnection


CONNECTION_TRANSPORT = 'community.general.oc'

CONNECTION_OPTIONS = {
    'oc_container': '-c',
    'oc_namespace': '-n',
    'oc_kubeconfig': '--config',
    'oc_context': '--context',
    'oc_host': '--server',
    'client_cert': '--client-certificate',
    'client_key': '--client-key',
    'ca_cert': '--certificate-authority',
    'validate_certs': '--insecure-skip-tls-verify',
    'oc_token': '--token'
}


class Connection(KubectlConnection):
    ''' Local oc based connections '''
    transport = CONNECTION_TRANSPORT
    connection_options = CONNECTION_OPTIONS
    documentation = DOCUMENTATION
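
A minimal usage sketch (not part of the diff) for the oc connection plugin added above; the pod, namespace, and container names are hypothetical:

    all:
      hosts:
        mypod:
          ansible_connection: community.general.oc
          ansible_oc_namespace: myproject   # namespace of the pod
          ansible_oc_container: app         # only needed when the pod has several containers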
@@ -11,7 +11,7 @@ __metaclass__ = type

DOCUMENTATION = '''
name: qubes
connection: qubes
short_description: Interact with an existing QubesOS AppVM

description:

@@ -10,7 +10,7 @@ __metaclass__ = type

DOCUMENTATION = '''
author: Michael Scherer (@mscherer) <misc@zarb.org>
name: saltstack
connection: saltstack
short_description: Allow ansible to piggyback on salt minions
description:
- This allows you to use existing Saltstack infrastructure to connect to targets.

@@ -19,7 +19,6 @@ DOCUMENTATION = '''
import re
import os
import pty
import codecs
import subprocess

from ansible.module_utils._text import to_bytes, to_text

@@ -86,9 +85,9 @@ class Connection(ConnectionBase):

        out_path = self._normalize_path(out_path, '/')
        self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
        with open(in_path, 'rb') as in_fh:
        with open(in_path) as in_fh:
            content = in_fh.read()
        self.client.cmd(self.host, 'hashutil.base64_decodefile', [codecs.encode(content, 'base64'), out_path])
        self.client.cmd(self.host, 'file.write', [out_path, content])

    # TODO test it
    def fetch_file(self, in_path, out_path):

@@ -11,7 +11,7 @@ __metaclass__ = type

DOCUMENTATION = '''
author: Ansible Core Team
name: zone
connection: zone
short_description: Run tasks in a zone instance
description:
- Run commands or put/fetch files to an existing zone
62  plugins/doc_fragments/_gcp.py  Normal file
@@ -0,0 +1,62 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2018, Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):
    # GCP doc fragment.
    DOCUMENTATION = r'''
options:
  project:
    description:
    - The Google Cloud Platform project to use.
    type: str
  auth_kind:
    description:
    - The type of credential used.
    type: str
    required: true
    choices: [ application, machineaccount, serviceaccount ]
  service_account_contents:
    description:
    - The contents of a Service Account JSON file, either in a dictionary or as a JSON string that represents it.
    type: jsonarg
  service_account_file:
    description:
    - The path of a Service Account JSON file if serviceaccount is selected as type.
    type: path
  service_account_email:
    description:
    - An optional service account email address if machineaccount is selected
      and the user does not wish to use the default email.
    type: str
  scopes:
    description:
    - Array of scopes to be used.
    type: list
    elements: str
  env_type:
    description:
    - Specifies which Ansible environment you're running this module within.
    - This should not be set unless you know what you're doing.
    - This only alters the User Agent string for any API requests.
    type: str
notes:
- For authentication, you can set service_account_file using the
  C(GCP_SERVICE_ACCOUNT_FILE) env variable.
- For authentication, you can set service_account_contents using the
  C(GCP_SERVICE_ACCOUNT_CONTENTS) env variable.
- For authentication, you can set service_account_email using the
  C(GCP_SERVICE_ACCOUNT_EMAIL) env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env
  variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variable values will only be used if the playbook values are
  not set.
- The I(service_account_email) and I(service_account_file) options are
  mutually exclusive.
'''
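
A hedged sketch (not part of the diff) of how a module consuming this fragment could be called; the module name, project, and file path are illustrative assumptions:

    - name: Example call against a module that uses the GCP fragment
      google.cloud.gcp_compute_network:   # illustrative module name
        name: demo-network
        project: my-project               # hypothetical project
        auth_kind: serviceaccount
        service_account_file: /path/to/sa.json   # hypothetical path
        state: present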
@@ -19,6 +19,7 @@ options:
  region:
    description:
    - The target region.
    choices:
    - Regions are defined in Apache libcloud project [libcloud/common/dimensiondata.py]
    - They are also listed in U(https://libcloud.readthedocs.io/en/latest/compute/drivers/dimensiondata.html)
    - Note that the default value "na" stands for "North America".
136  plugins/doc_fragments/docker.py  Normal file
@@ -0,0 +1,136 @@
# -*- coding: utf-8 -*-

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):

    # Docker doc fragment
    DOCUMENTATION = r'''

options:
  docker_host:
    description:
    - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
      TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
      the module will automatically replace C(tcp) in the connection URL with C(https).
    - If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used
      instead. If the environment variable is not set, the default value will be used.
    type: str
    default: unix://var/run/docker.sock
    aliases: [ docker_url ]
  tls_hostname:
    description:
    - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
    - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will
      be used instead. If the environment variable is not set, the default value will be used.
    type: str
    default: localhost
  api_version:
    description:
    - The version of the Docker API running on the Docker Host.
    - Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon.
    - If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be
      used instead. If the environment variable is not set, the default value will be used.
    type: str
    default: auto
    aliases: [ docker_api_version ]
  timeout:
    description:
    - The maximum amount of time in seconds to wait on a response from the API.
    - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used
      instead. If the environment variable is not set, the default value will be used.
    type: int
    default: 60
  ca_cert:
    description:
    - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
    - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
      the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
    type: path
    aliases: [ tls_ca_cert, cacert_path ]
  client_cert:
    description:
    - Path to the client's TLS certificate file.
    - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
      the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
    type: path
    aliases: [ tls_client_cert, cert_path ]
  client_key:
    description:
    - Path to the client's TLS key file.
    - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
      the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
    type: path
    aliases: [ tls_client_key, key_path ]
  ssl_version:
    description:
    - Provide a valid SSL version number. Default value determined by ssl.py module.
    - If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
      used instead.
    type: str
  tls:
    description:
    - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
      server. Note that if I(validate_certs) is set to C(yes) as well, it will take precedence.
    - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
      instead. If the environment variable is not set, the default value will be used.
    type: bool
    default: no
  validate_certs:
    description:
    - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
    - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
      used instead. If the environment variable is not set, the default value will be used.
    type: bool
    default: no
    aliases: [ tls_verify ]
  debug:
    description:
    - Debug mode.
    type: bool
    default: no

notes:
- Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
  You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
  C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
  with the product that sets up the environment. It will set these variables for you. See
  U(https://docs.docker.com/machine/reference/env/) for more details.
- When connecting to Docker daemon with TLS, you might need to install additional Python packages.
  For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip).
- Note that the Docker SDK for Python only allows specifying the path to the Docker configuration for very few functions.
  In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
  and use C($DOCKER_CONFIG/config.json) otherwise.
'''

# Additional, more specific stuff for minimal Docker SDK for Python version < 2.0

DOCKER_PY_1_DOCUMENTATION = r'''
options: {}
requirements:
- "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
  Python module has been superseded by L(docker,https://pypi.org/project/docker/)
  (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
  For Python 2.6, C(docker-py) must be used. Otherwise, it is recommended to
  install the C(docker) Python module. Note that both modules should *not*
  be installed at the same time. Also note that when both modules are installed
  and one of them is uninstalled, the other might no longer function and a
  reinstall of it is required."
'''

# Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.
# Note that Docker SDK for Python >= 2.0 requires Python 2.7 or newer.

DOCKER_PY_2_DOCUMENTATION = r'''
options: {}
requirements:
- "Python >= 2.7"
- "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
  Python module has been superseded by L(docker,https://pypi.org/project/docker/)
  (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
  This module does *not* work with docker-py."
'''
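
A hedged sketch (not part of the diff) showing the shared Docker connection options in a task; the module name and file paths are illustrative assumptions:

    - name: Example call against a TLS-protected Docker daemon
      community.general.docker_host_info:   # illustrative module using this fragment
        docker_host: tcp://192.0.2.23:2376
        validate_certs: yes
        ca_cert: /path/certs/ca.pem         # hypothetical paths
        client_cert: /path/certs/cert.pem
        client_key: /path/certs/key.pem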
23  plugins/doc_fragments/hetzner.py  Normal file
@@ -0,0 +1,23 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2019 Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):

    # Standard files documentation fragment
    DOCUMENTATION = r'''
options:
  hetzner_user:
    description: The username for the Robot webservice user.
    type: str
    required: yes
  hetzner_password:
    description: The password for the Robot webservice user.
    type: str
    required: yes
'''
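
A hedged sketch (not part of the diff) of the Hetzner Robot credentials in use; the module name and address are illustrative assumptions:

    - name: Example call against a module that uses the Hetzner fragment
      community.general.hetzner_failover_ip_info:   # illustrative module name
        hetzner_user: "{{ robot_user }}"
        hetzner_password: "{{ robot_password }}"
        failover_ip: 192.0.2.1                      # hypothetical address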
@@ -40,7 +40,6 @@ options:
  path:
    description:
    - The path on which InfluxDB server is accessible
    - Only available when using python-influxdb >= 5.1.0
    type: str
    version_added: '0.2.0'
  validate_certs:

@@ -53,7 +52,6 @@ options:
    description:
    - Use https instead of http to connect to InfluxDB server.
    type: bool
    default: false
  timeout:
    description:
    - Number of seconds Requests will wait for client to establish a connection.

@@ -62,14 +60,12 @@ options:
    description:
    - Number of retries client will try before aborting.
    - C(0) indicates try until success.
    - Only available when using python-influxdb >= 4.1.0
    type: int
    default: 3
  use_udp:
    description:
    - Use UDP to connect to InfluxDB server.
    type: bool
    default: false
  udp_port:
    description:
    - UDP port to connect to InfluxDB server.
133  plugins/doc_fragments/kubevirt_common_options.py  Normal file
@@ -0,0 +1,133 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2018, KubeVirt Team <@kubevirt>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):
    DOCUMENTATION = r'''
options:
  resource_definition:
    description:
    - "A partial YAML definition of the object being created/updated. Here you can define Kubernetes
      resource parameters not covered by this module's parameters."
    - "NOTE: I(resource_definition) has lower priority than module parameters. If you try to define e.g.
      I(metadata.namespace) here, that value will be ignored and I(namespace) used instead."
    aliases:
    - definition
    - inline
    type: dict
  wait:
    description:
    - "I(True) if the module should wait for the resource to get into desired state."
    type: bool
    default: yes
  force:
    description:
    - If set to C(no), and I(state) is C(present), an existing object will be replaced.
    type: bool
    default: no
  wait_timeout:
    description:
    - The amount of time in seconds the module should wait for the resource to get into desired state.
    type: int
    default: 120
  wait_sleep:
    description:
    - Number of seconds to sleep between checks.
    default: 5
    version_added: '0.2.0'
  memory:
    description:
    - The amount of memory to be requested by virtual machine.
    - For example 1024Mi.
    type: str
  memory_limit:
    description:
    - The maximum memory to be used by virtual machine.
    - For example 1024Mi.
    type: str
  machine_type:
    description:
    - QEMU machine type is the actual chipset of the virtual machine.
    type: str
  merge_type:
    description:
    - Whether to override the default patch merge approach with a specific type.
    - If more than one merge type is given, the merge types will be tried in order.
    - "Defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
      on resource kinds that combine Custom Resources and built-in resources, as
      Custom Resource Definitions typically aren't updatable by the usual strategic merge."
    - "See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)"
    type: list
    choices: [ json, merge, strategic-merge ]
  cpu_shares:
    description:
    - "Specify CPU shares."
    type: int
  cpu_limit:
    description:
    - "Is converted to its millicore value and multiplied by 100. The resulting value is the total amount of CPU time that a container can use
      every 100ms. A virtual machine cannot use more than its share of CPU time during this interval."
    type: int
  cpu_cores:
    description:
    - "Number of CPU cores."
    type: int
  cpu_model:
    description:
    - "CPU model."
    - "You can check list of available models here: U(https://github.com/libvirt/libvirt/blob/master/src/cpu_map/index.xml)."
    - "I(Note:) User can define the default CPU model as I(default-cpu-model) in the I(kubevirt-config) I(ConfigMap); if not set, I(host-model) is used."
    - "I(Note:) Be sure that the node CPU model where you run a VM has the same or higher CPU family."
    - "I(Note:) If the CPU model wasn't defined, the VM will have a CPU model closest to the one used on the node where the VM is running."
    type: str
  bootloader:
    description:
    - "Specify the bootloader of the virtual machine."
    - "All virtual machines use BIOS by default for booting."
    type: str
  smbios_uuid:
    description:
    - "In order to provide a consistent view on the virtualized hardware for the guest OS, the SMBIOS UUID can be set."
    type: str
  cpu_features:
    description:
    - "List of dictionaries to fine-tune features provided by the selected CPU model."
    - "I(Note): Policy attribute can either be omitted or contain one of the following policies: force, require, optional, disable, forbid."
    - "I(Note): In case a policy is omitted for a feature, it will default to require."
    - "More information about policies: U(https://libvirt.org/formatdomain.html#elementsCPU)"
    type: list
  headless:
    description:
    - "Specify if the virtual machine should have attached a minimal Video and Graphics device configuration."
    - "By default a minimal Video and Graphics device configuration will be applied to the VirtualMachineInstance. The video device is vga
      compatible and comes with a memory size of 16 MB."
  hugepage_size:
    description:
    - "Specify huge page size."
    type: str
  tablets:
    description:
    - "Specify tablets to be used as input devices"
    type: list
  hostname:
    description:
    - "Specifies the hostname of the virtual machine. The hostname will be set either by dhcp, cloud-init if configured, or the virtual machine
      name will be used."
  subdomain:
    description:
    - "If specified, the fully qualified virtual machine hostname will be hostname.subdomain.namespace.svc.cluster_domain. If not specified,
      the virtual machine will not have a domain name at all. The DNS entry will resolve to the virtual machine, no matter if the virtual machine
      itself can pick up a hostname."
requirements:
- python >= 2.7
- openshift >= 0.8.2
notes:
- "In order to use this module you have to install the Openshift Python SDK.
  To ensure it's installed with the correct version you can create the following task:
  I(pip: name=openshift>=0.8.2)"
'''
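
The note above suggests pinning the SDK with a pip task; a minimal sketch of that task:

    - name: Ensure the Openshift Python SDK is present at the required version
      ansible.builtin.pip:
        name: openshift>=0.8.2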
103  plugins/doc_fragments/kubevirt_vm_options.py  Normal file
@@ -0,0 +1,103 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2018, KubeVirt Team <@kubevirt>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):

    # Standard oVirt documentation fragment
    DOCUMENTATION = r'''
options:
  disks:
    description:
    - List of dictionaries which specify disks of the virtual machine.
    - "A disk can be made accessible via four different types: I(disk), I(lun), I(cdrom), I(floppy)."
    - "All possible configuration options are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_disk)"
    - Each disk must have specified a I(volume) that declares which volume type of the disk
      All possible configuration options of volume are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_volume).
    type: list
  labels:
    description:
    - Labels are key/value pairs that are attached to virtual machines. Labels are intended to be used to
      specify identifying attributes of virtual machines that are meaningful and relevant to users, but do not directly
      imply semantics to the core system. Labels can be used to organize and to select subsets of virtual machines.
      Labels can be attached to virtual machines at creation time and subsequently added and modified at any time.
    - More on labels that are used for internal implementation U(https://kubevirt.io/user-guide/#/misc/annotations_and_labels)
    type: dict
  interfaces:
    description:
    - An interface defines a virtual network interface of a virtual machine (also called a frontend).
    - All possible configuration options interfaces are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_interface)
    - Each interface must have specified a I(network) that declares which logical or physical device it is connected to (also called as backend).
      All possible configuration options of network are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_network).
    type: list
  cloud_init_nocloud:
    description:
    - "Represents a cloud-init NoCloud user-data source. The NoCloud data will be added
      as a disk to the virtual machine. A proper cloud-init installation is required inside the guest.
      More information U(https://kubevirt.io/api-reference/master/definitions.html#_v1_cloudinitnocloudsource)"
    type: dict
  affinity:
    description:
    - "Describes node affinity scheduling rules for the vm."
    type: dict
    suboptions:
      soft:
        description:
        - "The scheduler will prefer to schedule vms to nodes that satisfy the affinity expressions specified by this field, but it may choose a
          node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for
          each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute
          a sum by iterating through the elements of this field and adding C(weight) to the sum if the node has vms which matches the corresponding
          C(term); the nodes with the highest sum are the most preferred."
        type: dict
      hard:
        description:
        - "If the affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node. If
          the affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to a vm label update), the
          system may or may not try to eventually evict the vm from its node. When there are multiple elements, the lists of nodes corresponding to
          each C(term) are intersected, i.e. all terms must be satisfied."
        type: dict
  node_affinity:
    description:
    - "Describes vm affinity scheduling rules e.g. co-locate this vm in the same node, zone, etc. as some other vms"
    type: dict
    suboptions:
      soft:
        description:
        - "The scheduler will prefer to schedule vms to nodes that satisfy the affinity expressions specified by this field, but it may choose
          a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e.
          for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.),
          compute a sum by iterating through the elements of this field and adding C(weight) to the sum if the node matches the corresponding
          match_expressions; the nodes with the highest sum are the most preferred."
        type: dict
      hard:
        description:
        - "If the affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node. If
          the affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to an update), the system
          may or may not try to eventually evict the vm from its node."
        type: dict
  anti_affinity:
    description:
    - "Describes vm anti-affinity scheduling rules e.g. avoid putting this vm in the same node, zone, etc. as some other vms."
    type: dict
    suboptions:
      soft:
        description:
        - "The scheduler will prefer to schedule vms to nodes that satisfy the anti-affinity expressions specified by this field, but it may
          choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights,
          i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions,
          etc.), compute a sum by iterating through the elements of this field and adding C(weight) to the sum if the node has vms which matches
          the corresponding C(term); the nodes with the highest sum are the most preferred."
        type: dict
      hard:
        description:
        - "If the anti-affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node.
          If the anti-affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to a vm label
          update), the system may or may not try to eventually evict the vm from its node. When there are multiple elements, the lists of nodes
          corresponding to each C(term) are intersected, i.e. all terms must be satisfied."
        type: dict
'''
@@ -15,7 +15,7 @@ class ModuleDocFragment(object):
options:
  bind_dn:
    description:
    - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism as default.
    - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism.
    - If this is blank, we'll use an anonymous bind.
    type: str
  bind_pw:

@@ -27,15 +27,6 @@ options:
    description:
    - The DN of the entry to add or remove.
    type: str
  referrals_chasing:
    choices: [disabled, anonymous]
    default: anonymous
    type: str
    description:
    - Set the referrals chasing behavior.
    - C(anonymous) follow referrals anonymously. This is the default behavior.
    - C(disabled) disable referrals chasing. This sets C(OPT_REFERRALS) to off.
    version_added: 2.0.0
  server_uri:
    description:
    - A URI to the LDAP server.

@@ -53,12 +44,4 @@ options:
    - This should only be used on sites using self-signed certificates.
    type: bool
    default: yes
  sasl_class:
    description:
    - The class to use for SASL authentication.
    - Possible choices are C(external), C(gssapi).
    type: str
    choices: ['external', 'gssapi']
    default: external
    version_added: "2.0.0"
'''

@@ -15,14 +15,14 @@ options:
  manageiq_connection:
    description:
    - ManageIQ connection configuration information.
    required: false
    required: true
    type: dict
    suboptions:
      url:
        description:
        - ManageIQ environment url. C(MIQ_URL) env var if set. Otherwise, it is required to pass it.
        type: str
        required: false
        required: true
      username:
        description:
        - ManageIQ username. C(MIQ_USERNAME) env var if set. Otherwise, required if no token is passed in.

@@ -44,7 +44,7 @@ options:
  ca_cert:
    description:
    - The path to a CA bundle file or directory with certificates. Defaults to None.
    type: str
    type: path
    aliases: [ ca_bundle_path ]

requirements:
@@ -24,6 +24,7 @@ options:
    - Value can also be specified using C(INFOBLOX_HOST) environment
      variable.
    type: str
    required: true
  username:
    description:
    - Configures the username to use to authenticate the connection to

@@ -78,24 +79,6 @@ options:
      variable.
    type: int
    default: 1000
  http_pool_connections:
    description:
    - Number of pools to be used by the C(infoblox_client.Connector) object.
    - This is passed as-is to the underlying C(requests.adapters.HTTPAdapter) class.
    type: int
    default: 10
  http_pool_maxsize:
    description:
    - Maximum number of connections per pool to be used by the C(infoblox_client.Connector) object.
    - This is passed as-is to the underlying C(requests.adapters.HTTPAdapter) class.
    type: int
    default: 10
  silent_ssl_warnings:
    description:
    - Disable C(urllib3) SSL warnings in the C(infoblox_client.Connector) object.
    - This is passed as-is to the underlying C(requests.adapters.HTTPAdapter) class.
    type: bool
    default: true
notes:
- "This module must be run locally, which can be achieved by specifying C(connection: local)."
- Please read the :ref:`nios_guide` for more detailed information on how to use Infoblox with Ansible.
@@ -1,51 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2020 FERREIRA Christophe <christophe.ferreira@cnaf.fr>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):

    # Standard files documentation fragment
    DOCUMENTATION = r'''
options:
  host:
    description:
    - FQDN of Nomad server.
    required: true
    type: str
  use_ssl:
    description:
    - Use TLS/SSL connection.
    type: bool
    default: true
  timeout:
    description:
    - Timeout (in seconds) for the request to Nomad.
    type: int
    default: 5
  validate_certs:
    description:
    - Enable TLS/SSL certificate validation.
    type: bool
    default: true
  client_cert:
    description:
    - Path of certificate for TLS/SSL.
    type: path
  client_key:
    description:
    - Path of certificate's private key for TLS/SSL.
    type: path
  namespace:
    description:
    - Namespace for Nomad.
    type: str
  token:
    description:
    - ACL token for authentication.
    type: str
'''
@@ -13,32 +13,12 @@ class ModuleDocFragment(object):
DOCUMENTATION = r'''
options:
  config:
    description:
    - Path to a .json configuration file containing the OneView client configuration.
      The configuration file is optional and when used should be present in the host running the ansible commands.
      If the file path is not provided, the configuration will be loaded from environment variables.
      For links to example configuration files or how to use the environment variables verify the notes section.
    type: path
  api_version:
    description:
    - OneView API Version.
    type: int
  image_streamer_hostname:
    description:
    - IP address or hostname for the HPE Image Streamer REST API.
    type: str
  hostname:
    description:
    - IP address or hostname for the appliance.
    type: str
  username:
    description:
    - Username for API authentication.
    type: str
  password:
    description:
    - Password for API authentication.
    type: str
    type: path

requirements:
- python >= 2.7.9

@@ -15,7 +15,6 @@ options:
    description:
    - Online OAuth token.
    type: str
    required: true
    aliases: [ oauth_token ]
  api_url:
    description:

@@ -47,7 +47,7 @@ class ModuleDocFragment(object):
      OCI_USER_KEY_FILE variable, if any, is used. This option is required if the private key is
      not specified through a configuration file (See C(config_file_location)). If the key is encrypted
      with a pass-phrase, the C(api_user_key_pass_phrase) option must also be provided.
    type: path
    type: str
  api_user_key_pass_phrase:
    description:
    - Passphrase used by the key referenced in C(api_user_key_file), if it is encrypted. If not set, then

@@ -20,5 +20,4 @@ class ModuleDocFragment(object):
      identify an instance of the resource. By default, all the attributes of a resource except
      I(freeform_tags) are used to uniquely identify a resource.
    type: list
    elements: str
"""

@@ -19,7 +19,6 @@ options:
      Only the attributes of the current entity. User can configure to fetch other
      attributes of the nested entities by specifying C(nested_attributes).
    type: bool
    default: false
  nested_attributes:
    description:
    - Specifies list of the attributes which should be fetched from the API.
62  plugins/doc_fragments/postgres.py  Normal file
@@ -0,0 +1,62 @@
# -*- coding: utf-8 -*-

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):
    # Postgres documentation fragment
    DOCUMENTATION = r'''
options:
  login_user:
    description:
    - The username used to authenticate with.
    type: str
    default: postgres
  login_password:
    description:
    - The password used to authenticate with.
    type: str
  login_host:
    description:
    - Host running the database.
    type: str
  login_unix_socket:
    description:
    - Path to a Unix domain socket for local connections.
    type: str
  port:
    description:
    - Database port to connect to.
    type: int
    default: 5432
    aliases: [ login_port ]
  ssl_mode:
    description:
    - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
    - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
    - Default of C(prefer) matches libpq default.
    type: str
    default: prefer
    choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
  ca_cert:
    description:
    - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
    - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
    type: str
    aliases: [ ssl_rootcert ]
notes:
- The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host.
- To avoid "Peer authentication failed for user postgres" error,
  use postgres user as a I(become_user).
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
  ensure that psycopg2 is installed on the host before using this module.
- If the remote host is the PostgreSQL server (which is the default case), then
  PostgreSQL must also be installed on the remote host.
- For Ubuntu-based systems, install the postgresql, libpq-dev, and python-psycopg2 packages
  on the remote host before using this module.
- The ca_cert parameter requires at least Postgres version 8.4 and I(psycopg2) version 2.4.3.
requirements: [ psycopg2 ]
'''
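
A hedged sketch (not part of the diff) of the shared Postgres connection options in a task; the module name, host, and database are illustrative assumptions:

    - name: Example call against a module that uses the postgres fragment
      community.general.postgresql_db:   # illustrative module name
        name: appdb                      # hypothetical database
        login_host: db.example.com
        login_user: postgres
        port: 5432
        ssl_mode: prefer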
@@ -1,43 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Florian Dambrine <android.florian@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function

__metaclass__ = type


class ModuleDocFragment(object):

    DOCUMENTATION = r"""
options:
  pritunl_url:
    type: str
    required: true
    description:
    - URL and port of the Pritunl server on which the API is enabled.

  pritunl_api_token:
    type: str
    required: true
    description:
    - API Token of a Pritunl admin user.
    - It needs to be enabled in Administrators > USERNAME > Enable Token Authentication.

  pritunl_api_secret:
    type: str
    required: true
    description:
    - API Secret found in Administrators > USERNAME > API Secret.

  validate_certs:
    type: bool
    required: false
    default: true
    description:
    - Whether certificates should be validated or not.
    - This should never be set to C(false), except if you are very sure that
      your connection to the server can not be subject to a Man In The Middle
      attack.
"""
@@ -1,64 +0,0 @@
# -*- coding: utf-8 -*-

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):
    # Common parameters for Proxmox VE modules
    DOCUMENTATION = r'''
options:
  api_host:
    description:
    - Specify the target host of the Proxmox VE cluster.
    type: str
    required: true
  api_user:
    description:
    - Specify the user to authenticate with.
    type: str
    required: true
  api_password:
    description:
    - Specify the password to authenticate with.
    - You can use C(PROXMOX_PASSWORD) environment variable.
    type: str
  api_token_id:
    description:
    - Specify the token ID.
    type: str
    version_added: 1.3.0
  api_token_secret:
    description:
    - Specify the token secret.
    type: str
    version_added: 1.3.0
  validate_certs:
    description:
    - If C(no), SSL certificates will not be validated.
    - This should only be used on personally controlled sites using self-signed certificates.
    type: bool
    default: no
requirements: [ "proxmoxer", "requests" ]
'''

SELECTION = r'''
options:
  vmid:
    description:
    - Specifies the instance ID.
    - If not set the next available ID will be fetched from ProxmoxAPI.
    type: int
  node:
    description:
    - Proxmox VE node on which to operate.
    - Only required for I(state=present).
    - For all other states it will be autodiscovered.
    type: str
  pool:
    description:
    - Add the new VM to the specified pool.
    type: str
'''
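
A hedged sketch (not part of the diff) of the token options added in 1.3.0; the module name and values are illustrative assumptions:

    - name: Example call using API token authentication
      community.general.proxmox:   # illustrative module name
        api_host: pve.example.com
        api_user: ansible@pam
        api_token_id: mytoken                     # hypothetical token ID
        api_token_secret: "{{ vaulted_secret }}"
        vmid: 101
        state: started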
@@ -32,6 +32,7 @@ options:
    description:
    - Region to create an instance in.
    type: str
    default: DFW
  username:
    description:
    - Rackspace username, overrides I(credentials).

@@ -58,45 +59,37 @@ notes:
OPENSTACK = r'''
options:
  api_key:
    type: str
    description:
    - Rackspace API key, overrides I(credentials).
    aliases: [ password ]
  auth_endpoint:
    type: str
    description:
    - The URI of the authentication service.
    - If not specified will be set to U(https://identity.api.rackspacecloud.com/v2.0/)
    default: https://identity.api.rackspacecloud.com/v2.0/
  credentials:
    type: path
    description:
    - File to find the Rackspace credentials in. Ignored if I(api_key) and
      I(username) are provided.
    aliases: [ creds_file ]
  env:
    type: str
    description:
    - Environment as configured in I(~/.pyrax.cfg),
      see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration).
  identity_type:
    type: str
    description:
    - Authentication mechanism to use, such as rackspace or keystone.
    default: rackspace
  region:
    type: str
    description:
    - Region to create an instance in.
    default: DFW
  tenant_id:
    type: str
    description:
    - The tenant ID used for authentication.
  tenant_name:
    type: str
    description:
    - The tenant name used for authentication.
  username:
    type: str
    description:
    - Rackspace username, overrides I(credentials).
  validate_certs:
@@ -1,70 +0,0 @@
# Copyright (C) 2020 Stanislav German-Evtushenko (@giner) <ginermail@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


def dict_kv(value, key):
    '''Return a dictionary with a single key-value pair

    Example:

        - hosts: localhost
          gather_facts: false
          vars:
            myvar: myvalue
          tasks:
          - debug:
              msg: "{{ myvar | dict_kv('thatsmyvar') }}"

    produces:

        ok: [localhost] => {
            "msg": {
                "thatsmyvar": "myvalue"
            }
        }

    Example 2:

        - hosts: localhost
          gather_facts: false
          vars:
            common_config:
              type: host
              database: all
            myservers:
            - server1
            - server2
          tasks:
          - debug:
              msg: "{{ myservers | map('dict_kv', 'server') | map('combine', common_config) }}"

    produces:

        ok: [localhost] => {
            "msg": [
                {
                    "database": "all",
                    "server": "server1",
                    "type": "host"
                },
                {
                    "database": "all",
                    "server": "server2",
                    "type": "host"
                }
            ]
        }
    '''
    return {key: value}


class FilterModule(object):
    ''' Query filter '''

    def filters(self):
        return {
            'dict_kv': dict_kv
        }
@@ -1,49 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) <ajpantuso@gmail.com>
# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible.errors import AnsibleFilterError
from ansible.module_utils._text import to_native

from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError,
                                                                            DialectNotAvailableError,
                                                                            CustomDialectFailureError)


def from_csv(data, dialect='excel', fieldnames=None, delimiter=None, skipinitialspace=None, strict=None):

    dialect_params = {
        "delimiter": delimiter,
        "skipinitialspace": skipinitialspace,
        "strict": strict,
    }

    try:
        dialect = initialize_dialect(dialect, **dialect_params)
    except (CustomDialectFailureError, DialectNotAvailableError) as e:
        raise AnsibleFilterError(to_native(e))

    reader = read_csv(data, dialect, fieldnames)

    data_list = []

    try:
        for row in reader:
            data_list.append(row)
    except CSVError as e:
        raise AnsibleFilterError("Unable to process file: %s" % to_native(e))

    return data_list


class FilterModule(object):

    def filters(self):
        return {
            'from_csv': from_csv
        }
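
A minimal usage sketch (not part of the diff) for the from_csv filter shown above; the CSV text is illustrative:

    - ansible.builtin.debug:
        msg: "{{ csv_text | community.general.from_csv }}"
      vars:
        csv_text: "name,role\nserver1,db\nserver2,web\n"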
@@ -35,9 +35,6 @@ def json_query(data, expr):
        raise AnsibleError('You need to install "jmespath" prior to running '
                           'json_query filter')

    # Hack to handle Ansible String Types
    # See issue: https://github.com/ansible-collections/community.general/issues/320
    jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ('AnsibleUnicode', 'AnsibleUnsafeText', )
    try:
        return jmespath.search(expr, data)
    except jmespath.exceptions.JMESPathError as e:
@@ -1,47 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Vladimir Botka <vbotka@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.errors import AnsibleError, AnsibleFilterError
from ansible.module_utils.six import string_types
from ansible.module_utils.common._collections_compat import Mapping, Sequence
from collections import defaultdict
from operator import itemgetter


def lists_mergeby(l1, l2, index):
    ''' merge lists by attribute index. Example:
        - debug: msg="{{ l1|community.general.lists_mergeby(l2, 'index')|list }}" '''

    if not isinstance(l1, Sequence):
        raise AnsibleFilterError('First argument for community.general.lists_mergeby must be list. %s is %s' %
                                 (l1, type(l1)))

    if not isinstance(l2, Sequence):
        raise AnsibleFilterError('Second argument for community.general.lists_mergeby must be list. %s is %s' %
                                 (l2, type(l2)))

    if not isinstance(index, string_types):
        raise AnsibleFilterError('Third argument for community.general.lists_mergeby must be string. %s is %s' %
                                 (index, type(index)))

    d = defaultdict(dict)
    for l in (l1, l2):
        for elem in l:
            if not isinstance(elem, Mapping):
                raise AnsibleFilterError('Elements of list arguments for lists_mergeby must be dictionaries. Found {0!r}.'.format(elem))
            if index in elem.keys():
                d[elem[index]].update(elem)
    return sorted(d.values(), key=itemgetter(index))


class FilterModule(object):
    ''' Ansible list filters '''

    def filters(self):
        return {
            'lists_mergeby': lists_mergeby,
        }
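
A minimal usage sketch (not part of the diff) for lists_mergeby: entries sharing the same value of the chosen key are merged; the data is illustrative:

    - ansible.builtin.debug:
        msg: "{{ list1 | community.general.lists_mergeby(list2, 'name') | list }}"
      vars:
        list1: [{name: a, port: 80}]
        list2: [{name: a, proto: tcp}, {name: b, proto: udp}]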
@@ -1,21 +0,0 @@
# Copyright (C) 2021 Eric Lavarde <elavarde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from distutils.version import LooseVersion


def version_sort(value, reverse=False):
    '''Sort a list according to loose versions so that e.g. 2.9 is smaller than 2.10'''
    return sorted(value, key=LooseVersion, reverse=reverse)


class FilterModule(object):
    ''' Version sort filter '''

    def filters(self):
        return {
            'version_sort': version_sort
        }
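
A minimal usage sketch (not part of the diff) for version_sort; note that '2.10' sorts after '2.9':

    - ansible.builtin.debug:
        msg: "{{ ['2.10', '2.9', '2.1'] | community.general.version_sort }}"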
@@ -8,6 +8,7 @@ __metaclass__ = type
DOCUMENTATION = '''
author: Orion Poplawski (@opoplawski)
name: cobbler
plugin_type: inventory
short_description: Cobbler inventory source
version_added: 1.0.0
description:
272  plugins/inventory/docker_machine.py  Normal file
@@ -0,0 +1,272 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Ximon Eighteen <ximon.eighteen@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    name: docker_machine
    plugin_type: inventory
    author: Ximon Eighteen (@ximon18)
    short_description: Docker Machine inventory source
    requirements:
        - L(Docker Machine,https://docs.docker.com/machine/)
    extends_documentation_fragment:
        - constructed
    description:
        - Get inventory hosts from Docker Machine.
        - Uses a YAML configuration file that ends with docker_machine.(yml|yaml).
        - The plugin sets standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key_file).
        - The plugin stores the Docker Machine 'env' output variables in I(dm_) prefixed host variables.

    options:
        plugin:
            description: token that ensures this is a source file for the C(docker_machine) plugin.
            required: yes
            choices: ['docker_machine', 'community.general.docker_machine']
        daemon_env:
            description:
                - Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched.
                - With C(require) and C(require-silently), fetch them and skip any host for which they cannot be fetched.
                  A warning will be issued for any skipped host if the choice is C(require).
                - With C(optional) and C(optional-silently), fetch them and do not skip hosts for which they cannot be fetched.
                  A warning will be issued for hosts where they cannot be fetched if the choice is C(optional).
                - With C(skip), do not attempt to fetch the docker daemon connection environment variables.
                - If fetched successfully, the variables will be prefixed with I(dm_) and stored as host variables.
            type: str
            choices:
                - require
                - require-silently
                - optional
                - optional-silently
                - skip
            default: require
        running_required:
            description: When true, hosts which Docker Machine indicates are in a state other than C(running) will be skipped.
            type: bool
            default: yes
        verbose_output:
            description: When true, include all available node metadata (e.g. Image, Region, Size) as a JSON object named C(docker_machine_node_attributes).
            type: bool
            default: yes
'''

EXAMPLES = '''
# Minimal example
plugin: community.general.docker_machine

# Example using constructed features to create a group per Docker Machine driver
# (https://docs.docker.com/machine/drivers/), e.g.:
#   $ docker-machine create --driver digitalocean ... mymachine
#   $ ansible-inventory -i ./path/to/docker-machine.yml --host=mymachine
#   {
#     ...
#     "digitalocean": {
#       "hosts": [
#           "mymachine"
#       ]
#     ...
#   }
strict: no
keyed_groups:
  - separator: ''
    key: docker_machine_node_attributes.DriverName

# Example grouping hosts by Docker Machine tag
strict: no
keyed_groups:
  - prefix: tag
    key: 'dm_tags'

# Example using compose to override the default SSH behaviour of asking the user to accept the remote host key
compose:
  ansible_ssh_common_args: '"-o StrictHostKeyChecking=accept-new"'
'''

from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native
from ansible.module_utils._text import to_text
from ansible.module_utils.common.process import get_bin_path
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.utils.display import Display

import json
import re
import subprocess

display = Display()


class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
    ''' Host inventory parser for ansible using Docker machine as source. '''

    NAME = 'community.general.docker_machine'

    DOCKER_MACHINE_PATH = None

    def _run_command(self, args):
        if not self.DOCKER_MACHINE_PATH:
            try:
                self.DOCKER_MACHINE_PATH = get_bin_path('docker-machine')
            except ValueError as e:
                raise AnsibleError(to_native(e))

        command = [self.DOCKER_MACHINE_PATH]
        command.extend(args)
        display.debug('Executing command {0}'.format(command))
        try:
            result = subprocess.check_output(command)
        except subprocess.CalledProcessError as e:
            display.warning('Exception {0} caught while executing command {1}, this was the original exception: {2}'.format(type(e).__name__, command, e))
            raise e

        return to_text(result).strip()

    def _get_docker_daemon_variables(self, machine_name):
        '''
        Capture settings from Docker Machine that would be needed to connect to the remote Docker daemon installed on
        the Docker Machine remote host. Note: passing '--shell=sh' is a workaround for 'Error: Unknown shell'.
        '''
        try:
            env_lines = self._run_command(['env', '--shell=sh', machine_name]).splitlines()
        except subprocess.CalledProcessError:
            # This can happen when the machine is created but provisioning is incomplete
            return []

        # example output of docker-machine env --shell=sh:
        # export DOCKER_TLS_VERIFY="1"
        # export DOCKER_HOST="tcp://134.209.204.160:2376"
        # export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator"
        # export DOCKER_MACHINE_NAME="routinator"
        # # Run this command to configure your shell:
        # # eval $(docker-machine env --shell=bash routinator)

        # capture any of the DOCKER_xxx variables that were output and create Ansible host vars
        # with the same name and value but with a dm_ name prefix.
        vars = []
        for line in env_lines:
            match = re.search('(DOCKER_[^=]+)="([^"]+)"', line)
            if match:
                env_var_name = match.group(1)
                env_var_value = match.group(2)
                vars.append((env_var_name, env_var_value))

        return vars
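
    # A standalone illustration of the parsing step above, using the sample
    # `docker-machine env` output from the comment (values come from that
    # example, not from a live machine):
    #
    #   >>> import re
    #   >>> line = 'export DOCKER_HOST="tcp://134.209.204.160:2376"'
    #   >>> re.search('(DOCKER_[^=]+)="([^"]+)"', line).groups()
    #   ('DOCKER_HOST', 'tcp://134.209.204.160:2376')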

    def _get_machine_names(self):
        # Filter out machines that are not in the Running state, as we probably can't do anything useful
        # with them.
        ls_command = ['ls', '-q']
        if self.get_option('running_required'):
            ls_command.extend(['--filter', 'state=Running'])

        try:
            ls_lines = self._run_command(ls_command)
        except subprocess.CalledProcessError:
            return []

        return ls_lines.splitlines()

    def _inspect_docker_machine_host(self, node):
        try:
            inspect_lines = self._run_command(['inspect', node])
        except subprocess.CalledProcessError:
            return None

        return json.loads(inspect_lines)

    def _ip_addr_docker_machine_host(self, node):
        try:
            ip_addr = self._run_command(['ip', node])
        except subprocess.CalledProcessError:
            return None

        return ip_addr

    def _should_skip_host(self, machine_name, env_var_tuples, daemon_env):
        if not env_var_tuples:
            warning_prefix = 'Unable to fetch Docker daemon env vars from Docker Machine for host {0}'.format(machine_name)
            if daemon_env in ('require', 'require-silently'):
                if daemon_env == 'require':
                    display.warning('{0}: host will be skipped'.format(warning_prefix))
                return True
            else:  # 'optional', 'optional-silently'
                if daemon_env == 'optional':
                    display.warning('{0}: host will lack dm_DOCKER_xxx variables'.format(warning_prefix))
        return False
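
    # The decision table implemented above, assuming env vars could not be
    # fetched (env_var_tuples is empty); with C(skip) the caller bypasses
    # this check entirely:
    #
    #   daemon_env          host skipped?   warning issued?
    #   require             yes             yes
    #   require-silently    yes             no
    #   optional            no              yes
    #   optional-silently   no              no
    #
    # When env vars were fetched, the host is always kept.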

    def _populate(self):
        daemon_env = self.get_option('daemon_env')
        try:
            for self.node in self._get_machine_names():
                self.node_attrs = self._inspect_docker_machine_host(self.node)
                if not self.node_attrs:
                    continue

                machine_name = self.node_attrs['Driver']['MachineName']

                # query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands
                # that could be used to set environment variables to influence a local Docker client:
                if daemon_env == 'skip':
                    env_var_tuples = []
                else:
                    env_var_tuples = self._get_docker_daemon_variables(machine_name)
                    if self._should_skip_host(machine_name, env_var_tuples, daemon_env):
                        continue

                # add an entry in the inventory for this host
                self.inventory.add_host(machine_name)

                # check for valid ip address from inspect output, else explicitly use ip command to find host ip address
                # this works around an issue seen with Google Compute Platform where the IP address was not available
                # via the 'inspect' subcommand but was via the 'ip' subcommand.
                if self.node_attrs['Driver']['IPAddress']:
                    ip_addr = self.node_attrs['Driver']['IPAddress']
                else:
                    ip_addr = self._ip_addr_docker_machine_host(self.node)

                # set standard Ansible remote host connection settings to details captured from `docker-machine`
                # see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
                self.inventory.set_variable(machine_name, 'ansible_host', ip_addr)
                self.inventory.set_variable(machine_name, 'ansible_port', self.node_attrs['Driver']['SSHPort'])
                self.inventory.set_variable(machine_name, 'ansible_user', self.node_attrs['Driver']['SSHUser'])
                self.inventory.set_variable(machine_name, 'ansible_ssh_private_key_file', self.node_attrs['Driver']['SSHKeyPath'])

                # set variables based on Docker Machine tags
                tags = self.node_attrs['Driver'].get('Tags') or ''
                self.inventory.set_variable(machine_name, 'dm_tags', tags)

                # set variables based on Docker Machine env variables
                for kv in env_var_tuples:
                    self.inventory.set_variable(machine_name, 'dm_{0}'.format(kv[0]), kv[1])

                if self.get_option('verbose_output'):
                    self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', self.node_attrs)

                # Use constructed if applicable
                strict = self.get_option('strict')

                # Composed variables
                self._set_composite_vars(self.get_option('compose'), self.node_attrs, machine_name, strict=strict)

                # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
                self._add_host_to_composed_groups(self.get_option('groups'), self.node_attrs, machine_name, strict=strict)

                # Create groups based on variable values and add the corresponding hosts to it
                self._add_host_to_keyed_groups(self.get_option('keyed_groups'), self.node_attrs, machine_name, strict=strict)

        except Exception as e:
            raise AnsibleError('Unable to fetch hosts from Docker Machine, this was the original exception: %s' %
                               to_native(e), orig_exc=e)

    def verify_file(self, path):
        """Return the possibility of a file being consumable by this plugin."""
        return (
            super(InventoryModule, self).verify_file(path) and
            path.endswith(('docker_machine.yaml', 'docker_machine.yml')))
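
    # A quick check of the file-name gate above (paths are illustrative):
    #
    #   >>> 'inventory/docker_machine.yml'.endswith(('docker_machine.yaml', 'docker_machine.yml'))
    #   True
    #   >>> 'inventory/hosts.yml'.endswith(('docker_machine.yaml', 'docker_machine.yml'))
    #   False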

    def parse(self, inventory, loader, path, cache=True):
        super(InventoryModule, self).parse(inventory, loader, path, cache)
        self._read_config_data(path)
        self._populate()
255 plugins/inventory/docker_swarm.py Normal file
@@ -0,0 +1,255 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)

__metaclass__ = type

DOCUMENTATION = '''
    name: docker_swarm
    plugin_type: inventory
    author:
        - Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
    short_description: Ansible dynamic inventory plugin for Docker swarm nodes.
    requirements:
        - python >= 2.7
        - L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
    extends_documentation_fragment:
        - constructed
    description:
        - Reads inventories from the Docker swarm API.
        - Uses a YAML configuration file docker_swarm.[yml|yaml].
        - "The plugin returns the following groups of swarm nodes: I(all) - all hosts; I(workers) - all worker nodes;
          I(managers) - all manager nodes; I(leader) - the swarm leader node;
          I(nonleaders) - all nodes except the swarm leader."
    options:
        plugin:
            description: The name of this plugin, it should always be set to C(community.general.docker_swarm)
                for this plugin to recognize it as its own.
            type: str
            required: true
            choices: [ docker_swarm, community.general.docker_swarm ]
        docker_host:
            description:
                - Socket of a Docker swarm manager node (C(tcp), C(unix)).
                - "Use C(unix://var/run/docker.sock) to connect via local socket."
            type: str
            required: true
            aliases: [ docker_url ]
        verbose_output:
            description: Toggle to (not) include all available node metadata (e.g. C(Platform), C(Architecture), C(OS),
                C(EngineVersion))
            type: bool
            default: yes
        tls:
            description: Connect using TLS without verifying the authenticity of the Docker host server.
            type: bool
            default: no
        validate_certs:
            description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker
                host server.
            type: bool
            default: no
            aliases: [ tls_verify ]
        client_key:
            description: Path to the client's TLS key file.
            type: path
            aliases: [ tls_client_key, key_path ]
        ca_cert:
            description: Use a CA certificate when performing server verification by providing the path to a CA
                certificate file.
            type: path
            aliases: [ tls_ca_cert, cacert_path ]
        client_cert:
            description: Path to the client's TLS certificate file.
            type: path
            aliases: [ tls_client_cert, cert_path ]
        tls_hostname:
            description: When verifying the authenticity of the Docker host server, provide the expected name of
                the server.
            type: str
        ssl_version:
            description: Provide a valid SSL version number. Default value determined by ssl.py module.
            type: str
        api_version:
            description:
                - The version of the Docker API running on the Docker Host.
                - Defaults to the latest version of the API supported by docker-py.
            type: str
            aliases: [ docker_api_version ]
        timeout:
            description:
                - The maximum amount of time in seconds to wait on a response from the API.
                - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT)
                  will be used instead. If the environment variable is not set, the default value will be used.
            type: int
            default: 60
            aliases: [ time_out ]
        include_host_uri:
            description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the
                swarm leader in format of C(tcp://172.16.0.1:2376). This value may be used without additional
                modification as value of option I(docker_host) in Docker Swarm modules when connecting via API.
                The port always defaults to C(2376).
            type: bool
            default: no
        include_host_uri_port:
            description: Override the detected port number included in I(ansible_host_uri)
            type: int
'''

EXAMPLES = '''
# Minimal example using local docker
plugin: community.general.docker_swarm
docker_host: unix://var/run/docker.sock

# Minimal example using remote docker
plugin: community.general.docker_swarm
docker_host: tcp://my-docker-host:2375

# Example using remote docker with unverified TLS
plugin: community.general.docker_swarm
docker_host: tcp://my-docker-host:2376
tls: yes

# Example using remote docker with verified TLS and client certificate verification
plugin: community.general.docker_swarm
docker_host: tcp://my-docker-host:2376
validate_certs: yes
ca_cert: /somewhere/ca.pem
client_key: /somewhere/key.pem
client_cert: /somewhere/cert.pem

# Example using constructed features to create groups and set ansible_host
plugin: community.general.docker_swarm
docker_host: tcp://my-docker-host:2375
strict: False
keyed_groups:
  # add e.g. x86_64 hosts to an arch_x86_64 group
  - prefix: arch
    key: 'Description.Platform.Architecture'
  # add e.g. linux hosts to an os_linux group
  - prefix: os
    key: 'Description.Platform.OS'
  # create a group per node label
  # e.g. a node labeled w/ "production" ends up in group "label_production"
  # hint: labels containing special characters will be converted to safe names
  - key: 'Spec.Labels'
    prefix: label
'''

from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native
from ansible_collections.community.general.plugins.module_utils.docker.common import update_tls_hostname, get_connect_params
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible.parsing.utils.addresses import parse_address

try:
    import docker
    HAS_DOCKER = True
except ImportError:
    HAS_DOCKER = False


class InventoryModule(BaseInventoryPlugin, Constructable):
    ''' Host inventory parser for ansible using Docker swarm as source. '''

    NAME = 'community.general.docker_swarm'

    def _fail(self, msg):
        raise AnsibleError(msg)

    def _populate(self):
        raw_params = dict(
            docker_host=self.get_option('docker_host'),
            tls=self.get_option('tls'),
            tls_verify=self.get_option('validate_certs'),
            key_path=self.get_option('client_key'),
            cacert_path=self.get_option('ca_cert'),
            cert_path=self.get_option('client_cert'),
            tls_hostname=self.get_option('tls_hostname'),
            api_version=self.get_option('api_version'),
            timeout=self.get_option('timeout'),
            ssl_version=self.get_option('ssl_version'),
            debug=None,
        )
        update_tls_hostname(raw_params)
        connect_params = get_connect_params(raw_params, fail_function=self._fail)
        self.client = docker.DockerClient(**connect_params)
        self.inventory.add_group('all')
        self.inventory.add_group('manager')
        self.inventory.add_group('worker')
        self.inventory.add_group('leader')
        self.inventory.add_group('nonleaders')

        if self.get_option('include_host_uri'):
            if self.get_option('include_host_uri_port'):
                host_uri_port = str(self.get_option('include_host_uri_port'))
            elif self.get_option('tls') or self.get_option('validate_certs'):
                host_uri_port = '2376'
            else:
                host_uri_port = '2375'

        try:
            self.nodes = self.client.nodes.list()
            for self.node in self.nodes:
                self.node_attrs = self.client.nodes.get(self.node.id).attrs
                self.inventory.add_host(self.node_attrs['ID'])
                self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role'])
                self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host',
                                            self.node_attrs['Status']['Addr'])
                if self.get_option('include_host_uri'):
                    self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
                                                'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port)
                if self.get_option('verbose_output'):
                    self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs)
                if 'ManagerStatus' in self.node_attrs:
                    if self.node_attrs['ManagerStatus'].get('Leader'):
                        # This is a workaround for a Docker bug where in some cases the Leader IP is 0.0.0.0
                        # Check moby/moby#35437 for details
                        swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \
                            self.node_attrs['Status']['Addr']
                        if self.get_option('include_host_uri'):
                            self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
                                                        'tcp://' + swarm_leader_ip + ':' + host_uri_port)
                        self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip)
                        self.inventory.add_host(self.node_attrs['ID'], group='leader')
                    else:
                        self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
                else:
                    self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
                # Use constructed if applicable
                strict = self.get_option('strict')
                # Composed variables
                self._set_composite_vars(self.get_option('compose'),
                                         self.node_attrs,
                                         self.node_attrs['ID'],
                                         strict=strict)
                # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
                self._add_host_to_composed_groups(self.get_option('groups'),
                                                  self.node_attrs,
                                                  self.node_attrs['ID'],
                                                  strict=strict)
                # Create groups based on variable values and add the corresponding hosts to it
                self._add_host_to_keyed_groups(self.get_option('keyed_groups'),
                                               self.node_attrs,
                                               self.node_attrs['ID'],
                                               strict=strict)
        except Exception as e:
            raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
                               to_native(e))

    def verify_file(self, path):
        """Return the possibility of a file being consumable by this plugin."""
        return (
            super(InventoryModule, self).verify_file(path) and
            path.endswith(('docker_swarm.yaml', 'docker_swarm.yml')))

    def parse(self, inventory, loader, path, cache=True):
        if not HAS_DOCKER:
            raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
                               'https://github.com/docker/docker-py.')
        super(InventoryModule, self).parse(inventory, loader, path, cache)
        self._read_config_data(path)
        self._populate()
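
As a cross-check of the host URI port selection in _populate above, a minimal standalone restatement (function name and test values are illustrative):

def pick_host_uri_port(include_host_uri_port, tls, validate_certs):
    # an explicit override wins; otherwise TLS implies 2376, plain TCP 2375
    if include_host_uri_port:
        return str(include_host_uri_port)
    if tls or validate_certs:
        return '2376'
    return '2375'

assert pick_host_uri_port(None, False, False) == '2375'
assert pick_host_uri_port(None, True, False) == '2376'
assert pick_host_uri_port(2377, True, True) == '2377'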
@@ -9,6 +9,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    name: gitlab_runners
    plugin_type: inventory
    author:
        - Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
    short_description: Ansible dynamic inventory plugin for GitLab runners.
256 plugins/inventory/kubevirt.py Normal file
@@ -0,0 +1,256 @@
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)

__metaclass__ = type

DOCUMENTATION = '''
    name: kubevirt
    plugin_type: inventory
    author:
        - KubeVirt Team (@kubevirt)

    short_description: KubeVirt inventory source
    extends_documentation_fragment:
        - inventory_cache
        - constructed
    description:
        - Fetch running VirtualMachines for one or more namespaces.
        - Groups by namespace, namespace_vms and labels.
        - Uses kubevirt.(yml|yaml) YAML configuration file to set parameter values.

    options:
        plugin:
            description: token that ensures this is a source file for the 'kubevirt' plugin.
            required: True
            choices: ['kubevirt', 'community.general.kubevirt']
            type: str
        host_format:
            description:
                - Specify the format of the host in the inventory group.
            default: "{namespace}-{name}-{uid}"
        connections:
            type: list
            description:
                - Optional list of cluster connection settings. If no connections are provided, the default
                  I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces
                  the active user is authorized to access.
            suboptions:
                name:
                    description:
                        - Optional name to assign to the cluster. If not provided, a name is constructed from the server
                          and port.
                    type: str
                kubeconfig:
                    description:
                        - Path to an existing Kubernetes config file. If not provided, and no other connection
                          options are provided, the OpenShift client will attempt to load the default
                          configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG
                          environment variable.
                    type: str
                context:
                    description:
                        - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
                          variable.
                    type: str
                host:
                    description:
                        - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
                    type: str
                api_key:
                    description:
                        - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment
                          variable.
                    type: str
                username:
                    description:
                        - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME
                          environment variable.
                    type: str
                password:
                    description:
                        - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD
                          environment variable.
                    type: str
                cert_file:
                    description:
                        - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
                          environment variable.
                    type: str
                key_file:
                    description:
                        - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_HOST
                          environment variable.
                    type: str
                ssl_ca_cert:
                    description:
                        - Path to a CA certificate used to authenticate with the API. Can also be specified via
                          K8S_AUTH_SSL_CA_CERT environment variable.
                    type: str
                verify_ssl:
                    description:
                        - "Whether or not to verify the API server's SSL certificates. Can also be specified via
                          K8S_AUTH_VERIFY_SSL environment variable."
                    type: bool
                namespaces:
                    description:
                        - List of namespaces. If not specified, will fetch all virtual machines for all namespaces the user is
                          authorized to access.
                    type: list
                network_name:
                    description:
                        - In case of multiple networks attached to a virtual machine, define which interface should be returned
                          as primary IP address.
                    type: str
                    aliases: [ interface_name ]
                api_version:
                    description:
                        - "Specify the KubeVirt API version."
                    type: str
                annotation_variable:
                    description:
                        - "Specify the name of the annotation which provides data to be used as inventory host variables."
                        - "Note that the value of the ansible annotation should be JSON."
                    type: str
                    default: 'ansible'
    requirements:
        - "openshift >= 0.6"
        - "PyYAML >= 3.11"
'''

EXAMPLES = '''
# File must be named kubevirt.yaml or kubevirt.yml

# Authenticate with token, and return all virtual machines for all namespaces
plugin: community.general.kubevirt
connections:
  - host: https://kubevirt.io
    token: xxxxxxxxxxxxxxxx
    ssl_verify: false

# Use default config (~/.kube/config) file and active context, and return vms with interfaces
# connected to network myovsnetwork and from namespace vms
plugin: community.general.kubevirt
connections:
  - namespaces:
      - vms
    network_name: myovsnetwork
'''

import json

from ansible_collections.community.kubernetes.plugins.inventory.k8s import K8sInventoryException, InventoryModule as K8sInventoryModule, format_dynamic_api_exc

try:
    from openshift.dynamic.exceptions import DynamicApiError
except ImportError:
    pass


API_VERSION = 'kubevirt.io/v1alpha3'


class InventoryModule(K8sInventoryModule):
    NAME = 'community.general.kubevirt'

    def setup(self, config_data, cache, cache_key):
        self.config_data = config_data
        super(InventoryModule, self).setup(config_data, cache, cache_key)

    def fetch_objects(self, connections):
        client = self.get_api_client()
        vm_format = self.config_data.get('host_format', '{namespace}-{name}-{uid}')

        if connections:
            for connection in connections:
                client = self.get_api_client(**connection)
                name = connection.get('name', self.get_default_host_name(client.configuration.host))
                if connection.get('namespaces'):
                    namespaces = connection['namespaces']
                else:
                    namespaces = self.get_available_namespaces(client)
                interface_name = connection.get('network_name')
                api_version = connection.get('api_version', API_VERSION)
                annotation_variable = connection.get('annotation_variable', 'ansible')
                for namespace in namespaces:
                    self.get_vms_for_namespace(client, name, namespace, vm_format, interface_name, api_version, annotation_variable)
        else:
            name = self.get_default_host_name(client.configuration.host)
            namespaces = self.get_available_namespaces(client)
            for namespace in namespaces:
                # no per-connection settings here, so fall back to the defaults
                self.get_vms_for_namespace(client, name, namespace, vm_format, None, API_VERSION, 'ansible')

    def get_vms_for_namespace(self, client, name, namespace, name_format, interface_name=None, api_version=None, annotation_variable=None):
        v1_vm = client.resources.get(api_version=api_version, kind='VirtualMachineInstance')
        try:
            obj = v1_vm.get(namespace=namespace)
        except DynamicApiError as exc:
            self.display.debug(exc)
            raise K8sInventoryException('Error fetching Virtual Machines list: %s' % format_dynamic_api_exc(exc))

        namespace_group = 'namespace_{0}'.format(namespace)
        namespace_vms_group = '{0}_vms'.format(namespace_group)

        name = self._sanitize_group_name(name)
        namespace_group = self._sanitize_group_name(namespace_group)
        namespace_vms_group = self._sanitize_group_name(namespace_vms_group)
        self.inventory.add_group(name)
        self.inventory.add_group(namespace_group)
        self.inventory.add_child(name, namespace_group)
        self.inventory.add_group(namespace_vms_group)
        self.inventory.add_child(namespace_group, namespace_vms_group)
        for vm in obj.items:
            if not (vm.status and vm.status.interfaces):
                continue

            # Find interface by its name:
            if interface_name is None:
                interface = vm.status.interfaces[0]
            else:
                interface = next(
                    (i for i in vm.status.interfaces if i.name == interface_name),
                    None
                )

            # If interface is not found or IP address is not reported skip this VM:
            if interface is None or interface.ipAddress is None:
                continue

            vm_name = name_format.format(namespace=vm.metadata.namespace, name=vm.metadata.name, uid=vm.metadata.uid)
            vm_ip = interface.ipAddress
            vm_annotations = {} if not vm.metadata.annotations else dict(vm.metadata.annotations)

            self.inventory.add_host(vm_name)

            if vm.metadata.labels:
                # create a group for each label_value
                for key, value in vm.metadata.labels:
                    group_name = 'label_{0}_{1}'.format(key, value)
                    group_name = self._sanitize_group_name(group_name)
                    self.inventory.add_group(group_name)
                    self.inventory.add_child(group_name, vm_name)
                vm_labels = dict(vm.metadata.labels)
            else:
                vm_labels = {}

            self.inventory.add_child(namespace_vms_group, vm_name)

            # add hostvars
            self.inventory.set_variable(vm_name, 'ansible_host', vm_ip)
            self.inventory.set_variable(vm_name, 'labels', vm_labels)
            self.inventory.set_variable(vm_name, 'annotations', vm_annotations)
            self.inventory.set_variable(vm_name, 'object_type', 'vm')
            self.inventory.set_variable(vm_name, 'resource_version', vm.metadata.resourceVersion)
            self.inventory.set_variable(vm_name, 'uid', vm.metadata.uid)

            # Add all variables which are listed in the 'ansible' annotation:
            annotations_data = json.loads(vm_annotations.get(annotation_variable, "{}"))
            for k, v in annotations_data.items():
                self.inventory.set_variable(vm_name, k, v)
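
    # A tiny illustration of the naming and annotation handling above, with
    # made-up values (cf. the host_format and annotation_variable options):
    #
    #   >>> '{namespace}-{name}-{uid}'.format(namespace='vms', name='testvm', uid='1234')
    #   'vms-testvm-1234'
    #   >>> import json
    #   >>> json.loads('{"ansible_user": "cloud-user"}')
    #   {'ansible_user': 'cloud-user'}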

    def verify_file(self, path):
        if super(InventoryModule, self).verify_file(path):
            if path.endswith(('kubevirt.yml', 'kubevirt.yaml')):
                return True
        return False
@@ -6,6 +6,7 @@ __metaclass__ = type

DOCUMENTATION = r'''
    name: linode
    plugin_type: inventory
    author:
        - Luke Murphy (@decentral1se)
    short_description: Ansible dynamic inventory plugin for Linode.
@@ -16,10 +17,7 @@ DOCUMENTATION = r'''
        - Reads inventories from the Linode API v4.
        - Uses a YAML configuration file that ends with linode.(yml|yaml).
        - Linode labels are used by default as the hostnames.
        - The default inventory groups are built from groups (deprecated by
          Linode) and not tags.
    extends_documentation_fragment:
        - constructed
        - The inventory groups are built from groups and not tags.
    options:
        plugin:
            description: marks this as an instance of the 'linode' plugin
@@ -34,23 +32,12 @@ DOCUMENTATION = r'''
            description: Populate inventory with instances in this region.
            default: []
            type: list
        tags:
            description: Populate inventory only with instances which have at least one of the tags listed here.
            default: []
            type: list
            version_added: 2.0.0
            required: false
        types:
            description: Populate inventory with instances with this type.
            default: []
            type: list
        strict:
            version_added: 2.0.0
        compose:
            version_added: 2.0.0
        groups:
            version_added: 2.0.0
        keyed_groups:
            version_added: 2.0.0
            required: false
'''

EXAMPLES = r'''
@@ -64,38 +51,23 @@ regions:
  - eu-west
types:
  - g5-standard-2

# Example with keyed_groups, groups, and compose
plugin: community.general.linode
access_token: foobar
keyed_groups:
  - key: tags
    separator: ''
  - key: region
    prefix: region
groups:
  webservers: "'web' in (tags|list)"
  mailservers: "'mail' in (tags|list)"
compose:
  ansible_port: 2222
'''

import os

from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.six import string_types
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible.plugins.inventory import BaseInventoryPlugin


try:
    from linode_api4 import LinodeClient
    from linode_api4.errors import ApiError as LinodeApiError
    HAS_LINODE = True
except ImportError:
    HAS_LINODE = False
    raise AnsibleError('the Linode dynamic inventory plugin requires linode_api4.')


class InventoryModule(BaseInventoryPlugin, Constructable):
class InventoryModule(BaseInventoryPlugin):

    NAME = 'community.general.linode'

@@ -138,7 +110,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
        for linode_group in self.linode_groups:
            self.inventory.add_group(linode_group)

    def _filter_by_config(self, regions, types, tags):
    def _filter_by_config(self, regions, types):
        """Filter instances by user specified configuration."""
        if regions:
            self.instances = [
@@ -152,12 +124,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
                if instance.type.id in types
            ]

        if tags:
            self.instances = [
                instance for instance in self.instances
                if any(tag in instance.tags for tag in tags)
            ]

    def _add_instances_to_groups(self):
        """Add instance names to their dynamic inventory groups."""
        for instance in self.instances:
@@ -202,10 +168,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
                'type_to_be': list,
                'value': config_data.get('types', [])
            },
            'tags': {
                'type_to_be': list,
                'value': config_data.get('tags', [])
            },
        }

        for name in options:
@@ -217,9 +179,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable):

        regions = options['regions']['value']
        types = options['types']['value']
        tags = options['tags']['value']

        return regions, types, tags
        return regions, types

    def verify_file(self, path):
        """Verify the Linode configuration file."""
@@ -233,35 +194,14 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
        """Dynamically parse the Linode cloud inventory."""
        super(InventoryModule, self).parse(inventory, loader, path)

        if not HAS_LINODE:
            raise AnsibleError('the Linode dynamic inventory plugin requires linode_api4.')

        config_data = self._read_config_data(path)
        self._build_client()

        self._get_instances_inventory()

        strict = self.get_option('strict')
        regions, types, tags = self._get_query_options(config_data)
        self._filter_by_config(regions, types, tags)
        regions, types = self._get_query_options(config_data)
        self._filter_by_config(regions, types)

        self._add_groups()
        self._add_instances_to_groups()
        self._add_hostvars_for_instances()
        for instance in self.instances:
            variables = self.inventory.get_host(instance.label).get_vars()
            self._add_host_to_composed_groups(
                self.get_option('groups'),
                variables,
                instance.label,
                strict=strict)
            self._add_host_to_keyed_groups(
                self.get_option('keyed_groups'),
                variables,
                instance.label,
                strict=strict)
            self._set_composite_vars(
                self.get_option('compose'),
                variables,
                instance.label,
                strict=strict)
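
For context, the tags filtering touched by the hunks above boils down to an any() membership test; a standalone sketch with stand-in instance objects (names and data are illustrative):

class FakeInstance:
    def __init__(self, label, tags):
        self.label = label
        self.tags = tags

instances = [FakeInstance('web1', ['web', 'prod']), FakeInstance('db1', ['db'])]
tags = ['web', 'mail']
kept = [i for i in instances if any(tag in i.tags for tag in tags)]
print([i.label for i in kept])  # ['web1']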
Some files were not shown because too many files have changed in this diff