Compare commits


2 Commits
2.0.1 ... 1.0.0

Author         SHA1        Message                    Date
Felix Fontein  acea082a7c  Add changelog for 1.0.0.   2020-07-31 13:54:10 +02:00
Felix Fontein  0cff1f116f  Add release summary.       2020-07-31 13:52:28 +02:00
1552 changed files with 100480 additions and 27862 deletions

View File

@@ -1,3 +0,0 @@
## Azure Pipelines Configuration
Please see the [Documentation](https://github.com/ansible/community/wiki/Testing:-Azure-Pipelines) for more information.

View File

@@ -1,313 +0,0 @@
trigger:
  batch: true
  branches:
    include:
      - main
      - stable-*

pr:
  autoCancel: true
  branches:
    include:
      - main
      - stable-*

schedules:
  - cron: 0 9 * * *
    displayName: Nightly
    always: true
    branches:
      include:
        - main
        - stable-*

variables:
  - name: checkoutPath
    value: ansible_collections/community/general
  - name: coverageBranches
    value: main
  - name: pipelinesCoverage
    value: coverage
  - name: entryPoint
    value: tests/utils/shippable/shippable.sh
  - name: fetchDepth
    value: 0

resources:
  containers:
    - container: default
      image: quay.io/ansible/azure-pipelines-test-container:1.7.1

pool: Standard

stages:
### Sanity
  - stage: Sanity_devel
    displayName: Sanity devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: devel/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
            - test: extra
  - stage: Sanity_2_10
    displayName: Sanity 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: 2.10/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
  - stage: Sanity_2_9
    displayName: Sanity 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: 2.9/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
### Units
  - stage: Units_devel
    displayName: Units devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: devel/units/{0}/1
          targets:
            - test: 2.6
            - test: 2.7
            - test: 3.5
            - test: 3.6
            - test: 3.7
            - test: 3.8
            - test: 3.9
  - stage: Units_2_10
    displayName: Units 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.10/units/{0}/1
          targets:
            - test: 2.6
            - test: 2.7
            - test: 3.5
            - test: 3.6
            - test: 3.7
            - test: 3.8
            - test: 3.9
  - stage: Units_2_9
    displayName: Units 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.9/units/{0}/1
          targets:
            - test: 2.6
            - test: 2.7
            - test: 3.5
            - test: 3.6
            - test: 3.7
            - test: 3.8
### Remote
  - stage: Remote_devel
    displayName: Remote devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: devel/{0}
          targets:
            - name: OS X 10.11
              test: osx/10.11
            - name: macOS 10.15
              test: macos/10.15
            - name: macOS 11.1
              test: macos/11.1
            - name: RHEL 7.8
              test: rhel/7.8
            - name: RHEL 8.2
              test: rhel/8.2
            - name: FreeBSD 11.4
              test: freebsd/11.4
            - name: FreeBSD 12.2
              test: freebsd/12.2
          groups:
            - 1
            - 2
            - 3
  - stage: Remote_2_10
    displayName: Remote 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.10/{0}
          targets:
            - name: macOS 11.1
              test: macos/11.1
            - name: RHEL 8.2
              test: rhel/8.2
            - name: FreeBSD 12.1
              test: freebsd/12.1
          groups:
            - 1
            - 2
  - stage: Remote_2_9
    displayName: Remote 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.9/{0}
          targets:
            - name: RHEL 8.2
              test: rhel/8.2
            - name: FreeBSD 12.0
              test: freebsd/12.0
          groups:
            - 1
            - 2
### Docker
  - stage: Docker_devel
    displayName: Docker devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: devel/linux/{0}
          targets:
            - name: CentOS 6
              test: centos6
            - name: CentOS 7
              test: centos7
            - name: CentOS 8
              test: centos8
            - name: Fedora 32
              test: fedora32
            - name: Fedora 33
              test: fedora33
            - name: openSUSE 15 py2
              test: opensuse15py2
            - name: openSUSE 15 py3
              test: opensuse15
            - name: Ubuntu 16.04
              test: ubuntu1604
            - name: Ubuntu 18.04
              test: ubuntu1804
            - name: Ubuntu 20.04
              test: ubuntu2004
          groups:
            - 1
            - 2
            - 3
  - stage: Docker_2_10
    displayName: Docker 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.10/linux/{0}
          targets:
            - name: CentOS 8
              test: centos8
            - name: Fedora 32
              test: fedora32
            - name: openSUSE 15 py3
              test: opensuse15
          groups:
            - 2
            - 3
  - stage: Docker_2_9
    displayName: Docker 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.9/linux/{0}
          targets:
            - name: CentOS 8
              test: centos8
            - name: Fedora 31
              test: fedora31
            - name: openSUSE 15 py3
              test: opensuse15
          groups:
            - 2
            - 3
### Cloud
  - stage: Cloud_devel
    displayName: Cloud devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: devel/cloud/{0}/1
          targets:
            - test: 2.7
            - test: 3.6
  - stage: Cloud_2_10
    displayName: Cloud 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.10/cloud/{0}/1
          targets:
            - test: 3.6
  - stage: Cloud_2_9
    displayName: Cloud 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.9/cloud/{0}/1
          targets:
            - test: 3.6
  - stage: Summary
    condition: succeededOrFailed()
    dependsOn:
      - Sanity_devel
      - Sanity_2_9
      - Sanity_2_10
      - Units_devel
      - Units_2_9
      - Units_2_10
      - Remote_devel
      - Remote_2_9
      - Remote_2_10
      - Docker_devel
      - Docker_2_9
      - Docker_2_10
      - Cloud_devel
      - Cloud_2_9
      - Cloud_2_10
    jobs:
      - template: templates/coverage.yml

View File

@@ -1,20 +0,0 @@
#!/usr/bin/env bash
# Aggregate code coverage results for later processing.

set -o pipefail -eu

agent_temp_directory="$1"

PATH="${PWD}/bin:${PATH}"

mkdir "${agent_temp_directory}/coverage/"

options=(--venv --venv-system-site-packages --color -v)

ansible-test coverage combine --export "${agent_temp_directory}/coverage/" "${options[@]}"

if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then
    # Only analyze coverage if the installed version of ansible-test supports it.
    # Doing so allows this script to work unmodified for multiple Ansible versions.
    ansible-test coverage analyze targets generate "${agent_temp_directory}/coverage/coverage-analyze-targets.json" "${options[@]}"
fi

View File

@@ -1,60 +0,0 @@
#!/usr/bin/env python
"""
Combine coverage data from multiple jobs, keeping the data only from the most recent attempt from each job.
Coverage artifacts must be named using the format: "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}"
The recommended coverage artifact name format is: Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)
Keep in mind that Azure Pipelines does not enforce unique job display names (only names).
It is up to pipeline authors to avoid name collisions when deviating from the recommended format.
"""

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import re
import shutil
import sys


def main():
    """Main program entry point."""
    source_directory = sys.argv[1]

    if '/ansible_collections/' in os.getcwd():
        output_path = "tests/output"
    else:
        output_path = "test/results"

    destination_directory = os.path.join(output_path, 'coverage')

    if not os.path.exists(destination_directory):
        os.makedirs(destination_directory)

    jobs = {}
    count = 0

    for name in os.listdir(source_directory):
        match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)
        label = match.group('label')
        attempt = int(match.group('attempt'))
        jobs[label] = max(attempt, jobs.get(label, 0))

    for label, attempt in jobs.items():
        name = 'Coverage {attempt} {label}'.format(label=label, attempt=attempt)
        source = os.path.join(source_directory, name)
        source_files = os.listdir(source)

        for source_file in source_files:
            source_path = os.path.join(source, source_file)
            destination_path = os.path.join(destination_directory, source_file + '.' + label)
            print('"%s" -> "%s"' % (source_path, destination_path))
            shutil.copyfile(source_path, destination_path)
            count += 1

    print('Coverage file count: %d' % count)
    print('##vso[task.setVariable variable=coverageFileCount]%d' % count)
    print('##vso[task.setVariable variable=outputPath]%s' % output_path)


if __name__ == '__main__':
    main()
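
For illustration only (these artifact names are hypothetical), a minimal Python sketch of the selection rule the script applies: when a job was retried, only its highest $(System.JobAttempt) is kept.

```
import re

# Hypothetical artifact names: one job retried twice, one job run once.
names = [
    'Coverage 1 Units devel Python 3.8',
    'Coverage 2 Units devel Python 3.8',  # retry of the same job
    'Coverage 1 Sanity devel Test 1',
]

jobs = {}
for name in names:
    match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)
    label, attempt = match.group('label'), int(match.group('attempt'))
    jobs[label] = max(attempt, jobs.get(label, 0))

# Only the latest attempt per label survives:
print(jobs)  # {'Units devel Python 3.8': 2, 'Sanity devel Test 1': 1}
```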

View File

@@ -1,24 +0,0 @@
#!/usr/bin/env bash
# Check the test results and set variables for use in later steps.

set -o pipefail -eu

if [[ "$PWD" =~ /ansible_collections/ ]]; then
    output_path="tests/output"
else
    output_path="test/results"
fi

echo "##vso[task.setVariable variable=outputPath]${output_path}"

if compgen -G "${output_path}"'/junit/*.xml' > /dev/null; then
    echo "##vso[task.setVariable variable=haveTestResults]true"
fi

if compgen -G "${output_path}"'/bot/ansible-test-*' > /dev/null; then
    echo "##vso[task.setVariable variable=haveBotResults]true"
fi

if compgen -G "${output_path}"'/coverage/*' > /dev/null; then
    echo "##vso[task.setVariable variable=haveCoverageData]true"
fi
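
The `##vso[task.setVariable ...]` strings are Azure Pipelines logging commands: a step sets a variable for later steps simply by printing to stdout. A rough Python equivalent of one of the checks above (the path is an assumed collection layout, not part of the script):

```
import glob
import os

output_path = 'tests/output'  # assumed collection layout

# Printing a logging command to stdout is how a step sets a pipeline variable.
if glob.glob(os.path.join(output_path, 'junit', '*.xml')):
    print('##vso[task.setVariable variable=haveTestResults]true')
```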

View File

@@ -1,27 +0,0 @@
#!/usr/bin/env bash
# Upload code coverage reports to codecov.io.
# Multiple coverage files from multiple languages are accepted and aggregated after upload.
# Python coverage, as well as PowerShell and Python stubs can all be uploaded.

set -o pipefail -eu

output_path="$1"

curl --silent --show-error https://codecov.io/bash > codecov.sh

for file in "${output_path}"/reports/coverage*.xml; do
    name="${file}"
    name="${name##*/}"          # remove path
    name="${name##coverage=}"   # remove 'coverage=' prefix if present
    name="${name%.xml}"         # remove '.xml' suffix
    bash codecov.sh \
        -f "${file}" \
        -n "${name}" \
        -X coveragepy \
        -X gcov \
        -X fix \
        -X search \
        -X xcode \
        || echo "Failed to upload code coverage report to codecov.io: ${file}"
done
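
The three parameter expansions derive the upload name from the file name. The same derivation rendered in Python (the function name is ours, for illustration):

```
import os

def report_name(path):
    name = os.path.basename(path)      # ${name##*/}: remove path
    if name.startswith('coverage='):   # ${name##coverage=}: remove prefix if present
        name = name[len('coverage='):]
    if name.endswith('.xml'):          # ${name%.xml}: remove suffix
        name = name[:-len('.xml')]
    return name

print(report_name('tests/output/reports/coverage=python-3.8.xml'))  # python-3.8
```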

View File

@@ -1,15 +0,0 @@
#!/usr/bin/env bash
# Generate code coverage reports for uploading to Azure Pipelines and codecov.io.

set -o pipefail -eu

PATH="${PWD}/bin:${PATH}"

if ! ansible-test --help >/dev/null 2>&1; then
    # Install the devel version of ansible-test for generating code coverage reports.
    # This is only used by Ansible Collections, which are typically tested against multiple Ansible versions (in separate jobs).
    # Since a version of ansible-test is required that can work with the output from multiple older releases, the devel version is used.
    pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
fi

ansible-test coverage xml --stub --venv --venv-system-site-packages --color -v

View File

@@ -1,34 +0,0 @@
#!/usr/bin/env bash
# Configure the test environment and run the tests.

set -o pipefail -eu

entry_point="$1"
test="$2"
read -r -a coverage_branches <<< "$3"  # space separated list of branches to run code coverage on for scheduled builds

export COMMIT_MESSAGE
export COMPLETE
export COVERAGE
export IS_PULL_REQUEST

if [ "${SYSTEM_PULLREQUEST_TARGETBRANCH:-}" ]; then
    IS_PULL_REQUEST=true
    COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD^2)
else
    IS_PULL_REQUEST=
    COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD)
fi

COMPLETE=
COVERAGE=

if [ "${BUILD_REASON}" = "Schedule" ]; then
    COMPLETE=yes

    if printf '%s\n' "${coverage_branches[@]}" | grep -q "^${BUILD_SOURCEBRANCHNAME}$"; then
        COVERAGE=yes
    fi
fi

"${entry_point}" "${test}" 2>&1 | "$(dirname "$0")/time-command.py"

View File

@@ -1,25 +0,0 @@
#!/usr/bin/env python
"""Prepends a relative timestamp to each input line from stdin and writes it to stdout."""

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import sys
import time


def main():
    """Main program entry point."""
    start = time.time()

    sys.stdin.reconfigure(errors='surrogateescape')
    sys.stdout.reconfigure(errors='surrogateescape')

    for line in sys.stdin:
        seconds = time.time() - start
        sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line))
        sys.stdout.flush()


if __name__ == '__main__':
    main()

View File

@@ -1,39 +0,0 @@
# This template adds a job for processing code coverage data.
# It will upload results to Azure Pipelines and codecov.io.
# Use it from a job stage that completes after all other jobs have completed.
# This can be done by placing it in a separate summary stage that runs after the test stage(s) have completed.

jobs:
  - job: Coverage
    displayName: Code Coverage
    container: default
    workspace:
      clean: all
    steps:
      - checkout: self
        fetchDepth: $(fetchDepth)
        path: $(checkoutPath)
      - task: DownloadPipelineArtifact@2
        displayName: Download Coverage Data
        inputs:
          path: coverage/
          patterns: "Coverage */*=coverage.combined"
      - bash: .azure-pipelines/scripts/combine-coverage.py coverage/
        displayName: Combine Coverage Data
      - bash: .azure-pipelines/scripts/report-coverage.sh
        displayName: Generate Coverage Report
        condition: gt(variables.coverageFileCount, 0)
      - task: PublishCodeCoverageResults@1
        inputs:
          codeCoverageTool: Cobertura
          # Azure Pipelines only accepts a single coverage data file.
          # That means only Python or PowerShell coverage can be uploaded, but not both.
          # Set the "pipelinesCoverage" variable to determine which type is uploaded.
          # Use "coverage" for Python and "coverage-powershell" for PowerShell.
          summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml"
        displayName: Publish to Azure Pipelines
        condition: gt(variables.coverageFileCount, 0)
      - bash: .azure-pipelines/scripts/publish-codecov.sh "$(outputPath)"
        displayName: Publish to codecov.io
        condition: gt(variables.coverageFileCount, 0)
        continueOnError: true

View File

@@ -1,55 +0,0 @@
# This template uses the provided targets and optional groups to generate a matrix which is then passed to the test template.
# If this matrix template does not provide the required functionality, consider using the test template directly instead.

parameters:
  # A required list of dictionaries, one per test target.
  # Each item in the list must contain a "test" or "name" key.
  # Both may be provided. If one is omitted, the other will be used.
  - name: targets
    type: object

  # An optional list of values which will be used to multiply the targets list into a matrix.
  # Values can be strings or numbers.
  - name: groups
    type: object
    default: []

  # An optional format string used to generate the job name.
  # - {0} is the name of an item in the targets list.
  - name: nameFormat
    type: string
    default: "{0}"

  # An optional format string used to generate the test name.
  # - {0} is the name of an item in the targets list.
  - name: testFormat
    type: string
    default: "{0}"

  # An optional format string used to add the group to the job name.
  # {0} is the formatted name of an item in the targets list.
  # {{1}} is the group -- be sure to include the double "{{" and "}}".
  - name: nameGroupFormat
    type: string
    default: "{0} - {{1}}"

  # An optional format string used to add the group to the test name.
  # {0} is the formatted test of an item in the targets list.
  # {{1}} is the group -- be sure to include the double "{{" and "}}".
  - name: testGroupFormat
    type: string
    default: "{0}/{{1}}"

jobs:
  - template: test.yml
    parameters:
      jobs:
        - ${{ if eq(length(parameters.groups), 0) }}:
            - ${{ each target in parameters.targets }}:
                - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }}
                  test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }}
        - ${{ if not(eq(length(parameters.groups), 0)) }}:
            - ${{ each group in parameters.groups }}:
                - ${{ each target in parameters.targets }}:
                    - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }}
                      test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }}
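
The double braces matter because the group formats go through format() twice. Azure Pipelines' format() follows composite-formatting rules in which {{ and }} are literal braces; Python's str.format uses the same escaping, so a short sketch shows why {{1}} in the nameGroupFormat default survives the first pass:

```
name_group_format = '{0} - {{1}}'   # the nameGroupFormat default
name_format = 'Test {0}'            # a nameFormat such as the sanity stages use

combined = name_group_format.format(name_format)  # 'Test {0} - {1}'
print(combined.format(3, 2))                      # 'Test 3 - 2'
```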

View File

@@ -1,45 +0,0 @@
# This template uses the provided list of jobs to create one or more test jobs.
# It can be used directly if needed, or through the matrix template.

parameters:
  # A required list of dictionaries, one per test job.
  # Each item in the list must contain a "job" and "name" key.
  - name: jobs
    type: object

jobs:
  - ${{ each job in parameters.jobs }}:
      - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }}
        displayName: ${{ job.name }}
        container: default
        workspace:
          clean: all
        steps:
          - checkout: self
            fetchDepth: $(fetchDepth)
            path: $(checkoutPath)
          - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)"
            displayName: Run Tests
          - bash: .azure-pipelines/scripts/process-results.sh
            condition: succeededOrFailed()
            displayName: Process Results
          - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)"
            condition: eq(variables.haveCoverageData, 'true')
            displayName: Aggregate Coverage Data
          - task: PublishTestResults@2
            condition: eq(variables.haveTestResults, 'true')
            inputs:
              testResultsFiles: "$(outputPath)/junit/*.xml"
            displayName: Publish Test Results
          - task: PublishPipelineArtifact@1
            condition: eq(variables.haveBotResults, 'true')
            displayName: Publish Bot Results
            inputs:
              targetPath: "$(outputPath)/bot/"
              artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
          - task: PublishPipelineArtifact@1
            condition: eq(variables.haveCoverageData, 'true')
            displayName: Publish Coverage Data
            inputs:
              targetPath: "$(Agent.TempDirectory)/coverage/"
              artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"

.github/BOTMETA.yml

File diff suppressed because it is too large.

View File

@@ -1,5 +0,0 @@
---
backport_branch_prefix: patchback/backports/
backport_label_prefix: backport-
target_branch_prefix: stable-
...

File diff suppressed because it is too large.

View File

@@ -1,12 +1,9 @@
# Community General Collection
[![Build Status](https://dev.azure.com/ansible/community.general/_apis/build/status/CI?branchName=stable-2)](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
[![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.general)](https://codecov.io/gh/ansible-collections/community.general)
[![Run Status](https://api.shippable.com/projects/5e664a167c32620006c9fa50/badge?branch=main)](https://app.shippable.com/github/ansible-collections/community.general/dashboard) [![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.general)](https://codecov.io/gh/ansible-collections/community.general)
This repo contains the `community.general` Ansible Collection. The collection includes many modules and plugins supported by the Ansible community which are not part of more specialized community collections.
You can find [documentation for this collection on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
## Tested with Ansible
Tested with the current Ansible 2.9 and 2.10 releases and the current development version of Ansible. Ansible versions before 2.9.10 are not supported.
@@ -17,7 +14,7 @@ Some modules and plugins require external libraries. Please check the requiremen
## Included content
Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/community/general) or the [documentation on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/community/general).
## Using this collection
@@ -38,14 +35,6 @@ See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_gui
If you want to develop new content for this collection or improve what is already here, the easiest way to work on the collection is to clone it into one of the configured [`COLLECTIONS_PATH`](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#collections-paths), and work on it there.
For example, if you are working in the `~/dev` directory:
```
cd ~/dev
git clone git@github.com:ansible-collections/community.general.git collections/ansible_collections/community/general
export COLLECTIONS_PATH=$(pwd)/collections:$COLLECTIONS_PATH
```
You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).
### Running tests
@@ -76,7 +65,7 @@ Basic instructions without release branches:
## Release notes
See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-2/CHANGELOG.rst).
See the [changelog](https://github.com/ansible-collections/community.general/blob/main/CHANGELOG.rst).
## Roadmap

File diff suppressed because it is too large.

View File

@@ -1,16 +1,20 @@
namespace: community
name: general
version: 2.0.1
version: 1.0.0
readme: README.md
authors:
  - Ansible (https://github.com/ansible)
description: null
license_file: COPYING
tags: [community]
# NOTE: No dependencies are expected to be added here
# dependencies:
tags: null
# NOTE: No more dependencies can be added to this list
dependencies:
  ansible.netcommon: '>=1.0.0'
  ansible.posix: '>=1.0.0'
  community.kubernetes: '>=0.11.1' # check https://galaxy.ansible.com/community/kubernetes
  google.cloud: '>=0.10.1' # check https://galaxy.ansible.com/google/cloud
repository: https://github.com/ansible-collections/community.general
documentation: https://docs.ansible.com/ansible/latest/collections/community/general/
#documentation: https://github.com/ansible-collection-migration/community.general/tree/main/docs
homepage: https://github.com/ansible-collections/community.general
issues: https://github.com/ansible-collections/community.general/issues
#type: flatmap

View File

@@ -1,6 +1,36 @@
---
requires_ansible: '>=2.9.10'
action_groups:
docker:
- docker_swarm
- docker_image_facts
- docker_service
- docker_compose
- docker_config
- docker_container
- docker_container_info
- docker_host_info
- docker_image
- docker_image_info
- docker_login
- docker_network
- docker_network_info
- docker_node
- docker_node_info
- docker_prune
- docker_secret
- docker_swarm
- docker_swarm_info
- docker_swarm_service
- docker_swarm_service_info
- docker_volume
- docker_volume_info
k8s:
- kubevirt_cdi_upload
- kubevirt_preset
- kubevirt_pvc
- kubevirt_rs
- kubevirt_template
- kubevirt_vm
ovirt:
- ovirt_affinity_label_facts
- ovirt_api_facts
@@ -27,174 +57,252 @@ action_groups:
- ovirt_vm_facts
- ovirt_vmpool_facts
plugin_routing:
connection:
docker:
redirect: community.docker.docker
oc:
redirect: community.okd.oc
lookup:
gcp_storage_file:
redirect: community.google.gcp_storage_file
hashi_vault:
redirect: community.hashi_vault.hashi_vault
conjur_variable:
redirect: cyberark.conjur.conjur_variable
deprecation:
removal_version: 2.0.0
warning_text: The conjur_variable lookup has been moved to the cyberark.conjur collection.
modules:
ali_instance_facts:
deprecation:
removal_version: 3.0.0
warning_text: see plugin documentation for details
docker_compose:
redirect: community.docker.docker_compose
docker_config:
redirect: community.docker.docker_config
docker_container:
redirect: community.docker.docker_container
docker_container_info:
redirect: community.docker.docker_container_info
docker_host_info:
redirect: community.docker.docker_host_info
docker_image:
redirect: community.docker.docker_image
digital_ocean:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean
digital_ocean_account_facts:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_account_facts module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_account_facts
digital_ocean_account_info:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_account_info module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_account_info
digital_ocean_block_storage:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_block_storage module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_block_storage
digital_ocean_certificate:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_certificate module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_certificate
digital_ocean_certificate_facts:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_certificate_facts module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_certificate_facts
digital_ocean_certificate_info:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_certificate_info module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_certificate_info
digital_ocean_domain:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_domain module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_domain
digital_ocean_domain_facts:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_domain_facts module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_domain_facts
digital_ocean_domain_info:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_domain_info module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_domain_info
digital_ocean_droplet:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_droplet module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_droplet
digital_ocean_firewall_facts:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_firewall_facts module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_firewall_facts
digital_ocean_firewall_info:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_firewall_info module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_firewall_info
digital_ocean_floating_ip:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_floating_ip module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_floating_ip
digital_ocean_floating_ip_facts:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_floating_ip_facts module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_floating_ip_facts
digital_ocean_floating_ip_info:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_floating_ip_info module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_floating_ip_info
digital_ocean_image_facts:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_image_facts module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_image_facts
digital_ocean_image_info:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_image_info module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_image_info
digital_ocean_load_balancer_facts:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_load_balancer_facts module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_load_balancer_facts
digital_ocean_load_balancer_info:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_load_balancer_info module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_load_balancer_info
digital_ocean_region_facts:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_region_facts module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_region_facts
digital_ocean_region_info:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_region_info module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_region_info
digital_ocean_size_facts:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_size_facts module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_size_facts
digital_ocean_size_info:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_size_info module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_size_info
digital_ocean_snapshot_facts:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_snapshot_facts module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_snapshot_facts
digital_ocean_snapshot_info:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_snapshot_info module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_snapshot_info
digital_ocean_sshkey:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_sshkey module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_sshkey
digital_ocean_sshkey_facts:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_sshkey_facts module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_sshkey_facts
digital_ocean_sshkey_info:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_sshkey_info module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_sshkey_info
digital_ocean_tag:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_tag module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_tag
digital_ocean_tag_facts:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_tag_facts module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_tag_facts
digital_ocean_tag_info:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_tag_info module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_tag_info
digital_ocean_volume_facts:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_volume_facts module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_volume_facts
digital_ocean_volume_info:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean_volume_info module has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean_volume_info
docker_image_facts:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use community.docker.docker_image_info instead.
docker_image_info:
redirect: community.docker.docker_image_info
docker_login:
redirect: community.docker.docker_login
docker_network:
redirect: community.docker.docker_network
docker_network_info:
redirect: community.docker.docker_network_info
docker_node:
redirect: community.docker.docker_node
docker_node_info:
redirect: community.docker.docker_node_info
docker_prune:
redirect: community.docker.docker_prune
docker_secret:
redirect: community.docker.docker_secret
warning_text: see plugin documentation for details
docker_service:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use community.docker.docker_compose instead.
docker_stack:
redirect: community.docker.docker_stack
docker_stack_info:
redirect: community.docker.docker_stack_info
docker_stack_task_info:
redirect: community.docker.docker_stack_task_info
docker_swarm:
redirect: community.docker.docker_swarm
docker_swarm_info:
redirect: community.docker.docker_swarm_info
docker_swarm_service:
redirect: community.docker.docker_swarm_service
docker_swarm_service_info:
redirect: community.docker.docker_swarm_service_info
docker_volume:
redirect: community.docker.docker_volume
docker_volume_info:
redirect: community.docker.docker_volume_info
warning_text: see plugin documentation for details
firewalld:
deprecation:
removal_version: 2.0.0
warning_text: The firewalld module has been moved to the ansible.posix collection.
redirect: ansible.posix.firewalld
foreman:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use the modules from the theforeman.foreman collection instead.
gc_storage:
redirect: community.google.gc_storage
warning_text: see plugin documentation for details
gcdns_record:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use google.cloud.gcp_dns_resource_record_set instead.
warning_text: see plugin documentation for details
gcdns_zone:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use google.cloud.gcp_dns_managed_zone instead.
warning_text: see plugin documentation for details
gce:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use google.cloud.gcp_compute_instance instead.
gce_eip:
redirect: community.google.gce_eip
gce_img:
redirect: community.google.gce_img
gce_instance_template:
redirect: community.google.gce_instance_template
gce_labels:
redirect: community.google.gce_labels
gce_lb:
redirect: community.google.gce_lb
gce_mig:
redirect: community.google.gce_mig
gce_net:
redirect: community.google.gce_net
gce_pd:
redirect: community.google.gce_pd
gce_snapshot:
redirect: community.google.gce_snapshot
gce_tag:
redirect: community.google.gce_tag
warning_text: see plugin documentation for details
gcp_backend_service:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use google.cloud.gcp_compute_backend_service instead.
warning_text: see plugin documentation for details
gcp_forwarding_rule:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use google.cloud.gcp_compute_forwarding_rule or google.cloud.gcp_compute_global_forwarding_rule instead.
warning_text: see plugin documentation for details
gcp_healthcheck:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use google.cloud.gcp_compute_health_check, google.cloud.gcp_compute_http_health_check or google.cloud.gcp_compute_https_health_check instead.
warning_text: see plugin documentation for details
gcp_target_proxy:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use google.cloud.gcp_compute_target_http_proxy instead.
warning_text: see plugin documentation for details
gcp_url_map:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use google.cloud.gcp_compute_url_map instead.
gcpubsub:
redirect: community.google.gcpubsub
gcpubsub_info:
redirect: community.google.gcpubsub_info
warning_text: see plugin documentation for details
gcpubsub_facts:
redirect: community.google.gcpubsub_info
deprecation:
removal_version: 3.0.0
warning_text: Use community.google.gcpubsub_info instead.
warning_text: see plugin documentation for details
gcspanner:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use google.cloud.gcp_spanner_database and/or google.cloud.gcp_spanner_instance instead.
warning_text: see plugin documentation for details
github_hooks:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use community.general.github_webhook and community.general.github_webhook_info instead.
gluster_heal_info:
deprecation:
removal_version: 3.0.0
warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_heal_info instead.
gluster_peer:
deprecation:
removal_version: 3.0.0
warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_peer instead.
gluster_volume:
deprecation:
removal_version: 3.0.0
warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_volume instead.
warning_text: see plugin documentation for details
helm:
deprecation:
removal_version: 3.0.0
warning_text: The helm module in community.general has been deprecated. Use community.kubernetes.helm instead.
hetzner_failover_ip:
redirect: community.hrobot.failover_ip
hetzner_failover_ip_info:
redirect: community.hrobot.failover_ip_info
hetzner_firewall:
redirect: community.hrobot.firewall
hetzner_firewall_info:
redirect: community.hrobot.firewall_info
hpilo_facts:
deprecation:
removal_version: 3.0.0
@@ -203,26 +311,44 @@ plugin_routing:
deprecation:
removal_version: 3.0.0
warning_text: see plugin documentation for details
infini_export:
redirect: infinidat.infinibox.infini_export
deprecation:
removal_version: 2.0.0
warning_text: The infini_export module has been moved to the infinidat collection.
infini_export_client:
redirect: infinidat.infinibox.infini_export_client
deprecation:
removal_version: 2.0.0
warning_text: The infini_export_client module has been moved to the infinidat collection.
infini_fs:
redirect: infinidat.infinibox.infini_fs
deprecation:
removal_version: 2.0.0
warning_text: The infini_fs module has been moved to the infinidat collection.
infini_host:
redirect: infinidat.infinibox.infini_host
deprecation:
removal_version: 2.0.0
warning_text: The infini_host module has been moved to the infinidat collection.
infini_pool:
redirect: infinidat.infinibox.infini_pool
deprecation:
removal_version: 2.0.0
warning_text: The infini_pool module has been moved to the infinidat collection.
infini_vol:
redirect: infinidat.infinibox.infini_vol
deprecation:
removal_version: 2.0.0
warning_text: The infini_vol module has been moved to the infinidat collection.
jenkins_job_facts:
deprecation:
removal_version: 3.0.0
warning_text: see plugin documentation for details
katello:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use the modules from the theforeman.foreman collection instead.
kubevirt_cdi_upload:
redirect: community.kubevirt.kubevirt_cdi_upload
kubevirt_preset:
redirect: community.kubevirt.kubevirt_preset
kubevirt_pvc:
redirect: community.kubevirt.kubevirt_pvc
kubevirt_rs:
redirect: community.kubevirt.kubevirt_rs
kubevirt_template:
redirect: community.kubevirt.kubevirt_template
kubevirt_vm:
redirect: community.kubevirt.kubevirt_vm
warning_text: see plugin documentation for details
ldap_attr:
deprecation:
removal_version: 3.0.0
@@ -243,38 +369,68 @@ plugin_routing:
deprecation:
removal_version: 3.0.0
warning_text: see plugin documentation for details
mysql_db:
deprecation:
removal_version: 2.0.0
warning_text: The mysql_db module has been moved to the community.mysql collection.
redirect: community.mysql.mysql_db
mysql_info:
deprecation:
removal_version: 2.0.0
warning_text: The mysql_info module has been moved to the community.mysql collection.
redirect: community.mysql.mysql_info
mysql_query:
deprecation:
removal_version: 2.0.0
warning_text: The mysql_query module has been moved to the community.mysql collection.
redirect: community.mysql.mysql_query
mysql_replication:
deprecation:
removal_version: 2.0.0
warning_text: The mysql_replication module has been moved to the community.mysql collection.
redirect: community.mysql.mysql_replication
mysql_user:
deprecation:
removal_version: 2.0.0
warning_text: The mysql_user module has been moved to the community.mysql collection.
redirect: community.mysql.mysql_user
mysql_variables:
deprecation:
removal_version: 2.0.0
warning_text: The mysql_variables module has been moved to the community.mysql collection.
redirect: community.mysql.mysql_variables
na_cdot_aggregate:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use netapp.ontap.na_ontap_aggregate instead.
warning_text: see plugin documentation for details
na_cdot_license:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use netapp.ontap.na_ontap_license instead.
warning_text: see plugin documentation for details
na_cdot_lun:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use netapp.ontap.na_ontap_lun instead.
warning_text: see plugin documentation for details
na_cdot_qtree:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use netapp.ontap.na_ontap_qtree instead.
warning_text: see plugin documentation for details
na_cdot_svm:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use netapp.ontap.na_ontap_svm instead.
warning_text: see plugin documentation for details
na_cdot_user:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use netapp.ontap.na_ontap_user instead.
warning_text: see plugin documentation for details
na_cdot_user_role:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use netapp.ontap.na_ontap_user_role instead.
warning_text: see plugin documentation for details
na_cdot_volume:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use netapp.ontap.na_ontap_volume instead.
warning_text: see plugin documentation for details
na_ontap_gather_facts:
deprecation:
removal_version: 3.0.0
@@ -431,50 +587,41 @@ plugin_routing:
deprecation:
removal_version: 3.0.0
warning_text: see plugin documentation for details
postgresql_copy:
redirect: community.postgresql.postgresql_copy
postgresql_db:
redirect: community.postgresql.postgresql_db
postgresql_ext:
redirect: community.postgresql.postgresql_ext
postgresql_idx:
redirect: community.postgresql.postgresql_idx
postgresql_info:
redirect: community.postgresql.postgresql_info
postgresql_lang:
redirect: community.postgresql.postgresql_lang
postgresql_membership:
redirect: community.postgresql.postgresql_membership
postgresql_owner:
redirect: community.postgresql.postgresql_owner
postgresql_pg_hba:
redirect: community.postgresql.postgresql_pg_hba
postgresql_ping:
redirect: community.postgresql.postgresql_ping
postgresql_privs:
redirect: community.postgresql.postgresql_privs
postgresql_publication:
redirect: community.postgresql.postgresql_publication
postgresql_query:
redirect: community.postgresql.postgresql_query
postgresql_schema:
redirect: community.postgresql.postgresql_schema
postgresql_sequence:
redirect: community.postgresql.postgresql_sequence
postgresql_set:
redirect: community.postgresql.postgresql_set
postgresql_slot:
redirect: community.postgresql.postgresql_slot
postgresql_subscription:
redirect: community.postgresql.postgresql_subscription
postgresql_table:
redirect: community.postgresql.postgresql_table
postgresql_tablespace:
redirect: community.postgresql.postgresql_tablespace
postgresql_user_obj_stat_info:
redirect: community.postgresql.postgresql_user_obj_stat_info
postgresql_user:
redirect: community.postgresql.postgresql_user
proxysql_backend_servers:
deprecation:
removal_version: 2.0.0
warning_text: The proxysql_backend_servers module has been moved to the community.proxysql collection.
redirect: community.proxysql.proxysql_backend_servers
proxysql_global_variables:
deprecation:
removal_version: 2.0.0
warning_text: The proxysql_global_variables module has been moved to the community.proxysql collection.
redirect: community.proxysql.proxysql_global_variables
proxysql_manage_config:
deprecation:
removal_version: 2.0.0
warning_text: The proxysql_manage_config module has been moved to the community.proxysql collection.
redirect: community.proxysql.proxysql_manage_config
proxysql_mysql_users:
deprecation:
removal_version: 2.0.0
warning_text: The proxysql_mysql_users module has been moved to the community.proxysql collection.
redirect: community.proxysql.proxysql_mysql_users
proxysql_query_rules:
deprecation:
removal_version: 2.0.0
warning_text: The proxysql_query_rules module has been moved to the community.proxysql collection.
redirect: community.proxysql.proxysql_query_rules
proxysql_replication_hostgroups:
deprecation:
removal_version: 2.0.0
warning_text: The proxysql_replication_hostgroups module has been moved to the community.proxysql collection.
redirect: community.proxysql.proxysql_replication_hostgroups
proxysql_scheduler:
deprecation:
removal_version: 2.0.0
warning_text: The proxysql_scheduler module has been moved to the community.proxysql collection.
redirect: community.proxysql.proxysql_scheduler
purefa_facts:
deprecation:
removal_version: 3.0.0
@@ -520,25 +667,25 @@ plugin_routing:
removal_version: 3.0.0
warning_text: see plugin documentation for details
sf_account_manager:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use netapp.elementsw.na_elementsw_account instead.
warning_text: see plugin documentation for details
sf_check_connections:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use netapp.elementsw.na_elementsw_check_connections instead.
warning_text: see plugin documentation for details
sf_snapshot_schedule_manager:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use netapp.elementsw.na_elementsw_snapshot_schedule instead.
warning_text: see plugin documentation for details
sf_volume_access_group_manager:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use netapp.elementsw.na_elementsw_access_group instead.
warning_text: see plugin documentation for details
sf_volume_manager:
tombstone:
deprecation:
removal_version: 2.0.0
warning_text: Use netapp.elementsw.na_elementsw_volume instead.
warning_text: see plugin documentation for details
smartos_image_facts:
deprecation:
removal_version: 3.0.0
@@ -552,52 +699,44 @@ plugin_routing:
removal_version: 3.0.0
warning_text: see plugin documentation for details
doc_fragments:
_gcp:
redirect: community.google._gcp
docker:
redirect: community.docker.docker
hetzner:
redirect: community.hrobot.robot
kubevirt_common_options:
redirect: community.kubevirt.kubevirt_common_options
kubevirt_vm_options:
redirect: community.kubevirt.kubevirt_vm_options
postgresql:
redirect: community.postgresql.postgresql
digital_ocean:
deprecation:
removal_version: 2.0.0
warning_text: The digital_ocean docs_fragment has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean
infinibox:
redirect: infinidat.infinibox.infinibox
deprecation:
removal_version: 2.0.0
warning_text: The infinibox doc_fragments plugin has been moved to the infinidat.infinibox collection.
mysql:
deprecation:
removal_version: 2.0.0
warning_text: The mysql docs_fragment has been moved to the community.mysql collection.
redirect: community.mysql.mysql
proxysql:
deprecation:
removal_version: 2.0.0
warning_text: The proxysql docs_fragment has been moved to the community.proxysql collection.
redirect: community.proxysql.proxysql
module_utils:
docker.common:
redirect: community.docker.common
docker.swarm:
redirect: community.docker.swarm
gcdns:
redirect: community.google.gcdns
gce:
redirect: community.google.gce
gcp:
redirect: community.google.gcp
hetzner:
redirect: community.hrobot.robot
kubevirt:
redirect: community.kubevirt.kubevirt
postgresql:
redirect: community.postgresql.postgresql
callback:
actionable:
tombstone:
digital_ocean:
deprecation:
removal_version: 2.0.0
warning_text: Use the 'default' callback plugin with 'display_skipped_hosts = no' and 'display_ok_hosts = no' options.
full_skip:
tombstone:
warning_text: The digital_ocean module_utils has been moved to the community.digitalocean collection.
redirect: community.digitalocean.digital_ocean
firewalld:
deprecation:
removal_version: 2.0.0
warning_text: Use the 'default' callback plugin with 'display_skipped_hosts = no' option.
stderr:
tombstone:
warning_text: The firewalld module_utils has been moved to the ansible.posix collection.
redirect: ansible.posix.firewalld
infinibox:
redirect: infinidat.infinibox.infinibox
deprecation:
removal_version: 2.0.0
warning_text: Use the 'default' callback plugin with 'display_failed_stderr = yes' option.
inventory:
docker_machine:
redirect: community.docker.docker_machine
docker_swarm:
redirect: community.docker.docker_swarm
kubevirt:
redirect: community.kubevirt.kubevirt
warning_text: The infinibox module_utils plugin has been moved to the infinidat.infinibox collection.
mysql:
deprecation:
removal_version: 2.0.0
warning_text: The mysql module_utils has been moved to the community.mysql collection.
redirect: community.mysql.mysql
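
Each plugin_routing entry tells ansible-core where a plugin now lives (redirect), that it will be removed in a future version (deprecation), or that it is already gone (tombstone). A minimal sketch, not ansible-core's actual resolver, of what a redirect entry means in practice:

```
routing = {  # excerpt of the structure above, as Python data
    'modules': {
        'docker_login': {'redirect': 'community.docker.docker_login'},
    },
}

def resolve(module, routing):
    entry = routing['modules'].get(module, {})
    return entry.get('redirect', 'community.general.' + module)

print(resolve('docker_login', routing))  # community.docker.docker_login
```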

View File

@@ -1 +0,0 @@
./system/iptables_state.py

View File

@@ -1 +0,0 @@
./system/shutdown.py

View File

@@ -1,182 +0,0 @@
# Copyright: (c) 2020, quidame <quidame@poivron.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

import time

from ansible.plugins.action import ActionBase
from ansible.errors import AnsibleError, AnsibleActionFail, AnsibleConnectionFailure
from ansible.utils.vars import merge_hash
from ansible.utils.display import Display

display = Display()


class ActionModule(ActionBase):

    # Keep internal params away from user interactions
    _VALID_ARGS = frozenset(('path', 'state', 'table', 'noflush', 'counters', 'modprobe', 'ip_version', 'wait'))
    DEFAULT_SUDOABLE = True

    MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO = (
        "This module doesn't support async>0 and poll>0 when its 'state' param "
        "is set to 'restored'. To enable its rollback feature (that needs the "
        "module to run asynchronously on the remote), please set task attribute "
        "'poll' (=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
        "'ansible_timeout' (=%s) (recommended).")
    MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK = (
        "Attempts to restore iptables state without rollback in case of mistake "
        "may lead the ansible controller to lose access to the hosts and never "
        "regain it before fixing firewall rules through a serial console, or any "
        "other way except SSH. Please set task attribute 'poll' (=%s) to 0, and "
        "'async' (=%s) to a value >2 and not greater than 'ansible_timeout' (=%s) "
        "(recommended).")
    MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT = (
        "You attempt to restore iptables state with rollback in case of mistake, "
        "but with settings that will lead this rollback to happen AFTER the "
        "controller reaches its own timeout. Please set task attribute 'poll' "
        "(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
        "'ansible_timeout' (=%s) (recommended).")

    def _async_result(self, module_args, task_vars, timeout):
        '''
        Retrieve results of the asynchronous task, and display them in place of
        the async wrapper results (those with the ansible_job_id key).
        '''
        # At least one iteration is required, even if timeout is 0.
        for i in range(max(1, timeout)):
            async_result = self._execute_module(
                module_name='ansible.builtin.async_status',
                module_args=module_args,
                task_vars=task_vars,
                wrap_async=False)
            if async_result['finished'] == 1:
                break
            time.sleep(min(1, timeout))

        return async_result

    def run(self, tmp=None, task_vars=None):
        self._supports_check_mode = True
        self._supports_async = True

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        if not result.get('skipped'):
            # FUTURE: better to let _execute_module calculate this internally?
            wrap_async = self._task.async_val and not self._connection.has_native_async

            # Set short names for values we'll have to compare or reuse
            task_poll = self._task.poll
            task_async = self._task.async_val
            check_mode = self._play_context.check_mode
            max_timeout = self._connection._play_context.timeout
            module_name = self._task.action
            module_args = self._task.args

            if module_args.get('state', None) == 'restored':
                if not wrap_async:
                    if not check_mode:
                        display.warning(self.MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK % (
                            task_poll,
                            task_async,
                            max_timeout))
                elif task_poll:
                    raise AnsibleActionFail(self.MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO % (
                        task_poll,
                        task_async,
                        max_timeout))
                else:
                    if task_async > max_timeout and not check_mode:
                        display.warning(self.MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT % (
                            task_poll,
                            task_async,
                            max_timeout))

                    # inject the async directory based on the shell option into the
                    # module args
                    async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")

                    # Bind the loop max duration to consistent values on both
                    # remote and local sides (if not the same, make the loop
                    # longer on the controller); and set a backup file path.
                    module_args['_timeout'] = task_async
                    module_args['_back'] = '%s/iptables.state' % async_dir
                    async_status_args = dict(_async_dir=async_dir)
                    confirm_cmd = 'rm -f %s' % module_args['_back']
                    starter_cmd = 'touch %s.starter' % module_args['_back']
                    remaining_time = max(task_async, max_timeout)

            # do work!
            result = merge_hash(result, self._execute_module(module_args=module_args, task_vars=task_vars, wrap_async=wrap_async))

            # Then the 3-steps "go ahead or rollback":
            # 1. Catch early errors of the module (in asynchronous task) if any.
            #    Touch a file on the target to signal the module to process now.
            # 2. Reset connection to ensure a persistent one will not be reused.
            # 3. Confirm the restored state by removing the backup on the remote.
            #    Retrieve the results of the asynchronous task to return them.
            if '_back' in module_args:
                async_status_args['jid'] = result.get('ansible_job_id', None)
                if async_status_args['jid'] is None:
                    raise AnsibleActionFail("Unable to get 'ansible_job_id'.")

                # Catch early errors due to missing mandatory option, bad
                # option type/value, missing required system command, etc.
                result = merge_hash(result, self._async_result(async_status_args, task_vars, 0))

                # The module is aware to not process the main iptables-restore
                # command before finding (and deleting) the 'starter' cookie on
                # the host, so the previous query will not reach ssh timeout.
                garbage = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE)

                # As the main command is not yet executed on the target, here
                # 'finished' means 'failed before the main command was executed'.
                if not result['finished']:
                    try:
                        self._connection.reset()
                    except AttributeError:
                        pass

                    for x in range(max_timeout):
                        time.sleep(1)
                        remaining_time -= 1
                        # - AnsibleConnectionFailure covers rejected requests (i.e.
                        #   by rules with '--jump REJECT')
                        # - ansible_timeout is able to cover dropped requests (due
                        #   to a rule or policy DROP) if not lower than async_val.
                        try:
                            garbage = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE)
                            break
                        except AnsibleConnectionFailure:
                            continue

                    result = merge_hash(result, self._async_result(async_status_args, task_vars, remaining_time))

                # Cleanup async related stuff and internal params
                for key in ('ansible_job_id', 'results_file', 'started', 'finished'):
                    if result.get(key):
                        del result[key]

                if result.get('invocation', {}).get('module_args'):
                    if '_timeout' in result['invocation']['module_args']:
                        del result['invocation']['module_args']['_back']
                        del result['invocation']['module_args']['_timeout']

                async_status_args['mode'] = 'cleanup'
                garbage = self._execute_module(
                    module_name='ansible.builtin.async_status',
                    module_args=async_status_args,
                    task_vars=task_vars,
                    wrap_async=False)

            if not wrap_async:
                # remove a temporary path we created
                self._remove_tmp_path(self._connection._shell.tmpdir)

        return result
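
The rollback contract described in the comments is simple: the module backs up the current rules, applies the new state, and waits; the controller confirms over a fresh connection by deleting the backup, and if that deletion never arrives the module restores the backup. A minimal sketch of the remote-side wait (not the module's actual code):

```
import os
import time

def wait_for_confirmation(backup_path, timeout):
    for _ in range(timeout):
        if not os.path.exists(backup_path):
            return 'confirmed'  # controller removed the backup: keep new rules
        time.sleep(1)
    return 'rollback'           # controller never got through: restore backup
```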

View File

@@ -1,211 +0,0 @@
# Copyright: (c) 2020, Amin Vakil <info@aminvakil.com>
# Copyright: (c) 2016-2018, Matt Davis <mdavis@ansible.com>
# Copyright: (c) 2018, Sam Doran <sdoran@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.common.collections import is_string
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
display = Display()
class TimedOutException(Exception):
pass
class ActionModule(ActionBase):
TRANSFERS_FILES = False
_VALID_ARGS = frozenset((
'msg',
'delay',
'search_paths'
))
DEFAULT_CONNECT_TIMEOUT = None
DEFAULT_PRE_SHUTDOWN_DELAY = 0
DEFAULT_SHUTDOWN_MESSAGE = 'Shut down initiated by Ansible'
DEFAULT_SHUTDOWN_COMMAND = 'shutdown'
DEFAULT_SHUTDOWN_COMMAND_ARGS = '-h {delay_min} "{message}"'
DEFAULT_SUDOABLE = True
SHUTDOWN_COMMANDS = {
'alpine': 'poweroff',
'vmkernel': 'halt',
}
SHUTDOWN_COMMAND_ARGS = {
'alpine': '',
'void': '-h +{delay_min} "{message}"',
'freebsd': '-h +{delay_sec}s "{message}"',
'linux': DEFAULT_SHUTDOWN_COMMAND_ARGS,
'macosx': '-h +{delay_min} "{message}"',
'openbsd': '-h +{delay_min} "{message}"',
'solaris': '-y -g {delay_sec} -i 5 "{message}"',
'sunos': '-y -g {delay_sec} -i 5 "{message}"',
'vmkernel': '-d {delay_sec}',
'aix': '-Fh',
}
def __init__(self, *args, **kwargs):
super(ActionModule, self).__init__(*args, **kwargs)
@property
def delay(self):
return self._check_delay('delay', self.DEFAULT_PRE_SHUTDOWN_DELAY)
def _check_delay(self, key, default):
"""Ensure that the value is positive or zero"""
value = int(self._task.args.get(key, default))
if value < 0:
value = 0
return value
def _get_value_from_facts(self, variable_name, distribution, default_value):
"""Get dist+version specific args first, then distribution, then family, lastly use default"""
attr = getattr(self, variable_name)
value = attr.get(
distribution['name'] + distribution['version'],
attr.get(
distribution['name'],
attr.get(
distribution['family'],
getattr(self, default_value))))
return value
def get_shutdown_command_args(self, distribution):
args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')
# Convert seconds to minutes. If less than 60, set it to 0.
delay_sec = self.delay
shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE)
return args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message)
def get_distribution(self, task_vars):
# FIXME: only execute the module if we don't already have the facts we need
distribution = {}
display.debug('{action}: running setup module to get distribution'.format(action=self._task.action))
module_output = self._execute_module(
task_vars=task_vars,
module_name='ansible.legacy.setup',
module_args={'gather_subset': 'min'})
try:
if module_output.get('failed', False):
raise AnsibleError('Failed to determine system distribution. {0}, {1}'.format(
to_native(module_output['module_stdout']).strip(),
to_native(module_output['module_stderr']).strip()))
distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower()
distribution['version'] = to_text(module_output['ansible_facts']['ansible_distribution_version'].split('.')[0])
distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower())
display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution))
return distribution
except KeyError as ke:
raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0]))
def get_shutdown_command(self, task_vars, distribution):
shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND')
default_search_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
search_paths = self._task.args.get('search_paths', default_search_paths)
# FIXME: switch all this to user arg spec validation methods when they are available
# Convert bare strings to a list
if is_string(search_paths):
search_paths = [search_paths]
# Error if we didn't get a list
err_msg = "'search_paths' must be a string or flat list of strings, got {0}"
try:
incorrect_type = any(not is_string(x) for x in search_paths)
if not isinstance(search_paths, list) or incorrect_type:
raise TypeError
except TypeError:
raise AnsibleError(err_msg.format(search_paths))
display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format(
action=self._task.action,
command=shutdown_bin,
paths=search_paths))
find_result = self._execute_module(
task_vars=task_vars,
# prevent collection search by calling with ansible.legacy (still allows library/ override of find)
module_name='ansible.legacy.find',
module_args={
'paths': search_paths,
'patterns': [shutdown_bin],
'file_type': 'any'
}
)
full_path = [x['path'] for x in find_result['files']]
if not full_path:
raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths))
self._shutdown_command = full_path[0]
return self._shutdown_command
def perform_shutdown(self, task_vars, distribution):
result = {}
shutdown_result = {}
shutdown_command = self.get_shutdown_command(task_vars, distribution)
shutdown_command_args = self.get_shutdown_command_args(distribution)
shutdown_command_exec = '{0} {1}'.format(shutdown_command, shutdown_command_args)
self.cleanup(force=True)
try:
display.vvv("{action}: shutting down server...".format(action=self._task.action))
display.debug("{action}: shutting down server with command '{command}'".format(action=self._task.action, command=shutdown_command_exec))
if self._play_context.check_mode:
shutdown_result['rc'] = 0
else:
shutdown_result = self._low_level_execute_command(shutdown_command_exec, sudoable=self.DEFAULT_SUDOABLE)
except AnsibleConnectionFailure as e:
# If the connection is closed too quickly due to the system being shutdown, carry on
display.debug('{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action, error=to_text(e)))
shutdown_result['rc'] = 0
if shutdown_result['rc'] != 0:
result['failed'] = True
result['shutdown'] = False
result['msg'] = "Shutdown command failed. Error was {stdout}, {stderr}".format(
stdout=to_native(shutdown_result['stdout'].strip()),
stderr=to_native(shutdown_result['stderr'].strip()))
return result
result['failed'] = False
result['shutdown_command'] = shutdown_command_exec
return result
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = True
self._supports_async = True
# If running with local connection, fail so we don't shut down ourselves
if self._connection.transport == 'local' and (not self._play_context.check_mode):
msg = 'Running {0} with local connection would shutdown the control node.'.format(self._task.action)
return {'changed': False, 'elapsed': 0, 'shutdown': False, 'failed': True, 'msg': msg}
if task_vars is None:
task_vars = {}
result = super(ActionModule, self).run(tmp, task_vars)
if result.get('skipped', False) or result.get('failed', False):
return result
distribution = self.get_distribution(task_vars)
# Initiate shutdown
shutdown_result = self.perform_shutdown(task_vars, distribution)
if shutdown_result['failed']:
result = shutdown_result
return result
result['shutdown'] = True
result['changed'] = True
result['shutdown_command'] = shutdown_result['shutdown_command']
return result
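For reference, the distribution-based lookup used by _get_value_from_facts() can be exercised on its own; this sketch mirrors its name+version, then name, then family, then default precedence (the distribution values are hypothetical):

    SHUTDOWN_COMMAND_ARGS = {
        'alpine': '',
        'freebsd': '-h +{delay_sec}s "{message}"',
        'linux': '-h {delay_min} "{message}"',
    }
    DEFAULT_ARGS = '-h {delay_min} "{message}"'

    def lookup(table, distribution, default):
        # name+version first, then name, then family, then the default
        return table.get(
            distribution['name'] + distribution['version'],
            table.get(distribution['name'],
                      table.get(distribution['family'], default)))

    dist = {'name': 'freebsd', 'version': '12', 'family': 'freebsd'}
    print(lookup(SHUTDOWN_COMMAND_ARGS, dist, DEFAULT_ARGS))  # -> '-h +{delay_sec}s "{message}"'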

View File

@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: doas
become: doas
short_description: Do As user
description:
- This become plugin allows your remote/login user to execute commands as another user via the doas utility.

View File

@@ -4,7 +4,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: dzdo
become: dzdo
short_description: Centrify's Direct Authorize
description:
- This become plugin allows your remote/login user to execute commands as another user via the dzdo utility.

View File

@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: ksu
become: ksu
short_description: Kerberos substitute user
description:
- This become plugin allows your remote/login user to execute commands as another user via the ksu utility.

View File

@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: machinectl
become: machinectl
short_description: Systemd's machinectl privilege escalation
description:
- This become plugin allows your remote/login user to execute commands as another user via the machinectl utility.

View File

@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: pbrun
become: pbrun
short_description: PowerBroker run
description:
- This become plugin allows your remote/login user to execute commands as another user via the pbrun utility.

View File

@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: pfexec
become: pfexec
short_description: profile based execution
description:
- This become plugin allows your remote/login user to execute commands as another user via the pfexec utility.
@@ -14,7 +14,7 @@ DOCUMENTATION = '''
become_user:
description:
- User you 'become' to execute the task
- This plugin ignores this setting as pfexec uses its own C(exec_attr) to figure this out,
- This plugin ignores this setting as pfexec uses its own ``exec_attr`` to figure this out,
but it is supplied here for Ansible to make decisions needed for the task execution, like file permissions.
default: root
ini:
@@ -80,8 +80,8 @@ DOCUMENTATION = '''
- name: ansible_pfexec_wrap_execution
env:
- name: ANSIBLE_PFEXEC_WRAP_EXECUTION
notes:
- This plugin ignores I(become_user) as pfexec uses its own C(exec_attr) to figure this out.
note:
- This plugin ignores ``become_user`` as pfexec uses its own ``exec_attr`` to figure this out.
'''
from ansible.plugins.become import BecomeBase

View File

@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: pmrun
become: pmrun
short_description: Privilege Manager run
description:
- This become plugin allows your remote/login user to execute commands as another user via the pmrun utility.

View File

@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: sesu
become: sesu
short_description: CA Privileged Access Manager
description:
- This become plugin allows your remote/login user to execute commands as another user via the sesu utility.

View File

@@ -6,8 +6,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: memcached
cache: memcached
short_description: Use memcached DB for cache
description:
- This cache uses JSON formatted, per host records saved in memcached.
@@ -53,14 +52,12 @@ from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.common._collections_compat import MutableSet
from ansible.plugins.cache import BaseCacheModule
from ansible.release import __version__ as ansible_base_version
from ansible.utils.display import Display
try:
import memcache
HAS_MEMCACHE = True
except ImportError:
HAS_MEMCACHE = False
raise AnsibleError("python-memcached is required for the memcached fact cache")
display = Display()
@@ -181,17 +178,14 @@ class CacheModule(BaseCacheModule):
self._timeout = self.get_option('_timeout')
self._prefix = self.get_option('_prefix')
except KeyError:
# TODO: remove once we no longer support Ansible 2.9
if not ansible_base_version.startswith('2.9.'):
raise AnsibleError("Do not import CacheModules directly. Use ansible.plugins.loader.cache_loader instead.")
display.deprecated('Rather than importing CacheModules directly, '
'use ansible.plugins.loader.cache_loader',
version='2.0.0', collection_name='community.general') # was Ansible 2.12
if C.CACHE_PLUGIN_CONNECTION:
connection = C.CACHE_PLUGIN_CONNECTION.split(',')
self._timeout = C.CACHE_PLUGIN_TIMEOUT
self._prefix = C.CACHE_PLUGIN_PREFIX
if not HAS_MEMCACHE:
raise AnsibleError("python-memcached is required for the memcached fact cache")
self._cache = {}
self._db = ProxyClientPool(connection, debug=0)
self._keys = CacheModuleKeys(self._db, self._db.get(CacheModuleKeys.PREFIX) or [])

View File

@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: pickle
cache: pickle
short_description: Pickle formatted files.
description:
- This cache uses Python's pickle serialization format, in per host files, saved to the filesystem.

View File

@@ -5,8 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: redis
cache: redis
short_description: Use Redis DB for cache
description:
- This cache uses JSON formatted, per host records saved in Redis.
@@ -18,7 +17,6 @@ DOCUMENTATION = '''
- A colon separated string of connection information for Redis.
- The format is C(host:port:db:password), for example C(localhost:6379:0:changeme).
- To use encryption in transit, prefix the connection with C(tls://), as in C(tls://localhost:6379:0:changeme).
- To use redis sentinel, use separator C(;), for example C(localhost:26379;localhost:26379;0:changeme). Requires redis>=2.9.0.
required: True
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
@@ -33,23 +31,6 @@ DOCUMENTATION = '''
ini:
- key: fact_caching_prefix
section: defaults
_keyset_name:
description: User-defined name for the cache keyset.
default: ansible_cache_keys
env:
- name: ANSIBLE_CACHE_REDIS_KEYSET_NAME
ini:
- key: fact_caching_redis_keyset_name
section: defaults
version_added: 1.3.0
_sentinel_service_name:
description: The redis sentinel service name (also referred to as the cluster name).
env:
- name: ANSIBLE_CACHE_REDIS_SENTINEL
ini:
- key: fact_caching_redis_sentinel
section: defaults
version_added: 1.3.0
_timeout:
default: 86400
description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
@@ -66,17 +47,14 @@ import json
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native
from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
from ansible.plugins.cache import BaseCacheModule
from ansible.release import __version__ as ansible_base_version
from ansible.utils.display import Display
try:
from redis import StrictRedis, VERSION
HAS_REDIS = True
except ImportError:
HAS_REDIS = False
raise AnsibleError("The 'redis' python module (version 2.4.5 or newer) is required for the redis fact cache, 'pip install redis'")
display = Display()
@@ -90,8 +68,6 @@ class CacheModule(BaseCacheModule):
to expire keys. This mechanism is used instead of a pattern-matched 'scan', for performance.
"""
_sentinel_service_name = None
def __init__(self, *args, **kwargs):
uri = ''
@@ -101,70 +77,25 @@ class CacheModule(BaseCacheModule):
uri = self.get_option('_uri')
self._timeout = float(self.get_option('_timeout'))
self._prefix = self.get_option('_prefix')
self._keys_set = self.get_option('_keyset_name')
self._sentinel_service_name = self.get_option('_sentinel_service_name')
except KeyError:
# TODO: remove once we no longer support Ansible 2.9
if not ansible_base_version.startswith('2.9.'):
raise AnsibleError("Do not import CacheModules directly. Use ansible.plugins.loader.cache_loader instead.")
display.deprecated('Rather than importing CacheModules directly, '
'use ansible.plugins.loader.cache_loader',
version='2.0.0', collection_name='community.general') # was Ansible 2.12
if C.CACHE_PLUGIN_CONNECTION:
uri = C.CACHE_PLUGIN_CONNECTION
self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
self._prefix = C.CACHE_PLUGIN_PREFIX
self._keys_set = 'ansible_cache_keys'
if not HAS_REDIS:
raise AnsibleError("The 'redis' python module (version 2.4.5 or newer) is required for the redis fact cache, 'pip install redis'")
self._cache = {}
kw = {}
# tls connection
tlsprefix = 'tls://'
if uri.startswith(tlsprefix):
kw['ssl'] = True
uri = uri[len(tlsprefix):]
# redis sentinel connection
if self._sentinel_service_name:
self._db = self._get_sentinel_connection(uri, kw)
# normal connection
else:
connection = uri.split(':')
self._db = StrictRedis(*connection, **kw)
display.vv('Redis connection: %s' % self._db)
def _get_sentinel_connection(self, uri, kw):
"""
get sentinel connection details from _uri
"""
try:
from redis.sentinel import Sentinel
except ImportError:
raise AnsibleError("The 'redis' python module (version 2.9.0 or newer) is required to use redis sentinel.")
if ';' not in uri:
raise AnsibleError('_uri does not have sentinel syntax.')
# format: "localhost:26379;localhost2:26379;0:changeme"
connections = uri.split(';')
connection_args = connections.pop(-1)
if len(connection_args) > 0:  # handle the case where no db number is given
connection_args = connection_args.split(':')
kw['db'] = connection_args.pop(0)
try:
kw['password'] = connection_args.pop(0)
except IndexError:
pass # password is optional
sentinels = [tuple(shost.split(':')) for shost in connections]
display.vv('\nUsing redis sentinels: %s' % sentinels)
scon = Sentinel(sentinels, **kw)
try:
return scon.master_for(self._sentinel_service_name, socket_timeout=0.2)
except Exception as exc:
raise AnsibleError('Could not connect to redis sentinel: %s' % to_native(exc))
connection = uri.split(':')
self._db = StrictRedis(*connection, **kw)
self._keys_set = 'ansible_cache_keys'
def _make_key(self, key):
return self._prefix + key
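A standalone sketch of how the connection strings documented above decompose; it mirrors the tls:// and ';' handling in __init__ but is illustrative only:

    def parse_uri(uri):
        kw = {}
        if uri.startswith('tls://'):      # encryption in transit
            kw['ssl'] = True
            uri = uri[len('tls://'):]
        if ';' in uri:                    # sentinel syntax: host;host;db:password
            parts = uri.split(';')
            trailing = parts.pop(-1).split(':')            # e.g. ['0', 'changeme']
            sentinels = [tuple(h.split(':')) for h in parts]
            return sentinels, trailing, kw
        return uri.split(':'), None, kw   # plain host:port:db:password

    print(parse_uri('tls://localhost:6379:0:changeme'))
    # (['localhost', '6379', '0', 'changeme'], None, {'ssl': True})
    print(parse_uri('localhost:26379;localhost2:26379;0:changeme'))
    # ([('localhost', '26379'), ('localhost2', '26379')], ['0', 'changeme'], {})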

View File

@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: yaml
cache: yaml
short_description: YAML formatted files.
description:
- This cache uses YAML formatted, per host, files saved to the filesystem.

View File

@@ -0,0 +1,60 @@
# (c) 2015, Andrew Gaffney <andrew@agaffney.org>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: actionable
type: stdout
short_description: shows only items that need attention
description:
- Use this callback when you do not care about OK or Skipped results.
- This callback suppresses any status that is not Failed or Changed.
deprecated:
why: The 'default' callback plugin now supports this functionality
removed_in: '2.0.0' # was Ansible 2.11
alternative: "'default' callback plugin with 'display_skipped_hosts = no' and 'display_ok_hosts = no' options"
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout callback in configuration
# Override defaults from 'default' callback plugin
options:
display_skipped_hosts:
name: Show skipped hosts
description: "Toggle to control displaying skipped task/host results in a task"
type: bool
default: no
env:
- name: DISPLAY_SKIPPED_HOSTS
deprecated:
why: environment variables without "ANSIBLE_" prefix are deprecated
version: "2.0.0" # was Ansible 2.12
alternatives: the "ANSIBLE_DISPLAY_SKIPPED_HOSTS" environment variable
- name: ANSIBLE_DISPLAY_SKIPPED_HOSTS
ini:
- key: display_skipped_hosts
section: defaults
display_ok_hosts:
name: Show 'ok' hosts
description: "Toggle to control displaying 'ok' task/host results in a task"
type: bool
default: no
env:
- name: ANSIBLE_DISPLAY_OK_HOSTS
ini:
- key: display_ok_hosts
section: defaults
'''
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'community.general.actionable'

View File

@@ -7,9 +7,8 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: cgroup_memory_recap
type: aggregate
callback: cgroup_memory_recap
callback_type: aggregate
requirements:
- whitelist in configuration
- cgroups

View File

@@ -6,8 +6,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: context_demo
callback: context_demo
type: aggregate
short_description: demo callback that adds play/task context
description:

View File

@@ -6,10 +6,14 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.plugins.callback import CallbackBase
from ansible.utils.color import colorize, hostcolor
from ansible.template import Templar
from ansible.playbook.task_include import TaskInclude
DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: counter_enabled
callback: counter_enabled
type: stdout
short_description: adds counters to the output items (tasks and hosts/task)
description:
@@ -22,12 +26,6 @@ DOCUMENTATION = '''
- set as stdout callback in ansible.cfg (stdout_callback = counter_enabled)
'''
from ansible import constants as C
from ansible.plugins.callback import CallbackBase
from ansible.utils.color import colorize, hostcolor
from ansible.template import Templar
from ansible.playbook.task_include import TaskInclude
class CallbackModule(CallbackBase):

View File

@@ -6,7 +6,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: dense
callback: dense
type: stdout
short_description: minimal stdout output
extends_documentation_fragment:

View File

@@ -7,8 +7,8 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
name: diy
type: stdout
callback: diy
callback_type: stdout
short_description: Customize the output
version_added: 0.2.0
description:

View File

@@ -0,0 +1,75 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: full_skip
type: stdout
short_description: suppresses tasks if all hosts skipped
description:
- Use this plugin when you do not care about any output for tasks that were completely skipped
deprecated:
why: The 'default' callback plugin now supports this functionality
removed_in: '2.0.0' # was Ansible 2.11
alternative: "'default' callback plugin with 'display_skipped_hosts = no' option"
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout in configuration
'''
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
'''
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'community.general.full_skip'
def v2_runner_on_skipped(self, result):
self.outlines = []
def v2_playbook_item_on_skipped(self, result):
self.outlines = []
def v2_runner_item_on_skipped(self, result):
self.outlines = []
def v2_runner_on_failed(self, result, ignore_errors=False):
self.display()
super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
def v2_playbook_on_task_start(self, task, is_conditional):
self.outlines = []
self.outlines.append("TASK [%s]" % task.get_name().strip())
if self._display.verbosity >= 2:
path = task.get_path()
if path:
self.outlines.append("task path: %s" % path)
def v2_playbook_item_on_ok(self, result):
self.display()
super(CallbackModule, self).v2_playbook_item_on_ok(result)
def v2_runner_on_ok(self, result):
self.display()
super(CallbackModule, self).v2_runner_on_ok(result)
def display(self):
if len(self.outlines) == 0:
return
(first, rest) = self.outlines[0], self.outlines[1:]
self._display.banner(first)
for line in rest:
self._display.display(line)
self.outlines = []
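The plugin above works by buffering each task banner and only flushing it once a non-skipped event arrives; stripped to its essentials the pattern is (illustrative, not part of the plugin):

    class BufferedBanner(object):
        def __init__(self):
            self.outlines = []

        def start_task(self, name):
            self.outlines = ['TASK [%s]' % name]   # reset the buffer per task

        def on_skipped(self):
            self.outlines = []                     # skipped: drop the banner entirely

        def flush(self, display):
            for line in self.outlines:             # ok/failed: the banner finally shows up
                display(line)
            self.outlines = []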

View File

@@ -6,9 +6,8 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: hipchat
type: notification
callback: hipchat
callback_type: notification
requirements:
- whitelist in configuration.
- prettytable (python lib)

View File

@@ -6,8 +6,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: jabber
callback: jabber
type: notification
short_description: post task events to a jabber server
description:

View File

@@ -6,8 +6,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: log_plays
callback: log_plays
type: notification
short_description: write playbook output to log file
description:

View File

@@ -5,9 +5,8 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: logdna
type: aggregate
callback: logdna
callback_type: aggregate
short_description: Sends playbook logs to LogDNA
description:
- This callback will report logs from playbook actions, tasks, and events to LogDNA (https://app.logdna.com)

View File

@@ -5,8 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: logentries
callback: logentries
type: notification
short_description: Sends events to Logentries
description:
@@ -76,7 +75,7 @@ examples: >
To enable, add this to your ansible.cfg file in the defaults block
[defaults]
callback_whitelist = community.general.logentries
callback_whitelist = logentries
Either set the environment variables
export LOGENTRIES_API=data.logentries.com

View File

@@ -1,13 +1,12 @@
# (C) 2020, Yevhen Khmelenko <ujenmr@gmail.com>
# (C) 2016, Ievgen Khmelenko <ujenmr@gmail.com>
# (C) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
author: Yevhen Khmelenko (@ujenmr)
name: logstash
DOCUMENTATION = '''
callback: logstash
type: notification
short_description: Sends events to Logstash
description:
@@ -43,61 +42,16 @@ DOCUMENTATION = r'''
key: type
version_added: 1.0.0
default: ansible
pre_command:
description: Executes a command before the run and puts its output in the ansible_pre_command_output field.
version_added: 2.0.0
ini:
- section: callback_logstash
key: pre_command
env:
- name: LOGSTASH_PRE_COMMAND
format_version:
description: Logging format
type: str
version_added: 2.0.0
ini:
- section: callback_logstash
key: format_version
env:
- name: LOGSTASH_FORMAT_VERSION
default: v1
choices:
- v1
- v2
'''
EXAMPLES = r'''
ansible.cfg: |
# Enable Callback plugin
[defaults]
callback_whitelist = community.general.logstash
[callback_logstash]
server = logstash.example.com
port = 5000
pre_command = git rev-parse HEAD
type = ansible
11-input-tcp.conf: |
# Enable Logstash TCP Input
input {
tcp {
port => 5000
codec => json
add_field => { "[@metadata][beat]" => "notify" }
add_field => { "[@metadata][type]" => "ansible" }
}
}
'''
import os
import json
import socket
import uuid
import logging
from datetime import datetime
import logging
try:
import logstash
HAS_LOGSTASH = True
@@ -108,78 +62,76 @@ from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
"""
ansible logstash callback plugin
ansible.cfg:
callback_plugins = <path_to_callback_plugins_folder>
callback_whitelist = logstash
and put the plugin in <path_to_callback_plugins_folder>
logstash config:
input {
tcp {
port => 5000
codec => json
}
}
Requires:
python-logstash
This plugin makes use of the following environment variables or ini config:
LOGSTASH_SERVER (optional): defaults to localhost
LOGSTASH_PORT (optional): defaults to 5000
LOGSTASH_TYPE (optional): defaults to ansible
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'community.general.logstash'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self):
super(CallbackModule, self).__init__()
def __init__(self, display=None):
super(CallbackModule, self).__init__(display=display)
if not HAS_LOGSTASH:
self.disabled = True
self._display.warning("The required python-logstash/python3-logstash is not installed. "
"pip install python-logstash for Python 2"
"pip install python3-logstash for Python 3")
self._display.warning("The required python-logstash is not installed. "
"pip install python-logstash")
self.start_time = datetime.utcnow()
def _init_plugin(self):
if not self.disabled:
self.logger = logging.getLogger('python-logstash-logger')
self.logger.setLevel(logging.DEBUG)
self.handler = logstash.TCPLogstashHandler(
self.ls_server,
self.ls_port,
version=1,
message_type=self.ls_type
)
self.logger.addHandler(self.handler)
self.hostname = socket.gethostname()
self.session = str(uuid.uuid4())
self.errors = 0
self.base_data = {
'session': self.session,
'host': self.hostname
}
if self.ls_pre_command is not None:
self.base_data['ansible_pre_command_output'] = os.popen(
self.ls_pre_command).read()
if self._options is not None:
self.base_data['ansible_checkmode'] = self._options.check
self.base_data['ansible_tags'] = self._options.tags
self.base_data['ansible_skip_tags'] = self._options.skip_tags
self.base_data['inventory'] = self._options.inventory
def set_options(self, task_keys=None, var_options=None, direct=None):
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
self.ls_server = self.get_option('server')
self.ls_port = int(self.get_option('port'))
self.ls_type = self.get_option('type')
self.ls_pre_command = self.get_option('pre_command')
self.ls_format_version = self.get_option('format_version')
self.logger = logging.getLogger('python-logstash-logger')
self.logger.setLevel(logging.DEBUG)
self._init_plugin()
self.logstash_server = self.get_option('server')
self.logstash_port = self.get_option('port')
self.logstash_type = self.get_option('type')
self.handler = logstash.TCPLogstashHandler(
self.logstash_server,
int(self.logstash_port),
version=1,
message_type=self.logstash_type
)
self.logger.addHandler(self.handler)
self.hostname = socket.gethostname()
self.session = str(uuid.uuid1())
self.errors = 0
def v2_playbook_on_start(self, playbook):
data = self.base_data.copy()
data['ansible_type'] = "start"
data['status'] = "OK"
data['ansible_playbook'] = playbook._file_name
if (self.ls_format_version == "v2"):
self.logger.info(
"START PLAYBOOK | %s", data['ansible_playbook'], extra=data
)
else:
self.logger.info("ansible start", extra=data)
self.playbook = playbook._file_name
data = {
'status': "OK",
'host': self.hostname,
'session': self.session,
'ansible_type': "start",
'ansible_playbook': self.playbook,
}
self.logger.info("ansible start", extra=data)
def v2_playbook_on_stats(self, stats):
end_time = datetime.utcnow()
@@ -193,201 +145,103 @@ class CallbackModule(CallbackBase):
else:
status = "FAILED"
data = self.base_data.copy()
data['ansible_type'] = "finish"
data['status'] = status
data['ansible_playbook_duration'] = runtime.total_seconds()
data['ansible_result'] = json.dumps(summarize_stat) # deprecated field
if (self.ls_format_version == "v2"):
self.logger.info(
"FINISH PLAYBOOK | %s", json.dumps(summarize_stat), extra=data
)
else:
self.logger.info("ansible stats", extra=data)
def v2_playbook_on_play_start(self, play):
self.play_id = str(play._uuid)
if play.name:
self.play_name = play.name
data = self.base_data.copy()
data['ansible_type'] = "start"
data['status'] = "OK"
data['ansible_play_id'] = self.play_id
data['ansible_play_name'] = self.play_name
if (self.ls_format_version == "v2"):
self.logger.info("START PLAY | %s", self.play_name, extra=data)
else:
self.logger.info("ansible play", extra=data)
def v2_playbook_on_task_start(self, task, is_conditional):
self.task_id = str(task._uuid)
'''
Tasks and handler tasks are dealt with here
'''
data = {
'status': status,
'host': self.hostname,
'session': self.session,
'ansible_type': "finish",
'ansible_playbook': self.playbook,
'ansible_playbook_duration': runtime.total_seconds(),
'ansible_result': json.dumps(summarize_stat),
}
self.logger.info("ansible stats", extra=data)
def v2_runner_on_ok(self, result, **kwargs):
task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')
data = self.base_data.copy()
if task_name == 'setup':
data['ansible_type'] = "setup"
data['status'] = "OK"
data['ansible_host'] = result._host.name
data['ansible_play_id'] = self.play_id
data['ansible_play_name'] = self.play_name
data['ansible_task'] = task_name
data['ansible_facts'] = self._dump_results(result._result)
if (self.ls_format_version == "v2"):
self.logger.info(
"SETUP FACTS | %s", self._dump_results(result._result), extra=data
)
else:
self.logger.info("ansible facts", extra=data)
else:
if 'changed' in result._result.keys():
data['ansible_changed'] = result._result['changed']
else:
data['ansible_changed'] = False
data['ansible_type'] = "task"
data['status'] = "OK"
data['ansible_host'] = result._host.name
data['ansible_play_id'] = self.play_id
data['ansible_play_name'] = self.play_name
data['ansible_task'] = task_name
data['ansible_task_id'] = self.task_id
data['ansible_result'] = self._dump_results(result._result)
if (self.ls_format_version == "v2"):
self.logger.info(
"TASK OK | %s | RESULT | %s",
task_name, self._dump_results(result._result), extra=data
)
else:
self.logger.info("ansible ok", extra=data)
data = {
'status': "OK",
'host': self.hostname,
'session': self.session,
'ansible_type': "task",
'ansible_playbook': self.playbook,
'ansible_host': result._host.name,
'ansible_task': result._task,
'ansible_result': self._dump_results(result._result)
}
self.logger.info("ansible ok", extra=data)
def v2_runner_on_skipped(self, result, **kwargs):
task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')
data = self.base_data.copy()
data['ansible_type'] = "task"
data['status'] = "SKIPPED"
data['ansible_host'] = result._host.name
data['ansible_play_id'] = self.play_id
data['ansible_play_name'] = self.play_name
data['ansible_task'] = task_name
data['ansible_task_id'] = self.task_id
data['ansible_result'] = self._dump_results(result._result)
if (self.ls_format_version == "v2"):
self.logger.info("TASK SKIPPED | %s", task_name, extra=data)
else:
self.logger.info("ansible skipped", extra=data)
data = {
'status': "SKIPPED",
'host': self.hostname,
'session': self.session,
'ansible_type': "task",
'ansible_playbook': self.playbook,
'ansible_task': result._task,
'ansible_host': result._host.name
}
self.logger.info("ansible skipped", extra=data)
def v2_playbook_on_import_for_host(self, result, imported_file):
data = self.base_data.copy()
data['ansible_type'] = "import"
data['status'] = "IMPORTED"
data['ansible_host'] = result._host.name
data['ansible_play_id'] = self.play_id
data['ansible_play_name'] = self.play_name
data['imported_file'] = imported_file
if (self.ls_format_version == "v2"):
self.logger.info("IMPORT | %s", imported_file, extra=data)
else:
self.logger.info("ansible import", extra=data)
data = {
'status': "IMPORTED",
'host': self.hostname,
'session': self.session,
'ansible_type': "import",
'ansible_playbook': self.playbook,
'ansible_host': result._host.name,
'imported_file': imported_file
}
self.logger.info("ansible import", extra=data)
def v2_playbook_on_not_import_for_host(self, result, missing_file):
data = self.base_data.copy()
data['ansible_type'] = "import"
data['status'] = "NOT IMPORTED"
data['ansible_host'] = result._host.name
data['ansible_play_id'] = self.play_id
data['ansible_play_name'] = self.play_name
data['imported_file'] = missing_file
if (self.ls_format_version == "v2"):
self.logger.info("NOT IMPORTED | %s", missing_file, extra=data)
else:
self.logger.info("ansible import", extra=data)
data = {
'status': "NOT IMPORTED",
'host': self.hostname,
'session': self.session,
'ansible_type': "import",
'ansible_playbook': self.playbook,
'ansible_host': result._host.name,
'missing_file': missing_file
}
self.logger.info("ansible import", extra=data)
def v2_runner_on_failed(self, result, **kwargs):
task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')
data = self.base_data.copy()
if 'changed' in result._result.keys():
data['ansible_changed'] = result._result['changed']
else:
data['ansible_changed'] = False
data['ansible_type'] = "task"
data['status'] = "FAILED"
data['ansible_host'] = result._host.name
data['ansible_play_id'] = self.play_id
data['ansible_play_name'] = self.play_name
data['ansible_task'] = task_name
data['ansible_task_id'] = self.task_id
data['ansible_result'] = self._dump_results(result._result)
data = {
'status': "FAILED",
'host': self.hostname,
'session': self.session,
'ansible_type': "task",
'ansible_playbook': self.playbook,
'ansible_host': result._host.name,
'ansible_task': result._task,
'ansible_result': self._dump_results(result._result)
}
self.errors += 1
if (self.ls_format_version == "v2"):
self.logger.error(
"TASK FAILED | %s | HOST | %s | RESULT | %s",
task_name, self.hostname,
self._dump_results(result._result), extra=data
)
else:
self.logger.error("ansible failed", extra=data)
self.logger.error("ansible failed", extra=data)
def v2_runner_on_unreachable(self, result, **kwargs):
task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')
data = self.base_data.copy()
data['ansible_type'] = "task"
data['status'] = "UNREACHABLE"
data['ansible_host'] = result._host.name
data['ansible_play_id'] = self.play_id
data['ansible_play_name'] = self.play_name
data['ansible_task'] = task_name
data['ansible_task_id'] = self.task_id
data['ansible_result'] = self._dump_results(result._result)
self.errors += 1
if (self.ls_format_version == "v2"):
self.logger.error(
"UNREACHABLE | %s | HOST | %s | RESULT | %s",
task_name, self.hostname,
self._dump_results(result._result), extra=data
)
else:
self.logger.error("ansible unreachable", extra=data)
data = {
'status': "UNREACHABLE",
'host': self.hostname,
'session': self.session,
'ansible_type': "task",
'ansible_playbook': self.playbook,
'ansible_host': result._host.name,
'ansible_task': result._task,
'ansible_result': self._dump_results(result._result)
}
self.logger.error("ansible unreachable", extra=data)
def v2_runner_on_async_failed(self, result, **kwargs):
task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')
data = self.base_data.copy()
data['ansible_type'] = "task"
data['status'] = "FAILED"
data['ansible_host'] = result._host.name
data['ansible_play_id'] = self.play_id
data['ansible_play_name'] = self.play_name
data['ansible_task'] = task_name
data['ansible_task_id'] = self.task_id
data['ansible_result'] = self._dump_results(result._result)
data = {
'status': "FAILED",
'host': self.hostname,
'session': self.session,
'ansible_type': "task",
'ansible_playbook': self.playbook,
'ansible_host': result._host.name,
'ansible_task': result._task,
'ansible_result': self._dump_results(result._result)
}
self.errors += 1
if (self.ls_format_version == "v2"):
self.logger.error(
"ASYNC FAILED | %s | HOST | %s | RESULT | %s",
task_name, self.hostname,
self._dump_results(result._result), extra=data
)
else:
self.logger.error("ansible async", extra=data)
self.logger.error("ansible async", extra=data)

View File

@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: mail
callback: mail
type: notification
short_description: Sends failure events via email
description:
@@ -52,7 +52,7 @@ options:
ini:
- section: callback_mail
key: bcc
notes:
note:
- "TODO: expand configuration options now that plugins can leverage Ansible's configuration"
'''

View File

@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: nrdp
callback: nrdp
type: notification
author: "Remi VERCHERE (@rverchere)"
short_description: post task results to a Nagios server through NRDP

View File

@@ -6,9 +6,8 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: 'null'
type: stdout
callback: 'null'
callback_type: stdout
requirements:
- set as main display callback
short_description: Don't display stuff to screen

View File

@@ -7,8 +7,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: say
callback: say
type: notification
requirements:
- whitelisting in configuration

View File

@@ -6,9 +6,8 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: selective
type: stdout
callback: selective
callback_type: stdout
requirements:
- set as main display callback
short_description: only print certain tasks

View File

@@ -7,9 +7,8 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: slack
type: notification
callback: slack
callback_type: notification
requirements:
- whitelist in configuration
- prettytable (python library)

View File

@@ -18,10 +18,10 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: splunk
callback: splunk
type: aggregate
short_description: Sends task result events to Splunk HTTP Event Collector
author: "Stuart Hirst (!UNKNOWN) <support@convergingdata.com>"
author: "Stuart Hirst <support@convergingdata.com>"
description:
- This callback plugin will send task results as JSON formatted events to a Splunk HTTP collector.
- The companion Splunk Monitoring & Diagnostics App is available here "https://splunkbase.splunk.com/app/4023/"
@@ -57,24 +57,13 @@ DOCUMENTATION = '''
type: bool
default: true
version_added: '1.0.0'
include_milliseconds:
description: Whether to include milliseconds as part of the generated timestamp field in the event
sent to the Splunk HTTP collector
env:
- name: SPLUNK_INCLUDE_MILLISECONDS
ini:
- section: callback_splunk
key: include_milliseconds
type: bool
default: false
version_added: 2.0.0
'''
EXAMPLES = '''
examples: >
To enable, add this to your ansible.cfg file in the defaults block
[defaults]
callback_whitelist = community.general.splunk
callback_whitelist = splunk
Set the environment variable
export SPLUNK_URL=http://mysplunkinstance.datapaas.io:8088/services/collector/event
export SPLUNK_AUTHTOKEN=f23blad6-5965-4537-bf69-5b5a545blabla88
@@ -107,7 +96,7 @@ class SplunkHTTPCollectorSource(object):
self.ip_address = socket.gethostbyname(socket.gethostname())
self.user = getpass.getuser()
def send_event(self, url, authtoken, validate_certs, include_milliseconds, state, result, runtime):
def send_event(self, url, authtoken, validate_certs, state, result, runtime):
if result._task_fields['args'].get('_ansible_check_mode') is True:
self.ansible_check_mode = True
@@ -127,13 +116,8 @@ class SplunkHTTPCollectorSource(object):
data['uuid'] = result._task._uuid
data['session'] = self.session
data['status'] = state
if include_milliseconds:
time_format = '%Y-%m-%d %H:%M:%S.%f +0000'
else:
time_format = '%Y-%m-%d %H:%M:%S +0000'
data['timestamp'] = datetime.utcnow().strftime(time_format)
data['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S '
'+0000')
data['host'] = self.host
data['ip_address'] = self.ip_address
data['user'] = self.user
@@ -174,7 +158,6 @@ class CallbackModule(CallbackBase):
self.url = None
self.authtoken = None
self.validate_certs = None
self.include_milliseconds = None
self.splunk = SplunkHTTPCollectorSource()
def _runtime(self, result):
@@ -210,8 +193,6 @@ class CallbackModule(CallbackBase):
self.validate_certs = self.get_option('validate_certs')
self.include_milliseconds = self.get_option('include_milliseconds')
def v2_playbook_on_start(self, playbook):
self.splunk.ansible_playbook = basename(playbook._file_name)
@@ -226,7 +207,6 @@ class CallbackModule(CallbackBase):
self.url,
self.authtoken,
self.validate_certs,
self.include_milliseconds,
'OK',
result,
self._runtime(result)
@@ -237,7 +217,6 @@ class CallbackModule(CallbackBase):
self.url,
self.authtoken,
self.validate_certs,
self.include_milliseconds,
'SKIPPED',
result,
self._runtime(result)
@@ -248,7 +227,6 @@ class CallbackModule(CallbackBase):
self.url,
self.authtoken,
self.validate_certs,
self.include_milliseconds,
'FAILED',
result,
self._runtime(result)
@@ -259,7 +237,6 @@ class CallbackModule(CallbackBase):
self.url,
self.authtoken,
self.validate_certs,
self.include_milliseconds,
'FAILED',
result,
self._runtime(result)
@@ -270,7 +247,6 @@ class CallbackModule(CallbackBase):
self.url,
self.authtoken,
self.validate_certs,
self.include_milliseconds,
'UNREACHABLE',
result,
self._runtime(result)
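The include_milliseconds option removed above only switches the strftime format; note that %f actually yields microseconds. A quick standalone illustration:

    from datetime import datetime

    now = datetime.utcnow()
    print(now.strftime('%Y-%m-%d %H:%M:%S +0000'))     # e.g. 2021-01-26 09:15:02 +0000
    print(now.strftime('%Y-%m-%d %H:%M:%S.%f +0000'))  # e.g. 2021-01-26 09:15:02.123456 +0000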

View File

@@ -0,0 +1,70 @@
# (c) 2017, Frederic Van Espen <github@freh.be>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: stderr
callback_type: stdout
requirements:
- set as main display callback
short_description: Splits output, sending failed tasks to stderr
deprecated:
why: The 'default' callback plugin now supports this functionality
removed_in: '2.0.0' # was Ansible 2.11
alternative: "'default' callback plugin with 'display_failed_stderr = yes' option"
extends_documentation_fragment:
- default_callback
description:
- This is the stderr callback plugin; it behaves like the default callback plugin but sends error output to stderr.
- It also does not output skipped host/task/item status.
'''
from ansible import constants as C
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
'''
This is the stderr callback plugin, which reuses the default
callback plugin but sends error output to stderr.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'community.general.stderr'
def __init__(self):
self.super_ref = super(CallbackModule, self)
self.super_ref.__init__()
def v2_runner_on_failed(self, result, ignore_errors=False):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
self._handle_exception(result._result, use_stderr=True)
self._handle_warnings(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
if delegated_vars:
self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
self._dump_results(result._result)), color=C.COLOR_ERROR,
stderr=True)
else:
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)),
color=C.COLOR_ERROR, stderr=True)
if ignore_errors:
self._display.display("...ignoring", color=C.COLOR_SKIP)

View File

@@ -18,7 +18,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: sumologic
callback: sumologic
type: aggregate
short_description: Sends task result events to Sumologic
author: "Ryan Currah (@ryancurrah)"
@@ -42,7 +42,7 @@ EXAMPLES = '''
examples: >
To enable, add this to your ansible.cfg file in the defaults block
[defaults]
callback_whitelist = community.general.sumologic
callback_whitelist = sumologic
Set the environment variable
export SUMOLOGIC_URL=https://endpoint1.collection.us2.sumologic.com/receiver/v1/http/R8moSv1d8EW9LAUFZJ6dbxCFxwLH6kfCdcBfddlfxCbLuL-BN5twcTpMk__pYy_cDmp==

View File

@@ -6,9 +6,8 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: syslog_json
type: notification
callback: syslog_json
callback_type: notification
requirements:
- whitelist in configuration
short_description: sends JSON events to syslog

View File

@@ -7,9 +7,9 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: unixy
callback: unixy
type: stdout
author: Allyson Bowles (@akatch)
author: Allyson Bowles <@akatch>
short_description: condensed Ansible output
description:
- Consolidated Ansible output in the style of LINUX/UNIX startup logs.

View File

@@ -6,8 +6,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: yaml
callback: yaml
type: stdout
short_description: yaml-ized Ansible screen output
description:
@@ -50,7 +49,7 @@ def my_represent_scalar(self, tag, value, style=None):
# ...no trailing space
value = value.rstrip()
# ...and non-printable characters
value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
value = ''.join(x for x in value if x in string.printable)
# ...tabs prevent blocks from expanding
value = value.expandtabs()
# ...and odd bits of whitespace
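The two variants of the printable filter above differ only in whether characters at or above U+00A0 survive; a standalone check (illustrative):

    import string

    value = u'caf\xe9\x07'   # a non-ASCII letter plus a BEL control character
    ascii_only = ''.join(x for x in value if x in string.printable)
    keep_high = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
    print(ascii_only)  # 'caf'  (both the accent and the control char are dropped)
    print(keep_high)   # 'café' (only the control char is dropped)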

View File

@@ -9,8 +9,8 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Maykel Moya (!UNKNOWN) <mmoya@speedyrails.com>
name: chroot
author: Maykel Moya <mmoya@speedyrails.com>
connection: chroot
short_description: Interact with local chroot
description:
- Run commands or put/fetch files to an existing chroot on the Ansible controller.

View File

@@ -0,0 +1,364 @@
# Based on the chroot connection plugin by Maykel Moya
#
# (c) 2014, Lorin Hochstein
# (c) 2015, Leendert Brouwer (https://github.com/objectified)
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author:
- Lorin Hochstein
- Leendert Brouwer
connection: docker
short_description: Run tasks in docker containers
description:
- Run commands or put/fetch files to an existing docker container.
options:
remote_user:
description:
- The user to execute as inside the container
vars:
- name: ansible_user
- name: ansible_docker_user
docker_extra_args:
description:
- Extra arguments to pass to the docker command line
default: ''
remote_addr:
description:
- The name of the container you want to access.
default: inventory_hostname
vars:
- name: ansible_host
- name: ansible_docker_host
'''
import distutils.spawn
import fcntl
import os
import os.path
import subprocess
import re
from distutils.version import LooseVersion
import ansible.constants as C
from ansible.compat import selectors
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase, BUFSIZE
from ansible.utils.display import Display
display = Display()
class Connection(ConnectionBase):
''' Local docker based connections '''
transport = 'community.general.docker'
has_pipelining = True
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
# Note: docker supports running as non-root in some configurations.
# (For instance, setting the UNIX socket file to be readable and
# writable by a specific UNIX group and then putting users into that
# group). Therefore we don't check that the user is root when using
# this connection. But if the user is getting a permission denied
# error it probably means that docker on their system is only
# configured to be connected to by root and they are not running as
# root.
# Windows uses Powershell modules
if getattr(self._shell, "_IS_WINDOWS", False):
self.module_implementation_preferences = ('.ps1', '.exe', '')
if 'docker_command' in kwargs:
self.docker_cmd = kwargs['docker_command']
else:
self.docker_cmd = distutils.spawn.find_executable('docker')
if not self.docker_cmd:
raise AnsibleError("docker command not found in PATH")
docker_version = self._get_docker_version()
if docker_version == u'dev':
display.warning(u'Docker version number is "dev". Will assume latest version.')
if docker_version != u'dev' and LooseVersion(docker_version) < LooseVersion(u'1.3'):
raise AnsibleError('docker connection type requires docker 1.3 or higher')
# The remote user we will request from docker (if supported)
self.remote_user = None
# The actual user which will execute commands in docker (if known)
self.actual_user = None
if self._play_context.remote_user is not None:
if docker_version == u'dev' or LooseVersion(docker_version) >= LooseVersion(u'1.7'):
# Support for specifying the exec user was added in docker 1.7
self.remote_user = self._play_context.remote_user
self.actual_user = self.remote_user
else:
self.actual_user = self._get_docker_remote_user()
if self.actual_user != self._play_context.remote_user:
display.warning(u'docker {0} does not support remote_user, using container default: {1}'
.format(docker_version, self.actual_user or u'?'))
elif self._display.verbosity > 2:
# Since we're not setting the actual_user, look it up so we have it for logging later
# Only do this if display verbosity is high enough that we'll need the value
# This saves overhead from calling into docker when we don't need to
self.actual_user = self._get_docker_remote_user()
@staticmethod
def _sanitize_version(version):
return re.sub(u'[^0-9a-zA-Z.]', u'', version)
def _old_docker_version(self):
cmd_args = []
if self._play_context.docker_extra_args:
cmd_args += self._play_context.docker_extra_args.split(' ')
old_version_subcommand = ['version']
old_docker_cmd = [self.docker_cmd] + cmd_args + old_version_subcommand
p = subprocess.Popen(old_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
cmd_output, err = p.communicate()
return old_docker_cmd, to_native(cmd_output), err, p.returncode
def _new_docker_version(self):
# no result yet, must be newer Docker version
cmd_args = []
if self._play_context.docker_extra_args:
cmd_args += self._play_context.docker_extra_args.split(' ')
new_version_subcommand = ['version', '--format', "'{{.Server.Version}}'"]
new_docker_cmd = [self.docker_cmd] + cmd_args + new_version_subcommand
p = subprocess.Popen(new_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
cmd_output, err = p.communicate()
return new_docker_cmd, to_native(cmd_output), err, p.returncode
def _get_docker_version(self):
cmd, cmd_output, err, returncode = self._old_docker_version()
if returncode == 0:
for line in to_text(cmd_output, errors='surrogate_or_strict').split(u'\n'):
if line.startswith(u'Server version:'): # old docker versions
return self._sanitize_version(line.split()[2])
cmd, cmd_output, err, returncode = self._new_docker_version()
if returncode:
raise AnsibleError('Docker version check (%s) failed: %s' % (to_native(cmd), to_native(err)))
return self._sanitize_version(to_text(cmd_output, errors='surrogate_or_strict'))
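# Illustration (not part of the plugin): old clients answer a plain
# `docker version` with a "Server version: 1.2.0" line, while newer ones
# support `docker version --format '{{.Server.Version}}'`;
# _sanitize_version() then strips the quoting and whitespace, e.g.
#   re.sub(u'[^0-9a-zA-Z.]', u'', u"'19.03.5'\n")  ->  u'19.03.5'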
def _get_docker_remote_user(self):
""" Get the default user configured in the docker container """
p = subprocess.Popen([self.docker_cmd, 'inspect', '--format', '{{.Config.User}}', self._play_context.remote_addr],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
out = to_text(out, errors='surrogate_or_strict')
if p.returncode != 0:
display.warning(u'unable to retrieve default user from docker container: %s %s' % (out, to_text(err)))
return None
# The default exec user is root, unless it was changed in the Dockerfile with USER
return out.strip() or u'root'
def _build_exec_cmd(self, cmd):
""" Build the local docker exec command to run cmd on remote_host
If remote_user is available and is supported by the docker
version we are using, it will be provided to docker exec.
"""
local_cmd = [self.docker_cmd]
if self._play_context.docker_extra_args:
local_cmd += self._play_context.docker_extra_args.split(' ')
local_cmd += [b'exec']
if self.remote_user is not None:
local_cmd += [b'-u', self.remote_user]
# -i is needed to keep stdin open which allows pipelining to work
local_cmd += [b'-i', self._play_context.remote_addr] + cmd
return local_cmd
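# Illustration (hypothetical values, not part of the plugin): with
# docker_extra_args='--tlsverify', remote_user='app' and remote_addr='web1',
# the list built above is roughly
#   ['docker', '--tlsverify', b'exec', b'-u', 'app', b'-i', 'web1', '/bin/sh', '-c', 'echo hi']
# i.e. the shell command `docker --tlsverify exec -u app -i web1 /bin/sh -c 'echo hi'`.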
def _connect(self, port=None):
""" Connect to the container. Nothing to do """
super(Connection, self)._connect()
if not self._connected:
display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
self.actual_user or u'?'), host=self._play_context.remote_addr
)
self._connected = True
def exec_command(self, cmd, in_data=None, sudoable=False):
""" Run a command on the docker host """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd])
display.vvv(u"EXEC {0}".format(to_text(local_cmd)), host=self._play_context.remote_addr)
display.debug("opening command with Popen()")
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
p = subprocess.Popen(
local_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
display.debug("done running command with Popen()")
if self.become and self.become.expect_prompt() and sudoable:
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
selector = selectors.DefaultSelector()
selector.register(p.stdout, selectors.EVENT_READ)
selector.register(p.stderr, selectors.EVENT_READ)
become_output = b''
try:
while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
events = selector.select(self._play_context.timeout)
if not events:
stdout, stderr = p.communicate()
raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))
for key, event in events:
if key.fileobj == p.stdout:
chunk = p.stdout.read()
elif key.fileobj == p.stderr:
chunk = p.stderr.read()
if not chunk:
stdout, stderr = p.communicate()
raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
become_output += chunk
finally:
selector.close()
if not self.become.check_success(become_output):
become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
display.debug("getting output with communicate()")
stdout, stderr = p.communicate(in_data)
display.debug("done communicating")
display.debug("done with docker.exec_command()")
return (p.returncode, stdout, stderr)
def _prefix_login_path(self, remote_path):
''' Make sure that we put files into a standard path
If a path is relative, then we need to choose where to put it.
ssh chooses $HOME but we aren't guaranteed that a home dir will
exist in any given chroot. So for now we're choosing "/" instead.
This also happens to be the former default.
Can revisit using $HOME instead if it's a problem
'''
if getattr(self._shell, "_IS_WINDOWS", False):
import ntpath
return ntpath.normpath(remote_path)
else:
if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path)
return os.path.normpath(remote_path)
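# Illustrative inputs/outputs for the POSIX branch:
#   _prefix_login_path('foo/../bar') -> '/bar'
#   _prefix_login_path('/tmp/x')     -> '/tmp/x'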
def put_file(self, in_path, out_path):
""" Transfer a file from local to docker container """
super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
out_path = self._prefix_login_path(out_path)
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound(
"file or module does not exist: %s" % to_native(in_path))
out_path = shlex_quote(out_path)
# Older docker doesn't have native support for copying files into
# running containers, so we use docker exec to implement this
# Although docker version 1.8 and later provide support, the
# owner and group of the files are always set to root
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
if not os.fstat(in_file.fileno()).st_size:
count = ' count=0'
else:
count = ''
args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)])
args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
try:
p = subprocess.Popen(args, stdin=in_file,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
raise AnsibleError("docker connection requires dd command in the container to put files")
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" %
(to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
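# Roughly the shell pipeline implemented above (illustrative; container name,
# paths and BUFSIZE are placeholders):
#   docker exec -i CONTAINER /bin/sh -c 'dd of=/remote/path bs=BUFSIZE' < /local/path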
def fetch_file(self, in_path, out_path):
""" Fetch a file from container to local. """
super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
in_path = self._prefix_login_path(in_path)
# out_path is the final file path, but docker takes a directory, not a
# file path
out_dir = os.path.dirname(out_path)
args = [self.docker_cmd, "cp", "%s:%s" % (self._play_context.remote_addr, in_path), out_dir]
args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
p = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if getattr(self._shell, "_IS_WINDOWS", False):
import ntpath
actual_out_path = ntpath.join(out_dir, ntpath.basename(in_path))
else:
actual_out_path = os.path.join(out_dir, os.path.basename(in_path))
if p.returncode != 0:
# Older docker doesn't have native support for the `cp` command to fetch files
# If `cp` fails, try to use `dd` instead
args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file:
try:
p = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=out_file, stderr=subprocess.PIPE)
except OSError:
raise AnsibleError("docker connection requires dd command in the container to put files")
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
# Rename if needed
if actual_out_path != out_path:
os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict'))
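# The dd fallback above is roughly equivalent to (same placeholders as before):
#   docker exec -i CONTAINER /bin/sh -c 'dd if=/remote/path bs=BUFSIZE' > /local/out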
def close(self):
""" Terminate the connection. Nothing to do for Docker"""
super(Connection, self).close()
self._connected = False

View File

@@ -9,7 +9,7 @@ __metaclass__ = type
DOCUMENTATION = '''
author: Michael Scherer (@mscherer) <misc@zarb.org>
name: funcd
connection: funcd
short_description: Use funcd to connect to target
description:
- This transport permits you to use Ansible over Func.

View File

@@ -9,8 +9,8 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Stephan Lohse (!UNKNOWN) <dev-github@ploek.org>
name: iocage
author: Stephan Lohse <dev-github@ploek.org>
connection: iocage
short_description: Run tasks in iocage jails
description:
- Run commands or put/fetch files to an existing iocage jail

View File

@@ -10,7 +10,7 @@ __metaclass__ = type
DOCUMENTATION = '''
author: Ansible Core Team
name: jail
connection: jail
short_description: Run tasks in jails
description:
- Run commands or put/fetch files to an existing jail

View File

@@ -6,8 +6,8 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Joerg Thalheim (!UNKNOWN) <joerg@higgsboson.tk>
name: lxc
author: Joerg Thalheim <joerg@higgsboson.tk>
connection: lxc
short_description: Run tasks in lxc containers via lxc python library
description:
- Run commands or put/fetch files to an existing lxc container using lxc python library

View File

@@ -6,15 +6,15 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Matt Clay (@mattclay) <matt@mystile.com>
name: lxd
author: Matt Clay <matt@mystile.com>
connection: lxd
short_description: Run tasks in lxc containers via lxc CLI
description:
- Run commands or put/fetch files to an existing lxc container using lxc CLI
options:
remote_addr:
description:
- Container identifier.
- Container identifier
default: inventory_hostname
vars:
- name: ansible_host
@@ -26,19 +26,6 @@ DOCUMENTATION = '''
vars:
- name: ansible_executable
- name: ansible_lxd_executable
remote:
description:
- Name of the LXD remote to use.
default: local
vars:
- name: ansible_lxd_remote
version_added: 2.0.0
project:
description:
- Name of the LXD project to use.
vars:
- name: ansible_lxd_project
version_added: 2.0.0
'''
import os
@@ -83,15 +70,7 @@ class Connection(ConnectionBase):
self._display.vvv(u"EXEC {0}".format(cmd), host=self._host)
local_cmd = [self._lxc_cmd]
if self.get_option("project"):
local_cmd.extend(["--project", self.get_option("project")])
local_cmd.extend([
"exec",
"%s:%s" % (self.get_option("remote"), self._host),
"--",
self._play_context.executable, "-c", cmd
])
local_cmd = [self._lxc_cmd, "exec", self._host, "--", self._play_context.executable, "-c", cmd]
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')
@@ -119,14 +98,7 @@ class Connection(ConnectionBase):
if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound("input path is not a file: %s" % in_path)
local_cmd = [self._lxc_cmd]
if self.get_option("project"):
local_cmd.extend(["--project", self.get_option("project")])
local_cmd.extend([
"file", "push",
in_path,
"%s:%s/%s" % (self.get_option("remote"), self._host, out_path)
])
local_cmd = [self._lxc_cmd, "file", "push", in_path, self._host + "/" + out_path]
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
@@ -139,14 +111,7 @@ class Connection(ConnectionBase):
self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host)
local_cmd = [self._lxc_cmd]
if self.get_option("project"):
local_cmd.extend(["--project", self.get_option("project")])
local_cmd.extend([
"file", "pull",
"%s:%s/%s" % (self.get_option("remote"), self._host, in_path),
out_path
])
local_cmd = [self._lxc_cmd, "file", "pull", self._host + "/" + in_path, out_path]
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]

plugins/connection/oc.py Normal file
View File

@@ -0,0 +1,173 @@
# Based on the docker connection plugin
#
# Connection plugin for configuring kubernetes containers with kubectl
# (c) 2017, XuXinkun <xuxinkun@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author:
- xuxinkun
connection: oc
short_description: Execute tasks in pods running on OpenShift.
description:
- Use the oc exec command to run tasks in, or put/fetch files to, pods running on the OpenShift
container platform.
requirements:
- oc (go binary)
options:
oc_pod:
description:
- Pod name. Required when the host name does not match pod name.
default: ''
vars:
- name: ansible_oc_pod
env:
- name: K8S_AUTH_POD
oc_container:
description:
- Container name. Required when a pod contains more than one container.
default: ''
vars:
- name: ansible_oc_container
env:
- name: K8S_AUTH_CONTAINER
oc_namespace:
description:
- The namespace of the pod.
default: ''
vars:
- name: ansible_oc_namespace
env:
- name: K8S_AUTH_NAMESPACE
oc_extra_args:
description:
- Extra arguments to pass to the oc command line.
default: ''
vars:
- name: ansible_oc_extra_args
env:
- name: K8S_AUTH_EXTRA_ARGS
oc_kubeconfig:
description:
- Path to an oc config file. Defaults to I(~/.kube/config)
default: ''
vars:
- name: ansible_oc_kubeconfig
- name: ansible_oc_config
env:
- name: K8S_AUTH_KUBECONFIG
oc_context:
description:
- The name of a context found in the K8s config file.
default: ''
vars:
- name: ansible_oc_context
env:
- name: K8S_AUTH_CONTEXT
oc_host:
description:
- URL for accessing the API.
default: ''
vars:
- name: ansible_oc_host
- name: ansible_oc_server
env:
- name: K8S_AUTH_HOST
- name: K8S_AUTH_SERVER
oc_token:
description:
- API authentication bearer token.
vars:
- name: ansible_oc_token
- name: ansible_oc_api_key
env:
- name: K8S_AUTH_TOKEN
- name: K8S_AUTH_API_KEY
client_cert:
description:
- Path to a certificate used to authenticate with the API.
default: ''
vars:
- name: ansible_oc_cert_file
- name: ansible_oc_client_cert
env:
- name: K8S_AUTH_CERT_FILE
aliases: [ oc_cert_file ]
client_key:
description:
- Path to a key file used to authenticate with the API.
default: ''
vars:
- name: ansible_oc_key_file
- name: ansible_oc_client_key
env:
- name: K8S_AUTH_KEY_FILE
aliases: [ oc_key_file ]
ca_cert:
description:
- Path to a CA certificate used to authenticate with the API.
default: ''
vars:
- name: ansible_oc_ssl_ca_cert
- name: ansible_oc_ca_cert
env:
- name: K8S_AUTH_SSL_CA_CERT
aliases: [ oc_ssl_ca_cert ]
validate_certs:
description:
- Whether or not to verify the API server's SSL certificate. Defaults to I(true).
default: ''
vars:
- name: ansible_oc_verify_ssl
- name: ansible_oc_validate_certs
env:
- name: K8S_AUTH_VERIFY_SSL
aliases: [ oc_verify_ssl ]
'''
from ansible_collections.community.kubernetes.plugins.connection.kubectl import Connection as KubectlConnection
CONNECTION_TRANSPORT = 'oc'
CONNECTION_OPTIONS = {
'oc_container': '-c',
'oc_namespace': '-n',
'oc_kubeconfig': '--config',
'oc_context': '--context',
'oc_host': '--server',
'client_cert': '--client-certificate',
'client_key': '--client-key',
'ca_cert': '--certificate-authority',
'validate_certs': '--insecure-skip-tls-verify',
'oc_token': '--token'
}
class Connection(KubectlConnection):
''' Local oc based connections '''
transport = CONNECTION_TRANSPORT
connection_options = CONNECTION_OPTIONS
documentation = DOCUMENTATION
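A minimal sketch of how these flag mappings are typically consumed. This is assumed behavior shown for illustration only; the real logic lives in the inherited kubectl connection plugin, and the helper name is hypothetical:

def _example_build_args(option_values, connection_options=CONNECTION_OPTIONS):
    # Map each option that has a value onto its oc command-line flag.
    args = []
    for name, flag in connection_options.items():
        value = option_values.get(name)
        if value:
            args.extend([flag, str(value)])
    return args

# _example_build_args({'oc_namespace': 'dev', 'oc_token': 's3cret'})
# -> ['-n', 'dev', '--token', 's3cret']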

View File

@@ -11,7 +11,7 @@ __metaclass__ = type
DOCUMENTATION = '''
name: qubes
connection: qubes
short_description: Interact with an existing QubesOS AppVM
description:

View File

@@ -10,7 +10,7 @@ __metaclass__ = type
DOCUMENTATION = '''
author: Michael Scherer (@mscherer) <misc@zarb.org>
name: saltstack
connection: saltstack
short_description: Allow ansible to piggyback on salt minions
description:
- This allows you to use existing Saltstack infrastructure to connect to targets.
@@ -19,7 +19,6 @@ DOCUMENTATION = '''
import re
import os
import pty
import codecs
import subprocess
from ansible.module_utils._text import to_bytes, to_text
@@ -86,9 +85,9 @@ class Connection(ConnectionBase):
out_path = self._normalize_path(out_path, '/')
self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
with open(in_path, 'rb') as in_fh:
with open(in_path) as in_fh:
content = in_fh.read()
self.client.cmd(self.host, 'hashutil.base64_decodefile', [codecs.encode(content, 'base64'), out_path])
self.client.cmd(self.host, 'file.write', [out_path, content])
# TODO test it
def fetch_file(self, in_path, out_path):

View File

@@ -11,7 +11,7 @@ __metaclass__ = type
DOCUMENTATION = '''
author: Ansible Core Team
name: zone
connection: zone
short_description: Run tasks in a zone instance
description:
- Run commands or put/fetch files to an existing zone

View File

@@ -0,0 +1,62 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ModuleDocFragment(object):
# GCP doc fragment.
DOCUMENTATION = r'''
options:
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices: [ application, machineaccount, serviceaccount ]
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected
and the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used.
type: list
elements: str
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- For authentication, you can set service_account_file using the
C(GCP_SERVICE_ACCOUNT_FILE) env variable.
- For authentication, you can set service_account_contents using the
C(GCP_SERVICE_ACCOUNT_CONTENTS) env variable.
- For authentication, you can set service_account_email using the
C(GCP_SERVICE_ACCOUNT_EMAIL) env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env
variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are
not set.
- The I(service_account_email) and I(service_account_file) options are
mutually exclusive.
'''

View File

@@ -19,6 +19,7 @@ options:
region:
description:
- The target region.
choices:
- Regions are defined in Apache libcloud project [libcloud/common/dimensiondata.py]
- They are also listed in U(https://libcloud.readthedocs.io/en/latest/compute/drivers/dimensiondata.html)
- Note that the default value "na" stands for "North America".

View File

@@ -0,0 +1,136 @@
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ModuleDocFragment(object):
# Docker doc fragment
DOCUMENTATION = r'''
options:
docker_host:
description:
- The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
the module will automatically replace C(tcp) in the connection URL with C(https).
- If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used
instead. If the environment variable is not set, the default value will be used.
type: str
default: unix://var/run/docker.sock
aliases: [ docker_url ]
tls_hostname:
description:
- When verifying the authenticity of the Docker Host server, provide the expected name of the server.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will
be used instead. If the environment variable is not set, the default value will be used.
type: str
default: localhost
api_version:
description:
- The version of the Docker API running on the Docker Host.
- Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon.
- If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be
used instead. If the environment variable is not set, the default value will be used.
type: str
default: auto
aliases: [ docker_api_version ]
timeout:
description:
- The maximum amount of time in seconds to wait on a response from the API.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used
instead. If the environment variable is not set, the default value will be used.
type: int
default: 60
ca_cert:
description:
- Use a CA certificate when performing server verification by providing the path to a CA certificate file.
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
type: path
aliases: [ tls_ca_cert, cacert_path ]
client_cert:
description:
- Path to the client's TLS certificate file.
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
type: path
aliases: [ tls_client_cert, cert_path ]
client_key:
description:
- Path to the client's TLS key file.
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
type: path
aliases: [ tls_client_key, key_path ]
ssl_version:
description:
- Provide a valid SSL version number. Default value determined by ssl.py module.
- If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
used instead.
type: str
tls:
description:
- Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
server. Note that if I(validate_certs) is set to C(yes) as well, it will take precedence.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
instead. If the environment variable is not set, the default value will be used.
type: bool
default: no
validate_certs:
description:
- Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
used instead. If the environment variable is not set, the default value will be used.
type: bool
default: no
aliases: [ tls_verify ]
debug:
description:
- Debug mode
type: bool
default: no
notes:
- Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
with the product that sets up the environment. It will set these variables for you. See
U(https://docs.docker.com/machine/reference/env/) for more details.
- When connecting to Docker daemon with TLS, you might need to install additional Python packages.
For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip).
- Note that the Docker SDK for Python only allows to specify the path to the Docker configuration for very few functions.
In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
and use C($DOCKER_CONFIG/config.json) otherwise.
'''
# Additional, more specific stuff for minimal Docker SDK for Python version < 2.0
DOCKER_PY_1_DOCUMENTATION = r'''
options: {}
requirements:
- "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
Python module has been superseded by L(docker,https://pypi.org/project/docker/)
(see L(here,https://github.com/docker/docker-py/issues/1310) for details).
For Python 2.6, C(docker-py) must be used. Otherwise, it is recommended to
install the C(docker) Python module. Note that both modules should *not*
be installed at the same time. Also note that when both modules are installed
and one of them is uninstalled, the other might no longer function and a
reinstall of it is required."
'''
# Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.
# Note that Docker SDK for Python >= 2.0 requires Python 2.7 or newer.
DOCKER_PY_2_DOCUMENTATION = r'''
options: {}
requirements:
- "Python >= 2.7"
- "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
Python module has been superseded by L(docker,https://pypi.org/project/docker/)
(see L(here,https://github.com/docker/docker-py/issues/1310) for details).
This module does *not* work with docker-py."
'''
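For reference, the Docker SDK for Python honors the same environment variables described in this fragment. A minimal sketch, assuming the C(docker) package is installed:

import docker

# from_env() reads DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH and friends,
# mirroring the environment-variable fallbacks described above.
client = docker.from_env()
print(client.version()['ApiVersion'])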

View File

@@ -0,0 +1,23 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2019 Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = r'''
options:
hetzner_user:
description: The username for the Robot webservice user.
type: str
required: yes
hetzner_password:
description: The password for the Robot webservice user.
type: str
required: yes
'''
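A minimal sketch of what these two options feed into; the Robot webservice uses HTTP basic auth (the endpoint URL and credential values below are assumptions for illustration):

import requests

resp = requests.get(
    'https://robot-ws.your-server.de/server',
    auth=('hetzner_user_value', 'hetzner_password_value'),
)
resp.raise_for_status()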

View File

@@ -40,7 +40,6 @@ options:
path:
description:
- The path on which InfluxDB server is accessible
- Only available when using python-influxdb >= 5.1.0
type: str
version_added: '0.2.0'
validate_certs:
@@ -53,7 +52,6 @@ options:
description:
- Use https instead of http to connect to InfluxDB server.
type: bool
default: false
timeout:
description:
- Number of seconds Requests will wait for client to establish a connection.
@@ -62,14 +60,12 @@ options:
description:
- Number of retries client will try before aborting.
- C(0) indicates try until success.
- Only available when using python-influxdb >= 4.1.0
type: int
default: 3
use_udp:
description:
- Use UDP to connect to InfluxDB server.
type: bool
default: false
udp_port:
description:
- UDP port to connect to InfluxDB server.

View File

@@ -0,0 +1,133 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, KubeVirt Team <@kubevirt>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ModuleDocFragment(object):
DOCUMENTATION = r'''
options:
resource_definition:
description:
- "A partial YAML definition of the object being created/updated. Here you can define Kubernetes
resource parameters not covered by this module's parameters."
- "NOTE: I(resource_definition) has lower priority than module parameters. If you try to define e.g.
I(metadata.namespace) here, that value will be ignored and I(namespace) used instead."
aliases:
- definition
- inline
type: dict
wait:
description:
- "I(True) if the module should wait for the resource to get into desired state."
type: bool
default: yes
force:
description:
- If set to C(no), and I(state) is C(present), an existing object will be replaced.
type: bool
default: no
wait_timeout:
description:
- The amount of time in seconds the module should wait for the resource to get into desired state.
type: int
default: 120
wait_sleep:
description:
- Number of seconds to sleep between checks.
default: 5
version_added: '0.2.0'
memory:
description:
- The amount of memory to be requested by virtual machine.
- For example 1024Mi.
type: str
memory_limit:
description:
- The maximum memory to be used by virtual machine.
- For example 1024Mi.
type: str
machine_type:
description:
- QEMU machine type is the actual chipset of the virtual machine.
type: str
merge_type:
description:
- Whether to override the default patch merge approach with a specific type.
- If more than one merge type is given, the merge types will be tried in order.
- "Defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
on resource kinds that combine Custom Resources and built-in resources, as
Custom Resource Definitions typically aren't updatable by the usual strategic merge."
- "See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)"
type: list
choices: [ json, merge, strategic-merge ]
cpu_shares:
description:
- "Specify CPU shares."
type: int
cpu_limit:
description:
- "Is converted to its millicore value and multiplied by 100. The resulting value is the total amount of CPU time that a container can use
every 100ms. A virtual machine cannot use more than its share of CPU time during this interval."
type: int
cpu_cores:
description:
- "Number of CPU cores."
type: int
cpu_model:
description:
- "CPU model."
- "You can check list of available models here: U(https://github.com/libvirt/libvirt/blob/master/src/cpu_map/index.xml)."
- "I(Note:) User can define default CPU model via as I(default-cpu-model) in I(kubevirt-config) I(ConfigMap), if not set I(host-model) is used."
- "I(Note:) Be sure that node CPU model where you run a VM, has the same or higher CPU family."
- "I(Note:) If CPU model wasn't defined, the VM will have CPU model closest to one that used on the node where the VM is running."
type: str
bootloader:
description:
- "Specify the bootloader of the virtual machine."
- "All virtual machines use BIOS by default for booting."
type: str
smbios_uuid:
description:
- "In order to provide a consistent view on the virtualized hardware for the guest OS, the SMBIOS UUID can be set."
type: str
cpu_features:
description:
- "List of dictionary to fine-tune features provided by the selected CPU model."
- "I(Note): Policy attribute can either be omitted or contain one of the following policies: force, require, optional, disable, forbid."
- "I(Note): In case a policy is omitted for a feature, it will default to require."
- "More information about policies: U(https://libvirt.org/formatdomain.html#elementsCPU)"
type: list
headless:
description:
- "Specify if the virtual machine should have attached a minimal Video and Graphics device configuration."
- "By default a minimal Video and Graphics device configuration will be applied to the VirtualMachineInstance. The video device is vga
compatible and comes with a memory size of 16 MB."
hugepage_size:
description:
- "Specify huge page size."
type: str
tablets:
description:
- "Specify tablets to be used as input devices"
type: list
hostname:
description:
- "Specifies the hostname of the virtual machine. The hostname will be set either by dhcp, cloud-init if configured or virtual machine
name will be used."
subdomain:
description:
- "If specified, the fully qualified virtual machine hostname will be hostname.subdomain.namespace.svc.cluster_domain. If not specified,
the virtual machine will not have a domain name at all. The DNS entry will resolve to the virtual machine, no matter if the virtual machine
itself can pick up a hostname."
requirements:
- python >= 2.7
- openshift >= 0.8.2
notes:
- "In order to use this module you have to install Openshift Python SDK.
To ensure it's installed with correct version you can create the following task:
I(pip: name=openshift>=0.8.2)"
'''

View File

@@ -0,0 +1,103 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, KubeVirt Team <@kubevirt>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ModuleDocFragment(object):
# Standard oVirt documentation fragment
DOCUMENTATION = r'''
options:
disks:
description:
- List of dictionaries which specify disks of the virtual machine.
- "A disk can be made accessible via four different types: I(disk), I(lun), I(cdrom), I(floppy)."
- "All possible configuration options are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_disk)"
- Each disk must specify a I(volume) that declares the volume type of the disk.
All possible configuration options of volume are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_volume).
type: list
labels:
description:
- Labels are key/value pairs that are attached to virtual machines. Labels are intended to be used to
specify identifying attributes of virtual machines that are meaningful and relevant to users, but do not directly
imply semantics to the core system. Labels can be used to organize and to select subsets of virtual machines.
Labels can be attached to virtual machines at creation time and subsequently added and modified at any time.
- More on labels that are used for internal implementation U(https://kubevirt.io/user-guide/#/misc/annotations_and_labels)
type: dict
interfaces:
description:
- An interface defines a virtual network interface of a virtual machine (also called a frontend).
- All possible configuration options interfaces are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_interface)
- Each interface must specify a I(network) that declares which logical or physical device it is connected to (also called a backend).
All possible configuration options of network are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_network).
type: list
cloud_init_nocloud:
description:
- "Represents a cloud-init NoCloud user-data source. The NoCloud data will be added
as a disk to the virtual machine. A proper cloud-init installation is required inside the guest.
More information U(https://kubevirt.io/api-reference/master/definitions.html#_v1_cloudinitnocloudsource)"
type: dict
affinity:
description:
- "Describes node affinity scheduling rules for the vm."
type: dict
suboptions:
soft:
description:
- "The scheduler will prefer to schedule vms to nodes that satisfy the affinity expressions specified by this field, but it may choose a
node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for
each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute
a sum by iterating through the elements of this field and adding C(weight) to the sum if the node has vms which matches the corresponding
C(term); the nodes with the highest sum are the most preferred."
type: dict
hard:
description:
- "If the affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node. If
the affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to a vm label update), the
system may or may not try to eventually evict the vm from its node. When there are multiple elements, the lists of nodes corresponding to
each C(term) are intersected, i.e. all terms must be satisfied."
type: dict
node_affinity:
description:
- "Describes vm affinity scheduling rules e.g. co-locate this vm in the same node, zone, etc. as some other vms"
type: dict
suboptions:
soft:
description:
- "The scheduler will prefer to schedule vms to nodes that satisfy the affinity expressions specified by this field, but it may choose
a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e.
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.),
compute a sum by iterating through the elements of this field and adding C(weight) to the sum if the node matches the corresponding
match_expressions; the nodes with the highest sum are the most preferred."
type: dict
hard:
description:
- "If the affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node. If
the affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to an update), the system
may or may not try to eventually evict the vm from its node."
type: dict
anti_affinity:
description:
- "Describes vm anti-affinity scheduling rules e.g. avoid putting this vm in the same node, zone, etc. as some other vms."
type: dict
suboptions:
soft:
description:
- "The scheduler will prefer to schedule vms to nodes that satisfy the anti-affinity expressions specified by this field, but it may
choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights,
i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions,
etc.), compute a sum by iterating through the elements of this field and adding C(weight) to the sum if the node has vms which matches
the corresponding C(term); the nodes with the highest sum are the most preferred."
type: dict
hard:
description:
- "If the anti-affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node.
If the anti-affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to a vm label
update), the system may or may not try to eventually evict the vm from its node. When there are multiple elements, the lists of nodes
corresponding to each C(term) are intersected, i.e. all terms must be satisfied."
type: dict
'''

View File

@@ -15,7 +15,7 @@ class ModuleDocFragment(object):
options:
bind_dn:
description:
- A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism as default.
- A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism.
- If this is blank, we'll use an anonymous bind.
type: str
bind_pw:
@@ -27,15 +27,6 @@ options:
description:
- The DN of the entry to add or remove.
type: str
referrals_chasing:
choices: [disabled, anonymous]
default: anonymous
type: str
description:
- Set the referrals chasing behavior.
- C(anonymous) follow referrals anonymously. This is the default behavior.
- C(disabled) disable referrals chasing. This sets C(OPT_REFERRALS) to off.
version_added: 2.0.0
server_uri:
description:
- A URI to the LDAP server.
@@ -53,12 +44,4 @@ options:
- This should only be used on sites using self-signed certificates.
type: bool
default: yes
sasl_class:
description:
- The class to use for SASL authentication.
- possible choices are C(external), C(gssapi).
type: str
choices: ['external', 'gssapi']
default: external
version_added: "2.0.0"
'''

View File

@@ -15,14 +15,14 @@ options:
manageiq_connection:
description:
- ManageIQ connection configuration information.
required: false
required: true
type: dict
suboptions:
url:
description:
- ManageIQ environment URL. Uses the C(MIQ_URL) env var if set; otherwise, it is required to pass it.
type: str
required: false
required: true
username:
description:
- ManageIQ username. Uses the C(MIQ_USERNAME) env var if set; otherwise, required if no token is passed in.
@@ -44,7 +44,7 @@ options:
ca_cert:
description:
- The path to a CA bundle file or directory with certificates. Defaults to C(None).
type: str
type: path
aliases: [ ca_bundle_path ]
requirements:

View File

@@ -24,6 +24,7 @@ options:
- Value can also be specified using C(INFOBLOX_HOST) environment
variable.
type: str
required: true
username:
description:
- Configures the username to use to authenticate the connection to
@@ -78,24 +79,6 @@ options:
variable.
type: int
default: 1000
http_pool_connections:
description:
- Number of pools to be used by the C(infoblox_client.Connector) object.
- This is passed as-is to the underlying C(requests.adapters.HTTPAdapter) class.
type: int
default: 10
http_pool_maxsize:
description:
- Maximum number of connections per pool to be used by the C(infoblox_client.Connector) object.
- This is passed as-is to the underlying C(requests.adapters.HTTPAdapter) class.
type: int
default: 10
silent_ssl_warnings:
description:
- Disable C(urllib3) SSL warnings in the C(infoblox_client.Connector) object.
- This is passed as-is to the underlying C(requests.adapters.HTTPAdapter) class.
type: bool
default: true
notes:
- "This module must be run locally, which can be achieved by specifying C(connection: local)."
- Please read the :ref:`nios_guide` for more detailed information on how to use Infoblox with Ansible.
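For context on the pool options removed above, this is the C(requests) machinery they were passed through to; a sketch, not collection code:

import requests
from requests.adapters import HTTPAdapter

session = requests.Session()
# http_pool_connections / http_pool_maxsize map onto these HTTPAdapter arguments.
session.mount('https://', HTTPAdapter(pool_connections=10, pool_maxsize=10))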

View File

@@ -1,51 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2020 FERREIRA Christophe <christophe.ferreira@cnaf.fr>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = r'''
options:
host:
description:
- FQDN of Nomad server.
required: true
type: str
use_ssl:
description:
- Use TLS/SSL connection.
type: bool
default: true
timeout:
description:
- Timeout (in seconds) for the request to Nomad.
type: int
default: 5
validate_certs:
description:
- Enable TLS/SSL certificate validation.
type: bool
default: true
client_cert:
description:
- Path of certificate for TLS/SSL.
type: path
client_key:
description:
- Path of certificate's private key for TLS/SSL.
type: path
namespace:
description:
- Namespace for Nomad.
type: str
token:
description:
- ACL token for authentication.
type: str
'''

View File

@@ -15,7 +15,6 @@ options:
description:
- Online OAuth token.
type: str
required: true
aliases: [ oauth_token ]
api_url:
description:

View File

@@ -19,7 +19,6 @@ options:
Only the attributes of the current entity. User can configure to fetch other
attributes of the nested entities by specifying C(nested_attributes).
type: bool
default: false
nested_attributes:
description:
- Specifies list of the attributes which should be fetched from the API.

View File

@@ -0,0 +1,62 @@
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ModuleDocFragment(object):
# Postgres documentation fragment
DOCUMENTATION = r'''
options:
login_user:
description:
- The username used to authenticate with.
type: str
default: postgres
login_password:
description:
- The password used to authenticate with.
type: str
login_host:
description:
- Host running the database.
type: str
login_unix_socket:
description:
- Path to a Unix domain socket for local connections.
type: str
port:
description:
- Database port to connect to.
type: int
default: 5432
aliases: [ login_port ]
ssl_mode:
description:
- Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
- Default of C(prefer) matches libpq default.
type: str
default: prefer
choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
ca_cert:
description:
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
- If the file exists, the server's certificate will be verified to be signed by one of these authorities.
type: str
aliases: [ ssl_rootcert ]
notes:
- The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host.
- To avoid "Peer authentication failed for user postgres" error,
use postgres user as a I(become_user).
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
ensure that psycopg2 is installed on the host before using this module.
- If the remote host is the PostgreSQL server (which is the default case), then
PostgreSQL must also be installed on the remote host.
- For Ubuntu-based systems, install the postgresql, libpq-dev, and python-psycopg2 packages
on the remote host before using this module.
- The ca_cert parameter requires at least Postgres version 8.4 and I(psycopg2) version 2.4.3.
requirements: [ psycopg2 ]
'''
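A minimal psycopg2 connection mirroring the options above (a sketch; all values are placeholders):

import psycopg2

conn = psycopg2.connect(
    host='localhost',               # login_host
    port=5432,                      # port
    user='postgres',                # login_user
    password='secret',              # login_password
    dbname='postgres',
    sslmode='prefer',               # ssl_mode
    sslrootcert='/etc/ssl/ca.pem',  # ca_cert
)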

View File

@@ -1,64 +0,0 @@
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ModuleDocFragment(object):
# Common parameters for Proxmox VE modules
DOCUMENTATION = r'''
options:
api_host:
description:
- Specify the target host of the Proxmox VE cluster.
type: str
required: true
api_user:
description:
- Specify the user to authenticate with.
type: str
required: true
api_password:
description:
- Specify the password to authenticate with.
- You can use C(PROXMOX_PASSWORD) environment variable.
type: str
api_token_id:
description:
- Specify the token ID.
type: str
version_added: 1.3.0
api_token_secret:
description:
- Specify the token secret.
type: str
version_added: 1.3.0
validate_certs:
description:
- If C(no), SSL certificates will not be validated.
- This should only be used on personally controlled sites using self-signed certificates.
type: bool
default: no
requirements: [ "proxmoxer", "requests" ]
'''
SELECTION = r'''
options:
vmid:
description:
- Specifies the instance ID.
- If not set the next available ID will be fetched from ProxmoxAPI.
type: int
node:
description:
- Proxmox VE node on which to operate.
- Only required for I(state=present).
- For all other states it will be autodiscovered.
type: str
pool:
description:
- Add the new VM to the specified pool.
type: str
'''
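A sketch of a proxmoxer client built from the api_* options in this fragment (all values are placeholders):

from proxmoxer import ProxmoxAPI

proxmox = ProxmoxAPI(
    'pve.example.com',   # api_host
    user='root@pam',     # api_user
    password='secret',   # api_password
    verify_ssl=False,    # validate_certs
)
print(proxmox.nodes.get())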

View File

@@ -32,6 +32,7 @@ options:
description:
- Region to create an instance in.
type: str
default: DFW
username:
description:
- Rackspace username, overrides I(credentials).
@@ -58,45 +59,37 @@ notes:
OPENSTACK = r'''
options:
api_key:
type: str
description:
- Rackspace API key, overrides I(credentials).
aliases: [ password ]
auth_endpoint:
type: str
description:
- The URI of the authentication service.
- If not specified will be set to U(https://identity.api.rackspacecloud.com/v2.0/)
default: https://identity.api.rackspacecloud.com/v2.0/
credentials:
type: path
description:
- File to find the Rackspace credentials in. Ignored if I(api_key) and
I(username) are provided.
aliases: [ creds_file ]
env:
type: str
description:
- Environment as configured in I(~/.pyrax.cfg),
see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration).
identity_type:
type: str
description:
- Authentication mechanism to use, such as rackspace or keystone.
default: rackspace
region:
type: str
description:
- Region to create an instance in.
default: DFW
tenant_id:
type: str
description:
- The tenant ID used for authentication.
tenant_name:
type: str
description:
- The tenant name used for authentication.
username:
type: str
description:
- Rackspace username, overrides I(credentials).
validate_certs:

View File

@@ -16,7 +16,6 @@ options:
description:
- Scaleway OAuth token.
type: str
required: true
aliases: [ oauth_token ]
api_url:
description:

View File

@@ -1,70 +0,0 @@
# Copyright (C) 2020 Stanislav German-Evtushenko (@giner) <ginermail@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
def dict_kv(value, key):
'''Return a dictionary with a single key-value pair
Example:
- hosts: localhost
gather_facts: false
vars:
myvar: myvalue
tasks:
- debug:
msg: "{{ myvar | dict_kv('thatsmyvar') }}"
produces:
ok: [localhost] => {
"msg": {
"thatsmyvar": "myvalue"
}
}
Example 2:
- hosts: localhost
gather_facts: false
vars:
common_config:
type: host
database: all
myservers:
- server1
- server2
tasks:
- debug:
msg: "{{ myservers | map('dict_kv', 'server') | map('combine', common_config) }}"
produces:
ok: [localhost] => {
"msg": [
{
"database": "all",
"server": "server1",
"type": "host"
},
{
"database": "all",
"server": "server2",
"type": "host"
}
]
}
'''
return {key: value}
class FilterModule(object):
''' Query filter '''
def filters(self):
return {
'dict_kv': dict_kv
}

View File

@@ -1,94 +0,0 @@
# (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# contributed by Kelly Brazil <kellyjonbrazil@gmail.com>
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError, AnsibleFilterError
import importlib
try:
import jc
HAS_LIB = True
except ImportError:
HAS_LIB = False
def jc(data, parser, quiet=True, raw=False):
"""Convert returned command output to JSON using the JC library
Arguments:
parser required (string) the correct parser for the input data (e.g. 'ifconfig')
see https://github.com/kellyjonbrazil/jc#parsers for latest list of parsers.
quiet optional (bool) True to suppress warning messages (default is True)
raw optional (bool) True to return pre-processed JSON (default is False)
Returns:
dictionary or list of dictionaries
Example:
- name: run date command
hosts: ubuntu
tasks:
- shell: date
register: result
- set_fact:
myvar: "{{ result.stdout | community.general.jc('date') }}"
- debug:
msg: "{{ myvar }}"
produces:
ok: [192.168.1.239] => {
"msg": {
"day": 9,
"hour": 22,
"minute": 6,
"month": "Aug",
"month_num": 8,
"second": 22,
"timezone": "UTC",
"weekday": "Sun",
"weekday_num": 1,
"year": 2020
}
}
"""
if not HAS_LIB:
raise AnsibleError('You need to install "jc" prior to running jc filter')
try:
jc_parser = importlib.import_module('jc.parsers.' + parser)
return jc_parser.parse(data, quiet=quiet, raw=raw)
except Exception as e:
raise AnsibleFilterError('Error in jc filter plugin: %s' % e)
class FilterModule(object):
''' Query filter '''
def filters(self):
return {
'jc': jc
}
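Outside Ansible, the same conversion can be reproduced with the jc library directly; a sketch, assuming jc is installed:

import importlib

jc_parser = importlib.import_module('jc.parsers.date')
print(jc_parser.parse('Sun Aug  9 22:06:22 UTC 2020', quiet=True))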

View File

@@ -35,9 +35,6 @@ def json_query(data, expr):
raise AnsibleError('You need to install "jmespath" prior to running '
'json_query filter')
# Hack to handle Ansible String Types
# See issue: https://github.com/ansible-collections/community.general/issues/320
jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ('AnsibleUnicode', 'AnsibleUnsafeText', )
try:
return jmespath.search(expr, data)
except jmespath.exceptions.JMESPathError as e:

View File

@@ -1,47 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Vladimir Botka <vbotka@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError, AnsibleFilterError
from ansible.module_utils.six import string_types
from ansible.module_utils.common._collections_compat import Mapping, Sequence
from collections import defaultdict
from operator import itemgetter
def lists_mergeby(l1, l2, index):
''' merge lists by attribute index. Example:
- debug: msg="{{ l1|community.general.lists_mergeby(l2, 'index')|list }}" '''
if not isinstance(l1, Sequence):
raise AnsibleFilterError('First argument for community.general.lists_mergeby must be list. %s is %s' %
(l1, type(l1)))
if not isinstance(l2, Sequence):
raise AnsibleFilterError('Second argument for community.general.lists_mergeby must be list. %s is %s' %
(l2, type(l2)))
if not isinstance(index, string_types):
raise AnsibleFilterError('Third argument for community.general.lists_mergeby must be string. %s is %s' %
(index, type(index)))
d = defaultdict(dict)
for l in (l1, l2):
for elem in l:
if not isinstance(elem, Mapping):
raise AnsibleFilterError('Elements of list arguments for lists_mergeby must be dictionaries. Found {0!r}.'.format(elem))
if index in elem.keys():
d[elem[index]].update(elem)
return sorted(d.values(), key=itemgetter(index))
class FilterModule(object):
''' Ansible list filters '''
def filters(self):
return {
'lists_mergeby': lists_mergeby,
}
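# Worked example in plain Python (matching the docstring above):
#   l1 = [{'index': 'a', 'value': 1}]
#   l2 = [{'index': 'a', 'foo': 2}, {'index': 'b', 'foo': 3}]
#   lists_mergeby(l1, l2, 'index')
#   -> [{'index': 'a', 'value': 1, 'foo': 2}, {'index': 'b', 'foo': 3}]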

View File

@@ -6,8 +6,8 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Orion Poplawski (@opoplawski)
name: cobbler
plugin_type: inventory
short_description: Cobbler inventory source
version_added: 1.0.0
description:
@@ -17,9 +17,9 @@ DOCUMENTATION = '''
- inventory_cache
options:
plugin:
description: The name of this plugin; it should always be set to C(community.general.cobbler) for this plugin to recognize it as its own.
description: The name of this plugin; it should always be set to C(cobbler) for this plugin to recognize it as its own.
required: yes
choices: [ 'cobbler', 'community.general.cobbler' ]
choices: ['cobbler']
url:
description: URL to cobbler.
default: 'http://cobbler/cobbler_api'
@@ -92,7 +92,7 @@ except ImportError:
class InventoryModule(BaseInventoryPlugin, Cacheable):
''' Host inventory parser for ansible using cobbler as source. '''
NAME = 'community.general.cobbler'
NAME = 'cobbler'
def __init__(self):

View File

@@ -0,0 +1,272 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Ximon Eighteen <ximon.eighteen@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: docker_machine
plugin_type: inventory
author: Ximon Eighteen (@ximon18)
short_description: Docker Machine inventory source
requirements:
- L(Docker Machine,https://docs.docker.com/machine/)
extends_documentation_fragment:
- constructed
description:
- Get inventory hosts from Docker Machine.
- Uses a YAML configuration file that ends with docker_machine.(yml|yaml).
- The plugin sets standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key_file).
- The plugin stores the Docker Machine 'env' output variables in I(dm_) prefixed host variables.
options:
plugin:
description: token that ensures this is a source file for the C(docker_machine) plugin.
required: yes
choices: ['docker_machine']
daemon_env:
description:
- Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched.
- With C(require) and C(require-silently), fetch them and skip any host for which they cannot be fetched.
A warning will be issued for any skipped host if the choice is C(require).
- With C(optional) and C(optional-silently), fetch them but do not skip hosts for which they cannot be fetched.
A warning will be issued for hosts where they cannot be fetched if the choice is C(optional).
- With C(skip), do not attempt to fetch the docker daemon connection environment variables.
- If fetched successfully, the variables will be prefixed with I(dm_) and stored as host variables.
type: str
choices:
- require
- require-silently
- optional
- optional-silently
- skip
default: require
running_required:
description: when true, hosts which Docker Machine indicates are in a state other than C(running) will be skipped.
type: bool
default: yes
verbose_output:
description: when true, include all available nodes metadata (e.g. Image, Region, Size) as a JSON object named C(docker_machine_node_attributes).
type: bool
default: yes
'''
EXAMPLES = '''
# Minimal example
plugin: docker_machine
# Example using constructed features to create a group per Docker Machine driver
# (https://docs.docker.com/machine/drivers/), e.g.:
# $ docker-machine create --driver digitalocean ... mymachine
# $ ansible-inventory -i ./path/to/docker-machine.yml --host=mymachine
# {
# ...
# "digitalocean": {
# "hosts": [
# "mymachine"
# ]
# ...
# }
strict: no
keyed_groups:
- separator: ''
key: docker_machine_node_attributes.DriverName
# Example grouping hosts by Docker Machine tag
strict: no
keyed_groups:
- prefix: tag
key: 'dm_tags'
# Example using compose to override the default SSH behaviour of asking the user to accept the remote host key
compose:
ansible_ssh_common_args: '"-o StrictHostKeyChecking=accept-new"'
'''
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native
from ansible.module_utils._text import to_text
from ansible.module_utils.common.process import get_bin_path
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.utils.display import Display
import json
import re
import subprocess
display = Display()
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
''' Host inventory parser for ansible using Docker machine as source. '''
NAME = 'community.general.docker_machine'
DOCKER_MACHINE_PATH = None
def _run_command(self, args):
if not self.DOCKER_MACHINE_PATH:
try:
self.DOCKER_MACHINE_PATH = get_bin_path('docker-machine')
except ValueError as e:
raise AnsibleError(to_native(e))
command = [self.DOCKER_MACHINE_PATH]
command.extend(args)
display.debug('Executing command {0}'.format(command))
try:
result = subprocess.check_output(command)
except subprocess.CalledProcessError as e:
display.warning('Exception {0} caught while executing command {1}, this was the original exception: {2}'.format(type(e).__name__, command, e))
raise e
return to_text(result).strip()
def _get_docker_daemon_variables(self, machine_name):
'''
Capture settings from Docker Machine that would be needed to connect to the remote Docker daemon installed on
the Docker Machine remote host. Note: passing '--shell=sh' is a workaround for 'Error: Unknown shell'.
'''
try:
env_lines = self._run_command(['env', '--shell=sh', machine_name]).splitlines()
except subprocess.CalledProcessError:
# This can happen when the machine is created but provisioning is incomplete
return []
# example output of docker-machine env --shell=sh:
# export DOCKER_TLS_VERIFY="1"
# export DOCKER_HOST="tcp://134.209.204.160:2376"
# export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator"
# export DOCKER_MACHINE_NAME="routinator"
# # Run this command to configure your shell:
# # eval $(docker-machine env --shell=bash routinator)
# capture any of the DOCKER_xxx variables that were output and create Ansible host vars
# with the same name and value but with a dm_ name prefix.
env_vars = []
for line in env_lines:
match = re.search('(DOCKER_[^=]+)="([^"]+)"', line)
if match:
env_var_name = match.group(1)
env_var_value = match.group(2)
env_vars.append((env_var_name, env_var_value))
return env_vars
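# For example, the line: export DOCKER_HOST="tcp://134.209.204.160:2376"
# yields the tuple ('DOCKER_HOST', 'tcp://134.209.204.160:2376').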
def _get_machine_names(self):
# Filter out machines that are not in the Running state, as we probably can't perform any useful actions
# on them.
ls_command = ['ls', '-q']
if self.get_option('running_required'):
ls_command.extend(['--filter', 'state=Running'])
try:
ls_lines = self._run_command(ls_command)
except subprocess.CalledProcessError:
return []
return ls_lines.splitlines()
def _inspect_docker_machine_host(self, node):
try:
inspect_lines = self._run_command(['inspect', node])
except subprocess.CalledProcessError:
return None
return json.loads(inspect_lines)
def _ip_addr_docker_machine_host(self, node):
try:
ip_addr = self._run_command(['ip', node])
except subprocess.CalledProcessError:
return None
return ip_addr
def _should_skip_host(self, machine_name, env_var_tuples, daemon_env):
if not env_var_tuples:
warning_prefix = 'Unable to fetch Docker daemon env vars from Docker Machine for host {0}'.format(machine_name)
if daemon_env in ('require', 'require-silently'):
if daemon_env == 'require':
display.warning('{0}: host will be skipped'.format(warning_prefix))
return True
else: # 'optional', 'optional-silently'
if daemon_env == 'optional':
display.warning('{0}: host will lack dm_DOCKER_xxx variables'.format(warning_prefix))
return False
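# In other words, when the daemon env vars cannot be fetched, the daemon_env
# option maps to this behaviour:
#   require           -> warn and skip the host
#   require-silently  -> skip the host without warning
#   optional          -> warn, but keep the host (without dm_DOCKER_xxx variables)
#   optional-silently -> keep the host without warning
# (with daemon_env=skip, _populate() never fetches the env vars at all, so hosts
# are always kept)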
def _populate(self):
daemon_env = self.get_option('daemon_env')
try:
for node in self._get_machine_names():
self.node_attrs = self._inspect_docker_machine_host(node)
if not self.node_attrs:
continue
machine_name = self.node_attrs['Driver']['MachineName']
# query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands
# that could be used to set environment variables to influence a local Docker client:
if daemon_env == 'skip':
env_var_tuples = []
else:
env_var_tuples = self._get_docker_daemon_variables(machine_name)
if self._should_skip_host(machine_name, env_var_tuples, daemon_env):
continue
# add an entry in the inventory for this host
self.inventory.add_host(machine_name)
# check for valid ip address from inspect output, else explicitly use ip command to find host ip address
# this works around an issue seen with Google Cloud Platform where the IP address was not available
# via the 'inspect' subcommand but was via the 'ip' subcommand.
if self.node_attrs['Driver']['IPAddress']:
ip_addr = self.node_attrs['Driver']['IPAddress']
else:
ip_addr = self._ip_addr_docker_machine_host(node)
# set standard Ansible remote host connection settings to details captured from `docker-machine`
# see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
self.inventory.set_variable(machine_name, 'ansible_host', ip_addr)
self.inventory.set_variable(machine_name, 'ansible_port', self.node_attrs['Driver']['SSHPort'])
self.inventory.set_variable(machine_name, 'ansible_user', self.node_attrs['Driver']['SSHUser'])
self.inventory.set_variable(machine_name, 'ansible_ssh_private_key_file', self.node_attrs['Driver']['SSHKeyPath'])
# set variables based on Docker Machine tags
tags = self.node_attrs['Driver'].get('Tags') or ''
self.inventory.set_variable(machine_name, 'dm_tags', tags)
# set variables based on Docker Machine env variables
for kv in env_var_tuples:
self.inventory.set_variable(machine_name, 'dm_{0}'.format(kv[0]), kv[1])
if self.get_option('verbose_output'):
self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', self.node_attrs)
# Use constructed if applicable
strict = self.get_option('strict')
# Composed variables
self._set_composite_vars(self.get_option('compose'), self.node_attrs, machine_name, strict=strict)
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
self._add_host_to_composed_groups(self.get_option('groups'), self.node_attrs, machine_name, strict=strict)
# Create groups based on variable values and add the corresponding hosts to it
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), self.node_attrs, machine_name, strict=strict)
except Exception as e:
raise AnsibleError('Unable to fetch hosts from Docker Machine, this was the original exception: %s' %
to_native(e), orig_exc=e)
def verify_file(self, path):
"""Return the possibility of a file being consumable by this plugin."""
return (
super(InventoryModule, self).verify_file(path) and
path.endswith(('docker_machine.yaml', 'docker_machine.yml')))
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path, cache)
self._read_config_data(path)
self._populate()

View File

@@ -0,0 +1,255 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: docker_swarm
plugin_type: inventory
author:
- Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
short_description: Ansible dynamic inventory plugin for Docker swarm nodes.
requirements:
- python >= 2.7
- L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
extends_documentation_fragment:
- constructed
description:
- Reads inventories from the Docker swarm API.
- Uses a YAML configuration file docker_swarm.[yml|yaml].
- "The plugin returns following groups of swarm nodes: I(all) - all hosts; I(workers) - all worker nodes;
I(managers) - all manager nodes; I(leader) - the swarm leader node;
I(nonleaders) - all nodes except the swarm leader."
options:
plugin:
description: The name of this plugin. It should always be set to C(docker_swarm) for this plugin to
recognize it as its own.
type: str
required: true
choices: ['docker_swarm']
docker_host:
description:
- Socket of a Docker swarm manager node (C(tcp), C(unix)).
- "Use C(unix://var/run/docker.sock) to connect via local socket."
type: str
required: true
aliases: [ docker_url ]
verbose_output:
description: Toggle to (not) include all available node metadata (e.g. C(Platform), C(Architecture), C(OS),
C(EngineVersion)).
type: bool
default: yes
tls:
description: Connect using TLS without verifying the authenticity of the Docker host server.
type: bool
default: no
validate_certs:
description: Toggle whether to verify the authenticity of the Docker host server when connecting
using TLS.
type: bool
default: no
aliases: [ tls_verify ]
client_key:
description: Path to the client's TLS key file.
type: path
aliases: [ tls_client_key, key_path ]
ca_cert:
description: Use a CA certificate when performing server verification by providing the path to a CA
certificate file.
type: path
aliases: [ tls_ca_cert, cacert_path ]
client_cert:
description: Path to the client's TLS certificate file.
type: path
aliases: [ tls_client_cert, cert_path ]
tls_hostname:
description: When verifying the authenticity of the Docker host server, provide the expected name of
the server.
type: str
ssl_version:
description: Provide a valid SSL version number. Default value determined by ssl.py module.
type: str
api_version:
description:
- The version of the Docker API running on the Docker Host.
- Defaults to the latest version of the API supported by docker-py.
type: str
aliases: [ docker_api_version ]
timeout:
description:
- The maximum amount of time in seconds to wait on a response from the API.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT)
will be used instead. If the environment variable is not set, the default value will be used.
type: int
default: 60
aliases: [ time_out ]
include_host_uri:
description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the
swarm leader in format of C(tcp://172.16.0.1:2376). This value may be used without additional
modification as value of option I(docker_host) in Docker Swarm modules when connecting via API.
The port always defaults to C(2376).
type: bool
default: no
include_host_uri_port:
description: Override the detected port number included in I(ansible_host_uri).
type: int
'''
EXAMPLES = '''
# Minimal example using local docker
plugin: docker_swarm
docker_host: unix://var/run/docker.sock
# Minimal example using remote docker
plugin: docker_swarm
docker_host: tcp://my-docker-host:2375
# Example using remote docker with unverified TLS
plugin: docker_swarm
docker_host: tcp://my-docker-host:2376
tls: yes
# Example using remote docker with verified TLS and client certificate verification
plugin: docker_swarm
docker_host: tcp://my-docker-host:2376
validate_certs: yes
ca_cert: /somewhere/ca.pem
client_key: /somewhere/key.pem
client_cert: /somewhere/cert.pem
# Example using constructed features to create groups and set ansible_host
plugin: docker_swarm
docker_host: tcp://my-docker-host:2375
strict: False
keyed_groups:
# add e.g. x86_64 hosts to an arch_x86_64 group
- prefix: arch
key: 'Description.Platform.Architecture'
# add e.g. linux hosts to an os_linux group
- prefix: os
key: 'Description.Platform.OS'
# create a group per node label
# e.g. a node labeled w/ "production" ends up in group "label_production"
# hint: labels containing special characters will be converted to safe names
- key: 'Spec.Labels'
prefix: label
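# The resulting inventory can be inspected with, for example:
#   $ ansible-inventory -i ./docker_swarm.yml --graph
# (the configuration file must be named docker_swarm.yml or docker_swarm.yaml,
# otherwise verify_file() below will reject it)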
'''
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native
from ansible_collections.community.general.plugins.module_utils.docker.common import update_tls_hostname, get_connect_params
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible.parsing.utils.addresses import parse_address
try:
import docker
HAS_DOCKER = True
except ImportError:
HAS_DOCKER = False
class InventoryModule(BaseInventoryPlugin, Constructable):
''' Host inventory parser for ansible using Docker swarm as source. '''
NAME = 'community.general.docker_swarm'
def _fail(self, msg):
raise AnsibleError(msg)
def _populate(self):
raw_params = dict(
docker_host=self.get_option('docker_host'),
tls=self.get_option('tls'),
tls_verify=self.get_option('validate_certs'),
key_path=self.get_option('client_key'),
cacert_path=self.get_option('ca_cert'),
cert_path=self.get_option('client_cert'),
tls_hostname=self.get_option('tls_hostname'),
api_version=self.get_option('api_version'),
timeout=self.get_option('timeout'),
ssl_version=self.get_option('ssl_version'),
debug=None,
)
update_tls_hostname(raw_params)
connect_params = get_connect_params(raw_params, fail_function=self._fail)
self.client = docker.DockerClient(**connect_params)
self.inventory.add_group('all')
self.inventory.add_group('manager')
self.inventory.add_group('worker')
self.inventory.add_group('leader')
self.inventory.add_group('nonleaders')
if self.get_option('include_host_uri'):
if self.get_option('include_host_uri_port'):
host_uri_port = str(self.get_option('include_host_uri_port'))
elif self.get_option('tls') or self.get_option('validate_certs'):
host_uri_port = '2376'
else:
host_uri_port = '2375'
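# 2375 and 2376 are Docker's conventional daemon ports for plain TCP and TLS
# connections respectively, hence the fallbacks above when no explicit
# include_host_uri_port is configured.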
try:
self.nodes = self.client.nodes.list()
for node in self.nodes:
self.node_attrs = self.client.nodes.get(node.id).attrs
self.inventory.add_host(self.node_attrs['ID'])
self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role'])
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host',
self.node_attrs['Status']['Addr'])
if self.get_option('include_host_uri'):
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port)
if self.get_option('verbose_output'):
self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs)
if 'ManagerStatus' in self.node_attrs:
if self.node_attrs['ManagerStatus'].get('Leader'):
# This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
# Check moby/moby#35437 for details
swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \
self.node_attrs['Status']['Addr']
if self.get_option('include_host_uri'):
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
'tcp://' + swarm_leader_ip + ':' + host_uri_port)
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip)
self.inventory.add_host(self.node_attrs['ID'], group='leader')
else:
self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
else:
self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
# Use constructed if applicable
strict = self.get_option('strict')
# Composed variables
self._set_composite_vars(self.get_option('compose'),
self.node_attrs,
self.node_attrs['ID'],
strict=strict)
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
self._add_host_to_composed_groups(self.get_option('groups'),
self.node_attrs,
self.node_attrs['ID'],
strict=strict)
# Create groups based on variable values and add the corresponding hosts to it
self._add_host_to_keyed_groups(self.get_option('keyed_groups'),
self.node_attrs,
self.node_attrs['ID'],
strict=strict)
except Exception as e:
raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
to_native(e))
def verify_file(self, path):
"""Return the possibly of a file being consumable by this plugin."""
return (
super(InventoryModule, self).verify_file(path) and
path.endswith(('docker_swarm.yaml', 'docker_swarm.yml')))
def parse(self, inventory, loader, path, cache=True):
if not HAS_DOCKER:
raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
'https://github.com/docker/docker-py.')
super(InventoryModule, self).parse(inventory, loader, path, cache)
self._read_config_data(path)
self._populate()

View File

@@ -9,8 +9,9 @@ __metaclass__ = type
DOCUMENTATION = '''
name: gitlab_runners
author:
- Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
plugin_type: inventory
authors:
- Stefan Heitmüller (stefan.heitmueller@gmx.com)
short_description: Ansible dynamic inventory plugin for GitLab runners.
requirements:
- python >= 2.7
@@ -27,7 +28,6 @@ DOCUMENTATION = '''
required: true
choices:
- gitlab_runners
- community.general.gitlab_runners
server_url:
description: The URL of the GitLab server, with protocol (i.e. http or https).
env:
@@ -60,11 +60,11 @@ DOCUMENTATION = '''
EXAMPLES = '''
# gitlab_runners.yml
plugin: community.general.gitlab_runners
plugin: gitlab_runners
host: https://gitlab.com
# Example using constructed features to create groups and set ansible_host
plugin: community.general.gitlab_runners
plugin: gitlab_runners
host: https://gitlab.com
strict: False
keyed_groups:

View File

@@ -0,0 +1,256 @@
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: kubevirt
plugin_type: inventory
author:
- KubeVirt Team (@kubevirt)
short_description: KubeVirt inventory source
extends_documentation_fragment:
- inventory_cache
- constructed
description:
- Fetch running VirtualMachines for one or more namespaces.
- Groups by namespace, namespace_vms and labels.
- Uses kubevirt.(yml|yaml) YAML configuration file to set parameter values.
options:
plugin:
description: token that ensures this is a source file for the 'kubevirt' plugin.
required: True
choices: ['kubevirt']
type: str
host_format:
description:
- Specify the format of the host in the inventory group.
default: "{namespace}-{name}-{uid}"
connections:
type: list
description:
- Optional list of cluster connection settings. If no connections are provided, the default
I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces
the active user is authorized to access.
suboptions:
name:
description:
- Optional name to assign to the cluster. If not provided, a name is constructed from the server
and port.
type: str
kubeconfig:
description:
- Path to an existing Kubernetes config file. If not provided, and no other connection
options are provided, the OpenShift client will attempt to load the default
configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG
environment variable.
type: str
context:
description:
- The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
variable.
type: str
host:
description:
- Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
type: str
api_key:
description:
- Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment
variable.
type: str
username:
description:
- Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME
environment variable.
type: str
password:
description:
- Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD
environment variable.
type: str
cert_file:
description:
- Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
environment variable.
type: str
key_file:
description:
- Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE
environment variable.
type: str
ssl_ca_cert:
description:
- Path to a CA certificate used to authenticate with the API. Can also be specified via
K8S_AUTH_SSL_CA_CERT environment variable.
type: str
verify_ssl:
description:
- "Whether or not to verify the API server's SSL certificates. Can also be specified via
K8S_AUTH_VERIFY_SSL environment variable."
type: bool
namespaces:
description:
- List of namespaces. If not specified, will fetch all virtual machines for all namespaces the user is
authorized to access.
type: list
network_name:
description:
- In case of multiple networks attached to the virtual machine, define which interface should be returned as the
primary IP address.
type: str
aliases: [ interface_name ]
api_version:
description:
- "Specify the KubeVirt API version."
type: str
annotation_variable:
description:
- "Specify the name of the annotation which provides data, which should be used as inventory host variables."
- "Note, that the value in ansible annotations should be json."
type: str
default: 'ansible'
requirements:
- "openshift >= 0.6"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
# File must be named kubevirt.yaml or kubevirt.yml
# Authenticate with token, and return all virtual machines for all namespaces
plugin: kubevirt
connections:
- host: https://kubevirt.io
token: xxxxxxxxxxxxxxxx
ssl_verify: false
# Use default config (~/.kube/config) file and active context, and return vms with interfaces
# connected to network myovsnetwork and from namespace vms
plugin: kubevirt
connections:
- namespaces:
- vms
network_name: myovsnetwork
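# network_name selects which of the VM's interfaces provides the IP address
# reported as ansible_host (see get_vms_for_namespace() below); VMs whose
# selected interface reports no IP address are skipped.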
'''
import json
from ansible_collections.community.kubernetes.plugins.inventory.k8s import (
K8sInventoryException, InventoryModule as K8sInventoryModule, format_dynamic_api_exc)
try:
from openshift.dynamic.exceptions import DynamicApiError
except ImportError:
pass
API_VERSION = 'kubevirt.io/v1alpha3'
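# Default KubeVirt API group/version, used whenever a connection does not
# specify its own api_version (see fetch_objects() below).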
class InventoryModule(K8sInventoryModule):
NAME = 'community.general.kubevirt'
def setup(self, config_data, cache, cache_key):
self.config_data = config_data
super(InventoryModule, self).setup(config_data, cache, cache_key)
def fetch_objects(self, connections):
client = self.get_api_client()
vm_format = self.config_data.get('host_format', '{namespace}-{name}-{uid}')
if connections:
for connection in connections:
client = self.get_api_client(**connection)
name = connection.get('name', self.get_default_host_name(client.configuration.host))
if connection.get('namespaces'):
namespaces = connection['namespaces']
else:
namespaces = self.get_available_namespaces(client)
interface_name = connection.get('network_name')
api_version = connection.get('api_version', API_VERSION)
annotation_variable = connection.get('annotation_variable', 'ansible')
for namespace in namespaces:
self.get_vms_for_namespace(client, name, namespace, vm_format, interface_name, api_version, annotation_variable)
else:
name = self.get_default_host_name(client.configuration.host)
namespaces = self.get_available_namespaces(client)
for namespace in namespaces:
# No per-connection settings are available here, so use the defaults.
self.get_vms_for_namespace(client, name, namespace, vm_format, None, API_VERSION, 'ansible')
def get_vms_for_namespace(self, client, name, namespace, name_format, interface_name=None, api_version=None, annotation_variable=None):
v1_vm = client.resources.get(api_version=api_version, kind='VirtualMachineInstance')
try:
obj = v1_vm.get(namespace=namespace)
except DynamicApiError as exc:
self.display.debug(exc)
raise K8sInventoryException('Error fetching Virtual Machines list: %s' % format_dynamic_api_exc(exc))
namespace_group = 'namespace_{0}'.format(namespace)
namespace_vms_group = '{0}_vms'.format(namespace_group)
name = self._sanitize_group_name(name)
namespace_group = self._sanitize_group_name(namespace_group)
namespace_vms_group = self._sanitize_group_name(namespace_vms_group)
self.inventory.add_group(name)
self.inventory.add_group(namespace_group)
self.inventory.add_child(name, namespace_group)
self.inventory.add_group(namespace_vms_group)
self.inventory.add_child(namespace_group, namespace_vms_group)
for vm in obj.items:
if not (vm.status and vm.status.interfaces):
continue
# Find interface by its name:
if interface_name is None:
interface = vm.status.interfaces[0]
else:
interface = next(
(i for i in vm.status.interfaces if i.name == interface_name),
None
)
# If interface is not found or IP address is not reported skip this VM:
if interface is None or interface.ipAddress is None:
continue
vm_name = name_format.format(namespace=vm.metadata.namespace, name=vm.metadata.name, uid=vm.metadata.uid)
vm_ip = interface.ipAddress
vm_annotations = {} if not vm.metadata.annotations else dict(vm.metadata.annotations)
self.inventory.add_host(vm_name)
if vm.metadata.labels:
# create a group for each label_value
for key, value in vm.metadata.labels:
group_name = 'label_{0}_{1}'.format(key, value)
group_name = self._sanitize_group_name(group_name)
self.inventory.add_group(group_name)
self.inventory.add_child(group_name, vm_name)
vm_labels = dict(vm.metadata.labels)
else:
vm_labels = {}
self.inventory.add_child(namespace_vms_group, vm_name)
# add hostvars
self.inventory.set_variable(vm_name, 'ansible_host', vm_ip)
self.inventory.set_variable(vm_name, 'labels', vm_labels)
self.inventory.set_variable(vm_name, 'annotations', vm_annotations)
self.inventory.set_variable(vm_name, 'object_type', 'vm')
self.inventory.set_variable(vm_name, 'resource_version', vm.metadata.resourceVersion)
self.inventory.set_variable(vm_name, 'uid', vm.metadata.uid)
# Add all variables which are listed in 'ansible' annotation:
annotations_data = json.loads(vm_annotations.get(annotation_variable, "{}"))
for k, v in annotations_data.items():
self.inventory.set_variable(vm_name, k, v)
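# As an illustration: with the default annotation_variable of 'ansible', a VM
# annotated with ansible='{"ansible_user": "cloud-user"}' would get the host
# variable ansible_user set to 'cloud-user' by the loop above.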
def verify_file(self, path):
if super(InventoryModule, self).verify_file(path):
if path.endswith(('kubevirt.yml', 'kubevirt.yaml')):
return True
return False

View File

@@ -6,6 +6,7 @@ __metaclass__ = type
DOCUMENTATION = r'''
name: linode
plugin_type: inventory
author:
- Luke Murphy (@decentral1se)
short_description: Ansible dynamic inventory plugin for Linode.
@@ -16,15 +17,12 @@ DOCUMENTATION = r'''
- Reads inventories from the Linode API v4.
- Uses a YAML configuration file that ends with linode.(yml|yaml).
- Linode labels are used by default as the hostnames.
- The default inventory groups are built from groups (deprecated by
Linode) and not tags.
extends_documentation_fragment:
- constructed
- The inventory groups are built from groups and not tags.
options:
plugin:
description: marks this as an instance of the 'linode' plugin
required: true
choices: ['linode', 'community.general.linode']
choices: ['linode']
access_token:
description: The Linode account personal access token.
required: true
@@ -35,70 +33,41 @@ DOCUMENTATION = r'''
default: []
type: list
required: false
tags:
description: Populate inventory only with instances which have at least one of the tags listed here.
default: []
type: list
required: false
version_added: 2.0.0
types:
description: Populate inventory with instances with this type.
default: []
type: list
required: false
strict:
version_added: 2.0.0
compose:
version_added: 2.0.0
groups:
version_added: 2.0.0
keyed_groups:
version_added: 2.0.0
'''
EXAMPLES = r'''
# Minimal example. `LINODE_ACCESS_TOKEN` is exposed in environment.
plugin: community.general.linode
plugin: linode
# Example with regions, types, groups and access token
plugin: community.general.linode
plugin: linode
access_token: foobar
regions:
- eu-west
types:
- g5-standard-2
# Example with keyed_groups, groups, and compose
plugin: community.general.linode
access_token: foobar
keyed_groups:
- key: tags
separator: ''
- key: region
prefix: region
groups:
webservers: "'web' in (tags|list)"
mailservers: "'mail' in (tags|list)"
compose:
ansible_port: 2222
'''
import os
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.six import string_types
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible.plugins.inventory import BaseInventoryPlugin
try:
from linode_api4 import LinodeClient
from linode_api4.errors import ApiError as LinodeApiError
HAS_LINODE = True
except ImportError:
HAS_LINODE = False
raise AnsibleError('the Linode dynamic inventory plugin requires linode_api4.')
class InventoryModule(BaseInventoryPlugin, Constructable):
class InventoryModule(BaseInventoryPlugin):
NAME = 'community.general.linode'
@@ -141,7 +110,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
for linode_group in self.linode_groups:
self.inventory.add_group(linode_group)
def _filter_by_config(self, regions, types, tags):
def _filter_by_config(self, regions, types):
"""Filter instances by user specified configuration."""
if regions:
self.instances = [
@@ -155,12 +124,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
if instance.type.id in types
]
if tags:
self.instances = [
instance for instance in self.instances
if any(tag in instance.tags for tag in tags)
]
def _add_instances_to_groups(self):
"""Add instance names to their dynamic inventory groups."""
for instance in self.instances:
@@ -205,10 +168,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
'type_to_be': list,
'value': config_data.get('types', [])
},
'tags': {
'type_to_be': list,
'value': config_data.get('tags', [])
},
}
for name in options:
@@ -220,9 +179,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
regions = options['regions']['value']
types = options['types']['value']
tags = options['tags']['value']
return regions, types, tags
return regions, types
def verify_file(self, path):
"""Verify the Linode configuration file."""
@@ -236,35 +194,14 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
"""Dynamically parse Linode the cloud inventory."""
super(InventoryModule, self).parse(inventory, loader, path)
if not HAS_LINODE:
raise AnsibleError('the Linode dynamic inventory plugin requires linode_api4.')
config_data = self._read_config_data(path)
self._build_client()
self._get_instances_inventory()
strict = self.get_option('strict')
regions, types, tags = self._get_query_options(config_data)
self._filter_by_config(regions, types, tags)
regions, types = self._get_query_options(config_data)
self._filter_by_config(regions, types)
self._add_groups()
self._add_instances_to_groups()
self._add_hostvars_for_instances()
for instance in self.instances:
variables = self.inventory.get_host(instance.label).get_vars()
self._add_host_to_composed_groups(
self.get_option('groups'),
variables,
instance.label,
strict=strict)
self._add_host_to_keyed_groups(
self.get_option('keyed_groups'),
variables,
instance.label,
strict=strict)
self._set_composite_vars(
self.get_option('compose'),
variables,
instance.label,
strict=strict)

View File

@@ -5,8 +5,8 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: nmap
plugin_type: inventory
short_description: Uses nmap to find hosts to target
description:
- Uses a YAML configuration file with a valid YAML extension.
@@ -19,7 +19,7 @@ DOCUMENTATION = '''
plugin:
description: token that ensures this is a source file for the 'nmap' plugin.
required: True
choices: ['nmap', 'community.general.nmap']
choices: ['nmap']
address:
description: Network IP or range of IPs to scan. You can use a simple range (10.2.2.15-25) or CIDR notation.
required: True
@@ -43,10 +43,10 @@ DOCUMENTATION = '''
- 'TODO: add OS fingerprinting'
'''
EXAMPLES = '''
# inventory.config file in YAML format
plugin: community.general.nmap
strict: False
address: 192.168.0.0/24
# inventory.config file in YAML format
plugin: nmap
strict: False
address: 192.168.0.0/24
'''
import os

Some files were not shown because too many files have changed in this diff.