Compare commits

..

3 Commits
3.7.0 ... 2.0.0

Author SHA1 Message Date
Felix Fontein
dc4222df0d Fix links. 2021-01-28 13:52:13 +01:00
Felix Fontein
b9a89d6d0f Release 2.0.0. 2021-01-28 13:43:42 +01:00
Felix Fontein
f48913d91b Add release summary. 2021-01-28 13:42:22 +01:00
1299 changed files with 22047 additions and 55690 deletions

View File

@@ -13,25 +13,13 @@ pr:
- stable-* - stable-*
schedules: schedules:
- cron: 0 8 * * * - cron: 0 9 * * *
displayName: Nightly (main) displayName: Nightly
always: true always: true
branches: branches:
include: include:
- main - main
- cron: 0 10 * * * - stable-*
displayName: Nightly (active stable branches)
always: true
branches:
include:
- stable-2
- stable-3
- cron: 0 11 * * 0
displayName: Weekly (old stable branches)
always: true
branches:
include:
- stable-1
variables: variables:
- name: checkoutPath - name: checkoutPath
@@ -48,7 +36,7 @@ variables:
resources: resources:
containers: containers:
- container: default - container: default
image: quay.io/ansible/azure-pipelines-test-container:1.9.0 image: quay.io/ansible/azure-pipelines-test-container:1.7.1
pool: Standard pool: Standard
@@ -68,19 +56,6 @@ stages:
- test: 3 - test: 3
- test: 4 - test: 4
- test: extra - test: extra
- stage: Sanity_2_11
displayName: Sanity 2.11
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
nameFormat: Test {0}
testFormat: 2.11/sanity/{0}
targets:
- test: 1
- test: 2
- test: 3
- test: 4
- stage: Sanity_2_10 - stage: Sanity_2_10
displayName: Sanity 2.10 displayName: Sanity 2.10
dependsOn: [] dependsOn: []
@@ -124,23 +99,6 @@ stages:
- test: 3.7 - test: 3.7
- test: 3.8 - test: 3.8
- test: 3.9 - test: 3.9
- test: '3.10'
- stage: Units_2_11
displayName: Units 2.11
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
nameFormat: Python {0}
testFormat: 2.11/units/{0}/1
targets:
- test: 2.6
- test: 2.7
- test: 3.5
- test: 3.6
- test: 3.7
- test: 3.8
- test: 3.9
- stage: Units_2_10 - stage: Units_2_10
displayName: Units 2.10 displayName: Units 2.10
dependsOn: [] dependsOn: []
@@ -181,47 +139,6 @@ stages:
- template: templates/matrix.yml - template: templates/matrix.yml
parameters: parameters:
testFormat: devel/{0} testFormat: devel/{0}
targets:
- name: macOS 11.1
test: macos/11.1
- name: RHEL 7.9
test: rhel/7.9
- name: RHEL 8.4
test: rhel/8.4
- name: FreeBSD 12.2
test: freebsd/12.2
- name: FreeBSD 13.0
test: freebsd/13.0
groups:
- 1
- 2
- 3
- stage: Remote_2_11
displayName: Remote 2.11
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
testFormat: 2.11/{0}
targets:
- name: macOS 11.1
test: macos/11.1
- name: RHEL 7.9
test: rhel/7.9
- name: RHEL 8.3
test: rhel/8.3
- name: FreeBSD 12.2
test: freebsd/12.2
groups:
- 1
- 2
- stage: Remote_2_10
displayName: Remote 2.10
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
testFormat: 2.10/{0}
targets: targets:
- name: OS X 10.11 - name: OS X 10.11
test: osx/10.11 test: osx/10.11
@@ -233,6 +150,26 @@ stages:
test: rhel/7.8 test: rhel/7.8
- name: RHEL 8.2 - name: RHEL 8.2
test: rhel/8.2 test: rhel/8.2
- name: FreeBSD 11.4
test: freebsd/11.4
- name: FreeBSD 12.2
test: freebsd/12.2
groups:
- 1
- 2
- 3
- stage: Remote_2_10
displayName: Remote 2.10
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
testFormat: 2.10/{0}
targets:
- name: macOS 11.1
test: macos/11.1
- name: RHEL 8.2
test: rhel/8.2
- name: FreeBSD 12.1 - name: FreeBSD 12.1
test: freebsd/12.1 test: freebsd/12.1
groups: groups:
@@ -269,14 +206,16 @@ stages:
test: centos7 test: centos7
- name: CentOS 8 - name: CentOS 8
test: centos8 test: centos8
- name: Fedora 33 - name: Fedora 31
test: fedora33 test: fedora31
- name: Fedora 34 - name: Fedora 32
test: fedora34 test: fedora32
- name: openSUSE 15 py2 - name: openSUSE 15 py2
test: opensuse15py2 test: opensuse15py2
- name: openSUSE 15 py3 - name: openSUSE 15 py3
test: opensuse15 test: opensuse15
- name: Ubuntu 16.04
test: ubuntu1604
- name: Ubuntu 18.04 - name: Ubuntu 18.04
test: ubuntu1804 test: ubuntu1804
- name: Ubuntu 20.04 - name: Ubuntu 20.04
@@ -285,25 +224,6 @@ stages:
- 1 - 1
- 2 - 2
- 3 - 3
- stage: Docker_2_11
displayName: Docker 2.11
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
testFormat: 2.11/linux/{0}
targets:
- name: CentOS 8
test: centos8
- name: Fedora 33
test: fedora33
- name: openSUSE 15 py3
test: opensuse15
- name: Ubuntu 20.04
test: ubuntu2004
groups:
- 2
- 3
- stage: Docker_2_10 - stage: Docker_2_10
displayName: Docker 2.10 displayName: Docker 2.10
dependsOn: [] dependsOn: []
@@ -318,8 +238,6 @@ stages:
test: fedora32 test: fedora32
- name: openSUSE 15 py3 - name: openSUSE 15 py3
test: opensuse15 test: opensuse15
- name: Ubuntu 16.04
test: ubuntu1604
groups: groups:
- 2 - 2
- 3 - 3
@@ -350,16 +268,6 @@ stages:
parameters: parameters:
nameFormat: Python {0} nameFormat: Python {0}
testFormat: devel/cloud/{0}/1 testFormat: devel/cloud/{0}/1
targets:
- test: 3.8
- stage: Cloud_2_11
displayName: Cloud 2.11
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
nameFormat: Python {0}
testFormat: 2.11/cloud/{0}/1
targets: targets:
- test: 2.7 - test: 2.7
- test: 3.6 - test: 3.6
@@ -389,22 +297,17 @@ stages:
- Sanity_devel - Sanity_devel
- Sanity_2_9 - Sanity_2_9
- Sanity_2_10 - Sanity_2_10
- Sanity_2_11
- Units_devel - Units_devel
- Units_2_9 - Units_2_9
- Units_2_10 - Units_2_10
- Units_2_11
- Remote_devel - Remote_devel
- Remote_2_9 - Remote_2_9
- Remote_2_10 - Remote_2_10
- Remote_2_11
- Docker_devel - Docker_devel
- Docker_2_9 - Docker_2_9
- Docker_2_10 - Docker_2_10
- Docker_2_11
- Cloud_devel - Cloud_devel
- Cloud_2_9 - Cloud_2_9
- Cloud_2_10 - Cloud_2_10
- Cloud_2_11
jobs: jobs:
- template: templates/coverage.yml - template: templates/coverage.yml

View File

@@ -7,7 +7,7 @@ set -o pipefail -eu
output_path="$1" output_path="$1"
curl --silent --show-error https://ansible-ci-files.s3.us-east-1.amazonaws.com/codecov/codecov.sh > codecov.sh curl --silent --show-error https://codecov.io/bash > codecov.sh
for file in "${output_path}"/reports/coverage*.xml; do for file in "${output_path}"/reports/coverage*.xml; do
name="${file}" name="${file}"

441
.github/BOTMETA.yml vendored
View File

@@ -1,78 +1,25 @@
automerge: true automerge: true
files: files:
plugins/:
supershipit: quidame
changelogs/: {}
changelogs/fragments/: changelogs/fragments/:
support: community support: community
$actions: $actions:
labels: action labels: action
$actions/system/iptables_state.py: $actions/aireos.py:
maintainers: quidame labels: aireos cisco networking
$actions/system/shutdown.py: $actions/ironware.py:
maintainers: paulquack
labels: ironware networking
$actions/shutdown.py:
maintainers: nitzmahone samdoran aminvakil maintainers: nitzmahone samdoran aminvakil
$becomes/: $becomes/:
labels: become labels: become
$becomes/doas.py:
maintainers: $team_ansible_core
$becomes/dzdo.py:
maintainers: $team_ansible_core
$becomes/ksu.py:
maintainers: $team_ansible_core
$becomes/machinectl.py:
maintainers: $team_ansible_core
$becomes/pbrun.py:
maintainers: $team_ansible_core
$becomes/pfexec.py:
maintainers: $team_ansible_core
$becomes/pmrun.py:
maintainers: $team_ansible_core
$becomes/sesu.py:
maintainers: nekonyuu
$becomes/sudosu.py:
maintainers: dagwieers
$caches/:
labels: cache
$caches/memcached.py: {}
$caches/pickle.py:
maintainers: bcoca
$caches/redis.py: {}
$caches/yaml.py:
maintainers: bcoca
$callbacks/: $callbacks/:
labels: callbacks labels: callbacks
$callbacks/cgroup_memory_recap.py: {}
$callbacks/context_demo.py: {}
$callbacks/counter_enabled.py: {}
$callbacks/dense.py:
maintainers: dagwieers
$callbacks/diy.py:
maintainers: theque5t
$callbacks/hipchat.py: {}
$callbacks/jabber.py: {}
$callbacks/loganalytics.py:
maintainers: zhcli
$callbacks/logdna.py: {}
$callbacks/logentries.py: {}
$callbacks/log_plays.py: {}
$callbacks/logstash.py:
maintainers: ujenmr
$callbacks/mail.py:
maintainers: dagwieers
$callbacks/nrdp.py:
maintainers: rverchere
$callbacks/null.py: {}
$callbacks/opentelemetry.py:
maintainers: v1v
keywords: opentelemetry observability
$callbacks/say.py: $callbacks/say.py:
notify: chris-short notify: chris-short
maintainers: $team_macos maintainers: $team_macos
labels: macos say labels: macos say
keywords: brew cask darwin homebrew macosx macports osx keywords: brew cask darwin homebrew macosx macports osx
$callbacks/selective.py: {}
$callbacks/slack.py: {}
$callbacks/splunk.py: {}
$callbacks/sumologic.py: $callbacks/sumologic.py:
maintainers: ryancurrah maintainers: ryancurrah
labels: sumologic labels: sumologic
@@ -81,26 +28,16 @@ files:
$callbacks/unixy.py: $callbacks/unixy.py:
maintainers: akatch maintainers: akatch
labels: unixy labels: unixy
$callbacks/yaml.py: {}
$connections/: $connections/:
labels: connections labels: connections
$connections/chroot.py: {} $connections/kubectl.py:
$connections/funcd.py: maintainers: chouseknecht fabianvf flaper87 maxamillion
maintainers: mscherer labels: k8s kubectl
$connections/iocage.py: {}
$connections/jail.py:
maintainers: $team_ansible_core
$connections/lxc.py: {}
$connections/lxd.py: $connections/lxd.py:
maintainers: mattclay maintainers: mattclay
labels: lxd labels: lxd
$connections/qubes.py:
maintainers: kushaldas
$connections/saltstack.py: $connections/saltstack.py:
maintainers: mscherer
labels: saltstack labels: saltstack
$connections/zone.py:
maintainers: $team_ansible_core
$doc_fragments/: $doc_fragments/:
labels: docs_fragments labels: docs_fragments
$doc_fragments/hpe3par.py: $doc_fragments/hpe3par.py:
@@ -114,109 +51,55 @@ files:
$doc_fragments/xenserver.py: $doc_fragments/xenserver.py:
maintainers: bvitnik maintainers: bvitnik
labels: xenserver labels: xenserver
$filters/dict.py:
maintainers: felixfontein
$filters/dict_kv.py: $filters/dict_kv.py:
maintainers: giner maintainers: giner
$filters/from_csv.py:
maintainers: Ajpantuso
$filters/groupby:
maintainers: felixfontein
$filters/hashids:
maintainers: Ajpantuso
$filters/jc.py: $filters/jc.py:
maintainers: kellyjonbrazil maintainers: kellyjonbrazil
$filters/json_query.py: {}
$filters/list.py: $filters/list.py:
maintainers: vbotka maintainers: vbotka
$filters/path_join_shim.py:
maintainers: felixfontein
$filters/random_mac.py: {}
$filters/time.py: $filters/time.py:
maintainers: resmo maintainers: resmo
$filters/unicode_normalize.py: $httpapis/:
maintainers: Ajpantuso maintainers: $team_networking
$filters/version_sort.py: labels: networking
maintainers: ericzolf $httpapis/ftd.py:
maintainers: $team_networking annikulin
labels: cisco ftd networking
keywords: firepower ftd
$inventories/: $inventories/:
labels: inventories labels: inventories
$inventories/cobbler.py:
maintainers: opoplawski
$inventories/gitlab_runners.py:
maintainers: morph027
$inventories/linode.py: $inventories/linode.py:
maintainers: $team_linode maintainers: $team_linode
labels: cloud linode labels: cloud linode
keywords: linode dynamic inventory script keywords: linode dynamic inventory script
$inventories/lxd.py:
maintainers: conloos
$inventories/nmap.py: {}
$inventories/online.py:
maintainers: sieben
$inventories/proxmox.py:
maintainers: $team_virt ilijamt
$inventories/icinga2.py:
maintainers: bongoeadgc6
$inventories/scaleway.py: $inventories/scaleway.py:
maintainers: $team_scaleway maintainers: $team_scaleway
labels: cloud scaleway labels: cloud scaleway
$inventories/stackpath_compute.py:
maintainers: shayrybak
$inventories/virtualbox.py: {}
$lookups/: $lookups/:
labels: lookups labels: lookups
$lookups/cartesian.py: {} $lookups/onepass:
$lookups/chef_databag.py: {} maintainers: samdoran
$lookups/consul_kv.py: {} labels: onepassword
$lookups/credstash.py: {} $lookups/conjur_variable.py:
notify: cyberark-bizdev
maintainers: $team_cyberark_conjur
labels: conjur_variable
$lookups/cyberarkpassword.py: $lookups/cyberarkpassword.py:
notify: cyberark-bizdev notify: cyberark-bizdev
labels: cyberarkpassword labels: cyberarkpassword
$lookups/dependent.py:
maintainers: felixfontein
$lookups/dig.py: $lookups/dig.py:
maintainers: jpmens maintainers: jpmens
labels: dig labels: dig
$lookups/dnstxt.py: $lookups/tss.py:
maintainers: jpmens maintainers: amigus
$lookups/dsv.py: $lookups/dsv.py:
maintainers: amigus endlesstrax maintainers: amigus
$lookups/etcd3.py:
maintainers: eric-belhomme
$lookups/etcd.py:
maintainers: jpmens
$lookups/filetree.py:
maintainers: dagwieers
$lookups/flattened.py: {}
$lookups/hiera.py:
maintainers: jparrill
$lookups/keyring.py: {}
$lookups/lastpass.py: {}
$lookups/lmdb_kv.py:
maintainers: jpmens
$lookups/manifold.py: $lookups/manifold.py:
maintainers: galanoff maintainers: galanoff
labels: manifold labels: manifold
$lookups/nios: $lookups/nios:
maintainers: $team_networking sganesh-infoblox maintainers: $team_networking sganesh-infoblox
labels: infoblox networking labels: infoblox networking
$lookups/onepass:
maintainers: samdoran
labels: onepassword
$lookups/onepassword.py:
maintainers: azenk scottsb
$lookups/onepassword_raw.py:
maintainers: azenk scottsb
$lookups/passwordstore.py: {}
$lookups/random_pet.py:
maintainers: Akasurde
$lookups/random_string.py:
maintainers: Akasurde
$lookups/redis.py:
maintainers: $team_ansible_core jpmens
$lookups/shelvefile.py: {}
$lookups/tss.py:
maintainers: amigus endlesstrax
$module_utils/: $module_utils/:
labels: module_utils labels: module_utils
$module_utils/gitlab.py: $module_utils/gitlab.py:
@@ -239,9 +122,6 @@ files:
$module_utils/memset.py: $module_utils/memset.py:
maintainers: glitchcrab maintainers: glitchcrab
labels: cloud memset labels: cloud memset
$module_utils/mh/:
maintainers: russoz
labels: module_helper
$module_utils/module_helper.py: $module_utils/module_helper.py:
maintainers: russoz maintainers: russoz
labels: module_helper labels: module_helper
@@ -257,13 +137,12 @@ files:
$module_utils/redfish_utils.py: $module_utils/redfish_utils.py:
maintainers: $team_redfish maintainers: $team_redfish
labels: redfish_utils labels: redfish_utils
$module_utils/remote_management/lxca/common.py: $module_utils/remote_management/dellemc/: rajeevarakkal
maintainers: navalkp prabhosa $module_utils/remote_management/lxca/common.py: navalkp prabhosa
$module_utils/scaleway.py: $module_utils/scaleway.py:
maintainers: $team_scaleway maintainers: $team_scaleway
labels: cloud scaleway labels: cloud scaleway
$module_utils/storage/hpe3par/hpe3par.py: $module_utils/storage/hpe3par/hpe3par.py: farhan7500 gautamphegde
maintainers: farhan7500 gautamphegde
$module_utils/utm_utils.py: $module_utils/utm_utils.py:
maintainers: $team_e_spirit maintainers: $team_e_spirit
labels: utm_utils labels: utm_utils
@@ -296,25 +175,31 @@ files:
maintainers: cloudnull maintainers: cloudnull
$modules/cloud/lxd/: $modules/cloud/lxd/:
ignore: hnakamur ignore: hnakamur
$modules/cloud/lxd/lxd_profile.py:
maintainers: conloos
$modules/cloud/memset/: $modules/cloud/memset/:
maintainers: glitchcrab maintainers: glitchcrab
$modules/cloud/misc/cloud_init_data_facts.py: $modules/cloud/misc/cloud_init_data_facts.py:
maintainers: resmo maintainers: resmo
$modules/cloud/misc/proxmox: $modules/cloud/misc/helm.py:
maintainers: flaper87
$modules/cloud/misc/proxmox.py:
maintainers: $team_virt UnderGreen
labels: proxmox virt
ignore: skvidal
keywords: kvm libvirt proxmox qemu
$modules/cloud/misc/proxmox_kvm.py:
maintainers: $team_virt helldorado
labels: proxmox_kvm virt
ignore: skvidal
keywords: kvm libvirt proxmox qemu
$modules/cloud/misc/proxmox_snap.py:
maintainers: $team_virt maintainers: $team_virt
labels: proxmox virt labels: proxmox virt
keywords: kvm libvirt proxmox qemu keywords: kvm libvirt proxmox qemu
$modules/cloud/misc/proxmox.py:
maintainers: UnderGreen
ignore: skvidal
$modules/cloud/misc/proxmox_kvm.py:
maintainers: helldorado
ignore: skvidal
$modules/cloud/misc/proxmox_template.py: $modules/cloud/misc/proxmox_template.py:
maintainers: UnderGreen maintainers: $team_virt UnderGreen
labels: proxmox_template virt
ignore: skvidal ignore: skvidal
keywords: kvm libvirt proxmox qemu
$modules/cloud/misc/rhevm.py: $modules/cloud/misc/rhevm.py:
maintainers: $team_virt TimothyVandenbrande maintainers: $team_virt TimothyVandenbrande
labels: rhevm virt labels: rhevm virt
@@ -323,7 +208,7 @@ files:
$modules/cloud/misc/: $modules/cloud/misc/:
ignore: ryansb ignore: ryansb
$modules/cloud/misc/terraform.py: $modules/cloud/misc/terraform.py:
maintainers: m-yosefpor rainerleber maintainers: m-yosefpor
$modules/cloud/misc/xenserver_facts.py: $modules/cloud/misc/xenserver_facts.py:
maintainers: caphrim007 cheese maintainers: caphrim007 cheese
labels: xenserver_facts labels: xenserver_facts
@@ -356,40 +241,16 @@ files:
maintainers: omgjlk sivel maintainers: omgjlk sivel
$modules/cloud/rackspace/: $modules/cloud/rackspace/:
ignore: ryansb sivel ignore: ryansb sivel
$modules/cloud/rackspace/rax_cbs.py:
maintainers: claco
$modules/cloud/rackspace/rax_cbs_attachments.py:
maintainers: claco
$modules/cloud/rackspace/rax_cdb.py:
maintainers: jails
$modules/cloud/rackspace/rax_cdb_user.py:
maintainers: jails
$modules/cloud/rackspace/rax_cdb_database.py:
maintainers: jails
$modules/cloud/rackspace/rax_clb.py: $modules/cloud/rackspace/rax_clb.py:
maintainers: claco maintainers: claco
$modules/cloud/rackspace/rax_clb_nodes.py: $modules/cloud/rackspace/rax_clb_nodes.py:
maintainers: neuroid maintainers: neuroid
$modules/cloud/rackspace/rax_clb_ssl.py: $modules/cloud/rackspace/rax_clb_ssl.py:
maintainers: smashwilson maintainers: smashwilson
$modules/cloud/rackspace/rax_files.py:
maintainers: angstwad
$modules/cloud/rackspace/rax_files_objects.py:
maintainers: angstwad
$modules/cloud/rackspace/rax_identity.py: $modules/cloud/rackspace/rax_identity.py:
maintainers: claco maintainers: claco
$modules/cloud/rackspace/rax_network.py: $modules/cloud/rackspace/rax_network.py:
maintainers: claco omgjlk maintainers: claco omgjlk
$modules/cloud/rackspace/rax_mon_alarm.py:
maintainers: smashwilson
$modules/cloud/rackspace/rax_mon_check.py:
maintainers: smashwilson
$modules/cloud/rackspace/rax_mon_entity.py:
maintainers: smashwilson
$modules/cloud/rackspace/rax_mon_notification.py:
maintainers: smashwilson
$modules/cloud/rackspace/rax_mon_notification_plan.py:
maintainers: smashwilson
$modules/cloud/rackspace/rax_queue.py: $modules/cloud/rackspace/rax_queue.py:
maintainers: claco maintainers: claco
$modules/cloud/scaleway/: $modules/cloud/scaleway/:
@@ -401,17 +262,13 @@ files:
$modules/cloud/scaleway/scaleway_ip_info.py: $modules/cloud/scaleway/scaleway_ip_info.py:
maintainers: Spredzy maintainers: Spredzy
$modules/cloud/scaleway/scaleway_organization_info.py: $modules/cloud/scaleway/scaleway_organization_info.py:
maintainers: sieben Spredzy maintainers: sieben
$modules/cloud/scaleway/scaleway_security_group.py: $modules/cloud/scaleway/scaleway_security_group.py:
maintainers: DenBeke maintainers: DenBeke
$modules/cloud/scaleway/scaleway_security_group_info.py: $modules/cloud/scaleway/scaleway_security_group_info.py:
maintainers: sieben Spredzy maintainers: sieben
$modules/cloud/scaleway/scaleway_security_group_rule.py: $modules/cloud/scaleway/scaleway_security_group_rule.py:
maintainers: DenBeke maintainers: DenBeke
$modules/cloud/scaleway/scaleway_server_info.py:
maintainers: Spredzy
$modules/cloud/scaleway/scaleway_snapshot_info.py:
maintainers: Spredzy
$modules/cloud/scaleway/scaleway_volume.py: $modules/cloud/scaleway/scaleway_volume.py:
labels: scaleway_volume labels: scaleway_volume
ignore: hekonsek ignore: hekonsek
@@ -435,10 +292,8 @@ files:
maintainers: bvitnik maintainers: bvitnik
$modules/clustering/consul/: $modules/clustering/consul/:
maintainers: $team_consul maintainers: $team_consul
ignore: colin-nolan
$modules/clustering/etcd3.py: $modules/clustering/etcd3.py:
maintainers: evrardjp maintainers: evrardjp vfauth
ignore: vfauth
$modules/clustering/nomad/: $modules/clustering/nomad/:
maintainers: chris93111 maintainers: chris93111
$modules/clustering/pacemaker_cluster.py: $modules/clustering/pacemaker_cluster.py:
@@ -463,35 +318,21 @@ files:
maintainers: john-westcott-iv maintainers: john-westcott-iv
$modules/database/misc/redis.py: $modules/database/misc/redis.py:
maintainers: slok maintainers: slok
$modules/database/misc/redis_info.py:
maintainers: levonet
$modules/database/misc/redis_data_info.py:
maintainers: paginabianca
$modules/database/misc/redis_data.py:
maintainers: paginabianca
$modules/database/misc/riak.py: $modules/database/misc/riak.py:
maintainers: drewkerrigan jsmartin maintainers: drewkerrigan jsmartin
$modules/database/mssql/mssql_db.py: $modules/database/mssql/mssql_db.py:
maintainers: vedit Jmainguy kenichi-ogawa-1988 maintainers: vedit Jmainguy kenichi-ogawa-1988
labels: mssql_db labels: mssql_db
$modules/database/saphana/hana_query.py:
maintainers: rainerleber
$modules/database/vertica/: $modules/database/vertica/:
maintainers: dareko maintainers: dareko
$modules/files/archive.py: $modules/files/archive.py:
maintainers: bendoh maintainers: bendoh
$modules/files/filesize.py:
maintainers: quidame
$modules/files/ini_file.py: $modules/files/ini_file.py:
maintainers: jpmens noseka1 maintainers: jpmens noseka1
$modules/files/iso_create.py:
maintainers: Tomorrow9
$modules/files/iso_extract.py: $modules/files/iso_extract.py:
maintainers: dagwieers jhoekx ribbons maintainers: dagwieers jhoekx ribbons
$modules/files/read_csv.py: $modules/files/read_csv.py:
maintainers: dagwieers maintainers: dagwieers
$modules/files/sapcar_extract.py:
maintainers: RainerLeber
$modules/files/xattr.py: $modules/files/xattr.py:
maintainers: bcoca maintainers: bcoca
labels: xattr labels: xattr
@@ -499,6 +340,8 @@ files:
maintainers: dagwieers magnus919 tbielawa cmprescott sm4rk0 maintainers: dagwieers magnus919 tbielawa cmprescott sm4rk0
labels: m:xml xml labels: m:xml xml
ignore: magnus919 ignore: magnus919
$modules/identity/onepassword_facts.py:
maintainers: Rylon
$modules/identity/ipa/: $modules/identity/ipa/:
maintainers: $team_ipa maintainers: $team_ipa
$modules/identity/ipa/ipa_pwpolicy.py: $modules/identity/ipa/ipa_pwpolicy.py:
@@ -509,28 +352,13 @@ files:
maintainers: jparrill maintainers: jparrill
$modules/identity/keycloak/: $modules/identity/keycloak/:
maintainers: $team_keycloak maintainers: $team_keycloak
$modules/identity/keycloak/keycloak_authentication.py:
maintainers: elfelip Gaetan2907
$modules/identity/keycloak/keycloak_clientscope.py:
maintainers: Gaetan2907
$modules/identity/keycloak/keycloak_client_rolemapping.py:
maintainers: Gaetan2907
$modules/identity/keycloak/keycloak_group.py: $modules/identity/keycloak/keycloak_group.py:
maintainers: adamgoossens maintainers: adamgoossens
$modules/identity/keycloak/keycloak_identity_provider.py:
maintainers: laurpaum
$modules/identity/keycloak/keycloak_realm.py:
maintainers: kris2kris
$modules/identity/keycloak/keycloak_role.py:
maintainers: laurpaum
$modules/identity/keycloak/keycloak_user_federation.py:
maintainers: laurpaum
$modules/identity/onepassword_info.py: $modules/identity/onepassword_info.py:
maintainers: Rylon maintainers: Rylon
$modules/identity/opendj/opendj_backendprop.py: $modules/identity/opendj/opendj_backendprop.py:
maintainers: dj-wasabi maintainers: dj-wasabi
$modules/monitoring/airbrake_deployment.py: $modules/monitoring/airbrake_deployment.py:
maintainers: phumpal
labels: airbrake_deployment labels: airbrake_deployment
ignore: bpennypacker ignore: bpennypacker
$modules/monitoring/bigpanda.py: $modules/monitoring/bigpanda.py:
@@ -541,8 +369,6 @@ files:
maintainers: n0ts maintainers: n0ts
labels: datadog_event labels: datadog_event
ignore: arturaz ignore: arturaz
$modules/monitoring/datadog/datadog_downtime.py:
maintainers: Datadog
$modules/monitoring/datadog/datadog_monitor.py: $modules/monitoring/datadog/datadog_monitor.py:
maintainers: skornehl maintainers: skornehl
$modules/monitoring/honeybadger_deployment.py: $modules/monitoring/honeybadger_deployment.py:
@@ -589,12 +415,8 @@ files:
maintainers: andsens maintainers: andsens
$modules/monitoring/spectrum_device.py: $modules/monitoring/spectrum_device.py:
maintainers: orgito maintainers: orgito
$modules/monitoring/spectrum_model_attrs.py:
maintainers: tgates81
$modules/monitoring/stackdriver.py: $modules/monitoring/stackdriver.py:
maintainers: bwhaley maintainers: bwhaley
$modules/monitoring/statsd.py:
maintainers: mamercad
$modules/monitoring/statusio_maintenance.py: $modules/monitoring/statusio_maintenance.py:
maintainers: bhcopeland maintainers: bhcopeland
$modules/monitoring/uptimerobot.py: $modules/monitoring/uptimerobot.py:
@@ -606,10 +428,10 @@ files:
maintainers: drcapulet maintainers: drcapulet
$modules/net_tools/dnsmadeeasy.py: $modules/net_tools/dnsmadeeasy.py:
maintainers: briceburg maintainers: briceburg
$modules/net_tools/gandi_livedns.py:
maintainers: gthiemonge
$modules/net_tools/haproxy.py: $modules/net_tools/haproxy.py:
maintainers: ravibhure Normo maintainers: ravibhure
$modules/net_tools/:
maintainers: nerzhul
$modules/net_tools/infinity/infinity.py: $modules/net_tools/infinity/infinity.py:
maintainers: MeganLiu maintainers: MeganLiu
$modules/net_tools/ip_netns.py: $modules/net_tools/ip_netns.py:
@@ -620,6 +442,8 @@ files:
maintainers: akostyuk maintainers: akostyuk
$modules/net_tools/ipwcli_dns.py: $modules/net_tools/ipwcli_dns.py:
maintainers: cwollinger maintainers: cwollinger
$modules/net_tools/ldap/ldap_attr.py:
maintainers: jtyr
$modules/net_tools/ldap/ldap_attrs.py: $modules/net_tools/ldap/ldap_attrs.py:
maintainers: drybjed jtyr noles maintainers: drybjed jtyr noles
$modules/net_tools/ldap/ldap_entry.py: $modules/net_tools/ldap/ldap_entry.py:
@@ -633,29 +457,13 @@ files:
ignore: andyhky ignore: andyhky
$modules/net_tools/netcup_dns.py: $modules/net_tools/netcup_dns.py:
maintainers: nbuchwitz maintainers: nbuchwitz
$modules/net_tools/nsupdate.py:
maintainers: nerzhul
$modules/net_tools/omapi_host.py: $modules/net_tools/omapi_host.py:
maintainers: amasolov nerzhul maintainers: amasolov
$modules/net_tools/pritunl/:
maintainers: Lowess
$modules/net_tools/nios/: $modules/net_tools/nios/:
maintainers: $team_networking maintainers: $team_networking
labels: infoblox networking labels: infoblox networking
$modules/net_tools/nios/nios_a_record.py:
maintainers: brampling
$modules/net_tools/nios/nios_aaaa_record.py:
maintainers: brampling
$modules/net_tools/nios/nios_cname_record.py:
maintainers: brampling
$modules/net_tools/nios/nios_fixed_address.py: $modules/net_tools/nios/nios_fixed_address.py:
maintainers: sjaiswal maintainers: sjaiswal
$modules/net_tools/nios/nios_member.py:
maintainers: krisvasudevan
$modules/net_tools/nios/nios_mx_record.py:
maintainers: brampling
$modules/net_tools/nios/nios_naptr_record.py:
maintainers: brampling
$modules/net_tools/nios/nios_nsgroup.py: $modules/net_tools/nios/nios_nsgroup.py:
maintainers: ebirn sjaiswal maintainers: ebirn sjaiswal
$modules/net_tools/nios/nios_ptr_record.py: $modules/net_tools/nios/nios_ptr_record.py:
@@ -668,16 +476,18 @@ files:
maintainers: alcamie101 maintainers: alcamie101
$modules/net_tools/snmp_facts.py: $modules/net_tools/snmp_facts.py:
maintainers: ogenstad ujwalkomarla maintainers: ogenstad ujwalkomarla
$modules/notification/osx_say.py:
maintainers: ansible mpdehaan
labels: _osx_say
deprecated: true
$modules/notification/bearychat.py: $modules/notification/bearychat.py:
maintainers: tonyseek maintainers: tonyseek
$modules/notification/campfire.py: $modules/notification/campfire.py:
maintainers: fabulops maintainers: fabulops
$modules/notification/catapult.py: $modules/notification/catapult.py:
maintainers: Jmainguy maintainers: Jmainguy
$modules/notification/cisco_webex.py: $modules/notification/cisco_spark.py:
maintainers: drew-russell maintainers: drew-russell
$modules/notification/discord.py:
maintainers: cwollinger
$modules/notification/flowdock.py: $modules/notification/flowdock.py:
maintainers: mcodd maintainers: mcodd
$modules/notification/grove.py: $modules/notification/grove.py:
@@ -705,13 +515,13 @@ files:
$modules/notification/pushbullet.py: $modules/notification/pushbullet.py:
maintainers: willybarro maintainers: willybarro
$modules/notification/pushover.py: $modules/notification/pushover.py:
maintainers: weaselkeeper wopfel maintainers: weaselkeeper
$modules/notification/rocketchat.py: $modules/notification/rocketchat.py:
maintainers: Deepakkothandan maintainers: Deepakkothandan
labels: rocketchat labels: rocketchat
ignore: ramondelafuente ignore: ramondelafuente
$modules/notification/say.py: $modules/notification/say.py:
maintainers: $team_ansible_core mpdehaan maintainers: ansible mpdehaan
$modules/notification/sendgrid.py: $modules/notification/sendgrid.py:
maintainers: makaimc maintainers: makaimc
$modules/notification/slack.py: $modules/notification/slack.py:
@@ -719,26 +529,23 @@ files:
$modules/notification/syslogger.py: $modules/notification/syslogger.py:
maintainers: garbled1 maintainers: garbled1
$modules/notification/telegram.py: $modules/notification/telegram.py:
maintainers: tyouxa loms lomserman maintainers: tyouxa loms
$modules/notification/twilio.py: $modules/notification/twilio.py:
maintainers: makaimc maintainers: makaimc
$modules/notification/typetalk.py: $modules/notification/typetalk.py:
maintainers: tksmd maintainers: tksmd
$modules/packaging/language/ansible_galaxy_install.py:
maintainers: russoz
$modules/packaging/language/bower.py: $modules/packaging/language/bower.py:
maintainers: mwarkentin maintainers: mwarkentin
$modules/packaging/language/bundler.py: $modules/packaging/language/bundler.py:
maintainers: thoiberg maintainers: thoiberg
$modules/packaging/language/composer.py: $modules/packaging/language/composer.py:
maintainers: dmtrs maintainers: dmtrs resmo
ignore: resmo
$modules/packaging/language/cpanm.py: $modules/packaging/language/cpanm.py:
maintainers: fcuny russoz maintainers: fcuny
$modules/packaging/language/easy_install.py: $modules/packaging/language/easy_install.py:
maintainers: mattupstate maintainers: mattupstate
$modules/packaging/language/gem.py: $modules/packaging/language/gem.py:
maintainers: $team_ansible_core johanwiren maintainers: ansible johanwiren
labels: gem labels: gem
$modules/packaging/language/maven_artifact.py: $modules/packaging/language/maven_artifact.py:
maintainers: tumbl3w33d turb maintainers: tumbl3w33d turb
@@ -759,8 +566,6 @@ files:
maintainers: tdtrask maintainers: tdtrask
labels: apk labels: apk
ignore: kbrebanov ignore: kbrebanov
$modules/packaging/os/apt_repo.py:
maintainers: obirvalger
$modules/packaging/os/apt_rpm.py: $modules/packaging/os/apt_rpm.py:
maintainers: evgkrsk maintainers: evgkrsk
$modules/packaging/os/copr.py: $modules/packaging/os/copr.py:
@@ -816,9 +621,6 @@ files:
maintainers: elasticdog indrajitr tchernomax maintainers: elasticdog indrajitr tchernomax
labels: pacman labels: pacman
ignore: elasticdog ignore: elasticdog
$modules/packaging/os/pacman_key.py:
maintainers: grawlinson
labels: pacman
$modules/packaging/os/pkgin.py: $modules/packaging/os/pkgin.py:
maintainers: $team_solaris L2G jasperla szinck martinm82 maintainers: $team_solaris L2G jasperla szinck martinm82
labels: pkgin solaris labels: pkgin solaris
@@ -884,11 +686,15 @@ files:
labels: zypper labels: zypper
ignore: dirtyharrycallahan robinro ignore: dirtyharrycallahan robinro
$modules/packaging/os/zypper_repository.py: $modules/packaging/os/zypper_repository.py:
maintainers: $team_suse maintainers: matze
labels: zypper
ignore: matze
$modules/remote_management/cobbler/: $modules/remote_management/cobbler/:
maintainers: dagwieers maintainers: dagwieers
$modules/remote_management/dellemc/:
maintainers: rajeevarakkal
$modules/remote_management/dellemc/idrac_server_config_profile.py:
maintainers: jagadeeshnv
$modules/remote_management/dellemc/ome_device_info.py:
maintainers: Sajna-Shetty
$modules/remote_management/hpilo/: $modules/remote_management/hpilo/:
maintainers: haad maintainers: haad
ignore: dagwieers ignore: dagwieers
@@ -897,8 +703,6 @@ files:
labels: cisco labels: cisco
$modules/remote_management/ipmi/: $modules/remote_management/ipmi/:
maintainers: bgaifullin cloudnull maintainers: bgaifullin cloudnull
$modules/remote_management/lenovoxcc/:
maintainers: panyy3 renxulei
$modules/remote_management/lxca/: $modules/remote_management/lxca/:
maintainers: navalkp prabhosa maintainers: navalkp prabhosa
$modules/remote_management/manageiq/: $modules/remote_management/manageiq/:
@@ -908,6 +712,8 @@ files:
maintainers: evertmulder maintainers: evertmulder
$modules/remote_management/manageiq/manageiq_tenant.py: $modules/remote_management/manageiq/manageiq_tenant.py:
maintainers: evertmulder maintainers: evertmulder
$modules/remote_management/oneview/oneview_datacenter_facts.py:
maintainers: aalexmonteiro madhav-bharadwaj ricardogpsf soodpr
$modules/remote_management/oneview/: $modules/remote_management/oneview/:
maintainers: adriane-cardozo fgbulsoni tmiotto maintainers: adriane-cardozo fgbulsoni tmiotto
$modules/remote_management/oneview/oneview_datacenter_info.py: $modules/remote_management/oneview/oneview_datacenter_info.py:
@@ -917,7 +723,7 @@ files:
$modules/remote_management/oneview/oneview_fcoe_network.py: $modules/remote_management/oneview/oneview_fcoe_network.py:
maintainers: fgbulsoni maintainers: fgbulsoni
$modules/remote_management/redfish/: $modules/remote_management/redfish/:
maintainers: $team_redfish maintainers: $team_redfish billdodd
ignore: jose-delarosa ignore: jose-delarosa
$modules/remote_management/stacki/stacki_host.py: $modules/remote_management/stacki/stacki_host.py:
maintainers: bsanders bbyhuy maintainers: bsanders bbyhuy
@@ -940,8 +746,6 @@ files:
ignore: erydo ignore: erydo
$modules/source_control/github/github_release.py: $modules/source_control/github/github_release.py:
maintainers: adrianmoisey maintainers: adrianmoisey
$modules/source_control/github/github_repo.py:
maintainers: atorrescogollo
$modules/source_control/github/: $modules/source_control/github/:
maintainers: stpierre maintainers: stpierre
$modules/source_control/gitlab/: $modules/source_control/gitlab/:
@@ -952,16 +756,27 @@ files:
maintainers: markuman maintainers: markuman
$modules/source_control/gitlab/gitlab_runner.py: $modules/source_control/gitlab/gitlab_runner.py:
maintainers: SamyCoenen maintainers: SamyCoenen
$modules/source_control/gitlab/gitlab_user.py:
maintainers: LennertMertens stgrace
$modules/source_control/hg.py: $modules/source_control/hg.py:
maintainers: yeukhon maintainers: yeukhon
$modules/storage/emc/emc_vnx_sg_member.py: $modules/storage/emc/emc_vnx_sg_member.py:
maintainers: remixtj maintainers: remixtj
$modules/storage/glusterfs/:
maintainers: devyanikota
$modules/storage/glusterfs/gluster_peer.py:
maintainers: sac
$modules/storage/glusterfs/gluster_volume.py:
maintainers: rosmo
$modules/storage/hpe3par/ss_3par_cpg.py: $modules/storage/hpe3par/ss_3par_cpg.py:
maintainers: farhan7500 gautamphegde maintainers: farhan7500 gautamphegde
$modules/storage/ibm/: $modules/storage/ibm/:
maintainers: tzure maintainers: tzure
$modules/storage/infinidat/:
maintainers: vmalloc GR360RY
$modules/storage/netapp/:
maintainers: $team_netapp
$modules/storage/purestorage/:
maintainers: $team_purestorage
labels: pure_storage
$modules/storage/vexata/: $modules/storage/vexata/:
maintainers: vexata maintainers: vexata
$modules/storage/zfs/: $modules/storage/zfs/:
@@ -972,6 +787,9 @@ files:
maintainers: johanwiren maintainers: johanwiren
$modules/storage/zfs/zfs_delegate_admin.py: $modules/storage/zfs/zfs_delegate_admin.py:
maintainers: natefoo maintainers: natefoo
$modules/system/python_requirements_facts.py:
maintainers: willthames
ignore: ryansb
$modules/system/aix: $modules/system/aix:
maintainers: $team_aix maintainers: $team_aix
labels: aix labels: aix
@@ -980,8 +798,6 @@ files:
maintainers: mulby maintainers: mulby
labels: alternatives labels: alternatives
ignore: DavidWittman ignore: DavidWittman
$modules/system/aix_lvol.py:
maintainers: adejoux
$modules/system/awall.py: $modules/system/awall.py:
maintainers: tdtrask maintainers: tdtrask
$modules/system/beadm.py: $modules/system/beadm.py:
@@ -999,7 +815,7 @@ files:
$modules/system/dpkg_divert.py: $modules/system/dpkg_divert.py:
maintainers: quidame maintainers: quidame
$modules/system/facter.py: $modules/system/facter.py:
maintainers: $team_ansible_core gamethis maintainers: ansible gamethis
labels: facter labels: facter
$modules/system/filesystem.py: $modules/system/filesystem.py:
maintainers: pilou- abulimov quidame maintainers: pilou- abulimov quidame
@@ -1012,12 +828,10 @@ files:
labels: interfaces_file labels: interfaces_file
$modules/system/iptables_state.py: $modules/system/iptables_state.py:
maintainers: quidame maintainers: quidame
$modules/system/shutdown.py:
maintainers: nitzmahone samdoran aminvakil
$modules/system/java_cert.py: $modules/system/java_cert.py:
maintainers: haad absynth76 maintainers: haad
$modules/system/java_keystore.py: $modules/system/java_keystore.py:
maintainers: Mogztter quidame maintainers: Mogztter
$modules/system/kernel_blacklist.py: $modules/system/kernel_blacklist.py:
maintainers: matze maintainers: matze
$modules/system/launchd.py: $modules/system/launchd.py:
@@ -1031,7 +845,7 @@ files:
$modules/system/lvg.py: $modules/system/lvg.py:
maintainers: abulimov maintainers: abulimov
$modules/system/lvol.py: $modules/system/lvol.py:
maintainers: abulimov jhoekx zigaSRC unkaputtbar112 maintainers: abulimov jhoekx
$modules/system/make.py: $modules/system/make.py:
maintainers: LinusU maintainers: LinusU
$modules/system/mksysb.py: $modules/system/mksysb.py:
@@ -1044,7 +858,7 @@ files:
$modules/system/nosh.py: $modules/system/nosh.py:
maintainers: tacatac maintainers: tacatac
$modules/system/ohai.py: $modules/system/ohai.py:
maintainers: $team_ansible_core mpdehaan maintainers: ansible mpdehaan
labels: ohai labels: ohai
$modules/system/open_iscsi.py: $modules/system/open_iscsi.py:
maintainers: srvg maintainers: srvg
@@ -1073,8 +887,6 @@ files:
ignore: ryansb ignore: ryansb
$modules/system/runit.py: $modules/system/runit.py:
maintainers: jsumners maintainers: jsumners
$modules/system/sap_task_list_execute:
maintainers: rainerleber
$modules/system/sefcontext.py: $modules/system/sefcontext.py:
maintainers: dagwieers maintainers: dagwieers
$modules/system/selinux_permissive.py: $modules/system/selinux_permissive.py:
@@ -1087,8 +899,6 @@ files:
maintainers: $team_solaris pmarkham maintainers: $team_solaris pmarkham
labels: solaris labels: solaris
keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
$modules/system/ssh_config.py:
maintainers: gaqzi Akasurde
$modules/system/svc.py: $modules/system/svc.py:
maintainers: bcoca maintainers: bcoca
$modules/system/syspatch.py: $modules/system/syspatch.py:
@@ -1104,38 +914,35 @@ files:
maintainers: ahtik ovcharenko pyykkis maintainers: ahtik ovcharenko pyykkis
labels: ufw labels: ufw
$modules/system/vdo.py: $modules/system/vdo.py:
maintainers: rhawalsh bgurney-rh maintainers: bgurney-rh
$modules/system/xfconf.py: $modules/system/xfconf.py:
maintainers: russoz jbenden maintainers: russoz jbenden
labels: xfconf labels: xfconf
$modules/system/xfconf_info.py:
maintainers: russoz
labels: xfconf
$modules/system/xfs_quota.py: $modules/system/xfs_quota.py:
maintainers: bushvin maintainers: bushvin
$modules/web_infrastructure/jenkins_job_facts.py:
maintainers: stpierre
$modules/web_infrastructure/nginx_status_facts.py:
maintainers: resmo
$modules/web_infrastructure/apache2_mod_proxy.py: $modules/web_infrastructure/apache2_mod_proxy.py:
maintainers: oboukili maintainers: oboukili
$modules/web_infrastructure/apache2_module.py: $modules/web_infrastructure/apache2_module.py:
maintainers: berendt n0trax maintainers: berendt n0trax robinro
ignore: robinro
$modules/web_infrastructure/deploy_helper.py: $modules/web_infrastructure/deploy_helper.py:
maintainers: ramondelafuente maintainers: ramondelafuente
$modules/web_infrastructure/django_manage.py: $modules/web_infrastructure/django_manage.py:
maintainers: russoz maintainers: scottanderson42 russoz tastychutney
ignore: scottanderson42 tastychutney
labels: django_manage labels: django_manage
$modules/web_infrastructure/ejabberd_user.py: $modules/web_infrastructure/ejabberd_user.py:
maintainers: privateip maintainers: privateip
$modules/web_infrastructure/gunicorn.py: $modules/web_infrastructure/gunicorn.py:
maintainers: agmezr maintainers: agmezr
$modules/web_infrastructure/htpasswd.py: $modules/web_infrastructure/htpasswd.py:
maintainers: $team_ansible_core maintainers: ansible
labels: htpasswd labels: htpasswd
$modules/web_infrastructure/jboss.py: $modules/web_infrastructure/jboss.py:
maintainers: $team_jboss jhoekx maintainers: $team_jboss jhoekx
labels: jboss labels: jboss
$modules/web_infrastructure/jenkins_build.py:
maintainers: brettmilford unnecessary-username
$modules/web_infrastructure/jenkins_job.py: $modules/web_infrastructure/jenkins_job.py:
maintainers: sermilrod maintainers: sermilrod
$modules/web_infrastructure/jenkins_job_info.py: $modules/web_infrastructure/jenkins_job_info.py:
@@ -1145,13 +952,11 @@ files:
$modules/web_infrastructure/jenkins_script.py: $modules/web_infrastructure/jenkins_script.py:
maintainers: hogarthj maintainers: hogarthj
$modules/web_infrastructure/jira.py: $modules/web_infrastructure/jira.py:
maintainers: Slezhuk tarka pertoft DWSR maintainers: Slezhuk tarka
labels: jira labels: jira
$modules/web_infrastructure/nginx_status_info.py: $modules/web_infrastructure/nginx_status_info.py:
maintainers: resmo maintainers: resmo
$modules/web_infrastructure/rundeck_acl_policy.py: $modules/web_infrastructure/:
maintainers: nerzhul
$modules/web_infrastructure/rundeck_project.py:
maintainers: nerzhul maintainers: nerzhul
$modules/web_infrastructure/sophos_utm/: $modules/web_infrastructure/sophos_utm/:
maintainers: $team_e_spirit maintainers: $team_e_spirit
@@ -1162,14 +967,6 @@ files:
$modules/web_infrastructure/sophos_utm/utm_proxy_exception.py: $modules/web_infrastructure/sophos_utm/utm_proxy_exception.py:
maintainers: $team_e_spirit RickS-C137 maintainers: $team_e_spirit RickS-C137
keywords: sophos utm keywords: sophos utm
$modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py:
maintainers: stearz
$modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py:
maintainers: stearz
$modules/web_infrastructure/sophos_utm/utm_network_interface_address.py:
maintainers: steamx
$modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py:
maintainers: steamx
$modules/web_infrastructure/supervisorctl.py: $modules/web_infrastructure/supervisorctl.py:
maintainers: inetfuture mattupstate maintainers: inetfuture mattupstate
$modules/web_infrastructure/taiga_issue.py: $modules/web_infrastructure/taiga_issue.py:
@@ -1189,40 +986,40 @@ files:
macros: macros:
actions: plugins/action actions: plugins/action
becomes: plugins/become becomes: plugins/become
caches: plugins/cache
callbacks: plugins/callback callbacks: plugins/callback
cliconfs: plugins/cliconf cliconfs: plugins/cliconf
connections: plugins/connection connections: plugins/connection
doc_fragments: plugins/doc_fragments doc_fragments: plugins/doc_fragments
filters: plugins/filter filters: plugins/filter
httpapis: plugins/httpapi
inventories: plugins/inventory inventories: plugins/inventory
lookups: plugins/lookup lookups: plugins/lookup
module_utils: plugins/module_utils module_utils: plugins/module_utils
modules: plugins/modules modules: plugins/modules
terminals: plugins/terminal terminals: plugins/terminal
team_ansible_core:
team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross
team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo
team_consul: sgargan team_consul: colin-nolan sgargan
team_cyberark_conjur: jvanderhoof ryanprior team_cyberark_conjur: jvanderhoof ryanprior
team_e_spirit: MatrixCrawler getjack team_e_spirit: MatrixCrawler getjack
team_flatpak: JayKayy oolongbrothers team_flatpak: JayKayy oolongbrothers
team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman metanovii team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman
team_hpux: bcoca davx8342 team_hpux: bcoca davx8342
team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2 team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2
team_ipa: Akasurde Nosmoht fxfitz justchris1 team_ipa: Akasurde Nosmoht fxfitz
team_jboss: Wolfant jairojunior wbrefvem team_jboss: Wolfant jairojunior wbrefvem
team_keycloak: eikef ndclt team_keycloak: eikef ndclt
team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber team_linode: InTheCloudDan decentral1se displague rmcintosh
team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr
team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder
team_netapp: amit0701 carchi8py hulquest lmprice lonico ndswartz schmots1
team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip
team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding team_opennebula: ilicmilan meerkampdvv rsmontero xorel
team_oracle: manojmeda mross22 nalsaber team_oracle: manojmeda mross22 nalsaber
team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16 team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
team_redfish: mraineri tomasg2012 xmadsen renxulei team_redfish: billdodd mraineri tomasg2012
team_rhn: FlossWare alikins barnabycourt vritant team_rhn: FlossWare alikins barnabycourt vritant
team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone sieben team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone sieben
team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom sealor team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom
team_virt: joshainglis karmab tleguern Thulium-Drake Ajpantuso team_virt: joshainglis karmab Aversiste Thulium-Drake

View File

@@ -1,135 +0,0 @@
---
name: Bug report
description: Create a report to help us improve
body:
- type: markdown
attributes:
value: |
Verify first that your issue is not [already reported on GitHub][issue search].
Also test if the latest release and devel branch are affected too.
*Complete **all** sections as described, this form is processed automatically.*
[issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
- type: textarea
attributes:
label: Summary
description: Explain the problem briefly below.
placeholder: >-
When I try to do X with the collection from the main branch on GitHub, Y
breaks in a way Z under the env E. Here are all the details I know
about this problem...
validations:
required: true
- type: dropdown
attributes:
label: Issue Type
# FIXME: Once GitHub allows defining the default choice, update this
options:
- Bug Report
validations:
required: true
- type: textarea
attributes:
# For smaller collections we could use a multi-select and hardcode the list
# May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins
# Select from list, filter as you type (`mysql` would only show the 3 mysql components)
# OR freeform - doesn't seem to be supported in adaptivecards
label: Component Name
description: >-
Write the short name of the module, plugin, task or feature below,
*use your best guess if unsure*.
placeholder: dnf, apt, yum, pip, user etc.
validations:
required: true
- type: textarea
attributes:
label: Ansible Version
description: >-
Paste verbatim output from `ansible --version` between
triple backticks.
value: |
```console (paste below)
$ ansible --version
```
validations:
required: true
- type: textarea
attributes:
label: Configuration
description: >-
If this issue has an example piece of YAML that can help to reproduce this problem, please provide it.
This can be a piece of YAML from, e.g., an automation, script, scene or configuration.
Paste verbatim output from `ansible-config dump --only-changed` between quotes
value: |
```console (paste below)
$ ansible-config dump --only-changed
```
- type: textarea
attributes:
label: OS / Environment
description: >-
Provide all relevant information below, e.g. target OS versions,
network device firmware, etc.
placeholder: RHEL 8, CentOS Stream etc.
validations:
required: false
- type: textarea
attributes:
label: Steps to Reproduce
description: |
Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also paste any playbooks, configs and commands you used.
**HINT:** You can paste https://gist.github.com links for larger files.
value: |
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
```
validations:
required: true
- type: textarea
attributes:
label: Expected Results
description: >-
Describe what you expected to happen when running the steps above.
placeholder: >-
I expected X to happen because I assumed Y and was shocked
that it did not.
validations:
required: true
- type: textarea
attributes:
label: Actual Results
description: |
Describe what actually happened. If possible run with extra verbosity (`-vvvv`).
Paste verbatim command output between quotes.
value: |
```console (paste below)
```
- type: checkboxes
attributes:
label: Code of Conduct
description: |
Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
options:
- label: I agree to follow the Ansible Code of Conduct
required: true
...

View File

@@ -1,27 +0,0 @@
---
# Ref: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser
blank_issues_enabled: false # default: true
contact_links:
- name: Security bug report
url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
about: |
Please learn how to report security vulnerabilities here.
For all security related bugs, email security@ansible.com
instead of using this issue tracker and you will receive
a prompt response.
For more information, see
https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html
- name: Ansible Code of Conduct
url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
about: Be nice to other members of the community.
- name: Talks to the community
url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information
about: Please ask and answer usage questions here
- name: Working groups
url: https://github.com/ansible/community/wiki
about: Interested in improving a specific area? Become a part of a working group!
- name: For Enterprise
url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
about: Red Hat offers support for the Ansible Automation Platform

View File

@@ -1,111 +0,0 @@
---
name: Documentation Report
description: Ask us about docs
# NOTE: issue body is enabled to allow screenshots
body:
- type: markdown
attributes:
value: |
Verify first that your issue is not [already reported on GitHub][issue search].
Also test if the latest release and devel branch are affected too.
*Complete **all** sections as described, this form is processed automatically.*
[issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
- type: textarea
attributes:
label: Summary
description: |
Explain the problem briefly below, add suggestions to wording or structure.
**HINT:** Did you know the documentation has an `Edit on GitHub` link on every page?
placeholder: >-
I was reading the Collection documentation of version X and I'm having
problems understanding Y. It would be very helpful if that got
rephrased as Z.
validations:
required: true
- type: dropdown
attributes:
label: Issue Type
# FIXME: Once GitHub allows defining the default choice, update this
options:
- Documentation Report
validations:
required: true
- type: input
attributes:
label: Component Name
description: >-
Write the short name of the rst file, module, plugin, task or
feature below, *use your best guess if unsure*.
placeholder: mysql_user
validations:
required: true
- type: textarea
attributes:
label: Ansible Version
description: >-
Paste verbatim output from `ansible --version` between
triple backticks.
value: |
```console (paste below)
$ ansible --version
```
validations:
required: false
- type: textarea
attributes:
label: Configuration
description: >-
Paste verbatim output from `ansible-config dump --only-changed` between quotes.
value: |
```console (paste below)
$ ansible-config dump --only-changed
```
validations:
required: false
- type: textarea
attributes:
label: OS / Environment
description: >-
Provide all relevant information below, e.g. OS version,
browser, etc.
placeholder: Fedora 33, Firefox etc.
validations:
required: false
- type: textarea
attributes:
label: Additional Information
description: |
Describe how this improves the documentation, e.g. before/after situation or screenshots.
**Tip:** It's not possible to upload the screenshot via this field directly but you can use the last textarea in this form to attach them.
**HINT:** You can paste https://gist.github.com links for larger files.
placeholder: >-
When the improvement is applied, it makes it more straightforward
to understand X.
validations:
required: false
- type: checkboxes
attributes:
label: Code of Conduct
description: |
Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
options:
- label: I agree to follow the Ansible Code of Conduct
required: true
...

View File

@@ -1,69 +0,0 @@
---
name: Feature request
description: Suggest an idea for this project
body:
- type: markdown
attributes:
value: |
Verify first that your issue is not [already reported on GitHub][issue search].
Also test if the latest release and devel branch are affected too.
*Complete **all** sections as described, this form is processed automatically.*
[issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
- type: textarea
attributes:
label: Summary
description: Describe the new feature/improvement briefly below.
placeholder: >-
I am trying to do X with the collection from the main branch on GitHub and
I think that implementing a feature Y would be very helpful for me and
every other user of ansible-core because of Z.
validations:
required: true
- type: dropdown
attributes:
label: Issue Type
# FIXME: Once GitHub allows defining the default choice, update this
options:
- Feature Idea
validations:
required: true
- type: input
attributes:
label: Component Name
description: >-
Write the short name of the module, plugin, task or feature below,
*use your best guess if unsure*.
placeholder: dnf, apt, yum, pip, user etc.
validations:
required: true
- type: textarea
attributes:
label: Additional Information
description: |
Describe how the feature would be used, why it is needed and what it would solve.
**HINT:** You can paste https://gist.github.com links for larger files.
value: |
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
```
validations:
required: false
- type: checkboxes
attributes:
label: Code of Conduct
description: |
Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
options:
- label: I agree to follow the Ansible Code of Conduct
required: true
...

81
.gitignore vendored
View File

@@ -1,6 +1,6 @@
# Created by https://www.toptal.com/developers/gitignore/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv # Created by https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
# Edit at https://www.toptal.com/developers/gitignore?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv # Edit at https://www.gitignore.io/?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
### dotenv ### ### dotenv ###
.env .env
@@ -88,7 +88,7 @@ flycheck_*.el
.nfs* .nfs*
### PyCharm+all ### ### PyCharm+all ###
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff # User-specific stuff
@@ -98,9 +98,6 @@ flycheck_*.el
.idea/**/dictionaries .idea/**/dictionaries
.idea/**/shelf .idea/**/shelf
# AWS User-specific
.idea/**/aws.xml
# Generated files # Generated files
.idea/**/contentModel.xml .idea/**/contentModel.xml
@@ -121,9 +118,6 @@ flycheck_*.el
# When using Gradle or Maven with auto-import, you should exclude module files, # When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using # since they will be recreated, and may cause churn. Uncomment if using
# auto-import. # auto-import.
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml # .idea/modules.xml
# .idea/*.iml # .idea/*.iml
# .idea/modules # .idea/modules
@@ -204,6 +198,7 @@ parts/
sdist/ sdist/
var/ var/
wheels/ wheels/
pip-wheel-metadata/
share/python-wheels/ share/python-wheels/
*.egg-info/ *.egg-info/
.installed.cfg .installed.cfg
@@ -230,25 +225,13 @@ htmlcov/
nosetests.xml nosetests.xml
coverage.xml coverage.xml
*.cover *.cover
*.py,cover
.hypothesis/ .hypothesis/
.pytest_cache/ .pytest_cache/
cover/
# Translations # Translations
*.mo *.mo
*.pot *.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff: # Scrapy stuff:
.scrapy .scrapy
@@ -256,19 +239,9 @@ instance/
docs/_build/ docs/_build/
# PyBuilder # PyBuilder
.pybuilder/
target/ target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv # pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
.python-version .python-version
# pipenv # pipenv
@@ -278,24 +251,12 @@ ipython_config.py
# install all needed dependencies. # install all needed dependencies.
#Pipfile.lock #Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow # celery beat schedule file
__pypackages__/
# Celery stuff
celerybeat-schedule celerybeat-schedule
celerybeat.pid
# SageMath parsed files # SageMath parsed files
*.sage.py *.sage.py
# Environments
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings # Spyder project settings
.spyderproject .spyderproject
.spyproject .spyproject
@@ -303,6 +264,10 @@ venv.bak/
# Rope project settings # Rope project settings
.ropeproject .ropeproject
# Mr Developer
.mr.developer.cfg
.project
# mkdocs documentation # mkdocs documentation
/site /site
@@ -314,16 +279,9 @@ dmypy.json
# Pyre type checker # Pyre type checker
.pyre/ .pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
### Vim ### ### Vim ###
# Swap # Swap
[._]*.s[a-v][a-z] [._]*.s[a-v][a-z]
!*.svg # comment out if you don't need vector files
[._]*.sw[a-p] [._]*.sw[a-p]
[._]s[a-rt-v][a-z] [._]s[a-rt-v][a-z]
[._]ss[a-gi-z] [._]ss[a-gi-z]
@@ -341,13 +299,11 @@ tags
[._]*.un~ [._]*.un~
### WebStorm ### ### WebStorm ###
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff # User-specific stuff
# AWS User-specific
# Generated files # Generated files
# Sensitive or high-churn files # Sensitive or high-churn files
@@ -358,9 +314,6 @@ tags
# When using Gradle or Maven with auto-import, you should exclude module files, # When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using # since they will be recreated, and may cause churn. Uncomment if using
# auto-import. # auto-import.
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml # .idea/modules.xml
# .idea/*.iml # .idea/*.iml
# .idea/modules # .idea/modules
@@ -396,27 +349,15 @@ tags
# *.ipr # *.ipr
# Sonarlint plugin # Sonarlint plugin
# https://plugins.jetbrains.com/plugin/7973-sonarlint
.idea/**/sonarlint/ .idea/**/sonarlint/
# SonarQube Plugin # SonarQube Plugin
# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin
.idea/**/sonarIssues.xml .idea/**/sonarIssues.xml
# Markdown Navigator plugin # Markdown Navigator plugin
# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced
.idea/**/markdown-navigator.xml .idea/**/markdown-navigator.xml
.idea/**/markdown-navigator-enh.xml
.idea/**/markdown-navigator/ .idea/**/markdown-navigator/
# Cache file creation bug
# See https://youtrack.jetbrains.com/issue/JBR-2257
.idea/$CACHE_FILE$
# CodeStream plugin
# https://plugins.jetbrains.com/plugin/12206-codestream
.idea/codestream.xml
### Windows ### ### Windows ###
# Windows thumbnail cache files # Windows thumbnail cache files
Thumbs.db Thumbs.db
@@ -443,4 +384,4 @@ $RECYCLE.BIN/
# Windows shortcuts # Windows shortcuts
*.lnk *.lnk
# End of https://www.toptal.com/developers/gitignore/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv # End of https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv

File diff suppressed because it is too large Load Diff

View File

@@ -1,67 +0,0 @@
# Contributing
We follow [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our contributions and interactions within this repository.
If you are a committer, also refer to the [collection's committer guidelines](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md).
## Issue tracker
Whether you are looking for an opportunity to contribute or you found a bug and already know how to solve it, please go to the [issue tracker](https://github.com/ansible-collections/community.general/issues).
There you can find feature ideas to implement, reports about bugs to solve, or submit an issue to discuss your idea before implementing it which can help choose the right direction at the beginning of your work and potentially save a lot of time and effort.
Also somebody may already have started discussing or working on implementing the same or a similar idea,
so you can cooperate to create a better solution together.
* If you are interested in starting with an easy issue, look for [issues with an `easyfix` label](https://github.com/ansible-collections/community.general/labels/easyfix).
* Often issues that are waiting for contributors to pick up have [the `waiting_on_contributor` label](https://github.com/ansible-collections/community.general/labels/waiting_on_contributor).
## Open pull requests
Look through currently [open pull requests](https://github.com/ansible-collections/community.general/pulls).
You can help by reviewing them. Reviews help move pull requests to merge state. Some good pull requests cannot be merged only due to a lack of reviews. And it is always worth saying that good reviews are often more valuable than pull requests themselves.
Note that reviewing does not only mean code review, but also offering comments on new interfaces added to existing plugins/modules, interfaces of new plugins/modules, improving language (not everyone is a native English speaker), or testing bugfixes and new features!
Also, consider taking up a valuable, reviewed, but abandoned pull request which you could politely ask the original authors to complete yourself.
* Try committing your changes with an informative but short commit message.
* Do not squash your commits and force-push to your branch if not needed. Reviews of your pull request are much easier with individual commits to comprehend the pull request history. All commits of your pull request branch will be squashed into one commit by GitHub upon merge.
* Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the repository checkout.
* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#changelogs-how-to). (You must not include a fragment for new modules or new plugins, except for test and filter plugins. Also you shouldn't include one for docs-only changes. If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) )
You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst).
## Test pull requests
If you want to test a PR locally, refer to [our testing guide](https://github.com/ansible/community-docs/blob/main/test_pr_locally_guide.rst) for instructions on how to do it quickly.
If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it.
## Creating new modules or plugins
Creating new modules and plugins requires a bit more work than other Pull Requests.
1. Please make sure that your new module or plugin is of interest to a larger audience. Very specialized modules or plugins that
can only be used by very few people should better be added to more specialized collections.
2. When creating a new module or plugin, please make sure that you follow various guidelines:
- Follow [development conventions](https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_best_practices.html);
- Follow [documentation standards](https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_documenting.html) and
the [Ansible style guide](https://docs.ansible.com/ansible/devel/dev_guide/style_guide/index.html#style-guide);
- Make sure your modules and plugins are [GPL-3.0-or-later](https://www.gnu.org/licenses/gpl-3.0-standalone.html) licensed
(new module_utils can also be [BSD-2-clause](https://opensource.org/licenses/BSD-2-Clause) licensed);
- Make sure that new plugins and modules have tests (unit tests, integration tests, or both); it is preferable to have some tests
which run in CI.
3. For modules and action plugins, make sure to create your module/plugin in the correct subdirectory, and create a symbolic link
from `plugins/modules/` respectively `plugins/action/` to the actual module/plugin code. (Other plugin types should not use
subdirectories.)
- Action plugins need to be accompanied by a module, even if the module file only contains documentation
(`DOCUMENTATION`, `EXAMPLES` and `RETURN`). The module must have the same name and directory path in `plugins/modules/`
as the action plugin has in `plugins/action/`.
4. Make sure to add a BOTMETA entry for your new module/plugin in `.github/BOTMETA.yml`. Search for other plugins/modules in the
same directory to see how entries could look. You should list all authors either as `maintainers` or under `ignore`. People
listed as `maintainers` will be pinged for new issues and PRs that modify the module/plugin or its tests.
When you add a new plugin/module, we expect that you perform maintainer duty for at least some time after contributing it.

View File

@@ -1,23 +1,15 @@
# Community General Collection # Community General Collection
[![Build Status](https://dev.azure.com/ansible/community.general/_apis/build/status/CI?branchName=stable-3)](https://dev.azure.com/ansible/community.general/_build?definitionId=31) [![Build Status](https://dev.azure.com/ansible/community.general/_apis/build/status/CI?branchName=stable-2)](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
[![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.general)](https://codecov.io/gh/ansible-collections/community.general) [![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.general)](https://codecov.io/gh/ansible-collections/community.general)
This repository contains the `community.general` Ansible Collection. The collection is a part of the Ansible package and includes many modules and plugins supported by Ansible community which are not part of more specialized community collections. This repo contains the `community.general` Ansible Collection. The collection includes many modules and plugins supported by Ansible community which are not part of more specialized community collections.
You can find [documentation for this collection on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/). You can find [documentation for this collection on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
Please note that this collection does **not** support Windows targets. Only connection plugins included in this collection might support Windows targets, and will explicitly mention that in their documentation if they do so.
## Code of Conduct
We follow [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our interactions within this project.
If you encounter abusive behavior violating the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html), please refer to the [policy violations](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html#policy-violations) section of the Code of Conduct for information on how to raise a complaint.
## Tested with Ansible ## Tested with Ansible
Tested with the current Ansible 2.9, ansible-base 2.10 and ansible-core 2.11 releases and the current development version of ansible-core. Ansible versions before 2.9.10 are not supported. Tested with the current Ansible 2.9 and 2.10 releases and the current development version of Ansible. Ansible versions before 2.9.10 are not supported.
## External requirements ## External requirements
@@ -29,9 +21,7 @@ Please check the included content on the [Ansible Galaxy page for this collectio
## Using this collection ## Using this collection
This collection is shipped with the Ansible package. So if you have it installed, no more action is required. Before using the General community collection, you need to install the collection with the `ansible-galaxy` CLI:
If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/community/general) manually with the `ansible-galaxy` command-line tool:
ansible-galaxy collection install community.general ansible-galaxy collection install community.general
@@ -42,79 +32,57 @@ collections:
- name: community.general - name: community.general
``` ```
Note that if you install the collection manually, it will not be upgraded automatically when you upgrade the Ansible package. To upgrade the collection to the latest available version, run the following command:
```bash
ansible-galaxy collection install community.general --upgrade
```
You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/community/general):
```bash
ansible-galaxy collection install community.general:==X.Y.Z
```
See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details. See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details.
## Contributing to this collection ## Contributing to this collection
The content of this collection is made by good people just like you, a community of individuals collaborating on making the world better through developing automation software. If you want to develop new content for this collection or improve what is already here, the easiest way to work on the collection is to clone it into one of the configured [`COLLECTIONS_PATH`](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#collections-paths), and work on it there.
We are actively accepting new contributors. For example, if you are working in the `~/dev` directory:
All types of contributions are very welcome. ```
cd ~/dev
You don't know how to start? Refer to our [contribution guide](https://github.com/ansible-collections/community.general/blob/main/CONTRIBUTING.md)! git clone git@github.com:ansible-collections/community.general.git collections/ansible_collections/community/general
export COLLECTIONS_PATH=$(pwd)/collections:$COLLECTIONS_PATH
The current maintainers are listed in the [commit-rights.md](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md#people) file. If you have questions or need help, feel free to mention them in the proposals. ```
You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html). You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).
Also for some notes specific to this collection see [our CONTRIBUTING documentation](https://github.com/ansible-collections/community.general/blob/main/CONTRIBUTING.md).
### Running tests ### Running tests
See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#testing-collections). See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#testing-collections).
## Collection maintenance ### Communication
To learn how to maintain / become a maintainer of this collection, refer to: We have a dedicated Working Group for Ansible development.
* [Committer guidelines](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md). You can find other people interested on the following Freenode IRC channels -
* [Maintainer guidelines](https://github.com/ansible/community-docs/blob/main/maintaining.rst). - `#ansible` - For general use questions and support.
- `#ansible-devel` - For discussions on developer topics and code related to features or bugs.
It is necessary for maintainers of this collection to be subscribed to: - `#ansible-community` - For discussions on community topics and community meetings.
* The collection itself (the `Watch` button → `All Activity` in the upper right corner of the repository's homepage).
* The "Changes Impacting Collection Contributors and Maintainers" [issue](https://github.com/ansible-collections/overview/issues/45).
They also should be subscribed to Ansible's [The Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn).
## Communication
We announce important development changes and releases through Ansible's [The Bullhorn newsletter](https://eepurl.com/gZmiEP). If you are a collection developer, be sure you are subscribed.
Join us in the `#ansible` (general use questions and support), `#ansible-community` (community and collection development questions), and other [IRC channels](https://docs.ansible.com/ansible/devel/community/communication.html#irc-channels) on [Libera.chat](https://libera.chat).
We take part in the global quarterly [Ansible Contributor Summit](https://github.com/ansible/community/wiki/Contributor-Summit) virtually or in-person. Track [The Bullhorn newsletter](https://eepurl.com/gZmiEP) and join us.
For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community). For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community).
For more information about communication, refer to Ansible's [Communication guide](https://docs.ansible.com/ansible/devel/community/communication.html). For more information about [communication](https://docs.ansible.com/ansible/latest/community/communication.html)
## Publishing New Version ### Publishing New Version
See the [Releasing guidelines](https://github.com/ansible/community-docs/blob/main/releasing_collections.rst) to learn how to release this collection. Basic instructions without release branches:
1. Create `changelogs/fragments/<version>.yml` with `release_summary:` section (which must be a string, not a list).
2. Run `antsibull-changelog release --collection-flatmap yes`
3. Make sure `CHANGELOG.rst` and `changelogs/changelog.yaml` are added to git, and the deleted fragments have been removed.
4. Tag the commit with `<version>`. Push changes and tag to the main repository.
## Release notes ## Release notes
See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-3/CHANGELOG.rst). See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-2/CHANGELOG.rst).
## Roadmap ## Roadmap
In general, we plan to release a major version every six months, and minor versions every two months. Major versions can contain breaking changes, while minor versions only contain new features and bugfixes. See [this issue](https://github.com/ansible-collections/community.general/issues/582) for information on releasing, versioning and deprecation.
See [this issue](https://github.com/ansible-collections/community.general/issues/582) for information on releasing, versioning, and deprecation. In general, we plan to release a major version every six months, and minor versions every two months. Major versions can contain breaking changes, while minor versions only contain new features and bugfixes.
## More information ## More information

File diff suppressed because it is too large Load Diff

View File

@@ -1,74 +0,0 @@
Committers Guidelines for community.general
===========================================
This document is based on the [Ansible committer guidelines](https://github.com/ansible/ansible/blob/b57444af14062ec96e0af75fdfc2098c74fe2d9a/docs/docsite/rst/community/committer_guidelines.rst) ([latest version](https://docs.ansible.com/ansible/devel/community/committer_guidelines.html)).
These are the guidelines for people with commit privileges on the Ansible Community General Collection GitHub repository. Please read the guidelines before you commit.
These guidelines apply to everyone. At the same time, this is NOT a process document. So just use good judgment. You have been given commit access because we trust your judgment.
That said, use the trust wisely.
If you abuse the trust and break components and builds, and so on, the trust level falls and you may be asked not to commit or you may lose your commit privileges.
Our workflow on GitHub
----------------------
As a committer, you may already know this, but our workflow forms a lot of our team policies. Please ensure you are aware of the following workflow steps:
* Fork the repository upon which you want to do some work to your own personal repository
* Work on the specific branch upon which you need to commit
* Create a Pull Request back to the collection repository and await reviews
* Adjust code as necessary based on the Comments provided
* Ask someone from the other committers to do a final review and merge
Sometimes, committers merge their own pull requests. This section is a set of guidelines. If you are changing a comma in a doc or making a very minor change, you can use your best judgement. This is another trust thing. The process is critical for any major change, but for little things or getting something done quickly, use your best judgement and make sure people on the team are aware of your work.
Roles
-----
* Release managers: Merge pull requests to `stable-X` branches, create tags to do releases.
* Committers: Fine to do PRs for most things, but we should have a timebox. Hanging PRs may merge on the judgement of these devs.
* Module maintainers: Module maintainers own specific modules and have indirect commit access through the current module PR mechanisms. This is primarily [ansibullbot](https://github.com/ansibullbot)'s `shipit` mechanism.
General rules
-------------
Individuals with direct commit access to this collection repository are entrusted with powers that allow them to do a broad variety of things--probably more than we can write down. Rather than rules, treat these as general *guidelines*, individuals with this power are expected to use their best judgement.
* Do NOTs:
- Do not commit directly.
- Do not merge your own PRs. Someone else should have a chance to review and approve the PR merge. You have a small amount of leeway here for very minor changes.
- Do not forget about non-standard / alternate environments. Consider the alternatives. Yes, people have bad/unusual/strange environments (like binaries from multiple init systems installed), but they are the ones who need us the most.
- Do not drag your community team members down. Discuss the technical merits of any pull requests you review. Avoid negativity and personal comments. For more guidance on being a good community member, read the [Ansible Community Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
- Do not forget about the maintenance burden. High-maintenance features may not be worth adding.
- Do not break playbooks. Always keep backwards compatibility in mind.
- Do not forget to keep it simple. Complexity breeds all kinds of problems.
- Do not merge to branches other than `main`, especially not to `stable-X`, if you do not have explicit permission to do so.
- Do not create tags. Tags are used in the release process, and should only be created by the people responsible for managing the stable branches.
* Do:
- Squash, avoid merges whenever possible, use GitHub's squash commits or cherry pick if needed (bisect thanks you).
- Be active. Committers who have no activity on the project (through merges, triage, commits, and so on) will have their permissions suspended.
- Consider backwards compatibility (goes back to "do not break existing playbooks").
- Write tests. PRs with tests are looked at with more priority than PRs without tests that should have them included. While not all changes require tests, be sure to add them for bug fixes or functionality changes.
- Discuss with other committers, especially when you are unsure of something.
- Document! If your PR is a new feature or a change to behavior, make sure you've updated all associated documentation or have notified the right people to do so.
- Consider scope, sometimes a fix can be generalized.
- Keep it simple, then things are maintainable, debuggable and intelligible.
Committers are expected to continue to follow the same community and contribution guidelines followed by the rest of the Ansible community.
People
------
Individuals who have been asked to become a part of this group have generally been contributing in significant ways to the community.general collection for some time. Should they agree, they are requested to add their names and GitHub IDs to this file, in the section below, through a pull request. Doing so indicates that these individuals agree to act in the ways that their fellow committers trust that they will act.
| Name | GitHub ID | IRC Nick | Other |
| ------------------- | -------------------- | ------------------ | -------------------- |
| Alexei Znamensky | russoz | russoz | |
| Andrew Klychkov | andersson007 | andersson007_ | |
| Andrew Pantuso | Ajpantuso | ajpantuso | |
| Felix Fontein | felixfontein | felixfontein | |
| John R Barker | gundalow | gundalow | |

View File

@@ -1,5 +0,0 @@
---
sections:
- title: Guides
toctree:
- filter_guide

View File

@@ -1,784 +0,0 @@
.. _ansible_collections.community.general.docsite.filter_guide:
community.general Filter Guide
==============================
The :ref:`community.general collection <plugins_in_community.general>` offers several useful filter plugins.
.. contents:: Topics
Paths
-----
The ``path_join`` filter has been added in ansible-base 2.10. If you want to use this filter, but also need to support Ansible 2.9, you can use ``community.general``'s ``path_join`` shim, ``community.general.path_join``. This filter redirects to ``path_join`` for ansible-base 2.10 and ansible-core 2.11 or newer, and re-implements the filter for Ansible 2.9.
.. code-block:: yaml+jinja
# ansible-base 2.10 or newer:
path: {{ ('/etc', path, 'subdir', file) | path_join }}
# Also works with Ansible 2.9:
path: {{ ('/etc', path, 'subdir', file) | community.general.path_join }}
.. versionadded:: 3.0.0
Abstract transformations
------------------------
Dictionaries
^^^^^^^^^^^^
You can use the ``dict_kv`` filter to create a single-entry dictionary with ``value | community.general.dict_kv(key)``:
.. code-block:: yaml+jinja
- name: Create a single-entry dictionary
debug:
msg: "{{ myvar | community.general.dict_kv('thatsmyvar') }}"
vars:
myvar: myvalue
- name: Create a list of dictionaries where the 'server' field is taken from a list
debug:
msg: >-
{{ myservers | map('community.general.dict_kv', 'server')
| map('combine', common_config) }}
vars:
common_config:
type: host
database: all
myservers:
- server1
- server2
This produces:
.. code-block:: ansible-output
TASK [Create a single-entry dictionary] **************************************************
ok: [localhost] => {
"msg": {
"thatsmyvar": "myvalue"
}
}
TASK [Create a list of dictionaries where the 'server' field is taken from a list] *******
ok: [localhost] => {
"msg": [
{
"database": "all",
"server": "server1",
"type": "host"
},
{
"database": "all",
"server": "server2",
"type": "host"
}
]
}
.. versionadded:: 2.0.0
If you need to convert a list of key-value pairs to a dictionary, you can use the ``dict`` function. Unfortunately, this function cannot be used with ``map``. For this, the ``community.general.dict`` filter can be used:
.. code-block:: yaml+jinja
- name: Create a dictionary with the dict function
debug:
msg: "{{ dict([[1, 2], ['a', 'b']]) }}"
- name: Create a dictionary with the community.general.dict filter
debug:
msg: "{{ [[1, 2], ['a', 'b']] | community.general.dict }}"
- name: Create a list of dictionaries with map and the community.general.dict filter
debug:
msg: >-
{{ values | map('zip', ['k1', 'k2', 'k3'])
| map('map', 'reverse')
| map('community.general.dict') }}
vars:
values:
- - foo
- 23
- a
- - bar
- 42
- b
This produces:
.. code-block:: ansible-output
TASK [Create a dictionary with the dict function] ****************************************
ok: [localhost] => {
"msg": {
"1": 2,
"a": "b"
}
}
TASK [Create a dictionary with the community.general.dict filter] ************************
ok: [localhost] => {
"msg": {
"1": 2,
"a": "b"
}
}
TASK [Create a list of dictionaries with map and the community.general.dict filter] ******
ok: [localhost] => {
"msg": [
{
"k1": "foo",
"k2": 23,
"k3": "a"
},
{
"k1": "bar",
"k2": 42,
"k3": "b"
}
]
}
.. versionadded:: 3.0.0
Grouping
^^^^^^^^
If you have a list of dictionaries, the Jinja2 ``groupby`` filter allows grouping the list by an attribute. This results in a list of ``(grouper, list)`` namedtuples, where ``list`` contains all dictionaries where the selected attribute equals ``grouper``. If you know that for every ``grouper``, there will be at most one entry in that list, you can use the ``community.general.groupby_as_dict`` filter to convert the original list into a dictionary which maps ``grouper`` to the corresponding dictionary.
One example is ``ansible_facts.mounts``, which is a list of dictionaries where each has one ``device`` element to indicate the device which is mounted. Therefore, ``ansible_facts.mounts | community.general.groupby_as_dict('device')`` is a dictionary mapping a device to the mount information:
.. code-block:: yaml+jinja
- name: Output mount facts grouped by device name
debug:
var: ansible_facts.mounts | community.general.groupby_as_dict('device')
- name: Output mount facts grouped by mount point
debug:
var: ansible_facts.mounts | community.general.groupby_as_dict('mount')
This produces:
.. code-block:: ansible-output
TASK [Output mount facts grouped by device name] ******************************************
ok: [localhost] => {
"ansible_facts.mounts | community.general.groupby_as_dict('device')": {
"/dev/sda1": {
"block_available": 2000,
"block_size": 4096,
"block_total": 2345,
"block_used": 345,
"device": "/dev/sda1",
"fstype": "ext4",
"inode_available": 500,
"inode_total": 512,
"inode_used": 12,
"mount": "/boot",
"options": "rw,relatime,data=ordered",
"size_available": 56821,
"size_total": 543210,
"uuid": "ab31cade-d9c1-484d-8482-8a4cbee5241a"
},
"/dev/sda2": {
"block_available": 1234,
"block_size": 4096,
"block_total": 12345,
"block_used": 11111,
"device": "/dev/sda2",
"fstype": "ext4",
"inode_available": 1111,
"inode_total": 1234,
"inode_used": 123,
"mount": "/",
"options": "rw,relatime",
"size_available": 42143,
"size_total": 543210,
"uuid": "abcdef01-2345-6789-0abc-def012345678"
}
}
}
TASK [Output mount facts grouped by mount point] ******************************************
ok: [localhost] => {
"ansible_facts.mounts | community.general.groupby_as_dict('mount')": {
"/": {
"block_available": 1234,
"block_size": 4096,
"block_total": 12345,
"block_used": 11111,
"device": "/dev/sda2",
"fstype": "ext4",
"inode_available": 1111,
"inode_total": 1234,
"inode_used": 123,
"mount": "/",
"options": "rw,relatime",
"size_available": 42143,
"size_total": 543210,
"uuid": "bdf50b7d-4859-40af-8665-c637ee7a7808"
},
"/boot": {
"block_available": 2000,
"block_size": 4096,
"block_total": 2345,
"block_used": 345,
"device": "/dev/sda1",
"fstype": "ext4",
"inode_available": 500,
"inode_total": 512,
"inode_used": 12,
"mount": "/boot",
"options": "rw,relatime,data=ordered",
"size_available": 56821,
"size_total": 543210,
"uuid": "ab31cade-d9c1-484d-8482-8a4cbee5241a"
}
}
}
.. versionadded:: 3.0.0
Merging lists of dictionaries
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If you have two lists of dictionaries and want to combine them into a list of merged dictionaries, where two dictionaries are merged if they coincide in one attribute, you can use the ``lists_mergeby`` filter.
.. code-block:: yaml+jinja
- name: Merge two lists by common attribute 'name'
debug:
var: list1 | community.general.lists_mergeby(list2, 'name')
vars:
list1:
- name: foo
extra: true
- name: bar
extra: false
- name: meh
extra: true
list2:
- name: foo
path: /foo
- name: baz
path: /bazzz
This produces:
.. code-block:: ansible-output
TASK [Merge two lists by common attribute 'name'] ****************************************
ok: [localhost] => {
"list1 | community.general.lists_mergeby(list2, 'name')": [
{
"extra": false,
"name": "bar"
},
{
"name": "baz",
"path": "/bazzz"
},
{
"extra": true,
"name": "foo",
"path": "/foo"
},
{
"extra": true,
"name": "meh"
}
]
}
.. versionadded:: 2.0.0
Working with times
------------------
The ``to_time_unit`` filter allows converting times from a human-readable string to a unit. For example, ``'4h 30min 12second' | community.general.to_time_unit('hour')`` gives the number of hours that correspond to 4 hours, 30 minutes and 12 seconds.
There are shorthands to directly convert to various units, like ``to_hours``, ``to_minutes``, ``to_seconds``, and so on. The following table lists all units that can be used:
.. list-table:: Units
:widths: 25 25 25 25
:header-rows: 1
* - Unit name
- Unit value in seconds
- Unit strings for filter
- Shorthand filter
* - Millisecond
- 1/1000 second
- ``ms``, ``millisecond``, ``milliseconds``, ``msec``, ``msecs``, ``msecond``, ``mseconds``
- ``to_milliseconds``
* - Second
- 1 second
- ``s``, ``sec``, ``secs``, ``second``, ``seconds``
- ``to_seconds``
* - Minute
- 60 seconds
- ``m``, ``min``, ``mins``, ``minute``, ``minutes``
- ``to_minutes``
* - Hour
- 60*60 seconds
- ``h``, ``hour``, ``hours``
- ``to_hours``
* - Day
- 24*60*60 seconds
- ``d``, ``day``, ``days``
- ``to_days``
* - Week
- 7*24*60*60 seconds
- ``w``, ``week``, ``weeks``
- ``to_weeks``
* - Month
- 30*24*60*60 seconds
- ``mo``, ``month``, ``months``
- ``to_months``
* - Year
- 365*24*60*60 seconds
- ``y``, ``year``, ``years``
- ``to_years``
Note that months and years are using a simplified representation: a month is 30 days, and a year is 365 days. If you need different definitions of months or years, you can pass them as keyword arguments. For example, if you want a year to be 365.25 days, and a month to be 30.5 days, you can write ``'11months 4' | community.general.to_years(year=365.25, month=30.5)``. These keyword arguments can be specified to ``to_time_unit`` and to all shorthand filters.
.. code-block:: yaml+jinja
- name: Convert string to seconds
debug:
msg: "{{ '30h 20m 10s 123ms' | community.general.to_time_unit('seconds') }}"
- name: Convert string to hours
debug:
msg: "{{ '30h 20m 10s 123ms' | community.general.to_hours }}"
- name: Convert string to years (using 365.25 days == 1 year)
debug:
msg: "{{ '400d 15h' | community.general.to_years(year=365.25) }}"
This produces:
.. code-block:: ansible-output
TASK [Convert string to seconds] **********************************************************
ok: [localhost] => {
"msg": "109210.123"
}
TASK [Convert string to hours] ************************************************************
ok: [localhost] => {
"msg": "30.336145277778"
}
TASK [Convert string to years (using 365.25 days == 1 year)] ******************************
ok: [localhost] => {
"msg": "1.096851471595"
}
.. versionadded:: 0.2.0
Working with versions
---------------------
If you need to sort a list of version numbers, the Jinja ``sort`` filter is problematic. Since it sorts lexicographically, ``2.10`` will come before ``2.9``. To treat version numbers correctly, you can use the ``version_sort`` filter:
.. code-block:: yaml+jinja
- name: Sort list by version number
debug:
var: ansible_versions | community.general.version_sort
vars:
ansible_versions:
- '2.8.0'
- '2.11.0'
- '2.7.0'
- '2.10.0'
- '2.9.0'
This produces:
.. code-block:: ansible-output
TASK [Sort list by version number] ********************************************************
ok: [localhost] => {
"ansible_versions | community.general.version_sort": [
"2.7.0",
"2.8.0",
"2.9.0",
"2.10.0",
"2.11.0"
]
}
.. versionadded:: 2.2.0
Creating identifiers
--------------------
The following filters allow to create identifiers.
Hashids
^^^^^^^
`Hashids <https://hashids.org/>`_ allow to convert sequences of integers to short unique string identifiers. This filter needs the `hashids Python library <https://pypi.org/project/hashids/>`_ installed on the controller.
.. code-block:: yaml+jinja
- name: "Create hashid"
debug:
msg: "{{ [1234, 5, 6] | community.general.hashids_encode }}"
- name: "Decode hashid"
debug:
msg: "{{ 'jm2Cytn' | community.general.hashids_decode }}"
This produces:
.. code-block:: ansible-output
TASK [Create hashid] **********************************************************************
ok: [localhost] => {
"msg": "jm2Cytn"
}
TASK [Decode hashid] **********************************************************************
ok: [localhost] => {
"msg": [
1234,
5,
6
]
}
The hashids filters accept keyword arguments to allow fine-tuning the hashids generated:
:salt: String to use as salt when hashing.
:alphabet: String of 16 or more unique characters to produce a hash.
:min_length: Minimum length of hash produced.
.. versionadded:: 3.0.0
Random MACs
^^^^^^^^^^^
You can use the ``random_mac`` filter to complete a partial `MAC address <https://en.wikipedia.org/wiki/MAC_address>`_ to a random 6-byte MAC address.
.. code-block:: yaml+jinja
- name: "Create a random MAC starting with ff:"
debug:
msg: "{{ 'FF' | community.general.random_mac }}"
- name: "Create a random MAC starting with 00:11:22:"
debug:
msg: "{{ '00:11:22' | community.general.random_mac }}"
This produces:
.. code-block:: ansible-output
TASK [Create a random MAC starting with ff:] **********************************************
ok: [localhost] => {
"msg": "ff:69:d3:78:7f:b4"
}
TASK [Create a random MAC starting with 00:11:22:] ****************************************
ok: [localhost] => {
"msg": "00:11:22:71:5d:3b"
}
You can also initialize the random number generator from a seed to create random-but-idempotent MAC addresses:
.. code-block:: yaml+jinja
"{{ '52:54:00' | community.general.random_mac(seed=inventory_hostname) }}"
Conversions
-----------
Parsing CSV files
^^^^^^^^^^^^^^^^^
Ansible offers the :ref:`community.general.read_csv module <ansible_collections.community.general.read_csv_module>` to read CSV files. Sometimes you need to convert strings to CSV files instead. For this, the ``from_csv`` filter exists.
.. code-block:: yaml+jinja
- name: "Parse CSV from string"
debug:
msg: "{{ csv_string | community.general.from_csv }}"
vars:
csv_string: |
foo,bar,baz
1,2,3
you,this,then
This produces:
.. code-block:: ansible-output
TASK [Parse CSV from string] **************************************************************
ok: [localhost] => {
"msg": [
{
"bar": "2",
"baz": "3",
"foo": "1"
},
{
"bar": "this",
"baz": "then",
"foo": "you"
}
]
}
The ``from_csv`` filter has several keyword arguments to control its behavior:
:dialect: Dialect of the CSV file. Default is ``excel``. Other possible choices are ``excel-tab`` and ``unix``. If one of ``delimiter``, ``skipinitialspace`` or ``strict`` is specified, ``dialect`` is ignored.
:fieldnames: A set of column names to use. If not provided, the first line of the CSV is assumed to contain the column names.
:delimiter: Sets the delimiter to use. Default depends on the dialect used.
:skipinitialspace: Set to ``true`` to ignore space directly after the delimiter. Default depends on the dialect used (usually ``false``).
:strict: Set to ``true`` to error out on invalid CSV input.
.. versionadded:: 3.0.0
Converting to JSON
^^^^^^^^^^^^^^^^^^
`JC <https://pypi.org/project/jc/>`_ is a CLI tool and Python library which allows to interpret output of various CLI programs as JSON. It is also available as a filter in community.general. This filter needs the `jc Python library <https://pypi.org/project/jc/>`_ installed on the controller.
.. code-block:: yaml+jinja
- name: Run 'ls' to list files in /
command: ls /
register: result
- name: Parse the ls output
debug:
msg: "{{ result.stdout | community.general.jc('ls') }}"
This produces:
.. code-block:: ansible-output
TASK [Run 'ls' to list files in /] ********************************************************
changed: [localhost]
TASK [Parse the ls output] ****************************************************************
ok: [localhost] => {
"msg": [
{
"filename": "bin"
},
{
"filename": "boot"
},
{
"filename": "dev"
},
{
"filename": "etc"
},
{
"filename": "home"
},
{
"filename": "lib"
},
{
"filename": "proc"
},
{
"filename": "root"
},
{
"filename": "run"
},
{
"filename": "tmp"
}
]
}
.. versionadded:: 2.0.0
.. _ansible_collections.community.general.docsite.json_query_filter:
Selecting JSON data: JSON queries
---------------------------------
To select a single element or a data subset from a complex data structure in JSON format (for example, Ansible facts), use the ``json_query`` filter. The ``json_query`` filter lets you query a complex JSON structure and iterate over it using a loop structure.
.. note:: You must manually install the **jmespath** dependency on the Ansible controller before using this filter. This filter is built upon **jmespath**, and you can use the same syntax. For examples, see `jmespath examples <http://jmespath.org/examples.html>`_.
Consider this data structure:
.. code-block:: yaml+jinja
{
"domain_definition": {
"domain": {
"cluster": [
{
"name": "cluster1"
},
{
"name": "cluster2"
}
],
"server": [
{
"name": "server11",
"cluster": "cluster1",
"port": "8080"
},
{
"name": "server12",
"cluster": "cluster1",
"port": "8090"
},
{
"name": "server21",
"cluster": "cluster2",
"port": "9080"
},
{
"name": "server22",
"cluster": "cluster2",
"port": "9090"
}
],
"library": [
{
"name": "lib1",
"target": "cluster1"
},
{
"name": "lib2",
"target": "cluster2"
}
]
}
}
}
To extract all clusters from this structure, you can use the following query:
.. code-block:: yaml+jinja
- name: Display all cluster names
ansible.builtin.debug:
var: item
loop: "{{ domain_definition | community.general.json_query('domain.cluster[*].name') }}"
To extract all server names:
.. code-block:: yaml+jinja
- name: Display all server names
ansible.builtin.debug:
var: item
loop: "{{ domain_definition | community.general.json_query('domain.server[*].name') }}"
To extract ports from cluster1:
.. code-block:: yaml+jinja
- name: Display all ports from cluster1
ansible.builtin.debug:
var: item
loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}"
vars:
server_name_cluster1_query: "domain.server[?cluster=='cluster1'].port"
.. note:: You can use a variable to make the query more readable.
To print out the ports from cluster1 in a comma separated string:
.. code-block:: yaml+jinja
- name: Display all ports from cluster1 as a string
ansible.builtin.debug:
msg: "{{ domain_definition | community.general.json_query('domain.server[?cluster==`cluster1`].port') | join(', ') }}"
.. note:: In the example above, quoting literals using backticks avoids escaping quotes and maintains readability.
You can use YAML `single quote escaping <https://yaml.org/spec/current.html#id2534365>`_:
.. code-block:: yaml+jinja
- name: Display all ports from cluster1
ansible.builtin.debug:
var: item
loop: "{{ domain_definition | community.general.json_query('domain.server[?cluster==''cluster1''].port') }}"
.. note:: Escaping single quotes within single quotes in YAML is done by doubling the single quote.
To get a hash map with all ports and names of a cluster:
.. code-block:: yaml+jinja
- name: Display all server ports and names from cluster1
ansible.builtin.debug:
var: item
loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}"
vars:
server_name_cluster1_query: "domain.server[?cluster=='cluster1'].{name: name, port: port}"
To extract ports from all clusters with name starting with 'server1':
.. code-block:: yaml+jinja
- name: Display all ports from cluster1
ansible.builtin.debug:
msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}"
vars:
server_name_query: "domain.server[?starts_with(name,'server1')].port"
To extract ports from all clusters with name containing 'server1':
.. code-block:: yaml+jinja
- name: Display all ports from cluster1
ansible.builtin.debug:
msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}"
vars:
server_name_query: "domain.server[?contains(name,'server1')].port"
.. note:: While using ``starts_with`` and ``contains``, you have to use the ``to_json | from_json`` filter for correct parsing of the data structure.
Working with Unicode
---------------------
`Unicode <https://unicode.org/main.html>`_ makes it possible to produce two strings which may be visually equivalent, but are comprised of distinctly different characters/character sequences. To address this ``Unicode`` defines `normalization forms <https://unicode.org/reports/tr15/>`_ which avoid these distinctions by choosing a unique character sequence for a given visual representation.
You can use the ``community.general.unicode_normalize`` filter to normalize ``Unicode`` strings within your playbooks.
.. code-block:: yaml+jinja
- name: Compare Unicode representations
debug:
msg: "{{ with_combining_character | community.general.unicode_normalize == without_combining_character }}"
vars:
with_combining_character: "{{ 'Mayagu\u0308ez' }}"
without_combining_character: Mayagüez
This produces:
.. code-block:: ansible-output
TASK [Compare Unicode representations] ********************************************************
ok: [localhost] => {
"msg": true
}
The ``community.general.unicode_normalize`` filter accepts a keyword argument to select the ``Unicode`` form used to normalize the input string.
:form: One of ``'NFC'`` (default), ``'NFD'``, ``'NFKC'``, or ``'NFKD'``. See the `Unicode reference <https://unicode.org/reports/tr15/>`_ for more information.
.. versionadded:: 3.7.0

View File

@@ -1,6 +1,6 @@
namespace: community namespace: community
name: general name: general
version: 3.7.0 version: 2.0.0
readme: README.md readme: README.md
authors: authors:
- Ansible (https://github.com/ansible) - Ansible (https://github.com/ansible)

View File

@@ -1,5 +1,31 @@
--- ---
requires_ansible: '>=2.9.10' requires_ansible: '>=2.9.10'
action_groups:
ovirt:
- ovirt_affinity_label_facts
- ovirt_api_facts
- ovirt_cluster_facts
- ovirt_datacenter_facts
- ovirt_disk_facts
- ovirt_event_facts
- ovirt_external_provider_facts
- ovirt_group_facts
- ovirt_host_facts
- ovirt_host_storage_facts
- ovirt_network_facts
- ovirt_nic_facts
- ovirt_permission_facts
- ovirt_quota_facts
- ovirt_scheduling_policy_facts
- ovirt_snapshot_facts
- ovirt_storage_domain_facts
- ovirt_storage_template_facts
- ovirt_storage_vm_facts
- ovirt_tag_facts
- ovirt_template_facts
- ovirt_user_facts
- ovirt_vm_facts
- ovirt_vmpool_facts
plugin_routing: plugin_routing:
connection: connection:
docker: docker:
@@ -11,26 +37,11 @@ plugin_routing:
redirect: community.google.gcp_storage_file redirect: community.google.gcp_storage_file
hashi_vault: hashi_vault:
redirect: community.hashi_vault.hashi_vault redirect: community.hashi_vault.hashi_vault
nios:
deprecation:
removal_version: 5.0.0
warning_text: The community.general.nios lookup plugin has been deprecated.
Please use infoblox.nios_modules.nios_lookup instead.
nios_next_ip:
deprecation:
removal_version: 5.0.0
warning_text: The community.general.nios_next_ip lookup plugin has been deprecated.
Please use infoblox.nios_modules.nios_next_ip instead.
nios_next_network:
deprecation:
removal_version: 5.0.0
warning_text: The community.general.nios_next_network lookup plugin has been
deprecated. Please use infoblox.nios_modules.nios_next_network instead.
modules: modules:
ali_instance_facts: ali_instance_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.ali_instance_info instead. warning_text: see plugin documentation for details
docker_compose: docker_compose:
redirect: community.docker.docker_compose redirect: community.docker.docker_compose
docker_config: docker_config:
@@ -130,13 +141,11 @@ plugin_routing:
gcp_forwarding_rule: gcp_forwarding_rule:
tombstone: tombstone:
removal_version: 2.0.0 removal_version: 2.0.0
warning_text: Use google.cloud.gcp_compute_forwarding_rule or google.cloud.gcp_compute_global_forwarding_rule warning_text: Use google.cloud.gcp_compute_forwarding_rule or google.cloud.gcp_compute_global_forwarding_rule instead.
instead.
gcp_healthcheck: gcp_healthcheck:
tombstone: tombstone:
removal_version: 2.0.0 removal_version: 2.0.0
warning_text: Use google.cloud.gcp_compute_health_check, google.cloud.gcp_compute_http_health_check warning_text: Use google.cloud.gcp_compute_health_check, google.cloud.gcp_compute_http_health_check or google.cloud.gcp_compute_https_health_check instead.
or google.cloud.gcp_compute_https_health_check instead.
gcp_target_proxy: gcp_target_proxy:
tombstone: tombstone:
removal_version: 2.0.0 removal_version: 2.0.0
@@ -147,22 +156,37 @@ plugin_routing:
warning_text: Use google.cloud.gcp_compute_url_map instead. warning_text: Use google.cloud.gcp_compute_url_map instead.
gcpubsub: gcpubsub:
redirect: community.google.gcpubsub redirect: community.google.gcpubsub
gcpubsub_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.google.gcpubsub_info instead.
gcpubsub_info: gcpubsub_info:
redirect: community.google.gcpubsub_info redirect: community.google.gcpubsub_info
gcpubsub_facts:
redirect: community.google.gcpubsub_info
deprecation:
removal_version: 3.0.0
warning_text: Use community.google.gcpubsub_info instead.
gcspanner: gcspanner:
tombstone: tombstone:
removal_version: 2.0.0 removal_version: 2.0.0
warning_text: Use google.cloud.gcp_spanner_database and/or google.cloud.gcp_spanner_instance warning_text: Use google.cloud.gcp_spanner_database and/or google.cloud.gcp_spanner_instance instead.
instead.
github_hooks: github_hooks:
tombstone: tombstone:
removal_version: 2.0.0 removal_version: 2.0.0
warning_text: Use community.general.github_webhook and community.general.github_webhook_info warning_text: Use community.general.github_webhook and community.general.github_webhook_info instead.
instead. gluster_heal_info:
deprecation:
removal_version: 3.0.0
warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_heal_info instead.
gluster_peer:
deprecation:
removal_version: 3.0.0
warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_peer instead.
gluster_volume:
deprecation:
removal_version: 3.0.0
warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_volume instead.
helm:
deprecation:
removal_version: 3.0.0
warning_text: The helm module in community.general has been deprecated. Use community.kubernetes.helm instead.
hetzner_failover_ip: hetzner_failover_ip:
redirect: community.hrobot.failover_ip redirect: community.hrobot.failover_ip
hetzner_failover_ip_info: hetzner_failover_ip_info:
@@ -172,21 +196,17 @@ plugin_routing:
hetzner_firewall_info: hetzner_firewall_info:
redirect: community.hrobot.firewall_info redirect: community.hrobot.firewall_info
hpilo_facts: hpilo_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.hpilo_info instead. warning_text: see plugin documentation for details
idrac_firmware:
redirect: dellemc.openmanage.idrac_firmware
idrac_redfish_facts: idrac_redfish_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.idrac_redfish_info instead. warning_text: see plugin documentation for details
idrac_server_config_profile:
redirect: dellemc.openmanage.idrac_server_config_profile
jenkins_job_facts: jenkins_job_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.jenkins_job_info instead. warning_text: see plugin documentation for details
katello: katello:
tombstone: tombstone:
removal_version: 2.0.0 removal_version: 2.0.0
@@ -204,27 +224,25 @@ plugin_routing:
kubevirt_vm: kubevirt_vm:
redirect: community.kubevirt.kubevirt_vm redirect: community.kubevirt.kubevirt_vm
ldap_attr: ldap_attr:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.ldap_attrs instead. warning_text: see plugin documentation for details
logicmonitor: logicmonitor:
tombstone: tombstone:
removal_version: 1.0.0 removal_version: 1.0.0
warning_text: The logicmonitor_facts module is no longer maintained and the warning_text: The logicmonitor_facts module is no longer maintained and the API used has been disabled in 2017.
API used has been disabled in 2017.
logicmonitor_facts: logicmonitor_facts:
tombstone: tombstone:
removal_version: 1.0.0 removal_version: 1.0.0
warning_text: The logicmonitor_facts module is no longer maintained and the warning_text: The logicmonitor_facts module is no longer maintained and the API used has been disabled in 2017.
API used has been disabled in 2017.
memset_memstore_facts: memset_memstore_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.memset_memstore_info instead. warning_text: see plugin documentation for details
memset_server_facts: memset_server_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.memset_server_info instead. warning_text: see plugin documentation for details
na_cdot_aggregate: na_cdot_aggregate:
tombstone: tombstone:
removal_version: 2.0.0 removal_version: 2.0.0
@@ -258,244 +276,161 @@ plugin_routing:
removal_version: 2.0.0 removal_version: 2.0.0
warning_text: Use netapp.ontap.na_ontap_volume instead. warning_text: Use netapp.ontap.na_ontap_volume instead.
na_ontap_gather_facts: na_ontap_gather_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use netapp.ontap.na_ontap_info instead. warning_text: see plugin documentation for details
nginx_status_facts: nginx_status_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.nginx_status_info instead. warning_text: see plugin documentation for details
nios_a_record:
deprecation:
removal_version: 5.0.0
warning_text: The community.general.nios_a_record module has been deprecated.
Please use infoblox.nios_modules.nios_a_record instead.
nios_aaaa_record:
deprecation:
removal_version: 5.0.0
warning_text: The community.general.nios_aaaa_record module has been deprecated.
Please use infoblox.nios_modules.nios_aaaa_record instead.
nios_cname_record:
deprecation:
removal_version: 5.0.0
warning_text: The community.general.nios_cname_record module has been deprecated.
Please use infoblox.nios_modules.nios_cname_record instead.
nios_dns_view:
deprecation:
removal_version: 5.0.0
warning_text: The community.general.nios_dns_view module has been deprecated.
Please use infoblox.nios_modules.nios_dns_view instead.
nios_fixed_address:
deprecation:
removal_version: 5.0.0
warning_text: The community.general.nios_fixed_address module has been deprecated.
Please use infoblox.nios_modules.nios_fixed_address instead.
nios_host_record:
deprecation:
removal_version: 5.0.0
warning_text: The community.general.nios_host_record module has been deprecated.
Please use infoblox.nios_modules.nios_host_record instead.
nios_member:
deprecation:
removal_version: 5.0.0
warning_text: The community.general.nios_member module has been deprecated.
Please use infoblox.nios_modules.nios_member instead.
nios_mx_record:
deprecation:
removal_version: 5.0.0
warning_text: The community.general.nios_mx_record module has been deprecated.
Please use infoblox.nios_modules.nios_mx_record instead.
nios_naptr_record:
deprecation:
removal_version: 5.0.0
warning_text: The community.general.nios_naptr_record module has been deprecated.
Please use infoblox.nios_modules.nios_naptr_record instead.
nios_network:
deprecation:
removal_version: 5.0.0
warning_text: The community.general.nios_network module has been deprecated.
Please use infoblox.nios_modules.nios_network instead.
nios_network_view:
deprecation:
removal_version: 5.0.0
warning_text: The community.general.nios_network_view module has been deprecated.
Please use infoblox.nios_modules.nios_network_view instead.
nios_nsgroup:
deprecation:
removal_version: 5.0.0
warning_text: The community.general.nios_nsgroup module has been deprecated.
Please use infoblox.nios_modules.nios_nsgroup instead.
nios_ptr_record:
deprecation:
removal_version: 5.0.0
warning_text: The community.general.nios_ptr_record module has been deprecated.
Please use infoblox.nios_modules.nios_ptr_record instead.
nios_srv_record:
deprecation:
removal_version: 5.0.0
warning_text: The community.general.nios_srv_record module has been deprecated.
Please use infoblox.nios_modules.nios_srv_record instead.
nios_txt_record:
deprecation:
removal_version: 5.0.0
warning_text: The community.general.nios_txt_record module has been deprecated.
Please use infoblox.nios_modules.nios_txt_record instead.
nios_zone:
deprecation:
removal_version: 5.0.0
warning_text: The community.general.nios_zone module has been deprecated.
Please use infoblox.nios_modules.nios_zone instead.
ome_device_info:
redirect: dellemc.openmanage.ome_device_info
one_image_facts: one_image_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.one_image_info instead. warning_text: see plugin documentation for details
onepassword_facts: onepassword_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.onepassword_info instead. warning_text: see plugin documentation for details
oneview_datacenter_facts: oneview_datacenter_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.oneview_datacenter_info instead. warning_text: see plugin documentation for details
oneview_enclosure_facts: oneview_enclosure_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.oneview_enclosure_info instead. warning_text: see plugin documentation for details
oneview_ethernet_network_facts: oneview_ethernet_network_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.oneview_ethernet_network_info instead. warning_text: see plugin documentation for details
oneview_fc_network_facts: oneview_fc_network_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.oneview_fc_network_info instead. warning_text: see plugin documentation for details
oneview_fcoe_network_facts: oneview_fcoe_network_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.oneview_fcoe_network_info instead. warning_text: see plugin documentation for details
oneview_logical_interconnect_group_facts: oneview_logical_interconnect_group_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.oneview_logical_interconnect_group_info warning_text: see plugin documentation for details
instead.
oneview_network_set_facts: oneview_network_set_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.oneview_network_set_info instead. warning_text: see plugin documentation for details
oneview_san_manager_facts: oneview_san_manager_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.oneview_san_manager_info instead. warning_text: see plugin documentation for details
online_server_facts: online_server_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.online_server_info instead. warning_text: see plugin documentation for details
online_user_facts: online_user_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.online_user_info instead. warning_text: see plugin documentation for details
ovirt: ovirt:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_vm instead. warning_text: see plugin documentation for details
ovirt_affinity_label_facts: ovirt_affinity_label_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_affinity_label_info instead. warning_text: see plugin documentation for details
ovirt_api_facts: ovirt_api_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_api_info instead. warning_text: see plugin documentation for details
ovirt_cluster_facts: ovirt_cluster_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_cluster_info instead. warning_text: see plugin documentation for details
ovirt_datacenter_facts: ovirt_datacenter_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_datacenter_info instead. warning_text: see plugin documentation for details
ovirt_disk_facts: ovirt_disk_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_disk_info instead. warning_text: see plugin documentation for details
ovirt_event_facts: ovirt_event_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_event_info instead. warning_text: see plugin documentation for details
ovirt_external_provider_facts: ovirt_external_provider_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_external_provider_info instead. warning_text: see plugin documentation for details
ovirt_group_facts: ovirt_group_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_group_info instead. warning_text: see plugin documentation for details
ovirt_host_facts: ovirt_host_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_host_info instead. warning_text: see plugin documentation for details
ovirt_host_storage_facts: ovirt_host_storage_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_host_storage_info instead. warning_text: see plugin documentation for details
ovirt_network_facts: ovirt_network_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_network_info instead. warning_text: see plugin documentation for details
ovirt_nic_facts: ovirt_nic_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_nic_info instead. warning_text: see plugin documentation for details
ovirt_permission_facts: ovirt_permission_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_permission_info instead. warning_text: see plugin documentation for details
ovirt_quota_facts: ovirt_quota_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_quota_info instead. warning_text: see plugin documentation for details
ovirt_scheduling_policy_facts: ovirt_scheduling_policy_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_scheduling_policy_info instead. warning_text: see plugin documentation for details
ovirt_snapshot_facts: ovirt_snapshot_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_snapshot_info instead. warning_text: see plugin documentation for details
ovirt_storage_domain_facts: ovirt_storage_domain_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_storage_domain_info instead. warning_text: see plugin documentation for details
ovirt_storage_template_facts: ovirt_storage_template_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_storage_template_info instead. warning_text: see plugin documentation for details
ovirt_storage_vm_facts: ovirt_storage_vm_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_storage_vm_info instead. warning_text: see plugin documentation for details
ovirt_tag_facts: ovirt_tag_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_tag_info instead. warning_text: see plugin documentation for details
ovirt_template_facts: ovirt_template_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_template_info instead. warning_text: see plugin documentation for details
ovirt_user_facts: ovirt_user_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_user_info instead. warning_text: see plugin documentation for details
ovirt_vm_facts: ovirt_vm_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_vm_info instead. warning_text: see plugin documentation for details
ovirt_vmpool_facts: ovirt_vmpool_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_vmpool_info instead. warning_text: see plugin documentation for details
postgresql_copy: postgresql_copy:
redirect: community.postgresql.postgresql_copy redirect: community.postgresql.postgresql_copy
postgresql_db: postgresql_db:
@@ -536,54 +471,54 @@ plugin_routing:
redirect: community.postgresql.postgresql_table redirect: community.postgresql.postgresql_table
postgresql_tablespace: postgresql_tablespace:
redirect: community.postgresql.postgresql_tablespace redirect: community.postgresql.postgresql_tablespace
postgresql_user:
redirect: community.postgresql.postgresql_user
postgresql_user_obj_stat_info: postgresql_user_obj_stat_info:
redirect: community.postgresql.postgresql_user_obj_stat_info redirect: community.postgresql.postgresql_user_obj_stat_info
postgresql_user:
redirect: community.postgresql.postgresql_user
purefa_facts: purefa_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use purestorage.flasharray.purefa_info instead. warning_text: see plugin documentation for details
purefb_facts: purefb_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use purestorage.flashblade.purefb_info instead. warning_text: see plugin documentation for details
python_requirements_facts: python_requirements_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.python_requirements_info instead. warning_text: see plugin documentation for details
redfish_facts: redfish_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.redfish_info instead. warning_text: see plugin documentation for details
scaleway_image_facts: scaleway_image_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.scaleway_image_info instead. warning_text: see plugin documentation for details
scaleway_ip_facts: scaleway_ip_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.scaleway_ip_info instead. warning_text: see plugin documentation for details
scaleway_organization_facts: scaleway_organization_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.scaleway_organization_info instead. warning_text: see plugin documentation for details
scaleway_security_group_facts: scaleway_security_group_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.scaleway_security_group_info instead. warning_text: see plugin documentation for details
scaleway_server_facts: scaleway_server_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.scaleway_server_info instead. warning_text: see plugin documentation for details
scaleway_snapshot_facts: scaleway_snapshot_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.scaleway_snapshot_info instead. warning_text: see plugin documentation for details
scaleway_volume_facts: scaleway_volume_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.scaleway_volume_info instead. warning_text: see plugin documentation for details
sf_account_manager: sf_account_manager:
tombstone: tombstone:
removal_version: 2.0.0 removal_version: 2.0.0
@@ -605,17 +540,17 @@ plugin_routing:
removal_version: 2.0.0 removal_version: 2.0.0
warning_text: Use netapp.elementsw.na_elementsw_volume instead. warning_text: Use netapp.elementsw.na_elementsw_volume instead.
smartos_image_facts: smartos_image_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.smartos_image_info instead. warning_text: see plugin documentation for details
vertica_facts: vertica_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.vertica_info instead. warning_text: see plugin documentation for details
xenserver_guest_facts: xenserver_guest_facts:
tombstone: deprecation:
removal_version: 3.0.0 removal_version: 3.0.0
warning_text: Use community.general.xenserver_guest_info instead. warning_text: see plugin documentation for details
doc_fragments: doc_fragments:
_gcp: _gcp:
redirect: community.google._gcp redirect: community.google._gcp
@@ -627,11 +562,6 @@ plugin_routing:
redirect: community.kubevirt.kubevirt_common_options redirect: community.kubevirt.kubevirt_common_options
kubevirt_vm_options: kubevirt_vm_options:
redirect: community.kubevirt.kubevirt_vm_options redirect: community.kubevirt.kubevirt_vm_options
nios:
deprecation:
removal_version: 5.0.0
warning_text: The community.general.nios document fragment has been deprecated.
Please use infoblox.nios_modules.nios instead.
postgresql: postgresql:
redirect: community.postgresql.postgresql redirect: community.postgresql.postgresql
module_utils: module_utils:
@@ -649,33 +579,21 @@ plugin_routing:
redirect: community.hrobot.robot redirect: community.hrobot.robot
kubevirt: kubevirt:
redirect: community.kubevirt.kubevirt redirect: community.kubevirt.kubevirt
net_tools.nios.api:
deprecation:
removal_version: 5.0.0
warning_text: The community.general.net_tools.nios.api module_utils has been
deprecated. Please use infoblox.nios_modules.api instead.
postgresql: postgresql:
redirect: community.postgresql.postgresql redirect: community.postgresql.postgresql
remote_management.dellemc.dellemc_idrac:
redirect: dellemc.openmanage.dellemc_idrac
remote_management.dellemc.ome:
redirect: dellemc.openmanage.ome
callback: callback:
actionable: actionable:
tombstone: tombstone:
removal_version: 2.0.0 removal_version: 2.0.0
warning_text: Use the 'default' callback plugin with 'display_skipped_hosts warning_text: Use the 'default' callback plugin with 'display_skipped_hosts = no' and 'display_ok_hosts = no' options.
= no' and 'display_ok_hosts = no' options.
full_skip: full_skip:
tombstone: tombstone:
removal_version: 2.0.0 removal_version: 2.0.0
warning_text: Use the 'default' callback plugin with 'display_skipped_hosts warning_text: Use the 'default' callback plugin with 'display_skipped_hosts = no' option.
= no' option.
stderr: stderr:
tombstone: tombstone:
removal_version: 2.0.0 removal_version: 2.0.0
warning_text: Use the 'default' callback plugin with 'display_failed_stderr warning_text: Use the 'default' callback plugin with 'display_failed_stderr = yes' option.
= yes' option.
inventory: inventory:
docker_machine: docker_machine:
redirect: community.docker.docker_machine redirect: community.docker.docker_machine
@@ -683,10 +601,3 @@ plugin_routing:
redirect: community.docker.docker_swarm redirect: community.docker.docker_swarm
kubevirt: kubevirt:
redirect: community.kubevirt.kubevirt redirect: community.kubevirt.kubevirt
filter:
path_join:
# The ansible.builtin.path_join filter has been added in ansible-base 2.10.
# Since plugin routing is only available since ansible-base 2.10, this
# redirect will be used for ansible-base 2.10 or later, and the included
# path_join filter will be used for Ansible 2.9 or earlier.
redirect: ansible.builtin.path_join

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, quidame <quidame@poivron.org> # Copyright: (c) 2020, quidame <quidame@poivron.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -8,7 +7,7 @@ __metaclass__ = type
import time import time
from ansible.plugins.action import ActionBase from ansible.plugins.action import ActionBase
from ansible.errors import AnsibleActionFail, AnsibleConnectionFailure from ansible.errors import AnsibleError, AnsibleActionFail, AnsibleConnectionFailure
from ansible.utils.vars import merge_hash from ansible.utils.vars import merge_hash
from ansible.utils.display import Display from ansible.utils.display import Display
@@ -41,27 +40,19 @@ class ActionModule(ActionBase):
"(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than " "(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
"'ansible_timeout' (=%s) (recommended).") "'ansible_timeout' (=%s) (recommended).")
def _async_result(self, async_status_args, task_vars, timeout): def _async_result(self, module_args, task_vars, timeout):
''' '''
Retrieve results of the asynchonous task, and display them in place of Retrieve results of the asynchonous task, and display them in place of
the async wrapper results (those with the ansible_job_id key). the async wrapper results (those with the ansible_job_id key).
''' '''
async_status = self._task.copy()
async_status.args = async_status_args
async_status.action = 'ansible.builtin.async_status'
async_status.async_val = 0
async_action = self._shared_loader_obj.action_loader.get(
async_status.action, task=async_status, connection=self._connection,
play_context=self._play_context, loader=self._loader, templar=self._templar,
shared_loader_obj=self._shared_loader_obj)
if async_status.args['mode'] == 'cleanup':
return async_action.run(task_vars=task_vars)
# At least one iteration is required, even if timeout is 0. # At least one iteration is required, even if timeout is 0.
for dummy in range(max(1, timeout)): for i in range(max(1, timeout)):
async_result = async_action.run(task_vars=task_vars) async_result = self._execute_module(
if async_result.get('finished', 0) == 1: module_name='ansible.builtin.async_status',
module_args=module_args,
task_vars=task_vars,
wrap_async=False)
if async_result['finished'] == 1:
break break
time.sleep(min(1, timeout)) time.sleep(min(1, timeout))
@@ -85,6 +76,7 @@ class ActionModule(ActionBase):
task_async = self._task.async_val task_async = self._task.async_val
check_mode = self._play_context.check_mode check_mode = self._play_context.check_mode
max_timeout = self._connection._play_context.timeout max_timeout = self._connection._play_context.timeout
module_name = self._task.action
module_args = self._task.args module_args = self._task.args
if module_args.get('state', None) == 'restored': if module_args.get('state', None) == 'restored':
@@ -115,7 +107,7 @@ class ActionModule(ActionBase):
# longer on the controller); and set a backup file path. # longer on the controller); and set a backup file path.
module_args['_timeout'] = task_async module_args['_timeout'] = task_async
module_args['_back'] = '%s/iptables.state' % async_dir module_args['_back'] = '%s/iptables.state' % async_dir
async_status_args = dict(mode='status') async_status_args = dict(_async_dir=async_dir)
confirm_cmd = 'rm -f %s' % module_args['_back'] confirm_cmd = 'rm -f %s' % module_args['_back']
starter_cmd = 'touch %s.starter' % module_args['_back'] starter_cmd = 'touch %s.starter' % module_args['_back']
remaining_time = max(task_async, max_timeout) remaining_time = max(task_async, max_timeout)
@@ -141,7 +133,7 @@ class ActionModule(ActionBase):
# The module is aware to not process the main iptables-restore # The module is aware to not process the main iptables-restore
# command before finding (and deleting) the 'starter' cookie on # command before finding (and deleting) the 'starter' cookie on
# the host, so the previous query will not reach ssh timeout. # the host, so the previous query will not reach ssh timeout.
dummy = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE) garbage = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE)
# As the main command is not yet executed on the target, here # As the main command is not yet executed on the target, here
# 'finished' means 'failed before main command be executed'. # 'finished' means 'failed before main command be executed'.
@@ -151,7 +143,7 @@ class ActionModule(ActionBase):
except AttributeError: except AttributeError:
pass pass
for dummy in range(max_timeout): for x in range(max_timeout):
time.sleep(1) time.sleep(1)
remaining_time -= 1 remaining_time -= 1
# - AnsibleConnectionFailure covers rejected requests (i.e. # - AnsibleConnectionFailure covers rejected requests (i.e.
@@ -159,7 +151,7 @@ class ActionModule(ActionBase):
# - ansible_timeout is able to cover dropped requests (due # - ansible_timeout is able to cover dropped requests (due
# to a rule or policy DROP) if not lower than async_val. # to a rule or policy DROP) if not lower than async_val.
try: try:
dummy = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE) garbage = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE)
break break
except AnsibleConnectionFailure: except AnsibleConnectionFailure:
continue continue
@@ -172,12 +164,16 @@ class ActionModule(ActionBase):
del result[key] del result[key]
if result.get('invocation', {}).get('module_args'): if result.get('invocation', {}).get('module_args'):
for key in ('_back', '_timeout', '_async_dir', 'jid'): if '_timeout' in result['invocation']['module_args']:
if result['invocation']['module_args'].get(key): del result['invocation']['module_args']['_back']
del result['invocation']['module_args'][key] del result['invocation']['module_args']['_timeout']
async_status_args['mode'] = 'cleanup' async_status_args['mode'] = 'cleanup'
dummy = self._async_result(async_status_args, task_vars, 0) garbage = self._execute_module(
module_name='ansible.builtin.async_status',
module_args=async_status_args,
task_vars=task_vars,
wrap_async=False)
if not wrap_async: if not wrap_async:
# remove a temporary path we created # remove a temporary path we created

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, Amin Vakil <info@aminvakil.com> # Copyright: (c) 2020, Amin Vakil <info@aminvakil.com>
# Copyright: (c) 2016-2018, Matt Davis <mdavis@ansible.com> # Copyright: (c) 2016-2018, Matt Davis <mdavis@ansible.com>
# Copyright: (c) 2018, Sam Doran <sdoran@redhat.com> # Copyright: (c) 2018, Sam Doran <sdoran@redhat.com>
@@ -8,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type __metaclass__ = type
from ansible.errors import AnsibleError, AnsibleConnectionFailure from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.common.collections import is_string from ansible.module_utils.common.collections import is_string
from ansible.plugins.action import ActionBase from ansible.plugins.action import ActionBase
from ansible.utils.display import Display from ansible.utils.display import Display

View File

@@ -9,7 +9,7 @@ DOCUMENTATION = '''
short_description: Do As user short_description: Do As user
description: description:
- This become plugins allows your remote/login user to execute commands as another user via the doas utility. - This become plugins allows your remote/login user to execute commands as another user via the doas utility.
author: Ansible Core Team author: ansible (@core)
options: options:
become_user: become_user:
description: User you 'become' to execute the task description: User you 'become' to execute the task
@@ -81,7 +81,7 @@ DOCUMENTATION = '''
import re import re
from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils._text import to_bytes
from ansible.plugins.become import BecomeBase from ansible.plugins.become import BecomeBase

View File

@@ -8,7 +8,7 @@ DOCUMENTATION = '''
short_description: Centrify's Direct Authorize short_description: Centrify's Direct Authorize
description: description:
- This become plugins allows your remote/login user to execute commands as another user via the dzdo utility. - This become plugins allows your remote/login user to execute commands as another user via the dzdo utility.
author: Ansible Core Team author: ansible (@core)
options: options:
become_user: become_user:
description: User you 'become' to execute the task description: User you 'become' to execute the task

View File

@@ -9,7 +9,7 @@ DOCUMENTATION = '''
short_description: Kerberos substitute user short_description: Kerberos substitute user
description: description:
- This become plugins allows your remote/login user to execute commands as another user via the ksu utility. - This become plugins allows your remote/login user to execute commands as another user via the ksu utility.
author: Ansible Core Team author: ansible (@core)
options: options:
become_user: become_user:
description: User you 'become' to execute the task description: User you 'become' to execute the task
@@ -82,7 +82,7 @@ DOCUMENTATION = '''
import re import re
from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils._text import to_bytes
from ansible.plugins.become import BecomeBase from ansible.plugins.become import BecomeBase

View File

@@ -9,7 +9,7 @@ DOCUMENTATION = '''
short_description: Systemd's machinectl privilege escalation short_description: Systemd's machinectl privilege escalation
description: description:
- This become plugins allows your remote/login user to execute commands as another user via the machinectl utility. - This become plugins allows your remote/login user to execute commands as another user via the machinectl utility.
author: Ansible Core Team author: ansible (@core)
options: options:
become_user: become_user:
description: User you 'become' to execute the task description: User you 'become' to execute the task

View File

@@ -9,7 +9,7 @@ DOCUMENTATION = '''
short_description: PowerBroker run short_description: PowerBroker run
description: description:
- This become plugins allows your remote/login user to execute commands as another user via the pbrun utility. - This become plugins allows your remote/login user to execute commands as another user via the pbrun utility.
author: Ansible Core Team author: ansible (@core)
options: options:
become_user: become_user:
description: User you 'become' to execute the task description: User you 'become' to execute the task

View File

@@ -9,7 +9,7 @@ DOCUMENTATION = '''
short_description: profile based execution short_description: profile based execution
description: description:
- This become plugins allows your remote/login user to execute commands as another user via the pfexec utility. - This become plugins allows your remote/login user to execute commands as another user via the pfexec utility.
author: Ansible Core Team author: ansible (@core)
options: options:
become_user: become_user:
description: description:

View File

@@ -9,7 +9,7 @@ DOCUMENTATION = '''
short_description: Privilege Manager run short_description: Privilege Manager run
description: description:
- This become plugins allows your remote/login user to execute commands as another user via the pmrun utility. - This become plugins allows your remote/login user to execute commands as another user via the pmrun utility.
author: Ansible Core Team author: ansible (@core)
options: options:
become_exe: become_exe:
description: Sudo executable description: Sudo executable

View File

@@ -1,91 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
name: sudosu
short_description: Run tasks using sudo su -
description:
- This become plugins allows your remote/login user to execute commands as another user via the C(sudo) and C(su) utilities combined.
author:
- Dag Wieers (@dagwieers)
version_added: 2.4.0
options:
become_user:
description: User you 'become' to execute the task.
default: root
ini:
- section: privilege_escalation
key: become_user
- section: sudo_become_plugin
key: user
vars:
- name: ansible_become_user
- name: ansible_sudo_user
env:
- name: ANSIBLE_BECOME_USER
- name: ANSIBLE_SUDO_USER
become_flags:
description: Options to pass to C(sudo).
default: -H -S -n
ini:
- section: privilege_escalation
key: become_flags
- section: sudo_become_plugin
key: flags
vars:
- name: ansible_become_flags
- name: ansible_sudo_flags
env:
- name: ANSIBLE_BECOME_FLAGS
- name: ANSIBLE_SUDO_FLAGS
become_pass:
description: Password to pass to C(sudo).
required: false
vars:
- name: ansible_become_password
- name: ansible_become_pass
- name: ansible_sudo_pass
env:
- name: ANSIBLE_BECOME_PASS
- name: ANSIBLE_SUDO_PASS
ini:
- section: sudo_become_plugin
key: password
"""
from ansible.plugins.become import BecomeBase
class BecomeModule(BecomeBase):
name = 'community.general.sudosu'
# messages for detecting prompted password issues
fail = ('Sorry, try again.',)
missing = ('Sorry, a password is required to run sudo', 'sudo: a password is required')
def build_become_command(self, cmd, shell):
super(BecomeModule, self).build_become_command(cmd, shell)
if not cmd:
return cmd
becomecmd = 'sudo'
flags = self.get_option('become_flags') or ''
prompt = ''
if self.get_option('become_pass'):
self.prompt = '[sudo via ansible, key=%s] password:' % self._id
if flags: # this could be simplified, but kept as is for now for backwards string matching
flags = flags.replace('-n', '')
prompt = '-p "%s"' % (self.prompt)
user = self.get_option('become_user') or ''
if user:
user = '%s' % (user)
return ' '.join([becomecmd, flags, prompt, 'su -l', user, self._build_success_command(cmd, shell)])

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2014, Brian Coca, Josh Drake, et al # (c) 2014, Brian Coca, Josh Drake, et al
# (c) 2017 Ansible Project # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -154,16 +153,16 @@ class CacheModuleKeys(MutableSet):
def __len__(self): def __len__(self):
return len(self._keyset) return len(self._keyset)
def add(self, value): def add(self, key):
self._keyset[value] = time.time() self._keyset[key] = time.time()
self._cache.set(self.PREFIX, self._keyset) self._cache.set(self.PREFIX, self._keyset)
def discard(self, value): def discard(self, key):
del self._keyset[value] del self._keyset[key]
self._cache.set(self.PREFIX, self._keyset) self._cache.set(self.PREFIX, self._keyset)
def remove_by_timerange(self, s_min, s_max): def remove_by_timerange(self, s_min, s_max):
for k in list(self._keyset.keys()): for k in self._keyset.keys():
t = self._keyset[k] t = self._keyset[k]
if s_min < t < s_max: if s_min < t < s_max:
del self._keyset[k] del self._keyset[k]

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2017, Brian Coca # (c) 2017, Brian Coca
# (c) 2017 Ansible Project # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2014, Brian Coca, Josh Drake, et al # (c) 2014, Brian Coca, Josh Drake, et al
# (c) 2017 Ansible Project # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -62,13 +61,12 @@ DOCUMENTATION = '''
type: integer type: integer
''' '''
import re
import time import time
import json import json
from ansible import constants as C from ansible import constants as C
from ansible.errors import AnsibleError from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_native from ansible.module_utils._text import to_native
from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
from ansible.plugins.cache import BaseCacheModule from ansible.plugins.cache import BaseCacheModule
from ansible.release import __version__ as ansible_base_version from ansible.release import __version__ as ansible_base_version
@@ -93,8 +91,6 @@ class CacheModule(BaseCacheModule):
performance. performance.
""" """
_sentinel_service_name = None _sentinel_service_name = None
re_url_conn = re.compile(r'^([^:]+|\[[^]]+\]):(\d+):(\d+)(?::(.*))?$')
re_sent_conn = re.compile(r'^(.*):(\d+)$')
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
uri = '' uri = ''
@@ -134,18 +130,11 @@ class CacheModule(BaseCacheModule):
self._db = self._get_sentinel_connection(uri, kw) self._db = self._get_sentinel_connection(uri, kw)
# normal connection # normal connection
else: else:
connection = self._parse_connection(self.re_url_conn, uri) connection = uri.split(':')
self._db = StrictRedis(*connection, **kw) self._db = StrictRedis(*connection, **kw)
display.vv('Redis connection: %s' % self._db) display.vv('Redis connection: %s' % self._db)
@staticmethod
def _parse_connection(re_patt, uri):
match = re_patt.match(uri)
if not match:
raise AnsibleError("Unable to parse connection string")
return match.groups()
def _get_sentinel_connection(self, uri, kw): def _get_sentinel_connection(self, uri, kw):
""" """
get sentinel connection details from _uri get sentinel connection details from _uri
@@ -169,7 +158,7 @@ class CacheModule(BaseCacheModule):
except IndexError: except IndexError:
pass # password is optional pass # password is optional
sentinels = [self._parse_connection(self.re_sent_conn, shost) for shost in connections] sentinels = [tuple(shost.split(':')) for shost in connections]
display.vv('\nUsing redis sentinels: %s' % sentinels) display.vv('\nUsing redis sentinels: %s' % sentinels)
scon = Sentinel(sentinels, **kw) scon = Sentinel(sentinels, **kw)
try: try:
@@ -228,12 +217,14 @@ class CacheModule(BaseCacheModule):
self._db.zrem(self._keys_set, key) self._db.zrem(self._keys_set, key)
def flush(self): def flush(self):
for key in list(self.keys()): for key in self.keys():
self.delete(key) self.delete(key)
def copy(self): def copy(self):
# TODO: there is probably a better way to do this in redis # TODO: there is probably a better way to do this in redis
ret = dict([(k, self.get(k)) for k in self.keys()]) ret = dict()
for key in self.keys():
ret[key] = self.get(key)
return ret return ret
def __getstate__(self): def __getstate__(self):

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2017, Brian Coca # (c) 2017, Brian Coca
# (c) 2017 Ansible Project # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com> # (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2018, Ivan Aragones Muniesa <ivan.aragones.muniesa@gmail.com> # (c) 2018, Ivan Aragones Muniesa <ivan.aragones.muniesa@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' '''

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2016, Dag Wieers <dag@wieers.com> # (c) 2016, Dag Wieers <dag@wieers.com>
# (c) 2017 Ansible Project # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

View File

@@ -792,7 +792,7 @@ from ansible.utils.color import colorize, hostcolor
from ansible.template import Templar from ansible.template import Templar
from ansible.vars.manager import VariableManager from ansible.vars.manager import VariableManager
from ansible.plugins.callback.default import CallbackModule as Default from ansible.plugins.callback.default import CallbackModule as Default
from ansible.module_utils.common.text.converters import to_text from ansible.module_utils._text import to_text
class DummyStdout(object): class DummyStdout(object):
@@ -1013,7 +1013,7 @@ class CallbackModule(Default):
for attr in _stats_attributes: for attr in _stats_attributes:
_ret[self.DIY_NS]['stats'].update({attr: _get_value(obj=stats, attr=attr)}) _ret[self.DIY_NS]['stats'].update({attr: _get_value(obj=stats, attr=attr)})
_ret[self.DIY_NS].update({'top_level_var_names': list(_ret.keys())}) _ret[self.DIY_NS].update({'top_level_var_names': _ret.keys()})
return _ret return _ret

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (C) 2014, Matt Martz <matt@sivel.net> # (C) 2014, Matt Martz <matt@sivel.net>
# (c) 2017 Ansible Project # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -174,7 +173,8 @@ class CallbackModule(CallbackBase):
# Displays info about playbook being started by a person on an # Displays info about playbook being started by a person on an
# inventory, as well as Tags, Skip Tags and Limits # inventory, as well as Tags, Skip Tags and Limits
if not self.printed_playbook: if not self.printed_playbook:
self.playbook_name, dummy = os.path.splitext(os.path.basename(self.play.playbook.filename)) self.playbook_name, _ = os.path.splitext(
os.path.basename(self.play.playbook.filename))
host_list = self.play.playbook.inventory.host_list host_list = self.play.playbook.inventory.host_list
inventory = os.path.basename(os.path.realpath(host_list)) inventory = os.path.basename(os.path.realpath(host_list))
self.send_msg("%s: Playbook initiated by %s against %s" % self.send_msg("%s: Playbook initiated by %s against %s" %

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2016 maxn nikolaev.makc@gmail.com # Copyright (C) 2016 maxn nikolaev.makc@gmail.com
# Copyright (c) 2017 Ansible Project # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com> # (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -32,7 +31,7 @@ import time
import json import json
from ansible.utils.path import makedirs_safe from ansible.utils.path import makedirs_safe
from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils._text import to_bytes
from ansible.module_utils.common._collections_compat import MutableMapping from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase from ansible.plugins.callback import CallbackBase

View File

@@ -1,235 +0,0 @@
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: loganalytics
type: aggregate
short_description: Posts task results to Azure Log Analytics
author: "Cyrus Li (@zhcli) <cyrus1006@gmail.com>"
description:
- This callback plugin will post task results in JSON formatted to an Azure Log Analytics workspace.
- Credits to authors of splunk callback plugin.
version_added: "2.4.0"
requirements:
- Whitelisting this callback plugin.
- An Azure log analytics work space has been established.
options:
workspace_id:
description: Workspace ID of the Azure log analytics workspace.
required: true
env:
- name: WORKSPACE_ID
ini:
- section: callback_loganalytics
key: workspace_id
shared_key:
description: Shared key to connect to Azure log analytics workspace.
required: true
env:
- name: WORKSPACE_SHARED_KEY
ini:
- section: callback_loganalytics
key: shared_key
'''
EXAMPLES = '''
examples: |
Whitelist the plugin in ansible.cfg:
[defaults]
callback_whitelist = community.general.loganalytics
Set the environment variable:
export WORKSPACE_ID=01234567-0123-0123-0123-01234567890a
export WORKSPACE_SHARED_KEY=dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA==
Or configure the plugin in ansible.cfg in the callback_loganalytics block:
[callback_loganalytics]
workspace_id = 01234567-0123-0123-0123-01234567890a
shared_key = dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA==
'''
import hashlib
import hmac
import base64
import logging
import json
import uuid
import socket
import getpass
from datetime import datetime
from os.path import basename
from ansible.module_utils.urls import open_url
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
class AzureLogAnalyticsSource(object):
    """Collects run-level metadata and ships per-task events to an Azure
    Log Analytics workspace via the HTTP Data Collector API."""

    def __init__(self):
        # Run-level metadata captured once and attached to every event.
        self.session = str(uuid.uuid4())
        self.host = socket.gethostname()
        self.user = getpass.getuser()
        self.ansible_check_mode = False
        self.ansible_playbook = ""
        self.ansible_version = ""
        self.extra_vars = ""

    def __build_signature(self, date, workspace_id, shared_key, content_length):
        # Build authorisation signature for Azure log analytics API call
        string_to_sign = "POST\n{0}\napplication/json\nx-ms-date:{1}\n/api/logs".format(
            str(content_length), date)
        key_bytes = base64.b64decode(shared_key)
        digest = hmac.new(
            key_bytes, string_to_sign.encode('utf-8'), digestmod=hashlib.sha256).digest()
        encoded = base64.b64encode(digest).decode('utf-8')
        return "SharedKey {0}:{1}".format(workspace_id, encoded)

    def __build_workspace_url(self, workspace_id):
        # Data Collector API endpoint for the given workspace.
        return "https://{0}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01".format(workspace_id)

    def __rfc1123date(self):
        # RFC 1123 date required by the x-ms-date header.
        return datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')

    def send_event(self, workspace_id, shared_key, state, result, runtime):
        """Post a single task result to the workspace.

        state is the terminal task status (e.g. 'OK', 'FAILED'); runtime is
        the task duration in seconds.
        """
        task_args = result._task_fields['args']
        if task_args.get('_ansible_check_mode') is True:
            self.ansible_check_mode = True

        if task_args.get('_ansible_version'):
            self.ansible_version = task_args.get('_ansible_version')

        ansible_role = str(result._task._role) if result._task._role else None

        data = {
            'uuid': result._task._uuid,
            'session': self.session,
            'status': state,
            'timestamp': self.__rfc1123date(),
            'host': self.host,
            'user': self.user,
            'runtime': runtime,
            'ansible_version': self.ansible_version,
            'ansible_check_mode': self.ansible_check_mode,
            'ansible_host': result._host.name,
            'ansible_playbook': self.ansible_playbook,
            'ansible_role': ansible_role,
            'ansible_task': result._task_fields,
            'ansible_result': result._result,
            'extra_vars': self.extra_vars,
        }

        # Removing args since it can contain sensitive data
        if 'args' in data['ansible_task']:
            data['ansible_task'].pop('args')
        if 'content' in data['ansible_result']:
            data['ansible_result'].pop('content')

        # Preparing the playbook logs as JSON format and send to Azure log analytics
        payload = json.dumps({'event': data}, cls=AnsibleJSONEncoder, sort_keys=True)
        rfc1123date = self.__rfc1123date()
        signature = self.__build_signature(
            rfc1123date, workspace_id, shared_key, len(payload))
        open_url(
            self.__build_workspace_url(workspace_id),
            payload,
            headers={
                'content-type': 'application/json',
                'Authorization': signature,
                'Log-Type': 'ansible_playbook',
                'x-ms-date': rfc1123date
            },
            method='POST'
        )
class CallbackModule(CallbackBase):
    """Aggregate callback plugin that forwards every terminal task result
    (OK / SKIPPED / FAILED / UNREACHABLE) to Azure Log Analytics."""

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'loganalytics'
    CALLBACK_NEEDS_WHITELIST = True

    def __init__(self, display=None):
        super(CallbackModule, self).__init__(display=display)
        # Task start timestamps, keyed by task uuid, used to compute runtime.
        self.start_datetimes = {}
        self.workspace_id = None
        self.shared_key = None
        self.loganalytics = AzureLogAnalyticsSource()

    def _seconds_since_start(self, result):
        # Wall-clock seconds elapsed since the matching task started.
        started = self.start_datetimes[result._task._uuid]
        return (datetime.utcnow() - started).total_seconds()

    def _emit(self, state, result):
        # Single forwarding path shared by all result hooks.
        self.loganalytics.send_event(
            self.workspace_id,
            self.shared_key,
            state,
            result,
            self._seconds_since_start(result)
        )

    def set_options(self, task_keys=None, var_options=None, direct=None):
        super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
        self.workspace_id = self.get_option('workspace_id')
        self.shared_key = self.get_option('shared_key')

    def v2_playbook_on_play_start(self, play):
        # Capture extra vars so they can be attached to every event.
        self.loganalytics.extra_vars = play.get_variable_manager().extra_vars

    def v2_playbook_on_start(self, playbook):
        self.loganalytics.ansible_playbook = basename(playbook._file_name)

    def v2_playbook_on_task_start(self, task, is_conditional):
        self.start_datetimes[task._uuid] = datetime.utcnow()

    def v2_playbook_on_handler_task_start(self, task):
        self.start_datetimes[task._uuid] = datetime.utcnow()

    def v2_runner_on_ok(self, result, **kwargs):
        self._emit('OK', result)

    def v2_runner_on_skipped(self, result, **kwargs):
        self._emit('SKIPPED', result)

    def v2_runner_on_failed(self, result, **kwargs):
        self._emit('FAILED', result)

    def runner_on_async_failed(self, result, **kwargs):
        self._emit('FAILED', result)

    def v2_runner_on_unreachable(self, result, **kwargs):
        self._emit('UNREACHABLE', result)

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2018, Samir Musali <samir.musali@logdna.com> # (c) 2018, Samir Musali <samir.musali@logdna.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -78,7 +77,7 @@ def get_mac():
# Getting hostname of system: # Getting hostname of system:
def get_hostname(): def get_hostname():
return str(socket.gethostname()).split('.local', 1)[0] return str(socket.gethostname()).split('.local')[0]
# Getting IP of system: # Getting IP of system:

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2015, Logentries.com, Jimmy Tang <jimmy.tang@logentries.com> # (c) 2015, Logentries.com, Jimmy Tang <jimmy.tang@logentries.com>
# (c) 2017 Ansible Project # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -112,7 +111,7 @@ try:
except ImportError: except ImportError:
HAS_FLATDICT = False HAS_FLATDICT = False
from ansible.module_utils.common.text.converters import to_bytes, to_text from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins.callback import CallbackBase from ansible.plugins.callback import CallbackBase
# Todo: # Todo:

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (C) 2020, Yevhen Khmelenko <ujenmr@gmail.com> # (C) 2020, Yevhen Khmelenko <ujenmr@gmail.com>
# (C) 2017 Ansible Project # (C) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

View File

@@ -62,7 +62,7 @@ import re
import smtplib import smtplib
from ansible.module_utils.six import string_types from ansible.module_utils.six import string_types
from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils._text import to_bytes
from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase from ansible.plugins.callback import CallbackBase

View File

@@ -10,23 +10,22 @@ DOCUMENTATION = '''
name: nrdp name: nrdp
type: notification type: notification
author: "Remi VERCHERE (@rverchere)" author: "Remi VERCHERE (@rverchere)"
short_description: Post task results to a Nagios server through nrdp short_description: post task result to a nagios server through nrdp
description: description:
- This callback send playbook result to Nagios. - this callback send playbook result to nagios
- Nagios shall use NRDP to recive passive events. - nagios shall use NRDP to recive passive events
- The passive check is sent to a dedicated host/service for Ansible. - the passive check is sent to a dedicated host/service for ansible
options: options:
url: url:
description: URL of the nrdp server. description: url of the nrdp server
required: true required: True
env: env:
- name : NRDP_URL - name : NRDP_URL
ini: ini:
- section: callback_nrdp - section: callback_nrdp
key: url key: url
type: string
validate_certs: validate_certs:
description: Validate the SSL certificate of the nrdp server. (Used for HTTPS URLs.) description: (bool) validate the SSL certificate of the nrdp server. (For HTTPS url)
env: env:
- name: NRDP_VALIDATE_CERTS - name: NRDP_VALIDATE_CERTS
ini: ini:
@@ -34,36 +33,32 @@ DOCUMENTATION = '''
key: validate_nrdp_certs key: validate_nrdp_certs
- section: callback_nrdp - section: callback_nrdp
key: validate_certs key: validate_certs
type: boolean default: False
default: false
aliases: [ validate_nrdp_certs ] aliases: [ validate_nrdp_certs ]
token: token:
description: Token to be allowed to push nrdp events. description: token to be allowed to push nrdp events
required: true required: True
env: env:
- name: NRDP_TOKEN - name: NRDP_TOKEN
ini: ini:
- section: callback_nrdp - section: callback_nrdp
key: token key: token
type: string
hostname: hostname:
description: Hostname where the passive check is linked to. description: hostname where the passive check is linked to
required: true required: True
env: env:
- name : NRDP_HOSTNAME - name : NRDP_HOSTNAME
ini: ini:
- section: callback_nrdp - section: callback_nrdp
key: hostname key: hostname
type: string
servicename: servicename:
description: Service where the passive check is linked to. description: service where the passive check is linked to
required: true required: True
env: env:
- name : NRDP_SERVICENAME - name : NRDP_SERVICENAME
ini: ini:
- section: callback_nrdp - section: callback_nrdp
key: servicename key: servicename
type: string
''' '''
import os import os

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2017 Ansible Project # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

View File

@@ -1,423 +0,0 @@
# (C) 2021, Victor Martinez <VictorMartinezRubio@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Victor Martinez (@v1v) <VictorMartinezRubio@gmail.com>
name: opentelemetry
type: notification
short_description: Create distributed traces with OpenTelemetry
version_added: 3.7.0
description:
- This callback creates distributed traces for each Ansible task with OpenTelemetry.
- You can configure the OpenTelemetry exporter and SDK with environment variables.
- See U(https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html).
- See U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#opentelemetry-sdk-environment-variables).
options:
hide_task_arguments:
default: false
type: bool
description:
- Hide the arguments for a task.
env:
- name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS
otel_service_name:
default: ansible
type: str
description:
- The service name resource attribute.
env:
- name: OTEL_SERVICE_NAME
traceparent:
default: None
type: str
description:
- The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header).
env:
- name: TRACEPARENT
requirements:
- opentelemetry-api (python lib)
- opentelemetry-exporter-otlp (python lib)
- opentelemetry-sdk (python lib)
'''
EXAMPLES = '''
examples: |
Enable the plugin in ansible.cfg:
[defaults]
callbacks_enabled = community.general.opentelemetry
Set the environment variable:
export OTEL_EXPORTER_OTLP_ENDPOINT=<your endpoint (OTLP/HTTP)>
export OTEL_EXPORTER_OTLP_HEADERS="authorization=Bearer your_otel_token"
export OTEL_SERVICE_NAME=your_service_name
'''
import getpass
import os
import socket
import sys
import time
import uuid
from os.path import basename
from ansible.errors import AnsibleError
from ansible.module_utils.six import raise_from
from ansible.plugins.callback import CallbackBase
try:
from opentelemetry import trace
from opentelemetry.trace import SpanKind
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.trace.status import Status, StatusCode
from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import (
ConsoleSpanExporter,
SimpleSpanProcessor,
BatchSpanProcessor
)
from opentelemetry.util._time import _time_ns
except ImportError as imp_exc:
OTEL_LIBRARY_IMPORT_ERROR = imp_exc
else:
OTEL_LIBRARY_IMPORT_ERROR = None
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError as imp_exc:
ORDER_LIBRARY_IMPORT_ERROR = imp_exc
else:
ORDER_LIBRARY_IMPORT_ERROR = None
else:
ORDER_LIBRARY_IMPORT_ERROR = None
class TaskData:
    """Per-task record: identity, start time (ns), and the result reported
    by each host that ran the task."""

    def __init__(self, uuid, name, path, play, action, args):
        self.uuid = uuid
        self.name = name
        self.path = path
        self.play = play
        self.action = action
        self.args = args
        self.host_data = OrderedDict()
        # time.time_ns() only exists on Python >= 3.7; older interpreters use
        # the private OpenTelemetry shim imported at module level.
        if sys.version_info >= (3, 7):
            self.start = time.time_ns()
        else:
            self.start = _time_ns()

    def add_host(self, host):
        """Attach a HostData entry; repeated 'included' results for the same
        host are concatenated, other duplicates are ignored."""
        previous = self.host_data.get(host.uuid)
        if previous is not None:
            if host.status != 'included':
                # A host reports only one terminal result; keep the first.
                return
            # concatenate task include output from multiple items
            host.result = '%s\n%s' % (previous.result, host.result)
        self.host_data[host.uuid] = host
class HostData:
    """Result of one task on one host, stamped with its finish time (ns)."""

    def __init__(self, uuid, name, status, result):
        self.uuid = uuid
        self.name = name
        self.status = status
        self.result = result
        # time.time_ns() requires Python >= 3.7; older interpreters fall back
        # to the private OpenTelemetry shim imported at module level.
        if sys.version_info >= (3, 7):
            self.finish = time.time_ns()
        else:
            self.finish = _time_ns()
class OpenTelemetrySource(object):
    """Accumulates TaskData/HostData during a playbook run and converts them
    into OpenTelemetry spans when the run finishes."""

    def __init__(self, display):
        # Run-level metadata attached to the parent span.
        self.ansible_playbook = ""
        self.ansible_version = None
        self.session = str(uuid.uuid4())
        self.host = socket.gethostname()
        try:
            self.ip_address = socket.gethostbyname(socket.gethostname())
        except Exception as e:
            # Hostname may not resolve (no DNS); the trace simply omits the IP.
            self.ip_address = None
        self.user = getpass.getuser()
        self._display = display

    def traceparent_context(self, traceparent):
        # Build an OpenTelemetry context from a W3C `traceparent` header value
        # so this run's trace can join an existing distributed trace.
        carrier = dict()
        carrier['traceparent'] = traceparent
        return TraceContextTextMapPropagator().extract(carrier=carrier)

    def start_task(self, tasks_data, hide_task_arguments, play_name, task):
        """ record the start of a task for one or more hosts """
        uuid = task._uuid

        # Several start hooks can fire for the same task; record it only once.
        if uuid in tasks_data:
            return

        name = task.get_name().strip()
        path = task.get_path()
        action = task.action
        args = None

        # Omit arguments when the task is no_log or the user asked to hide them.
        if not task.no_log and not hide_task_arguments:
            args = ', '.join(('%s=%s' % a for a in task.args.items()))

        tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args)

    def finish_task(self, tasks_data, status, result):
        """ record the results of a task for a single host """
        task_uuid = result._task._uuid

        if hasattr(result, '_host') and result._host is not None:
            host_uuid = result._host._uuid
            host_name = result._host.name
        else:
            # v2_playbook_on_include passes an object with no host attached.
            host_uuid = 'include'
            host_name = 'include'

        task = tasks_data[task_uuid]

        # Capture the Ansible version once, from the first result carrying it.
        if self.ansible_version is None and result._task_fields['args'].get('_ansible_version'):
            self.ansible_version = result._task_fields['args'].get('_ansible_version')

        task.add_host(HostData(host_uuid, host_name, status, result))

    def generate_distributed_traces(self, otel_service_name, ansible_playbook, tasks_data, status, traceparent):
        """ generate distributed traces from the collected TaskData and HostData """
        tasks = []
        # The first recorded task's start time becomes the parent span's start.
        parent_start_time = None
        for task_uuid, task in tasks_data.items():
            if parent_start_time is None:
                parent_start_time = task.start
            tasks.append(task)

        trace.set_tracer_provider(
            TracerProvider(
                resource=Resource.create({SERVICE_NAME: otel_service_name})
            )
        )

        processor = BatchSpanProcessor(OTLPSpanExporter())

        trace.get_tracer_provider().add_span_processor(processor)

        tracer = trace.get_tracer(__name__)

        # Parent span covers the whole playbook; child spans are ended manually
        # (end_on_exit=False) so each can carry its host's real finish time.
        with tracer.start_as_current_span(ansible_playbook, context=self.traceparent_context(traceparent),
                                          start_time=parent_start_time, kind=SpanKind.SERVER) as parent:
            parent.set_status(status)
            # Populate trace metadata attributes
            if self.ansible_version is not None:
                parent.set_attribute("ansible.version", self.ansible_version)
            parent.set_attribute("ansible.session", self.session)
            parent.set_attribute("ansible.host.name", self.host)
            if self.ip_address is not None:
                parent.set_attribute("ansible.host.ip", self.ip_address)
            parent.set_attribute("ansible.host.user", self.user)
            for task in tasks:
                for host_uuid, host_data in task.host_data.items():
                    with tracer.start_as_current_span(task.name, start_time=task.start, end_on_exit=False) as span:
                        self.update_span_data(task, host_data, span)

    def update_span_data(self, task_data, host_data, span):
        """ update the span with the given TaskData and HostData """
        name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
        message = 'success'
        status = Status(status_code=StatusCode.OK)
        if host_data.status == 'included':
            # Included files carry no module result; treat as success.
            rc = 0
        else:
            res = host_data.result._result
            rc = res.get('rc', 0)
            if host_data.status == 'failed':
                if 'exception' in res:
                    # Keep only the last line of the traceback as the message.
                    message = res['exception'].strip().split('\n')[-1]
                elif 'msg' in res:
                    message = res['msg']
                else:
                    message = 'failed'
                status = Status(status_code=StatusCode.ERROR, description=message)
                # Record an exception with the task message
                span.record_exception(BaseException(message))
            elif host_data.status == 'skipped':
                if 'skip_reason' in res:
                    message = res['skip_reason']
                else:
                    message = 'skipped'
                status = Status(status_code=StatusCode.UNSET)

        span.set_status(status)
        self.set_span_attribute(span, "ansible.task.args", task_data.args)
        self.set_span_attribute(span, "ansible.task.module", task_data.action)
        self.set_span_attribute(span, "ansible.task.message", message)
        self.set_span_attribute(span, "ansible.task.name", name)
        self.set_span_attribute(span, "ansible.task.result", rc)
        self.set_span_attribute(span, "ansible.task.host.name", host_data.name)
        self.set_span_attribute(span, "ansible.task.host.status", host_data.status)
        # End the span at the host's recorded finish time, not "now".
        span.end(end_time=host_data.finish)

    def set_span_attribute(self, span, attributeName, attributeValue):
        """ update the span attribute with the given attribute and value if not None """
        if span is None and self._display is not None:
            self._display.warning('span object is None. Please double check if that is expected.')
        else:
            if attributeValue is not None:
                span.set_attribute(attributeName, attributeValue)
class CallbackModule(CallbackBase):
    """
    This callback creates distributed traces.
    """

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'community.general.opentelemetry'
    CALLBACK_NEEDS_ENABLED = True

    def __init__(self, display=None):
        super(CallbackModule, self).__init__(display=display)
        self.hide_task_arguments = None
        self.otel_service_name = None
        self.ansible_playbook = None
        self.play_name = None
        self.tasks_data = None
        self.errors = 0
        self.disabled = False
        self.traceparent = False

        # Fail loudly at load time if the required libraries are missing.
        if OTEL_LIBRARY_IMPORT_ERROR:
            raise_from(
                AnsibleError('The `opentelemetry-api`, `opentelemetry-exporter-otlp` or `opentelemetry-sdk` must be installed to use this plugin'),
                OTEL_LIBRARY_IMPORT_ERROR)

        if ORDER_LIBRARY_IMPORT_ERROR:
            raise_from(
                AnsibleError('The `ordereddict` must be installed to use this plugin'),
                ORDER_LIBRARY_IMPORT_ERROR)
        else:
            self.tasks_data = OrderedDict()

        self.opentelemetry = OpenTelemetrySource(display=self._display)

    def _track_start(self, task):
        # Shared entry point for every "task is starting" hook.
        self.opentelemetry.start_task(
            self.tasks_data,
            self.hide_task_arguments,
            self.play_name,
            task
        )

    def _track_finish(self, status, result):
        # Shared entry point for every "task produced a result" hook.
        self.opentelemetry.finish_task(
            self.tasks_data,
            status,
            result
        )

    def set_options(self, task_keys=None, var_options=None, direct=None):
        super(CallbackModule, self).set_options(task_keys=task_keys,
                                                var_options=var_options,
                                                direct=direct)
        self.hide_task_arguments = self.get_option('hide_task_arguments')
        self.otel_service_name = self.get_option('otel_service_name')
        if not self.otel_service_name:
            self.otel_service_name = 'ansible'
        # See https://github.com/open-telemetry/opentelemetry-specification/issues/740
        self.traceparent = self.get_option('traceparent')

    def v2_playbook_on_start(self, playbook):
        self.ansible_playbook = basename(playbook._file_name)

    def v2_playbook_on_play_start(self, play):
        self.play_name = play.get_name()

    def v2_runner_on_no_hosts(self, task):
        self._track_start(task)

    def v2_playbook_on_task_start(self, task, is_conditional):
        self._track_start(task)

    def v2_playbook_on_cleanup_task_start(self, task):
        self._track_start(task)

    def v2_playbook_on_handler_task_start(self, task):
        self._track_start(task)

    def v2_runner_on_failed(self, result, ignore_errors=False):
        self.errors += 1
        self._track_finish('failed', result)

    def v2_runner_on_ok(self, result):
        self._track_finish('ok', result)

    def v2_runner_on_skipped(self, result):
        self._track_finish('skipped', result)

    def v2_playbook_on_include(self, included_file):
        self._track_finish('included', included_file)

    def v2_playbook_on_stats(self, stats):
        # Whole-run status: any failed task marks the parent span as an error.
        if self.errors == 0:
            run_status = Status(status_code=StatusCode.OK)
        else:
            run_status = Status(status_code=StatusCode.ERROR)
        self.opentelemetry.generate_distributed_traces(
            self.otel_service_name,
            self.ansible_playbook,
            self.tasks_data,
            run_status,
            self.traceparent
        )

    def v2_runner_on_async_failed(self, result, **kwargs):
        self.errors += 1

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan, <michael.dehaan@gmail.com> # (c) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) Fastly, inc 2016 # (c) Fastly, inc 2016
# (c) 2017 Ansible Project # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -41,17 +40,8 @@ import difflib
from ansible import constants as C from ansible import constants as C
from ansible.plugins.callback import CallbackBase from ansible.plugins.callback import CallbackBase
from ansible.module_utils.common.text.converters import to_text from ansible.module_utils._text import to_text
from ansible.utils.color import codeCodes
try:
codeCodes = C.COLOR_CODES
except AttributeError:
# This constant was moved to ansible.constants in
# https://github.com/ansible/ansible/commit/1202dd000f10b0e8959019484f1c3b3f9628fc67
# (will be included in ansible-core 2.11.0). For older Ansible/ansible-base versions,
# we include from the original location.
from ansible.utils.color import codeCodes
DONT_COLORIZE = False DONT_COLORIZE = False
COLORS = { COLORS = {
@@ -68,7 +58,7 @@ COLORS = {
def dict_diff(prv, nxt): def dict_diff(prv, nxt):
"""Return a dict of keys that differ with another config object.""" """Return a dict of keys that differ with another config object."""
keys = set(list(prv.keys()) + list(nxt.keys())) keys = set(prv.keys() + nxt.keys())
result = {} result = {}
for k in keys: for k in keys:
if prv.get(k) != nxt.get(k): if prv.get(k) != nxt.get(k):

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (C) 2014-2015, Matt Martz <matt@sivel.net> # (C) 2014-2015, Matt Martz <matt@sivel.net>
# (C) 2017 Ansible Project # (C) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -59,7 +58,7 @@ import os
import uuid import uuid
from ansible import context from ansible import context
from ansible.module_utils.common.text.converters import to_text from ansible.module_utils._text import to_text
from ansible.module_utils.urls import open_url from ansible.module_utils.urls import open_url
from ansible.plugins.callback import CallbackBase from ansible.plugins.callback import CallbackBase

View File

@@ -68,16 +68,6 @@ DOCUMENTATION = '''
type: bool type: bool
default: false default: false
version_added: 2.0.0 version_added: 2.0.0
batch:
description:
- Correlation ID which can be set across multiple playbook executions.
env:
- name: SPLUNK_BATCH
ini:
- section: callback_splunk
key: batch
type: str
version_added: 3.3.0
''' '''
EXAMPLES = ''' EXAMPLES = '''
@@ -117,7 +107,7 @@ class SplunkHTTPCollectorSource(object):
self.ip_address = socket.gethostbyname(socket.gethostname()) self.ip_address = socket.gethostbyname(socket.gethostname())
self.user = getpass.getuser() self.user = getpass.getuser()
def send_event(self, url, authtoken, validate_certs, include_milliseconds, batch, state, result, runtime): def send_event(self, url, authtoken, validate_certs, include_milliseconds, state, result, runtime):
if result._task_fields['args'].get('_ansible_check_mode') is True: if result._task_fields['args'].get('_ansible_check_mode') is True:
self.ansible_check_mode = True self.ansible_check_mode = True
@@ -136,8 +126,6 @@ class SplunkHTTPCollectorSource(object):
data = {} data = {}
data['uuid'] = result._task._uuid data['uuid'] = result._task._uuid
data['session'] = self.session data['session'] = self.session
if batch is not None:
data['batch'] = batch
data['status'] = state data['status'] = state
if include_milliseconds: if include_milliseconds:
@@ -187,7 +175,6 @@ class CallbackModule(CallbackBase):
self.authtoken = None self.authtoken = None
self.validate_certs = None self.validate_certs = None
self.include_milliseconds = None self.include_milliseconds = None
self.batch = None
self.splunk = SplunkHTTPCollectorSource() self.splunk = SplunkHTTPCollectorSource()
def _runtime(self, result): def _runtime(self, result):
@@ -225,8 +212,6 @@ class CallbackModule(CallbackBase):
self.include_milliseconds = self.get_option('include_milliseconds') self.include_milliseconds = self.get_option('include_milliseconds')
self.batch = self.get_option('batch')
def v2_playbook_on_start(self, playbook): def v2_playbook_on_start(self, playbook):
self.splunk.ansible_playbook = basename(playbook._file_name) self.splunk.ansible_playbook = basename(playbook._file_name)
@@ -242,7 +227,6 @@ class CallbackModule(CallbackBase):
self.authtoken, self.authtoken,
self.validate_certs, self.validate_certs,
self.include_milliseconds, self.include_milliseconds,
self.batch,
'OK', 'OK',
result, result,
self._runtime(result) self._runtime(result)
@@ -254,7 +238,6 @@ class CallbackModule(CallbackBase):
self.authtoken, self.authtoken,
self.validate_certs, self.validate_certs,
self.include_milliseconds, self.include_milliseconds,
self.batch,
'SKIPPED', 'SKIPPED',
result, result,
self._runtime(result) self._runtime(result)
@@ -266,7 +249,6 @@ class CallbackModule(CallbackBase):
self.authtoken, self.authtoken,
self.validate_certs, self.validate_certs,
self.include_milliseconds, self.include_milliseconds,
self.batch,
'FAILED', 'FAILED',
result, result,
self._runtime(result) self._runtime(result)
@@ -278,7 +260,6 @@ class CallbackModule(CallbackBase):
self.authtoken, self.authtoken,
self.validate_certs, self.validate_certs,
self.include_milliseconds, self.include_milliseconds,
self.batch,
'FAILED', 'FAILED',
result, result,
self._runtime(result) self._runtime(result)
@@ -290,7 +271,6 @@ class CallbackModule(CallbackBase):
self.authtoken, self.authtoken,
self.validate_certs, self.validate_certs,
self.include_milliseconds, self.include_milliseconds,
self.batch,
'UNREACHABLE', 'UNREACHABLE',
result, result,
self._runtime(result) self._runtime(result)

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2017 Ansible Project # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Allyson Bowles <@akatch> # Copyright: (c) 2017, Allyson Bowles <@akatch>
# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -23,7 +22,7 @@ DOCUMENTATION = '''
from os.path import basename from os.path import basename
from ansible import constants as C from ansible import constants as C
from ansible import context from ansible import context
from ansible.module_utils.common.text.converters import to_text from ansible.module_utils._text import to_text
from ansible.utils.color import colorize, hostcolor from ansible.utils.color import colorize, hostcolor
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default from ansible.plugins.callback.default import CallbackModule as CallbackModule_default

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2017 Ansible Project # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -26,7 +25,7 @@ import re
import string import string
import sys import sys
from ansible.module_utils.common.text.converters import to_bytes, to_text from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.six import string_types from ansible.module_utils.six import string_types
from ansible.parsing.yaml.dumper import AnsibleDumper from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.callback import CallbackBase, strip_internal_keys, module_response_deepcopy from ansible.plugins.callback import CallbackBase, strip_internal_keys, module_response_deepcopy

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com> # Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# #
# (c) 2013, Maykel Moya <mmoya@speedyrails.com> # (c) 2013, Maykel Moya <mmoya@speedyrails.com>
@@ -55,7 +54,7 @@ from ansible.errors import AnsibleError
from ansible.module_utils.basic import is_executable from ansible.module_utils.basic import is_executable
from ansible.module_utils.common.process import get_bin_path from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.six.moves import shlex_quote from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils.common.text.converters import to_bytes, to_native from ansible.module_utils._text import to_bytes, to_native
from ansible.plugins.connection import ConnectionBase, BUFSIZE from ansible.plugins.connection import ConnectionBase, BUFSIZE
from ansible.utils.display import Display from ansible.utils.display import Display
@@ -63,7 +62,7 @@ display = Display()
class Connection(ConnectionBase): class Connection(ConnectionBase):
""" Local chroot based connections """ ''' Local chroot based connections '''
transport = 'community.general.chroot' transport = 'community.general.chroot'
has_pipelining = True has_pipelining = True
@@ -96,7 +95,7 @@ class Connection(ConnectionBase):
raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot) raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
def _connect(self): def _connect(self):
""" connect to the chroot """ ''' connect to the chroot '''
if os.path.isabs(self.get_option('chroot_exe')): if os.path.isabs(self.get_option('chroot_exe')):
self.chroot_cmd = self.get_option('chroot_exe') self.chroot_cmd = self.get_option('chroot_exe')
else: else:
@@ -111,17 +110,17 @@ class Connection(ConnectionBase):
self._connected = True self._connected = True
def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
""" run a command on the chroot. This is only needed for implementing ''' run a command on the chroot. This is only needed for implementing
put_file() get_file() so that we don't have to read the whole file put_file() get_file() so that we don't have to read the whole file
into memory. into memory.
compared to exec_command() it looses some niceties like being able to compared to exec_command() it looses some niceties like being able to
return the process's exit code immediately. return the process's exit code immediately.
""" '''
executable = self.get_option('executable') executable = self.get_option('executable')
local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd] local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
display.vvv("EXEC %s" % local_cmd, host=self.chroot) display.vvv("EXEC %s" % (local_cmd), host=self.chroot)
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -129,17 +128,16 @@ class Connection(ConnectionBase):
return p return p
def exec_command(self, cmd, in_data=None, sudoable=False): def exec_command(self, cmd, in_data=None, sudoable=False):
""" run a command on the chroot """ ''' run a command on the chroot '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
p = self._buffered_exec_command(cmd) p = self._buffered_exec_command(cmd)
stdout, stderr = p.communicate(in_data) stdout, stderr = p.communicate(in_data)
return p.returncode, stdout, stderr return (p.returncode, stdout, stderr)
@staticmethod def _prefix_login_path(self, remote_path):
def _prefix_login_path(remote_path): ''' Make sure that we put files into a standard path
""" Make sure that we put files into a standard path
If a path is relative, then we need to choose where to put it. If a path is relative, then we need to choose where to put it.
ssh chooses $HOME but we aren't guaranteed that a home dir will ssh chooses $HOME but we aren't guaranteed that a home dir will
@@ -147,13 +145,13 @@ class Connection(ConnectionBase):
This also happens to be the former default. This also happens to be the former default.
Can revisit using $HOME instead if it's a problem Can revisit using $HOME instead if it's a problem
""" '''
if not remote_path.startswith(os.path.sep): if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path) remote_path = os.path.join(os.path.sep, remote_path)
return os.path.normpath(remote_path) return os.path.normpath(remote_path)
def put_file(self, in_path, out_path): def put_file(self, in_path, out_path):
""" transfer a file from local to chroot """ ''' transfer a file from local to chroot '''
super(Connection, self).put_file(in_path, out_path) super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot) display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)
@@ -179,7 +177,7 @@ class Connection(ConnectionBase):
raise AnsibleError("file or module does not exist at: %s" % in_path) raise AnsibleError("file or module does not exist at: %s" % in_path)
def fetch_file(self, in_path, out_path): def fetch_file(self, in_path, out_path):
""" fetch a file from chroot to local """ ''' fetch a file from chroot to local '''
super(Connection, self).fetch_file(in_path, out_path) super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot) display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)
@@ -203,6 +201,6 @@ class Connection(ConnectionBase):
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
def close(self): def close(self):
""" terminate the connection; nothing to do here """ ''' terminate the connection; nothing to do here '''
super(Connection, self).close() super(Connection, self).close()
self._connected = False self._connected = False

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com> # Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com> # Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# Copyright (c) 2013, Michael Scherer <misc@zarb.org> # Copyright (c) 2013, Michael Scherer <misc@zarb.org>
@@ -9,7 +8,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type __metaclass__ = type
DOCUMENTATION = ''' DOCUMENTATION = '''
author: Michael Scherer (@mscherer) <misc@zarb.org> author: Michael Scherer (@msherer) <misc@zarb.org>
name: funcd name: funcd
short_description: Use funcd to connect to target short_description: Use funcd to connect to target
description: description:
@@ -38,14 +37,13 @@ import tempfile
import shutil import shutil
from ansible.errors import AnsibleError from ansible.errors import AnsibleError
from ansible.plugins.connection import ConnectionBase
from ansible.utils.display import Display from ansible.utils.display import Display
display = Display() display = Display()
class Connection(ConnectionBase): class Connection(object):
""" Func-based connections """ ''' Func-based connections '''
has_pipelining = False has_pipelining = False
@@ -54,7 +52,6 @@ class Connection(ConnectionBase):
self.host = host self.host = host
# port is unused, this go on func # port is unused, this go on func
self.port = port self.port = port
self.client = None
def connect(self, port=None): def connect(self, port=None):
if not HAVE_FUNC: if not HAVE_FUNC:
@@ -64,32 +61,31 @@ class Connection(ConnectionBase):
return self return self
def exec_command(self, cmd, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): def exec_command(self, cmd, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
""" run a command on the remote minion """ ''' run a command on the remote minion '''
if in_data: if in_data:
raise AnsibleError("Internal Error: this module does not support optimized module pipelining") raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
# totally ignores privlege escalation # totally ignores privlege escalation
display.vvv("EXEC %s" % cmd, host=self.host) display.vvv("EXEC %s" % (cmd), host=self.host)
p = self.client.command.run(cmd)[self.host] p = self.client.command.run(cmd)[self.host]
return p[0], p[1], p[2] return (p[0], p[1], p[2])
@staticmethod def _normalize_path(self, path, prefix):
def _normalize_path(path, prefix):
if not path.startswith(os.path.sep): if not path.startswith(os.path.sep):
path = os.path.join(os.path.sep, path) path = os.path.join(os.path.sep, path)
normpath = os.path.normpath(path) normpath = os.path.normpath(path)
return os.path.join(prefix, normpath[1:]) return os.path.join(prefix, normpath[1:])
def put_file(self, in_path, out_path): def put_file(self, in_path, out_path):
""" transfer a file from local to remote """ ''' transfer a file from local to remote '''
out_path = self._normalize_path(out_path, '/') out_path = self._normalize_path(out_path, '/')
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
self.client.local.copyfile.send(in_path, out_path) self.client.local.copyfile.send(in_path, out_path)
def fetch_file(self, in_path, out_path): def fetch_file(self, in_path, out_path):
""" fetch a file from remote to local """ ''' fetch a file from remote to local '''
in_path = self._normalize_path(in_path, '/') in_path = self._normalize_path(in_path, '/')
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
@@ -102,5 +98,5 @@ class Connection(ConnectionBase):
shutil.rmtree(tmpdir) shutil.rmtree(tmpdir)
def close(self): def close(self):
""" terminate the connection; nothing to do here """ ''' terminate the connection; nothing to do here '''
pass pass

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Based on jail.py # Based on jail.py
# (c) 2013, Michael Scherer <misc@zarb.org> # (c) 2013, Michael Scherer <misc@zarb.org>
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com> # (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
@@ -33,7 +32,7 @@ DOCUMENTATION = '''
import subprocess import subprocess
from ansible_collections.community.general.plugins.connection.jail import Connection as Jail from ansible_collections.community.general.plugins.connection.jail import Connection as Jail
from ansible.module_utils.common.text.converters import to_native from ansible.module_utils._text import to_native
from ansible.errors import AnsibleError from ansible.errors import AnsibleError
from ansible.utils.display import Display from ansible.utils.display import Display
@@ -41,7 +40,7 @@ display = Display()
class Connection(Jail): class Connection(Jail):
""" Local iocage based connections """ ''' Local iocage based connections '''
transport = 'community.general.iocage' transport = 'community.general.iocage'

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Based on local.py by Michael DeHaan <michael.dehaan@gmail.com> # Based on local.py by Michael DeHaan <michael.dehaan@gmail.com>
# and chroot.py by Maykel Moya <mmoya@speedyrails.com> # and chroot.py by Maykel Moya <mmoya@speedyrails.com>
# Copyright (c) 2013, Michael Scherer <misc@zarb.org> # Copyright (c) 2013, Michael Scherer <misc@zarb.org>
@@ -36,10 +35,11 @@ import os
import os.path import os.path
import subprocess import subprocess
import traceback import traceback
import ansible.constants as C
from ansible.errors import AnsibleError from ansible.errors import AnsibleError
from ansible.module_utils.six.moves import shlex_quote from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase, BUFSIZE from ansible.plugins.connection import ConnectionBase, BUFSIZE
from ansible.utils.display import Display from ansible.utils.display import Display
@@ -47,7 +47,7 @@ display = Display()
class Connection(ConnectionBase): class Connection(ConnectionBase):
""" Local BSD Jail based connections """ ''' Local BSD Jail based connections '''
modified_jailname_key = 'conn_jail_name' modified_jailname_key = 'conn_jail_name'
@@ -90,20 +90,20 @@ class Connection(ConnectionBase):
return to_text(stdout, errors='surrogate_or_strict').split() return to_text(stdout, errors='surrogate_or_strict').split()
def _connect(self): def _connect(self):
""" connect to the jail; nothing to do here """ ''' connect to the jail; nothing to do here '''
super(Connection, self)._connect() super(Connection, self)._connect()
if not self._connected: if not self._connected:
display.vvv(u"ESTABLISH JAIL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self.jail) display.vvv(u"ESTABLISH JAIL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self.jail)
self._connected = True self._connected = True
def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
""" run a command on the jail. This is only needed for implementing ''' run a command on the jail. This is only needed for implementing
put_file() get_file() so that we don't have to read the whole file put_file() get_file() so that we don't have to read the whole file
into memory. into memory.
compared to exec_command() it looses some niceties like being able to compared to exec_command() it looses some niceties like being able to
return the process's exit code immediately. return the process's exit code immediately.
""" '''
local_cmd = [self.jexec_cmd] local_cmd = [self.jexec_cmd]
set_env = '' set_env = ''
@@ -123,17 +123,16 @@ class Connection(ConnectionBase):
return p return p
def exec_command(self, cmd, in_data=None, sudoable=False): def exec_command(self, cmd, in_data=None, sudoable=False):
""" run a command on the jail """ ''' run a command on the jail '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
p = self._buffered_exec_command(cmd) p = self._buffered_exec_command(cmd)
stdout, stderr = p.communicate(in_data) stdout, stderr = p.communicate(in_data)
return p.returncode, stdout, stderr return (p.returncode, stdout, stderr)
@staticmethod def _prefix_login_path(self, remote_path):
def _prefix_login_path(remote_path): ''' Make sure that we put files into a standard path
""" Make sure that we put files into a standard path
If a path is relative, then we need to choose where to put it. If a path is relative, then we need to choose where to put it.
ssh chooses $HOME but we aren't guaranteed that a home dir will ssh chooses $HOME but we aren't guaranteed that a home dir will
@@ -141,13 +140,13 @@ class Connection(ConnectionBase):
This also happens to be the former default. This also happens to be the former default.
Can revisit using $HOME instead if it's a problem Can revisit using $HOME instead if it's a problem
""" '''
if not remote_path.startswith(os.path.sep): if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path) remote_path = os.path.join(os.path.sep, remote_path)
return os.path.normpath(remote_path) return os.path.normpath(remote_path)
def put_file(self, in_path, out_path): def put_file(self, in_path, out_path):
""" transfer a file from local to jail """ ''' transfer a file from local to jail '''
super(Connection, self).put_file(in_path, out_path) super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail) display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail)
@@ -173,7 +172,7 @@ class Connection(ConnectionBase):
raise AnsibleError("file or module does not exist at: %s" % in_path) raise AnsibleError("file or module does not exist at: %s" % in_path)
def fetch_file(self, in_path, out_path): def fetch_file(self, in_path, out_path):
""" fetch a file from jail to local """ ''' fetch a file from jail to local '''
super(Connection, self).fetch_file(in_path, out_path) super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail) display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail)
@@ -197,6 +196,6 @@ class Connection(ConnectionBase):
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr))) raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr)))
def close(self): def close(self):
""" terminate the connection; nothing to do here """ ''' terminate the connection; nothing to do here '''
super(Connection, self).close() super(Connection, self).close()
self._connected = False self._connected = False

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2015, Joerg Thalheim <joerg@higgsboson.tk> # (c) 2015, Joerg Thalheim <joerg@higgsboson.tk>
# Copyright (c) 2017 Ansible Project # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -43,13 +42,14 @@ try:
except ImportError: except ImportError:
pass pass
from ansible import constants as C
from ansible import errors from ansible import errors
from ansible.module_utils.common.text.converters import to_bytes, to_native from ansible.module_utils._text import to_bytes, to_native
from ansible.plugins.connection import ConnectionBase from ansible.plugins.connection import ConnectionBase
class Connection(ConnectionBase): class Connection(ConnectionBase):
""" Local lxc based connections """ ''' Local lxc based connections '''
transport = 'community.general.lxc' transport = 'community.general.lxc'
has_pipelining = True has_pipelining = True
@@ -62,7 +62,7 @@ class Connection(ConnectionBase):
self.container = None self.container = None
def _connect(self): def _connect(self):
""" connect to the lxc; nothing to do here """ ''' connect to the lxc; nothing to do here '''
super(Connection, self)._connect() super(Connection, self)._connect()
if not HAS_LIBLXC: if not HAS_LIBLXC:
@@ -77,8 +77,7 @@ class Connection(ConnectionBase):
if self.container.state == "STOPPED": if self.container.state == "STOPPED":
raise errors.AnsibleError("%s is not running" % self.container_name) raise errors.AnsibleError("%s is not running" % self.container_name)
@staticmethod def _communicate(self, pid, in_data, stdin, stdout, stderr):
def _communicate(pid, in_data, stdin, stdout, stderr):
buf = {stdout: [], stderr: []} buf = {stdout: [], stderr: []}
read_fds = [stdout, stderr] read_fds = [stdout, stderr]
if in_data: if in_data:
@@ -87,7 +86,7 @@ class Connection(ConnectionBase):
write_fds = [] write_fds = []
while len(read_fds) > 0 or len(write_fds) > 0: while len(read_fds) > 0 or len(write_fds) > 0:
try: try:
ready_reads, ready_writes, dummy = select.select(read_fds, write_fds, []) ready_reads, ready_writes, _ = select.select(read_fds, write_fds, [])
except select.error as e: except select.error as e:
if e.args[0] == errno.EINTR: if e.args[0] == errno.EINTR:
continue continue
@@ -112,7 +111,7 @@ class Connection(ConnectionBase):
return fd return fd
def exec_command(self, cmd, in_data=None, sudoable=False): def exec_command(self, cmd, in_data=None, sudoable=False):
""" run a command on the chroot """ ''' run a command on the chroot '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
# python2-lxc needs bytes. python3-lxc needs text. # python2-lxc needs bytes. python3-lxc needs text.

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2016 Matt Clay <matt@mystile.com> # (c) 2016 Matt Clay <matt@mystile.com>
# (c) 2017 Ansible Project # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -47,7 +46,7 @@ from distutils.spawn import find_executable
from subprocess import Popen, PIPE from subprocess import Popen, PIPE
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.module_utils.common.text.converters import to_bytes, to_text from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins.connection import ConnectionBase from ansible.plugins.connection import ConnectionBase

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Based on the buildah connection plugin # Based on the buildah connection plugin
# Copyright (c) 2017 Ansible Project # Copyright (c) 2017 Ansible Project
# 2018 Kushal Das # 2018 Kushal Das
@@ -38,9 +37,15 @@ DOCUMENTATION = '''
# - name: hosts # - name: hosts
''' '''
import shlex
import shutil
import os
import base64
import subprocess import subprocess
from ansible.module_utils.common.text.converters import to_bytes import ansible.constants as C
from ansible.module_utils._text import to_bytes, to_native
from ansible.plugins.connection import ConnectionBase, ensure_connect from ansible.plugins.connection import ConnectionBase, ensure_connect
from ansible.errors import AnsibleConnectionFailure from ansible.errors import AnsibleConnectionFailure
from ansible.utils.display import Display from ansible.utils.display import Display

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com> # Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com> # Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# Based on func.py # Based on func.py
@@ -17,11 +16,14 @@ DOCUMENTATION = '''
- This allows you to use existing Saltstack infrastructure to connect to targets. - This allows you to use existing Saltstack infrastructure to connect to targets.
''' '''
import re
import os import os
import base64 import pty
import codecs
import subprocess
from ansible import errors from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins.connection import ConnectionBase from ansible.module_utils.six.moves import cPickle
HAVE_SALTSTACK = False HAVE_SALTSTACK = False
try: try:
@@ -30,9 +32,13 @@ try:
except ImportError: except ImportError:
pass pass
import os
from ansible import errors
from ansible.plugins.connection import ConnectionBase
class Connection(ConnectionBase): class Connection(ConnectionBase):
""" Salt-based connections """ ''' Salt-based connections '''
has_pipelining = False has_pipelining = False
# while the name of the product is salt, naming that module salt cause # while the name of the product is salt, naming that module salt cause
@@ -51,31 +57,30 @@ class Connection(ConnectionBase):
self._connected = True self._connected = True
return self return self
def exec_command(self, cmd, in_data=None, sudoable=False): def exec_command(self, cmd, sudoable=False, in_data=None):
""" run a command on the remote minion """ ''' run a command on the remote minion '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
if in_data: if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
self._display.vvv("EXEC %s" % cmd, host=self.host) self._display.vvv("EXEC %s" % (cmd), host=self.host)
# need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077 # need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077
res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd]) res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd])
if self.host not in res: if self.host not in res:
raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host) raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host)
p = res[self.host] p = res[self.host]
return p['retcode'], p['stdout'], p['stderr'] return (p['retcode'], p['stdout'], p['stderr'])
@staticmethod def _normalize_path(self, path, prefix):
def _normalize_path(path, prefix):
if not path.startswith(os.path.sep): if not path.startswith(os.path.sep):
path = os.path.join(os.path.sep, path) path = os.path.join(os.path.sep, path)
normpath = os.path.normpath(path) normpath = os.path.normpath(path)
return os.path.join(prefix, normpath[1:]) return os.path.join(prefix, normpath[1:])
def put_file(self, in_path, out_path): def put_file(self, in_path, out_path):
""" transfer a file from local to remote """ ''' transfer a file from local to remote '''
super(Connection, self).put_file(in_path, out_path) super(Connection, self).put_file(in_path, out_path)
@@ -83,11 +88,11 @@ class Connection(ConnectionBase):
self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
with open(in_path, 'rb') as in_fh: with open(in_path, 'rb') as in_fh:
content = in_fh.read() content = in_fh.read()
self.client.cmd(self.host, 'hashutil.base64_decodefile', [base64.b64encode(content), out_path]) self.client.cmd(self.host, 'hashutil.base64_decodefile', [codecs.encode(content, 'base64'), out_path])
# TODO test it # TODO test it
def fetch_file(self, in_path, out_path): def fetch_file(self, in_path, out_path):
""" fetch a file from remote to local """ ''' fetch a file from remote to local '''
super(Connection, self).fetch_file(in_path, out_path) super(Connection, self).fetch_file(in_path, out_path)
@@ -97,5 +102,5 @@ class Connection(ConnectionBase):
open(out_path, 'wb').write(content) open(out_path, 'wb').write(content)
def close(self): def close(self):
""" terminate the connection; nothing to do here """ ''' terminate the connection; nothing to do here '''
pass pass

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com> # Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# and chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com> # and chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# and jail.py (c) 2013, Michael Scherer <misc@zarb.org> # and jail.py (c) 2013, Michael Scherer <misc@zarb.org>
@@ -32,9 +31,10 @@ import os.path
import subprocess import subprocess
import traceback import traceback
from ansible import constants as C
from ansible.errors import AnsibleError from ansible.errors import AnsibleError
from ansible.module_utils.six.moves import shlex_quote from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils._text import to_bytes
from ansible.plugins.connection import ConnectionBase, BUFSIZE from ansible.plugins.connection import ConnectionBase, BUFSIZE
from ansible.utils.display import Display from ansible.utils.display import Display
@@ -42,7 +42,7 @@ display = Display()
class Connection(ConnectionBase): class Connection(ConnectionBase):
""" Local zone based connections """ ''' Local zone based connections '''
transport = 'community.general.zone' transport = 'community.general.zone'
has_pipelining = True has_pipelining = True
@@ -75,9 +75,9 @@ class Connection(ConnectionBase):
stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout=subprocess.PIPE, stderr=subprocess.PIPE)
zones = [] zones = []
for line in process.stdout.readlines(): for l in process.stdout.readlines():
# 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared # 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared
s = line.split(':') s = l.split(':')
if s[1] != 'global': if s[1] != 'global':
zones.append(s[1]) zones.append(s[1])
@@ -95,20 +95,20 @@ class Connection(ConnectionBase):
return path + '/root' return path + '/root'
def _connect(self): def _connect(self):
""" connect to the zone; nothing to do here """ ''' connect to the zone; nothing to do here '''
super(Connection, self)._connect() super(Connection, self)._connect()
if not self._connected: if not self._connected:
display.vvv("THIS IS A LOCAL ZONE DIR", host=self.zone) display.vvv("THIS IS A LOCAL ZONE DIR", host=self.zone)
self._connected = True self._connected = True
def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
""" run a command on the zone. This is only needed for implementing ''' run a command on the zone. This is only needed for implementing
put_file() get_file() so that we don't have to read the whole file put_file() get_file() so that we don't have to read the whole file
into memory. into memory.
compared to exec_command() it looses some niceties like being able to compared to exec_command() it looses some niceties like being able to
return the process's exit code immediately. return the process's exit code immediately.
""" '''
# NOTE: zlogin invokes a shell (just like ssh does) so we do not pass # NOTE: zlogin invokes a shell (just like ssh does) so we do not pass
# this through /bin/sh -c here. Instead it goes through the shell # this through /bin/sh -c here. Instead it goes through the shell
# that zlogin selects. # that zlogin selects.
@@ -122,16 +122,16 @@ class Connection(ConnectionBase):
return p return p
def exec_command(self, cmd, in_data=None, sudoable=False): def exec_command(self, cmd, in_data=None, sudoable=False):
""" run a command on the zone """ ''' run a command on the zone '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
p = self._buffered_exec_command(cmd) p = self._buffered_exec_command(cmd)
stdout, stderr = p.communicate(in_data) stdout, stderr = p.communicate(in_data)
return p.returncode, stdout, stderr return (p.returncode, stdout, stderr)
def _prefix_login_path(self, remote_path): def _prefix_login_path(self, remote_path):
""" Make sure that we put files into a standard path ''' Make sure that we put files into a standard path
If a path is relative, then we need to choose where to put it. If a path is relative, then we need to choose where to put it.
ssh chooses $HOME but we aren't guaranteed that a home dir will ssh chooses $HOME but we aren't guaranteed that a home dir will
@@ -139,13 +139,13 @@ class Connection(ConnectionBase):
This also happens to be the former default. This also happens to be the former default.
Can revisit using $HOME instead if it's a problem Can revisit using $HOME instead if it's a problem
""" '''
if not remote_path.startswith(os.path.sep): if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path) remote_path = os.path.join(os.path.sep, remote_path)
return os.path.normpath(remote_path) return os.path.normpath(remote_path)
def put_file(self, in_path, out_path): def put_file(self, in_path, out_path):
""" transfer a file from local to zone """ ''' transfer a file from local to zone '''
super(Connection, self).put_file(in_path, out_path) super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone) display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone)
@@ -171,7 +171,7 @@ class Connection(ConnectionBase):
raise AnsibleError("file or module does not exist at: %s" % in_path) raise AnsibleError("file or module does not exist at: %s" % in_path)
def fetch_file(self, in_path, out_path): def fetch_file(self, in_path, out_path):
""" fetch a file from zone to local """ ''' fetch a file from zone to local '''
super(Connection, self).fetch_file(in_path, out_path) super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone) display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone)
@@ -195,6 +195,6 @@ class Connection(ConnectionBase):
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
def close(self): def close(self):
""" terminate the connection; nothing to do here """ ''' terminate the connection; nothing to do here '''
super(Connection, self).close() super(Connection, self).close()
self._connected = False self._connected = False

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP # Copyright: (c) 2018, Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ # GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Huawei Inc. # Copyright: (c) 2018, Huawei Inc.
# GNU General Public License v3.0+ # GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

View File

@@ -30,6 +30,7 @@ options:
description: description:
- Keycloak realm name to authenticate to for API access. - Keycloak realm name to authenticate to for API access.
type: str type: str
required: true
auth_client_secret: auth_client_secret:
description: description:
@@ -40,6 +41,7 @@ options:
description: description:
- Username to authenticate for API access with. - Username to authenticate for API access with.
type: str type: str
required: true
aliases: aliases:
- username - username
@@ -47,15 +49,10 @@ options:
description: description:
- Password to authenticate for API access with. - Password to authenticate for API access with.
type: str type: str
required: true
aliases: aliases:
- password - password
token:
description:
- Authentication token for Keycloak API.
type: str
version_added: 3.0.0
validate_certs: validate_certs:
description: description:
- Verify TLS certificates (do not disable this in production). - Verify TLS certificates (do not disable this in production).

View File

@@ -13,32 +13,12 @@ class ModuleDocFragment(object):
DOCUMENTATION = r''' DOCUMENTATION = r'''
options: options:
config: config:
description: description:
- Path to a .json configuration file containing the OneView client configuration. - Path to a .json configuration file containing the OneView client configuration.
The configuration file is optional and when used should be present in the host running the ansible commands. The configuration file is optional and when used should be present in the host running the ansible commands.
If the file path is not provided, the configuration will be loaded from environment variables. If the file path is not provided, the configuration will be loaded from environment variables.
For links to example configuration files or how to use the environment variables verify the notes section. For links to example configuration files or how to use the environment variables verify the notes section.
type: path type: path
api_version:
description:
- OneView API Version.
type: int
image_streamer_hostname:
description:
- IP address or hostname for the HPE Image Streamer REST API.
type: str
hostname:
description:
- IP address or hostname for the appliance.
type: str
username:
description:
- Username for API authentication.
type: str
password:
description:
- Password for API authentication.
type: str
requirements: requirements:
- python >= 2.7.9 - python >= 2.7.9

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Oracle and/or its affiliates. # Copyright (c) 2018, Oracle and/or its affiliates.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -48,7 +47,7 @@ class ModuleDocFragment(object):
OCI_USER_KEY_FILE variable, if any, is used. This option is required if the private key is OCI_USER_KEY_FILE variable, if any, is used. This option is required if the private key is
not specified through a configuration file (See C(config_file_location)). If the key is encrypted not specified through a configuration file (See C(config_file_location)). If the key is encrypted
with a pass-phrase, the C(api_user_key_pass_phrase) option must also be provided. with a pass-phrase, the C(api_user_key_pass_phrase) option must also be provided.
type: path type: str
api_user_key_pass_phrase: api_user_key_pass_phrase:
description: description:
- Passphrase used by the key referenced in C(api_user_key_file), if it is encrypted. If not set, then - Passphrase used by the key referenced in C(api_user_key_file), if it is encrypted. If not set, then

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Oracle and/or its affiliates. # Copyright (c) 2018, Oracle and/or its affiliates.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -21,5 +20,4 @@ class ModuleDocFragment(object):
identify an instance of the resource. By default, all the attributes of a resource except identify an instance of the resource. By default, all the attributes of a resource except
I(freeform_tags) are used to uniquely identify a resource. I(freeform_tags) are used to uniquely identify a resource.
type: list type: list
elements: str
""" """

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Oracle and/or its affiliates. # Copyright (c) 2018, Oracle and/or its affiliates.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Oracle and/or its affiliates. # Copyright (c) 2018, Oracle and/or its affiliates.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Oracle and/or its affiliates. # Copyright (c) 2018, Oracle and/or its affiliates.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Oracle and/or its affiliates. # Copyright (c) 2018, Oracle and/or its affiliates.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

View File

@@ -0,0 +1,59 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ModuleDocFragment(object):
    # info standard oVirt documentation fragment
    # NOTE(review): the string below is consumed by ansible-doc as YAML;
    # indentation reconstructed to the collection's doc-fragment convention.
    DOCUMENTATION = r'''
options:
    fetch_nested:
        description:
            - If I(yes) the module will fetch additional data from the API.
            - It will fetch only IDs of nested entity. It doesn't fetch multiple levels of nested attributes.
              Only the attributes of the current entity. User can configure to fetch other
              attributes of the nested entities by specifying C(nested_attributes).
        type: bool
        default: false
    nested_attributes:
        description:
            - Specifies list of the attributes which should be fetched from the API.
            - This parameter apply only when C(fetch_nested) is I(true).
        type: list
    auth:
        description:
            - "Dictionary with values needed to create HTTP/HTTPS connection to oVirt:"
            - C(username)[I(required)] - The name of the user, something like I(admin@internal).
              Default value is set by I(OVIRT_USERNAME) environment variable.
            - "C(password)[I(required)] - The password of the user. Default value is set by I(OVIRT_PASSWORD) environment variable."
            - "C(url)- A string containing the API URL of the server, usually
              something like `I(https://server.example.com/ovirt-engine/api)`. Default value is set by I(OVIRT_URL) environment variable.
              Either C(url) or C(hostname) is required."
            - "C(hostname) - A string containing the hostname of the server, usually
              something like `I(server.example.com)`. Default value is set by I(OVIRT_HOSTNAME) environment variable.
              Either C(url) or C(hostname) is required."
            - "C(token) - Token to be used instead of login with username/password. Default value is set by I(OVIRT_TOKEN) environment variable."
            - "C(insecure) - A boolean flag that indicates if the server TLS
              certificate and host name should be checked."
            - "C(ca_file) - A PEM file containing the trusted CA certificates. The
              certificate presented by the server will be verified using these CA
              certificates. If `C(ca_file)` parameter is not set, system wide
              CA certificate store is used. Default value is set by I(OVIRT_CAFILE) environment variable."
            - "C(kerberos) - A boolean flag indicating if Kerberos authentication
              should be used instead of the default basic authentication."
            - "C(headers) - Dictionary of HTTP headers to be added to each API call."
        type: dict
        required: true
requirements:
  - python >= 2.7
  - ovirt-engine-sdk-python >= 4.3.0
notes:
  - "In order to use this module you have to install oVirt Python SDK.
     To ensure it's installed with correct version you can create the following task:
     ansible.builtin.pip: name=ovirt-engine-sdk-python version=4.3.0"
'''

View File

@@ -1,43 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Florian Dambrine <android.florian@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class ModuleDocFragment(object):
    # Standard Pritunl documentation fragment: shared connection/auth options
    # for the Pritunl API modules. Consumed by ansible-doc as YAML;
    # indentation reconstructed to the collection's doc-fragment convention.
    DOCUMENTATION = r"""
options:
    pritunl_url:
        type: str
        required: true
        description:
            - URL and port of the Pritunl server on which the API is enabled.
    pritunl_api_token:
        type: str
        required: true
        description:
            - API Token of a Pritunl admin user.
            - It needs to be enabled in Administrators > USERNAME > Enable Token Authentication.
    pritunl_api_secret:
        type: str
        required: true
        description:
            - API Secret found in Administrators > USERNAME > API Secret.
    validate_certs:
        type: bool
        required: false
        default: true
        description:
            - If certificates should be validated or not.
            - This should never be set to C(false), except if you are very sure that
              your connection to the server can not be subject to a Man In The Middle
              attack.
"""

View File

@@ -1,57 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Andreas Botzner <andreas at botzner dot com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ModuleDocFragment(object):
    # Common parameters for Redis modules
    # NOTE(review): consumed by ansible-doc as YAML; indentation reconstructed
    # to the collection's doc-fragment convention.
    DOCUMENTATION = r'''
options:
    login_host:
        description:
            - Specify the target host running the database.
        default: localhost
        type: str
    login_port:
        description:
            - Specify the port to connect to.
        default: 6379
        type: int
    login_user:
        description:
            - Specify the user to authenticate with.
            - Requires L(redis,https://pypi.org/project/redis) >= 3.4.0.
        type: str
    login_password:
        description:
            - Specify the password to authenticate with.
            - Usually not used when target is localhost.
        type: str
    tls:
        description:
            - Specify whether or not to use TLS for the connection.
        type: bool
        default: true
    validate_certs:
        description:
            - Specify whether or not to validate TLS certificates.
            - This should only be turned off for personally controlled sites or with
              C(localhost) as target.
        type: bool
        default: true
    ca_certs:
        description:
            - Path to root certificates file. If not set and I(tls) is
              set to C(true), certifi ca-certificates will be used.
        type: str
requirements: [ "redis", "certifi" ]
notes:
  - Requires the C(redis) Python package on the remote host. You can
    install it with pip (C(pip install redis)) or with a package manager.
    Information on the library can be found at U(https://github.com/andymccurdy/redis-py).
'''

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# #
# Copyright: (c) 2019, Sandeep Kasargod <sandeep@vexata.com> # Copyright: (c) 2019, Sandeep Kasargod <sandeep@vexata.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

View File

View File

@@ -1,24 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
def dict_filter(sequence):
    """Build a dictionary from a sequence of key/value pairs.

    Example: ``[[1, 2], ['a', 'b']] | community.general.dict`` results
    in ``{1: 2, 'a': 'b'}``.
    """
    # dict() natively accepts any iterable of 2-item iterables.
    result = dict(sequence)
    return result


class FilterModule(object):
    """Expose the ``dict`` Jinja2 filter to Ansible."""

    def filters(self):
        # Map the filter name used in playbooks to its implementation.
        mapping = {'dict': dict_filter}
        return mapping

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2020 Stanislav German-Evtushenko (@giner) <ginermail@gmail.com> # Copyright (C) 2020 Stanislav German-Evtushenko (@giner) <ginermail@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

View File

@@ -1,49 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) <ajpantuso@gmail.com>
# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.errors import AnsibleFilterError
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError,
DialectNotAvailableError,
CustomDialectFailureError)
def from_csv(data, dialect='excel', fieldnames=None, delimiter=None, skipinitialspace=None, strict=None):
    """Parse CSV text into a list of row dictionaries.

    :param data: CSV content as a string.
    :param dialect: Name of the base csv dialect to use.
    :param fieldnames: Optional explicit column names.
    :param delimiter: Optional dialect override.
    :param skipinitialspace: Optional dialect override.
    :param strict: Optional dialect override.
    :raises AnsibleFilterError: when the dialect is invalid or parsing fails.
    """
    overrides = {
        "delimiter": delimiter,
        "skipinitialspace": skipinitialspace,
        "strict": strict,
    }

    try:
        dialect = initialize_dialect(dialect, **overrides)
    except (CustomDialectFailureError, DialectNotAvailableError) as e:
        raise AnsibleFilterError(to_native(e))

    reader = read_csv(data, dialect, fieldnames)

    try:
        rows = [row for row in reader]
    except CSVError as e:
        raise AnsibleFilterError("Unable to process file: %s" % to_native(e))

    return rows


class FilterModule(object):
    """Expose the ``from_csv`` Jinja2 filter."""

    def filters(self):
        return {'from_csv': from_csv}

View File

@@ -1,42 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleFilterError
from ansible.module_utils.common._collections_compat import Mapping, Sequence
def groupby_as_dict(sequence, attribute):
    """Index a sequence of mappings by the value of ``attribute``.

    Returns a dictionary mapping each element's attribute value to the
    element itself. Fails when the input is not a sequence of mappings,
    when an element lacks the attribute, or when two elements share the
    same attribute value.
    """
    if not isinstance(sequence, Sequence):
        raise AnsibleFilterError('Input is not a sequence')

    grouped = dict()
    for idx, item in enumerate(sequence):
        if not isinstance(item, Mapping):
            raise AnsibleFilterError('Sequence element #{0} is not a mapping'.format(idx))
        if attribute not in item:
            raise AnsibleFilterError('Attribute not contained in element #{0} of sequence'.format(idx))
        key = item[attribute]
        # Duplicate keys are an error rather than a silent overwrite.
        if key in grouped:
            raise AnsibleFilterError('Multiple sequence entries have attribute value {0!r}'.format(key))
        grouped[key] = item
    return grouped


class FilterModule(object):
    """Ansible list filters."""

    def filters(self):
        return {'groupby_as_dict': groupby_as_dict}

View File

@@ -1,97 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) <ajpantuso@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.errors import (
AnsibleError,
AnsibleFilterError,
AnsibleFilterTypeError,
)
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.collections import is_sequence
try:
from hashids import Hashids
HAS_HASHIDS = True
except ImportError:
HAS_HASHIDS = False
def initialize_hashids(**kwargs):
    """Create a ``Hashids`` instance from the truthy keyword arguments.

    :raises AnsibleError: when the hashids library is not installed.
    :raises AnsibleFilterError: when Hashids rejects the parameters.
    """
    if not HAS_HASHIDS:
        raise AnsibleError("The hashids library must be installed in order to use this plugin")

    # Drop parameters the caller left unset so Hashids' own defaults apply.
    params = dict((k, v) for k, v in kwargs.items() if v)

    try:
        return Hashids(**params)
    except TypeError as e:
        raise AnsibleFilterError(
            "The provided parameters %s are invalid: %s" % (
                ', '.join(["%s=%s" % (k, v) for k, v in params.items()]),
                to_native(e)
            )
        )


def hashids_encode(nums, salt=None, alphabet=None, min_length=None):
    """Generates a YouTube-like hash from a sequence of ints

    :nums: Sequence of one or more ints to hash
    :salt: String to use as salt when hashing
    :alphabet: String of 16 or more unique characters to produce a hash
    :min_length: Minimum length of hash produced
    """
    hashids = initialize_hashids(
        salt=salt,
        alphabet=alphabet,
        min_length=min_length
    )

    # Handles the case where a single int is not encapsulated in a list or tuple.
    # User convenience seems preferable to strict typing in this case
    # Also avoids obfuscated error messages related to single invalid inputs
    if not is_sequence(nums):
        nums = [nums]

    try:
        hashid = hashids.encode(*nums)
    except TypeError as e:
        # Error-message typo fixed: "must by" -> "must be".
        raise AnsibleFilterTypeError(
            "Data to encode must be a tuple or list of ints: %s" % to_native(e)
        )

    return hashid


def hashids_decode(hashid, salt=None, alphabet=None, min_length=None):
    """Decodes a YouTube-like hash to a sequence of ints

    :hashid: Hash string to decode
    :salt: String to use as salt when hashing
    :alphabet: String of 16 or more unique characters to produce a hash
    :min_length: Minimum length of hash produced
    """
    hashids = initialize_hashids(
        salt=salt,
        alphabet=alphabet,
        min_length=min_length
    )
    nums = hashids.decode(hashid)
    return list(nums)


class FilterModule(object):
    """Expose the hashids encode/decode Jinja2 filters."""

    def filters(self):
        return {
            'hashids_encode': hashids_encode,
            'hashids_decode': hashids_decode,
        }

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com> # (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com>
# #
# This file is part of Ansible # This file is part of Ansible

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com> # (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com>
# #
# This file is part of Ansible # This file is part of Ansible
@@ -36,11 +35,9 @@ def json_query(data, expr):
raise AnsibleError('You need to install "jmespath" prior to running ' raise AnsibleError('You need to install "jmespath" prior to running '
'json_query filter') 'json_query filter')
# Hack to handle Ansible Unsafe text, AnsibleMapping and AnsibleSequence # Hack to handle Ansible String Types
# See issue: https://github.com/ansible-collections/community.general/issues/320 # See issue: https://github.com/ansible-collections/community.general/issues/320
jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ('AnsibleUnicode', 'AnsibleUnsafeText', ) jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ('AnsibleUnicode', 'AnsibleUnsafeText', )
jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + ('AnsibleSequence', )
jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + ('AnsibleMapping', )
try: try:
return jmespath.search(expr, data) return jmespath.search(expr, data)
except jmespath.exceptions.JMESPathError as e: except jmespath.exceptions.JMESPathError as e:

View File

@@ -1,28 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2020-2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os.path
def path_join(list):
    """Join a list of path components into a single path.

    This is a minimal shim for ansible.builtin.path_join included in
    ansible-base 2.10. This should only be called by Ansible 2.9 or
    earlier. See meta/runtime.yml for details.
    """
    # The parameter is named ``list`` (shadowing the builtin) to keep the
    # public keyword interface unchanged for existing callers.
    parts = tuple(list)
    return os.path.join(*parts)


class FilterModule(object):
    """Expose the ``path_join`` Jinja2 filter."""

    def filters(self):
        return {'path_join': path_join}

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# (c) 2020 Ansible Project # (c) 2020 Ansible Project
# #
# This file is part of Ansible # This file is part of Ansible

View File

@@ -1,40 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) <ajpantuso@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from unicodedata import normalize
from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError
from ansible.module_utils.six import text_type
def unicode_normalize(data, form='NFC'):
    """Applies normalization to 'unicode' strings.

    Args:
        data: A unicode string piped into the Jinja filter
        form: One of ('NFC', 'NFD', 'NFKC', 'NFKD').
            See https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize for more information.

    Returns:
        A normalized unicode string of the specified 'form'.
    """
    if not isinstance(data, text_type):
        raise AnsibleFilterTypeError("%s is not a valid input type" % type(data))

    valid_forms = ('NFC', 'NFD', 'NFKC', 'NFKD')
    if form not in valid_forms:
        raise AnsibleFilterError("%s is not a valid form" % form)

    return normalize(form, data)


class FilterModule(object):
    """Expose the ``unicode_normalize`` Jinja2 filter."""

    def filters(self):
        return {'unicode_normalize': unicode_normalize}

View File

@@ -1,22 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2021 Eric Lavarde <elavarde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from distutils.version import LooseVersion
def version_sort(value, reverse=False):
    """Sort a list according to loose versions so that e.g. 2.9 is smaller than 2.10.

    :param value: Iterable of version strings.
    :param reverse: Sort in descending order when true.
    """
    ordered = sorted(value, key=LooseVersion, reverse=reverse)
    return ordered


class FilterModule(object):
    """Version sort filter."""

    def filters(self):
        return {'version_sort': version_sort}

View File

View File

@@ -72,7 +72,7 @@ from distutils.version import LooseVersion
import socket import socket
from ansible.errors import AnsibleError from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.common._collections_compat import MutableMapping from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.module_utils.six import iteritems from ansible.module_utils.six import iteritems
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name

View File

@@ -82,7 +82,7 @@ keyed_groups:
''' '''
from ansible.errors import AnsibleError, AnsibleParserError from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.common.text.converters import to_native from ansible.module_utils._text import to_native
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
try: try:

View File

@@ -1,222 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Cliff Hults <cliff.hlts@gmail.com>
# Copyright (c) 2021 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: icinga2
short_description: Icinga2 inventory source
version_added: 3.7.0
author:
- Cliff Hults (@BongoEADGC6) <cliff.hults@gmail.com>
description:
- Get inventory hosts from the Icinga2 API.
- "Uses a configuration file as an inventory source, it must end in
C(.icinga2.yml) or C(.icinga2.yaml)."
options:
plugin:
description: Name of the plugin.
required: true
type: string
choices: ['community.general.icinga2']
url:
description: Root URL of Icinga2 API.
type: string
required: true
user:
description: Username to query the API.
type: string
required: true
password:
description: Password to query the API.
type: string
required: true
host_filter:
description: An Icinga2 API valid host filter.
type: string
required: false
validate_certs:
description: Enables or disables SSL certificate verification.
type: boolean
default: true
'''
EXAMPLES = r'''
# my.icinga2.yml
plugin: community.general.icinga2
url: http://localhost:5665
user: ansible
password: secure
host_filter: \"linux-servers\" in host.groups
validate_certs: false
'''
import json
from ansible.errors import AnsibleParserError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible.module_utils.urls import open_url
class InventoryModule(BaseInventoryPlugin, Constructable):
    ''' Host inventory parser for ansible using Icinga2 as source. '''

    NAME = 'community.general.icinga2'

    def __init__(self):
        # All connection/config state is filled in later by parse() from the
        # YAML inventory source; everything starts unset.
        super(InventoryModule, self).__init__()

        # from config
        self.icinga2_url = None        # base API URL with '/v1' appended by parse()
        self.icinga2_user = None       # API username
        self.icinga2_password = None   # API password
        self.ssl_verify = None         # whether to validate TLS certificates
        self.host_filter = None        # optional Icinga2 host filter expression
        self.cache_key = None          # caching is not currently enabled
        self.use_cache = None          # caching is not currently enabled

    def verify_file(self, path):
        """Return True when the source file name matches *icinga2.yml / *icinga2.yaml."""
        valid = False
        if super(InventoryModule, self).verify_file(path):
            if path.endswith(('icinga2.yaml', 'icinga2.yml')):
                valid = True
            else:
                self.display.vvv('Skipping due to inventory source not ending in "icinga2.yaml" nor "icinga2.yml"')
        return valid

    def _api_connect(self):
        """Probe the Icinga2 /status endpoint to verify connectivity and credentials."""
        self.headers = {
            'User-Agent': "ansible-icinga2-inv",
            'Accept': "application/json",
        }
        api_status_url = self.icinga2_url + "/status"
        request_args = {
            'headers': self.headers,
            'url_username': self.icinga2_user,
            'url_password': self.icinga2_password,
            'validate_certs': self.ssl_verify
        }
        # open_url raises on connection/auth failure, aborting the parse early.
        open_url(api_status_url, **request_args)

    def _post_request(self, request_url, data=None):
        """POST ``data`` (JSON-encoded) to the API and return the decoded JSON body.

        Raises AnsibleParserError for the known error statuses (404 with no
        objects, 401, 500) and for any other unexpected response.
        """
        self.display.vvv("Requested URL: %s" % request_url)
        request_args = {
            'headers': self.headers,
            'url_username': self.icinga2_user,
            'url_password': self.icinga2_password,
            'validate_certs': self.ssl_verify
        }
        if data is not None:
            request_args['data'] = json.dumps(data)
        self.display.vvv("Request Args: %s" % request_args)
        response = open_url(request_url, **request_args)
        response_body = response.read()
        json_data = json.loads(response_body.decode('utf-8'))
        if 200 <= response.status <= 299:
            return json_data
        if response.status == 404 and json_data['status'] == "No objects found.":
            raise AnsibleParserError(
                "API returned no data -- Response: %s - %s"
                % (response.status, json_data['status']))
        if response.status == 401:
            raise AnsibleParserError(
                "API was unable to complete query -- Response: %s - %s"
                % (response.status, json_data['status']))
        if response.status == 500:
            raise AnsibleParserError(
                "API Response - %s - %s"
                % (json_data['status'], json_data['errors']))
        raise AnsibleParserError(
            "Unexpected data returned - %s - %s"
            % (json_data['status'], json_data['errors']))

    def _query_hosts(self, hosts=None, attrs=None, joins=None, host_filter=None):
        """Query /objects/hosts, optionally restricting hosts, attrs, joins and filter."""
        query_hosts_url = "{0}/objects/hosts".format(self.icinga2_url)
        # Icinga2 expects query bodies on GET; override the POST verb via header.
        self.headers['X-HTTP-Method-Override'] = 'GET'
        data_dict = dict()
        if hosts:
            data_dict['hosts'] = hosts
        if attrs is not None:
            data_dict['attrs'] = attrs
        if joins is not None:
            data_dict['joins'] = joins
        if host_filter is not None:
            # Unescape quotes that YAML escaping introduced in the filter string.
            data_dict['filter'] = host_filter.replace("\\\"", "\"")
            self.display.vvv(host_filter)
        host_dict = self._post_request(query_hosts_url, data_dict)
        return host_dict['results']

    def get_inventory_from_icinga(self):
        """Query for all hosts """
        self.display.vvv("Querying Icinga2 for inventory")
        query_args = {
            "attrs": ["address", "state_type", "state", "groups"],
        }
        if self.host_filter is not None:
            query_args['host_filter'] = self.host_filter
        # Icinga2 API Call
        results_json = self._query_hosts(**query_args)
        # Manipulate returned API data to Ansible inventory spec
        ansible_inv = self._convert_inv(results_json)
        return ansible_inv

    def _populate(self):
        # The inventory itself is populated as a side effect of _convert_inv();
        # the JSON string returned here is informational only.
        groups = self._to_json(self.get_inventory_from_icinga())
        return groups

    def _to_json(self, in_dict):
        """Convert dictionary to JSON"""
        return json.dumps(in_dict, sort_keys=True, indent=2)

    def _convert_inv(self, json_data):
        """Convert Icinga2 API data to JSON format for Ansible"""
        # NOTE(review): hostvars in the returned dict are never filled in;
        # all host/group data is registered directly on self.inventory.
        groups_dict = {"_meta": {"hostvars": {}}}
        for entry in json_data:
            host_name = entry['name']
            host_attrs = entry['attrs']
            # Icinga2 state 0 means "UP"; any other state is treated as down.
            if host_attrs['state'] == 0:
                host_attrs['state'] = 'on'
            else:
                host_attrs['state'] = 'off'
            host_groups = host_attrs['groups']
            host_addr = host_attrs['address']
            # Hosts are keyed by address rather than by their Icinga2 object name.
            self.inventory.add_host(host_addr)
            for group in host_groups:
                if group not in self.inventory.groups.keys():
                    self.inventory.add_group(group)
                self.inventory.add_child(group, host_addr)
            self.inventory.set_variable(host_addr, 'address', host_addr)
            self.inventory.set_variable(host_addr, 'hostname', host_name)
            self.inventory.set_variable(host_addr, 'state',
                                        host_attrs['state'])
            self.inventory.set_variable(host_addr, 'state_type',
                                        host_attrs['state_type'])
        return groups_dict

    def parse(self, inventory, loader, path, cache=True):
        """Entry point: read options from the YAML source, check the API, populate inventory."""
        super(InventoryModule, self).parse(inventory, loader, path)
        # read config from file, this sets 'options'
        self._read_config_data(path)
        # Store the options from the YAML file
        self.icinga2_url = self.get_option('url').rstrip('/') + '/v1'
        self.icinga2_user = self.get_option('user')
        self.icinga2_password = self.get_option('password')
        self.ssl_verify = self.get_option('validate_certs')
        self.host_filter = self.get_option('host_filter')
        # Not currently enabled
        # self.cache_key = self.get_cache_key(path)
        # self.use_cache = cache and self.get_option('cache')
        # Test connection to API
        self._api_connect()
        # Call our internal helper to populate the dynamic inventory
        self._populate()

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ansible Project # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -23,17 +22,9 @@ DOCUMENTATION = r'''
- constructed - constructed
options: options:
plugin: plugin:
description: Marks this as an instance of the 'linode' plugin. description: marks this as an instance of the 'linode' plugin
required: true required: true
choices: ['linode', 'community.general.linode'] choices: ['linode', 'community.general.linode']
ip_style:
description: Populate hostvars with all information available from the Linode APIv4.
type: string
default: plain
choices:
- plain
- api
version_added: 3.6.0
access_token: access_token:
description: The Linode account personal access token. description: The Linode account personal access token.
required: true required: true
@@ -43,15 +34,18 @@ DOCUMENTATION = r'''
description: Populate inventory with instances in this region. description: Populate inventory with instances in this region.
default: [] default: []
type: list type: list
required: false
tags: tags:
description: Populate inventory only with instances which have at least one of the tags listed here. description: Populate inventory only with instances which have at least one of the tags listed here.
default: [] default: []
type: list type: list
reqired: false
version_added: 2.0.0 version_added: 2.0.0
types: types:
description: Populate inventory with instances with this type. description: Populate inventory with instances with this type.
default: [] default: []
type: list type: list
required: false
strict: strict:
version_added: 2.0.0 version_added: 2.0.0
compose: compose:
@@ -86,18 +80,7 @@ groups:
webservers: "'web' in (tags|list)" webservers: "'web' in (tags|list)"
mailservers: "'mail' in (tags|list)" mailservers: "'mail' in (tags|list)"
compose: compose:
# By default, Ansible tries to connect to the label of the instance.
# Since that might not be a valid name to connect to, you can
# replace it with the first IPv4 address of the linode as follows:
ansible_ssh_host: ipv4[0]
ansible_port: 2222 ansible_port: 2222
# Example where control traffic limited to internal network
plugin: community.general.linode
access_token: foobar
ip_style: api
compose:
ansible_host: "ipv4 | community.general.json_query('[?public==`false`].address') | first"
''' '''
import os import os
@@ -185,44 +168,14 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
def _add_hostvars_for_instances(self): def _add_hostvars_for_instances(self):
"""Add hostvars for instances in the dynamic inventory.""" """Add hostvars for instances in the dynamic inventory."""
ip_style = self.get_option('ip_style')
for instance in self.instances: for instance in self.instances:
hostvars = instance._raw_json hostvars = instance._raw_json
for hostvar_key in hostvars: for hostvar_key in hostvars:
if ip_style == 'api' and hostvar_key in ['ipv4', 'ipv6']:
continue
self.inventory.set_variable( self.inventory.set_variable(
instance.label, instance.label,
hostvar_key, hostvar_key,
hostvars[hostvar_key] hostvars[hostvar_key]
) )
if ip_style == 'api':
ips = instance.ips.ipv4.public + instance.ips.ipv4.private
ips += [instance.ips.ipv6.slaac, instance.ips.ipv6.link_local]
ips += instance.ips.ipv6.pools
for ip_type in set(ip.type for ip in ips):
self.inventory.set_variable(
instance.label,
ip_type,
self._ip_data([ip for ip in ips if ip.type == ip_type])
)
def _ip_data(self, ip_list):
data = []
for ip in list(ip_list):
data.append(
{
'address': ip.address,
'subnet_mask': ip.subnet_mask,
'gateway': ip.gateway,
'public': ip.public,
'prefix': ip.prefix,
'rdns': ip.rdns,
'type': ip.type
}
)
return data
def _validate_option(self, name, desired_type, option_value): def _validate_option(self, name, desired_type, option_value):
"""Validate user specified configuration data against types.""" """Validate user specified configuration data against types."""

View File

@@ -1,950 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Frank Dornheim <dornheim@posteo.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
name: lxd
short_description: Returns Ansible inventory from lxd host
description:
- Get inventory from the lxd.
- Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'.
version_added: "3.0.0"
author: "Frank Dornheim (@conloos)"
options:
plugin:
description: Token that ensures this is a source file for the 'lxd' plugin.
required: true
choices: [ 'community.general.lxd' ]
url:
description:
- The unix domain socket path or the https URL for the lxd server.
- Sockets in filesystem have to start with C(unix:).
- Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket).
default: unix:/var/snap/lxd/common/lxd/unix.socket
type: str
client_key:
description:
- The client certificate key file path.
aliases: [ key_file ]
default: $HOME/.config/lxc/client.key
type: path
client_cert:
description:
- The client certificate file path.
aliases: [ cert_file ]
default: $HOME/.config/lxc/client.crt
type: path
trust_password:
description:
- The client trusted password.
- You need to set this password on the lxd server before
running this module using the following command
C(lxc config set core.trust_password <some random password>)
See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).
- If I(trust_password) is set, this module send a request for authentication before sending any requests.
type: str
state:
description: Filter the container according to the current status.
type: str
default: none
choices: [ 'STOPPED', 'STARTING', 'RUNNING', 'none' ]
prefered_container_network_interface:
description:
- If a container has multiple network interfaces, select which one is the prefered as pattern.
- Combined with the first number that can be found e.g. 'eth' + 0.
type: str
default: eth
prefered_container_network_family:
description:
- If a container has multiple network interfaces, which one is the prefered by family.
- Specify C(inet) for IPv4 and C(inet6) for IPv6.
type: str
default: inet
choices: [ 'inet', 'inet6' ]
groupby:
description:
- Create groups by the following keywords C(location), C(pattern), C(network_range), C(os), C(release), C(profile), C(vlanid).
- See example for syntax.
type: dict
'''
EXAMPLES = '''
# simple lxd.yml
plugin: community.general.lxd
url: unix:/var/snap/lxd/common/lxd/unix.socket
# simple lxd.yml including filter
plugin: community.general.lxd
url: unix:/var/snap/lxd/common/lxd/unix.socket
state: RUNNING
# grouping lxd.yml
groupby:
testpattern:
type: pattern
attribute: test
vlan666:
type: vlanid
attribute: 666
locationBerlin:
type: location
attribute: Berlin
osUbuntu:
type: os
attribute: ubuntu
releaseFocal:
type: release
attribute: focal
releaseBionic:
type: release
attribute: bionic
profileDefault:
type: profile
attribute: default
profileX11:
type: profile
attribute: x11
netRangeIPv4:
type: network_range
attribute: 10.98.143.0/24
netRangeIPv6:
type: network_range
attribute: fd42:bd00:7b11:2167:216:3eff::/24
'''
import binascii
import json
import re
import time
import os
import socket
from ansible.plugins.inventory import BaseInventoryPlugin
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.common.dict_transformations import dict_merge
from ansible.errors import AnsibleError, AnsibleParserError
from ansible_collections.community.general.plugins.module_utils.compat import ipaddress
from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
class InventoryModule(BaseInventoryPlugin):
    """Dynamic inventory plugin reading containers and groups from an lxd server."""

    # Passed to LXDClient as its fourth argument (presumably a debug/verbosity
    # level -- confirm against LXDClient).
    DEBUG = 4
    NAME = 'community.general.lxd'
    # Fallback socket locations: snap-based install and classic package.
    SNAP_SOCKET_URL = 'unix:/var/snap/lxd/common/lxd/unix.socket'
    SOCKET_URL = 'unix:/var/lib/lxd/unix.socket'
    @staticmethod
    def load_json_data(path):
        """Load json data

        Load json data from file

        Args:
            str(path): path of the JSON file to read
        Kwargs:
            None
        Raises:
            AnsibleParserError: file could not be opened or parsed
        Returns:
            dict(json_data): json data"""
        try:
            with open(path, 'r') as json_file:
                return json.load(json_file)
        except (IOError, json.decoder.JSONDecodeError) as err:
            raise AnsibleParserError('Could not load the test data from {0}: {1}'.format(to_native(path), to_native(err)))
    def save_json_data(self, path, file_name=None):
        """save data as json

        Save ``self.data`` as a json file below this module's directory.

        Args:
            list(path): path elements relative to this module's directory;
                NOTE: the list is mutated (the file name is appended to it)
        Kwargs:
            str(file_name): file name; when omitted a timestamped
                'lxd_data-<YYYYmmdd-HHMMSS>.atd' name is generated
        Raises:
            AnsibleParserError: file could not be written
        Returns:
            None"""
        if file_name:
            path.append(file_name)
        else:
            prefix = 'lxd_data-'
            time_stamp = time.strftime('%Y%m%d-%H%M%S')
            suffix = '.atd'
            path.append(prefix + time_stamp + suffix)
        try:
            cwd = os.path.abspath(os.path.dirname(__file__))
            with open(os.path.abspath(os.path.join(cwd, *path)), 'w') as json_file:
                json.dump(self.data, json_file)
        except IOError as err:
            raise AnsibleParserError('Could not save data: {0}'.format(to_native(err)))
def verify_file(self, path):
"""Check the config
Return true/false if the config-file is valid for this plugin
Args:
str(path): path to the config
Kwargs:
None
Raises:
None
Returns:
bool(valid): is valid"""
valid = False
if super(InventoryModule, self).verify_file(path):
if path.endswith(('lxd.yaml', 'lxd.yml')):
valid = True
else:
self.display.vvv('Inventory source not ending in "lxd.yaml" or "lxd.yml"')
return valid
@staticmethod
def validate_url(url):
"""validate url
check whether the url is correctly formatted
Args:
url
Kwargs:
None
Raises:
AnsibleError
Returns:
bool"""
if not isinstance(url, str):
return False
if not url.startswith(('unix:', 'https:')):
raise AnsibleError('URL is malformed: {0}'.format(to_native(url)))
return True
    def _connect_to_socket(self):
        """connect to lxd socket

        Try the configured url first, then the snap socket, then the classic
        socket; the first reachable one wins.

        Args:
            None
        Kwargs:
            None
        Raises:
            AnsibleError: no candidate socket could be reached
        Returns:
            LXDClient: connection to the lxd server"""
        error_storage = {}
        url_list = [self.get_option('url'), self.SNAP_SOCKET_URL, self.SOCKET_URL]
        # validate_url() filters out non-strings and raises on malformed urls
        urls = (url for url in url_list if self.validate_url(url))
        for url in urls:
            try:
                socket_connection = LXDClient(url, self.client_key, self.client_cert, self.debug)
                return socket_connection
            except LXDClientException as err:
                # remember the per-url failure for the aggregate error below
                error_storage[url] = err
        raise AnsibleError('No connection to the socket: {0}'.format(to_native(error_storage)))
    def _get_networks(self):
        """Get Networknames

        Returns all network config names

        Args:
            None
        Kwargs:
            None
        Raises:
            None
        Returns:
            list(names): names of all network_configs"""
        # e.g. {'type': 'sync',
        #       'status': 'Success',
        #       'status_code': 200,
        #       'operation': '',
        #       'error_code': 0,
        #       'error': '',
        #       'metadata': ['/1.0/networks/lxdbr0']}
        network_configs = self.socket.do('GET', '/1.0/networks')
        # metadata entries look like '/1.0/networks/<name>'; split element 3
        # is the bare network name
        return [m.split('/')[3] for m in network_configs['metadata']]
    def _get_containers(self):
        """Get Containernames

        Returns all containernames

        Args:
            None
        Kwargs:
            None
        Raises:
            None
        Returns:
            list(names): names of all containers"""
        # e.g. {'type': 'sync',
        #       'status': 'Success',
        #       'status_code': 200,
        #       'operation': '',
        #       'error_code': 0,
        #       'error': '',
        #       'metadata': ['/1.0/containers/udemy-ansible-ubuntu-2004']}
        containers = self.socket.do('GET', '/1.0/containers')
        # metadata entries look like '/1.0/containers/<name>'; split element 3
        # is the bare container name
        return [m.split('/')[3] for m in containers['metadata']]
    def _get_config(self, branch, name):
        """Get inventory of container

        Get config of container or network from one API branch.

        Args:
            str|tuple(branch): name of the API branch; a tuple/list selects a
                sub-section, e.g. ('instances', 'state') -> /1.0/instances/<name>/state
            str(name): name of the container/network
        Kwargs:
            None
        Source:
            https://github.com/lxc/lxd/blob/master/doc/rest-api.md
        Raises:
            None
        Returns:
            dict(config): config of the object, keyed by name then branch"""
        config = {}
        if isinstance(branch, (tuple, list)):
            config[name] = {branch[1]: self.socket.do('GET', '/1.0/{0}/{1}/{2}'.format(to_native(branch[0]), to_native(name), to_native(branch[1])))}
        else:
            config[name] = {branch: self.socket.do('GET', '/1.0/{0}/{1}'.format(to_native(branch), to_native(name)))}
        return config
    def get_container_data(self, names):
        """Create Inventory of the container

        Fetches the 'containers' and 'instances/state' API branches for every
        container and merges the results into ``self.data``.

        Args:
            list(names): List of container names
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        # tuple(('instances','metadata/templates')) to get section in branch
        # e.g. /1.0/instances/<name>/metadata/templates
        branches = ['containers', ('instances', 'state')]
        container_config = {}
        for branch in branches:
            for name in names:
                container_config['containers'] = self._get_config(branch, name)
                # dict_merge(a, b): values from b win, so entries already in
                # self.data take precedence over the freshly fetched ones
                self.data = dict_merge(container_config, self.data)
    def get_network_data(self, names):
        """Create inventory of the networks

        Fetches the 'networks/state' API branch for every network and merges
        the results into ``self.data``; unreachable networks are stored as None.

        Args:
            list(names): List of network names
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        # tuple(('instances','metadata/templates')) to get section in branch
        # e.g. /1.0/instances/<name>/metadata/templates
        branches = [('networks', 'state')]
        network_config = {}
        for branch in branches:
            for name in names:
                try:
                    network_config['networks'] = self._get_config(branch, name)
                except LXDClientException:
                    # network exists but its state could not be fetched
                    network_config['networks'] = {name: None}
                # dict_merge(a, b): values from b win, so entries already in
                # self.data take precedence over the freshly fetched ones
                self.data = dict_merge(network_config, self.data)
    def extract_network_information_from_container_config(self, container_name):
        """Returns the network interface configuration

        Returns the network ipv4 and ipv6 config of the container without local-link

        Args:
            str(container_name): Name of the container
        Kwargs:
            None
        Raises:
            None
        Returns:
            dict(network_configuration): network config, or None when the
                container reports no network data"""
        container_network_interfaces = self._get_data_entry('containers/{0}/state/metadata/network'.format(container_name))
        network_configuration = None
        if container_network_interfaces:
            network_configuration = {}
            # skip the loopback device
            gen_interface_names = [interface_name for interface_name in container_network_interfaces if interface_name != 'lo']
            for interface_name in gen_interface_names:
                # drop link-local addresses (scope == 'link')
                gen_address = [address for address in container_network_interfaces[interface_name]['addresses'] if address.get('scope') != 'link']
                network_configuration[interface_name] = []
                for address in gen_address:
                    address_set = {}
                    address_set['family'] = address.get('family')
                    address_set['address'] = address.get('address')
                    address_set['netmask'] = address.get('netmask')
                    # CIDR-ish 'address/netmask' convenience value
                    address_set['combined'] = address.get('address') + '/' + address.get('netmask')
                    network_configuration[interface_name].append(address_set)
        return network_configuration
def get_prefered_container_network_interface(self, container_name):
"""Helper to get the prefered interface of thr container
Helper to get the prefered interface provide by neme pattern from 'prefered_container_network_interface'.
Args:
str(containe_name): name of container
Kwargs:
None
Raises:
None
Returns:
str(prefered_interface): None or interface name"""
container_network_interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name))
prefered_interface = None # init
if container_network_interfaces: # container have network interfaces
# generator if interfaces which start with the desired pattern
net_generator = [interface for interface in container_network_interfaces if interface.startswith(self.prefered_container_network_interface)]
selected_interfaces = [] # init
for interface in net_generator:
selected_interfaces.append(interface)
if len(selected_interfaces) > 0:
prefered_interface = sorted(selected_interfaces)[0]
return prefered_interface
    def get_container_vlans(self, container_name):
        """Get VLAN(s) from container

        Helper to get the VLAN_ID from the container

        Args:
            str(container_name): name of container
        Kwargs:
            None
        Raises:
            None
        Returns:
            dict: {network_name: vlan_id} for each device on a VLAN-tagged
                network, or None when there are none"""
        # get network device configuration and store {network: vlan_id}
        network_vlans = {}
        for network in self._get_data_entry('networks'):
            if self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network)):
                network_vlans[network] = self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network))
        # get networkdevices of container and return
        # e.g.
        # "eth0":{ "name":"eth0",
        #          "network":"lxdbr0",
        #          "type":"nic"},
        vlan_ids = {}
        devices = self._get_data_entry('containers/{0}/containers/metadata/expanded_devices'.format(to_native(container_name)))
        for device in devices:
            if 'network' in devices[device]:
                if devices[device]['network'] in network_vlans:
                    vlan_ids[devices[device].get('network')] = network_vlans[devices[device].get('network')]
        return vlan_ids if vlan_ids else None
def _get_data_entry(self, path, data=None, delimiter='/'):
"""Helper to get data
Helper to get data from self.data by a path like 'path/to/target'
Attention: Escaping of the delimiter is not (yet) provided.
Args:
str(path): path to nested dict
Kwargs:
dict(data): datastore
str(delimiter): delimiter in Path.
Raises:
None
Returns:
*(value)"""
try:
if not data:
data = self.data
if delimiter in path:
path = path.split(delimiter)
if isinstance(path, list) and len(path) > 1:
data = data[path.pop(0)]
path = delimiter.join(path)
return self._get_data_entry(path, data, delimiter) # recursion
return data[path]
except KeyError:
return None
    def _set_data_entry(self, container_name, key, value, path=None):
        """Helper to save data

        Helper to save the data in self.data
        Detect if data is allready in branch and use dict_merge() to prevent that branch is overwritten.

        Args:
            str(container_name): name of container
            str(key): same as dict
            *(value): same as dict
        Kwargs:
            str(path): path to branch-part; defaults to self.data['inventory']
        Raises:
            AnsibleParserError
        Returns:
            None"""
        if not path:
            path = self.data['inventory']
        if container_name not in path:
            path[container_name] = {}
        try:
            if isinstance(value, dict) and key in path[container_name]:
                # NOTE(review): this replaces the whole container entry with
                # the merged value instead of updating
                # path[container_name][key] -- confirm intended.
                path[container_name] = dict_merge(value, path[container_name][key])
            else:
                path[container_name][key] = value
        except KeyError as err:
            raise AnsibleParserError("Unable to store Informations: {0}".format(to_native(err)))
    def extract_information_from_container_configs(self):
        """Process configuration information

        Distills the raw API data in ``self.data['containers']`` into the
        per-container 'inventory' branch used to build the Ansible inventory.

        Args:
            None
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        # create branch "inventory"
        if 'inventory' not in self.data:
            self.data['inventory'] = {}
        for container_name in self.data['containers']:
            self._set_data_entry(container_name, 'os', self._get_data_entry(
                'containers/{0}/containers/metadata/config/image.os'.format(container_name)))
            self._set_data_entry(container_name, 'release', self._get_data_entry(
                'containers/{0}/containers/metadata/config/image.release'.format(container_name)))
            self._set_data_entry(container_name, 'version', self._get_data_entry(
                'containers/{0}/containers/metadata/config/image.version'.format(container_name)))
            self._set_data_entry(container_name, 'profile', self._get_data_entry(
                'containers/{0}/containers/metadata/profiles'.format(container_name)))
            self._set_data_entry(container_name, 'location', self._get_data_entry(
                'containers/{0}/containers/metadata/location'.format(container_name)))
            self._set_data_entry(container_name, 'state', self._get_data_entry(
                'containers/{0}/containers/metadata/config/volatile.last_state.power'.format(container_name)))
            self._set_data_entry(container_name, 'network_interfaces', self.extract_network_information_from_container_config(container_name))
            self._set_data_entry(container_name, 'preferred_interface', self.get_prefered_container_network_interface(container_name))
            self._set_data_entry(container_name, 'vlan_ids', self.get_container_vlans(container_name))
def build_inventory_network(self, container_name):
"""Add the network interfaces of the container to the inventory
Logic:
- if the container have no interface -> 'ansible_connection: local'
- get preferred_interface & prefered_container_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
- first Interface from: network_interfaces prefered_container_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
Args:
str(container_name): name of container
Kwargs:
None
Raises:
None
Returns:
None"""
def interface_selection(container_name):
"""Select container Interface for inventory
Logic:
- get preferred_interface & prefered_container_network_family -> str(IP)
- first Interface from: network_interfaces prefered_container_network_family -> str(IP)
Args:
str(container_name): name of container
Kwargs:
None
Raises:
None
Returns:
dict(interface_name: ip)"""
prefered_interface = self._get_data_entry('inventory/{0}/preferred_interface'.format(container_name)) # name or None
prefered_container_network_family = self.prefered_container_network_family
ip_address = ''
if prefered_interface:
interface = self._get_data_entry('inventory/{0}/network_interfaces/{1}'.format(container_name, prefered_interface))
for config in interface:
if config['family'] == prefered_container_network_family:
ip_address = config['address']
break
else:
interface = self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name))
for config in interface:
if config['family'] == prefered_container_network_family:
ip_address = config['address']
break
return ip_address
if self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name)): # container have network interfaces
if self._get_data_entry('inventory/{0}/preferred_interface'.format(container_name)): # container have a preferred interface
self.inventory.set_variable(container_name, 'ansible_connection', 'ssh')
self.inventory.set_variable(container_name, 'ansible_host', interface_selection(container_name))
else:
self.inventory.set_variable(container_name, 'ansible_connection', 'local')
    def build_inventory_hosts(self):
        """Build host-part dynamic inventory

        Build the host-part of the dynamic inventory.
        Add Hosts and host_vars to the inventory.

        Args:
            None
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        # NOTE(review): the .lower() calls below raise AttributeError when
        # _get_data_entry() returns None (e.g. missing os/release/state in the
        # API data) -- confirm whether those keys are guaranteed by lxd.
        for container_name in self.data['inventory']:
            # Only consider containers that match the "state" filter, if self.state is not None
            if self.filter:
                if self.filter.lower() != self._get_data_entry('inventory/{0}/state'.format(container_name)).lower():
                    continue
            # add container
            self.inventory.add_host(container_name)
            # add network informations
            self.build_inventory_network(container_name)
            # add os
            self.inventory.set_variable(container_name, 'ansible_lxd_os', self._get_data_entry('inventory/{0}/os'.format(container_name)).lower())
            # add release
            self.inventory.set_variable(container_name, 'ansible_lxd_release', self._get_data_entry('inventory/{0}/release'.format(container_name)).lower())
            # add profile
            self.inventory.set_variable(container_name, 'ansible_lxd_profile', self._get_data_entry('inventory/{0}/profile'.format(container_name)))
            # add state
            self.inventory.set_variable(container_name, 'ansible_lxd_state', self._get_data_entry('inventory/{0}/state'.format(container_name)).lower())
            # add location information
            if self._get_data_entry('inventory/{0}/location'.format(container_name)) != "none":  # wrong type by lxd 'none' != 'None'
                self.inventory.set_variable(container_name, 'ansible_lxd_location', self._get_data_entry('inventory/{0}/location'.format(container_name)))
            # add VLAN_ID information
            if self._get_data_entry('inventory/{0}/vlan_ids'.format(container_name)):
                self.inventory.set_variable(container_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(container_name)))
    def build_inventory_groups_location(self, group_name):
        """create group by attribute: location

        Args:
            str(group_name): Group name
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        # maybe we just want to expand one group
        if group_name not in self.inventory.groups:
            self.inventory.add_group(group_name)
        for container_name in self.inventory.hosts:
            # NOTE(review): unlike the os/release builders, the group's
            # 'attribute' value is never compared here -- every host that has
            # any location is added. Confirm this is intended.
            if 'ansible_lxd_location' in self.inventory.get_host(container_name).get_vars():
                self.inventory.add_child(group_name, container_name)
def build_inventory_groups_pattern(self, group_name):
"""create group by name pattern
Args:
str(group_name): Group name
Kwargs:
None
Raises:
None
Returns:
None"""
# maybe we just want to expand one group
if group_name not in self.inventory.groups:
self.inventory.add_group(group_name)
regex_pattern = self.groupby[group_name].get('attribute')
for container_name in self.inventory.hosts:
result = re.search(regex_pattern, container_name)
if result:
self.inventory.add_child(group_name, container_name)
    def build_inventory_groups_network_range(self, group_name):
        """check if IP is in network-class

        Adds every host that has at least one address inside the group's
        'attribute' network range.

        Args:
            str(group_name): Group name
        Kwargs:
            None
        Raises:
            AnsibleParserError: 'attribute' is not a parsable network range
        Returns:
            None"""
        # maybe we just want to expand one group
        if group_name not in self.inventory.groups:
            self.inventory.add_group(group_name)
        try:
            network = ipaddress.ip_network(to_text(self.groupby[group_name].get('attribute')))
        except ValueError as err:
            raise AnsibleParserError(
                'Error while parsing network range {0}: {1}'.format(self.groupby[group_name].get('attribute'), to_native(err)))
        for container_name in self.inventory.hosts:
            if self.data['inventory'][container_name].get('network_interfaces') is not None:
                # walk interface -> address dicts (see
                # extract_network_information_from_container_config)
                for interface in self.data['inventory'][container_name].get('network_interfaces'):
                    for interface_family in self.data['inventory'][container_name].get('network_interfaces')[interface]:
                        try:
                            address = ipaddress.ip_address(to_text(interface_family['address']))
                            if address.version == network.version and address in network:
                                self.inventory.add_child(group_name, container_name)
                        except ValueError:
                            # Ignore invalid IP addresses returned by lxd
                            pass
    def build_inventory_groups_os(self, group_name):
        """create group by attribute: os

        Hosts whose 'ansible_lxd_os' var equals the group's (lower-cased)
        'attribute' are added to the group.

        Args:
            str(group_name): Group name
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        # maybe we just want to expand one group
        if group_name not in self.inventory.groups:
            self.inventory.add_group(group_name)
        # only hosts that carry the os var at all
        gen_containers = [
            container_name for container_name in self.inventory.hosts
            if 'ansible_lxd_os' in self.inventory.get_host(container_name).get_vars()]
        for container_name in gen_containers:
            if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(container_name).get_vars().get('ansible_lxd_os'):
                self.inventory.add_child(group_name, container_name)
def build_inventory_groups_release(self, group_name):
"""create group by attribute: release
Args:
str(group_name): Group name
Kwargs:
None
Raises:
None
Returns:
None"""
# maybe we just want to expand one group
if group_name not in self.inventory.groups:
self.inventory.add_group(group_name)
gen_containers = [
container_name for container_name in self.inventory.hosts
if 'ansible_lxd_release' in self.inventory.get_host(container_name).get_vars()]
for container_name in gen_containers:
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(container_name).get_vars().get('ansible_lxd_release'):
self.inventory.add_child(group_name, container_name)
    def build_inventory_groups_profile(self, group_name):
        """create group by attribute: profile

        Hosts whose 'ansible_lxd_profile' list contains the group's
        (lower-cased) 'attribute' are added to the group.

        Args:
            str(group_name): Group name
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        # maybe we just want to expand one group
        if group_name not in self.inventory.groups:
            self.inventory.add_group(group_name)
        gen_containers = [
            container_name for container_name in self.inventory.hosts.keys()
            if 'ansible_lxd_profile' in self.inventory.get_host(container_name).get_vars().keys()]
        for container_name in gen_containers:
            # membership test: profile var holds the container's profile list
            if self.groupby[group_name].get('attribute').lower() in self.inventory.get_host(container_name).get_vars().get('ansible_lxd_profile'):
                self.inventory.add_child(group_name, container_name)
    def build_inventory_groups_vlanid(self, group_name):
        """create group by attribute: vlanid

        Hosts whose 'ansible_lxd_vlan_ids' dict contains the group's
        'attribute' as one of its values are added to the group.

        Args:
            str(group_name): Group name
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        # maybe we just want to expand one group
        if group_name not in self.inventory.groups:
            self.inventory.add_group(group_name)
        gen_containers = [
            container_name for container_name in self.inventory.hosts.keys()
            if 'ansible_lxd_vlan_ids' in self.inventory.get_host(container_name).get_vars().keys()]
        for container_name in gen_containers:
            # vlan_ids is a {network: vlan_id} dict (see get_container_vlans)
            if self.groupby[group_name].get('attribute') in self.inventory.get_host(container_name).get_vars().get('ansible_lxd_vlan_ids').values():
                self.inventory.add_child(group_name, container_name)
def build_inventory_groups(self):
"""Build group-part dynamic inventory
Build the group-part of the dynamic inventory.
Add groups to the inventory.
Args:
None
Kwargs:
None
Raises:
None
Returns:
None"""
def group_type(group_name):
"""create groups defined by lxd.yml or defaultvalues
create groups defined by lxd.yml or defaultvalues
supportetd:
* 'location'
* 'pattern'
* 'network_range'
* 'os'
* 'release'
* 'profile'
* 'vlanid'
Args:
str(group_name): Group name
Kwargs:
None
Raises:
None
Returns:
None"""
# Due to the compatibility with python 2 no use of map
if self.groupby[group_name].get('type') == 'location':
self.build_inventory_groups_location(group_name)
elif self.groupby[group_name].get('type') == 'pattern':
self.build_inventory_groups_pattern(group_name)
elif self.groupby[group_name].get('type') == 'network_range':
self.build_inventory_groups_network_range(group_name)
elif self.groupby[group_name].get('type') == 'os':
self.build_inventory_groups_os(group_name)
elif self.groupby[group_name].get('type') == 'release':
self.build_inventory_groups_release(group_name)
elif self.groupby[group_name].get('type') == 'profile':
self.build_inventory_groups_profile(group_name)
elif self.groupby[group_name].get('type') == 'vlanid':
self.build_inventory_groups_vlanid(group_name)
else:
raise AnsibleParserError('Unknown group type: {0}'.format(to_native(group_name)))
if self.groupby:
for group_name in self.groupby:
if not group_name.isalnum():
raise AnsibleParserError('Invalid character(s) in groupname: {0}'.format(to_native(group_name)))
group_type(group_name)
    def build_inventory(self):
        """Build dynamic inventory

        Build the dynamic inventory: hosts first, then groups (the group
        builders rely on the host vars set by build_inventory_hosts()).

        Args:
            None
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        self.build_inventory_hosts()
        self.build_inventory_groups()
    def _populate(self):
        """Return the hosts and groups

        Fetch container and network data from the lxd socket (unless a test
        injected ``self.data``), post-process it and build the inventory.

        Args:
            None
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        if len(self.data) == 0:  # If no data is injected by unittests open socket
            self.socket = self._connect_to_socket()
            self.get_container_data(self._get_containers())
            self.get_network_data(self._get_networks())
        self.extract_information_from_container_configs()
        # self.display.vvv(self.save_json_data([os.path.abspath(__file__)]))
        self.build_inventory()
    def parse(self, inventory, loader, path, cache):
        """Return dynamic inventory from source

        Returns the processed inventory from the lxd import

        Args:
            str(inventory): inventory object with existing data and
                            the methods to add hosts/groups/variables
                            to inventory
            str(loader): Ansible's DataLoader
            str(path): path to the config
            bool(cache): use or avoid caches (ignored: the base class is
                         always called with cache=False)
        Kwargs:
            None
        Raises:
            AnsibleParserError
        Returns:
            None"""
        super(InventoryModule, self).parse(inventory, loader, path, cache=False)
        # Read the inventory YAML file
        self._read_config_data(path)
        try:
            self.client_key = self.get_option('client_key')
            self.client_cert = self.get_option('client_cert')
            self.debug = self.DEBUG
            self.data = {}  # store for inventory-data
            self.groupby = self.get_option('groupby')
            self.plugin = self.get_option('plugin')
            self.prefered_container_network_family = self.get_option('prefered_container_network_family')
            self.prefered_container_network_interface = self.get_option('prefered_container_network_interface')
            if self.get_option('state').lower() == 'none':  # none in config is str()
                self.filter = None
            else:
                self.filter = self.get_option('state').lower()
            self.trust_password = self.get_option('trust_password')
            self.url = self.get_option('url')
        # NOTE(review): the broad 'except Exception' masks the real cause of
        # any option error behind a generic message -- consider narrowing.
        except Exception as err:
            raise AnsibleParserError(
                'All correct options required: {0}'.format(to_native(err)))
        # Call our internal helper to populate the dynamic inventory
        self._populate()

Some files were not shown because too many files have changed in this diff Show More