Mirror of https://github.com/ansible-collections/community.general.git
(synced 2026-04-28 17:36:49 +00:00)

Compare commits: 101 commits
| SHA1 |
|---|
| 3f2111582d |
| bd8634e04e |
| 1ae57fc5dd |
| 1e5e0824d2 |
| 7eaf795774 |
| 3dc25edeac |
| a67ee6cead |
| 9c5461dc12 |
| 0b59a71ae7 |
| 720de141b5 |
| 7ec6025690 |
| 53a5cdaed7 |
| 693efb35b3 |
| 07cd51a33b |
| c80416164b |
| a61bc5ab34 |
| 8ac8fa0aa9 |
| b76994ee6e |
| 746bd3ea5d |
| 68baf56ea6 |
| 87377dd23f |
| 29f028e33b |
| 196e8fe4e3 |
| 83c6d18bc0 |
| 1314b0d7b2 |
| be94a014c8 |
| 039c3da7dc |
| 2480250f1b |
| 860f0e12c0 |
| 2f56fd7b2a |
| 084879632a |
| 4eef56b7b3 |
| 13929acf02 |
| 070bcf80c4 |
| 0cf2a5ad05 |
| 76a64ea733 |
| 115eab2cfa |
| dbba813e23 |
| 7daf78962b |
| cf9fff5238 |
| d8d68babe4 |
| 3f46cdc588 |
| ea530784b8 |
| dc2fa05b1f |
| b2e51272ad |
| afba9a11af |
| c3ac479ae2 |
| 7e367244f7 |
| 331d2c7651 |
| b35a262378 |
| 7d400663b6 |
| 0d0884b069 |
| dd400e8c21 |
| a60f9bc78b |
| 47714ecf79 |
| d15ed4135b |
| bd61228e40 |
| 26d7c28b33 |
| 2e533daffa |
| 6c50119eab |
| bc3435b993 |
| 370f5d8082 |
| e77c5413c9 |
| 800ee1bae0 |
| 8de8d21062 |
| 81e71b5034 |
| 44ce63ed85 |
| a3c9c688b9 |
| a332ed4429 |
| 91571f8bff |
| 43856eaa6f |
| ae87b5479a |
| 42cd462780 |
| d871378574 |
| 983b292399 |
| 6831aa5501 |
| 2d8a94a459 |
| f721e76fdc |
| 3eadb9d637 |
| 033582b696 |
| 974997594f |
| fa8ce6dea8 |
| 1d90e91528 |
| a90e2c8002 |
| c506375f2a |
| 4def9439bd |
| 023654473b |
| a216f15dd9 |
| f613983cb4 |
| c22199794d |
| 24b1d92e84 |
| 4bc44e4062 |
| 06fd6d8742 |
| dd0ae4a003 |
| 646ca74810 |
| d60c107818 |
| ef2d14f24e |
| b3cde9b8a4 |
| dc4222df0d |
| b9a89d6d0f |
| f48913d91b |
@@ -13,25 +13,13 @@ pr:
    - stable-*

schedules:
  - cron: 0 8 * * *
    displayName: Nightly (main)
  - cron: 0 9 * * *
    displayName: Nightly
    always: true
    branches:
      include:
        - main
  - cron: 0 10 * * *
    displayName: Nightly (active stable branches)
    always: true
    branches:
      include:
        - stable-2
        - stable-3
  - cron: 0 11 * * 0
    displayName: Weekly (old stable branches)
    always: true
    branches:
      include:
        - stable-1
        - stable-*

variables:
  - name: checkoutPath

@@ -48,7 +36,7 @@ variables:

resources:
  containers:
    - container: default
      image: quay.io/ansible/azure-pipelines-test-container:1.9.0
      image: quay.io/ansible/azure-pipelines-test-container:1.8.0

pool: Standard

@@ -68,19 +56,6 @@ stages:
            - test: 3
            - test: 4
            - test: extra
  - stage: Sanity_2_11
    displayName: Sanity 2.11
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: 2.11/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
  - stage: Sanity_2_10
    displayName: Sanity 2.10
    dependsOn: []

@@ -124,23 +99,6 @@ stages:
            - test: 3.7
            - test: 3.8
            - test: 3.9
            - test: '3.10'
  - stage: Units_2_11
    displayName: Units 2.11
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.11/units/{0}/1
          targets:
            - test: 2.6
            - test: 2.7
            - test: 3.5
            - test: 3.6
            - test: 3.7
            - test: 3.8
            - test: 3.9
  - stage: Units_2_10
    displayName: Units 2.10
    dependsOn: []

@@ -188,33 +146,14 @@ stages:
              test: rhel/7.9
            - name: RHEL 8.3
              test: rhel/8.3
            - name: FreeBSD 11.4
              test: freebsd/11.4
            - name: FreeBSD 12.2
              test: freebsd/12.2
            - name: FreeBSD 13.0
              test: freebsd/13.0
          groups:
            - 1
            - 2
            - 3
  - stage: Remote_2_11
    displayName: Remote 2.11
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.11/{0}
          targets:
            - name: macOS 11.1
              test: macos/11.1
            - name: RHEL 7.9
              test: rhel/7.9
            - name: RHEL 8.3
              test: rhel/8.3
            - name: FreeBSD 12.2
              test: freebsd/12.2
          groups:
            - 1
            - 2
  - stage: Remote_2_10
    displayName: Remote 2.10
    dependsOn: []

@@ -269,10 +208,10 @@ stages:
              test: centos7
            - name: CentOS 8
              test: centos8
            - name: Fedora 32
              test: fedora32
            - name: Fedora 33
              test: fedora33
            - name: Fedora 34
              test: fedora34
            - name: openSUSE 15 py2
              test: opensuse15py2
            - name: openSUSE 15 py3

@@ -285,25 +224,6 @@ stages:
            - 1
            - 2
            - 3
  - stage: Docker_2_11
    displayName: Docker 2.11
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.11/linux/{0}
          targets:
            - name: CentOS 8
              test: centos8
            - name: Fedora 33
              test: fedora33
            - name: openSUSE 15 py3
              test: opensuse15
            - name: Ubuntu 20.04
              test: ubuntu2004
          groups:
            - 2
            - 3
  - stage: Docker_2_10
    displayName: Docker 2.10
    dependsOn: []

@@ -350,16 +270,6 @@ stages:
        parameters:
          nameFormat: Python {0}
          testFormat: devel/cloud/{0}/1
          targets:
            - test: 3.8
  - stage: Cloud_2_11
    displayName: Cloud 2.11
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.11/cloud/{0}/1
          targets:
            - test: 2.7
            - test: 3.6

@@ -389,22 +299,17 @@ stages:
      - Sanity_devel
      - Sanity_2_9
      - Sanity_2_10
      - Sanity_2_11
      - Units_devel
      - Units_2_9
      - Units_2_10
      - Units_2_11
      - Remote_devel
      - Remote_2_9
      - Remote_2_10
      - Remote_2_11
      - Docker_devel
      - Docker_2_9
      - Docker_2_10
      - Docker_2_11
      - Cloud_devel
      - Cloud_2_9
      - Cloud_2_10
      - Cloud_2_11
    jobs:
      - template: templates/coverage.yml

@@ -7,7 +7,7 @@ set -o pipefail -eu

output_path="$1"

curl --silent --show-error https://ansible-ci-files.s3.us-east-1.amazonaws.com/codecov/codecov.sh > codecov.sh
curl --silent --show-error https://codecov.io/bash > codecov.sh

for file in "${output_path}"/reports/coverage*.xml; do
    name="${file}"

.github/BOTMETA.yml (vendored, 113 lines changed)

@@ -4,16 +4,17 @@ files:
    support: community
  $actions:
    labels: action
  $actions/system/iptables_state.py:
    maintainers: quidame
  $actions/system/shutdown.py:
  $actions/aireos.py:
    labels: aireos cisco networking
  $actions/ironware.py:
    maintainers: paulquack
    labels: ironware networking
  $actions/shutdown.py:
    maintainers: nitzmahone samdoran aminvakil
  $becomes/:
    labels: become
  $callbacks/:
    labels: callbacks
  $callbacks/loganalytics.py:
    maintainers: zhcli
  $callbacks/logstash.py:
    maintainers: ujenmr
  $callbacks/say.py:

@@ -52,24 +53,14 @@ files:
  $doc_fragments/xenserver.py:
    maintainers: bvitnik
    labels: xenserver
  $filters/dict.py:
    maintainers: felixfontein
  $filters/dict_kv.py:
    maintainers: giner
  $filters/from_csv.py:
    maintainers: Ajpantuso
  $filters/hashids:
    maintainers: Ajpantuso
  $filters/jc.py:
    maintainers: kellyjonbrazil
  $filters/list.py:
    maintainers: vbotka
  $filters/path_join_shim.py:
    maintainers: felixfontein
  $filters/time.py:
    maintainers: resmo
  $filters/version_sort.py:
    maintainers: ericzolf
  $httpapis/:
    maintainers: $team_networking
    labels: networking

@@ -83,10 +74,6 @@ files:
    maintainers: $team_linode
    labels: cloud linode
    keywords: linode dynamic inventory script
  $inventories/lxd.py:
    maintainers: conloos
  $inventories/proxmox.py:
    maintainers: $team_virt ilijamt
  $inventories/scaleway.py:
    maintainers: $team_scaleway
    labels: cloud scaleway

@@ -115,8 +102,6 @@ files:
  $lookups/nios:
    maintainers: $team_networking sganesh-infoblox
    labels: infoblox networking
  $lookups/random_string.py:
    maintainers: Akasurde
  $module_utils/:
    labels: module_utils
  $module_utils/gitlab.py:

@@ -139,9 +124,6 @@ files:
  $module_utils/memset.py:
    maintainers: glitchcrab
    labels: cloud memset
  $module_utils/mh/:
    maintainers: russoz
    labels: module_helper
  $module_utils/module_helper.py:
    maintainers: russoz
    labels: module_helper

@@ -157,6 +139,7 @@ files:
  $module_utils/redfish_utils.py:
    maintainers: $team_redfish
    labels: redfish_utils
  $module_utils/remote_management/dellemc/: rajeevarakkal
  $module_utils/remote_management/lxca/common.py: navalkp prabhosa
  $module_utils/scaleway.py:
    maintainers: $team_scaleway

@@ -192,14 +175,14 @@ files:
    maintainers: zbal
  $modules/cloud/lxc/lxc_container.py:
    maintainers: cloudnull
  $modules/cloud/lxc/lxc_profile.py:
    maintainers: conloos
  $modules/cloud/lxd/:
    ignore: hnakamur
  $modules/cloud/memset/:
    maintainers: glitchcrab
  $modules/cloud/misc/cloud_init_data_facts.py:
    maintainers: resmo
  $modules/cloud/misc/helm.py:
    maintainers: flaper87
  $modules/cloud/misc/proxmox.py:
    maintainers: $team_virt UnderGreen
    labels: proxmox virt

@@ -227,7 +210,7 @@ files:
  $modules/cloud/misc/:
    ignore: ryansb
  $modules/cloud/misc/terraform.py:
    maintainers: m-yosefpor rainerleber
    maintainers: m-yosefpor
  $modules/cloud/misc/xenserver_facts.py:
    maintainers: caphrim007 cheese
    labels: xenserver_facts

@@ -311,7 +294,6 @@ files:
    maintainers: bvitnik
  $modules/clustering/consul/:
    maintainers: $team_consul
    ignore: colin-nolan
  $modules/clustering/etcd3.py:
    maintainers: evrardjp
    ignore: vfauth

@@ -344,14 +326,10 @@ files:
  $modules/database/mssql/mssql_db.py:
    maintainers: vedit Jmainguy kenichi-ogawa-1988
    labels: mssql_db
  $modules/database/saphana/hana_query.py:
    maintainers: rainerleber
  $modules/database/vertica/:
    maintainers: dareko
  $modules/files/archive.py:
    maintainers: bendoh
  $modules/files/filesize.py:
    maintainers: quidame
  $modules/files/ini_file.py:
    maintainers: jpmens noseka1
  $modules/files/iso_extract.py:

@@ -365,6 +343,8 @@ files:
    maintainers: dagwieers magnus919 tbielawa cmprescott sm4rk0
    labels: m:xml xml
    ignore: magnus919
  $modules/identity/onepassword_facts.py:
    maintainers: Rylon
  $modules/identity/ipa/:
    maintainers: $team_ipa
  $modules/identity/ipa/ipa_pwpolicy.py:

@@ -377,8 +357,6 @@ files:
    maintainers: $team_keycloak
  $modules/identity/keycloak/keycloak_group.py:
    maintainers: adamgoossens
  $modules/identity/keycloak/keycloak_realm.py:
    maintainers: kris2kris
  $modules/identity/onepassword_info.py:
    maintainers: Rylon
  $modules/identity/opendj/opendj_backendprop.py:

@@ -440,8 +418,6 @@ files:
    maintainers: andsens
  $modules/monitoring/spectrum_device.py:
    maintainers: orgito
  $modules/monitoring/spectrum_model_attrs.py:
    maintainers: tgates81
  $modules/monitoring/stackdriver.py:
    maintainers: bwhaley
  $modules/monitoring/statsd.py:

@@ -458,7 +434,7 @@ files:
  $modules/net_tools/dnsmadeeasy.py:
    maintainers: briceburg
  $modules/net_tools/haproxy.py:
    maintainers: ravibhure Normo
    maintainers: ravibhure
  $modules/net_tools/:
    maintainers: nerzhul
  $modules/net_tools/infinity/infinity.py:

@@ -471,6 +447,8 @@ files:
    maintainers: akostyuk
  $modules/net_tools/ipwcli_dns.py:
    maintainers: cwollinger
  $modules/net_tools/ldap/ldap_attr.py:
    maintainers: jtyr
  $modules/net_tools/ldap/ldap_attrs.py:
    maintainers: drybjed jtyr noles
  $modules/net_tools/ldap/ldap_entry.py:

@@ -566,10 +544,9 @@ files:
  $modules/packaging/language/bundler.py:
    maintainers: thoiberg
  $modules/packaging/language/composer.py:
    maintainers: dmtrs
    ignore: resmo
    maintainers: dmtrs resmo
  $modules/packaging/language/cpanm.py:
    maintainers: fcuny russoz
    maintainers: fcuny
  $modules/packaging/language/easy_install.py:
    maintainers: mattupstate
  $modules/packaging/language/gem.py:

@@ -649,9 +626,6 @@ files:
    maintainers: elasticdog indrajitr tchernomax
    labels: pacman
    ignore: elasticdog
  $modules/packaging/os/pacman_key.py:
    maintainers: grawlinson
    labels: pacman
  $modules/packaging/os/pkgin.py:
    maintainers: $team_solaris L2G jasperla szinck martinm82
    labels: pkgin solaris

@@ -717,11 +691,15 @@ files:
    labels: zypper
    ignore: dirtyharrycallahan robinro
  $modules/packaging/os/zypper_repository.py:
    maintainers: $team_suse
    labels: zypper
    ignore: matze
    maintainers: matze
  $modules/remote_management/cobbler/:
    maintainers: dagwieers
  $modules/remote_management/dellemc/:
    maintainers: rajeevarakkal
  $modules/remote_management/dellemc/idrac_server_config_profile.py:
    maintainers: jagadeeshnv
  $modules/remote_management/dellemc/ome_device_info.py:
    maintainers: Sajna-Shetty
  $modules/remote_management/hpilo/:
    maintainers: haad
    ignore: dagwieers

@@ -730,8 +708,6 @@ files:
    labels: cisco
  $modules/remote_management/ipmi/:
    maintainers: bgaifullin cloudnull
  $modules/remote_management/lenovoxcc/:
    maintainers: panyy3 renxulei
  $modules/remote_management/lxca/:
    maintainers: navalkp prabhosa
  $modules/remote_management/manageiq/:

@@ -741,6 +717,8 @@ files:
    maintainers: evertmulder
  $modules/remote_management/manageiq/manageiq_tenant.py:
    maintainers: evertmulder
  $modules/remote_management/oneview/oneview_datacenter_facts.py:
    maintainers: aalexmonteiro madhav-bharadwaj ricardogpsf soodpr
  $modules/remote_management/oneview/:
    maintainers: adriane-cardozo fgbulsoni tmiotto
  $modules/remote_management/oneview/oneview_datacenter_info.py:

@@ -750,7 +728,7 @@ files:
  $modules/remote_management/oneview/oneview_fcoe_network.py:
    maintainers: fgbulsoni
  $modules/remote_management/redfish/:
    maintainers: $team_redfish
    maintainers: $team_redfish billdodd
    ignore: jose-delarosa
  $modules/remote_management/stacki/stacki_host.py:
    maintainers: bsanders bbyhuy

@@ -773,8 +751,6 @@ files:
    ignore: erydo
  $modules/source_control/github/github_release.py:
    maintainers: adrianmoisey
  $modules/source_control/github/github_repo.py:
    maintainers: atorrescogollo
  $modules/source_control/github/:
    maintainers: stpierre
  $modules/source_control/gitlab/:

@@ -789,6 +765,12 @@ files:
    maintainers: yeukhon
  $modules/storage/emc/emc_vnx_sg_member.py:
    maintainers: remixtj
  $modules/storage/glusterfs/:
    maintainers: devyanikota
  $modules/storage/glusterfs/gluster_peer.py:
    maintainers: sac
  $modules/storage/glusterfs/gluster_volume.py:
    maintainers: rosmo
  $modules/storage/hpe3par/ss_3par_cpg.py:
    maintainers: farhan7500 gautamphegde
  $modules/storage/ibm/:

@@ -810,6 +792,9 @@ files:
    maintainers: johanwiren
  $modules/storage/zfs/zfs_delegate_admin.py:
    maintainers: natefoo
  $modules/system/python_requirements_facts.py:
    maintainers: willthames
    ignore: ryansb
  $modules/system/aix:
    maintainers: $team_aix
    labels: aix

@@ -848,10 +833,8 @@ files:
    labels: interfaces_file
  $modules/system/iptables_state.py:
    maintainers: quidame
  $modules/system/shutdown.py:
    maintainers: nitzmahone samdoran aminvakil
  $modules/system/java_cert.py:
    maintainers: haad absynth76
    maintainers: haad
  $modules/system/java_keystore.py:
    maintainers: Mogztter
  $modules/system/kernel_blacklist.py:

@@ -936,12 +919,16 @@ files:
    maintainers: ahtik ovcharenko pyykkis
    labels: ufw
  $modules/system/vdo.py:
    maintainers: rhawalsh
    maintainers: bgurney-rh
  $modules/system/xfconf.py:
    maintainers: russoz jbenden
    labels: xfconf
  $modules/system/xfs_quota.py:
    maintainers: bushvin
  $modules/web_infrastructure/jenkins_job_facts.py:
    maintainers: stpierre
  $modules/web_infrastructure/nginx_status_facts.py:
    maintainers: resmo
  $modules/web_infrastructure/apache2_mod_proxy.py:
    maintainers: oboukili
  $modules/web_infrastructure/apache2_module.py:

@@ -1017,27 +1004,27 @@ macros:
  terminals: plugins/terminal
  team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross
  team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo
  team_consul: sgargan
  team_consul: colin-nolan sgargan
  team_cyberark_conjur: jvanderhoof ryanprior
  team_e_spirit: MatrixCrawler getjack
  team_flatpak: JayKayy oolongbrothers
  team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman metanovii
  team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman
  team_hpux: bcoca davx8342
  team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2
  team_ipa: Akasurde Nosmoht fxfitz justchris1
  team_ipa: Akasurde Nosmoht fxfitz
  team_jboss: Wolfant jairojunior wbrefvem
  team_keycloak: eikef ndclt
  team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber
  team_linode: InTheCloudDan decentral1se displague rmcintosh
  team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr
  team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder
  team_netapp: amit0701 carchi8py hulquest lmprice lonico ndswartz schmots1
  team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip
  team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding
  team_opennebula: ilicmilan meerkampdvv rsmontero xorel
  team_oracle: manojmeda mross22 nalsaber
  team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
  team_redfish: mraineri tomasg2012 xmadsen renxulei
  team_redfish: billdodd mraineri tomasg2012
  team_rhn: FlossWare alikins barnabycourt vritant
  team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone sieben
  team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
  team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom sealor
  team_virt: joshainglis karmab tleguern Thulium-Drake Ajpantuso
  team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom
  team_virt: joshainglis karmab Aversiste Thulium-Drake

.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 135 lines changed)

@@ -1,135 +0,0 @@
---
name: Bug report
description: Create a report to help us improve

body:
- type: markdown
  attributes:
    value: |
      ⚠
      Verify first that your issue is not [already reported on GitHub][issue search].
      Also test if the latest release and devel branch are affected too.
      *Complete **all** sections as described, this form is processed automatically.*

      [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues

- type: textarea
  attributes:
    label: Summary
    description: Explain the problem briefly below.
    placeholder: >-
      When I try to do X with the collection from the main branch on GitHub, Y
      breaks in a way Z under the env E. Here are all the details I know
      about this problem...
  validations:
    required: true

- type: dropdown
  attributes:
    label: Issue Type
    # FIXME: Once GitHub allows defining the default choice, update this
    options:
    - Bug Report
  validations:
    required: true

- type: textarea
  attributes:
    # For smaller collections we could use a multi-select and hardcode the list
    # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins
    # Select from list, filter as you type (`mysql` would only show the 3 mysql components)
    # OR freeform - doesn't seem to be supported in adaptivecards
    label: Component Name
    description: >-
      Write the short name of the module, plugin, task or feature below,
      *use your best guess if unsure*.
    placeholder: dnf, apt, yum, pip, user etc.
  validations:
    required: true

- type: textarea
  attributes:
    label: Ansible Version
    description: >-
      Paste verbatim output from `ansible --version` between
      triple backticks.
    value: |
      ```console (paste below)
      $ ansible --version

      ```
  validations:
    required: true

- type: textarea
  attributes:
    label: Configuration
    description: >-
      If this issue has an example piece of YAML that can help to reproduce this problem, please provide it.
      This can be a piece of YAML from, e.g., an automation, script, scene or configuration.
      Paste verbatim output from `ansible-config dump --only-changed` between quotes.
    value: |
      ```console (paste below)
      $ ansible-config dump --only-changed

      ```

- type: textarea
  attributes:
    label: OS / Environment
    description: >-
      Provide all relevant information below, e.g. target OS versions,
      network device firmware, etc.
    placeholder: RHEL 8, CentOS Stream etc.
  validations:
    required: false

- type: textarea
  attributes:
    label: Steps to Reproduce
    description: |
      Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also paste any playbooks, configs and commands you used.

      **HINT:** You can paste https://gist.github.com links for larger files.
    value: |
      <!--- Paste example playbooks or commands between quotes below -->
      ```yaml (paste below)

      ```
  validations:
    required: true

- type: textarea
  attributes:
    label: Expected Results
    description: >-
      Describe what you expected to happen when running the steps above.
    placeholder: >-
      I expected X to happen because I assumed Y and was shocked
      that it did not.
  validations:
    required: true

- type: textarea
  attributes:
    label: Actual Results
    description: |
      Describe what actually happened. If possible run with extra verbosity (`-vvvv`).

      Paste verbatim command output between quotes.
    value: |
      ```console (paste below)

      ```
- type: checkboxes
  attributes:
    label: Code of Conduct
    description: |
      Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
    options:
    - label: I agree to follow the Ansible Code of Conduct
      required: true
...

.github/ISSUE_TEMPLATE/config.yml (vendored, 27 lines changed)

@@ -1,27 +0,0 @@
---
# Ref: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser
blank_issues_enabled: false  # default: true
contact_links:
- name: Security bug report
  url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
  about: |
    Please learn how to report security vulnerabilities here.

    For all security related bugs, email security@ansible.com
    instead of using this issue tracker and you will receive
    a prompt response.

    For more information, see
    https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html
- name: Ansible Code of Conduct
  url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
  about: Be nice to other members of the community.
- name: Talks to the community
  url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information
  about: Please ask and answer usage questions here
- name: Working groups
  url: https://github.com/ansible/community/wiki
  about: Interested in improving a specific area? Become a part of a working group!
- name: For Enterprise
  url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
  about: Red Hat offers support for the Ansible Automation Platform

.github/ISSUE_TEMPLATE/documentation_report.yml (vendored, 111 lines changed)

@@ -1,111 +0,0 @@
---
name: Documentation Report
description: Ask us about docs
# NOTE: issue body is enabled to allow screenshots

body:
- type: markdown
  attributes:
    value: |
      ⚠
      Verify first that your issue is not [already reported on GitHub][issue search].
      Also test if the latest release and devel branch are affected too.
      *Complete **all** sections as described, this form is processed automatically.*

      [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues

- type: textarea
  attributes:
    label: Summary
    description: |
      Explain the problem briefly below, add suggestions to wording or structure.

      **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page?
    placeholder: >-
      I was reading the Collection documentation of version X and I'm having
      problems understanding Y. It would be very helpful if that got
      rephrased as Z.
  validations:
    required: true

- type: dropdown
  attributes:
    label: Issue Type
    # FIXME: Once GitHub allows defining the default choice, update this
    options:
    - Documentation Report
  validations:
    required: true

- type: input
  attributes:
    label: Component Name
    description: >-
      Write the short name of the rst file, module, plugin, task or
      feature below, *use your best guess if unsure*.
    placeholder: mysql_user
  validations:
    required: true

- type: textarea
  attributes:
    label: Ansible Version
    description: >-
      Paste verbatim output from `ansible --version` between
      triple backticks.
    value: |
      ```console (paste below)
      $ ansible --version

      ```
  validations:
    required: false

- type: textarea
  attributes:
    label: Configuration
    description: >-
      Paste verbatim output from `ansible-config dump --only-changed` between quotes.
    value: |
      ```console (paste below)
      $ ansible-config dump --only-changed

      ```
  validations:
    required: false

- type: textarea
  attributes:
    label: OS / Environment
    description: >-
      Provide all relevant information below, e.g. OS version,
      browser, etc.
    placeholder: Fedora 33, Firefox etc.
  validations:
    required: false

- type: textarea
  attributes:
    label: Additional Information
    description: |
      Describe how this improves the documentation, e.g. before/after situation or screenshots.

      **Tip:** It's not possible to upload the screenshot via this field directly but you can use the last textarea in this form to attach them.

      **HINT:** You can paste https://gist.github.com links for larger files.
    placeholder: >-
      When the improvement is applied, it makes it more straightforward
      to understand X.
  validations:
    required: false

- type: checkboxes
  attributes:
    label: Code of Conduct
    description: |
      Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
    options:
    - label: I agree to follow the Ansible Code of Conduct
      required: true
...

.github/ISSUE_TEMPLATE/feature_request.yml (vendored, 69 lines changed)

@@ -1,69 +0,0 @@
---
name: Feature request
description: Suggest an idea for this project

body:
- type: markdown
  attributes:
    value: |
      ⚠
      Verify first that your issue is not [already reported on GitHub][issue search].
      Also test if the latest release and devel branch are affected too.
      *Complete **all** sections as described, this form is processed automatically.*

      [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues

- type: textarea
  attributes:
    label: Summary
    description: Describe the new feature/improvement briefly below.
    placeholder: >-
      I am trying to do X with the collection from the main branch on GitHub and
      I think that implementing a feature Y would be very helpful for me and
      every other user of ansible-core because of Z.
  validations:
    required: true

- type: dropdown
  attributes:
    label: Issue Type
    # FIXME: Once GitHub allows defining the default choice, update this
    options:
    - Feature Idea
  validations:
    required: true

- type: input
  attributes:
    label: Component Name
    description: >-
      Write the short name of the module, plugin, task or feature below,
      *use your best guess if unsure*.
    placeholder: dnf, apt, yum, pip, user etc.
  validations:
    required: true

- type: textarea
  attributes:
    label: Additional Information
    description: |
      Describe how the feature would be used, why it is needed and what it would solve.

      **HINT:** You can paste https://gist.github.com links for larger files.
    value: |
      <!--- Paste example playbooks or commands between quotes below -->
      ```yaml (paste below)

      ```
  validations:
    required: false
- type: checkboxes
  attributes:
    label: Code of Conduct
    description: |
      Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
    options:
    - label: I agree to follow the Ansible Code of Conduct
      required: true
...

CHANGELOG.rst (1278 lines changed): file diff suppressed because it is too large

@@ -1,32 +0,0 @@
# Contributing

We follow the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our contributions and interactions within this repository.

If you are a committer, also refer to the [collection's committer guidelines](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md).

## Issue tracker

Whether you are looking for an opportunity to contribute or you found a bug and already know how to solve it, please go to the [issue tracker](https://github.com/ansible-collections/community.general/issues).
There you can find feature ideas to implement and reports about bugs to solve, or you can submit an issue to discuss your idea before implementing it, which can help you choose the right direction at the beginning of your work and potentially save a lot of time and effort.
Also, somebody may already have started discussing or working on implementing the same or a similar idea, so you can cooperate to create a better solution together.

* If you are interested in starting with an easy issue, look for [issues with an `easyfix` label](https://github.com/ansible-collections/community.general/labels/easyfix).
* Often issues that are waiting for contributors to pick up have [the `waiting_on_contributor` label](https://github.com/ansible-collections/community.general/labels/waiting_on_contributor).

## Open pull requests

Look through currently [open pull requests](https://github.com/ansible-collections/community.general/pulls).
You can help by reviewing them. Reviews help move pull requests to merge state. Some good pull requests cannot be merged only due to a lack of reviews. And it is always worth saying that good reviews are often more valuable than pull requests themselves.
Note that reviewing does not only mean code review, but also offering comments on new interfaces added to existing plugins/modules, interfaces of new plugins/modules, improving language (not everyone is a native English speaker), or testing bugfixes and new features!

Also, consider taking up a valuable, reviewed, but abandoned pull request: politely ask the original author whether you can complete it yourself.

* Try committing your changes with an informative but short commit message.
* All commits of a pull request branch will be squashed into one commit in the end. That does not mean you must have only one commit on your pull request, though!
* Please try not to force-push if it is not needed, so reviewers and other users looking at your pull request later can see the pull request commit history.
* Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To keep git from automatically creating merge commits during pulls, configure it to rebase instead by running `git config pull.rebase true` inside the repository checkout; see the sketch below.
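
For illustration, a minimal sketch of such a rebase-based flow (here `upstream` is assumed to be the name of your remote for ansible-collections/community.general, and `my-feature` your PR branch):

```shell
# Make pulls rebase instead of merge (run once per clone)
git config pull.rebase true

# Rebase the feature branch on the latest upstream main instead of merging
git fetch upstream
git rebase upstream/main my-feature

# The rebase rewrites history, so the branch must be force-pushed;
# --force-with-lease refuses to overwrite commits you have not fetched
git push --force-with-lease origin my-feature
```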

You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst).

If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it.

README.md (16 lines changed)

@@ -1,17 +1,15 @@
# Community General Collection

[Build Status](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
[Build Status](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
[Codecov](https://codecov.io/gh/ansible-collections/community.general)

This repo contains the `community.general` Ansible Collection. The collection includes many modules and plugins supported by the Ansible community which are not part of more specialized community collections.

You can find [documentation for this collection on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).

Please note that this collection does **not** support Windows targets. Only connection plugins included in this collection might support Windows targets, and will explicitly mention that in their documentation if they do so.

## Tested with Ansible

Tested with the current Ansible 2.9, ansible-base 2.10 and ansible-core 2.11 releases and the current development version of ansible-core. Ansible versions before 2.9.10 are not supported.
Tested with the current Ansible 2.9 and 2.10 releases and the current development version of Ansible. Ansible versions before 2.9.10 are not supported.

## External requirements

@@ -50,8 +48,6 @@ export COLLECTIONS_PATH=$(pwd)/collections:$COLLECTIONS_PATH

You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).

Also for some notes specific to this collection see [our CONTRIBUTING documentation](https://github.com/ansible-collections/community.general/blob/main/CONTRIBUTING.md).

### Running tests

See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#testing-collections).

@@ -60,10 +56,10 @@ See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collectio

We have a dedicated Working Group for Ansible development.

You can find other people interested in the following [Libera.chat](https://libera.chat/) IRC channels -
You can find other people interested in the following Freenode IRC channels -
- `#ansible` - For general use questions and support.
- `#ansible-devel` - For discussions on developer topics and code related to features or bugs in ansible-core.
- `#ansible-community` - For discussions on community topics and community meetings, and for general development questions for community collections.
- `#ansible-devel` - For discussions on developer topics and code related to features or bugs.
- `#ansible-community` - For discussions on community topics and community meetings.

For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community).

@@ -80,7 +76,7 @@ Basic instructions without release branches:

## Release notes

See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-3/CHANGELOG.rst).
See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-2/CHANGELOG.rst).

## Roadmap

File diff suppressed because it is too large

@@ -1,74 +0,0 @@
Committers Guidelines for community.general
===========================================

This document is based on the [Ansible committer guidelines](https://github.com/ansible/ansible/blob/b57444af14062ec96e0af75fdfc2098c74fe2d9a/docs/docsite/rst/community/committer_guidelines.rst) ([latest version](https://docs.ansible.com/ansible/devel/community/committer_guidelines.html)).

These are the guidelines for people with commit privileges on the Ansible Community General Collection GitHub repository. Please read the guidelines before you commit.

These guidelines apply to everyone. At the same time, this is NOT a process document. So just use good judgment. You have been given commit access because we trust your judgment.

That said, use the trust wisely.

If you abuse the trust and break components and builds, and so on, the trust level falls and you may be asked not to commit or you may lose your commit privileges.

Our workflow on GitHub
----------------------

As a committer, you may already know this, but our workflow forms a lot of our team policies. Please ensure you are aware of the following workflow steps:

* Fork the repository upon which you want to do some work to your own personal repository
* Work on the specific branch upon which you need to commit
* Create a pull request back to the collection repository and await reviews
* Adjust code as necessary based on the comments provided
* Ask someone from the other committers to do a final review and merge

Sometimes, committers merge their own pull requests. This section is a set of guidelines. If you are changing a comma in a doc or making a very minor change, you can use your best judgment. This is another trust thing. The process is critical for any major change, but for little things or getting something done quickly, use your best judgment and make sure people on the team are aware of your work.

Roles
-----
* Release managers: Merge pull requests to `stable-X` branches, create tags to do releases.
* Committers: Fine to do PRs for most things, but we should have a timebox. Hanging PRs may merge on the judgment of these devs.
* Module maintainers: Module maintainers own specific modules and have indirect commit access through the current module PR mechanisms. This is primarily [ansibullbot](https://github.com/ansibullbot)'s `shipit` mechanism.

General rules
-------------
Individuals with direct commit access to this collection repository are entrusted with powers that allow them to do a broad variety of things--probably more than we can write down. Rather than rules, treat these as general *guidelines*; individuals with this power are expected to use their best judgment.

* Do NOTs:

  - Do not commit directly.
  - Do not merge your own PRs. Someone else should have a chance to review and approve the PR merge. You have a small amount of leeway here for very minor changes.
  - Do not forget about non-standard / alternate environments. Consider the alternatives. Yes, people have bad/unusual/strange environments (like binaries from multiple init systems installed), but they are the ones who need us the most.
  - Do not drag your community team members down. Discuss the technical merits of any pull requests you review. Avoid negativity and personal comments. For more guidance on being a good community member, read the [Ansible Community Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
  - Do not forget about the maintenance burden. High-maintenance features may not be worth adding.
  - Do not break playbooks. Always keep backwards compatibility in mind.
  - Do not forget to keep it simple. Complexity breeds all kinds of problems.
  - Do not merge to branches other than `main`, especially not to `stable-X`, if you do not have explicit permission to do so.
  - Do not create tags. Tags are used in the release process, and should only be created by the people responsible for managing the stable branches.

* Do:

  - Squash, avoid merges whenever possible, use GitHub's squash commits or cherry pick if needed (bisect thanks you); see the sketch after this list.
  - Be active. Committers who have no activity on the project (through merges, triage, commits, and so on) will have their permissions suspended.
  - Consider backwards compatibility (goes back to "do not break existing playbooks").
  - Write tests. PRs with tests are looked at with more priority than PRs without tests that should have them included. While not all changes require tests, be sure to add them for bug fixes or functionality changes.
  - Discuss with other committers, especially when you are unsure of something.
  - Document! If your PR is a new feature or a change to behavior, make sure you've updated all associated documentation or have notified the right people to do so.
  - Consider scope; sometimes a fix can be generalized.
  - Keep it simple; then things are maintainable, debuggable and intelligible.
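
As an illustration only, a minimal sketch of the two mechanisms named above (branch names and the commit SHA are placeholders):

```shell
# Land a PR branch as a single commit on main (squash merge)
git checkout main
git merge --squash pr-branch
git commit

# Cherry-pick a single commit onto a stable branch, recording the
# original SHA with -x for traceability
git checkout stable-3
git cherry-pick -x 1a2b3c4d
```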

Committers are expected to continue to follow the same community and contribution guidelines followed by the rest of the Ansible community.

People
------

Individuals who have been asked to become a part of this group have generally been contributing in significant ways to the community.general collection for some time. Should they agree, they are requested to add their names and GitHub IDs to this file, in the section below, through a pull request. Doing so indicates that these individuals agree to act in the ways that their fellow committers trust that they will act.

| Name             | GitHub ID    | IRC Nick      | Other |
| ---------------- | ------------ | ------------- | ----- |
| Alexei Znamensky | russoz       | russoz        |       |
| Amin Vakil       | aminvakil    | aminvakil     |       |
| Andrew Klychkov  | andersson007 | andersson007_ |       |
| Felix Fontein    | felixfontein | felixfontein  |       |
| John R Barker    | gundalow     | gundalow      |       |
@@ -1,5 +0,0 @@
---
sections:
  - title: Guides
    toctree:
      - filter_guide

@@ -1,753 +0,0 @@
|
||||
.. _ansible_collections.community.general.docsite.filter_guide:
|
||||
|
||||
community.general Filter Guide
|
||||
==============================
|
||||
|
||||
The :ref:`community.general collection <plugins_in_community.general>` offers several useful filter plugins.
|
||||
|
||||
.. contents:: Topics
|
||||
|
||||
Paths
|
||||
-----
|
||||
|
||||
The ``path_join`` filter has been added in ansible-base 2.10. If you want to use this filter, but also need to support Ansible 2.9, you can use ``community.general``'s ``path_join`` shim, ``community.general.path_join``. This filter redirects to ``path_join`` for ansible-base 2.10 and ansible-core 2.11 or newer, and re-implements the filter for Ansible 2.9.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
# ansible-base 2.10 or newer:
|
||||
path: {{ ('/etc', path, 'subdir', file) | path_join }}
|
||||
|
||||
# Also works with Ansible 2.9:
|
||||
path: {{ ('/etc', path, 'subdir', file) | community.general.path_join }}
|
||||
|
||||
.. versionadded:: 3.0.0
|
||||
|
||||
Abstract transformations
|
||||
------------------------
|
||||
|
||||
Dictionaries
|
||||
^^^^^^^^^^^^
|
||||
|
||||
You can use the ``dict_kv`` filter to create a single-entry dictionary with ``value | community.general.dict_kv(key)``:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Create a single-entry dictionary
|
||||
debug:
|
||||
msg: "{{ myvar | community.general.dict_kv('thatsmyvar') }}"
|
||||
vars:
|
||||
myvar: myvalue
|
||||
|
||||
- name: Create a list of dictionaries where the 'server' field is taken from a list
|
||||
debug:
|
||||
msg: >-
|
||||
{{ myservers | map('community.general.dict_kv', 'server')
|
||||
| map('combine', common_config) }}
|
||||
vars:
|
||||
common_config:
|
||||
type: host
|
||||
database: all
|
||||
myservers:
|
||||
- server1
|
||||
- server2
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Create a single-entry dictionary] **************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": {
|
||||
"thatsmyvar": "myvalue"
|
||||
}
|
||||
}
|
||||
|
||||
TASK [Create a list of dictionaries where the 'server' field is taken from a list] *******
|
||||
ok: [localhost] => {
|
||||
"msg": [
|
||||
{
|
||||
"database": "all",
|
||||
"server": "server1",
|
||||
"type": "host"
|
||||
},
|
||||
{
|
||||
"database": "all",
|
||||
"server": "server2",
|
||||
"type": "host"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
.. versionadded:: 2.0.0
|
||||
|
||||
If you need to convert a list of key-value pairs to a dictionary, you can use the ``dict`` function. Unfortunately, this function cannot be used with ``map``. For this, the ``community.general.dict`` filter can be used:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Create a dictionary with the dict function
|
||||
debug:
|
||||
msg: "{{ dict([[1, 2], ['a', 'b']]) }}"
|
||||
|
||||
- name: Create a dictionary with the community.general.dict filter
|
||||
debug:
|
||||
msg: "{{ [[1, 2], ['a', 'b']] | community.general.dict }}"
|
||||
|
||||
- name: Create a list of dictionaries with map and the community.general.dict filter
|
||||
debug:
|
||||
msg: >-
|
||||
{{ values | map('zip', ['k1', 'k2', 'k3'])
|
||||
| map('map', 'reverse')
|
||||
| map('community.general.dict') }}
|
||||
vars:
|
||||
values:
|
||||
- - foo
|
||||
- 23
|
||||
- a
|
||||
- - bar
|
||||
- 42
|
||||
- b
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Create a dictionary with the dict function] ****************************************
|
||||
ok: [localhost] => {
|
||||
"msg": {
|
||||
"1": 2,
|
||||
"a": "b"
|
||||
}
|
||||
}
|
||||
|
||||
TASK [Create a dictionary with the community.general.dict filter] ************************
|
||||
ok: [localhost] => {
|
||||
"msg": {
|
||||
"1": 2,
|
||||
"a": "b"
|
||||
}
|
||||
}
|
||||
|
||||
TASK [Create a list of dictionaries with map and the community.general.dict filter] ******
|
||||
ok: [localhost] => {
|
||||
"msg": [
|
||||
{
|
||||
"k1": "foo",
|
||||
"k2": 23,
|
||||
"k3": "a"
|
||||
},
|
||||
{
|
||||
"k1": "bar",
|
||||
"k2": 42,
|
||||
"k3": "b"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
.. versionadded:: 3.0.0
|
||||
|
||||
Grouping
|
||||
^^^^^^^^
|
||||
|
||||
If you have a list of dictionaries, the Jinja2 ``groupby`` filter allows to group the list by an attribute. This results in a list of ``(grouper, list)`` namedtuples, where ``list`` contains all dictionaries where the selected attribute equals ``grouper``. If you know that for every ``grouper``, there will be a most one entry in that list, you can use the ``community.general.groupby_as_dict`` filter to convert the original list into a dictionary which maps ``grouper`` to the corresponding dictionary.
|
||||
|
||||
One example is ``ansible_facts.mounts``, which is a list of dictionaries where each has one ``device`` element to indicate the device which is mounted. Therefore, ``ansible_facts.mounts | community.general.groupby_as_dict('device')`` is a dictionary mapping a device to the mount information:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Output mount facts grouped by device name
|
||||
debug:
|
||||
var: ansible_facts.mounts | community.general.groupby_as_dict('device')
|
||||
|
||||
- name: Output mount facts grouped by mount point
|
||||
debug:
|
||||
var: ansible_facts.mounts | community.general.groupby_as_dict('mount')
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Output mount facts grouped by device name] ******************************************
|
||||
ok: [localhost] => {
|
||||
"ansible_facts.mounts | community.general.groupby_as_dict('device')": {
|
||||
"/dev/sda1": {
|
||||
"block_available": 2000,
|
||||
"block_size": 4096,
|
||||
"block_total": 2345,
|
||||
"block_used": 345,
|
||||
"device": "/dev/sda1",
|
||||
"fstype": "ext4",
|
||||
"inode_available": 500,
|
||||
"inode_total": 512,
|
||||
"inode_used": 12,
|
||||
"mount": "/boot",
|
||||
"options": "rw,relatime,data=ordered",
|
||||
"size_available": 56821,
|
||||
"size_total": 543210,
|
||||
"uuid": "ab31cade-d9c1-484d-8482-8a4cbee5241a"
|
||||
},
|
||||
"/dev/sda2": {
|
||||
"block_available": 1234,
|
||||
"block_size": 4096,
|
||||
"block_total": 12345,
|
||||
"block_used": 11111,
|
||||
"device": "/dev/sda2",
|
||||
"fstype": "ext4",
|
||||
"inode_available": 1111,
|
||||
"inode_total": 1234,
|
||||
"inode_used": 123,
|
||||
"mount": "/",
|
||||
"options": "rw,relatime",
|
||||
"size_available": 42143,
|
||||
"size_total": 543210,
|
||||
"uuid": "abcdef01-2345-6789-0abc-def012345678"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TASK [Output mount facts grouped by mount point] ******************************************
|
||||
ok: [localhost] => {
|
||||
"ansible_facts.mounts | community.general.groupby_as_dict('mount')": {
|
||||
"/": {
|
||||
"block_available": 1234,
|
||||
"block_size": 4096,
|
||||
"block_total": 12345,
|
||||
"block_used": 11111,
|
||||
"device": "/dev/sda2",
|
||||
"fstype": "ext4",
|
||||
"inode_available": 1111,
|
||||
"inode_total": 1234,
|
||||
"inode_used": 123,
|
||||
"mount": "/",
|
||||
"options": "rw,relatime",
|
||||
"size_available": 42143,
|
||||
"size_total": 543210,
|
||||
"uuid": "bdf50b7d-4859-40af-8665-c637ee7a7808"
|
||||
},
|
||||
"/boot": {
|
||||
"block_available": 2000,
|
||||
"block_size": 4096,
|
||||
"block_total": 2345,
|
||||
"block_used": 345,
|
||||
"device": "/dev/sda1",
|
||||
"fstype": "ext4",
|
||||
"inode_available": 500,
|
||||
"inode_total": 512,
|
||||
"inode_used": 12,
|
||||
"mount": "/boot",
|
||||
"options": "rw,relatime,data=ordered",
|
||||
"size_available": 56821,
|
||||
"size_total": 543210,
|
||||
"uuid": "ab31cade-d9c1-484d-8482-8a4cbee5241a"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
.. versionadded: 3.0.0
|
||||
|
||||
Merging lists of dictionaries
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If you have two lists of dictionaries and want to combine them into a list of merged dictionaries, where two dictionaries are merged if they coincide in one attribute, you can use the ``lists_mergeby`` filter.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Merge two lists by common attribute 'name'
|
||||
debug:
|
||||
var: list1 | community.general.lists_mergeby(list2, 'name')
|
||||
vars:
|
||||
list1:
|
||||
- name: foo
|
||||
extra: true
|
||||
- name: bar
|
||||
extra: false
|
||||
- name: meh
|
||||
extra: true
|
||||
list2:
|
||||
- name: foo
|
||||
path: /foo
|
||||
- name: baz
|
||||
path: /bazzz
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Merge two lists by common attribute 'name'] ****************************************
|
||||
ok: [localhost] => {
|
||||
"list1 | community.general.lists_mergeby(list2, 'name')": [
|
||||
{
|
||||
"extra": false,
|
||||
"name": "bar"
|
||||
},
|
||||
{
|
||||
"name": "baz",
|
||||
"path": "/bazzz"
|
||||
},
|
||||
{
|
||||
"extra": true,
|
||||
"name": "foo",
|
||||
"path": "/foo"
|
||||
},
|
||||
{
|
||||
"extra": true,
|
||||
"name": "meh"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
.. versionadded: 2.0.0
|
||||
|
||||
Working with times
|
||||
------------------
|
||||
|
||||
The ``to_time_unit`` filter allows to convert times from a human-readable string to a unit. For example, ``'4h 30min 12second' | community.general.to_time_unit('hour')`` gives the number of hours that correspond to 4 hours, 30 minutes and 12 seconds.
|
||||
|
||||
There are shorthands to directly convert to various units, like ``to_hours``, ``to_minutes``, ``to_seconds``, and so on. The following table lists all units that can be used:
|
||||
|
||||
.. list-table:: Units
|
||||
:widths: 25 25 25 25
|
||||
:header-rows: 1
|
||||
|
||||
* - Unit name
|
||||
- Unit value in seconds
|
||||
- Unit strings for filter
|
||||
- Shorthand filter
|
||||
* - Millisecond
|
||||
- 1/1000 second
|
||||
- ``ms``, ``millisecond``, ``milliseconds``, ``msec``, ``msecs``, ``msecond``, ``mseconds``
|
||||
- ``to_milliseconds``
|
||||
* - Second
|
||||
- 1 second
|
||||
- ``s``, ``sec``, ``secs``, ``second``, ``seconds``
|
||||
- ``to_seconds``
|
||||
* - Minute
|
||||
- 60 seconds
|
||||
- ``m``, ``min``, ``mins``, ``minute``, ``minutes``
|
||||
- ``to_minutes``
|
||||
* - Hour
|
||||
- 60*60 seconds
|
||||
- ``h``, ``hour``, ``hours``
|
||||
- ``to_hours``
|
||||
* - Day
|
||||
- 24*60*60 seconds
|
||||
- ``d``, ``day``, ``days``
|
||||
- ``to_days``
|
||||
* - Week
|
||||
- 7*24*60*60 seconds
|
||||
- ``w``, ``week``, ``weeks``
|
||||
- ``to_weeks``
|
||||
* - Month
|
||||
- 30*24*60*60 seconds
|
||||
- ``mo``, ``month``, ``months``
|
||||
- ``to_months``
|
||||
* - Year
|
||||
- 365*24*60*60 seconds
|
||||
- ``y``, ``year``, ``years``
|
||||
- ``to_years``
|
||||
|
||||
Note that months and years use a simplified representation: a month is 30 days, and a year is 365 days. If you need different definitions of months or years, you can pass them as keyword arguments. For example, if you want a year to be 365.25 days, and a month to be 30.5 days, you can write ``'11months 4days' | community.general.to_years(year=365.25, month=30.5)``. These keyword arguments can be specified to ``to_time_unit`` and to all shorthand filters.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Convert string to seconds
|
||||
debug:
|
||||
msg: "{{ '30h 20m 10s 123ms' | community.general.to_time_unit('seconds') }}"
|
||||
|
||||
- name: Convert string to hours
|
||||
debug:
|
||||
msg: "{{ '30h 20m 10s 123ms' | community.general.to_hours }}"
|
||||
|
||||
- name: Convert string to years (using 365.25 days == 1 year)
|
||||
debug:
|
||||
msg: "{{ '400d 15h' | community.general.to_years(year=365.25) }}"
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Convert string to seconds] **********************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": "109210.123"
|
||||
}
|
||||
|
||||
TASK [Convert string to hours] ************************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": "30.336145277778"
|
||||
}
|
||||
|
||||
TASK [Convert string to years (using 365.25 days == 1 year)] ******************************
|
||||
ok: [localhost] => {
|
||||
"msg": "1.096851471595"
|
||||
}
|
||||
|
||||
.. versionadded:: 0.2.0
|
||||
|
||||
Working with versions
|
||||
---------------------
|
||||
|
||||
If you need to sort a list of version numbers, the Jinja ``sort`` filter is problematic. Since it sorts lexicographically, ``2.10`` will come before ``2.9``. To treat version numbers correctly, you can use the ``version_sort`` filter:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Sort list by version number
|
||||
debug:
|
||||
var: ansible_versions | community.general.version_sort
|
||||
vars:
|
||||
ansible_versions:
|
||||
- '2.8.0'
|
||||
- '2.11.0'
|
||||
- '2.7.0'
|
||||
- '2.10.0'
|
||||
- '2.9.0'
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Sort list by version number] ********************************************************
|
||||
ok: [localhost] => {
|
||||
"ansible_versions | community.general.version_sort": [
|
||||
"2.7.0",
|
||||
"2.8.0",
|
||||
"2.9.0",
|
||||
"2.10.0",
|
||||
"2.11.0"
|
||||
]
|
||||
}
|
||||
|
||||
.. versionadded:: 2.2.0
|
||||
|
||||
Creating identifiers
|
||||
--------------------
|
||||
|
||||
The following filters allow you to create identifiers.
|
||||
|
||||
Hashids
|
||||
^^^^^^^
|
||||
|
||||
`Hashids <https://hashids.org/>`_ allow you to convert sequences of integers to short, unique string identifiers. These filters need the `hashids Python library <https://pypi.org/project/hashids/>`_ installed on the controller.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: "Create hashid"
|
||||
debug:
|
||||
msg: "{{ [1234, 5, 6] | community.general.hashids_encode }}"
|
||||
|
||||
- name: "Decode hashid"
|
||||
debug:
|
||||
msg: "{{ 'jm2Cytn' | community.general.hashids_decode }}"
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Create hashid] **********************************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": "jm2Cytn"
|
||||
}
|
||||
|
||||
TASK [Decode hashid] **********************************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": [
|
||||
1234,
|
||||
5,
|
||||
6
|
||||
]
|
||||
}
|
||||
|
||||
The hashids filters accept keyword arguments for fine-tuning the generated hashids:
|
||||
|
||||
:salt: String to use as salt when hashing.
|
||||
:alphabet: String of 16 or more unique characters to produce a hash.
|
||||
:min_length: Minimum length of hash produced.
|
||||
|
||||
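For instance, a minimal sketch combining these keyword arguments (the ``salt`` value is made up for illustration, and ``hashid_value`` is a placeholder variable); note that decoding only works if the same keyword arguments are passed to ``hashids_decode``:

.. code-block:: yaml+jinja

   - name: "Create a salted hashid with a minimum length"
     debug:
       msg: "{{ [1234, 5, 6] | community.general.hashids_encode(salt='my secret', min_length=10) }}"

   - name: "Decode it again, using the same parameters"
     debug:
       msg: "{{ hashid_value | community.general.hashids_decode(salt='my secret', min_length=10) }}"
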
.. versionadded:: 3.0.0
|
||||
|
||||
Random MACs
|
||||
^^^^^^^^^^^
|
||||
|
||||
You can use the ``random_mac`` filter to complete a partial `MAC address <https://en.wikipedia.org/wiki/MAC_address>`_ to a random 6-byte MAC address.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: "Create a random MAC starting with ff:"
|
||||
debug:
|
||||
msg: "{{ 'FF' | community.general.random_mac }}"
|
||||
|
||||
- name: "Create a random MAC starting with 00:11:22:"
|
||||
debug:
|
||||
msg: "{{ '00:11:22' | community.general.random_mac }}"
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Create a random MAC starting with ff:] **********************************************
|
||||
ok: [localhost] => {
|
||||
"msg": "ff:69:d3:78:7f:b4"
|
||||
}
|
||||
|
||||
TASK [Create a random MAC starting with 00:11:22:] ****************************************
|
||||
ok: [localhost] => {
|
||||
"msg": "00:11:22:71:5d:3b"
|
||||
}
|
||||
|
||||
You can also initialize the random number generator from a seed to create random-but-idempotent MAC addresses:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
"{{ '52:54:00' | community.general.random_mac(seed=inventory_hostname) }}"
|
||||
|
||||
Conversions
|
||||
-----------
|
||||
|
||||
Parsing CSV files
|
||||
^^^^^^^^^^^^^^^^^
|
||||
|
||||
Ansible offers the :ref:`community.general.read_csv module <ansible_collections.community.general.read_csv_module>` to read CSV files. Sometimes you need to parse CSV data that is already stored in a string instead. For this, the ``from_csv`` filter exists.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: "Parse CSV from string"
|
||||
debug:
|
||||
msg: "{{ csv_string | community.general.from_csv }}"
|
||||
vars:
|
||||
csv_string: |
|
||||
foo,bar,baz
|
||||
1,2,3
|
||||
you,this,then
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Parse CSV from string] **************************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": [
|
||||
{
|
||||
"bar": "2",
|
||||
"baz": "3",
|
||||
"foo": "1"
|
||||
},
|
||||
{
|
||||
"bar": "this",
|
||||
"baz": "then",
|
||||
"foo": "you"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
The ``from_csv`` filter has several keyword arguments to control its behavior:
|
||||
|
||||
:dialect: Dialect of the CSV file. Default is ``excel``. Other possible choices are ``excel-tab`` and ``unix``. If one of ``delimiter``, ``skipinitialspace`` or ``strict`` is specified, ``dialect`` is ignored.
|
||||
:fieldnames: A list of column names to use. If not provided, the first line of the CSV input is assumed to contain the column names.
|
||||
:delimiter: Sets the delimiter to use. Default depends on the dialect used.
|
||||
:skipinitialspace: Set to ``true`` to ignore space directly after the delimiter. Default depends on the dialect used (usually ``false``).
|
||||
:strict: Set to ``true`` to error out on invalid CSV input.
|
||||
|
||||
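For instance, a minimal sketch (the data is made up): a header-less, semicolon-separated string can be parsed by supplying ``fieldnames`` and ``delimiter`` explicitly:

.. code-block:: yaml+jinja

   - name: "Parse semicolon-separated values without a header row"
     debug:
       msg: "{{ csv_string | community.general.from_csv(delimiter=';', fieldnames=['name', 'age']) }}"
     vars:
       csv_string: |
         Alice;30
         Bob;25
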
.. versionadded:: 3.0.0
|
||||
|
||||
Converting to JSON
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
`JC <https://pypi.org/project/jc/>`_ is a CLI tool and Python library which allows you to interpret the output of various CLI programs as JSON. It is also available as a filter in community.general. This filter needs the `jc Python library <https://pypi.org/project/jc/>`_ installed on the controller.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Run 'ls' to list files in /
|
||||
command: ls /
|
||||
register: result
|
||||
|
||||
- name: Parse the ls output
|
||||
debug:
|
||||
msg: "{{ result.stdout | community.general.jc('ls') }}"
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Run 'ls' to list files in /] ********************************************************
|
||||
changed: [localhost]
|
||||
|
||||
TASK [Parse the ls output] ****************************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": [
|
||||
{
|
||||
"filename": "bin"
|
||||
},
|
||||
{
|
||||
"filename": "boot"
|
||||
},
|
||||
{
|
||||
"filename": "dev"
|
||||
},
|
||||
{
|
||||
"filename": "etc"
|
||||
},
|
||||
{
|
||||
"filename": "home"
|
||||
},
|
||||
{
|
||||
"filename": "lib"
|
||||
},
|
||||
{
|
||||
"filename": "proc"
|
||||
},
|
||||
{
|
||||
"filename": "root"
|
||||
},
|
||||
{
|
||||
"filename": "run"
|
||||
},
|
||||
{
|
||||
"filename": "tmp"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
.. versionadded:: 2.0.0
|
||||
|
||||
.. _ansible_collections.community.general.docsite.json_query_filter:
|
||||
|
||||
Selecting JSON data: JSON queries
|
||||
---------------------------------
|
||||
|
||||
To select a single element or a data subset from a complex data structure in JSON format (for example, Ansible facts), use the ``json_query`` filter. The ``json_query`` filter lets you query a complex JSON structure and iterate over it using a loop structure.
|
||||
|
||||
.. note:: You must manually install the **jmespath** dependency on the Ansible controller before using this filter. This filter is built upon **jmespath**, and you can use the same syntax. For examples, see `jmespath examples <http://jmespath.org/examples.html>`_.
|
||||
|
||||
Consider this data structure:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
{
|
||||
"domain_definition": {
|
||||
"domain": {
|
||||
"cluster": [
|
||||
{
|
||||
"name": "cluster1"
|
||||
},
|
||||
{
|
||||
"name": "cluster2"
|
||||
}
|
||||
],
|
||||
"server": [
|
||||
{
|
||||
"name": "server11",
|
||||
"cluster": "cluster1",
|
||||
"port": "8080"
|
||||
},
|
||||
{
|
||||
"name": "server12",
|
||||
"cluster": "cluster1",
|
||||
"port": "8090"
|
||||
},
|
||||
{
|
||||
"name": "server21",
|
||||
"cluster": "cluster2",
|
||||
"port": "9080"
|
||||
},
|
||||
{
|
||||
"name": "server22",
|
||||
"cluster": "cluster2",
|
||||
"port": "9090"
|
||||
}
|
||||
],
|
||||
"library": [
|
||||
{
|
||||
"name": "lib1",
|
||||
"target": "cluster1"
|
||||
},
|
||||
{
|
||||
"name": "lib2",
|
||||
"target": "cluster2"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
To extract all clusters from this structure, you can use the following query:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all cluster names
|
||||
ansible.builtin.debug:
|
||||
var: item
|
||||
loop: "{{ domain_definition | community.general.json_query('domain.cluster[*].name') }}"
|
||||
|
||||
To extract all server names:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all server names
|
||||
ansible.builtin.debug:
|
||||
var: item
|
||||
loop: "{{ domain_definition | community.general.json_query('domain.server[*].name') }}"
|
||||
|
||||
To extract ports from cluster1:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all ports from cluster1
|
||||
ansible.builtin.debug:
|
||||
var: item
|
||||
loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}"
|
||||
vars:
|
||||
server_name_cluster1_query: "domain.server[?cluster=='cluster1'].port"
|
||||
|
||||
.. note:: You can use a variable to make the query more readable.
|
||||
|
||||
To print out the ports from cluster1 in a comma-separated string:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all ports from cluster1 as a string
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ domain_definition | community.general.json_query('domain.server[?cluster==`cluster1`].port') | join(', ') }}"
|
||||
|
||||
.. note:: In the example above, quoting literals using backticks avoids escaping quotes and maintains readability.
|
||||
|
||||
You can use YAML `single quote escaping <https://yaml.org/spec/current.html#id2534365>`_:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all ports from cluster1
|
||||
ansible.builtin.debug:
|
||||
var: item
|
||||
loop: "{{ domain_definition | community.general.json_query('domain.server[?cluster==''cluster1''].port') }}"
|
||||
|
||||
.. note:: Escaping single quotes within single quotes in YAML is done by doubling the single quote.
|
||||
|
||||
To get a hash map with all ports and names of a cluster:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display all server ports and names from cluster1
|
||||
ansible.builtin.debug:
|
||||
var: item
|
||||
loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}"
|
||||
vars:
|
||||
server_name_cluster1_query: "domain.server[?cluster=='cluster1'].{name: name, port: port}"
|
||||
|
||||
To extract ports from all servers whose name starts with 'server1':
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display ports of servers with a name starting with 'server1'
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}"
|
||||
vars:
|
||||
server_name_query: "domain.server[?starts_with(name,'server1')].port"
|
||||
|
||||
To extract ports from all servers whose name contains 'server1':
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Display ports of servers with a name containing 'server1'
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}"
|
||||
vars:
|
||||
server_name_query: "domain.server[?contains(name,'server1')].port"
|
||||
|
||||
.. note:: When using ``starts_with`` and ``contains``, you have to use the ``to_json | from_json`` filter combination for the data structure to be parsed correctly.
|
||||
@@ -1,6 +1,6 @@
|
||||
namespace: community
|
||||
name: general
|
||||
version: 3.2.0
|
||||
version: 2.2.0
|
||||
readme: README.md
|
||||
authors:
|
||||
- Ansible (https://github.com/ansible)
|
||||
|
||||
327
meta/runtime.yml
@@ -1,5 +1,31 @@
|
||||
---
|
||||
requires_ansible: '>=2.9.10'
|
||||
action_groups:
|
||||
ovirt:
|
||||
- ovirt_affinity_label_facts
|
||||
- ovirt_api_facts
|
||||
- ovirt_cluster_facts
|
||||
- ovirt_datacenter_facts
|
||||
- ovirt_disk_facts
|
||||
- ovirt_event_facts
|
||||
- ovirt_external_provider_facts
|
||||
- ovirt_group_facts
|
||||
- ovirt_host_facts
|
||||
- ovirt_host_storage_facts
|
||||
- ovirt_network_facts
|
||||
- ovirt_nic_facts
|
||||
- ovirt_permission_facts
|
||||
- ovirt_quota_facts
|
||||
- ovirt_scheduling_policy_facts
|
||||
- ovirt_snapshot_facts
|
||||
- ovirt_storage_domain_facts
|
||||
- ovirt_storage_template_facts
|
||||
- ovirt_storage_vm_facts
|
||||
- ovirt_tag_facts
|
||||
- ovirt_template_facts
|
||||
- ovirt_user_facts
|
||||
- ovirt_vm_facts
|
||||
- ovirt_vmpool_facts
|
||||
plugin_routing:
|
||||
connection:
|
||||
docker:
|
||||
@@ -11,24 +37,9 @@ plugin_routing:
|
||||
redirect: community.google.gcp_storage_file
|
||||
hashi_vault:
|
||||
redirect: community.hashi_vault.hashi_vault
|
||||
nios:
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios lookup plugin has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_lookup instead.
|
||||
nios_next_ip:
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_next_ip lookup plugin has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_next_ip instead.
|
||||
nios_next_network:
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_next_network lookup plugin has been
|
||||
deprecated. Please use infoblox.nios_modules.nios_next_network instead.
|
||||
modules:
|
||||
ali_instance_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.ali_instance_info instead.
|
||||
docker_compose:
|
||||
@@ -130,13 +141,11 @@ plugin_routing:
|
||||
gcp_forwarding_rule:
|
||||
tombstone:
|
||||
removal_version: 2.0.0
|
||||
warning_text: Use google.cloud.gcp_compute_forwarding_rule or google.cloud.gcp_compute_global_forwarding_rule
|
||||
instead.
|
||||
warning_text: Use google.cloud.gcp_compute_forwarding_rule or google.cloud.gcp_compute_global_forwarding_rule instead.
|
||||
gcp_healthcheck:
|
||||
tombstone:
|
||||
removal_version: 2.0.0
|
||||
warning_text: Use google.cloud.gcp_compute_health_check, google.cloud.gcp_compute_http_health_check
|
||||
or google.cloud.gcp_compute_https_health_check instead.
|
||||
warning_text: Use google.cloud.gcp_compute_health_check, google.cloud.gcp_compute_http_health_check or google.cloud.gcp_compute_https_health_check instead.
|
||||
gcp_target_proxy:
|
||||
tombstone:
|
||||
removal_version: 2.0.0
|
||||
@@ -147,22 +156,37 @@ plugin_routing:
|
||||
warning_text: Use google.cloud.gcp_compute_url_map instead.
|
||||
gcpubsub:
|
||||
redirect: community.google.gcpubsub
|
||||
gcpubsub_facts:
|
||||
tombstone:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.google.gcpubsub_info instead.
|
||||
gcpubsub_info:
|
||||
redirect: community.google.gcpubsub_info
|
||||
gcpubsub_facts:
|
||||
redirect: community.google.gcpubsub_info
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.google.gcpubsub_info instead.
|
||||
gcspanner:
|
||||
tombstone:
|
||||
removal_version: 2.0.0
|
||||
warning_text: Use google.cloud.gcp_spanner_database and/or google.cloud.gcp_spanner_instance
|
||||
instead.
|
||||
warning_text: Use google.cloud.gcp_spanner_database and/or google.cloud.gcp_spanner_instance instead.
|
||||
github_hooks:
|
||||
tombstone:
|
||||
removal_version: 2.0.0
|
||||
warning_text: Use community.general.github_webhook and community.general.github_webhook_info
|
||||
instead.
|
||||
warning_text: Use community.general.github_webhook and community.general.github_webhook_info instead.
|
||||
gluster_heal_info:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_heal_info instead.
|
||||
gluster_peer:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_peer instead.
|
||||
gluster_volume:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_volume instead.
|
||||
helm:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: The helm module in community.general has been deprecated. Use community.kubernetes.helm instead.
|
||||
hetzner_failover_ip:
|
||||
redirect: community.hrobot.failover_ip
|
||||
hetzner_failover_ip_info:
|
||||
@@ -172,19 +196,15 @@ plugin_routing:
|
||||
hetzner_firewall_info:
|
||||
redirect: community.hrobot.firewall_info
|
||||
hpilo_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.hpilo_info instead.
|
||||
idrac_firmware:
|
||||
redirect: dellemc.openmanage.idrac_firmware
|
||||
idrac_redfish_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.idrac_redfish_info instead.
|
||||
idrac_server_config_profile:
|
||||
redirect: dellemc.openmanage.idrac_server_config_profile
|
||||
jenkins_job_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.jenkins_job_info instead.
|
||||
katello:
|
||||
@@ -204,25 +224,23 @@ plugin_routing:
|
||||
kubevirt_vm:
|
||||
redirect: community.kubevirt.kubevirt_vm
|
||||
ldap_attr:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.ldap_attrs instead.
|
||||
logicmonitor:
|
||||
tombstone:
|
||||
removal_version: 1.0.0
|
||||
warning_text: The logicmonitor_facts module is no longer maintained and the
|
||||
API used has been disabled in 2017.
|
||||
warning_text: The logicmonitor_facts module is no longer maintained and the API used has been disabled in 2017.
|
||||
logicmonitor_facts:
|
||||
tombstone:
|
||||
removal_version: 1.0.0
|
||||
warning_text: The logicmonitor_facts module is no longer maintained and the
|
||||
API used has been disabled in 2017.
|
||||
warning_text: The logicmonitor_facts module is no longer maintained and the API used has been disabled in 2017.
|
||||
memset_memstore_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.memset_memstore_info instead.
|
||||
memset_server_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.memset_server_info instead.
|
||||
na_cdot_aggregate:
|
||||
@@ -258,242 +276,159 @@ plugin_routing:
|
||||
removal_version: 2.0.0
|
||||
warning_text: Use netapp.ontap.na_ontap_volume instead.
|
||||
na_ontap_gather_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use netapp.ontap.na_ontap_info instead.
|
||||
nginx_status_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.nginx_status_info instead.
|
||||
nios_a_record:
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_a_record module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_a_record instead.
|
||||
nios_aaaa_record:
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_aaaa_record module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_aaaa_record instead.
|
||||
nios_cname_record:
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_cname_record module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_cname_record instead.
|
||||
nios_dns_view:
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_dns_view module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_dns_view instead.
|
||||
nios_fixed_address:
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_fixed_address module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_fixed_address instead.
|
||||
nios_host_record:
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_host_record module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_host_record instead.
|
||||
nios_member:
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_member module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_member instead.
|
||||
nios_mx_record:
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_mx_record module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_mx_record instead.
|
||||
nios_naptr_record:
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_naptr_record module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_naptr_record instead.
|
||||
nios_network:
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_network module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_network instead.
|
||||
nios_network_view:
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_network_view module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_network_view instead.
|
||||
nios_nsgroup:
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_nsgroup module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_nsgroup instead.
|
||||
nios_ptr_record:
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_ptr_record module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_ptr_record instead.
|
||||
nios_srv_record:
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_srv_record module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_srv_record instead.
|
||||
nios_txt_record:
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_txt_record module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_txt_record instead.
|
||||
nios_zone:
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios_zone module has been deprecated.
|
||||
Please use infoblox.nios_modules.nios_zone instead.
|
||||
ome_device_info:
|
||||
redirect: dellemc.openmanage.ome_device_info
|
||||
one_image_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.one_image_info instead.
|
||||
onepassword_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.onepassword_info instead.
|
||||
oneview_datacenter_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.oneview_datacenter_info instead.
|
||||
oneview_enclosure_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.oneview_enclosure_info instead.
|
||||
oneview_ethernet_network_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.oneview_ethernet_network_info instead.
|
||||
oneview_fc_network_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.oneview_fc_network_info instead.
|
||||
oneview_fcoe_network_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.oneview_fcoe_network_info instead.
|
||||
oneview_logical_interconnect_group_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.oneview_logical_interconnect_group_info
|
||||
instead.
|
||||
warning_text: Use community.general.oneview_logical_interconnect_group_info instead.
|
||||
oneview_network_set_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.oneview_network_set_info instead.
|
||||
oneview_san_manager_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.oneview_san_manager_info instead.
|
||||
online_server_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.online_server_info instead.
|
||||
online_user_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.online_user_info instead.
|
||||
ovirt:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_vm instead.
|
||||
ovirt_affinity_label_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_affinity_label_info instead.
|
||||
ovirt_api_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_api_info instead.
|
||||
ovirt_cluster_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_cluster_info instead.
|
||||
ovirt_datacenter_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_datacenter_info instead.
|
||||
ovirt_disk_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_disk_info instead.
|
||||
ovirt_event_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_event_info instead.
|
||||
ovirt_external_provider_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_external_provider_info instead.
|
||||
ovirt_group_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_group_info instead.
|
||||
ovirt_host_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_host_info instead.
|
||||
ovirt_host_storage_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_host_storage_info instead.
|
||||
ovirt_network_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_network_info instead.
|
||||
ovirt_nic_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_nic_info instead.
|
||||
ovirt_permission_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_permission_info instead.
|
||||
ovirt_quota_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_quota_info instead.
|
||||
ovirt_scheduling_policy_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_scheduling_policy_info instead.
|
||||
ovirt_snapshot_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_snapshot_info instead.
|
||||
ovirt_storage_domain_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_storage_domain_info instead.
|
||||
ovirt_storage_template_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_storage_template_info instead.
|
||||
ovirt_storage_vm_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_storage_vm_info instead.
|
||||
ovirt_tag_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_tag_info instead.
|
||||
ovirt_template_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_template_info instead.
|
||||
ovirt_user_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_user_info instead.
|
||||
ovirt_vm_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_vm_info instead.
|
||||
ovirt_vmpool_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use ovirt.ovirt.ovirt_vmpool_info instead.
|
||||
postgresql_copy:
|
||||
@@ -536,52 +471,52 @@ plugin_routing:
|
||||
redirect: community.postgresql.postgresql_table
|
||||
postgresql_tablespace:
|
||||
redirect: community.postgresql.postgresql_tablespace
|
||||
postgresql_user:
|
||||
redirect: community.postgresql.postgresql_user
|
||||
postgresql_user_obj_stat_info:
|
||||
redirect: community.postgresql.postgresql_user_obj_stat_info
|
||||
postgresql_user:
|
||||
redirect: community.postgresql.postgresql_user
|
||||
purefa_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use purestorage.flasharray.purefa_info instead.
|
||||
purefb_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use purestorage.flashblade.purefb_info instead.
|
||||
python_requirements_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.python_requirements_info instead.
|
||||
redfish_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.redfish_info instead.
|
||||
scaleway_image_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.scaleway_image_info instead.
|
||||
scaleway_ip_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.scaleway_ip_info instead.
|
||||
scaleway_organization_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.scaleway_organization_info instead.
|
||||
scaleway_security_group_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.scaleway_security_group_info instead.
|
||||
scaleway_server_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.scaleway_server_info instead.
|
||||
scaleway_snapshot_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.scaleway_snapshot_info instead.
|
||||
scaleway_volume_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.scaleway_volume_info instead.
|
||||
sf_account_manager:
|
||||
@@ -605,15 +540,15 @@ plugin_routing:
|
||||
removal_version: 2.0.0
|
||||
warning_text: Use netapp.elementsw.na_elementsw_volume instead.
|
||||
smartos_image_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.smartos_image_info instead.
|
||||
vertica_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.vertica_info instead.
|
||||
xenserver_guest_facts:
|
||||
tombstone:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: Use community.general.xenserver_guest_info instead.
|
||||
doc_fragments:
|
||||
@@ -627,11 +562,6 @@ plugin_routing:
|
||||
redirect: community.kubevirt.kubevirt_common_options
|
||||
kubevirt_vm_options:
|
||||
redirect: community.kubevirt.kubevirt_vm_options
|
||||
nios:
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.nios document fragment has been deprecated.
|
||||
Please use infoblox.nios_modules.nios instead.
|
||||
postgresql:
|
||||
redirect: community.postgresql.postgresql
|
||||
module_utils:
|
||||
@@ -649,33 +579,21 @@ plugin_routing:
|
||||
redirect: community.hrobot.robot
|
||||
kubevirt:
|
||||
redirect: community.kubevirt.kubevirt
|
||||
net_tools.nios.api:
|
||||
deprecation:
|
||||
removal_version: 5.0.0
|
||||
warning_text: The community.general.net_tools.nios.api module_utils has been
|
||||
deprecated. Please use infoblox.nios_modules.api instead.
|
||||
postgresql:
|
||||
redirect: community.postgresql.postgresql
|
||||
remote_management.dellemc.dellemc_idrac:
|
||||
redirect: dellemc.openmanage.dellemc_idrac
|
||||
remote_management.dellemc.ome:
|
||||
redirect: dellemc.openmanage.ome
|
||||
callback:
|
||||
actionable:
|
||||
tombstone:
|
||||
removal_version: 2.0.0
|
||||
warning_text: Use the 'default' callback plugin with 'display_skipped_hosts
|
||||
= no' and 'display_ok_hosts = no' options.
|
||||
warning_text: Use the 'default' callback plugin with 'display_skipped_hosts = no' and 'display_ok_hosts = no' options.
|
||||
full_skip:
|
||||
tombstone:
|
||||
removal_version: 2.0.0
|
||||
warning_text: Use the 'default' callback plugin with 'display_skipped_hosts
|
||||
= no' option.
|
||||
warning_text: Use the 'default' callback plugin with 'display_skipped_hosts = no' option.
|
||||
stderr:
|
||||
tombstone:
|
||||
removal_version: 2.0.0
|
||||
warning_text: Use the 'default' callback plugin with 'display_failed_stderr
|
||||
= yes' option.
|
||||
warning_text: Use the 'default' callback plugin with 'display_failed_stderr = yes' option.
|
||||
inventory:
|
||||
docker_machine:
|
||||
redirect: community.docker.docker_machine
|
||||
@@ -683,10 +601,3 @@ plugin_routing:
|
||||
redirect: community.docker.docker_swarm
|
||||
kubevirt:
|
||||
redirect: community.kubevirt.kubevirt
|
||||
filter:
|
||||
path_join:
|
||||
# The ansible.builtin.path_join filter has been added in ansible-base 2.10.
|
||||
# Since plugin routing is only available since ansible-base 2.10, this
|
||||
# redirect will be used for ansible-base 2.10 or later, and the included
|
||||
# path_join filter will be used for Ansible 2.9 or earlier.
|
||||
redirect: ansible.builtin.path_join
|
||||
|
||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
||||
import time
|
||||
|
||||
from ansible.plugins.action import ActionBase
|
||||
from ansible.errors import AnsibleActionFail, AnsibleConnectionFailure
|
||||
from ansible.errors import AnsibleError, AnsibleActionFail, AnsibleConnectionFailure
|
||||
from ansible.utils.vars import merge_hash
|
||||
from ansible.utils.display import Display
|
||||
|
||||
@@ -40,27 +40,19 @@ class ActionModule(ActionBase):
|
||||
"(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
|
||||
"'ansible_timeout' (=%s) (recommended).")
|
||||
|
||||
def _async_result(self, async_status_args, task_vars, timeout):
|
||||
def _async_result(self, module_args, task_vars, timeout):
|
||||
'''
|
||||
Retrieve results of the asynchronous task, and display them in place of
|
||||
the async wrapper results (those with the ansible_job_id key).
|
||||
'''
|
||||
async_status = self._task.copy()
|
||||
async_status.args = async_status_args
|
||||
async_status.action = 'ansible.builtin.async_status'
|
||||
async_status.async_val = 0
|
||||
async_action = self._shared_loader_obj.action_loader.get(
|
||||
async_status.action, task=async_status, connection=self._connection,
|
||||
play_context=self._play_context, loader=self._loader, templar=self._templar,
|
||||
shared_loader_obj=self._shared_loader_obj)
|
||||
|
||||
if async_status.args['mode'] == 'cleanup':
|
||||
return async_action.run(task_vars=task_vars)
|
||||
|
||||
# At least one iteration is required, even if timeout is 0.
|
||||
for dummy in range(max(1, timeout)):
|
||||
async_result = async_action.run(task_vars=task_vars)
|
||||
if async_result.get('finished', 0) == 1:
|
||||
for i in range(max(1, timeout)):
|
||||
async_result = self._execute_module(
|
||||
module_name='ansible.builtin.async_status',
|
||||
module_args=module_args,
|
||||
task_vars=task_vars,
|
||||
wrap_async=False)
|
||||
if async_result['finished'] == 1:
|
||||
break
|
||||
time.sleep(min(1, timeout))
|
||||
|
||||
@@ -84,6 +76,7 @@ class ActionModule(ActionBase):
|
||||
task_async = self._task.async_val
|
||||
check_mode = self._play_context.check_mode
|
||||
max_timeout = self._connection._play_context.timeout
|
||||
module_name = self._task.action
|
||||
module_args = self._task.args
|
||||
|
||||
if module_args.get('state', None) == 'restored':
|
||||
@@ -114,7 +107,7 @@ class ActionModule(ActionBase):
|
||||
# longer on the controller); and set a backup file path.
|
||||
module_args['_timeout'] = task_async
|
||||
module_args['_back'] = '%s/iptables.state' % async_dir
|
||||
async_status_args = dict(mode='status')
|
||||
async_status_args = dict(_async_dir=async_dir)
|
||||
confirm_cmd = 'rm -f %s' % module_args['_back']
|
||||
starter_cmd = 'touch %s.starter' % module_args['_back']
|
||||
remaining_time = max(task_async, max_timeout)
|
||||
@@ -140,7 +133,7 @@ class ActionModule(ActionBase):
|
||||
# The module is aware to not process the main iptables-restore
|
||||
# command before finding (and deleting) the 'starter' cookie on
|
||||
# the host, so the previous query will not reach ssh timeout.
|
||||
dummy = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE)
|
||||
garbage = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE)
|
||||
|
||||
# As the main command is not yet executed on the target, here
|
||||
# 'finished' means 'failed before main command be executed'.
|
||||
@@ -150,7 +143,7 @@ class ActionModule(ActionBase):
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
for dummy in range(max_timeout):
|
||||
for x in range(max_timeout):
|
||||
time.sleep(1)
|
||||
remaining_time -= 1
|
||||
# - AnsibleConnectionFailure covers rejected requests (i.e.
|
||||
@@ -158,7 +151,7 @@ class ActionModule(ActionBase):
|
||||
# - ansible_timeout is able to cover dropped requests (due
|
||||
# to a rule or policy DROP) if not lower than async_val.
|
||||
try:
|
||||
dummy = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE)
|
||||
garbage = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE)
|
||||
break
|
||||
except AnsibleConnectionFailure:
|
||||
continue
|
||||
@@ -171,12 +164,16 @@ class ActionModule(ActionBase):
|
||||
del result[key]
|
||||
|
||||
if result.get('invocation', {}).get('module_args'):
|
||||
for key in ('_back', '_timeout', '_async_dir', 'jid'):
|
||||
if result['invocation']['module_args'].get(key):
|
||||
del result['invocation']['module_args'][key]
|
||||
if '_timeout' in result['invocation']['module_args']:
|
||||
del result['invocation']['module_args']['_back']
|
||||
del result['invocation']['module_args']['_timeout']
|
||||
|
||||
async_status_args['mode'] = 'cleanup'
|
||||
dummy = self._async_result(async_status_args, task_vars, 0)
|
||||
garbage = self._execute_module(
|
||||
module_name='ansible.builtin.async_status',
|
||||
module_args=async_status_args,
|
||||
task_vars=task_vars,
|
||||
wrap_async=False)
|
||||
|
||||
if not wrap_async:
|
||||
# remove a temporary path we created
|
||||
|
||||
@@ -1,91 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2021, Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = """
|
||||
name: sudosu
|
||||
short_description: Run tasks using sudo su -
|
||||
description:
|
||||
- This become plugin allows your remote/login user to execute commands as another user via the C(sudo) and C(su) utilities combined.
|
||||
author:
|
||||
- Dag Wieers (@dagwieers)
|
||||
version_added: 2.4.0
|
||||
options:
|
||||
become_user:
|
||||
description: User you 'become' to execute the task.
|
||||
default: root
|
||||
ini:
|
||||
- section: privilege_escalation
|
||||
key: become_user
|
||||
- section: sudo_become_plugin
|
||||
key: user
|
||||
vars:
|
||||
- name: ansible_become_user
|
||||
- name: ansible_sudo_user
|
||||
env:
|
||||
- name: ANSIBLE_BECOME_USER
|
||||
- name: ANSIBLE_SUDO_USER
|
||||
become_flags:
|
||||
description: Options to pass to C(sudo).
|
||||
default: -H -S -n
|
||||
ini:
|
||||
- section: privilege_escalation
|
||||
key: become_flags
|
||||
- section: sudo_become_plugin
|
||||
key: flags
|
||||
vars:
|
||||
- name: ansible_become_flags
|
||||
- name: ansible_sudo_flags
|
||||
env:
|
||||
- name: ANSIBLE_BECOME_FLAGS
|
||||
- name: ANSIBLE_SUDO_FLAGS
|
||||
become_pass:
|
||||
description: Password to pass to C(sudo).
|
||||
required: false
|
||||
vars:
|
||||
- name: ansible_become_password
|
||||
- name: ansible_become_pass
|
||||
- name: ansible_sudo_pass
|
||||
env:
|
||||
- name: ANSIBLE_BECOME_PASS
|
||||
- name: ANSIBLE_SUDO_PASS
|
||||
ini:
|
||||
- section: sudo_become_plugin
|
||||
key: password
|
||||
"""
|
||||
|
||||
|
||||
from ansible.plugins.become import BecomeBase
|
||||
|
||||
|
||||
class BecomeModule(BecomeBase):
|
||||
|
||||
name = 'community.general.sudosu'
|
||||
|
||||
# messages for detecting prompted password issues
|
||||
fail = ('Sorry, try again.',)
|
||||
missing = ('Sorry, a password is required to run sudo', 'sudo: a password is required')
|
||||
|
||||
def build_become_command(self, cmd, shell):
|
||||
super(BecomeModule, self).build_become_command(cmd, shell)
|
||||
|
||||
if not cmd:
|
||||
return cmd
|
||||
|
||||
becomecmd = 'sudo'
|
||||
|
||||
flags = self.get_option('become_flags') or ''
|
||||
prompt = ''
|
||||
if self.get_option('become_pass'):
|
||||
self.prompt = '[sudo via ansible, key=%s] password:' % self._id
|
||||
if flags: # this could be simplified, but kept as is for now for backwards string matching
|
||||
flags = flags.replace('-n', '')
|
||||
prompt = '-p "%s"' % (self.prompt)
|
||||
|
||||
user = self.get_option('become_user') or ''
|
||||
if user:
|
||||
user = '%s' % (user)
|
||||
|
||||
return ' '.join([becomecmd, flags, prompt, 'su -l', user, self._build_success_command(cmd, shell)])
|
||||
14
plugins/cache/redis.py
vendored
@@ -61,7 +61,6 @@ DOCUMENTATION = '''
|
||||
type: integer
|
||||
'''
|
||||
|
||||
import re
|
||||
import time
|
||||
import json
|
||||
|
||||
@@ -92,8 +91,6 @@ class CacheModule(BaseCacheModule):
|
||||
performance.
|
||||
"""
|
||||
_sentinel_service_name = None
|
||||
re_url_conn = re.compile(r'^([^:]+|\[[^]]+\]):(\d+):(\d+)(?::(.*))?$')
|
||||
re_sent_conn = re.compile(r'^(.*):(\d+)$')
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
uri = ''
|
||||
@@ -133,18 +130,11 @@ class CacheModule(BaseCacheModule):
|
||||
self._db = self._get_sentinel_connection(uri, kw)
|
||||
# normal connection
|
||||
else:
|
||||
connection = self._parse_connection(self.re_url_conn, uri)
|
||||
connection = uri.split(':')
|
||||
self._db = StrictRedis(*connection, **kw)
|
||||
|
||||
display.vv('Redis connection: %s' % self._db)
|
||||
|
||||
@staticmethod
|
||||
def _parse_connection(re_patt, uri):
|
||||
match = re_patt.match(uri)
|
||||
if not match:
|
||||
raise AnsibleError("Unable to parse connection string")
|
||||
return match.groups()
|
||||
|
||||
def _get_sentinel_connection(self, uri, kw):
|
||||
"""
|
||||
get sentinel connection details from _uri
|
||||
@@ -168,7 +158,7 @@ class CacheModule(BaseCacheModule):
|
||||
except IndexError:
|
||||
pass # password is optional
|
||||
|
||||
sentinels = [self._parse_connection(self.re_sent_conn, shost) for shost in connections]
|
||||
sentinels = [tuple(shost.split(':')) for shost in connections]
|
||||
display.vv('\nUsing redis sentinels: %s' % sentinels)
|
||||
scon = Sentinel(sentinels, **kw)
|
||||
try:
|
||||
|
||||
@@ -1,234 +0,0 @@
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: loganalytics
|
||||
type: aggregate
|
||||
short_description: Posts task results to Azure Log Analytics
|
||||
author: "Cyrus Li (@zhcli) <cyrus1006@gmail.com>"
|
||||
description:
|
||||
- This callback plugin will post task results in JSON formatted to an Azure Log Analytics workspace.
|
||||
- Credits to authors of splunk callback plugin.
|
||||
version_added: "2.4.0"
|
||||
requirements:
|
||||
- Whitelisting this callback plugin.
|
||||
- An Azure log analytics work space has been established.
|
||||
options:
|
||||
workspace_id:
|
||||
description: Workspace ID of the Azure log analytics workspace.
|
||||
required: true
|
||||
env:
|
||||
- name: WORKSPACE_ID
|
||||
ini:
|
||||
- section: callback_loganalytics
|
||||
key: workspace_id
|
||||
shared_key:
|
||||
description: Shared key to connect to Azure log analytics workspace.
|
||||
required: true
|
||||
env:
|
||||
- name: WORKSPACE_SHARED_KEY
|
||||
ini:
|
||||
- section: callback_loganalytics
|
||||
key: shared_key
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
examples: |
|
||||
Whitelist the plugin in ansible.cfg:
|
||||
[defaults]
|
||||
callback_whitelist = community.general.loganalytics
|
||||
Set the environment variable:
|
||||
export WORKSPACE_ID=01234567-0123-0123-0123-01234567890a
|
||||
export WORKSPACE_SHARED_KEY=dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA==
|
||||
Or configure the plugin in ansible.cfg in the callback_loganalytics block:
|
||||
[callback_loganalytics]
|
||||
workspace_id = 01234567-0123-0123-0123-01234567890a
|
||||
shared_key = dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA==
|
||||
'''
|
||||
|
||||
import hashlib
|
||||
import hmac
|
||||
import base64
|
||||
import logging
|
||||
import json
|
||||
import uuid
|
||||
import socket
|
||||
import getpass
|
||||
|
||||
from datetime import datetime
|
||||
from os.path import basename
|
||||
|
||||
from ansible.module_utils.urls import open_url
|
||||
from ansible.parsing.ajson import AnsibleJSONEncoder
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
|
||||
|
||||
class AzureLogAnalyticsSource(object):
|
||||
def __init__(self):
|
||||
self.ansible_check_mode = False
|
||||
self.ansible_playbook = ""
|
||||
self.ansible_version = ""
|
||||
self.session = str(uuid.uuid4())
|
||||
self.host = socket.gethostname()
|
||||
self.user = getpass.getuser()
|
||||
self.extra_vars = ""
|
||||
|
||||
def __build_signature(self, date, workspace_id, shared_key, content_length):
|
||||
# Build authorisation signature for Azure log analytics API call
|
||||
sigs = "POST\n{0}\napplication/json\nx-ms-date:{1}\n/api/logs".format(
|
||||
str(content_length), date)
|
||||
utf8_sigs = sigs.encode('utf-8')
|
||||
decoded_shared_key = base64.b64decode(shared_key)
|
||||
hmac_sha256_sigs = hmac.new(
|
||||
decoded_shared_key, utf8_sigs, digestmod=hashlib.sha256).digest()
|
||||
encoded_hash = base64.b64encode(hmac_sha256_sigs).decode('utf-8')
|
||||
signature = "SharedKey {0}:{1}".format(workspace_id, encoded_hash)
|
||||
return signature
|
||||
|
||||
def __build_workspace_url(self, workspace_id):
|
||||
return "https://{0}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01".format(workspace_id)
|
||||
|
||||
def __rfc1123date(self):
|
||||
return datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
|
||||
|
||||
def send_event(self, workspace_id, shared_key, state, result, runtime):
|
||||
if result._task_fields['args'].get('_ansible_check_mode') is True:
|
||||
self.ansible_check_mode = True
|
||||
|
||||
if result._task_fields['args'].get('_ansible_version'):
|
||||
self.ansible_version = \
|
||||
result._task_fields['args'].get('_ansible_version')
|
||||
|
||||
if result._task._role:
|
||||
ansible_role = str(result._task._role)
|
||||
else:
|
||||
ansible_role = None
|
||||
|
||||
data = {}
|
||||
data['uuid'] = result._task._uuid
|
||||
data['session'] = self.session
|
||||
data['status'] = state
|
||||
data['timestamp'] = self.__rfc1123date()
|
||||
data['host'] = self.host
|
||||
data['user'] = self.user
|
||||
data['runtime'] = runtime
|
||||
data['ansible_version'] = self.ansible_version
|
||||
data['ansible_check_mode'] = self.ansible_check_mode
|
||||
data['ansible_host'] = result._host.name
|
||||
data['ansible_playbook'] = self.ansible_playbook
|
||||
data['ansible_role'] = ansible_role
|
||||
data['ansible_task'] = result._task_fields
|
||||
# Removing args since it can contain sensitive data
|
||||
if 'args' in data['ansible_task']:
|
||||
data['ansible_task'].pop('args')
|
||||
data['ansible_result'] = result._result
|
||||
if 'content' in data['ansible_result']:
|
||||
data['ansible_result'].pop('content')
|
||||
|
||||
# Adding extra vars info
|
||||
data['extra_vars'] = self.extra_vars
|
||||
|
||||
# Preparing the playbook logs as JSON format and send to Azure log analytics
|
||||
jsondata = json.dumps({'event': data}, cls=AnsibleJSONEncoder, sort_keys=True)
|
||||
content_length = len(jsondata)
|
||||
rfc1123date = self.__rfc1123date()
|
||||
signature = self.__build_signature(rfc1123date, workspace_id, shared_key, content_length)
|
||||
workspace_url = self.__build_workspace_url(workspace_id)
|
||||
|
||||
open_url(
|
||||
workspace_url,
|
||||
jsondata,
|
||||
headers={
|
||||
'content-type': 'application/json',
|
||||
'Authorization': signature,
|
||||
'Log-Type': 'ansible_playbook',
|
||||
'x-ms-date': rfc1123date
|
||||
},
|
||||
method='POST'
|
||||
)
|
||||
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
CALLBACK_VERSION = 2.0
|
||||
CALLBACK_TYPE = 'aggregate'
|
||||
CALLBACK_NAME = 'loganalytics'
|
||||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
def __init__(self, display=None):
|
||||
super(CallbackModule, self).__init__(display=display)
|
||||
self.start_datetimes = {} # Collect task start times
|
||||
self.workspace_id = None
|
||||
self.shared_key = None
|
||||
self.loganalytics = AzureLogAnalyticsSource()
|
||||
|
||||
def _seconds_since_start(self, result):
|
||||
return (
|
||||
datetime.utcnow() -
|
||||
self.start_datetimes[result._task._uuid]
|
||||
).total_seconds()
|
||||
|
||||
def set_options(self, task_keys=None, var_options=None, direct=None):
|
||||
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
|
||||
self.workspace_id = self.get_option('workspace_id')
|
||||
self.shared_key = self.get_option('shared_key')
|
||||
|
||||
def v2_playbook_on_play_start(self, play):
|
||||
vm = play.get_variable_manager()
|
||||
extra_vars = vm.extra_vars
|
||||
self.loganalytics.extra_vars = extra_vars
|
||||
|
||||
def v2_playbook_on_start(self, playbook):
|
||||
self.loganalytics.ansible_playbook = basename(playbook._file_name)
|
||||
|
||||
def v2_playbook_on_task_start(self, task, is_conditional):
|
||||
self.start_datetimes[task._uuid] = datetime.utcnow()
|
||||
|
||||
def v2_playbook_on_handler_task_start(self, task):
|
||||
self.start_datetimes[task._uuid] = datetime.utcnow()
|
||||
|
||||
def v2_runner_on_ok(self, result, **kwargs):
|
||||
self.loganalytics.send_event(
|
||||
self.workspace_id,
|
||||
self.shared_key,
|
||||
'OK',
|
||||
result,
|
||||
self._seconds_since_start(result)
|
||||
)
|
||||
|
||||
def v2_runner_on_skipped(self, result, **kwargs):
|
||||
self.loganalytics.send_event(
|
||||
self.workspace_id,
|
||||
self.shared_key,
|
||||
'SKIPPED',
|
||||
result,
|
||||
self._seconds_since_start(result)
|
||||
)
|
||||
|
||||
def v2_runner_on_failed(self, result, **kwargs):
|
||||
self.loganalytics.send_event(
|
||||
self.workspace_id,
|
||||
self.shared_key,
|
||||
'FAILED',
|
||||
result,
|
||||
self._seconds_since_start(result)
|
||||
)
|
||||
|
||||
def runner_on_async_failed(self, result, **kwargs):
|
||||
self.loganalytics.send_event(
|
||||
self.workspace_id,
|
||||
self.shared_key,
|
||||
'FAILED',
|
||||
result,
|
||||
self._seconds_since_start(result)
|
||||
)
|
||||
|
||||
def v2_runner_on_unreachable(self, result, **kwargs):
|
||||
self.loganalytics.send_event(
|
||||
self.workspace_id,
|
||||
self.shared_key,
|
||||
'UNREACHABLE',
|
||||
result,
|
||||
self._seconds_since_start(result)
|
||||
)
|
||||
0
plugins/connection/__init__.py
Normal file
@@ -62,7 +62,7 @@ display = Display()


class Connection(ConnectionBase):
-    """ Local chroot based connections """
+    ''' Local chroot based connections '''

    transport = 'community.general.chroot'
    has_pipelining = True
@@ -95,7 +95,7 @@ class Connection(ConnectionBase):
            raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)

    def _connect(self):
-        """ connect to the chroot """
+        ''' connect to the chroot '''
        if os.path.isabs(self.get_option('chroot_exe')):
            self.chroot_cmd = self.get_option('chroot_exe')
        else:
@@ -110,17 +110,17 @@ class Connection(ConnectionBase):
            self._connected = True

    def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
-        """ run a command on the chroot. This is only needed for implementing
+        ''' run a command on the chroot. This is only needed for implementing
        put_file() get_file() so that we don't have to read the whole file
        into memory.

        compared to exec_command() it looses some niceties like being able to
        return the process's exit code immediately.
-        """
+        '''
        executable = self.get_option('executable')
        local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]

-        display.vvv("EXEC %s" % local_cmd, host=self.chroot)
+        display.vvv("EXEC %s" % (local_cmd), host=self.chroot)
        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
        p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -128,17 +128,16 @@ class Connection(ConnectionBase):
        return p

    def exec_command(self, cmd, in_data=None, sudoable=False):
-        """ run a command on the chroot """
+        ''' run a command on the chroot '''
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        p = self._buffered_exec_command(cmd)

        stdout, stderr = p.communicate(in_data)
-        return p.returncode, stdout, stderr
+        return (p.returncode, stdout, stderr)

-    @staticmethod
-    def _prefix_login_path(remote_path):
-        """ Make sure that we put files into a standard path
+    def _prefix_login_path(self, remote_path):
+        ''' Make sure that we put files into a standard path

        If a path is relative, then we need to choose where to put it.
        ssh chooses $HOME but we aren't guaranteed that a home dir will
@@ -146,13 +145,13 @@ class Connection(ConnectionBase):
        This also happens to be the former default.

        Can revisit using $HOME instead if it's a problem
-        """
+        '''
        if not remote_path.startswith(os.path.sep):
            remote_path = os.path.join(os.path.sep, remote_path)
        return os.path.normpath(remote_path)

    def put_file(self, in_path, out_path):
-        """ transfer a file from local to chroot """
+        ''' transfer a file from local to chroot '''
        super(Connection, self).put_file(in_path, out_path)
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)

@@ -178,7 +177,7 @@ class Connection(ConnectionBase):
            raise AnsibleError("file or module does not exist at: %s" % in_path)

    def fetch_file(self, in_path, out_path):
-        """ fetch a file from chroot to local """
+        ''' fetch a file from chroot to local '''
        super(Connection, self).fetch_file(in_path, out_path)
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)

@@ -202,6 +201,6 @@ class Connection(ConnectionBase):
            raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))

    def close(self):
-        """ terminate the connection; nothing to do here """
+        ''' terminate the connection; nothing to do here '''
        super(Connection, self).close()
        self._connected = False
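_prefix_login_path() anchors relative remote paths at the filesystem root rather than $HOME, since a chroot may not contain a home directory. A quick sketch of the same normalization outside the plugin (a hypothetical helper mirroring the method body above):

import os.path


def prefix_login_path(remote_path):
    # Relative paths are rooted at os.path.sep, then normalized,
    # exactly as in Connection._prefix_login_path() above.
    if not remote_path.startswith(os.path.sep):
        remote_path = os.path.join(os.path.sep, remote_path)
    return os.path.normpath(remote_path)


print(prefix_login_path('etc/motd'))     # -> /etc/motd
print(prefix_login_path('/var/../tmp'))  # -> /tmp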
@@ -37,14 +37,13 @@ import tempfile
import shutil

from ansible.errors import AnsibleError
-from ansible.plugins.connection import ConnectionBase
from ansible.utils.display import Display

display = Display()


-class Connection(ConnectionBase):
-    """ Func-based connections """
+class Connection(object):
+    ''' Func-based connections '''

    has_pipelining = False

@@ -53,7 +52,6 @@ class Connection(ConnectionBase):
        self.host = host
-        # port is unused, this go on func
        self.port = port
        self.client = None

    def connect(self, port=None):
        if not HAVE_FUNC:
@@ -63,32 +61,31 @@ class Connection(ConnectionBase):
        return self

    def exec_command(self, cmd, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
-        """ run a command on the remote minion """
+        ''' run a command on the remote minion '''

        if in_data:
            raise AnsibleError("Internal Error: this module does not support optimized module pipelining")

        # totally ignores privlege escalation
-        display.vvv("EXEC %s" % cmd, host=self.host)
+        display.vvv("EXEC %s" % (cmd), host=self.host)
        p = self.client.command.run(cmd)[self.host]
-        return p[0], p[1], p[2]
+        return (p[0], p[1], p[2])

-    @staticmethod
-    def _normalize_path(path, prefix):
+    def _normalize_path(self, path, prefix):
        if not path.startswith(os.path.sep):
            path = os.path.join(os.path.sep, path)
        normpath = os.path.normpath(path)
        return os.path.join(prefix, normpath[1:])

    def put_file(self, in_path, out_path):
-        """ transfer a file from local to remote """
+        ''' transfer a file from local to remote '''

        out_path = self._normalize_path(out_path, '/')
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
        self.client.local.copyfile.send(in_path, out_path)

    def fetch_file(self, in_path, out_path):
-        """ fetch a file from remote to local """
+        ''' fetch a file from remote to local '''

        in_path = self._normalize_path(in_path, '/')
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
@@ -101,5 +98,5 @@ class Connection(ConnectionBase):
        shutil.rmtree(tmpdir)

    def close(self):
-        """ terminate the connection; nothing to do here """
+        ''' terminate the connection; nothing to do here '''
        pass
@@ -40,7 +40,7 @@ display = Display()


class Connection(Jail):
-    """ Local iocage based connections """
+    ''' Local iocage based connections '''

    transport = 'community.general.iocage'

@@ -35,6 +35,7 @@ import os
import os.path
import subprocess
import traceback
+import ansible.constants as C

from ansible.errors import AnsibleError
from ansible.module_utils.six.moves import shlex_quote
@@ -46,7 +47,7 @@ display = Display()


class Connection(ConnectionBase):
-    """ Local BSD Jail based connections """
+    ''' Local BSD Jail based connections '''

    modified_jailname_key = 'conn_jail_name'

@@ -89,20 +90,20 @@ class Connection(ConnectionBase):
        return to_text(stdout, errors='surrogate_or_strict').split()

    def _connect(self):
-        """ connect to the jail; nothing to do here """
+        ''' connect to the jail; nothing to do here '''
        super(Connection, self)._connect()
        if not self._connected:
            display.vvv(u"ESTABLISH JAIL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self.jail)
            self._connected = True

    def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
-        """ run a command on the jail. This is only needed for implementing
+        ''' run a command on the jail. This is only needed for implementing
        put_file() get_file() so that we don't have to read the whole file
        into memory.

        compared to exec_command() it looses some niceties like being able to
        return the process's exit code immediately.
-        """
+        '''

        local_cmd = [self.jexec_cmd]
        set_env = ''
@@ -122,17 +123,16 @@ class Connection(ConnectionBase):
        return p

    def exec_command(self, cmd, in_data=None, sudoable=False):
-        """ run a command on the jail """
+        ''' run a command on the jail '''
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        p = self._buffered_exec_command(cmd)

        stdout, stderr = p.communicate(in_data)
-        return p.returncode, stdout, stderr
+        return (p.returncode, stdout, stderr)

-    @staticmethod
-    def _prefix_login_path(remote_path):
-        """ Make sure that we put files into a standard path
+    def _prefix_login_path(self, remote_path):
+        ''' Make sure that we put files into a standard path

        If a path is relative, then we need to choose where to put it.
        ssh chooses $HOME but we aren't guaranteed that a home dir will
@@ -140,13 +140,13 @@ class Connection(ConnectionBase):
        This also happens to be the former default.

        Can revisit using $HOME instead if it's a problem
-        """
+        '''
        if not remote_path.startswith(os.path.sep):
            remote_path = os.path.join(os.path.sep, remote_path)
        return os.path.normpath(remote_path)

    def put_file(self, in_path, out_path):
-        """ transfer a file from local to jail """
+        ''' transfer a file from local to jail '''
        super(Connection, self).put_file(in_path, out_path)
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail)

@@ -172,7 +172,7 @@ class Connection(ConnectionBase):
            raise AnsibleError("file or module does not exist at: %s" % in_path)

    def fetch_file(self, in_path, out_path):
-        """ fetch a file from jail to local """
+        ''' fetch a file from jail to local '''
        super(Connection, self).fetch_file(in_path, out_path)
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail)

@@ -196,6 +196,6 @@ class Connection(ConnectionBase):
            raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr)))

    def close(self):
-        """ terminate the connection; nothing to do here """
+        ''' terminate the connection; nothing to do here '''
        super(Connection, self).close()
        self._connected = False
@@ -42,13 +42,14 @@ try:
except ImportError:
    pass

+from ansible import constants as C
from ansible import errors
from ansible.module_utils._text import to_bytes, to_native
from ansible.plugins.connection import ConnectionBase


class Connection(ConnectionBase):
-    """ Local lxc based connections """
+    ''' Local lxc based connections '''

    transport = 'community.general.lxc'
    has_pipelining = True
@@ -61,7 +62,7 @@ class Connection(ConnectionBase):
        self.container = None

    def _connect(self):
-        """ connect to the lxc; nothing to do here """
+        ''' connect to the lxc; nothing to do here '''
        super(Connection, self)._connect()

        if not HAS_LIBLXC:
@@ -76,8 +77,7 @@ class Connection(ConnectionBase):
        if self.container.state == "STOPPED":
            raise errors.AnsibleError("%s is not running" % self.container_name)

-    @staticmethod
-    def _communicate(pid, in_data, stdin, stdout, stderr):
+    def _communicate(self, pid, in_data, stdin, stdout, stderr):
        buf = {stdout: [], stderr: []}
        read_fds = [stdout, stderr]
        if in_data:
@@ -111,7 +111,7 @@ class Connection(ConnectionBase):
        return fd

    def exec_command(self, cmd, in_data=None, sudoable=False):
-        """ run a command on the chroot """
+        ''' run a command on the chroot '''
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        # python2-lxc needs bytes. python3-lxc needs text.
@@ -37,9 +37,15 @@ DOCUMENTATION = '''
#        - name: hosts
'''

import shlex
import shutil

import os
import base64
import subprocess

from ansible.module_utils._text import to_bytes
import ansible.constants as C
from ansible.module_utils._text import to_bytes, to_native
from ansible.plugins.connection import ConnectionBase, ensure_connect
from ansible.errors import AnsibleConnectionFailure
from ansible.utils.display import Display

@@ -16,11 +16,14 @@ DOCUMENTATION = '''
    - This allows you to use existing Saltstack infrastructure to connect to targets.
'''

import re
import os
import base64
import pty
import codecs
import subprocess

from ansible import errors
from ansible.plugins.connection import ConnectionBase
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.six.moves import cPickle

HAVE_SALTSTACK = False
try:
@@ -29,9 +32,13 @@ try:
except ImportError:
    pass

+import os
+from ansible import errors
+from ansible.plugins.connection import ConnectionBase


class Connection(ConnectionBase):
-    """ Salt-based connections """
+    ''' Salt-based connections '''

    has_pipelining = False
    # while the name of the product is salt, naming that module salt cause
@@ -51,30 +58,29 @@ class Connection(ConnectionBase):
        return self

    def exec_command(self, cmd, sudoable=False, in_data=None):
-        """ run a command on the remote minion """
+        ''' run a command on the remote minion '''
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        if in_data:
            raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")

-        self._display.vvv("EXEC %s" % cmd, host=self.host)
+        self._display.vvv("EXEC %s" % (cmd), host=self.host)
        # need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077
        res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd])
        if self.host not in res:
            raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host)

        p = res[self.host]
-        return p['retcode'], p['stdout'], p['stderr']
+        return (p['retcode'], p['stdout'], p['stderr'])

-    @staticmethod
-    def _normalize_path(path, prefix):
+    def _normalize_path(self, path, prefix):
        if not path.startswith(os.path.sep):
            path = os.path.join(os.path.sep, path)
        normpath = os.path.normpath(path)
        return os.path.join(prefix, normpath[1:])

    def put_file(self, in_path, out_path):
-        """ transfer a file from local to remote """
+        ''' transfer a file from local to remote '''

        super(Connection, self).put_file(in_path, out_path)

@@ -82,11 +88,11 @@ class Connection(ConnectionBase):
        self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
        with open(in_path, 'rb') as in_fh:
            content = in_fh.read()
-        self.client.cmd(self.host, 'hashutil.base64_decodefile', [base64.b64encode(content), out_path])
+        self.client.cmd(self.host, 'hashutil.base64_decodefile', [codecs.encode(content, 'base64'), out_path])

    # TODO test it
    def fetch_file(self, in_path, out_path):
-        """ fetch a file from remote to local """
+        ''' fetch a file from remote to local '''

        super(Connection, self).fetch_file(in_path, out_path)

@@ -96,5 +102,5 @@ class Connection(ConnectionBase):
        open(out_path, 'wb').write(content)

    def close(self):
-        """ terminate the connection; nothing to do here """
+        ''' terminate the connection; nothing to do here '''
        pass
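The put_file hunk above replaces codecs.encode(content, 'base64') with base64.b64encode(): the codecs codec emits MIME-style output with embedded and trailing newlines, while b64encode returns one unbroken token. A minimal demonstration of the difference (assumes Python 3):

import base64
import codecs

content = b'x' * 60

print(base64.b64encode(content))         # single unbroken base64 token
print(codecs.encode(content, 'base64'))  # same data, but with b'\n' line breaks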
@@ -31,6 +31,7 @@ import os.path
import subprocess
import traceback

+from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes
@@ -41,7 +42,7 @@ display = Display()


class Connection(ConnectionBase):
-    """ Local zone based connections """
+    ''' Local zone based connections '''

    transport = 'community.general.zone'
    has_pipelining = True
@@ -74,9 +75,9 @@ class Connection(ConnectionBase):
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        zones = []
-        for line in process.stdout.readlines():
+        for l in process.stdout.readlines():
            # 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared
-            s = line.split(':')
+            s = l.split(':')
            if s[1] != 'global':
                zones.append(s[1])

@@ -94,20 +95,20 @@ class Connection(ConnectionBase):
        return path + '/root'

    def _connect(self):
-        """ connect to the zone; nothing to do here """
+        ''' connect to the zone; nothing to do here '''
        super(Connection, self)._connect()
        if not self._connected:
            display.vvv("THIS IS A LOCAL ZONE DIR", host=self.zone)
            self._connected = True

    def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
-        """ run a command on the zone. This is only needed for implementing
+        ''' run a command on the zone. This is only needed for implementing
        put_file() get_file() so that we don't have to read the whole file
        into memory.

        compared to exec_command() it looses some niceties like being able to
        return the process's exit code immediately.
-        """
+        '''
        # NOTE: zlogin invokes a shell (just like ssh does) so we do not pass
        # this through /bin/sh -c here. Instead it goes through the shell
        # that zlogin selects.
@@ -121,16 +122,16 @@ class Connection(ConnectionBase):
        return p

    def exec_command(self, cmd, in_data=None, sudoable=False):
-        """ run a command on the zone """
+        ''' run a command on the zone '''
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        p = self._buffered_exec_command(cmd)

        stdout, stderr = p.communicate(in_data)
-        return p.returncode, stdout, stderr
+        return (p.returncode, stdout, stderr)

    def _prefix_login_path(self, remote_path):
-        """ Make sure that we put files into a standard path
+        ''' Make sure that we put files into a standard path

        If a path is relative, then we need to choose where to put it.
        ssh chooses $HOME but we aren't guaranteed that a home dir will
@@ -138,13 +139,13 @@ class Connection(ConnectionBase):
        This also happens to be the former default.

        Can revisit using $HOME instead if it's a problem
-        """
+        '''
        if not remote_path.startswith(os.path.sep):
            remote_path = os.path.join(os.path.sep, remote_path)
        return os.path.normpath(remote_path)

    def put_file(self, in_path, out_path):
-        """ transfer a file from local to zone """
+        ''' transfer a file from local to zone '''
        super(Connection, self).put_file(in_path, out_path)
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone)

@@ -170,7 +171,7 @@ class Connection(ConnectionBase):
            raise AnsibleError("file or module does not exist at: %s" % in_path)

    def fetch_file(self, in_path, out_path):
-        """ fetch a file from zone to local """
+        ''' fetch a file from zone to local '''
        super(Connection, self).fetch_file(in_path, out_path)
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone)

@@ -194,6 +195,6 @@ class Connection(ConnectionBase):
            raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))

    def close(self):
-        """ terminate the connection; nothing to do here """
+        ''' terminate the connection; nothing to do here '''
        super(Connection, self).close()
        self._connected = False
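The loop in the first zone hunk parses zoneadm's colon-separated, one-record-per-line output and keeps every zone except global. A standalone sketch of that parsing (sample lines modeled on the comment in the diff):

SAMPLE = [
    '0:global:running:/::native:shared',
    '1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared',
]

zones = []
for line in SAMPLE:
    s = line.split(':')
    if s[1] != 'global':  # skip the global zone, as the plugin does
        zones.append(s[1])

print(zones)  # ['work']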
plugins/doc_fragments/__init__.py (new, empty file)
@@ -30,6 +30,7 @@ options:
        description:
            - Keycloak realm name to authenticate to for API access.
        type: str
+        required: true

    auth_client_secret:
        description:
@@ -40,6 +41,7 @@ options:
        description:
            - Username to authenticate for API access with.
        type: str
+        required: true
        aliases:
            - username

@@ -47,15 +49,10 @@ options:
        description:
            - Password to authenticate for API access with.
        type: str
+        required: true
        aliases:
            - password

-    token:
-        description:
-            - Authentication token for Keycloak API.
-        type: str
-        version_added: 3.0.0
-
    validate_certs:
        description:
            - Verify TLS certificates (do not disable this in production).

@@ -13,32 +13,12 @@ class ModuleDocFragment(object):
    DOCUMENTATION = r'''
options:
    config:
-        description:
+        description:
            - Path to a .json configuration file containing the OneView client configuration.
              The configuration file is optional and when used should be present in the host running the ansible commands.
              If the file path is not provided, the configuration will be loaded from environment variables.
              For links to example configuration files or how to use the environment variables verify the notes section.
-        type: path
-    api_version:
-        description:
-            - OneView API Version.
-        type: int
-    image_streamer_hostname:
-        description:
-            - IP address or hostname for the HPE Image Streamer REST API.
-        type: str
-    hostname:
-        description:
-            - IP address or hostname for the appliance.
-        type: str
-    username:
-        description:
-            - Username for API authentication.
-        type: str
-    password:
-        description:
-            - Password for API authentication.
-        type: str
+        type: path

requirements:
    - python >= 2.7.9

plugins/doc_fragments/ovirt_facts.py (new file, 59 lines)
@@ -0,0 +1,59 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2016, Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):

    # info standard oVirt documentation fragment
    DOCUMENTATION = r'''
options:
    fetch_nested:
        description:
            - If I(yes) the module will fetch additional data from the API.
            - It will fetch only IDs of nested entity. It doesn't fetch multiple levels of nested attributes.
              Only the attributes of the current entity. User can configure to fetch other
              attributes of the nested entities by specifying C(nested_attributes).
        type: bool
        default: false
    nested_attributes:
        description:
            - Specifies list of the attributes which should be fetched from the API.
            - This parameter apply only when C(fetch_nested) is I(true).
        type: list
    auth:
        description:
            - "Dictionary with values needed to create HTTP/HTTPS connection to oVirt:"
            - C(username)[I(required)] - The name of the user, something like I(admin@internal).
              Default value is set by I(OVIRT_USERNAME) environment variable.
            - "C(password)[I(required)] - The password of the user. Default value is set by I(OVIRT_PASSWORD) environment variable."
            - "C(url)- A string containing the API URL of the server, usually
              something like `I(https://server.example.com/ovirt-engine/api)`. Default value is set by I(OVIRT_URL) environment variable.
              Either C(url) or C(hostname) is required."
            - "C(hostname) - A string containing the hostname of the server, usually
              something like `I(server.example.com)`. Default value is set by I(OVIRT_HOSTNAME) environment variable.
              Either C(url) or C(hostname) is required."
            - "C(token) - Token to be used instead of login with username/password. Default value is set by I(OVIRT_TOKEN) environment variable."
            - "C(insecure) - A boolean flag that indicates if the server TLS
              certificate and host name should be checked."
            - "C(ca_file) - A PEM file containing the trusted CA certificates. The
              certificate presented by the server will be verified using these CA
              certificates. If `C(ca_file)` parameter is not set, system wide
              CA certificate store is used. Default value is set by I(OVIRT_CAFILE) environment variable."
            - "C(kerberos) - A boolean flag indicating if Kerberos authentication
              should be used instead of the default basic authentication."
            - "C(headers) - Dictionary of HTTP headers to be added to each API call."
        type: dict
        required: true
requirements:
    - python >= 2.7
    - ovirt-engine-sdk-python >= 4.3.0
notes:
    - "In order to use this module you have to install oVirt Python SDK.
      To ensure it's installed with correct version you can create the following task:
      ansible.builtin.pip: name=ovirt-engine-sdk-python version=4.3.0"
'''
@@ -1,43 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Florian Dambrine <android.florian@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function

__metaclass__ = type


class ModuleDocFragment(object):

    DOCUMENTATION = r"""
options:
    pritunl_url:
        type: str
        required: true
        description:
            - URL and port of the Pritunl server on which the API is enabled.

    pritunl_api_token:
        type: str
        required: true
        description:
            - API Token of a Pritunl admin user.
            - It needs to be enabled in Administrators > USERNAME > Enable Token Authentication.

    pritunl_api_secret:
        type: str
        required: true
        description:
            - API Secret found in Administrators > USERNAME > API Secret.

    validate_certs:
        type: bool
        required: false
        default: true
        description:
            - If certificates should be validated or not.
            - This should never be set to C(false), except if you are very sure that
              your connection to the server can not be subject to a Man In The Middle
              attack.
"""
plugins/filter/__init__.py (new, empty file)
@@ -1,24 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


def dict_filter(sequence):
    '''Convert a list of tuples to a dictionary.

    Example: ``[[1, 2], ['a', 'b']] | community.general.dict`` results in ``{1: 2, 'a': 'b'}``
    '''
    return dict(sequence)


class FilterModule(object):
    '''Ansible jinja2 filters'''

    def filters(self):
        return {
            'dict': dict_filter,
        }
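The removed dict filter is a direct wrapper around Python's dict() constructor applied to a sequence of key/value pairs; the Jinja2 usage in its docstring maps onto plain Python like this (illustrative values):

sequence = [[1, 2], ['a', 'b']]

# Equivalent of: {{ sequence | community.general.dict }}
print(dict(sequence))  # {1: 2, 'a': 'b'}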
@@ -1,49 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) <ajpantuso@gmail.com>
# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible.errors import AnsibleFilterError
from ansible.module_utils._text import to_native

from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError,
                                                                            DialectNotAvailableError,
                                                                            CustomDialectFailureError)


def from_csv(data, dialect='excel', fieldnames=None, delimiter=None, skipinitialspace=None, strict=None):

    dialect_params = {
        "delimiter": delimiter,
        "skipinitialspace": skipinitialspace,
        "strict": strict,
    }

    try:
        dialect = initialize_dialect(dialect, **dialect_params)
    except (CustomDialectFailureError, DialectNotAvailableError) as e:
        raise AnsibleFilterError(to_native(e))

    reader = read_csv(data, dialect, fieldnames)

    data_list = []

    try:
        for row in reader:
            data_list.append(row)
    except CSVError as e:
        raise AnsibleFilterError("Unable to process file: %s" % to_native(e))

    return data_list


class FilterModule(object):

    def filters(self):
        return {
            'from_csv': from_csv
        }
@@ -1,42 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.errors import AnsibleFilterError
from ansible.module_utils.common._collections_compat import Mapping, Sequence


def groupby_as_dict(sequence, attribute):
    '''
    Given a sequence of dictionaries and an attribute name, returns a dictionary mapping
    the value of this attribute to the dictionary.

    If multiple dictionaries in the sequence have the same value for this attribute,
    the filter will fail.
    '''
    if not isinstance(sequence, Sequence):
        raise AnsibleFilterError('Input is not a sequence')

    result = dict()
    for list_index, element in enumerate(sequence):
        if not isinstance(element, Mapping):
            raise AnsibleFilterError('Sequence element #{0} is not a mapping'.format(list_index))
        if attribute not in element:
            raise AnsibleFilterError('Attribute not contained in element #{0} of sequence'.format(list_index))
        result_index = element[attribute]
        if result_index in result:
            raise AnsibleFilterError('Multiple sequence entries have attribute value {0!r}'.format(result_index))
        result[result_index] = element
    return result


class FilterModule(object):
    ''' Ansible list filters '''

    def filters(self):
        return {
            'groupby_as_dict': groupby_as_dict,
        }
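groupby_as_dict indexes a list of mappings by one attribute and fails on duplicate values, which is what distinguishes it from Jinja2's built-in groupby. A minimal usage sketch against the function defined above (sample data is illustrative):

users = [
    {'name': 'alice', 'uid': 1001},
    {'name': 'bob', 'uid': 1002},
]

print(groupby_as_dict(users, 'name'))
# {'alice': {'name': 'alice', 'uid': 1001}, 'bob': {'name': 'bob', 'uid': 1002}}

# A second entry with name 'alice' would raise AnsibleFilterError,
# because every attribute value must be unique.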
@@ -1,97 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) <ajpantuso@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible.errors import (
    AnsibleError,
    AnsibleFilterError,
    AnsibleFilterTypeError,
)

from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.collections import is_sequence

try:
    from hashids import Hashids
    HAS_HASHIDS = True
except ImportError:
    HAS_HASHIDS = False


def initialize_hashids(**kwargs):
    if not HAS_HASHIDS:
        raise AnsibleError("The hashids library must be installed in order to use this plugin")

    params = dict((k, v) for k, v in kwargs.items() if v)

    try:
        return Hashids(**params)
    except TypeError as e:
        raise AnsibleFilterError(
            "The provided parameters %s are invalid: %s" % (
                ', '.join(["%s=%s" % (k, v) for k, v in params.items()]),
                to_native(e)
            )
        )


def hashids_encode(nums, salt=None, alphabet=None, min_length=None):
    """Generates a YouTube-like hash from a sequence of ints

    :nums: Sequence of one or more ints to hash
    :salt: String to use as salt when hashing
    :alphabet: String of 16 or more unique characters to produce a hash
    :min_length: Minimum length of hash produced
    """

    hashids = initialize_hashids(
        salt=salt,
        alphabet=alphabet,
        min_length=min_length
    )

    # Handles the case where a single int is not encapsulated in a list or tuple.
    # User convenience seems preferable to strict typing in this case
    # Also avoids obfuscated error messages related to single invalid inputs
    if not is_sequence(nums):
        nums = [nums]

    try:
        hashid = hashids.encode(*nums)
    except TypeError as e:
        raise AnsibleFilterTypeError(
            "Data to encode must by a tuple or list of ints: %s" % to_native(e)
        )

    return hashid


def hashids_decode(hashid, salt=None, alphabet=None, min_length=None):
    """Decodes a YouTube-like hash to a sequence of ints

    :hashid: Hash string to decode
    :salt: String to use as salt when hashing
    :alphabet: String of 16 or more unique characters to produce a hash
    :min_length: Minimum length of hash produced
    """

    hashids = initialize_hashids(
        salt=salt,
        alphabet=alphabet,
        min_length=min_length
    )
    nums = hashids.decode(hashid)
    return list(nums)


class FilterModule(object):

    def filters(self):
        return {
            'hashids_encode': hashids_encode,
            'hashids_decode': hashids_decode,
        }
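The two filters round-trip through the same Hashids instance, so decoding an encoded value recovers the original ints whenever salt, alphabet, and min_length match. A usage sketch assuming the hashids library is installed (salt value is illustrative):

ints = [42, 7]

encoded = hashids_encode(ints, salt='example-salt')
decoded = hashids_decode(encoded, salt='example-salt')

print(encoded)          # an opaque YouTube-like token
print(decoded == ints)  # True - same salt, so the round trip is lossless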
@@ -35,11 +35,9 @@ def json_query(data, expr):
        raise AnsibleError('You need to install "jmespath" prior to running '
                           'json_query filter')

-    # Hack to handle Ansible Unsafe text, AnsibleMapping and AnsibleSequence
+    # Hack to handle Ansible String Types
    # See issue: https://github.com/ansible-collections/community.general/issues/320
    jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ('AnsibleUnicode', 'AnsibleUnsafeText', )
-    jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + ('AnsibleSequence', )
-    jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + ('AnsibleMapping', )
    try:
        return jmespath.search(expr, data)
    except jmespath.exceptions.JMESPathError as e:

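jmespath type-checks function arguments by looking class names up in REVERSE_TYPES_MAP, so Ansible's str/list/dict subclasses (AnsibleUnicode, AnsibleSequence, AnsibleMapping) are rejected unless their names are registered, which is what the hunk above extends. A stripped-down illustration of the same registration pattern (TaggedStr is a hypothetical subclass, not Ansible's):

import jmespath
import jmespath.functions


class TaggedStr(str):
    """Stands in for Ansible's str subclasses such as AnsibleUnicode."""


# Without this registration, jmespath functions that type-check their
# arguments would report TaggedStr values as an unknown type.
jmespath.functions.REVERSE_TYPES_MAP['string'] = (
    jmespath.functions.REVERSE_TYPES_MAP['string'] + ('TaggedStr',)
)

print(jmespath.search("contains(@, 'lo')", TaggedStr('hello')))  # True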
@@ -1,28 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2020-2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


import os.path


def path_join(list):
    '''Join list of paths.

    This is a minimal shim for ansible.builtin.path_join included in ansible-base 2.10.
    This should only be called by Ansible 2.9 or earlier. See meta/runtime.yml for details.
    '''
    return os.path.join(*list)


class FilterModule(object):
    '''Ansible jinja2 filters'''

    def filters(self):
        return {
            'path_join': path_join,
        }
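The shim simply splats its list into os.path.join, giving Ansible 2.9 playbooks the same behaviour as the ansible.builtin.path_join filter shipped with ansible-base 2.10. The equivalent plain-Python call (illustrative values):

import os.path

parts = ['/etc', 'ansible', 'hosts']

# Equivalent of: {{ parts | community.general.path_join }}
print(os.path.join(*parts))  # /etc/ansible/hosts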
plugins/inventory/__init__.py (new, empty file)
@@ -1,950 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Frank Dornheim <dornheim@posteo.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = r'''
    name: lxd
    short_description: Returns Ansible inventory from lxd host
    description:
        - Get inventory from the lxd.
        - Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'.
    version_added: "3.0.0"
    author: "Frank Dornheim (@conloos)"
    options:
        plugin:
            description: Token that ensures this is a source file for the 'lxd' plugin.
            required: true
            choices: [ 'community.general.lxd' ]
        url:
            description:
                - The unix domain socket path or the https URL for the lxd server.
                - Sockets in filesystem have to start with C(unix:).
                - Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket).
            default: unix:/var/snap/lxd/common/lxd/unix.socket
            type: str
        client_key:
            description:
                - The client certificate key file path.
            aliases: [ key_file ]
            default: $HOME/.config/lxc/client.key
            type: path
        client_cert:
            description:
                - The client certificate file path.
            aliases: [ cert_file ]
            default: $HOME/.config/lxc/client.crt
            type: path
        trust_password:
            description:
                - The client trusted password.
                - You need to set this password on the lxd server before
                  running this module using the following command
                  C(lxc config set core.trust_password <some random password>)
                  See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).
                - If I(trust_password) is set, this module send a request for authentication before sending any requests.
            type: str
        state:
            description: Filter the container according to the current status.
            type: str
            default: none
            choices: [ 'STOPPED', 'STARTING', 'RUNNING', 'none' ]
        prefered_container_network_interface:
            description:
                - If a container has multiple network interfaces, select which one is the prefered as pattern.
                - Combined with the first number that can be found e.g. 'eth' + 0.
            type: str
            default: eth
        prefered_container_network_family:
            description:
                - If a container has multiple network interfaces, which one is the prefered by family.
                - Specify C(inet) for IPv4 and C(inet6) for IPv6.
            type: str
            default: inet
            choices: [ 'inet', 'inet6' ]
        groupby:
            description:
                - Create groups by the following keywords C(location), C(pattern), C(network_range), C(os), C(release), C(profile), C(vlanid).
                - See example for syntax.
            type: dict
'''

EXAMPLES = '''
# simple lxd.yml
plugin: community.general.lxd
url: unix:/var/snap/lxd/common/lxd/unix.socket

# simple lxd.yml including filter
plugin: community.general.lxd
url: unix:/var/snap/lxd/common/lxd/unix.socket
state: RUNNING

# grouping lxd.yml
groupby:
  testpattern:
    type: pattern
    attribute: test
  vlan666:
    type: vlanid
    attribute: 666
  locationBerlin:
    type: location
    attribute: Berlin
  osUbuntu:
    type: os
    attribute: ubuntu
  releaseFocal:
    type: release
    attribute: focal
  releaseBionic:
    type: release
    attribute: bionic
  profileDefault:
    type: profile
    attribute: default
  profileX11:
    type: profile
    attribute: x11
  netRangeIPv4:
    type: network_range
    attribute: 10.98.143.0/24
  netRangeIPv6:
    type: network_range
    attribute: fd42:bd00:7b11:2167:216:3eff::/24
'''

import binascii
import json
import re
import time
import os
import socket
from ansible.plugins.inventory import BaseInventoryPlugin
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.common.dict_transformations import dict_merge
from ansible.errors import AnsibleError, AnsibleParserError
from ansible_collections.community.general.plugins.module_utils.compat import ipaddress
from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException


class InventoryModule(BaseInventoryPlugin):
    DEBUG = 4
    NAME = 'community.general.lxd'
    SNAP_SOCKET_URL = 'unix:/var/snap/lxd/common/lxd/unix.socket'
    SOCKET_URL = 'unix:/var/lib/lxd/unix.socket'

    @staticmethod
    def load_json_data(path):
        """Load json data

        Load json data from file

        Args:
            list(path): Path elements
            str(file_name): Filename of data
        Kwargs:
            None
        Raises:
            None
        Returns:
            dict(json_data): json data"""
        try:
            with open(path, 'r') as json_file:
                return json.load(json_file)
        except (IOError, json.decoder.JSONDecodeError) as err:
            raise AnsibleParserError('Could not load the test data from {0}: {1}'.format(to_native(path), to_native(err)))

    def save_json_data(self, path, file_name=None):
        """save data as json

        Save data as json file

        Args:
            list(path): Path elements
            str(file_name): Filename of data
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""

        if file_name:
            path.append(file_name)
        else:
            prefix = 'lxd_data-'
            time_stamp = time.strftime('%Y%m%d-%H%M%S')
            suffix = '.atd'
            path.append(prefix + time_stamp + suffix)

        try:
            cwd = os.path.abspath(os.path.dirname(__file__))
            with open(os.path.abspath(os.path.join(cwd, *path)), 'w') as json_file:
                json.dump(self.data, json_file)
        except IOError as err:
            raise AnsibleParserError('Could not save data: {0}'.format(to_native(err)))

    def verify_file(self, path):
        """Check the config

        Return true/false if the config-file is valid for this plugin

        Args:
            str(path): path to the config
        Kwargs:
            None
        Raises:
            None
        Returns:
            bool(valid): is valid"""
        valid = False
        if super(InventoryModule, self).verify_file(path):
            if path.endswith(('lxd.yaml', 'lxd.yml')):
                valid = True
            else:
                self.display.vvv('Inventory source not ending in "lxd.yaml" or "lxd.yml"')
        return valid

    @staticmethod
    def validate_url(url):
        """validate url

        check whether the url is correctly formatted

        Args:
            url
        Kwargs:
            None
        Raises:
            AnsibleError
        Returns:
            bool"""
        if not isinstance(url, str):
            return False
        if not url.startswith(('unix:', 'https:')):
            raise AnsibleError('URL is malformed: {0}'.format(to_native(url)))
        return True

    def _connect_to_socket(self):
        """connect to lxd socket

        Connect to lxd socket by provided url or defaults

        Args:
            None
        Kwargs:
            None
        Raises:
            AnsibleError
        Returns:
            None"""
        error_storage = {}
        url_list = [self.get_option('url'), self.SNAP_SOCKET_URL, self.SOCKET_URL]
        urls = (url for url in url_list if self.validate_url(url))
        for url in urls:
            try:
                socket_connection = LXDClient(url, self.client_key, self.client_cert, self.debug)
                return socket_connection
            except LXDClientException as err:
                error_storage[url] = err
        raise AnsibleError('No connection to the socket: {0}'.format(to_native(error_storage)))

    def _get_networks(self):
        """Get Networknames

        Returns all network config names

        Args:
            None
        Kwargs:
            None
        Raises:
            None
        Returns:
            list(names): names of all network_configs"""
        # e.g. {'type': 'sync',
        #       'status': 'Success',
        #       'status_code': 200,
        #       'operation': '',
        #       'error_code': 0,
        #       'error': '',
        #       'metadata': ['/1.0/networks/lxdbr0']}
        network_configs = self.socket.do('GET', '/1.0/networks')
        return [m.split('/')[3] for m in network_configs['metadata']]

    def _get_containers(self):
        """Get Containernames

        Returns all containernames

        Args:
            None
        Kwargs:
            None
        Raises:
            None
        Returns:
            list(names): names of all containers"""
        # e.g. {'type': 'sync',
        #       'status': 'Success',
        #       'status_code': 200,
        #       'operation': '',
        #       'error_code': 0,
        #       'error': '',
        #       'metadata': ['/1.0/containers/udemy-ansible-ubuntu-2004']}
        containers = self.socket.do('GET', '/1.0/containers')
        return [m.split('/')[3] for m in containers['metadata']]

    def _get_config(self, branch, name):
        """Get inventory of container

        Get config of container

        Args:
            str(branch): Name oft the API-Branch
            str(name): Name of Container
        Kwargs:
            None
        Source:
            https://github.com/lxc/lxd/blob/master/doc/rest-api.md
        Raises:
            None
        Returns:
            dict(config): Config of the container"""
        config = {}
        if isinstance(branch, (tuple, list)):
            config[name] = {branch[1]: self.socket.do('GET', '/1.0/{0}/{1}/{2}'.format(to_native(branch[0]), to_native(name), to_native(branch[1])))}
        else:
            config[name] = {branch: self.socket.do('GET', '/1.0/{0}/{1}'.format(to_native(branch), to_native(name)))}
        return config

    def get_container_data(self, names):
        """Create Inventory of the container

        Iterate through the different branches of the containers and collect Informations.

        Args:
            list(names): List of container names
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        # tuple(('instances','metadata/templates')) to get section in branch
        # e.g. /1.0/instances/<name>/metadata/templates
        branches = ['containers', ('instances', 'state')]
        container_config = {}
        for branch in branches:
            for name in names:
                container_config['containers'] = self._get_config(branch, name)
                self.data = dict_merge(container_config, self.data)

    def get_network_data(self, names):
        """Create Inventory of the container

        Iterate through the different branches of the containers and collect Informations.

        Args:
            list(names): List of container names
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        # tuple(('instances','metadata/templates')) to get section in branch
        # e.g. /1.0/instances/<name>/metadata/templates
        branches = [('networks', 'state')]
        network_config = {}
        for branch in branches:
            for name in names:
                try:
                    network_config['networks'] = self._get_config(branch, name)
                except LXDClientException:
                    network_config['networks'] = {name: None}
                self.data = dict_merge(network_config, self.data)

    def extract_network_information_from_container_config(self, container_name):
        """Returns the network interface configuration

        Returns the network ipv4 and ipv6 config of the container without local-link

        Args:
            str(container_name): Name oft he container
        Kwargs:
            None
        Raises:
            None
        Returns:
            dict(network_configuration): network config"""
        container_network_interfaces = self._get_data_entry('containers/{0}/state/metadata/network'.format(container_name))
        network_configuration = None
        if container_network_interfaces:
            network_configuration = {}
            gen_interface_names = [interface_name for interface_name in container_network_interfaces if interface_name != 'lo']
            for interface_name in gen_interface_names:
                gen_address = [address for address in container_network_interfaces[interface_name]['addresses'] if address.get('scope') != 'link']
                network_configuration[interface_name] = []
                for address in gen_address:
                    address_set = {}
                    address_set['family'] = address.get('family')
                    address_set['address'] = address.get('address')
                    address_set['netmask'] = address.get('netmask')
                    address_set['combined'] = address.get('address') + '/' + address.get('netmask')
                    network_configuration[interface_name].append(address_set)
        return network_configuration

    def get_prefered_container_network_interface(self, container_name):
        """Helper to get the prefered interface of thr container

        Helper to get the prefered interface provide by neme pattern from 'prefered_container_network_interface'.

        Args:
            str(containe_name): name of container
        Kwargs:
            None
        Raises:
            None
        Returns:
            str(prefered_interface): None or interface name"""
        container_network_interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name))
        prefered_interface = None  # init
        if container_network_interfaces:  # container have network interfaces
            # generator if interfaces which start with the desired pattern
            net_generator = [interface for interface in container_network_interfaces if interface.startswith(self.prefered_container_network_interface)]
            selected_interfaces = []  # init
            for interface in net_generator:
                selected_interfaces.append(interface)
            if len(selected_interfaces) > 0:
                prefered_interface = sorted(selected_interfaces)[0]
        return prefered_interface

    def get_container_vlans(self, container_name):
        """Get VLAN(s) from container

        Helper to get the VLAN_ID from the container

        Args:
            str(containe_name): name of container
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        # get network device configuration and store {network: vlan_id}
        network_vlans = {}
        for network in self._get_data_entry('networks'):
            if self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network)):
                network_vlans[network] = self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network))

        # get networkdevices of container and return
        # e.g.
        # "eth0":{ "name":"eth0",
        #          "network":"lxdbr0",
        #          "type":"nic"},
        vlan_ids = {}
        devices = self._get_data_entry('containers/{0}/containers/metadata/expanded_devices'.format(to_native(container_name)))
        for device in devices:
            if 'network' in devices[device]:
                if devices[device]['network'] in network_vlans:
                    vlan_ids[devices[device].get('network')] = network_vlans[devices[device].get('network')]
        return vlan_ids if vlan_ids else None

    def _get_data_entry(self, path, data=None, delimiter='/'):
        """Helper to get data

        Helper to get data from self.data by a path like 'path/to/target'
        Attention: Escaping of the delimiter is not (yet) provided.

        Args:
            str(path): path to nested dict
        Kwargs:
            dict(data): datastore
            str(delimiter): delimiter in Path.
        Raises:
            None
        Returns:
            *(value)"""
        try:
            if not data:
                data = self.data
            if delimiter in path:
                path = path.split(delimiter)

            if isinstance(path, list) and len(path) > 1:
                data = data[path.pop(0)]
                path = delimiter.join(path)
                return self._get_data_entry(path, data, delimiter)  # recursion
            return data[path]
        except KeyError:
            return None

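_get_data_entry() walks a nested dict with a 'path/to/key' string, peeling off one segment per recursion step and returning None on any missing key. A standalone sketch of the same traversal (iterative for brevity; the sample data is illustrative):

def get_data_entry(data, path, delimiter='/'):
    # Walk one key per path segment; None if any segment is missing,
    # mirroring InventoryModule._get_data_entry() above.
    for key in path.split(delimiter):
        try:
            data = data[key]
        except (KeyError, TypeError):
            return None
    return data


sample = {'containers': {'web01': {'state': {'status': 'Running'}}}}
print(get_data_entry(sample, 'containers/web01/state/status'))  # Running
print(get_data_entry(sample, 'containers/missing/state'))       # None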
def _set_data_entry(self, container_name, key, value, path=None):
|
||||
"""Helper to save data
|
||||
|
||||
Helper to save the data in self.data
|
||||
Detect if data is allready in branch and use dict_merge() to prevent that branch is overwritten.
|
||||
|
||||
Args:
|
||||
str(container_name): name of container
|
||||
str(key): same as dict
|
||||
*(value): same as dict
|
||||
Kwargs:
|
||||
str(path): path to branch-part
|
||||
Raises:
|
||||
AnsibleParserError
|
||||
Returns:
|
||||
None"""
|
||||
if not path:
|
||||
path = self.data['inventory']
|
||||
if container_name not in path:
|
||||
path[container_name] = {}
|
||||
|
||||
try:
|
||||
if isinstance(value, dict) and key in path[container_name]:
|
||||
path[container_name] = dict_merge(value, path[container_name][key])
|
||||
else:
|
||||
path[container_name][key] = value
|
||||
except KeyError as err:
|
||||
raise AnsibleParserError("Unable to store Informations: {0}".format(to_native(err)))
|
||||
|
||||
def extract_information_from_container_configs(self):
|
||||
"""Process configuration information
|
||||
|
||||
Preparation of the data
|
||||
|
||||
Args:
|
||||
dict(configs): Container configurations
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
None"""
|
||||
# create branch "inventory"
|
||||
if 'inventory' not in self.data:
|
||||
self.data['inventory'] = {}
|
||||
|
||||
for container_name in self.data['containers']:
|
||||
self._set_data_entry(container_name, 'os', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/config/image.os'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'release', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/config/image.release'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'version', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/config/image.version'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'profile', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/profiles'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'location', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/location'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'state', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/config/volatile.last_state.power'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'network_interfaces', self.extract_network_information_from_container_config(container_name))
|
||||
self._set_data_entry(container_name, 'preferred_interface', self.get_prefered_container_network_interface(container_name))
|
||||
self._set_data_entry(container_name, 'vlan_ids', self.get_container_vlans(container_name))
|
||||
|
||||
    def build_inventory_network(self, container_name):
        """Add the network interfaces of the container to the inventory

        Logic:
            - if the container has no interface -> 'ansible_connection: local'
            - get preferred_interface & prefered_container_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
            - first interface from: network_interfaces prefered_container_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'

        Args:
            str(container_name): name of container
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""

        def interface_selection(container_name):
            """Select container interface for inventory

            Logic:
                - get preferred_interface & prefered_container_network_family -> str(IP)
                - first interface from: network_interfaces prefered_container_network_family -> str(IP)

            Args:
                str(container_name): name of container
            Kwargs:
                None
            Raises:
                None
            Returns:
                dict(interface_name: ip)"""
            prefered_interface = self._get_data_entry('inventory/{0}/preferred_interface'.format(container_name))  # name or None
            prefered_container_network_family = self.prefered_container_network_family

            ip_address = ''
            if prefered_interface:
                interface = self._get_data_entry('inventory/{0}/network_interfaces/{1}'.format(container_name, prefered_interface))
                for config in interface:
                    if config['family'] == prefered_container_network_family:
                        ip_address = config['address']
                        break
            else:
                interface = self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name))
                for config in interface:
                    if config['family'] == prefered_container_network_family:
                        ip_address = config['address']
                        break
            return ip_address

        if self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name)):  # container has network interfaces
            if self._get_data_entry('inventory/{0}/preferred_interface'.format(container_name)):  # container has a preferred interface
                self.inventory.set_variable(container_name, 'ansible_connection', 'ssh')
                self.inventory.set_variable(container_name, 'ansible_host', interface_selection(container_name))
        else:
            self.inventory.set_variable(container_name, 'ansible_connection', 'local')
    def build_inventory_hosts(self):
        """Build host-part dynamic inventory

        Build the host-part of the dynamic inventory.
        Add hosts and host_vars to the inventory.

        Args:
            None
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        for container_name in self.data['inventory']:
            # Only consider containers that match the "state" filter, if self.filter is not None
            if self.filter:
                if self.filter.lower() != self._get_data_entry('inventory/{0}/state'.format(container_name)).lower():
                    continue
            # add container
            self.inventory.add_host(container_name)
            # add network information
            self.build_inventory_network(container_name)
            # add os
            self.inventory.set_variable(container_name, 'ansible_lxd_os', self._get_data_entry('inventory/{0}/os'.format(container_name)).lower())
            # add release
            self.inventory.set_variable(container_name, 'ansible_lxd_release', self._get_data_entry('inventory/{0}/release'.format(container_name)).lower())
            # add profile
            self.inventory.set_variable(container_name, 'ansible_lxd_profile', self._get_data_entry('inventory/{0}/profile'.format(container_name)))
            # add state
            self.inventory.set_variable(container_name, 'ansible_lxd_state', self._get_data_entry('inventory/{0}/state'.format(container_name)).lower())
            # add location information
            if self._get_data_entry('inventory/{0}/location'.format(container_name)) != "none":  # wrong type by lxd 'none' != 'None'
                self.inventory.set_variable(container_name, 'ansible_lxd_location', self._get_data_entry('inventory/{0}/location'.format(container_name)))
            # add VLAN_ID information
            if self._get_data_entry('inventory/{0}/vlan_ids'.format(container_name)):
                self.inventory.set_variable(container_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(container_name)))
    def build_inventory_groups_location(self, group_name):
        """create group by attribute: location

        Args:
            str(group_name): Group name
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        # maybe we just want to expand one group
        if group_name not in self.inventory.groups:
            self.inventory.add_group(group_name)

        for container_name in self.inventory.hosts:
            if 'ansible_lxd_location' in self.inventory.get_host(container_name).get_vars():
                self.inventory.add_child(group_name, container_name)

    def build_inventory_groups_pattern(self, group_name):
        """create group by name pattern

        Args:
            str(group_name): Group name
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        # maybe we just want to expand one group
        if group_name not in self.inventory.groups:
            self.inventory.add_group(group_name)

        regex_pattern = self.groupby[group_name].get('attribute')

        for container_name in self.inventory.hosts:
            result = re.search(regex_pattern, container_name)
            if result:
                self.inventory.add_child(group_name, container_name)

    def build_inventory_groups_network_range(self, group_name):
        """check if IP is in network-class

        Args:
            str(group_name): Group name
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        # maybe we just want to expand one group
        if group_name not in self.inventory.groups:
            self.inventory.add_group(group_name)

        try:
            network = ipaddress.ip_network(to_text(self.groupby[group_name].get('attribute')))
        except ValueError as err:
            raise AnsibleParserError(
                'Error while parsing network range {0}: {1}'.format(self.groupby[group_name].get('attribute'), to_native(err)))

        for container_name in self.inventory.hosts:
            if self.data['inventory'][container_name].get('network_interfaces') is not None:
                for interface in self.data['inventory'][container_name].get('network_interfaces'):
                    for interface_family in self.data['inventory'][container_name].get('network_interfaces')[interface]:
                        try:
                            address = ipaddress.ip_address(to_text(interface_family['address']))
                            if address.version == network.version and address in network:
                                self.inventory.add_child(group_name, container_name)
                        except ValueError:
                            # Ignore invalid IP addresses returned by lxd
                            pass
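    # Minimal standalone sketch of the membership test used above (standard
    # library only; the sample addresses are hypothetical):
    #
    #     import ipaddress
    #
    #     network = ipaddress.ip_network(u'10.0.3.0/24')
    #     for candidate in (u'10.0.3.10', u'192.168.1.5', u'fd42::1'):
    #         address = ipaddress.ip_address(candidate)
    #         # the version check avoids comparing IPv6 addresses against an IPv4 net
    #         if address.version == network.version and address in network:
    #             print('%s is in %s' % (candidate, network))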
    def build_inventory_groups_os(self, group_name):
        """create group by attribute: os

        Args:
            str(group_name): Group name
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        # maybe we just want to expand one group
        if group_name not in self.inventory.groups:
            self.inventory.add_group(group_name)

        gen_containers = [
            container_name for container_name in self.inventory.hosts
            if 'ansible_lxd_os' in self.inventory.get_host(container_name).get_vars()]
        for container_name in gen_containers:
            if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(container_name).get_vars().get('ansible_lxd_os'):
                self.inventory.add_child(group_name, container_name)

    def build_inventory_groups_release(self, group_name):
        """create group by attribute: release

        Args:
            str(group_name): Group name
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        # maybe we just want to expand one group
        if group_name not in self.inventory.groups:
            self.inventory.add_group(group_name)

        gen_containers = [
            container_name for container_name in self.inventory.hosts
            if 'ansible_lxd_release' in self.inventory.get_host(container_name).get_vars()]
        for container_name in gen_containers:
            if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(container_name).get_vars().get('ansible_lxd_release'):
                self.inventory.add_child(group_name, container_name)

    def build_inventory_groups_profile(self, group_name):
        """create group by attribute: profile

        Args:
            str(group_name): Group name
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        # maybe we just want to expand one group
        if group_name not in self.inventory.groups:
            self.inventory.add_group(group_name)

        gen_containers = [
            container_name for container_name in self.inventory.hosts.keys()
            if 'ansible_lxd_profile' in self.inventory.get_host(container_name).get_vars().keys()]
        for container_name in gen_containers:
            if self.groupby[group_name].get('attribute').lower() in self.inventory.get_host(container_name).get_vars().get('ansible_lxd_profile'):
                self.inventory.add_child(group_name, container_name)

    def build_inventory_groups_vlanid(self, group_name):
        """create group by attribute: vlanid

        Args:
            str(group_name): Group name
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""
        # maybe we just want to expand one group
        if group_name not in self.inventory.groups:
            self.inventory.add_group(group_name)

        gen_containers = [
            container_name for container_name in self.inventory.hosts.keys()
            if 'ansible_lxd_vlan_ids' in self.inventory.get_host(container_name).get_vars().keys()]
        for container_name in gen_containers:
            if self.groupby[group_name].get('attribute') in self.inventory.get_host(container_name).get_vars().get('ansible_lxd_vlan_ids').values():
                self.inventory.add_child(group_name, container_name)
    def build_inventory_groups(self):
        """Build group-part dynamic inventory

        Build the group-part of the dynamic inventory.
        Add groups to the inventory.

        Args:
            None
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""

        def group_type(group_name):
            """create groups defined by lxd.yml or default values

            Supported types:
                * 'location'
                * 'pattern'
                * 'network_range'
                * 'os'
                * 'release'
                * 'profile'
                * 'vlanid'

            Args:
                str(group_name): Group name
            Kwargs:
                None
            Raises:
                None
            Returns:
                None"""

            # Due to the compatibility with python 2 no use of map
            if self.groupby[group_name].get('type') == 'location':
                self.build_inventory_groups_location(group_name)
            elif self.groupby[group_name].get('type') == 'pattern':
                self.build_inventory_groups_pattern(group_name)
            elif self.groupby[group_name].get('type') == 'network_range':
                self.build_inventory_groups_network_range(group_name)
            elif self.groupby[group_name].get('type') == 'os':
                self.build_inventory_groups_os(group_name)
            elif self.groupby[group_name].get('type') == 'release':
                self.build_inventory_groups_release(group_name)
            elif self.groupby[group_name].get('type') == 'profile':
                self.build_inventory_groups_profile(group_name)
            elif self.groupby[group_name].get('type') == 'vlanid':
                self.build_inventory_groups_vlanid(group_name)
            else:
                raise AnsibleParserError('Unknown group type: {0}'.format(to_native(group_name)))

        if self.groupby:
            for group_name in self.groupby:
                if not group_name.isalnum():
                    raise AnsibleParserError('Invalid character(s) in groupname: {0}'.format(to_native(group_name)))
                group_type(group_name)
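    # Illustrative sketch (group names and attributes are hypothetical): a
    # groupby mapping as it would arrive from the plugin configuration, with
    # one entry per dispatch branch handled by group_type() above.
    #
    # self.groupby = {
    #     'testpattern': {'type': 'pattern', 'attribute': 'test'},
    #     'vlan666': {'type': 'vlanid', 'attribute': 666},
    #     'locationBerlin': {'type': 'location', 'attribute': 'Berlin'},
    #     'osUbuntu': {'type': 'os', 'attribute': 'ubuntu'},
    #     'releaseFocal': {'type': 'release', 'attribute': 'focal'},
    #     'profileDefault': {'type': 'profile', 'attribute': 'default'},
    #     'netRangeLXD': {'type': 'network_range', 'attribute': '10.0.3.0/24'},
    # }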
    def build_inventory(self):
        """Build dynamic inventory

        Build the dynamic inventory.

        Args:
            None
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""

        self.build_inventory_hosts()
        self.build_inventory_groups()
    def _populate(self):
        """Return the hosts and groups

        Returns the processed container configurations from the lxd import

        Args:
            None
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""

        if len(self.data) == 0:  # if no data was injected by unit tests, open the socket
            self.socket = self._connect_to_socket()
            self.get_container_data(self._get_containers())
            self.get_network_data(self._get_networks())

        self.extract_information_from_container_configs()

        # self.display.vvv(self.save_json_data([os.path.abspath(__file__)]))

        self.build_inventory()
    def parse(self, inventory, loader, path, cache):
        """Return dynamic inventory from source

        Returns the processed inventory from the lxd import

        Args:
            str(inventory): inventory object with existing data and
                            the methods to add hosts/groups/variables
                            to inventory
            str(loader): Ansible's DataLoader
            str(path): path to the config
            bool(cache): use or avoid caches
        Kwargs:
            None
        Raises:
            AnsibleParserError
        Returns:
            None"""

        super(InventoryModule, self).parse(inventory, loader, path, cache=False)
        # Read the inventory YAML file
        self._read_config_data(path)
        try:
            self.client_key = self.get_option('client_key')
            self.client_cert = self.get_option('client_cert')
            self.debug = self.DEBUG
            self.data = {}  # store for inventory-data
            self.groupby = self.get_option('groupby')
            self.plugin = self.get_option('plugin')
            self.prefered_container_network_family = self.get_option('prefered_container_network_family')
            self.prefered_container_network_interface = self.get_option('prefered_container_network_interface')
            if self.get_option('state').lower() == 'none':  # a "none" in the config arrives as str()
                self.filter = None
            else:
                self.filter = self.get_option('state').lower()
            self.trust_password = self.get_option('trust_password')
            self.url = self.get_option('url')
        except Exception as err:
            raise AnsibleParserError(
                'All correct options required: {0}'.format(to_native(err)))
        # Call our internal helper to populate the dynamic inventory
        self._populate()
@@ -71,25 +71,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
self._nmap = None
super(InventoryModule, self).__init__()

def _populate(self, hosts):
# Use constructed if applicable
strict = self.get_option('strict')

for host in hosts:
hostname = host['name']
self.inventory.add_host(hostname)
for var, value in host.items():
self.inventory.set_variable(hostname, var, value)

# Composed variables
self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)

# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)

# Create groups based on variable values and add the corresponding hosts to it
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)

def verify_file(self, path):

valid = False
@@ -101,7 +82,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):

return valid

def parse(self, inventory, loader, path, cache=True):
def parse(self, inventory, loader, path, cache=False):

try:
self._nmap = get_bin_path('nmap')
@@ -112,102 +93,75 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):

self._read_config_data(path)

cache_key = self.get_cache_key(path)
# setup command
cmd = [self._nmap]
if not self._options['ports']:
cmd.append('-sP')

# cache may be True or False at this point to indicate if the inventory is being refreshed
# get the user's cache option too to see if we should save the cache if it is changing
user_cache_setting = self.get_option('cache')
if self._options['ipv4'] and not self._options['ipv6']:
cmd.append('-4')
elif self._options['ipv6'] and not self._options['ipv4']:
cmd.append('-6')
elif not self._options['ipv6'] and not self._options['ipv4']:
raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin')

# read if the user has caching enabled and the cache isn't being refreshed
attempt_to_read_cache = user_cache_setting and cache
# update if the user has caching enabled and the cache is being refreshed; update this value to True if the cache has expired below
cache_needs_update = user_cache_setting and not cache
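# Illustrative truth table for the two cache flags above (not part of the
# diff): `cache` is the refresh indicator passed into parse(), while
# `user_cache_setting` is the plugin's own `cache` option.
#
#     user_cache_setting  cache  -> attempt_to_read_cache  cache_needs_update
#     True                True   -> True                   False   (serve from cache)
#     True                False  -> False                  True    (refresh, then save)
#     False               any    -> False                  False   (never touch cache)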
if self._options['exclude']:
cmd.append('--exclude')
cmd.append(','.join(self._options['exclude']))

cmd.append(self._options['address'])
try:
# execute
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr)))

# parse results
host = None
ip = None
ports = []

if attempt_to_read_cache:
try:
results = self._cache[cache_key]
except KeyError:
# This occurs if the cache_key is not in the cache or if the cache_key expired, so the cache needs to be updated
cache_needs_update = True
t_stdout = to_text(stdout, errors='surrogate_or_strict')
except UnicodeError as e:
raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e))

if not user_cache_setting or cache_needs_update:
# setup command
cmd = [self._nmap]
if not self._options['ports']:
cmd.append('-sP')
for line in t_stdout.splitlines():
hits = self.find_host.match(line)
if hits:
if host is not None:
self.inventory.set_variable(host, 'ports', ports)

if self._options['ipv4'] and not self._options['ipv6']:
cmd.append('-4')
elif self._options['ipv6'] and not self._options['ipv4']:
cmd.append('-6')
elif not self._options['ipv6'] and not self._options['ipv4']:
raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin')
# if dns only shows arpa, just use ip instead as hostname
if hits.group(1).endswith('.in-addr.arpa'):
host = hits.group(2)
else:
host = hits.group(1)

if self._options['exclude']:
cmd.append('--exclude')
cmd.append(','.join(self._options['exclude']))
# if no reverse dns exists, just use ip instead as hostname
if hits.group(2) is not None:
ip = hits.group(2)
else:
ip = hits.group(1)

cmd.append(self._options['address'])
try:
# execute
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr)))
if host is not None:
# update inventory
self.inventory.add_host(host)
self.inventory.set_variable(host, 'ip', ip)
ports = []
continue

# parse results
host = None
ip = None
ports = []
results = []
host_ports = self.find_port.match(line)
if host is not None and host_ports:
ports.append({'port': host_ports.group(1), 'protocol': host_ports.group(2), 'state': host_ports.group(3), 'service': host_ports.group(4)})
continue

try:
t_stdout = to_text(stdout, errors='surrogate_or_strict')
except UnicodeError as e:
raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e))
# TODO: parse more data, OS?

for line in t_stdout.splitlines():
hits = self.find_host.match(line)
if hits:
if host is not None and ports:
results[-1]['ports'] = ports
# if any leftovers
if host and ports:
self.inventory.set_variable(host, 'ports', ports)

# if dns only shows arpa, just use ip instead as hostname
if hits.group(1).endswith('.in-addr.arpa'):
host = hits.group(2)
else:
host = hits.group(1)

# if no reverse dns exists, just use ip instead as hostname
if hits.group(2) is not None:
ip = hits.group(2)
else:
ip = hits.group(1)

if host is not None:
# update inventory
results.append(dict())
results[-1]['name'] = host
results[-1]['ip'] = ip
ports = []
continue

host_ports = self.find_port.match(line)
if host is not None and host_ports:
ports.append({'port': host_ports.group(1),
'protocol': host_ports.group(2),
'state': host_ports.group(3),
'service': host_ports.group(4)})
continue

# if any leftovers
if host and ports:
results[-1]['ports'] = ports

except Exception as e:
raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)))

if cache_needs_update:
self._cache[cache_key] = results

self._populate(results)
except Exception as e:
raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)))
@@ -19,7 +19,6 @@ DOCUMENTATION = '''
- Will retrieve the first network interface with an IP for Proxmox nodes.
- Can retrieve LXC/QEMU configuration as facts.
extends_documentation_fragment:
- constructed
- inventory_cache
options:
plugin:
@@ -70,21 +69,6 @@ DOCUMENTATION = '''
description: Gather LXC/QEMU configuration facts.
default: no
type: bool
want_proxmox_nodes_ansible_host:
version_added: 3.0.0
description:
- Whether to set C(ansible_host) for proxmox nodes.
- When set to C(true) (default), will use the first available interface. This can be different from what you expect.
default: true
type: bool
strict:
version_added: 2.5.0
compose:
version_added: 2.5.0
groups:
version_added: 2.5.0
keyed_groups:
version_added: 2.5.0
'''

EXAMPLES = '''
@@ -94,15 +78,6 @@ url: http://localhost:8006
user: ansible@pve
password: secure
validate_certs: no
keyed_groups:
  - key: proxmox_tags_parsed
    separator: ""
    prefix: group
groups:
  webservers: "'web' in (proxmox_tags_parsed|list)"
  mailservers: "'mail' in (proxmox_tags_parsed|list)"
compose:
  ansible_port: 2222
'''

import re
@@ -111,7 +86,7 @@ from ansible.module_utils.common._collections_compat import MutableMapping
from distutils.version import LooseVersion

from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable
from ansible.module_utils.six.moves.urllib.parse import urlencode

# 3rd party imports
@@ -124,7 +99,7 @@ except ImportError:
HAS_REQUESTS = False


class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
class InventoryModule(BaseInventoryPlugin, Cacheable):
''' Host inventory parser for ansible using Proxmox as source. '''

NAME = 'community.general.proxmox'
@@ -231,45 +206,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
except Exception:
return None

def _get_agent_network_interfaces(self, node, vmid, vmtype):
result = []

try:
ifaces = self._get_json(
"%s/api2/json/nodes/%s/%s/%s/agent/network-get-interfaces" % (
self.proxmox_url, node, vmtype, vmid
)
)['result']

if "error" in ifaces:
if "class" in ifaces["error"]:
# This happens on Windows: even though the qemu agent is running, the IP address
# cannot be fetched (unsupported), and the command may also be disabled.
errorClass = ifaces["error"]["class"]
if errorClass in ["Unsupported"]:
self.display.v("Retrieving network interfaces from guest agents on windows with older qemu-guest-agents is not supported")
elif errorClass in ["CommandDisabled"]:
self.display.v("Retrieving network interfaces from guest agents has been disabled")
return result

for iface in ifaces:
result.append({
'name': iface['name'],
'mac-address': iface['hardware-address'] if 'hardware-address' in iface else '',
'ip-addresses': ["%s/%s" % (ip['ip-address'], ip['prefix']) for ip in iface['ip-addresses']] if 'ip-addresses' in iface else []
})
except requests.HTTPError:
pass

return result
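# Illustrative sketch (not part of the diff; the payload is a hypothetical,
# trimmed example of what the QEMU guest agent returns): the loop above
# normalizes each interface into name / mac-address / CIDR ip-addresses.
#
#     iface = {'name': 'eth0',
#              'hardware-address': 'aa:bb:cc:dd:ee:ff',
#              'ip-addresses': [{'ip-address': '192.0.2.10', 'prefix': 24}]}
#     normalized = {'name': iface['name'],
#                   'mac-address': iface.get('hardware-address', ''),
#                   'ip-addresses': ['%s/%s' % (ip['ip-address'], ip['prefix'])
#                                    for ip in iface.get('ip-addresses', [])]}
#     # -> {'name': 'eth0', 'mac-address': 'aa:bb:cc:dd:ee:ff', 'ip-addresses': ['192.0.2.10/24']}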

def _get_vm_config(self, node, vmid, vmtype, name):
ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/config" % (self.proxmox_url, node, vmtype, vmid))

node_key = 'node'
node_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), node_key.lower()))
self.inventory.set_variable(name, node_key, node)

vmid_key = 'vmid'
vmid_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), vmid_key.lower()))
self.inventory.set_variable(name, vmid_key, vmid)
@@ -278,10 +217,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
vmtype_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), vmtype_key.lower()))
self.inventory.set_variable(name, vmtype_key, vmtype)

plaintext_configs = [
'tags',
]

for config in ret:
key = config
key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), key.lower()))
@@ -291,20 +226,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')):
value = ('disk_image=' + value)

# Additional field containing parsed tags as list
if config == 'tags':
parsed_key = self.to_safe('%s%s' % (key, "_parsed"))
parsed_value = [tag.strip() for tag in value.split(",")]
self.inventory.set_variable(name, parsed_key, parsed_value)

# The first field in the agent string tells you whether the agent is enabled;
# the rest of the comma-separated string is extra config for the agent
if config == 'agent' and int(value.split(',')[0]):
agent_iface_key = self.to_safe('%s%s' % (key, "_interfaces"))
agent_iface_value = self._get_agent_network_interfaces(node, vmid, vmtype)
if agent_iface_value:
self.inventory.set_variable(name, agent_iface_key, agent_iface_value)
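# Quick sketch of the agent-string check above (values hypothetical): only
# the first comma-separated field decides whether the agent is enabled.
#
#     value = '1,fstrim_cloned_disks=1'
#     int(value.split(',')[0])   # -> 1, truthy: agent interfaces are fetched
#     value = '0'
#     int(value.split(',')[0])   # -> 0, falsy: skipped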

if not (isinstance(value, int) or ',' not in value):
# split off strings with commas to a dict
# skip over any keys that cannot be processed
@@ -333,12 +254,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
regex = r"[^A-Za-z0-9\_]"
return re.sub(regex, "_", word.replace(" ", ""))

def _apply_constructable(self, name, variables):
strict = self.get_option('strict')
self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=strict)
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=strict)
self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict)

def _populate(self):

self._get_auth()
@@ -370,9 +285,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
self.inventory.add_child(nodes_group, node['node'])

# get node IP address
if self.get_option("want_proxmox_nodes_ansible_host"):
ip = self._get_node_ip(node['node'])
self.inventory.set_variable(node['node'], 'ansible_host', ip)
ip = self._get_node_ip(node['node'])
self.inventory.set_variable(node['node'], 'ansible_host', ip)

# get LXC containers for this node
node_lxc_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_lxc' % node['node']).lower()))
@@ -394,8 +308,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
if self.get_option('want_facts'):
self._get_vm_config(node['node'], lxc['vmid'], 'lxc', lxc['name'])

self._apply_constructable(lxc["name"], self.inventory.get_host(lxc['name']).get_vars())

# get QEMU vm's for this node
node_qemu_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_qemu' % node['node']).lower()))
self.inventory.add_group(node_qemu_group)
@@ -418,8 +330,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
if self.get_option('want_facts'):
self._get_vm_config(node['node'], qemu['vmid'], 'qemu', qemu['name'])

self._apply_constructable(qemu["name"], self.inventory.get_host(qemu['name']).get_vars())

# gather vm's in pools
for pool in self._get_pools():
if pool.get('poolid'):
@@ -429,8 +339,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):

for member in self._get_members_per_pool(pool['poolid']):
if member.get('name'):
if not member.get('template'):
self.inventory.add_child(pool_group, member['name'])
self.inventory.add_child(pool_group, member['name'])

def parse(self, inventory, loader, path, cache=True):
if not HAS_REQUESTS:
@@ -10,8 +10,6 @@ DOCUMENTATION = '''
name: stackpath_compute
short_description: StackPath Edge Computing inventory source
version_added: 1.2.0
author:
- UNKNOWN (@shayrybak)
extends_documentation_fragment:
- inventory_cache
- constructed
@@ -104,13 +102,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
raise AnsibleError("plugin doesn't match this plugin")
try:
client_id = config['client_id']
if len(client_id) != 32:
if client_id != 32:
raise AnsibleError("client_id must be 32 characters long")
except KeyError:
raise AnsibleError("config missing client_id, a required option")
try:
client_secret = config['client_secret']
if len(client_secret) != 64:
if client_secret != 64:
raise AnsibleError("client_secret must be 64 characters long")
except KeyError:
raise AnsibleError("config missing client_secret, a required option")
plugins/lookup/__init__.py (new, empty file)
@@ -171,10 +171,10 @@ class LookupModule(LookupBase):

paramvals = {
'key': params[0],
'token': self.get_option('token'),
'recurse': self.get_option('recurse'),
'index': self.get_option('index'),
'datacenter': self.get_option('datacenter')
'token': None,
'recurse': False,
'index': None,
'datacenter': None
}

# parameters specified?
@@ -1,208 +0,0 @@
# (c) 2015-2021, Felix Fontein <felix@fontein.de>
# (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = """
name: dependent
short_description: Composes a list with nested elements of other lists or dicts which can depend on previous loop variables
version_added: 3.1.0
description:
  - "Takes the input lists and returns a list with elements that are lists, dictionaries,
    or template expressions which evaluate to lists or dicts, composed of the elements of
    the input evaluated lists and dictionaries."
options:
  _raw:
    description:
      - A list where the elements are one-element dictionaries, mapping a name to a string, list, or dictionary.
        The name is the index that is used in the result object. The value is iterated over as described below.
      - If the value is a list, it is simply iterated over.
      - If the value is a dictionary, it is iterated over and returned as if they would be processed by the
        R(ansible.builtin.dict2items filter,ansible_collections.ansible.builtin.dict2items_filter).
      - If the value is a string, it is evaluated as Jinja2 expressions which can access the previously chosen
        elements with C(item.<index_name>). The result must be a list or a dictionary.
    type: list
    elements: dict
    required: true
"""

EXAMPLES = """
- name: Install/remove public keys for active admin users
  ansible.posix.authorized_key:
    user: "{{ item.admin.key }}"
    key: "{{ lookup('file', item.key.public_key) }}"
    state: "{{ 'present' if item.key.active else 'absent' }}"
  when: item.admin.value.active
  with_community.general.dependent:
    - admin: admin_user_data
    - key: admin_ssh_keys[item.admin.key]
  loop_control:
    # Makes the output readable, so that it doesn't contain the whole subdictionaries and lists
    label: "{{ [item.admin.key, 'active' if item.key.active else 'inactive', item.key.public_key] }}"
  vars:
    admin_user_data:
      admin1:
        name: Alice
        active: true
      admin2:
        name: Bob
        active: true
    admin_ssh_keys:
      admin1:
        - private_key: keys/private_key_admin1.pem
          public_key: keys/private_key_admin1.pub
          active: true
      admin2:
        - private_key: keys/private_key_admin2.pem
          public_key: keys/private_key_admin2.pub
          active: true
        - private_key: keys/private_key_admin2-old.pem
          public_key: keys/private_key_admin2-old.pub
          active: false

- name: Update DNS records
  community.aws.route53:
    zone: "{{ item.zone.key }}"
    record: "{{ item.prefix.key ~ '.' if item.prefix.key else '' }}{{ item.zone.key }}"
    type: "{{ item.entry.key }}"
    ttl: "{{ item.entry.value.ttl | default(3600) }}"
    value: "{{ item.entry.value.value }}"
    state: "{{ 'absent' if (item.entry.value.absent | default(False)) else 'present' }}"
    overwrite: true
  loop_control:
    # Makes the output readable, so that it doesn't contain the whole subdictionaries and lists
    label: |-
      {{ [item.zone.key, item.prefix.key, item.entry.key,
          item.entry.value.ttl | default(3600),
          item.entry.value.absent | default(False), item.entry.value.value] }}
  with_community.general.dependent:
    - zone: dns_setup
    - prefix: item.zone.value
    - entry: item.prefix.value
  vars:
    dns_setup:
      example.com:
        '':
          A:
            value:
              - 1.2.3.4
          AAAA:
            value:
              - "2a01:1:2:3::1"
        'test._domainkey':
          TXT:
            ttl: 300
            value:
              - '"k=rsa; t=s; p=MIGfMA..."'
      example.org:
        'www':
          A:
            value:
              - 1.2.3.4
              - 5.6.7.8
"""

RETURN = """
_list:
  description:
    - A list composed of dictionaries whose keys are the variable names from the input list.
  type: list
  elements: dict
  sample:
    - key1: a
      key2: test
    - key1: a
      key2: foo
    - key1: b
      key2: bar
"""

from ansible.errors import AnsibleLookupError
from ansible.module_utils.common._collections_compat import Mapping, Sequence
from ansible.module_utils.six import string_types
from ansible.plugins.lookup import LookupBase
from ansible.template import Templar


class LookupModule(LookupBase):
    def __evaluate(self, expression, templar, variables):
        """Evaluate expression with templar.

        ``expression`` is the expression to evaluate.
        ``variables`` are the variables to use.
        """
        templar.available_variables = variables or {}
        return templar.template("{0}{1}{2}".format("{{", expression, "}}"), cache=False)

    def __process(self, result, terms, index, current, templar, variables):
        """Fills ``result`` list with evaluated items.

        ``result`` is a list where the resulting items are placed.
        ``terms`` is the parsed list of terms
        ``index`` is the current index to be processed in the list.
        ``current`` is a dictionary where the first ``index`` values are filled in.
        ``variables`` are the variables currently available.
        """
        # If we are done, add to result list:
        if index == len(terms):
            result.append(current.copy())
            return

        key, expression, values = terms[index]

        if expression is not None:
            # Evaluate expression in current context
            vars = variables.copy()
            vars['item'] = current.copy()
            try:
                values = self.__evaluate(expression, templar, variables=vars)
            except Exception as e:
                raise AnsibleLookupError(
                    'Caught "{error}" while evaluating {key!r} with item == {item!r}'.format(
                        error=e, key=key, item=current))

        if isinstance(values, Mapping):
            for idx, val in sorted(values.items()):
                current[key] = dict([('key', idx), ('value', val)])
                self.__process(result, terms, index + 1, current, templar, variables)
        elif isinstance(values, Sequence):
            for elt in values:
                current[key] = elt
                self.__process(result, terms, index + 1, current, templar, variables)
        else:
            raise AnsibleLookupError(
                'Did not obtain dictionary or list while evaluating {key!r} with item == {item!r}, but {type}'.format(
                    key=key, item=current, type=type(values)))

    def run(self, terms, variables=None, **kwargs):
        """Generate list."""
        result = []
        if len(terms) > 0:
            templar = Templar(loader=self._templar._loader)
            data = []
            vars_so_far = set()
            for index, term in enumerate(terms):
                if not isinstance(term, Mapping):
                    raise AnsibleLookupError(
                        'Parameter {index} must be a dictionary, got {type}'.format(
                            index=index, type=type(term)))
                if len(term) != 1:
                    raise AnsibleLookupError(
                        'Parameter {index} must be a one-element dictionary, got {count} elements'.format(
                            index=index, count=len(term)))
                k, v = list(term.items())[0]
                if k in vars_so_far:
                    raise AnsibleLookupError(
                        'The variable {key!r} appears more than once'.format(key=k))
                vars_so_far.add(k)
                if isinstance(v, string_types):
                    data.append((k, v, None))
                elif isinstance(v, (Sequence, Mapping)):
                    data.append((k, None, v))
                else:
                    raise AnsibleLookupError(
                        'Parameter {key!r} (index {index}) must have a value of type string, dictionary or list, got type {type}'.format(
                            index=index, key=k, type=type(v)))
            self.__process(result, data, 0, {}, templar, variables)
        return result
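# Illustrative sketch (not part of the file): the nested expansion that
# __process() performs, on a tiny hypothetical input. Each term can see the
# keys chosen by the terms before it via `item`.
#
#     terms = [('zone', None, ['example.com', 'example.org']),
#              ('entry', 'item.zone', None)]   # expression depends on 'zone'
#
# With the 'item.zone' expression evaluating to, say, ['A', 'AAAA'] for each
# zone, the result is the dependent product:
#
#     [{'zone': 'example.com', 'entry': 'A'},
#      {'zone': 'example.com', 'entry': 'AAAA'},
#      {'zone': 'example.org', 'entry': 'A'},
#      {'zone': 'example.org', 'entry': 'AAAA'}]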
@@ -31,9 +31,7 @@ EXAMPLES = r"""
- name: Template files (explicitly skip directories in order to use the 'src' attribute)
  ansible.builtin.template:
    src: '{{ item.src }}'
    # Your template files should be stored with a .j2 file extension,
    # but should not be deployed with it. splitext|first removes it.
    dest: /web/{{ item.path | splitext | first }}
    dest: /web/{{ item.path }}
    mode: '{{ item.mode }}'
  with_community.general.filetree: web/
  when: item.state == 'file'
@@ -43,7 +41,6 @@ EXAMPLES = r"""
    src: '{{ item.src }}'
    dest: /web/{{ item.path }}
    state: link
    follow: false  # avoid corrupting target files if the link already exists
    force: yes
    mode: '{{ item.mode }}'
  with_community.general.filetree: web/
@@ -63,7 +63,6 @@ import os

from ansible.plugins.lookup import LookupBase
from ansible.utils.cmd_functions import run_cmd
from ansible.module_utils._text import to_text

ANSIBLE_HIERA_CFG = os.getenv('ANSIBLE_HIERA_CFG', '/etc/hiera.yaml')
ANSIBLE_HIERA_BIN = os.getenv('ANSIBLE_HIERA_BIN', '/usr/bin/hiera')
@@ -79,11 +78,13 @@ class Hiera(object):
rc, output, err = run_cmd("{0} -c {1} {2}".format(
ANSIBLE_HIERA_BIN, ANSIBLE_HIERA_CFG, hiera_key[0]))

return to_text(output.strip())
return output.strip()


class LookupModule(LookupBase):
def run(self, terms, variables=''):
hiera = Hiera()
ret = [hiera.get(terms)]
ret = []

ret.append(hiera.get(terms))
return ret
@@ -25,10 +25,6 @@ DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: nios
short_description: Query Infoblox NIOS objects
deprecated:
why: Please install the infoblox.nios_modules collection and use the corresponding lookup from it.
alternative: infoblox.nios_modules.nios_lookup
removed_in: 5.0.0
description:
- Uses the Infoblox WAPI API to fetch NIOS specified objects. This lookup
supports adding additional keywords to filter the return data and specify

@@ -25,10 +25,6 @@ DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: nios_next_ip
short_description: Return the next available IP address for a network
deprecated:
why: Please install the infoblox.nios_modules collection and use the corresponding lookup from it.
alternative: infoblox.nios_modules.nios_next_ip
removed_in: 5.0.0
description:
- Uses the Infoblox WAPI API to return the next available IP addresses
for a given network CIDR

@@ -25,10 +25,6 @@ DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: nios_next_network
short_description: Return the next available network range for a network-container
deprecated:
why: Please install the infoblox.nios_modules collection and use the corresponding lookup from it.
alternative: infoblox.nios_modules.nios_next_network
removed_in: 5.0.0
description:
- Uses the Infoblox WAPI API to return the next available network addresses for
a given network CIDR
@@ -30,11 +30,6 @@ DOCUMENTATION = '''
aliases: ['vault_password']
section:
description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section.
domain:
description: Domain of 1Password. Default is U(1password.com).
version_added: 3.2.0
default: '1password.com'
type: str
subdomain:
description: The 1Password subdomain to authenticate against.
username:
@@ -114,7 +109,6 @@ class OnePass(object):
self.logged_in = False
self.token = None
self.subdomain = None
self.domain = None
self.username = None
self.secret_key = None
self.master_password = None
@@ -174,7 +168,7 @@ class OnePass(object):

args = [
'signin',
'{0}.{1}'.format(self.subdomain, self.domain),
'{0}.1password.com'.format(self.subdomain),
to_bytes(self.username),
to_bytes(self.secret_key),
'--output=raw',
@@ -271,7 +265,6 @@ class LookupModule(LookupBase):
section = kwargs.get('section')
vault = kwargs.get('vault')
op.subdomain = kwargs.get('subdomain')
op.domain = kwargs.get('domain', '1password.com')
op.username = kwargs.get('username')
op.secret_key = kwargs.get('secret_key')
op.master_password = kwargs.get('master_password', kwargs.get('vault_password'))
@@ -25,9 +25,9 @@ DOCUMENTATION = '''
env:
- name: PASSWORD_STORE_DIR
create:
description: Create the password if it does not already exist. Takes precedence over C(missing).
description: Create the password if it does not already exist.
type: bool
default: false
default: 'no'
overwrite:
description: Overwrite the password if it does already exist.
type: bool
@@ -60,22 +60,6 @@ DOCUMENTATION = '''
description: use alphanumeric characters.
type: bool
default: 'no'
missing:
description:
- Preference about what to do if the password file is missing.
- If I(create=true), the value for this option is ignored and assumed to be C(create).
- If set to C(error), the lookup will error out if the passname does not exist.
- If set to C(create), the passname will be created with the provided length I(length) if it does not exist.
- If set to C(empty) or C(warn), will return a C(none) in case the passname does not exist.
  When using C(lookup) and not C(query), this will be translated to an empty string.
version_added: 3.1.0
type: str
default: error
choices:
- error
- warn
- empty
- create
'''
EXAMPLES = """
# Debug is used for examples, BAD IDEA to show passwords on screen
@@ -83,28 +67,12 @@ EXAMPLES = """
  ansible.builtin.debug:
    msg: "{{ lookup('community.general.passwordstore', 'example/test')}}"

- name: Basic lookup. Warns if example/test does not exist and returns empty string
  ansible.builtin.debug:
    msg: "{{ lookup('community.general.passwordstore', 'example/test missing=warn')}}"

- name: Create pass with random 16 character password. If password exists just give the password
  ansible.builtin.debug:
    var: mypassword
  vars:
    mypassword: "{{ lookup('community.general.passwordstore', 'example/test create=true')}}"

- name: Create pass with random 16 character password. If password exists just give the password
  ansible.builtin.debug:
    var: mypassword
  vars:
    mypassword: "{{ lookup('community.general.passwordstore', 'example/test missing=create')}}"

- name: Prints 'abc' if example/test does not exist, just give the password otherwise
  ansible.builtin.debug:
    var: mypassword
  vars:
    mypassword: "{{ lookup('community.general.passwordstore', 'example/test missing=empty') | default('abc', true) }}"

- name: Different size password
  ansible.builtin.debug:
    msg: "{{ lookup('community.general.passwordstore', 'example/test create=true length=42')}}"
@@ -143,13 +111,10 @@ import yaml

from distutils import util
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.utils.display import Display
from ansible.utils.encrypt import random_password
from ansible.plugins.lookup import LookupBase
from ansible import constants as C

display = Display()


# backhacked check_output with input for python 2.7
# http://stackoverflow.com/questions/10103551/passing-data-to-subprocess-check-output
@@ -213,17 +178,12 @@ class LookupModule(LookupBase):
self.paramvals[key] = util.strtobool(self.paramvals[key])
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
if self.paramvals['missing'] not in ['error', 'warn', 'create', 'empty']:
raise AnsibleError("{0} is not a valid option for missing".format(self.paramvals['missing']))
if not isinstance(self.paramvals['length'], int):
if self.paramvals['length'].isdigit():
self.paramvals['length'] = int(self.paramvals['length'])
else:
raise AnsibleError("{0} is not a correct value for length".format(self.paramvals['length']))

if self.paramvals['create']:
self.paramvals['missing'] = 'create'

# Collect pass environment variables from the plugin's parameters.
self.env = os.environ.copy()

@@ -264,11 +224,9 @@ class LookupModule(LookupBase):
if e.returncode != 0 and 'not in the password store' in e.output:
# if pass returns 1 and return string contains 'is not in the password store.'
# We need to determine if this is valid or Error.
if self.paramvals['missing'] == 'error':
raise AnsibleError('passwordstore: passname {0} not found and missing=error is set'.format(self.passname))
if not self.paramvals['create']:
raise AnsibleError('passname: {0} not found, use create=True'.format(self.passname))
else:
if self.paramvals['missing'] == 'warn':
display.warning('passwordstore: passname {0} not found'.format(self.passname))
return False
else:
raise AnsibleError(e)
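# Illustrative decision sketch (not part of the diff) for the `missing`
# option documented above, as applied when `pass` reports a passname that
# is not in the store:
#
#     missing == 'error'  -> raise AnsibleError
#     missing == 'warn'   -> display.warning(...), lookup returns None
#     missing == 'empty'  -> lookup returns None (empty string with lookup())
#     missing == 'create' -> generate a new password of length `length`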
@@ -336,7 +294,6 @@ class LookupModule(LookupBase):
'userpass': '',
'length': 16,
'backup': False,
'missing': 'error',
}

for term in terms:
@@ -347,9 +304,6 @@ class LookupModule(LookupBase):
else:
result.append(self.get_passresult())
else:  # password does not exist
if self.paramvals['missing'] == 'create':
if self.paramvals['create']:
result.append(self.generate_password())
else:
result.append(None)

return result
@@ -1,99 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Abhijeet Kasurde <akasurde@redhat.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)

__metaclass__ = type

DOCUMENTATION = r'''
name: random_pet
author:
  - Abhijeet Kasurde (@Akasurde)
short_description: Generates random pet names
version_added: '3.1.0'
requirements:
  - petname U(https://github.com/dustinkirkland/python-petname)
description:
  - Generates random pet names that can be used as unique identifiers for the resources.
options:
  words:
    description:
      - The number of words in the pet name.
    default: 2
    type: int
  length:
    description:
      - The maximal length of every component of the pet name.
      - Values below 3 will be set to 3 by petname.
    default: 6
    type: int
  prefix:
    description: A string to prefix the name with.
    type: str
  separator:
    description: The character to separate words in the pet name.
    default: "-"
    type: str
'''

EXAMPLES = r'''
- name: Generate pet name
  ansible.builtin.debug:
    var: lookup('community.general.random_pet')
  # Example result: 'loving-raptor'

- name: Generate pet name with 3 words
  ansible.builtin.debug:
    var: lookup('community.general.random_pet', words=3)
  # Example result: 'fully-fresh-macaw'

- name: Generate pet name with separator
  ansible.builtin.debug:
    var: lookup('community.general.random_pet', separator="_")
  # Example result: 'causal_snipe'

- name: Generate pet name with length
  ansible.builtin.debug:
    var: lookup('community.general.random_pet', length=7)
  # Example result: 'natural-peacock'
'''

RETURN = r'''
_raw:
  description: A one-element list containing a random pet name
  type: list
  elements: str
'''

try:
    import petname

    HAS_PETNAME = True
except ImportError:
    HAS_PETNAME = False

from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase


class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):

        if not HAS_PETNAME:
            raise AnsibleError('Python petname library is required. '
                               'Please install using "pip install petname"')

        self.set_options(var_options=variables, direct=kwargs)
        words = self.get_option('words')
        length = self.get_option('length')
        prefix = self.get_option('prefix')
        separator = self.get_option('separator')

        values = petname.Generate(words=words, separator=separator, letters=length)
        if prefix:
            values = "%s%s%s" % (prefix, separator, values)

        return [values]
@@ -1,220 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2021, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
name: random_string
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
short_description: Generates random string
|
||||
version_added: '3.2.0'
|
||||
description:
|
||||
- Generates random string based upon the given constraints.
|
||||
options:
|
||||
length:
|
||||
description: The length of the string.
|
||||
default: 8
|
||||
type: int
|
||||
upper:
|
||||
description:
|
||||
- Include uppercase letters in the string.
|
||||
default: true
|
||||
type: bool
|
||||
lower:
|
||||
description:
|
||||
- Include lowercase letters in the string.
|
||||
default: true
|
||||
type: bool
|
||||
numbers:
|
||||
description:
|
||||
- Include numbers in the string.
|
||||
default: true
|
||||
type: bool
|
||||
special:
|
||||
description:
|
||||
- Include special characters in the string.
|
||||
- Special characters are taken from Python standard library C(string).
|
||||
See L(the documentation of string.punctuation,https://docs.python.org/3/library/string.html#string.punctuation)
|
||||
for which characters will be used.
|
||||
- The choice of special characters can be changed to setting I(override_special).
|
||||
default: true
|
||||
type: bool
|
||||
min_numeric:
|
||||
description:
|
||||
- Minimum number of numeric characters in the string.
|
||||
- If set, overrides I(numbers=false).
|
||||
default: 0
|
||||
type: int
|
||||
min_upper:
|
||||
description:
|
||||
- Minimum number of uppercase alphabets in the string.
|
||||
- If set, overrides I(upper=false).
|
||||
default: 0
|
||||
type: int
|
||||
min_lower:
|
||||
description:
|
||||
- Minimum number of lowercase alphabets in the string.
|
||||
- If set, overrides I(lower=false).
|
||||
default: 0
|
||||
type: int
|
||||
min_special:
|
||||
description:
|
||||
- Minimum number of special character in the string.
|
||||
default: 0
|
||||
type: int
|
||||
override_special:
|
||||
description:
|
||||
- Overide a list of special characters to use in the string.
|
||||
- If set I(min_special) should be set to a non-default value.
|
||||
type: str
|
||||
override_all:
|
||||
description:
|
||||
- Override all values of I(numbers), I(upper), I(lower), and I(special) with
|
||||
the given list of characters.
|
||||
type: str
|
||||
base64:
|
||||
description:
|
||||
- Returns base64 encoded string.
|
||||
type: bool
|
||||
default: false
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
- name: Generate random string
  ansible.builtin.debug:
    var: lookup('community.general.random_string')
  # Example result: ['DeadBeeF']

- name: Generate random string with length 12
  ansible.builtin.debug:
    var: lookup('community.general.random_string', length=12)
  # Example result: ['Uan0hUiX5kVG']

- name: Generate base64 encoded random string
  ansible.builtin.debug:
    var: lookup('community.general.random_string', base64=True)
  # Example result: ['NHZ6eWN5Qk0=']

- name: Generate a random string with 1 lower, 1 upper, 1 number and 1 special char (at least)
  ansible.builtin.debug:
    var: lookup('community.general.random_string', min_lower=1, min_upper=1, min_special=1, min_numeric=1)
  # Example result: ['&Qw2|E[-']

- name: Generate a random string with all lower case characters
  ansible.builtin.debug:
    var: query('community.general.random_string', upper=false, numbers=false, special=false)
  # Example result: ['exolxzyz']

- name: Generate random hexadecimal string
  ansible.builtin.debug:
    var: query('community.general.random_string', upper=false, lower=false, override_special=hex_chars, numbers=false)
  vars:
    hex_chars: '0123456789ABCDEF'
  # Example result: ['D2A40737']

- name: Generate random hexadecimal string with override_all
  ansible.builtin.debug:
    var: query('community.general.random_string', override_all=hex_chars)
  vars:
    hex_chars: '0123456789ABCDEF'
  # Example result: ['D2A40737']
"""

RETURN = r"""
  _raw:
    description: A one-element list containing a random string.
    type: list
    elements: str
"""

import base64
import random
import string

from ansible.errors import AnsibleLookupError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_bytes, to_text


class LookupModule(LookupBase):
    @staticmethod
    def get_random(random_generator, chars, length):
        if not chars:
            raise AnsibleLookupError(
                "Available characters cannot be None, please change constraints"
            )
        return "".join(random_generator.choice(chars) for dummy in range(length))

    @staticmethod
    def b64encode(string_value, encoding="utf-8"):
        return to_text(
            base64.b64encode(
                to_bytes(string_value, encoding=encoding, errors="surrogate_or_strict")
            )
        )

    def run(self, terms, variables=None, **kwargs):
        number_chars = string.digits
        lower_chars = string.ascii_lowercase
        upper_chars = string.ascii_uppercase
        special_chars = string.punctuation
        random_generator = random.SystemRandom()

        self.set_options(var_options=variables, direct=kwargs)

        length = self.get_option("length")
        base64_flag = self.get_option("base64")
        override_all = self.get_option("override_all")
        values = ""
        available_chars_set = ""

        if override_all:
            # Override all the values
            available_chars_set = override_all
        else:
            upper = self.get_option("upper")
            lower = self.get_option("lower")
            numbers = self.get_option("numbers")
            special = self.get_option("special")
            override_special = self.get_option("override_special")

            if override_special:
                special_chars = override_special

            if upper:
                available_chars_set += upper_chars
            if lower:
                available_chars_set += lower_chars
            if numbers:
                available_chars_set += number_chars
            if special:
                available_chars_set += special_chars

            mapping = {
                "min_numeric": number_chars,
                "min_lower": lower_chars,
                "min_upper": upper_chars,
                "min_special": special_chars,
            }

            for m in mapping:
                if self.get_option(m):
                    values += self.get_random(random_generator, mapping[m], self.get_option(m))

        remaining_pass_len = length - len(values)
        values += self.get_random(random_generator, available_chars_set, remaining_pass_len)

        # Shuffle the result so the characters drawn to satisfy the min_*
        # constraints are not grouped at the start of the string
        shuffled_values = list(values)
        random.shuffle(shuffled_values)

        if base64_flag:
            return [self.b64encode("".join(shuffled_values))]

        return ["".join(shuffled_values)]
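
A minimal standalone sketch of the same technique (the function name and defaults below are illustrative, not part of the plugin): draw the required minimum characters from each class first, top up from the combined pool, then shuffle so the forced characters are not clustered at the front.

import random
import string

def sketch_random_string(length=8, min_upper=1, min_lower=1, min_numeric=1):
    rng = random.SystemRandom()  # same CSPRNG source the plugin uses
    # Satisfy the minimum-count constraints first
    chars = [rng.choice(string.ascii_uppercase) for _ in range(min_upper)]
    chars += [rng.choice(string.ascii_lowercase) for _ in range(min_lower)]
    chars += [rng.choice(string.digits) for _ in range(min_numeric)]
    # Fill the remainder from the combined pool, then shuffle
    pool = string.ascii_letters + string.digits
    chars += [rng.choice(pool) for _ in range(length - len(chars))]
    random.shuffle(chars)
    return "".join(chars)

print(sketch_random_string(12))
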
@@ -103,14 +103,6 @@ EXAMPLES = r"""
              | items2dict(key_name='slug',
                           value_name='itemValue'))['password']
        }}

- hosts: localhost
  vars:
    secret_password: >-
      {{ ((lookup('community.general.tss', 1) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password'] }}
  tasks:
    - ansible.builtin.debug:
        msg: the password is {{ secret_password }}
"""

from ansible.errors import AnsibleError, AnsibleOptionsError

0
plugins/module_utils/__init__.py
Normal file
871
plugins/module_utils/_ovirt.py
Normal file
@@ -0,0 +1,871 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import inspect
import os
import time

from abc import ABCMeta, abstractmethod
from datetime import datetime
from distutils.version import LooseVersion

from ansible_collections.community.general.plugins.module_utils.cloud import CloudRetry
from ansible.module_utils.common._collections_compat import Mapping

try:
    from enum import Enum  # enum is a ovirtsdk4 requirement
    import ovirtsdk4 as sdk
    import ovirtsdk4.version as sdk_version
    import ovirtsdk4.types as otypes
    HAS_SDK = LooseVersion(sdk_version.VERSION) >= LooseVersion('4.3.0')
except ImportError:
    HAS_SDK = False


BYTES_MAP = {
    'kib': 2**10,
    'mib': 2**20,
    'gib': 2**30,
    'tib': 2**40,
    'pib': 2**50,
}


def check_sdk(module):
    if not HAS_SDK:
        module.fail_json(
            msg='ovirtsdk4 version 4.3.0 or higher is required for this module'
        )


def get_dict_of_struct(struct, connection=None, fetch_nested=False, attributes=None):
    """
    Convert SDK Struct type into dictionary.
    """
    res = {}

    def resolve_href(value):
        # Fetch nested values of struct:
        try:
            value = connection.follow_link(value)
        except sdk.Error:
            value = None
        nested_obj = dict(
            (attr, convert_value(getattr(value, attr)))
            for attr in attributes if getattr(value, attr, None) is not None
        )
        nested_obj['id'] = getattr(value, 'id', None)
        nested_obj['href'] = getattr(value, 'href', None)
        return nested_obj

    def remove_underscore(val):
        # Strip all leading underscores from the private attribute name:
        if val.startswith('_'):
            return remove_underscore(val[1:])
        return val

    def convert_value(value):
        nested = False

        if isinstance(value, sdk.Struct):
            if not fetch_nested or not value.href:
                return get_dict_of_struct(value)
            return resolve_href(value)

        elif isinstance(value, Enum) or isinstance(value, datetime):
            return str(value)
        elif isinstance(value, list) or isinstance(value, sdk.List):
            if isinstance(value, sdk.List) and fetch_nested and value.href:
                try:
                    value = connection.follow_link(value)
                    nested = True
                except sdk.Error:
                    value = []

            ret = []
            for i in value:
                if isinstance(i, sdk.Struct):
                    if not nested and fetch_nested and i.href:
                        ret.append(resolve_href(i))
                    elif not nested:
                        ret.append(get_dict_of_struct(i))
                    else:
                        nested_obj = dict(
                            (attr, convert_value(getattr(i, attr)))
                            for attr in attributes if getattr(i, attr, None)
                        )
                        nested_obj['id'] = getattr(i, 'id', None)
                        ret.append(nested_obj)
                elif isinstance(i, Enum):
                    ret.append(str(i))
                else:
                    ret.append(i)
            return ret
        else:
            return value

    if struct is not None:
        for key, value in struct.__dict__.items():
            if value is None:
                continue

            key = remove_underscore(key)
            res[key] = convert_value(value)

    return res


def engine_version(connection):
    """
    Return string representation of oVirt engine version.
    """
    engine_api = connection.system_service().get()
    engine_version = engine_api.product_info.version
    return '%s.%s' % (engine_version.major, engine_version.minor)


def create_connection(auth):
    """
    Create a connection to the Python SDK, from the task `auth` parameter.
    If the user doesn't have an SSO token, the following `auth` dictionary
    parameters are mandatory:
        url, username, password

    If the user has an SSO token, the following `auth` dictionary parameters
    are mandatory:
        url, token

    The `ca_file` parameter is mandatory when using a secure connection;
    when using an insecure connection, it's mandatory to send insecure=True.

    :param auth: dictionary which contains needed values for connection creation
    :return: Python SDK connection
    """

    url = auth.get('url')
    if url is None and auth.get('hostname') is not None:
        url = 'https://{0}/ovirt-engine/api'.format(auth.get('hostname'))

    return sdk.Connection(
        url=url,
        username=auth.get('username'),
        password=auth.get('password'),
        ca_file=auth.get('ca_file', None),
        insecure=auth.get('insecure', False),
        token=auth.get('token', None),
        kerberos=auth.get('kerberos', None),
        headers=auth.get('headers', None),
    )


def convert_to_bytes(param):
    """
    This method converts units to bytes, following the IEC standard.

    :param param: value to be converted
    """
    if param is None:
        return None

    # Get rid of whitespace:
    param = ''.join(param.split())

    # Convert to bytes:
    if len(param) > 3 and param[-3].lower() in ['k', 'm', 'g', 't', 'p']:
        return int(param[:-3]) * BYTES_MAP.get(param[-3:].lower(), 1)
    elif param.isdigit():
        return int(param) * 2**10
    else:
        raise ValueError(
            "Unsupported value (IEC supported): '{value}'".format(value=param)
        )
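
# Worked examples (input values assumed, not from the source):
#   convert_to_bytes('2 GiB')  ->  2 * 2**30 == 2147483648
#   convert_to_bytes('1024')   ->  1024 * 2**10 (bare digits are read as KiB)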


def follow_link(connection, link):
    """
    This method returns the entity of the element which link points to.

    :param connection: connection to the Python SDK
    :param link: link of the entity
    :return: entity which link points to
    """

    if link:
        return connection.follow_link(link)
    else:
        return None


def get_link_name(connection, link):
    """
    This method returns the name of the element which link points to.

    :param connection: connection to the Python SDK
    :param link: link of the entity
    :return: name of the entity, which link points to
    """

    if link:
        return connection.follow_link(link).name
    else:
        return None


def equal(param1, param2, ignore_case=False):
    """
    Compare two parameters and return if they are equal.
    This function doesn't run the comparison if the first parameter is None.
    With this approach we don't compare values the user didn't specify in
    their task.

    :param param1: user inputted parameter
    :param param2: value of entity parameter
    :return: True if parameters are equal or first parameter is None, otherwise False
    """
    if param1 is not None:
        if ignore_case:
            return param1.lower() == param2.lower()
        return param1 == param2
    return True


def search_by_attributes(service, list_params=None, **kwargs):
    """
    Search for the entity by attributes. Nested entities don't support search
    via REST, so when searching for a nested entity we return all entities
    and filter them by the specified attributes.
    """
    list_params = list_params or {}
    # Check if the 'list' method supports search (look for a search parameter):
    if 'search' in inspect.getargspec(service.list)[0]:
        res = service.list(
            # There must be double quotes around the name, because it is possible
            # to create some oVirt resources with a space in the name.
            search=' and '.join('{0}="{1}"'.format(k, v) for k, v in kwargs.items()),
            **list_params
        )
    else:
        res = [
            e for e in service.list(**list_params) if len([
                k for k, v in kwargs.items() if getattr(e, k, None) == v
            ]) == len(kwargs)
        ]

    res = res or [None]
    return res[0]


def search_by_name(service, name, **kwargs):
    """
    Search for the entity by its name. Nested entities don't support search
    via REST, so when searching for a nested entity we return all entities
    and filter them by name.

    :param service: service of the entity
    :param name: name of the entity
    :return: Entity object returned by Python SDK
    """
    # Check if the 'list' method supports search (look for a search parameter):
    if 'search' in inspect.getargspec(service.list)[0]:
        res = service.list(
            # There must be double quotes around the name, because it is possible
            # to create some oVirt resources with a space in the name.
            search='name="{name}"'.format(name=name)
        )
    else:
        res = [e for e in service.list() if e.name == name]

    if kwargs:
        res = [
            e for e in service.list() if len([
                k for k, v in kwargs.items() if getattr(e, k, None) == v
            ]) == len(kwargs)
        ]

    res = res or [None]
    return res[0]


def get_entity(service, get_params=None):
    """
    Ignore SDK Error in case of getting an entity from service.
    """
    entity = None
    try:
        if get_params is not None:
            entity = service.get(**get_params)
        else:
            entity = service.get()
    except sdk.Error:
        # We can get a 404 here; we should ignore it, for example
        # when the entity is being removed.
        pass
    return entity


def get_id_by_name(service, name, raise_error=True, ignore_case=False):
    """
    Search for an entity ID by its name.
    """
    entity = search_by_name(service, name)

    if entity is not None:
        return entity.id

    if raise_error:
        raise Exception("Entity '%s' was not found." % name)


def wait(
    service,
    condition,
    fail_condition=lambda e: False,
    timeout=180,
    wait=True,
    poll_interval=3,
):
    """
    Wait until the entity fulfills the expected condition.

    :param service: service of the entity
    :param condition: condition to be fulfilled
    :param fail_condition: if this condition is true, raise Exception
    :param timeout: max time to wait in seconds
    :param wait: if True wait for condition, if False don't wait
    :param poll_interval: number of seconds to wait until the next condition check
    """
    # Wait until the desired state of the entity:
    if wait:
        start = time.time()
        while time.time() < start + timeout:
            # Exit if the condition of entity is valid:
            entity = get_entity(service)
            if condition(entity):
                return
            elif fail_condition(entity):
                raise Exception("Error while waiting on result state of the entity.")

            # Sleep for `poll_interval` seconds if none of the conditions apply:
            time.sleep(float(poll_interval))

        raise Exception("Timeout exceeded while waiting on result state of the entity.")
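
# Illustrative call (service variable and status value assumed): block until
# a VM reports the 'up' state, checking every 3 seconds for at most 180 seconds.
#
#   wait(
#       service=vms_service.vm_service(vm.id),
#       condition=lambda vm: vm is not None and vm.status == otypes.VmStatus.UP,
#       timeout=180,
#       poll_interval=3,
#   )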


def __get_auth_dict():
    OVIRT_URL = os.environ.get('OVIRT_URL')
    OVIRT_HOSTNAME = os.environ.get('OVIRT_HOSTNAME')
    OVIRT_USERNAME = os.environ.get('OVIRT_USERNAME')
    OVIRT_PASSWORD = os.environ.get('OVIRT_PASSWORD')
    OVIRT_TOKEN = os.environ.get('OVIRT_TOKEN')
    OVIRT_CAFILE = os.environ.get('OVIRT_CAFILE')
    OVIRT_INSECURE = OVIRT_CAFILE is None

    env_vars = None
    if OVIRT_URL is None and OVIRT_HOSTNAME is not None:
        OVIRT_URL = 'https://{0}/ovirt-engine/api'.format(OVIRT_HOSTNAME)
    if OVIRT_URL and ((OVIRT_USERNAME and OVIRT_PASSWORD) or OVIRT_TOKEN):
        env_vars = {
            'url': OVIRT_URL,
            'username': OVIRT_USERNAME,
            'password': OVIRT_PASSWORD,
            'insecure': OVIRT_INSECURE,
            'token': OVIRT_TOKEN,
            'ca_file': OVIRT_CAFILE,
        }
    if env_vars is not None:
        auth = dict(default=env_vars, type='dict')
    else:
        auth = dict(required=True, type='dict')

    return auth


def ovirt_info_full_argument_spec(**kwargs):
    """
    Extend parameters of an info module with parameters which are common to
    all oVirt info modules.

    :param kwargs: kwargs to be extended
    :return: extended dictionary with common parameters
    """
    spec = dict(
        auth=__get_auth_dict(),
        fetch_nested=dict(default=False, type='bool'),
        nested_attributes=dict(type='list', default=list()),
    )
    spec.update(kwargs)
    return spec


# Left for third-party module compatibility
def ovirt_facts_full_argument_spec(**kwargs):
    """
    This is deprecated. Please use ovirt_info_full_argument_spec instead!

    :param kwargs: kwargs to be extended
    :return: extended dictionary with common parameters
    """
    return ovirt_info_full_argument_spec(**kwargs)


def ovirt_full_argument_spec(**kwargs):
    """
    Extend parameters of a module with parameters which are common to all
    oVirt modules.

    :param kwargs: kwargs to be extended
    :return: extended dictionary with common parameters
    """
    spec = dict(
        auth=__get_auth_dict(),
        timeout=dict(default=180, type='int'),
        wait=dict(default=True, type='bool'),
        poll_interval=dict(default=3, type='int'),
        fetch_nested=dict(default=False, type='bool'),
        nested_attributes=dict(type='list', default=list()),
    )
    spec.update(kwargs)
    return spec


def check_params(module):
    """
    Most modules must have either `name` or `id` specified.
    """
    if module.params.get('name') is None and module.params.get('id') is None:
        module.fail_json(msg='"name" or "id" is required')


def engine_supported(connection, version):
    return LooseVersion(engine_version(connection)) >= LooseVersion(version)


def check_support(version, connection, module, params):
    """
    Check if parameters used by the user are supported by the oVirt Python SDK
    and the oVirt engine.
    """
    api_version = LooseVersion(engine_version(connection))
    version = LooseVersion(version)
    for param in params:
        if module.params.get(param) is not None:
            return LooseVersion(sdk_version.VERSION) >= version and api_version >= version

    return True


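# Illustrative use (option names assumed): a concrete module builds its
# argument spec on top of the common oVirt parameters defined above.
#
#   argument_spec = ovirt_full_argument_spec(
#       state=dict(type='str', default='present', choices=['present', 'absent']),
#       name=dict(type='str'),
#   )
#   module = AnsibleModule(argument_spec=argument_spec)
#   check_sdk(module)

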
class BaseModule(object):
    """
    This is the base class for oVirt modules. oVirt modules should inherit
    this class and override its methods to customize the specific needs of
    the module. The only abstract method of this class is `build_entity`,
    which must be implemented in the child class.
    """
    __metaclass__ = ABCMeta

    def __init__(self, connection, module, service, changed=False):
        self._connection = connection
        self._module = module
        self._service = service
        self._changed = changed
        self._diff = {'after': dict(), 'before': dict()}

    @property
    def changed(self):
        return self._changed

    @changed.setter
    def changed(self, changed):
        if not self._changed:
            self._changed = changed

    @abstractmethod
    def build_entity(self):
        """
        This method should return the oVirt Python SDK type which we want to
        create or update, initialized by values passed by the Ansible module.

        For example, if we want to create a VM, we would return the following:
            types.Vm(name=self._module.params['vm_name'])

        :return: Specific instance of sdk.Struct.
        """
        pass

    def param(self, name, default=None):
        """
        Return a module parameter specified by its name.
        """
        return self._module.params.get(name, default)

    def update_check(self, entity):
        """
        This method checks whether the entity values are the same as the
        values passed to the Ansible module. By default we don't compare any
        values.

        :param entity: Entity we want to compare with Ansible module values.
        :return: True if values are the same, so we don't need to update the entity.
        """
        return True

    def pre_create(self, entity):
        """
        This method is called right before the entity is created.

        :param entity: Entity to be created or updated.
        """
        pass

    def post_create(self, entity):
        """
        This method is called right after the entity is created.

        :param entity: Entity which was created.
        """
        pass

    def post_update(self, entity):
        """
        This method is called right after the entity is updated.

        :param entity: Entity which was updated.
        """
        pass

    def diff_update(self, after, update):
        for k, v in update.items():
            if isinstance(v, Mapping):
                after[k] = self.diff_update(after.get(k, dict()), v)
            else:
                after[k] = update[k]
        return after

    def create(
        self,
        entity=None,
        result_state=None,
        fail_condition=lambda e: False,
        search_params=None,
        update_params=None,
        _wait=None,
        force_create=False,
        **kwargs
    ):
        """
        Method which is called when the state of the entity is 'present'. If
        the user doesn't provide the `entity` parameter, the entity is
        searched for using the `search_params` parameter. If the entity is
        found, it's updated; whether the entity should be updated is checked
        by the `update_check` method. The corresponding updated entity is
        built by the `build_entity` method.

        A function executed after the entity is created can optionally be
        specified in the `post_create` parameter. A function executed after
        the entity is updated can optionally be specified in the
        `post_update` parameter.

        :param entity: Entity we want to update, if it exists.
        :param result_state: State which the entity should be in to finish the task.
        :param fail_condition: Function which checks for an incorrect state of the entity; if it returns `True`, an Exception is raised.
        :param search_params: Dictionary of parameters to be used for search.
        :param update_params: The params which should be passed to the update method.
        :param kwargs: Additional parameters passed when creating the entity.
        :return: Dictionary with values returned by the Ansible module.
        """
        if entity is None and not force_create:
            entity = self.search_entity(search_params)

        self.pre_create(entity)

        if entity:
            # Entity exists, so update it:
            entity_service = self._service.service(entity.id)
            if not self.update_check(entity):
                new_entity = self.build_entity()
                if not self._module.check_mode:
                    update_params = update_params or {}
                    updated_entity = entity_service.update(
                        new_entity,
                        **update_params
                    )
                    self.post_update(entity)

                # Update diffs only if user specified --diff parameter,
                # so we don't needlessly overload the API:
                if self._module._diff:
                    before = get_dict_of_struct(
                        entity,
                        self._connection,
                        fetch_nested=True,
                        attributes=['name'],
                    )
                    after = before.copy()
                    self.diff_update(after, get_dict_of_struct(new_entity))
                    self._diff['before'] = before
                    self._diff['after'] = after

                self.changed = True
        else:
            # Entity doesn't exist, so create it:
            if not self._module.check_mode:
                entity = self._service.add(
                    self.build_entity(),
                    **kwargs
                )
                self.post_create(entity)
            self.changed = True

        if not self._module.check_mode:
            # Wait for the entity to be created and to be in the defined state:
            entity_service = self._service.service(entity.id)

            def state_condition(entity):
                return entity

            if result_state:

                def state_condition(entity):
                    return entity and entity.status == result_state

            wait(
                service=entity_service,
                condition=state_condition,
                fail_condition=fail_condition,
                wait=_wait if _wait is not None else self._module.params['wait'],
                timeout=self._module.params['timeout'],
                poll_interval=self._module.params['poll_interval'],
            )

        return {
            'changed': self.changed,
            'id': getattr(entity, 'id', None),
            type(entity).__name__.lower(): get_dict_of_struct(
                struct=entity,
                connection=self._connection,
                fetch_nested=self._module.params.get('fetch_nested'),
                attributes=self._module.params.get('nested_attributes'),
            ),
            'diff': self._diff,
        }

    def pre_remove(self, entity):
        """
        This method is called right before the entity is removed.

        :param entity: Entity which we want to remove.
        """
        pass

    def entity_name(self, entity):
        return "{e_type} '{e_name}'".format(
            e_type=type(entity).__name__.lower(),
            e_name=getattr(entity, 'name', None),
        )

    def remove(self, entity=None, search_params=None, **kwargs):
        """
        Method which is called when the state of the entity is 'absent'. If
        the user doesn't provide the `entity` parameter, the entity is
        searched for using the `search_params` parameter. If the entity is
        found, it's removed.

        A function executed before the removal can optionally be specified in
        the `pre_remove` parameter.

        :param entity: Entity we want to remove.
        :param search_params: Dictionary of parameters to be used for search.
        :param kwargs: Additional parameters passed when removing the entity.
        :return: Dictionary with values returned by the Ansible module.
        """
        if entity is None:
            entity = self.search_entity(search_params)

        if entity is None:
            return {
                'changed': self.changed,
                'msg': "Entity wasn't found."
            }

        self.pre_remove(entity)

        entity_service = self._service.service(entity.id)
        if not self._module.check_mode:
            entity_service.remove(**kwargs)
            wait(
                service=entity_service,
                condition=lambda entity: not entity,
                wait=self._module.params['wait'],
                timeout=self._module.params['timeout'],
                poll_interval=self._module.params['poll_interval'],
            )
        self.changed = True

        return {
            'changed': self.changed,
            'id': entity.id,
            type(entity).__name__.lower(): get_dict_of_struct(
                struct=entity,
                connection=self._connection,
                fetch_nested=self._module.params.get('fetch_nested'),
                attributes=self._module.params.get('nested_attributes'),
            ),
        }

    def action(
        self,
        action,
        entity=None,
        action_condition=lambda e: e,
        wait_condition=lambda e: e,
        fail_condition=lambda e: False,
        pre_action=lambda e: e,
        post_action=lambda e: None,
        search_params=None,
        **kwargs
    ):
        """
        This method is executed when we want to change the state of some oVirt
        entity. The action to be executed on the oVirt service is specified by
        the `action` parameter. Whether the action should be executed can be
        specified by passing the `action_condition` parameter. The state which
        the entity should be in after execution of the action can be specified
        by the `wait_condition` parameter.

        A function executed before an action on the entity can optionally be
        specified in the `pre_action` parameter. A function executed after an
        action on the entity can optionally be specified in the `post_action`
        parameter.

        :param action: Action which should be executed by the service on the entity.
        :param entity: Entity we want to run the action on.
        :param action_condition: Function which is executed when checking if the action should be executed.
        :param fail_condition: Function which checks for an incorrect state of the entity; if it returns `True`, an Exception is raised.
        :param wait_condition: Function which is executed when waiting on the result state.
        :param pre_action: Function which is executed before running the action.
        :param post_action: Function which is executed after running the action.
        :param search_params: Dictionary of parameters to be used for search.
        :param kwargs: Additional parameters passed to the action.
        :return: Dictionary with values returned by the Ansible module.
        """
        if entity is None:
            entity = self.search_entity(search_params)

        entity = pre_action(entity)

        if entity is None:
            self._module.fail_json(
                msg="Entity not found, can't run action '{0}'.".format(
                    action
                )
            )

        entity_service = self._service.service(entity.id)
        entity = entity_service.get()
        if action_condition(entity):
            if not self._module.check_mode:
                getattr(entity_service, action)(**kwargs)
            self.changed = True

        post_action(entity)

        wait(
            service=self._service.service(entity.id),
            condition=wait_condition,
            fail_condition=fail_condition,
            wait=self._module.params['wait'],
            timeout=self._module.params['timeout'],
            poll_interval=self._module.params['poll_interval'],
        )
        return {
            'changed': self.changed,
            'id': entity.id,
            type(entity).__name__.lower(): get_dict_of_struct(
                struct=entity,
                connection=self._connection,
                fetch_nested=self._module.params.get('fetch_nested'),
                attributes=self._module.params.get('nested_attributes'),
            ),
            'diff': self._diff,
        }

    def wait_for_import(self, condition=lambda e: True):
        if self._module.params['wait']:
            start = time.time()
            timeout = self._module.params['timeout']
            poll_interval = self._module.params['poll_interval']
            while time.time() < start + timeout:
                entity = self.search_entity()
                if entity and condition(entity):
                    return entity
                time.sleep(poll_interval)

    def search_entity(self, search_params=None, list_params=None):
        """
        Always first try to search by `ID`; if the ID isn't specified, check
        whether the user constructed a special search in `search_params`; if
        not, search by `name`.
        """
        entity = None

        if 'id' in self._module.params and self._module.params['id'] is not None:
            entity = get_entity(self._service.service(self._module.params['id']), get_params=list_params)
        elif search_params is not None:
            entity = search_by_attributes(self._service, list_params=list_params, **search_params)
        elif self._module.params.get('name') is not None:
            entity = search_by_attributes(self._service, list_params=list_params, name=self._module.params['name'])

        return entity

    def _get_major(self, full_version):
        if full_version is None or full_version == "":
            return None
        if isinstance(full_version, otypes.Version):
            return int(full_version.major)
        return int(full_version.split('.')[0])

    def _get_minor(self, full_version):
        if full_version is None or full_version == "":
            return None
        if isinstance(full_version, otypes.Version):
            return int(full_version.minor)
        return int(full_version.split('.')[1])


def _sdk4_error_maybe():
    """
    Allow for ovirtsdk4 not being installed.
    """
    if HAS_SDK:
        return sdk.Error
    return type(None)


class OvirtRetry(CloudRetry):
    base_class = _sdk4_error_maybe()

    @staticmethod
    def status_code_from_exception(error):
        return error.code

    @staticmethod
    def found(response_code, catch_extra_error_codes=None):
        # This is a list of error codes to retry.
        retry_on = [
            # HTTP status: Conflict
            409,
        ]
        if catch_extra_error_codes:
            retry_on.extend(catch_extra_error_codes)

        return response_code in retry_on
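
# Illustrative subclass (class and entity type assumed): a concrete module
# class usually only overrides build_entity(); create()/remove() then supply
# the search, diff, and wait logic defined above.
#
#   class ClustersModule(BaseModule):
#       def build_entity(self):
#           return otypes.Cluster(name=self.param('name'))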
@@ -1,67 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) <ajpantuso@gmail.com>
# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

import csv
from io import BytesIO, StringIO

from ansible.module_utils._text import to_native
from ansible.module_utils.six import PY3


class CustomDialectFailureError(Exception):
    pass


class DialectNotAvailableError(Exception):
    pass


CSVError = csv.Error


def initialize_dialect(dialect, **kwargs):
    # Add Unix dialect from Python 3
    class unix_dialect(csv.Dialect):
        """Describe the usual properties of Unix-generated CSV files."""
        delimiter = ','
        quotechar = '"'
        doublequote = True
        skipinitialspace = False
        lineterminator = '\n'
        quoting = csv.QUOTE_ALL

    csv.register_dialect("unix", unix_dialect)

    if dialect not in csv.list_dialects():
        raise DialectNotAvailableError("Dialect '%s' is not supported by your version of Python." % dialect)

    # Create a dictionary from only set options
    dialect_params = dict((k, v) for k, v in kwargs.items() if v is not None)
    if dialect_params:
        try:
            csv.register_dialect('custom', dialect, **dialect_params)
        except TypeError as e:
            raise CustomDialectFailureError("Unable to create custom dialect: %s" % to_native(e))
        dialect = 'custom'

    return dialect


def read_csv(data, dialect, fieldnames=None):
    data = to_native(data, errors='surrogate_or_strict')

    if PY3:
        fake_fh = StringIO(data)
    else:
        fake_fh = BytesIO(data)

    reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect)

    return reader
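
A short sketch of how the two helpers combine (the input literal is assumed): derive a custom dialect from "unix", then parse a semicolon-separated document whose first row carries the field names.

reader_dialect = initialize_dialect('unix', delimiter=';')
for row in read_csv('a;b\n1;2\n', reader_dialect):
    print(row)  # -> {'a': '1', 'b': '2'}
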
@@ -1,234 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2019 Gregory Thiemonge <gregory.thiemonge@gmail.com>
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

import json

from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.urls import fetch_url


class GandiLiveDNSAPI(object):

    api_endpoint = 'https://api.gandi.net/v5/livedns'
    changed = False

    error_strings = {
        400: 'Bad request',
        401: 'Permission denied',
        404: 'Resource not found',
    }

    attribute_map = {
        'record': 'rrset_name',
        'type': 'rrset_type',
        'ttl': 'rrset_ttl',
        'values': 'rrset_values'
    }

    def __init__(self, module):
        self.module = module
        self.api_key = module.params['api_key']

    def _build_error_message(self, module, info):
        s = ''
        body = info.get('body')
        if body:
            errors = module.from_json(body).get('errors')
            if errors:
                error = errors[0]
                name = error.get('name')
                if name:
                    s += '{0}: '.format(name)
                description = error.get('description')
                if description:
                    s += description
        return s

    def _gandi_api_call(self, api_call, method='GET', payload=None, error_on_404=True):
        headers = {'Authorization': 'Apikey {0}'.format(self.api_key),
                   'Content-Type': 'application/json'}
        data = None
        if payload:
            try:
                data = json.dumps(payload)
            except Exception as e:
                self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e))

        resp, info = fetch_url(self.module,
                               self.api_endpoint + api_call,
                               headers=headers,
                               data=data,
                               method=method)

        error_msg = ''
        if info['status'] >= 400 and (info['status'] != 404 or error_on_404):
            err_s = self.error_strings.get(info['status'], '')

            error_msg = "API Error {0}: {1}".format(err_s, self._build_error_message(self.module, info))

        result = None
        try:
            content = resp.read()
        except AttributeError:
            content = None

        if content:
            try:
                result = json.loads(to_text(content, errors='surrogate_or_strict'))
            except (getattr(json, 'JSONDecodeError', ValueError)) as e:
                error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content)

        if error_msg:
            self.module.fail_json(msg=error_msg)

        return result, info['status']

    def build_result(self, result, domain):
        if result is None:
            return None

        res = {}
        for k in self.attribute_map:
            v = result.get(self.attribute_map[k], None)
            if v is not None:
                if k == 'record' and v == '@':
                    v = ''
                res[k] = v

        res['domain'] = domain

        return res

    def build_results(self, results, domain):
        if results is None:
            return []
        return [self.build_result(r, domain) for r in results]

    def get_records(self, record, type, domain):
        url = '/domains/%s/records' % (domain)
        if record:
            url += '/%s' % (record)
            if type:
                url += '/%s' % (type)

        records, status = self._gandi_api_call(url, error_on_404=False)

        if status == 404:
            return []

        if not isinstance(records, list):
            records = [records]

        # filter by type if record is not set
        if not record and type:
            records = [r
                       for r in records
                       if r['rrset_type'] == type]

        return records

    def create_record(self, record, type, values, ttl, domain):
        url = '/domains/%s/records' % (domain)
        new_record = {
            'rrset_name': record,
            'rrset_type': type,
            'rrset_values': values,
            'rrset_ttl': ttl,
        }
        record, status = self._gandi_api_call(url, method='POST', payload=new_record)

        if status in (200, 201,):
            return new_record

        return None

    def update_record(self, record, type, values, ttl, domain):
        url = '/domains/%s/records/%s/%s' % (domain, record, type)
        new_record = {
            'rrset_values': values,
            'rrset_ttl': ttl,
        }
        record = self._gandi_api_call(url, method='PUT', payload=new_record)[0]
        return record

    def delete_record(self, record, type, domain):
        url = '/domains/%s/records/%s/%s' % (domain, record, type)

        self._gandi_api_call(url, method='DELETE')

    def delete_dns_record(self, record, type, values, domain):
        if record == '':
            record = '@'

        records = self.get_records(record, type, domain)

        if records:
            cur_record = records[0]

            self.changed = True

            if values is not None and set(cur_record['rrset_values']) != set(values):
                new_values = set(cur_record['rrset_values']) - set(values)
                if new_values:
                    # Removing one or more values from the record: update it with the remaining values
                    self.update_record(record, type, list(new_values), cur_record['rrset_ttl'], domain)
                    records = self.get_records(record, type, domain)
                    return records[0], self.changed

            if not self.module.check_mode:
                self.delete_record(record, type, domain)
        else:
            cur_record = None

        return None, self.changed

    def ensure_dns_record(self, record, type, ttl, values, domain):
        if record == '':
            record = '@'

        records = self.get_records(record, type, domain)

        if records:
            cur_record = records[0]

            do_update = False
            if ttl is not None and cur_record['rrset_ttl'] != ttl:
                do_update = True
            if values is not None and set(cur_record['rrset_values']) != set(values):
                do_update = True

            if do_update:
                if self.module.check_mode:
                    result = dict(
                        rrset_type=type,
                        rrset_name=record,
                        rrset_values=values,
                        rrset_ttl=ttl
                    )
                else:
                    self.update_record(record, type, values, ttl, domain)

                    records = self.get_records(record, type, domain)
                    result = records[0]
                self.changed = True
                return result, self.changed
            else:
                return cur_record, self.changed

        if self.module.check_mode:
            new_record = dict(
                rrset_type=type,
                rrset_name=record,
                rrset_values=values,
                rrset_ttl=ttl
            )
            result = new_record
        else:
            result = self.create_record(record, type, values, ttl, domain)

        self.changed = True
        return result, self.changed
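
A sketch of driving this helper from a module (record data assumed; 'module' is an AnsibleModule exposing an 'api_key' parameter):

api = GandiLiveDNSAPI(module)
record, changed = api.ensure_dns_record(record='www', type='A', ttl=300,
                                        values=['203.0.113.10'],
                                        domain='example.com')
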
0
plugins/module_utils/identity/__init__.py
Normal file
0
plugins/module_utils/identity/keycloak/__init__.py
Normal file
@@ -30,16 +30,12 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type

import json
import traceback

from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils._text import to_native

URL_REALMS = "{url}/admin/realms"
URL_REALM = "{url}/admin/realms/{realm}"

URL_TOKEN = "{url}/realms/{realm}/protocol/openid-connect/token"
URL_CLIENT = "{url}/admin/realms/{realm}/clients/{id}"
URL_CLIENTS = "{url}/admin/realms/{realm}/clients"
@@ -59,14 +55,13 @@ def keycloak_argument_spec():
    :return: argument_spec dict
    """
    return dict(
        auth_keycloak_url=dict(type='str', aliases=['url'], required=True, no_log=False),
        auth_keycloak_url=dict(type='str', aliases=['url'], required=True),
        auth_client_id=dict(type='str', default='admin-cli'),
        auth_realm=dict(type='str'),
        auth_realm=dict(type='str', required=True),
        auth_client_secret=dict(type='str', default=None, no_log=True),
        auth_username=dict(type='str', aliases=['username']),
        auth_password=dict(type='str', aliases=['password'], no_log=True),
        validate_certs=dict(type='bool', default=True),
        token=dict(type='str', no_log=True),
        auth_username=dict(type='str', aliases=['username'], required=True),
        auth_password=dict(type='str', aliases=['password'], required=True, no_log=True),
        validate_certs=dict(type='bool', default=True)
    )


@@ -78,58 +73,41 @@ class KeycloakError(Exception):
    pass


def get_token(module_params):
    """ Obtains a connection header with a token for authentication,
    either a token already given or one obtained from credentials
    :param module_params: parameters of the module
    :return: connection header
    """
    token = module_params.get('token')
    base_url = module_params.get('auth_keycloak_url')

def get_token(base_url, validate_certs, auth_realm, client_id,
              auth_username, auth_password, client_secret):
    if not base_url.lower().startswith(('http', 'https')):
        raise KeycloakError("auth_url '%s' should either start with 'http' or 'https'." % base_url)

    if token is None:
        base_url = module_params.get('auth_keycloak_url')
        validate_certs = module_params.get('validate_certs')
        auth_realm = module_params.get('auth_realm')
        client_id = module_params.get('auth_client_id')
        auth_username = module_params.get('auth_username')
        auth_password = module_params.get('auth_password')
        client_secret = module_params.get('auth_client_secret')
        auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm)
        temp_payload = {
            'grant_type': 'password',
            'client_id': client_id,
            'client_secret': client_secret,
            'username': auth_username,
            'password': auth_password,
        }
        # Remove empty items, for instance missing client_secret
        payload = dict(
            (k, v) for k, v in temp_payload.items() if v is not None)
        try:
            r = json.loads(to_native(open_url(auth_url, method='POST',
                                              validate_certs=validate_certs,
                                              data=urlencode(payload)).read()))
        except ValueError as e:
            raise KeycloakError(
                'API returned invalid JSON when trying to obtain access token from %s: %s'
                % (auth_url, str(e)))
        except Exception as e:
            raise KeycloakError('Could not obtain access token from %s: %s'
                                % (auth_url, str(e)))

        try:
            token = r['access_token']
        except KeyError:
            raise KeycloakError(
                'Could not obtain access token from %s' % auth_url)
    return {
        'Authorization': 'Bearer ' + token,
        'Content-Type': 'application/json'
    auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm)
    temp_payload = {
        'grant_type': 'password',
        'client_id': client_id,
        'client_secret': client_secret,
        'username': auth_username,
        'password': auth_password,
    }
    # Remove empty items, for instance missing client_secret
    payload = dict(
        (k, v) for k, v in temp_payload.items() if v is not None)
    try:
        r = json.loads(to_native(open_url(auth_url, method='POST',
                                          validate_certs=validate_certs,
                                          data=urlencode(payload)).read()))
    except ValueError as e:
        raise KeycloakError(
            'API returned invalid JSON when trying to obtain access token from %s: %s'
            % (auth_url, str(e)))
    except Exception as e:
        raise KeycloakError('Could not obtain access token from %s: %s'
                            % (auth_url, str(e)))

    try:
        return {
            'Authorization': 'Bearer ' + r['access_token'],
            'Content-Type': 'application/json'
        }
    except KeyError:
        raise KeycloakError(
            'Could not obtain access token from %s' % auth_url)


class KeycloakAPI(object):
@@ -142,75 +120,6 @@ class KeycloakAPI(object):
        self.validate_certs = self.module.params.get('validate_certs')
        self.restheaders = connection_header

    def get_realm_by_id(self, realm='master'):
        """ Obtain realm representation by id

        :param realm: realm id
        :return: dict of realm representation or None if none matching exist
        """
        realm_url = URL_REALM.format(url=self.baseurl, realm=realm)

        try:
            return json.loads(to_native(open_url(realm_url, method='GET', headers=self.restheaders,
                                                 validate_certs=self.validate_certs).read()))

        except HTTPError as e:
            if e.code == 404:
                return None
            else:
                self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
                                      exception=traceback.format_exc())
        except ValueError as e:
            self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)),
                                  exception=traceback.format_exc())
        except Exception as e:
            self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
                                  exception=traceback.format_exc())

    def update_realm(self, realmrep, realm="master"):
        """ Update an existing realm
        :param realmrep: corresponding (partial/full) realm representation with updates
        :param realm: realm to be updated in Keycloak
        :return: HTTPResponse object on success
        """
        realm_url = URL_REALM.format(url=self.baseurl, realm=realm)

        try:
            return open_url(realm_url, method='PUT', headers=self.restheaders,
                            data=json.dumps(realmrep), validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Could not update realm %s: %s' % (realm, str(e)),
                                  exception=traceback.format_exc())

    def create_realm(self, realmrep):
        """ Create a realm in Keycloak
        :param realmrep: Realm representation of realm to be created.
        :return: HTTPResponse object on success
        """
        realm_url = URL_REALMS.format(url=self.baseurl)

        try:
            return open_url(realm_url, method='POST', headers=self.restheaders,
                            data=json.dumps(realmrep), validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Could not create realm %s: %s' % (realmrep['id'], str(e)),
                                  exception=traceback.format_exc())

    def delete_realm(self, realm="master"):
        """ Delete a realm from Keycloak

        :param realm: realm to be deleted
        :return: HTTPResponse object on success
        """
        realm_url = URL_REALM.format(url=self.baseurl, realm=realm)

        try:
            return open_url(realm_url, method='DELETE', headers=self.restheaders,
                            validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Could not delete realm %s: %s' % (realm, str(e)),
                                  exception=traceback.format_exc())

    def get_clients(self, realm='master', filter=None):
        """ Obtains client representations for clients in a realm

@@ -119,9 +119,9 @@ class IPAClient(object):
        data = dict(method=method)

        # TODO: We should probably handle this a little better.
        if method in ('ping', 'config_show', 'otpconfig_show'):
        if method in ('ping', 'config_show'):
            data['params'] = [[], {}]
        elif method in ('config_mod', 'otpconfig_mod'):
        elif method == 'config_mod':
            data['params'] = [[], item]
        else:
            data['params'] = [[name], item]

@@ -87,12 +87,11 @@ def not_in_host_file(self, host):
    user_host_file = "~/.ssh/known_hosts"
    user_host_file = os.path.expanduser(user_host_file)

    host_file_list = [
        user_host_file,
        "/etc/ssh/ssh_known_hosts",
        "/etc/ssh/ssh_known_hosts2",
        "/etc/openssh/ssh_known_hosts",
    ]
    host_file_list = []
    host_file_list.append(user_host_file)
    host_file_list.append("/etc/ssh/ssh_known_hosts")
    host_file_list.append("/etc/ssh/ssh_known_hosts2")
    host_file_list.append("/etc/openssh/ssh_known_hosts")

    hfiles_not_found = 0
    for hf in host_file_list:

@@ -1,62 +0,0 @@
# -*- coding: utf-8 -*-
# (c) 2020, Alexei Znamensky <russoz@gmail.com>
# Copyright: (c) 2020, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException as _MHE
from ansible_collections.community.general.plugins.module_utils.mh.deco import module_fails_on_exception


class ModuleHelperBase(object):
    module = None
    ModuleHelperException = _MHE

    def __init__(self, module=None):
        self._changed = False

        if module:
            self.module = module

        if not isinstance(self.module, AnsibleModule):
            self.module = AnsibleModule(**self.module)

    def __init_module__(self):
        pass

    def __run__(self):
        raise NotImplementedError()

    def __quit_module__(self):
        pass

    def __changed__(self):
        raise NotImplementedError()

    @property
    def changed(self):
        try:
            return self.__changed__()
        except NotImplementedError:
            return self._changed

    @changed.setter
    def changed(self, value):
        self._changed = value

    def has_changed(self):
        raise NotImplementedError()

    @property
    def output(self):
        raise NotImplementedError()

    @module_fails_on_exception
    def run(self):
        self.__init_module__()
        self.__run__()
        self.__quit_module__()
        self.module.exit_json(changed=self.has_changed(), **self.output)
@@ -1,54 +0,0 @@
# -*- coding: utf-8 -*-
# (c) 2020, Alexei Znamensky <russoz@gmail.com>
# Copyright: (c) 2020, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

import traceback
from functools import wraps

from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException


def cause_changes(on_success=None, on_failure=None):

    def deco(func):
        if on_success is None and on_failure is None:
            return func

        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                self = args[0]
                func(*args, **kwargs)
                if on_success is not None:
                    self.changed = on_success
            except Exception:
                if on_failure is not None:
                    self.changed = on_failure
                raise

        return wrapper

    return deco


def module_fails_on_exception(func):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            func(self, *args, **kwargs)
        except SystemExit:
            raise
        except ModuleHelperException as e:
            if e.update_output:
                self.update_output(e.update_output)
            self.module.fail_json(msg=e.msg, exception=traceback.format_exc(),
                                  output=self.output, vars=self.vars.output(), **self.output)
        except Exception as e:
            msg = "Module failed with exception: {0}".format(str(e).strip())
            self.module.fail_json(msg=msg, exception=traceback.format_exc(),
                                  output=self.output, vars=self.vars.output(), **self.output)
    return wrapper
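
An illustrative pairing of the two decorators above (class and method body assumed): cause_changes flips 'changed' when the step succeeds, while module_fails_on_exception (applied to run() in ModuleHelperBase) turns any stray exception into fail_json().

class EnsureThing(ModuleHelperBase):
    @cause_changes(on_success=True)
    def __run__(self):
        pass  # apply the change; self.changed becomes True if no exception is raised
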
@@ -1,22 +0,0 @@
# -*- coding: utf-8 -*-
# (c) 2020, Alexei Znamensky <russoz@gmail.com>
# Copyright: (c) 2020, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


class ModuleHelperException(Exception):
    @staticmethod
    def _get_remove(key, kwargs):
        if key in kwargs:
            result = kwargs[key]
            del kwargs[key]
            return result
        return None

    def __init__(self, *args, **kwargs):
        self.msg = self._get_remove('msg', kwargs) or "Module failed with exception: {0}".format(self)
        self.update_output = self._get_remove('update_output', kwargs) or {}
        super(ModuleHelperException, self).__init__(*args)
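The msg and update_output keywords are popped out of kwargs before delegating to Exception.__init__, so a caller can attach both a failure message and extra output values in a single raise (hypothetical values):

    raise ModuleHelperException(msg="binary not found",
                                update_output={'rc': 127, 'stderr': 'not found'})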
@@ -1,175 +0,0 @@
# -*- coding: utf-8 -*-
# (c) 2020, Alexei Znamensky <russoz@gmail.com>
# Copyright: (c) 2020, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

from functools import partial


class ArgFormat(object):
    """
    Argument formatter for use as a command line parameter. Used in CmdMixin.
    """
    BOOLEAN = 0
    PRINTF = 1
    FORMAT = 2

    @staticmethod
    def stars_deco(num):
        if num == 1:
            def deco(f):
                return lambda v: f(*v)
            return deco
        elif num == 2:
            def deco(f):
                return lambda v: f(**v)
            return deco

        return lambda f: f

    def __init__(self, name, fmt=None, style=FORMAT, stars=0):
        """
        Creates a CLI formatter for one specific argument. The argument may be a module parameter or just a named parameter for
        the CLI command execution.
        :param name: Name of the argument to be formatted
        :param fmt: Either a str to be formatted (printf-style or not) or a callable that does the formatting
        :param style: Whether fmt (as str) should use printf-style formatting.
                      Ignored if fmt is None or not a str (should be callable).
        :param stars: An int with value 0, 1 or 2, indicating whether to pass the value as: value, *value or **value
        """
        def printf_fmt(_fmt, v):
            try:
                return [_fmt % v]
            except TypeError as e:
                if e.args[0] != 'not all arguments converted during string formatting':
                    raise
                return [_fmt]

        _fmts = {
            ArgFormat.BOOLEAN: lambda _fmt, v: ([_fmt] if bool(v) else []),
            ArgFormat.PRINTF: printf_fmt,
            ArgFormat.FORMAT: lambda _fmt, v: [_fmt.format(v)],
        }

        self.name = name
        self.stars = stars

        if fmt is None:
            fmt = "{0}"
            style = ArgFormat.FORMAT

        if isinstance(fmt, str):
            func = _fmts[style]
            self.arg_format = partial(func, fmt)
        elif isinstance(fmt, (list, tuple)):
            self.arg_format = lambda v: [_fmts[style](f, v)[0] for f in fmt]
        elif hasattr(fmt, '__call__'):
            self.arg_format = fmt
        else:
            raise TypeError('Parameter fmt must be either: a string, a list/tuple of '
                            'strings or a function: type={0}, value={1}'.format(type(fmt), fmt))

        if stars:
            self.arg_format = (self.stars_deco(stars))(self.arg_format)

    def to_text(self, value):
        if value is None:
            return []
        func = self.arg_format
        return [str(p) for p in func(value)]


class CmdMixin(object):
    """
    Mixin for mapping module options to running a CLI command with its arguments.
    """
    command = None
    command_args_formats = {}
    run_command_fixed_options = {}
    check_rc = False
    force_lang = "C"

    @property
    def module_formats(self):
        result = {}
        for param in self.module.params.keys():
            result[param] = ArgFormat(param)
        return result

    @property
    def custom_formats(self):
        result = {}
        for param, fmt_spec in self.command_args_formats.items():
            result[param] = ArgFormat(param, **fmt_spec)
        return result

    def _calculate_args(self, extra_params=None, params=None):
        def add_arg_formatted_param(_cmd_args, arg_format, _value):
            args = list(arg_format.to_text(_value))
            return _cmd_args + args

        def find_format(_param):
            return self.custom_formats.get(_param, self.module_formats.get(_param))

        extra_params = extra_params or dict()
        cmd_args = list([self.command]) if isinstance(self.command, str) else list(self.command)
        try:
            cmd_args[0] = self.module.get_bin_path(cmd_args[0], required=True)
        except ValueError:
            pass
        param_list = params if params else self.vars.keys()

        for param in param_list:
            if isinstance(param, dict):
                if len(param) != 1:
                    raise self.ModuleHelperException("run_command parameter as a dict must "
                                                     "contain only one key: {0}".format(param))
                _param = list(param.keys())[0]
                fmt = find_format(_param)
                value = param[_param]
            elif isinstance(param, str):
                if param in self.vars.keys():
                    fmt = find_format(param)
                    value = self.vars[param]
                elif param in extra_params:
                    fmt = find_format(param)
                    value = extra_params[param]
                else:
                    self.module.deprecate("Cannot determine value for parameter: {0}. "
                                          "From version 4.0.0 onwards this will generate an exception".format(param),
                                          version="4.0.0", collection_name="community.general")
                    continue

            else:
                raise self.ModuleHelperException("run_command parameter must be either a str or a dict: {0}".format(param))
            cmd_args = add_arg_formatted_param(cmd_args, fmt, value)

        return cmd_args

    def process_command_output(self, rc, out, err):
        return rc, out, err

    def run_command(self, extra_params=None, params=None, process_output=None, *args, **kwargs):
        self.vars.cmd_args = self._calculate_args(extra_params, params)
        options = dict(self.run_command_fixed_options)
        options['check_rc'] = options.get('check_rc', self.check_rc)
        options.update(kwargs)
        env_update = dict(options.get('environ_update', {}))
        if self.force_lang:
            env_update.update({
                'LANGUAGE': self.force_lang,
                'LC_ALL': self.force_lang,
            })
            self.update_output(force_lang=self.force_lang)
        options['environ_update'] = env_update
        rc, out, err = self.module.run_command(self.vars.cmd_args, *args, **options)
        self.update_output(rc=rc, stdout=out, stderr=err)
        if process_output is None:
            _process = self.process_command_output
        else:
            _process = process_output

        return _process(rc, out, err)
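To illustrate the three formatting styles (the values are invented; the results follow directly from the code above):

    ArgFormat('force', fmt='--force', style=ArgFormat.BOOLEAN).to_text(True)   # ['--force']
    ArgFormat('force', fmt='--force', style=ArgFormat.BOOLEAN).to_text(False)  # []
    ArgFormat('name', fmt='--name=%s', style=ArgFormat.PRINTF).to_text('abc')  # ['--name=abc']
    ArgFormat('name', fmt=['--name', '{0}']).to_text('abc')                    # ['--name', 'abc']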
@@ -1,58 +0,0 @@
# -*- coding: utf-8 -*-
# (c) 2020, Alexei Znamensky <russoz@gmail.com>
# Copyright: (c) 2020, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

import traceback

from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase
from ansible_collections.community.general.plugins.module_utils.mh.deco import module_fails_on_exception


class DependencyCtxMgr(object):
    def __init__(self, name, msg=None):
        self.name = name
        self.msg = msg
        self.has_it = False
        self.exc_type = None
        self.exc_val = None
        self.exc_tb = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.has_it = exc_type is None
        self.exc_type = exc_type
        self.exc_val = exc_val
        self.exc_tb = exc_tb
        return not self.has_it

    @property
    def text(self):
        return self.msg or str(self.exc_val)


class DependencyMixin(ModuleHelperBase):
    _dependencies = []

    @classmethod
    def dependency(cls, name, msg):
        cls._dependencies.append(DependencyCtxMgr(name, msg))
        return cls._dependencies[-1]

    def fail_on_missing_deps(self):
        for d in self._dependencies:
            if not d.has_it:
                self.module.fail_json(changed=False,
                                      exception="\n".join(traceback.format_exception(d.exc_type, d.exc_val, d.exc_tb)),
                                      msg=d.text,
                                      **self.output)

    @module_fails_on_exception
    def run(self):
        self.fail_on_missing_deps()
        super(DependencyMixin, self).run()
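A sketch of how the context manager is meant to be used at import time (the dependency name and message are invented):

    with DependencyMixin.dependency("yaml", "PyYAML is required for this module"):
        import yaml  # a failed import is recorded, not raised
    # later, run() calls fail_on_missing_deps() and reports the stored traceback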
@@ -1,39 +0,0 @@
# -*- coding: utf-8 -*-
# (c) 2020, Alexei Znamensky <russoz@gmail.com>
# Copyright: (c) 2020, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


class StateMixin(object):
    state_param = 'state'
    default_state = None

    def _state(self):
        state = self.module.params.get(self.state_param)
        return self.default_state if state is None else state

    def _method(self, state):
        return "{0}_{1}".format(self.state_param, state)

    def __run__(self):
        state = self._state()
        self.vars.state = state

        # resolve aliases
        if state not in self.module.params:
            aliased = [name for name, param in self.module.argument_spec.items() if state in param.get('aliases', [])]
            if aliased:
                state = aliased[0]
                self.vars.effective_state = state

        method = self._method(state)
        if not hasattr(self, method):
            return self.__state_fallback__()
        func = getattr(self, method)
        return func()

    def __state_fallback__(self):
        raise ValueError("Cannot find method: {0}".format(self._method(self._state())))
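With this mixin a module only declares one method per state and dispatch follows the state parameter. A hypothetical sketch (StateModuleHelper, which combines this mixin with ModuleHelper, is defined further below in mh/module_helper.py):

    class ServiceHelper(StateModuleHelper):
        def state_started(self):
            ...  # runs when state == 'started'

        def state_stopped(self):
            ...  # runs when state == 'stopped'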
@@ -1,132 +0,0 @@
# -*- coding: utf-8 -*-
# (c) 2020, Alexei Znamensky <russoz@gmail.com>
# Copyright: (c) 2020, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


class VarMeta(object):
    NOTHING = object()

    def __init__(self, diff=False, output=True, change=None, fact=False):
        self.init = False
        self.initial_value = None
        self.value = None

        self.diff = diff
        self.change = diff if change is None else change
        self.output = output
        self.fact = fact

    def set(self, diff=None, output=None, change=None, fact=None, initial_value=NOTHING):
        if diff is not None:
            self.diff = diff
        if output is not None:
            self.output = output
        if change is not None:
            self.change = change
        if fact is not None:
            self.fact = fact
        if initial_value is not self.NOTHING:
            self.initial_value = initial_value

    def set_value(self, value):
        if not self.init:
            self.initial_value = value
            self.init = True
        self.value = value
        return self

    @property
    def has_changed(self):
        return self.change and (self.initial_value != self.value)

    @property
    def diff_result(self):
        return None if not (self.diff and self.has_changed) else {
            'before': self.initial_value,
            'after': self.value,
        }

    def __str__(self):
        return "<VarMeta: value={0}, initial={1}, diff={2}, output={3}, change={4}>".format(
            self.value, self.initial_value, self.diff, self.output, self.change
        )


class VarDict(object):
    def __init__(self):
        self._data = dict()
        self._meta = dict()

    def __getitem__(self, item):
        return self._data[item]

    def __setitem__(self, key, value):
        self.set(key, value)

    def __getattr__(self, item):
        try:
            return self._data[item]
        except KeyError:
            return getattr(self._data, item)

    def __setattr__(self, key, value):
        if key in ('_data', '_meta'):
            super(VarDict, self).__setattr__(key, value)
        else:
            self.set(key, value)

    def meta(self, name):
        return self._meta[name]

    def set_meta(self, name, **kwargs):
        self.meta(name).set(**kwargs)

    def set(self, name, value, **kwargs):
        if name in ('_data', '_meta'):
            raise ValueError("Names _data and _meta are reserved for use by ModuleHelper")
        self._data[name] = value
        if name in self._meta:
            meta = self.meta(name)
        else:
            meta = VarMeta(**kwargs)
        meta.set_value(value)
        self._meta[name] = meta

    def output(self):
        return dict((k, v) for k, v in self._data.items() if self.meta(k).output)

    def diff(self):
        diff_results = [(k, self.meta(k).diff_result) for k in self._data]
        diff_results = [dr for dr in diff_results if dr[1] is not None]
        if diff_results:
            before = dict((dr[0], dr[1]['before']) for dr in diff_results)
            after = dict((dr[0], dr[1]['after']) for dr in diff_results)
            return {'before': before, 'after': after}
        return None

    def facts(self):
        facts_result = dict((k, v) for k, v in self._data.items() if self._meta[k].fact)
        return facts_result if facts_result else None

    def change_vars(self):
        return [v for v in self._data if self.meta(v).change]

    def has_changed(self, v):
        return self._meta[v].has_changed


class VarsMixin(object):

    def __init__(self, module=None):
        self.vars = VarDict()
        super(VarsMixin, self).__init__(module)

    def update_vars(self, meta=None, **kwargs):
        if meta is None:
            meta = {}
        for k, v in kwargs.items():
            self.vars.set(k, v, **meta)
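The metadata makes change and diff tracking automatic; a small illustration (values invented, results follow from the code above):

    v = VarDict()
    v.set('level', 1, diff=True)  # first assignment records the initial value
    v.level = 2                   # attribute assignment goes through set() as well
    v.has_changed('level')        # True; change defaults to the diff flag
    v.diff()                      # {'before': {'level': 1}, 'after': {'level': 2}}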
@@ -1,79 +0,0 @@
# -*- coding: utf-8 -*-
# (c) 2020, Alexei Znamensky <russoz@gmail.com>
# Copyright: (c) 2020, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible.module_utils.common.dict_transformations import dict_merge

from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase, AnsibleModule
from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin
from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyMixin
from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin, VarDict as _VD


class ModuleHelper(VarsMixin, DependencyMixin, ModuleHelperBase):
    _output_conflict_list = ('msg', 'exception', 'output', 'vars', 'changed')
    facts_name = None
    output_params = ()
    diff_params = ()
    change_params = ()
    facts_params = ()

    VarDict = _VD  # for backward compatibility, will be deprecated at some point

    def __init__(self, module=None):
        super(ModuleHelper, self).__init__(module)
        for name, value in self.module.params.items():
            self.vars.set(
                name, value,
                diff=name in self.diff_params,
                output=name in self.output_params,
                change=None if not self.change_params else name in self.change_params,
                fact=name in self.facts_params,
            )

    def update_output(self, **kwargs):
        self.update_vars(meta={"output": True}, **kwargs)

    def update_facts(self, **kwargs):
        self.update_vars(meta={"fact": True}, **kwargs)

    def _vars_changed(self):
        return any(self.vars.has_changed(v) for v in self.vars.change_vars())

    def has_changed(self):
        return self.changed or self._vars_changed()

    @property
    def output(self):
        result = dict(self.vars.output())
        if self.facts_name:
            facts = self.vars.facts()
            if facts is not None:
                result['ansible_facts'] = {self.facts_name: facts}
        if self.module._diff:
            diff = result.get('diff', {})
            vars_diff = self.vars.diff() or {}
            result['diff'] = dict_merge(dict(diff), vars_diff)

        for varname in list(result):  # iterate over a copy: entries are renamed in place below
            if varname in self._output_conflict_list:
                result["_" + varname] = result[varname]
                del result[varname]
        return result


class StateModuleHelper(StateMixin, ModuleHelper):
    pass


class CmdModuleHelper(CmdMixin, ModuleHelper):
    pass


class CmdStateModuleHelper(CmdMixin, StateMixin, ModuleHelper):
    pass
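Putting the pieces together, a minimal module built on ModuleHelper would look roughly like the sketch below (hypothetical, not part of this diff): parameters land in self.vars, and values assigned through vars default to being published in the output.

    class Doubler(ModuleHelper):
        output_params = ('number',)

        def __run__(self):
            self.vars.result = self.vars.number * 2  # published in the module output


    def main():
        helper = Doubler(module=dict(
            argument_spec=dict(number=dict(type='int', default=1)),
            supports_check_mode=True,
        ))
        helper.run()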
@@ -6,13 +6,347 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type

from functools import partial, wraps
import traceback

from ansible_collections.community.general.plugins.module_utils.mh.module_helper import (
    ModuleHelper, StateModuleHelper, CmdModuleHelper, CmdStateModuleHelper, AnsibleModule
)
from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin, ArgFormat
from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr
from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException
from ansible_collections.community.general.plugins.module_utils.mh.deco import cause_changes, module_fails_on_exception
from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarMeta, VarDict
from ansible.module_utils.basic import AnsibleModule


class ModuleHelperException(Exception):
    @staticmethod
    def _get_remove(key, kwargs):
        if key in kwargs:
            result = kwargs[key]
            del kwargs[key]
            return result
        return None

    def __init__(self, *args, **kwargs):
        self.msg = self._get_remove('msg', kwargs) or "Module failed with exception: {0}".format(self)
        self.update_output = self._get_remove('update_output', kwargs) or {}
        super(ModuleHelperException, self).__init__(*args, **kwargs)


class ArgFormat(object):
    """
    Argument formatter
    """
    BOOLEAN = 0
    PRINTF = 1
    FORMAT = 2

    @staticmethod
    def stars_deco(num):
        if num == 1:
            def deco(f):
                return lambda v: f(*v)
            return deco
        elif num == 2:
            def deco(f):
                return lambda v: f(**v)
            return deco

        return lambda f: f

    def __init__(self, name, fmt=None, style=FORMAT, stars=0):
        """
        Creates a new formatter
        :param name: Name of the argument to be formatted
        :param fmt: Either a str to be formatted (printf-style or not) or a callable that does the formatting
        :param style: Whether fmt (as str) should use printf-style formatting.
                      Ignored if fmt is None or not a str (should be callable).
        :param stars: An int with value 0, 1 or 2, indicating whether to pass the value as: value, *value or **value
        """
        def printf_fmt(_fmt, v):
            try:
                return [_fmt % v]
            except TypeError as e:
                if e.args[0] != 'not all arguments converted during string formatting':
                    raise
                return [_fmt]

        _fmts = {
            ArgFormat.BOOLEAN: lambda _fmt, v: ([_fmt] if bool(v) else []),
            ArgFormat.PRINTF: printf_fmt,
            ArgFormat.FORMAT: lambda _fmt, v: [_fmt.format(v)],
        }

        self.name = name
        self.stars = stars

        if fmt is None:
            fmt = "{0}"
            style = ArgFormat.FORMAT

        if isinstance(fmt, str):
            func = _fmts[style]
            self.arg_format = partial(func, fmt)
        elif isinstance(fmt, list) or isinstance(fmt, tuple):
            self.arg_format = lambda v: [_fmts[style](f, v)[0] for f in fmt]
        elif hasattr(fmt, '__call__'):
            self.arg_format = fmt
        else:
            raise TypeError('Parameter fmt must be either: a string, a list/tuple of '
                            'strings or a function: type={0}, value={1}'.format(type(fmt), fmt))

        if stars:
            self.arg_format = (self.stars_deco(stars))(self.arg_format)

    def to_text(self, value):
        func = self.arg_format
        return [str(p) for p in func(value)]


def cause_changes(func, on_success=True, on_failure=False):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            func(*args, **kwargs)
            if on_success:
                self.changed = True
        except Exception as e:
            if on_failure:
                self.changed = True
            raise
    return wrapper


def module_fails_on_exception(func):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            func(self, *args, **kwargs)
        except SystemExit:
            raise
        except ModuleHelperException as e:
            if e.update_output:
                self.update_output(e.update_output)
        except Exception as e:
            self.vars.msg = "Module failed with exception: {0}".format(str(e).strip())
            self.vars.exception = traceback.format_exc()
            self.module.fail_json(changed=False, msg=self.vars.msg, exception=self.vars.exception, output=self.output, vars=self.vars)
    return wrapper


class DependencyCtxMgr(object):
    def __init__(self, name, msg=None):
        self.name = name
        self.msg = msg
        self.has_it = False
        self.exc_type = None
        self.exc_val = None
        self.exc_tb = None

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.has_it = exc_type is None
        self.exc_type = exc_type
        self.exc_val = exc_val
        self.exc_tb = exc_tb
        return not self.has_it

    @property
    def text(self):
        return self.msg or str(self.exc_val)


class ModuleHelper(object):
    _dependencies = []
    module = {}
    facts_name = None

    class AttrDict(dict):
        def __getattr__(self, item):
            return self[item]

    def __init__(self, module=None):
        self.vars = ModuleHelper.AttrDict()
        self.output_dict = dict()
        self.facts_dict = dict()
        self._changed = False

        if module:
            self.module = module

        if isinstance(self.module, dict):
            self.module = AnsibleModule(**self.module)

    def update_output(self, **kwargs):
        self.output_dict.update(kwargs)

    def update_facts(self, **kwargs):
        self.facts_dict.update(kwargs)

    def __init_module__(self):
        pass

    def __run__(self):
        raise NotImplementedError()

    def __quit_module__(self):
        pass

    @property
    def changed(self):
        return self._changed

    @changed.setter
    def changed(self, value):
        self._changed = value

    @property
    def output(self):
        result = dict(self.vars)
        result.update(self.output_dict)
        if self.facts_name:
            result['ansible_facts'] = {self.facts_name: self.facts_dict}
        return result

    @module_fails_on_exception
    def run(self):
        self.fail_on_missing_deps()
        self.__init_module__()
        self.__run__()
        self.__quit_module__()
        self.module.exit_json(changed=self.changed, **self.output_dict)

    @classmethod
    def dependency(cls, name, msg):
        cls._dependencies.append(DependencyCtxMgr(name, msg))
        return cls._dependencies[-1]

    def fail_on_missing_deps(self):
        for d in self._dependencies:
            if not d.has_it:
                self.module.fail_json(changed=False,
                                      exception=d.exc_val.__traceback__.format_exc(),
                                      msg=d.text,
                                      **self.output_dict)


class StateMixin(object):
    state_param = 'state'
    default_state = None

    def _state(self):
        state = self.module.params.get(self.state_param)
        return self.default_state if state is None else state

    def _method(self, state):
        return "{0}_{1}".format(self.state_param, state)

    def __run__(self):
        state = self._state()
        self.vars.state = state

        # resolve aliases
        if state not in self.module.params:
            aliased = [name for name, param in self.module.argument_spec.items() if state in param.get('aliases', [])]
            if aliased:
                state = aliased[0]
                self.vars.effective_state = state

        method = self._method(state)
        if not hasattr(self, method):
            return self.__state_fallback__()
        func = getattr(self, method)
        return func()

    def __state_fallback__(self):
        raise ValueError("Cannot find method: {0}".format(self._method(self._state())))


class CmdMixin(object):
    """
    Mixin for mapping module options to running a CLI command with its arguments.
    """
    command = None
    command_args_formats = {}
    run_command_fixed_options = {}
    check_rc = False
    force_lang = "C"

    @property
    def module_formats(self):
        result = {}
        for param in self.module.params.keys():
            result[param] = ArgFormat(param)
        return result

    @property
    def custom_formats(self):
        result = {}
        for param, fmt_spec in self.command_args_formats.items():
            result[param] = ArgFormat(param, **fmt_spec)
        return result

    def _calculate_args(self, extra_params=None, params=None):
        def add_arg_formatted_param(_cmd_args, arg_format, _value):
            args = list(arg_format.to_text(_value))
            return _cmd_args + args

        def find_format(_param):
            return self.custom_formats.get(_param, self.module_formats.get(_param))

        extra_params = extra_params or dict()
        cmd_args = list([self.command]) if isinstance(self.command, str) else list(self.command)
        cmd_args[0] = self.module.get_bin_path(cmd_args[0])
        param_list = params if params else self.module.params.keys()

        for param in param_list:
            if isinstance(param, dict):
                if len(param) != 1:
                    raise ModuleHelperException("run_command parameter as a dict must "
                                                "contain only one key: {0}".format(param))
                _param = list(param.keys())[0]
                fmt = find_format(_param)
                value = param[_param]
            elif isinstance(param, str):
                if param in self.module.argument_spec:
                    fmt = find_format(param)
                    value = self.module.params[param]
                elif param in extra_params:
                    fmt = find_format(param)
                    value = extra_params[param]
                else:
                    self.module.deprecate("Cannot determine value for parameter: {0}. "
                                          "From version 4.0.0 onwards this will generate an exception".format(param),
                                          version="4.0.0", collection_name="community.general")
                    continue

            else:
                raise ModuleHelperException("run_command parameter must be either a str or a dict: {0}".format(param))
            cmd_args = add_arg_formatted_param(cmd_args, fmt, value)

        return cmd_args

    def process_command_output(self, rc, out, err):
        return rc, out, err

    def run_command(self, extra_params=None, params=None, *args, **kwargs):
        self.vars['cmd_args'] = self._calculate_args(extra_params, params)
        options = dict(self.run_command_fixed_options)
        env_update = dict(options.get('environ_update', {}))
        options['check_rc'] = options.get('check_rc', self.check_rc)
        if self.force_lang:
            env_update.update({'LANGUAGE': self.force_lang})
            self.update_output(force_lang=self.force_lang)
        options['environ_update'] = env_update
        options.update(kwargs)
        rc, out, err = self.module.run_command(self.vars['cmd_args'], *args, **options)
        self.update_output(rc=rc, stdout=out, stderr=err)
        return self.process_command_output(rc, out, err)


class StateModuleHelper(StateMixin, ModuleHelper):
    pass


class CmdModuleHelper(CmdMixin, ModuleHelper):
    pass


class CmdStateModuleHelper(CmdMixin, StateMixin, ModuleHelper):
    pass
0	plugins/module_utils/net_tools/__init__.py	Normal file
0	plugins/module_utils/net_tools/nios/__init__.py	Normal file
@@ -18,7 +18,6 @@ from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.common.validation import check_type_dict

try:
    from infoblox_client.connector import Connector
@@ -400,11 +399,11 @@ class WapiModule(WapiBase):

        if 'ipv4addrs' in proposed_object:
            if 'nios_next_ip' in proposed_object['ipv4addrs'][0]['ipv4addr']:
                ip_range = check_type_dict(proposed_object['ipv4addrs'][0]['ipv4addr'])['nios_next_ip']
                ip_range = self.module._check_type_dict(proposed_object['ipv4addrs'][0]['ipv4addr'])['nios_next_ip']
                proposed_object['ipv4addrs'][0]['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range
        elif 'ipv4addr' in proposed_object:
            if 'nios_next_ip' in proposed_object['ipv4addr']:
                ip_range = check_type_dict(proposed_object['ipv4addr'])['nios_next_ip']
                ip_range = self.module._check_type_dict(proposed_object['ipv4addr'])['nios_next_ip']
                proposed_object['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range

        return proposed_object
@@ -486,7 +485,7 @@ class WapiModule(WapiBase):
            if ('name' in obj_filter):
                # gets and returns the current object based on name/old_name passed
                try:
                    name_obj = check_type_dict(obj_filter['name'])
                    name_obj = self.module._check_type_dict(obj_filter['name'])
                    old_name = name_obj['old_name']
                    new_name = name_obj['new_name']
                except TypeError:
@@ -522,7 +521,7 @@ class WapiModule(WapiBase):
                    test_obj_filter['name'] = test_obj_filter['name'].lower()
                # resolves issue where multiple a_records with same name and different IP address
                try:
                    ipaddr_obj = check_type_dict(obj_filter['ipv4addr'])
                    ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr'])
                    ipaddr = ipaddr_obj['old_ipv4addr']
                except TypeError:
                    ipaddr = obj_filter['ipv4addr']
@@ -531,7 +530,7 @@ class WapiModule(WapiBase):
                # resolves issue where multiple txt_records with same name and different text
                test_obj_filter = obj_filter
                try:
                    text_obj = check_type_dict(obj_filter['text'])
                    text_obj = self.module._check_type_dict(obj_filter['text'])
                    txt = text_obj['old_text']
                except TypeError:
                    txt = obj_filter['text']
@@ -544,7 +543,7 @@ class WapiModule(WapiBase):
            # resolves issue where multiple a_records with same name and different IP address
            test_obj_filter = obj_filter
            try:
                ipaddr_obj = check_type_dict(obj_filter['ipv4addr'])
                ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr'])
                ipaddr = ipaddr_obj['old_ipv4addr']
            except TypeError:
                ipaddr = obj_filter['ipv4addr']
@@ -554,7 +553,7 @@ class WapiModule(WapiBase):
            # resolves issue where multiple txt_records with same name and different text
            test_obj_filter = obj_filter
            try:
                text_obj = check_type_dict(obj_filter['text'])
                text_obj = self.module._check_type_dict(obj_filter['text'])
                txt = text_obj['old_text']
            except TypeError:
                txt = obj_filter['text']
@@ -1,370 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Florian Dambrine <android.florian@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

"""
Pritunl API that offers CRUD operations on Pritunl Organizations and Users
"""

from __future__ import absolute_import, division, print_function

import base64
import hashlib
import hmac
import json
import time
import uuid

from ansible.module_utils.six import iteritems
from ansible.module_utils.urls import open_url

__metaclass__ = type


class PritunlException(Exception):
    pass


def pritunl_argument_spec():
    return dict(
        pritunl_url=dict(required=True, type="str"),
        pritunl_api_token=dict(required=True, type="str", no_log=False),
        pritunl_api_secret=dict(required=True, type="str", no_log=True),
        validate_certs=dict(required=False, type="bool", default=True),
    )


def get_pritunl_settings(module):
    """
    Helper function to set required Pritunl request params from module arguments.
    """
    return {
        "api_token": module.params.get("pritunl_api_token"),
        "api_secret": module.params.get("pritunl_api_secret"),
        "base_url": module.params.get("pritunl_url"),
        "validate_certs": module.params.get("validate_certs"),
    }


def _get_pritunl_organizations(api_token, api_secret, base_url, validate_certs=True):
    return pritunl_auth_request(
        base_url=base_url,
        api_token=api_token,
        api_secret=api_secret,
        method="GET",
        path="/organization",
        validate_certs=validate_certs,
    )


def _delete_pritunl_organization(
    api_token, api_secret, base_url, organization_id, validate_certs=True
):
    return pritunl_auth_request(
        base_url=base_url,
        api_token=api_token,
        api_secret=api_secret,
        method="DELETE",
        path="/organization/%s" % (organization_id),
        validate_certs=validate_certs,
    )


def _post_pritunl_organization(
    api_token, api_secret, base_url, organization_data, validate_certs=True
):
    return pritunl_auth_request(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        method="POST",
        path="/organization/%s",
        headers={"Content-Type": "application/json"},
        data=json.dumps(organization_data),
        validate_certs=validate_certs,
    )


def _get_pritunl_users(
    api_token, api_secret, base_url, organization_id, validate_certs=True
):
    return pritunl_auth_request(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        method="GET",
        path="/user/%s" % organization_id,
        validate_certs=validate_certs,
    )


def _delete_pritunl_user(
    api_token, api_secret, base_url, organization_id, user_id, validate_certs=True
):
    return pritunl_auth_request(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        method="DELETE",
        path="/user/%s/%s" % (organization_id, user_id),
        validate_certs=validate_certs,
    )


def _post_pritunl_user(
    api_token, api_secret, base_url, organization_id, user_data, validate_certs=True
):
    return pritunl_auth_request(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        method="POST",
        path="/user/%s" % organization_id,
        headers={"Content-Type": "application/json"},
        data=json.dumps(user_data),
        validate_certs=validate_certs,
    )


def _put_pritunl_user(
    api_token,
    api_secret,
    base_url,
    organization_id,
    user_id,
    user_data,
    validate_certs=True,
):
    return pritunl_auth_request(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        method="PUT",
        path="/user/%s/%s" % (organization_id, user_id),
        headers={"Content-Type": "application/json"},
        data=json.dumps(user_data),
        validate_certs=validate_certs,
    )


def list_pritunl_organizations(
    api_token, api_secret, base_url, validate_certs=True, filters=None
):
    orgs = []

    response = _get_pritunl_organizations(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        validate_certs=validate_certs,
    )

    if response.getcode() != 200:
        raise PritunlException("Could not retrieve organizations from Pritunl")
    else:
        for org in json.loads(response.read()):
            # No filtering
            if filters is None:
                orgs.append(org)
            else:
                if not any(
                    filter_val != org[filter_key]
                    for filter_key, filter_val in iteritems(filters)
                ):
                    orgs.append(org)

    return orgs


def list_pritunl_users(
    api_token, api_secret, base_url, organization_id, validate_certs=True, filters=None
):
    users = []

    response = _get_pritunl_users(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        validate_certs=validate_certs,
        organization_id=organization_id,
    )

    if response.getcode() != 200:
        raise PritunlException("Could not retrieve users from Pritunl")
    else:
        for user in json.loads(response.read()):
            # No filtering
            if filters is None:
                users.append(user)

            else:
                if not any(
                    filter_val != user[filter_key]
                    for filter_key, filter_val in iteritems(filters)
                ):
                    users.append(user)

    return users


def post_pritunl_organization(
    api_token,
    api_secret,
    base_url,
    organization_name,
    validate_certs=True,
):
    response = _post_pritunl_organization(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        organization_data={"name": organization_name},
        validate_certs=True,
    )

    if response.getcode() != 200:
        raise PritunlException(
            "Could not add organization %s to Pritunl" % (organization_name)
        )
    # The user PUT request returns the updated user object
    return json.loads(response.read())


def post_pritunl_user(
    api_token,
    api_secret,
    base_url,
    organization_id,
    user_data,
    user_id=None,
    validate_certs=True,
):
    # If user_id is provided will do PUT otherwise will do POST
    if user_id is None:
        response = _post_pritunl_user(
            api_token=api_token,
            api_secret=api_secret,
            base_url=base_url,
            organization_id=organization_id,
            user_data=user_data,
            validate_certs=True,
        )

        if response.getcode() != 200:
            raise PritunlException(
                "Could not remove user %s from organization %s from Pritunl"
                % (user_id, organization_id)
            )
        # user POST request returns an array of a single item,
        # so return this item instead of the list
        return json.loads(response.read())[0]
    else:
        response = _put_pritunl_user(
            api_token=api_token,
            api_secret=api_secret,
            base_url=base_url,
            organization_id=organization_id,
            user_data=user_data,
            user_id=user_id,
            validate_certs=True,
        )

        if response.getcode() != 200:
            raise PritunlException(
                "Could not update user %s from organization %s from Pritunl"
                % (user_id, organization_id)
            )
        # The user PUT request returns the updated user object
        return json.loads(response.read())


def delete_pritunl_organization(
    api_token, api_secret, base_url, organization_id, validate_certs=True
):
    response = _delete_pritunl_organization(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        organization_id=organization_id,
        validate_certs=True,
    )

    if response.getcode() != 200:
        raise PritunlException(
            "Could not remove organization %s from Pritunl" % (organization_id)
        )

    return json.loads(response.read())


def delete_pritunl_user(
    api_token, api_secret, base_url, organization_id, user_id, validate_certs=True
):
    response = _delete_pritunl_user(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        organization_id=organization_id,
        user_id=user_id,
        validate_certs=True,
    )

    if response.getcode() != 200:
        raise PritunlException(
            "Could not remove user %s from organization %s from Pritunl"
            % (user_id, organization_id)
        )

    return json.loads(response.read())


def pritunl_auth_request(
    api_token,
    api_secret,
    base_url,
    method,
    path,
    validate_certs=True,
    headers=None,
    data=None,
):
    """
    Send an API call to a Pritunl server.
    Taken from https://pritunl.com/api and adapted to work with Ansible open_url
    """
    auth_timestamp = str(int(time.time()))
    auth_nonce = uuid.uuid4().hex

    auth_string = "&".join(
        [api_token, auth_timestamp, auth_nonce, method.upper(), path]
        + ([data] if data else [])
    )

    auth_signature = base64.b64encode(
        hmac.new(
            api_secret.encode("utf-8"), auth_string.encode("utf-8"), hashlib.sha256
        ).digest()
    )

    auth_headers = {
        "Auth-Token": api_token,
        "Auth-Timestamp": auth_timestamp,
        "Auth-Nonce": auth_nonce,
        "Auth-Signature": auth_signature,
    }

    if headers:
        auth_headers.update(headers)

    try:
        uri = "%s%s" % (base_url, path)

        return open_url(
            uri,
            method=method.upper(),
            headers=auth_headers,
            data=data,
            validate_certs=validate_certs,
        )
    except Exception as e:
        raise PritunlException(e)
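From a module's perspective the helpers compose like this (filter values are invented; the 'id' key is assumed from Pritunl's organization response):

    settings = get_pritunl_settings(module)  # api_token/api_secret/base_url/validate_certs
    orgs = list_pritunl_organizations(filters={"name": "ops"}, **settings)
    if orgs:
        users = list_pritunl_users(organization_id=orgs[0]["id"], **settings)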
@@ -39,16 +39,14 @@ class OpenNebulaModule:
        wait_timeout=dict(type='int', default=300),
    )

    def __init__(self, argument_spec, supports_check_mode=False, mutually_exclusive=None, required_one_of=None, required_if=None):
    def __init__(self, argument_spec, supports_check_mode=False, mutually_exclusive=None):

        module_args = OpenNebulaModule.common_args.copy()
        module_args = OpenNebulaModule.common_args
        module_args.update(argument_spec)

        self.module = AnsibleModule(argument_spec=module_args,
                                    supports_check_mode=supports_check_mode,
                                    mutually_exclusive=mutually_exclusive,
                                    required_one_of=required_one_of,
                                    required_if=required_if)
                                    mutually_exclusive=mutually_exclusive)
        self.result = dict(changed=False,
                           original_message='',
                           message='')
0	plugins/module_utils/oracle/__init__.py	Normal file
@@ -104,7 +104,7 @@ def get_common_arg_spec(supports_create=False, supports_wait=False):
    if supports_create:
        common_args.update(
            key_by=dict(type="list", elements="str", no_log=False),
            key_by=dict(type="list", elements="str"),
            force_create=dict(type="bool", default=False),
        )
@@ -19,10 +19,11 @@ PATCH_HEADERS = {'content-type': 'application/json', 'accept': 'application/json
                 'OData-Version': '4.0'}
DELETE_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'}

FAIL_MSG = 'Issuing a data modification command without specifying the '\
           'ID of the target %(resource)s resource when there is more '\
           'than one %(resource)s is no longer allowed. Use the `resource_id` '\
           'option to specify the target %(resource)s ID.'
DEPRECATE_MSG = 'Issuing a data modification command without specifying the '\
                'ID of the target %(resource)s resource when there is more '\
                'than one %(resource)s will use the first one in the '\
                'collection. Use the `resource_id` option to specify the '\
                'target %(resource)s ID'


class RedfishUtils(object):
@@ -38,34 +39,13 @@
        self.data_modification = data_modification
        self._init_session()

    def _auth_params(self, headers):
        """
        Return tuple of required authentication params based on the presence
        of a token in the self.creds dict. If using a token, set the
        X-Auth-Token header in the `headers` param.

        :param headers: dict containing headers to send in request
        :return: tuple of username, password and force_basic_auth
        """
        if self.creds.get('token'):
            username = None
            password = None
            force_basic_auth = False
            headers['X-Auth-Token'] = self.creds['token']
        else:
            username = self.creds['user']
            password = self.creds['pswd']
            force_basic_auth = True
        return username, password, force_basic_auth

    # The following functions are to send GET/POST/PATCH/DELETE requests
    def get_request(self, uri):
        req_headers = dict(GET_HEADERS)
        username, password, basic_auth = self._auth_params(req_headers)
        try:
            resp = open_url(uri, method="GET", headers=req_headers,
                            url_username=username, url_password=password,
                            force_basic_auth=basic_auth, validate_certs=False,
            resp = open_url(uri, method="GET", headers=GET_HEADERS,
                            url_username=self.creds['user'],
                            url_password=self.creds['pswd'],
                            force_basic_auth=True, validate_certs=False,
                            follow_redirects='all',
                            use_proxy=True, timeout=self.timeout)
            data = json.loads(to_native(resp.read()))
@@ -86,16 +66,14 @@ class RedfishUtils(object):
        return {'ret': True, 'data': data, 'headers': headers}

    def post_request(self, uri, pyld):
        req_headers = dict(POST_HEADERS)
        username, password, basic_auth = self._auth_params(req_headers)
        try:
            resp = open_url(uri, data=json.dumps(pyld),
                            headers=req_headers, method="POST",
                            url_username=username, url_password=password,
                            force_basic_auth=basic_auth, validate_certs=False,
                            headers=POST_HEADERS, method="POST",
                            url_username=self.creds['user'],
                            url_password=self.creds['pswd'],
                            force_basic_auth=True, validate_certs=False,
                            follow_redirects='all',
                            use_proxy=True, timeout=self.timeout)
            headers = dict((k.lower(), v) for (k, v) in resp.info().items())
        except HTTPError as e:
            msg = self._get_extended_message(e)
            return {'ret': False,
@@ -109,10 +87,10 @@ class RedfishUtils(object):
        except Exception as e:
            return {'ret': False,
                    'msg': "Failed POST request to '%s': '%s'" % (uri, to_text(e))}
        return {'ret': True, 'headers': headers, 'resp': resp}
        return {'ret': True, 'resp': resp}

    def patch_request(self, uri, pyld):
        req_headers = dict(PATCH_HEADERS)
        headers = PATCH_HEADERS
        r = self.get_request(uri)
        if r['ret']:
            # Get etag from etag header or @odata.etag property
@@ -120,13 +98,15 @@ class RedfishUtils(object):
            if not etag:
                etag = r['data'].get('@odata.etag')
            if etag:
                req_headers['If-Match'] = etag
        username, password, basic_auth = self._auth_params(req_headers)
                # Make copy of headers and add If-Match header
                headers = dict(headers)
                headers['If-Match'] = etag
        try:
            resp = open_url(uri, data=json.dumps(pyld),
                            headers=req_headers, method="PATCH",
                            url_username=username, url_password=password,
                            force_basic_auth=basic_auth, validate_certs=False,
                            headers=headers, method="PATCH",
                            url_username=self.creds['user'],
                            url_password=self.creds['pswd'],
                            force_basic_auth=True, validate_certs=False,
                            follow_redirects='all',
                            use_proxy=True, timeout=self.timeout)
        except HTTPError as e:
@@ -145,14 +125,13 @@ class RedfishUtils(object):
        return {'ret': True, 'resp': resp}

    def delete_request(self, uri, pyld=None):
        req_headers = dict(DELETE_HEADERS)
        username, password, basic_auth = self._auth_params(req_headers)
        try:
            data = json.dumps(pyld) if pyld else None
            resp = open_url(uri, data=data,
                            headers=req_headers, method="DELETE",
                            url_username=username, url_password=password,
                            force_basic_auth=basic_auth, validate_certs=False,
                            headers=DELETE_HEADERS, method="DELETE",
                            url_username=self.creds['user'],
                            url_password=self.creds['pswd'],
                            force_basic_auth=True, validate_certs=False,
                            follow_redirects='all',
                            use_proxy=True, timeout=self.timeout)
        except HTTPError as e:
@@ -266,7 +245,8 @@ class RedfishUtils(object):
                'ret': False,
                'msg': "System resource %s not found" % self.resource_id}
        elif len(self.systems_uris) > 1:
            self.module.fail_json(msg=FAIL_MSG % {'resource': 'System'})
            self.module.deprecate(DEPRECATE_MSG % {'resource': 'System'},
                                  version='3.0.0', collection_name='community.general')  # was Ansible 2.14
        return {'ret': True}

    def _find_updateservice_resource(self):
@@ -316,7 +296,8 @@ class RedfishUtils(object):
                'ret': False,
                'msg': "Chassis resource %s not found" % self.resource_id}
        elif len(self.chassis_uris) > 1:
            self.module.fail_json(msg=FAIL_MSG % {'resource': 'Chassis'})
            self.module.deprecate(DEPRECATE_MSG % {'resource': 'Chassis'},
                                  version='3.0.0', collection_name='community.general')  # was Ansible 2.14
        return {'ret': True}

    def _find_managers_resource(self):
@@ -345,7 +326,8 @@ class RedfishUtils(object):
                'ret': False,
                'msg': "Manager resource %s not found" % self.resource_id}
        elif len(self.manager_uris) > 1:
            self.module.fail_json(msg=FAIL_MSG % {'resource': 'Manager'})
            self.module.deprecate(DEPRECATE_MSG % {'resource': 'Manager'},
                                  version='3.0.0', collection_name='community.general')  # was Ansible 2.14
        return {'ret': True}

    def _get_all_action_info_values(self, action):
@@ -1214,54 +1196,6 @@ class RedfishUtils(object):

        return {'ret': True, 'changed': True, 'msg': "Clear all sessions successfully"}

    def create_session(self):
        if not self.creds.get('user') or not self.creds.get('pswd'):
            return {'ret': False, 'msg':
                    'Must provide the username and password parameters for '
                    'the CreateSession command'}

        payload = {
            'UserName': self.creds['user'],
            'Password': self.creds['pswd']
        }
        response = self.post_request(self.root_uri + self.sessions_uri, payload)
        if response['ret'] is False:
            return response

        headers = response['headers']
        if 'x-auth-token' not in headers:
            return {'ret': False, 'msg':
                    'The service did not return the X-Auth-Token header in '
                    'the response from the Sessions collection POST'}

        if 'location' not in headers:
            self.module.warn(
                'The service did not return the Location header for the '
                'session URL in the response from the Sessions collection '
                'POST')
            session_uri = None
        else:
            session_uri = urlparse(headers.get('location')).path

        session = dict()
        session['token'] = headers.get('x-auth-token')
        session['uri'] = session_uri
        return {'ret': True, 'changed': True, 'session': session,
                'msg': 'Session created successfully'}

    def delete_session(self, session_uri):
        if not session_uri:
            return {'ret': False, 'msg':
                    'Must provide the session_uri parameter for the '
                    'DeleteSession command'}

        response = self.delete_request(self.root_uri + session_uri)
        if response['ret'] is False:
            return response

        return {'ret': True, 'changed': True,
                'msg': 'Session deleted successfully'}

    def get_firmware_update_capabilities(self):
        result = {}
        response = self.get_request(self.root_uri + self.update_uri)
@@ -1671,31 +1605,19 @@ class RedfishUtils(object):

        # Make a copy of the attributes dict
        attrs_to_patch = dict(attributes)
        # List to hold attributes not found
        attrs_bad = {}

        # Check the attributes
        for attr_name, attr_value in attributes.items():
            # Check if attribute exists
            if attr_name not in data[u'Attributes']:
                # Remove and proceed to next attribute if this isn't valid
                attrs_bad.update({attr_name: attr_value})
                del attrs_to_patch[attr_name]
                continue

        for attr in attributes:
            if attr not in data[u'Attributes']:
                return {'ret': False, 'msg': "BIOS attribute %s not found" % attr}
            # If already set to requested value, remove it from PATCH payload
            if data[u'Attributes'][attr_name] == attributes[attr_name]:
                del attrs_to_patch[attr_name]

        warning = ""
        if attrs_bad:
            warning = "Incorrect attributes %s" % (attrs_bad)
            if data[u'Attributes'][attr] == attributes[attr]:
                del attrs_to_patch[attr]

        # Return success w/ changed=False if no attrs need to be changed
        if not attrs_to_patch:
            return {'ret': True, 'changed': False,
                    'msg': "BIOS attributes already set",
                    'warning': warning}
                    'msg': "BIOS attributes already set"}

        # Get the SettingsObject URI
        set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"]["@odata.id"]
@@ -1705,9 +1627,7 @@ class RedfishUtils(object):
        response = self.patch_request(self.root_uri + set_bios_attr_uri, payload)
        if response['ret'] is False:
            return response
        return {'ret': True, 'changed': True,
                'msg': "Modified BIOS attributes %s" % (attrs_to_patch),
                'warning': warning}
        return {'ret': True, 'changed': True, 'msg': "Modified BIOS attribute"}

    def set_boot_order(self, boot_list):
        if not boot_list:
@@ -2756,10 +2676,6 @@ class RedfishUtils(object):
                    need_change = True
            # type is list
            if isinstance(set_value, list):
                if len(set_value) != len(cur_value):
                    # if arrays are not the same len, no need to check each element
                    need_change = True
                    continue
                for i in range(len(set_value)):
                    for subprop in payload[property][i].keys():
                        if subprop not in target_ethernet_current_setting[property][i]:
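The net effect of _auth_params() is that a session token, once stored in creds, silently switches every request from basic auth to the X-Auth-Token header. A sketch, assuming utils is an already-constructed RedfishUtils instance:

    session = utils.create_session()                    # POST credentials to the Sessions collection
    utils.creds['token'] = session['session']['token']  # later requests now send X-Auth-Token
    result = utils.get_request(utils.root_uri + '/redfish/v1/Systems')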
0	plugins/module_utils/remote_management/__init__.py	Normal file
@@ -0,0 +1,56 @@
# -*- coding: utf-8 -*-

#
# Dell EMC OpenManage Ansible Modules
# Version 1.0
# Copyright (C) 2018 Dell Inc.

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
# Other trademarks may be trademarks of their respective owners.
#

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

try:
    from omsdk.sdkinfra import sdkinfra
    from omsdk.sdkcreds import UserCredentials
    from omsdk.sdkfile import FileOnShare, file_share_manager
    from omsdk.sdkprotopref import ProtoPreference, ProtocolEnum
    from omsdk.http.sdkwsmanbase import WsManOptions
    HAS_OMSDK = True
except ImportError:
    HAS_OMSDK = False


class iDRACConnection:

    def __init__(self, module_params):
        if not HAS_OMSDK:
            raise ImportError("Dell EMC OMSDK library is required for this module")
        self.idrac_ip = module_params['idrac_ip']
        self.idrac_user = module_params['idrac_user']
        self.idrac_pwd = module_params['idrac_password']
        self.idrac_port = module_params['idrac_port']
        if not all((self.idrac_ip, self.idrac_user, self.idrac_pwd)):
            raise ValueError("hostname, username and password required")
        self.handle = None
        self.creds = UserCredentials(self.idrac_user, self.idrac_pwd)
        self.pOp = WsManOptions(port=self.idrac_port)
        self.sdk = sdkinfra()
        if self.sdk is None:
            msg = "Could not initialize iDRAC drivers."
            raise RuntimeError(msg)

    def __enter__(self):
        self.sdk.importPath()
        self.handle = self.sdk.get_driver(self.sdk.driver_enum.iDRAC, self.idrac_ip, self.creds, pOptions=self.pOp)
        if self.handle is None:
            msg = "Could not find device driver for iDRAC with IP Address: {0}".format(self.idrac_ip)
            raise RuntimeError(msg)
        return self.handle

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.handle.disconnect()
        return False
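Because iDRACConnection implements __enter__/__exit__, the intended call pattern is a with block that yields the OMSDK driver handle and disconnects on exit. An illustrative sketch; the parameter values are placeholders:

params = {
    'idrac_ip': '192.0.2.10',
    'idrac_user': 'root',
    'idrac_password': 'calvin',
    'idrac_port': 443,
}
with iDRACConnection(params) as idrac:
    # 'idrac' is the OMSDK driver handle returned by __enter__; its methods
    # come from the omsdk library, so this is only a sketch.
    print(idrac)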
163  plugins/module_utils/remote_management/dellemc/ome.py  Normal file
@@ -0,0 +1,163 @@
# -*- coding: utf-8 -*-

# Dell EMC OpenManage Ansible Modules
# Version 1.3
# Copyright (C) 2019 Dell Inc. or its subsidiaries. All Rights Reserved.
#
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.six.moves.urllib.parse import urlencode

SESSION_RESOURCE_COLLECTION = {
    "SESSION": "SessionService/Sessions",
    "SESSION_ID": "SessionService/Sessions('{Id}')",
}


class OpenURLResponse(object):
    """Handles HTTPResponse"""

    def __init__(self, resp):
        self.body = None
        self.resp = resp
        if self.resp:
            self.body = self.resp.read()

    @property
    def json_data(self):
        try:
            return json.loads(self.body)
        except ValueError:
            raise ValueError("Unable to parse json")

    @property
    def status_code(self):
        return self.resp.getcode()

    @property
    def success(self):
        return self.status_code in (200, 201, 202, 204)

    @property
    def token_header(self):
        return self.resp.headers.get('X-Auth-Token')


class RestOME(object):
    """Handles OME API requests"""

    def __init__(self, module_params=None, req_session=False):
        self.module_params = module_params
        self.hostname = self.module_params["hostname"]
        self.username = self.module_params["username"]
        self.password = self.module_params["password"]
        self.port = self.module_params["port"]
        self.req_session = req_session
        self.session_id = None
        self.protocol = 'https'
        self._headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}

    def _get_base_url(self):
        """builds base url"""
        return '{0}://{1}:{2}/api'.format(self.protocol, self.hostname, self.port)

    def _build_url(self, path, query_param=None):
        """builds complete url"""
        url = path
        base_uri = self._get_base_url()
        if path:
            url = '{0}/{1}'.format(base_uri, path)
        if query_param:
            url += "?{0}".format(urlencode(query_param))
        return url

    def _url_common_args_spec(self, method, api_timeout, headers=None):
        """Creates an argument common spec"""
        req_header = self._headers
        if headers:
            req_header.update(headers)
        url_kwargs = {
            "method": method,
            "validate_certs": False,
            "use_proxy": True,
            "headers": req_header,
            "timeout": api_timeout,
            "follow_redirects": 'all',
        }
        return url_kwargs

    def _args_without_session(self, method, api_timeout=30, headers=None):
        """Creates an argument spec in case of basic authentication"""
        req_header = self._headers
        if headers:
            req_header.update(headers)
        url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
        url_kwargs["url_username"] = self.username
        url_kwargs["url_password"] = self.password
        url_kwargs["force_basic_auth"] = True
        return url_kwargs

    def _args_with_session(self, method, api_timeout=30, headers=None):
        """Creates an argument spec, in case of authentication with session"""
        url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
        url_kwargs["force_basic_auth"] = False
        return url_kwargs

    def invoke_request(self, method, path, data=None, query_param=None, headers=None,
                       api_timeout=30, dump=True):
        """
        Sends a request via open_url

        Returns :class:`OpenURLResponse` object.

        :arg method: HTTP verb to use for the request
        :arg path: path to request without query parameter
        :arg data: (optional) Payload to send with the request
        :arg query_param: (optional) Dictionary of query parameter to send with request
        :arg headers: (optional) Dictionary of HTTP Headers to send with the
            request
        :arg api_timeout: (optional) How long to wait for the server to send
            data before giving up
        :arg dump: (Optional) boolean value for dumping payload data.
        :returns: OpenURLResponse
        """
        try:
            if 'X-Auth-Token' in self._headers:
                url_kwargs = self._args_with_session(method, api_timeout, headers=headers)
            else:
                url_kwargs = self._args_without_session(method, api_timeout, headers=headers)
            if data and dump:
                data = json.dumps(data)
            url = self._build_url(path, query_param=query_param)
            resp = open_url(url, data=data, **url_kwargs)
            resp_data = OpenURLResponse(resp)
        except (HTTPError, URLError, SSLValidationError, ConnectionError) as err:
            raise err
        return resp_data

    def __enter__(self):
        """Creates sessions by passing it to header"""
        if self.req_session:
            payload = {'UserName': self.username,
                       'Password': self.password,
                       'SessionType': 'API', }
            path = SESSION_RESOURCE_COLLECTION["SESSION"]
            resp = self.invoke_request('POST', path, data=payload)
            if resp and resp.success:
                self.session_id = resp.json_data.get("Id")
                self._headers["X-Auth-Token"] = resp.token_header
            else:
                msg = "Could not create the session"
                raise ConnectionError(msg)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Deletes a session id, which is in use for request"""
        if self.session_id:
            path = SESSION_RESOURCE_COLLECTION["SESSION_ID"].format(Id=self.session_id)
            self.invoke_request('DELETE', path)
        return False
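RestOME is likewise designed for a with block: when req_session=True, __enter__ POSTs to SessionService/Sessions and stores the returned X-Auth-Token for later calls, and __exit__ deletes the session. An illustrative sketch; the host details are placeholders, and 'DeviceService/Devices' is an assumed example path that this file does not itself reference:

params = {'hostname': '192.0.2.20', 'username': 'admin',
          'password': 'password', 'port': 443}
with RestOME(module_params=params, req_session=True) as ome:
    resp = ome.invoke_request('GET', 'DeviceService/Devices')
    if resp.success:
        print(resp.json_data)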
@@ -39,7 +39,7 @@ class ScalewayException(Exception):
 R_LINK_HEADER = r'''<[^>]+>;\srel="(first|previous|next|last)"
     (,<[^>]+>;\srel="(first|previous|next|last)")*'''
 # Specify a single relation, for iteration and string extraction purposes
-R_RELATION = r'</?(?P<target_IRI>[^>]+)>; rel="(?P<relation>first|previous|next|last)"'
+R_RELATION = r'<(?P<target_IRI>[^>]+)>; rel="(?P<relation>first|previous|next|last)"'
 
 
 def parse_pagination_link(header):
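The fix drops the optional slash so R_RELATION only matches a well-formed '<IRI>; rel="..."' pair. A quick demonstration against a representative Link header value (the URL is a placeholder):

import re

R_RELATION = r'<(?P<target_IRI>[^>]+)>; rel="(?P<relation>first|previous|next|last)"'

header = '<https://api.scaleway.com/servers?page=2>; rel="next"'
match = re.match(R_RELATION, header)
if match:
    print(match.group('relation'), match.group('target_IRI'))
    # prints: next https://api.scaleway.com/servers?page=2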
0  plugins/module_utils/source_control/__init__.py  Normal file
0  plugins/module_utils/storage/__init__.py  Normal file
0  plugins/module_utils/storage/emc/__init__.py  Normal file
0  plugins/module_utils/storage/hpe3par/__init__.py  Normal file
@@ -20,6 +20,7 @@ except ImportError:
    XENAPI_IMP_ERR = traceback.format_exc()

from ansible.module_utils.basic import env_fallback, missing_required_lib
from ansible.module_utils.common.network import is_mac
from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
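These imports feed the usual fail-early pattern in the XenServer module_utils: missing_required_lib formats the error when the XenAPI package is absent, and is_mac validates MAC addresses. A minimal sketch, assuming an AnsibleModule instance named module and a HAS_XENAPI flag defined alongside XENAPI_IMP_ERR above this hunk; neither is shown in this diff:

# Sketch only: HAS_XENAPI and module come from the surrounding module_utils code.
if not HAS_XENAPI:
    module.fail_json(msg=missing_required_lib('XenAPI'), exception=XENAPI_IMP_ERR)
if not is_mac('52:54:00:12:34:56'):
    module.fail_json(msg="value is not a valid MAC address")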
Some files were not shown because too many files have changed in this diff.