mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-04-28 17:36:49 +00:00
Compare commits
148 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
eac1dfdc78 | ||
|
|
930b64226c | ||
|
|
885bb73cc6 | ||
|
|
6afde82f2c | ||
|
|
62d53eb3cf | ||
|
|
1ae9bcc2dd | ||
|
|
1533b5b244 | ||
|
|
99295fac75 | ||
|
|
434ff80ec6 | ||
|
|
be6d3e9aa1 | ||
|
|
b06d46f4d1 | ||
|
|
acea90ceec | ||
|
|
034d09532e | ||
|
|
f815bef3d1 | ||
|
|
e205494c8c | ||
|
|
1541eecd0e | ||
|
|
6cd87580da | ||
|
|
d227ace4a0 | ||
|
|
e7770b9132 | ||
|
|
d0f1d9efd9 | ||
|
|
22f0747e03 | ||
|
|
2ee7de681a | ||
|
|
2e20e9bb8f | ||
|
|
9d9e2bd797 | ||
|
|
d9aa9e8021 | ||
|
|
a9eeced6d7 | ||
|
|
066b306deb | ||
|
|
7426c3839e | ||
|
|
4d6735bebf | ||
|
|
1ceed02048 | ||
|
|
6dc31b13c3 | ||
|
|
c7899e384a | ||
|
|
35c8bbec8a | ||
|
|
87c37ea441 | ||
|
|
0f7e39fa1a | ||
|
|
cde48c3c03 | ||
|
|
778c91caa7 | ||
|
|
61258c9216 | ||
|
|
99e0f8a3a0 | ||
|
|
2dd159493b | ||
|
|
28caeff7bd | ||
|
|
76cf21a05b | ||
|
|
64248acce6 | ||
|
|
8ed5beb978 | ||
|
|
9b7194be53 | ||
|
|
984d9d91b8 | ||
|
|
071d89acac | ||
|
|
afc620fc74 | ||
|
|
acae2a11aa | ||
|
|
f17690e7d0 | ||
|
|
4ca716a1cf | ||
|
|
92f1a33d80 | ||
|
|
c9e07d19d8 | ||
|
|
c7bffaf270 | ||
|
|
1b0f4fdd28 | ||
|
|
02ea90f680 | ||
|
|
6d08dcdef3 | ||
|
|
8dd00a2b9b | ||
|
|
a090e2ff85 | ||
|
|
ca0b1efa5b | ||
|
|
b2f01f4c20 | ||
|
|
81b390b7dc | ||
|
|
9c7eed43a8 | ||
|
|
6999881985 | ||
|
|
d746293884 | ||
|
|
7b2853d9aa | ||
|
|
e05e7babbe | ||
|
|
01773c5338 | ||
|
|
e46e6e4dd0 | ||
|
|
270e3df416 | ||
|
|
d9524bae93 | ||
|
|
77fc407a73 | ||
|
|
2d73089ddc | ||
|
|
7a185cef08 | ||
|
|
6158b5f56b | ||
|
|
1b05e03384 | ||
|
|
0f0eb53efa | ||
|
|
6b58e784af | ||
|
|
7d644ef3d4 | ||
|
|
9db69a62b2 | ||
|
|
1c23ab8d44 | ||
|
|
96c5ceee97 | ||
|
|
768512645d | ||
|
|
66656abe17 | ||
|
|
022a7834df | ||
|
|
09de2dfd77 | ||
|
|
bb910f6aa1 | ||
|
|
56d8554b70 | ||
|
|
4fa140d896 | ||
|
|
5fc3f9c766 | ||
|
|
0f53fba20a | ||
|
|
d6c3661e3e | ||
|
|
b18c88248b | ||
|
|
fd5e05cc77 | ||
|
|
56a1d3ffd6 | ||
|
|
f1477ec8db | ||
|
|
2fb1dc0cf7 | ||
|
|
b9b4837d72 | ||
|
|
9ea8f41ebb | ||
|
|
b1fe3e34f3 | ||
|
|
841286444e | ||
|
|
4c13f10a05 | ||
|
|
9b844fc8d5 | ||
|
|
92514ee143 | ||
|
|
6621eb8b87 | ||
|
|
f4b4a2813a | ||
|
|
6f2cb85fae | ||
|
|
5cdc70bda9 | ||
|
|
89498d3650 | ||
|
|
c553351563 | ||
|
|
72c1a17bd9 | ||
|
|
694584f907 | ||
|
|
73e2c2eb85 | ||
|
|
f3ddc8757d | ||
|
|
9241b853c0 | ||
|
|
1053b3c658 | ||
|
|
d9daa6b851 | ||
|
|
a876fa0262 | ||
|
|
f64ace97af | ||
|
|
b701b5893f | ||
|
|
24667e12d0 | ||
|
|
9d93760564 | ||
|
|
ec78558559 | ||
|
|
d5c8d7ddcc | ||
|
|
6338048c73 | ||
|
|
92b388817f | ||
|
|
c72b337327 | ||
|
|
e5080b7847 | ||
|
|
079925fe66 | ||
|
|
19a87874f7 | ||
|
|
809cdda9ef | ||
|
|
bec6f732ad | ||
|
|
d2cdca416c | ||
|
|
0f1ccc07c5 | ||
|
|
deb1071666 | ||
|
|
eb9c5eb796 | ||
|
|
5c8504323e | ||
|
|
ab391c2cfa | ||
|
|
a14b525bdc | ||
|
|
996ef6ab49 | ||
|
|
055c8dac9c | ||
|
|
f4a9c7cc8b | ||
|
|
0c1f96290a | ||
|
|
d260f7ffda | ||
|
|
35d81adabf | ||
|
|
10a61c9dc3 | ||
|
|
6f47bcc399 | ||
|
|
7140b456ae |
@@ -13,13 +13,25 @@ pr:
|
||||
- stable-*
|
||||
|
||||
schedules:
|
||||
- cron: 0 9 * * *
|
||||
displayName: Nightly
|
||||
- cron: 0 8 * * *
|
||||
displayName: Nightly (main)
|
||||
always: true
|
||||
branches:
|
||||
include:
|
||||
- main
|
||||
- stable-*
|
||||
- cron: 0 10 * * *
|
||||
displayName: Nightly (active stable branches)
|
||||
always: true
|
||||
branches:
|
||||
include:
|
||||
- stable-2
|
||||
- stable-3
|
||||
- cron: 0 11 * * 0
|
||||
displayName: Weekly (old stable branches)
|
||||
always: true
|
||||
branches:
|
||||
include:
|
||||
- stable-1
|
||||
|
||||
variables:
|
||||
- name: checkoutPath
|
||||
@@ -56,6 +68,19 @@ stages:
|
||||
- test: 3
|
||||
- test: 4
|
||||
- test: extra
|
||||
- stage: Sanity_2_11
|
||||
displayName: Sanity 2.11
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Test {0}
|
||||
testFormat: 2.11/sanity/{0}
|
||||
targets:
|
||||
- test: 1
|
||||
- test: 2
|
||||
- test: 3
|
||||
- test: 4
|
||||
- stage: Sanity_2_10
|
||||
displayName: Sanity 2.10
|
||||
dependsOn: []
|
||||
@@ -99,6 +124,23 @@ stages:
|
||||
- test: 3.7
|
||||
- test: 3.8
|
||||
- test: 3.9
|
||||
- test: '3.10'
|
||||
- stage: Units_2_11
|
||||
displayName: Units 2.11
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.11/units/{0}/1
|
||||
targets:
|
||||
- test: 2.6
|
||||
- test: 2.7
|
||||
- test: 3.5
|
||||
- test: 3.6
|
||||
- test: 3.7
|
||||
- test: 3.8
|
||||
- test: 3.9
|
||||
- stage: Units_2_10
|
||||
displayName: Units 2.10
|
||||
dependsOn: []
|
||||
@@ -146,14 +188,33 @@ stages:
|
||||
test: rhel/7.9
|
||||
- name: RHEL 8.3
|
||||
test: rhel/8.3
|
||||
- name: FreeBSD 11.4
|
||||
test: freebsd/11.4
|
||||
- name: FreeBSD 12.2
|
||||
test: freebsd/12.2
|
||||
- name: FreeBSD 13.0
|
||||
test: freebsd/13.0
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Remote_2_11
|
||||
displayName: Remote 2.11
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: 2.11/{0}
|
||||
targets:
|
||||
- name: macOS 11.1
|
||||
test: macos/11.1
|
||||
- name: RHEL 7.9
|
||||
test: rhel/7.9
|
||||
- name: RHEL 8.3
|
||||
test: rhel/8.3
|
||||
- name: FreeBSD 12.2
|
||||
test: freebsd/12.2
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Remote_2_10
|
||||
displayName: Remote 2.10
|
||||
dependsOn: []
|
||||
@@ -208,10 +269,10 @@ stages:
|
||||
test: centos7
|
||||
- name: CentOS 8
|
||||
test: centos8
|
||||
- name: Fedora 32
|
||||
test: fedora32
|
||||
- name: Fedora 33
|
||||
test: fedora33
|
||||
- name: Fedora 34
|
||||
test: fedora34
|
||||
- name: openSUSE 15 py2
|
||||
test: opensuse15py2
|
||||
- name: openSUSE 15 py3
|
||||
@@ -224,6 +285,25 @@ stages:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Docker_2_11
|
||||
displayName: Docker 2.11
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: 2.11/linux/{0}
|
||||
targets:
|
||||
- name: CentOS 8
|
||||
test: centos8
|
||||
- name: Fedora 33
|
||||
test: fedora33
|
||||
- name: openSUSE 15 py3
|
||||
test: opensuse15
|
||||
- name: Ubuntu 20.04
|
||||
test: ubuntu2004
|
||||
groups:
|
||||
- 2
|
||||
- 3
|
||||
- stage: Docker_2_10
|
||||
displayName: Docker 2.10
|
||||
dependsOn: []
|
||||
@@ -270,6 +350,16 @@ stages:
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: devel/cloud/{0}/1
|
||||
targets:
|
||||
- test: 3.8
|
||||
- stage: Cloud_2_11
|
||||
displayName: Cloud 2.11
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.11/cloud/{0}/1
|
||||
targets:
|
||||
- test: 2.7
|
||||
- test: 3.6
|
||||
@@ -299,17 +389,22 @@ stages:
|
||||
- Sanity_devel
|
||||
- Sanity_2_9
|
||||
- Sanity_2_10
|
||||
- Sanity_2_11
|
||||
- Units_devel
|
||||
- Units_2_9
|
||||
- Units_2_10
|
||||
- Units_2_11
|
||||
- Remote_devel
|
||||
- Remote_2_9
|
||||
- Remote_2_10
|
||||
- Remote_2_11
|
||||
- Docker_devel
|
||||
- Docker_2_9
|
||||
- Docker_2_10
|
||||
- Docker_2_11
|
||||
- Cloud_devel
|
||||
- Cloud_2_9
|
||||
- Cloud_2_10
|
||||
- Cloud_2_11
|
||||
jobs:
|
||||
- template: templates/coverage.yml
|
||||
|
||||
@@ -7,7 +7,7 @@ set -o pipefail -eu
|
||||
|
||||
output_path="$1"
|
||||
|
||||
curl --silent --show-error https://codecov.io/bash > codecov.sh
|
||||
curl --silent --show-error https://ansible-ci-files.s3.us-east-1.amazonaws.com/codecov/codecov.sh > codecov.sh
|
||||
|
||||
for file in "${output_path}"/reports/coverage*.xml; do
|
||||
name="${file}"
|
||||
|
||||
55
.github/BOTMETA.yml
vendored
55
.github/BOTMETA.yml
vendored
@@ -4,17 +4,16 @@ files:
|
||||
support: community
|
||||
$actions:
|
||||
labels: action
|
||||
$actions/aireos.py:
|
||||
labels: aireos cisco networking
|
||||
$actions/ironware.py:
|
||||
maintainers: paulquack
|
||||
labels: ironware networking
|
||||
$actions/shutdown.py:
|
||||
$actions/system/iptables_state.py:
|
||||
maintainers: quidame
|
||||
$actions/system/shutdown.py:
|
||||
maintainers: nitzmahone samdoran aminvakil
|
||||
$becomes/:
|
||||
labels: become
|
||||
$callbacks/:
|
||||
labels: callbacks
|
||||
$callbacks/loganalytics.py:
|
||||
maintainers: zhcli
|
||||
$callbacks/logstash.py:
|
||||
maintainers: ujenmr
|
||||
$callbacks/say.py:
|
||||
@@ -53,14 +52,22 @@ files:
|
||||
$doc_fragments/xenserver.py:
|
||||
maintainers: bvitnik
|
||||
labels: xenserver
|
||||
$filters/dict.py:
|
||||
maintainers: felixfontein
|
||||
$filters/dict_kv.py:
|
||||
maintainers: giner
|
||||
$filters/from_csv.py:
|
||||
maintainers: Ajpantuso
|
||||
$filters/jc.py:
|
||||
maintainers: kellyjonbrazil
|
||||
$filters/list.py:
|
||||
maintainers: vbotka
|
||||
$filters/path_join_shim.py:
|
||||
maintainers: felixfontein
|
||||
$filters/time.py:
|
||||
maintainers: resmo
|
||||
$filters/version_sort.py:
|
||||
maintainers: ericzolf
|
||||
$httpapis/:
|
||||
maintainers: $team_networking
|
||||
labels: networking
|
||||
@@ -74,6 +81,8 @@ files:
|
||||
maintainers: $team_linode
|
||||
labels: cloud linode
|
||||
keywords: linode dynamic inventory script
|
||||
$inventories/proxmox.py:
|
||||
maintainers: $team_virt ilijamt
|
||||
$inventories/scaleway.py:
|
||||
maintainers: $team_scaleway
|
||||
labels: cloud scaleway
|
||||
@@ -175,6 +184,8 @@ files:
|
||||
maintainers: zbal
|
||||
$modules/cloud/lxc/lxc_container.py:
|
||||
maintainers: cloudnull
|
||||
$modules/cloud/lxc/lxc_profile.py:
|
||||
maintainers: conloos
|
||||
$modules/cloud/lxd/:
|
||||
ignore: hnakamur
|
||||
$modules/cloud/memset/:
|
||||
@@ -210,7 +221,7 @@ files:
|
||||
$modules/cloud/misc/:
|
||||
ignore: ryansb
|
||||
$modules/cloud/misc/terraform.py:
|
||||
maintainers: m-yosefpor
|
||||
maintainers: m-yosefpor rainerleber
|
||||
$modules/cloud/misc/xenserver_facts.py:
|
||||
maintainers: caphrim007 cheese
|
||||
labels: xenserver_facts
|
||||
@@ -294,6 +305,7 @@ files:
|
||||
maintainers: bvitnik
|
||||
$modules/clustering/consul/:
|
||||
maintainers: $team_consul
|
||||
ignore: colin-nolan
|
||||
$modules/clustering/etcd3.py:
|
||||
maintainers: evrardjp
|
||||
ignore: vfauth
|
||||
@@ -418,6 +430,8 @@ files:
|
||||
maintainers: andsens
|
||||
$modules/monitoring/spectrum_device.py:
|
||||
maintainers: orgito
|
||||
$modules/monitoring/spectrum_model_attrs.py:
|
||||
maintainers: tgates81
|
||||
$modules/monitoring/stackdriver.py:
|
||||
maintainers: bwhaley
|
||||
$modules/monitoring/statsd.py:
|
||||
@@ -434,7 +448,7 @@ files:
|
||||
$modules/net_tools/dnsmadeeasy.py:
|
||||
maintainers: briceburg
|
||||
$modules/net_tools/haproxy.py:
|
||||
maintainers: ravibhure
|
||||
maintainers: ravibhure Normo
|
||||
$modules/net_tools/:
|
||||
maintainers: nerzhul
|
||||
$modules/net_tools/infinity/infinity.py:
|
||||
@@ -544,7 +558,8 @@ files:
|
||||
$modules/packaging/language/bundler.py:
|
||||
maintainers: thoiberg
|
||||
$modules/packaging/language/composer.py:
|
||||
maintainers: dmtrs resmo
|
||||
maintainers: dmtrs
|
||||
ignore: resmo
|
||||
$modules/packaging/language/cpanm.py:
|
||||
maintainers: fcuny
|
||||
$modules/packaging/language/easy_install.py:
|
||||
@@ -691,7 +706,9 @@ files:
|
||||
labels: zypper
|
||||
ignore: dirtyharrycallahan robinro
|
||||
$modules/packaging/os/zypper_repository.py:
|
||||
maintainers: matze
|
||||
maintainers: $team_suse
|
||||
labels: zypper
|
||||
ignore: matze
|
||||
$modules/remote_management/cobbler/:
|
||||
maintainers: dagwieers
|
||||
$modules/remote_management/dellemc/:
|
||||
@@ -753,6 +770,8 @@ files:
|
||||
ignore: erydo
|
||||
$modules/source_control/github/github_release.py:
|
||||
maintainers: adrianmoisey
|
||||
$modules/source_control/github/github_repo.py:
|
||||
maintainers: atorrescogollo
|
||||
$modules/source_control/github/:
|
||||
maintainers: stpierre
|
||||
$modules/source_control/gitlab/:
|
||||
@@ -835,8 +854,10 @@ files:
|
||||
labels: interfaces_file
|
||||
$modules/system/iptables_state.py:
|
||||
maintainers: quidame
|
||||
$modules/system/shutdown.py:
|
||||
maintainers: nitzmahone samdoran aminvakil
|
||||
$modules/system/java_cert.py:
|
||||
maintainers: haad
|
||||
maintainers: haad absynth76
|
||||
$modules/system/java_keystore.py:
|
||||
maintainers: Mogztter
|
||||
$modules/system/kernel_blacklist.py:
|
||||
@@ -1006,14 +1027,14 @@ macros:
|
||||
terminals: plugins/terminal
|
||||
team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross
|
||||
team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo
|
||||
team_consul: colin-nolan sgargan
|
||||
team_consul: sgargan
|
||||
team_cyberark_conjur: jvanderhoof ryanprior
|
||||
team_e_spirit: MatrixCrawler getjack
|
||||
team_flatpak: JayKayy oolongbrothers
|
||||
team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman
|
||||
team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman metanovii
|
||||
team_hpux: bcoca davx8342
|
||||
team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2
|
||||
team_ipa: Akasurde Nosmoht fxfitz
|
||||
team_ipa: Akasurde Nosmoht fxfitz justchris1
|
||||
team_jboss: Wolfant jairojunior wbrefvem
|
||||
team_keycloak: eikef ndclt
|
||||
team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber
|
||||
@@ -1021,12 +1042,12 @@ macros:
|
||||
team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder
|
||||
team_netapp: amit0701 carchi8py hulquest lmprice lonico ndswartz schmots1
|
||||
team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip
|
||||
team_opennebula: ilicmilan meerkampdvv rsmontero xorel
|
||||
team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding
|
||||
team_oracle: manojmeda mross22 nalsaber
|
||||
team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
|
||||
team_redfish: mraineri tomasg2012 xmadsen renxulei
|
||||
team_rhn: FlossWare alikins barnabycourt vritant
|
||||
team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone sieben
|
||||
team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
|
||||
team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom
|
||||
team_virt: joshainglis karmab Aversiste Thulium-Drake
|
||||
team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom sealor
|
||||
team_virt: joshainglis karmab tleguern Thulium-Drake Ajpantuso
|
||||
|
||||
181
CHANGELOG.rst
181
CHANGELOG.rst
@@ -6,6 +6,187 @@ Community General Release Notes
|
||||
|
||||
This changelog describes changes after version 1.0.0.
|
||||
|
||||
v2.5.3
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular bugfix release.
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- consul_acl - update the hcl allowlist to include all supported options (https://github.com/ansible-collections/community.general/pull/2495).
|
||||
- consul_kv lookup plugin - allow to set ``recurse``, ``index``, ``datacenter`` and ``token`` as keyword arguments (https://github.com/ansible-collections/community.general/issues/2124).
|
||||
- influxdb_user - allow creation of admin users when InfluxDB authentication is enabled but no other user exists on the database. In this scenario, InfluxDB 1.x allows only ``CREATE USER`` queries and rejects any other query (https://github.com/ansible-collections/community.general/issues/2364).
|
||||
- influxdb_user - fix bug where an influxdb user has no privileges for 2 or more databases (https://github.com/ansible-collections/community.general/pull/2499).
|
||||
- influxdb_user - fix bug which removed current privileges instead of appending them to existing ones (https://github.com/ansible-collections/community.general/issues/2609, https://github.com/ansible-collections/community.general/pull/2614).
|
||||
- iptables_state - call ``async_status`` action plugin rather than its module (https://github.com/ansible-collections/community.general/issues/2700).
|
||||
- iptables_state - fix a 'FutureWarning' in a regex and do some basic code clean up (https://github.com/ansible-collections/community.general/pull/2525).
|
||||
- iptables_state - fix a broken query of ``async_status`` result with current ansible-core development version (https://github.com/ansible-collections/community.general/issues/2627, https://github.com/ansible-collections/community.general/pull/2671).
|
||||
- iptables_state - fix initialization of iptables from null state when addressing more than one table (https://github.com/ansible-collections/community.general/issues/2523).
|
||||
- java_cert - fix issue with incorrect alias used on PKCS#12 certificate import (https://github.com/ansible-collections/community.general/pull/2560).
|
||||
- jenkins_plugin - use POST method for sending request to jenkins API when ``state`` option is one of ``enabled``, ``disabled``, ``pinned``, ``unpinned``, or ``absent`` (https://github.com/ansible-collections/community.general/issues/2510).
|
||||
- json_query filter plugin - avoid 'unknown type' errors for more Ansible internal types (https://github.com/ansible-collections/community.general/pull/2607).
|
||||
- module_helper module utils - ``CmdMixin`` must also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/pull/2731).
|
||||
- netcup_dns - use ``str(ex)`` instead of unreliable ``ex.message`` in exception handling to fix ``AttributeError`` in error cases (https://github.com/ansible-collections/community.general/pull/2590).
|
||||
- nmap inventory plugin - fix local variable error when cache is disabled (https://github.com/ansible-collections/community.general/issues/2512).
|
||||
- ovir4 inventory script - improve configparser creation to avoid crashes for options without values (https://github.com/ansible-collections/community.general/issues/674).
|
||||
- proxmox_kvm - fixed ``vmid`` return value when VM with ``name`` already exists (https://github.com/ansible-collections/community.general/issues/2648).
|
||||
- redis cache - improved connection string parsing (https://github.com/ansible-collections/community.general/issues/497).
|
||||
- rhsm_release - fix the issue that module considers 8, 7Client and 7Workstation as invalid releases (https://github.com/ansible-collections/community.general/pull/2571).
|
||||
- ssh_config - reduce stormssh searches based on host (https://github.com/ansible-collections/community.general/pull/2568/).
|
||||
- terraform - ensure the workspace is set back to its previous value when the apply fails (https://github.com/ansible-collections/community.general/pull/2634).
|
||||
- xfconf - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/issues/2715).
|
||||
- zypper_repository - fix idempotency on adding repository with ``$releasever`` and ``$basearch`` variables (https://github.com/ansible-collections/community.general/issues/1985).
|
||||
|
||||
v2.5.2
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular bugfix release.
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- composer - use ``no-interaction`` option when discovering available options to avoid an issue where composer hangs (https://github.com/ansible-collections/community.general/pull/2348).
|
||||
- hiera lookup plugin - converts the return type of plugin to unicode string (https://github.com/ansible-collections/community.general/pull/2329).
|
||||
- influxdb_retention_policy - ensure idempotent module execution with different duration and shard duration parameter values (https://github.com/ansible-collections/community.general/issues/2281).
|
||||
- influxdb_retention_policy - fix bug where ``INF`` duration values failed parsing (https://github.com/ansible-collections/community.general/pull/2385).
|
||||
- inventory and vault scripts - change file permissions to make vendored inventory and vault scripts executable (https://github.com/ansible-collections/community.general/pull/2337).
|
||||
- jenkins_plugin - fixes Python 2 compatibility issue (https://github.com/ansible-collections/community.general/pull/2340).
|
||||
- jira - fixed error when loading base64-encoded content as attachment (https://github.com/ansible-collections/community.general/pull/2349).
|
||||
- linode_v4 - changed the error message to point to the correct bugtracker URL (https://github.com/ansible-collections/community.general/pull/2430).
|
||||
- nmap inventory plugin - fix cache and constructed group support (https://github.com/ansible-collections/community.general/issues/2242).
|
||||
- nmcli - compare MAC addresses case insensitively to fix idempotency issue (https://github.com/ansible-collections/community.general/issues/2409).
|
||||
- nmcli - if type is ``bridge-slave`` add ``slave-type bridge`` to ``nmcli`` command (https://github.com/ansible-collections/community.general/issues/2408).
|
||||
- one_vm - Allow missing NIC keys (https://github.com/ansible-collections/community.general/pull/2435).
|
||||
- ovirt* modules - remove bad unnecessary import for current ansible-core development version (https://github.com/ansible-collections/community.general/pull/2381).
|
||||
- proxmox inventory - added handling of commas in KVM agent configuration string (https://github.com/ansible-collections/community.general/pull/2245).
|
||||
- puppet - replace ``console`` with ``stdout`` in ``logdest`` option when ``all`` has been chosen (https://github.com/ansible-collections/community.general/issues/1190).
|
||||
- stackpath_compute inventory script - fix broken validation checks for client ID and client secret (https://github.com/ansible-collections/community.general/pull/2448).
|
||||
- svr4pkg - convert string to a bytes-like object to avoid ``TypeError`` with Python 3 (https://github.com/ansible-collections/community.general/issues/2373).
|
||||
- terraform - fix issue that cause the destroy to fail because from Terraform 0.15 on, the ``terraform destroy -force`` option is replaced with ``terraform destroy -auto-approve`` (https://github.com/ansible-collections/community.general/issues/2247).
|
||||
- terraform - fix issue that cause the execution fail because from Terraform 0.15 on, the ``-var`` and ``-var-file`` options are no longer available on ``terraform validate`` (https://github.com/ansible-collections/community.general/pull/2246).
|
||||
- terraform - remove uses of ``use_unsafe_shell=True`` (https://github.com/ansible-collections/community.general/pull/2246).
|
||||
- zfs - certain ZFS properties, especially sizes, would lead to a task being falsely marked as "changed" even when no actual change was made (https://github.com/ansible-collections/community.general/issues/975, https://github.com/ansible-collections/community.general/pull/2454).
|
||||
|
||||
v2.5.1
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Bugfix release for some bugs discovered right after the 2.5.0 release.
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- funcd connection plugin - can now load (https://github.com/ansible-collections/community.general/pull/2235).
|
||||
- jira - fixed calling of ``isinstance`` (https://github.com/ansible-collections/community.general/issues/2234).
|
||||
|
||||
v2.5.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular feature release. Will be the last 2.x.0 minor release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- apache2_mod_proxy - refactored/cleaned-up part of the code (https://github.com/ansible-collections/community.general/pull/2142).
|
||||
- atomic_container - using ``get_bin_path()`` before calling ``run_command()`` (https://github.com/ansible-collections/community.general/pull/2144).
|
||||
- atomic_host - using ``get_bin_path()`` before calling ``run_command()`` (https://github.com/ansible-collections/community.general/pull/2144).
|
||||
- atomic_image - using ``get_bin_path()`` before calling ``run_command()`` (https://github.com/ansible-collections/community.general/pull/2144).
|
||||
- beadm - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160).
|
||||
- bitbucket_pipeline_variable - removed unreachable code (https://github.com/ansible-collections/community.general/pull/2157).
|
||||
- hiera lookup - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160).
|
||||
- ipa_config - add new options ``ipaconfigstring``, ``ipadefaultprimarygroup``, ``ipagroupsearchfields``, ``ipahomesrootdir``, ``ipabrkauthzdata``, ``ipamaxusernamelength``, ``ipapwdexpadvnotify``, ``ipasearchrecordslimit``, ``ipasearchtimelimit``, ``ipauserauthtype``, and ``ipausersearchfields`` (https://github.com/ansible-collections/community.general/pull/2116).
|
||||
- ipa_user - fix ``userauthtype`` option to take in list of strings for the multi-select field instead of single string (https://github.com/ansible-collections/community.general/pull/2174).
|
||||
- ipwcli_dns - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160).
|
||||
- java_cert - change ``state: present`` to check certificates by hash, not just alias name (https://github.com/ansible/ansible/issues/43249).
|
||||
- jira - added ``attach`` operation, which allows a user to attach a file to an issue (https://github.com/ansible-collections/community.general/pull/2192).
|
||||
- jira - added parameter ``account_id`` for compatibility with recent versions of JIRA (https://github.com/ansible-collections/community.general/issues/818, https://github.com/ansible-collections/community.general/pull/1978).
|
||||
- known_hosts module utils - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160).
|
||||
- module_helper module utils - added management of facts and adhoc setting of the initial value for variables (https://github.com/ansible-collections/community.general/pull/2188).
|
||||
- module_helper module utils - added mechanism to manage variables, providing automatic output of variables, change status and diff information (https://github.com/ansible-collections/community.general/pull/2162).
|
||||
- nictagadm - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160).
|
||||
- npm - add ``no_bin_links`` option (https://github.com/ansible-collections/community.general/issues/2128).
|
||||
- ovh_ip_failover - removed unreachable code (https://github.com/ansible-collections/community.general/pull/2157).
|
||||
- proxmox inventory plugin - added ``Constructable`` class to the inventory to provide options ``strict``, ``keyed_groups``, ``groups``, and ``compose`` (https://github.com/ansible-collections/community.general/pull/2180).
|
||||
- proxmox inventory plugin - added ``proxmox_agent_interfaces`` fact describing network interfaces returned from a QEMU guest agent (https://github.com/ansible-collections/community.general/pull/2148).
|
||||
- rhevm - removed unreachable code (https://github.com/ansible-collections/community.general/pull/2157).
|
||||
- smartos_image_info - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160).
|
||||
- svr4pkg - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160).
|
||||
- xattr - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160).
|
||||
- xfconf - changed implementation to use ``ModuleHelper`` new features (https://github.com/ansible-collections/community.general/pull/2188).
|
||||
- zfs_facts - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160).
|
||||
- zpool_facts - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160).
|
||||
|
||||
Security Fixes
|
||||
--------------
|
||||
|
||||
- java_cert - remove password from ``run_command`` arguments (https://github.com/ansible-collections/community.general/pull/2008).
|
||||
- java_keystore - pass secret to keytool through an environment variable to not expose it as a commandline argument (https://github.com/ansible-collections/community.general/issues/1668).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- dimensiondata_network - bug when formatting message, instead of % a simple comma was used (https://github.com/ansible-collections/community.general/pull/2139).
|
||||
- github_repo - PyGithub bug does not allow explicit port in ``base_url``. Specifying port is not required (https://github.com/PyGithub/PyGithub/issues/1913).
|
||||
- haproxy - fix a bug preventing haproxy from properly entering ``DRAIN`` mode (https://github.com/ansible-collections/community.general/issues/1913).
|
||||
- ipa_user - allow ``sshpubkey`` to permit multiple word comments (https://github.com/ansible-collections/community.general/pull/2159).
|
||||
- java_cert - allow setting ``state: absent`` by providing just the ``cert_alias`` (https://github.com/ansible/ansible/issues/27982).
|
||||
- java_cert - properly handle proxy arguments when the scheme is provided (https://github.com/ansible/ansible/issues/54481).
|
||||
- java_keystore - improve error handling and return ``cmd`` as documented. Force ``LANG``, ``LC_ALL`` and ``LC_MESSAGES`` environment variables to ``C`` to rely on ``keytool`` output parsing. Fix pylint's ``unused-variable`` and ``no-else-return`` hints (https://github.com/ansible-collections/community.general/pull/2183).
|
||||
- java_keystore - use tempfile lib to create temporary files with randomized names, and remove the temporary PKCS#12 keystore as well as other materials (https://github.com/ansible-collections/community.general/issues/1667).
|
||||
- jira - fixed fields' update in ticket transitions (https://github.com/ansible-collections/community.general/issues/818).
|
||||
- kibana_plugin - added missing parameters to ``remove_plugin`` when using ``state=present force=true``, and fix potential quoting errors when invoking ``kibana`` (https://github.com/ansible-collections/community.general/pull/2143).
|
||||
- module_helper module utils - fixed decorator ``cause_changes`` (https://github.com/ansible-collections/community.general/pull/2203).
|
||||
- pkgutil - fixed calls to ``list.extend()`` (https://github.com/ansible-collections/community.general/pull/2161).
|
||||
- vmadm - correct type of list elements in ``resolvers`` parameter (https://github.com/ansible-collections/community.general/issues/2135).
|
||||
- xfconf - module was not honoring check mode when ``state`` was ``absent`` (https://github.com/ansible-collections/community.general/pull/2185).
|
||||
|
||||
New Plugins
|
||||
-----------
|
||||
|
||||
Filter
|
||||
~~~~~~
|
||||
|
||||
- dict - The ``dict`` function as a filter: converts a list of tuples to a dictionary
|
||||
- path_join - Redirects to ansible.builtin.path_join for ansible-base 2.10 or newer, and provides a compatible implementation for Ansible 2.9
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
Identity
|
||||
~~~~~~~~
|
||||
|
||||
ipa
|
||||
^^^
|
||||
|
||||
- ipa_otpconfig - Manage FreeIPA OTP Configuration Settings
|
||||
- ipa_otptoken - Manage FreeIPA OTPs
|
||||
|
||||
Monitoring
|
||||
~~~~~~~~~~
|
||||
|
||||
- spectrum_model_attrs - Enforce a model's attributes in CA Spectrum.
|
||||
|
||||
Net Tools
|
||||
~~~~~~~~~
|
||||
|
||||
pritunl
|
||||
^^^^^^^
|
||||
|
||||
- pritunl_org - Manages Pritunl Organizations using the Pritunl API
|
||||
- pritunl_org_info - List Pritunl Organizations using the Pritunl API
|
||||
|
||||
v2.4.0
|
||||
======
|
||||
|
||||
|
||||
32
CONTRIBUTING.md
Normal file
32
CONTRIBUTING.md
Normal file
@@ -0,0 +1,32 @@
|
||||
# Contributing
|
||||
|
||||
We follow [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our contributions and interactions within this repository.
|
||||
|
||||
If you are a committer, also refer to the [collection's committer guidelines](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md).
|
||||
|
||||
## Issue tracker
|
||||
|
||||
Whether you are looking for an opportunity to contribute or you found a bug and already know how to solve it, please go to the [issue tracker](https://github.com/ansible-collections/community.general/issues).
|
||||
There you can find feature ideas to implement, reports about bugs to solve, or submit an issue to discuss your idea before implementing it which can help choose a right direction at the beginning of your work and potentially save a lot of time and effort.
|
||||
Also somebody may already have started discussing or working on implementing the same or a similar idea,
|
||||
so you can cooperate to create a better solution together.
|
||||
|
||||
* If you are interested in starting with an easy issue, look for [issues with an `easyfix` label](https://github.com/ansible-collections/community.general/labels/easyfix).
|
||||
* Often issues that are waiting for contributors to pick up have [the `waiting_on_contributor` label](https://github.com/ansible-collections/community.general/labels/waiting_on_contributor).
|
||||
|
||||
## Open pull requests
|
||||
|
||||
Look through currently [open pull requests](https://github.com/ansible-collections/community.general/pulls).
|
||||
You can help by reviewing them. Reviews help move pull requests to merge state. Some good pull requests cannot be merged only due to a lack of reviews. And it is always worth saying that good reviews are often more valuable than pull requests themselves.
|
||||
Note that reviewing does not only mean code review, but also offering comments on new interfaces added to existing plugins/modules, interfaces of new plugins/modules, improving language (not everyone is a native English speaker), or testing bugfixes and new features!
|
||||
|
||||
Also, consider taking up a valuable, reviewed, but abandoned pull request which you could politely ask the original authors to complete yourself.
|
||||
|
||||
* Try committing your changes with an informative but short commit message.
|
||||
* All commits of a pull request branch will be squashed into one commit at last. That does not mean you must have only one commit on your pull request, though!
|
||||
* Please try not to force-push if it is not needed, so reviewers and other users looking at your pull request later can see the pull request commit history.
|
||||
* Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the repository checkout.
|
||||
|
||||
You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst).
|
||||
|
||||
If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it.
|
||||
12
README.md
12
README.md
@@ -7,9 +7,11 @@ This repo contains the `community.general` Ansible Collection. The collection in
|
||||
|
||||
You can find [documentation for this collection on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
|
||||
|
||||
Please note that this collection does **not** support Windows targets. Only connection plugins included in this collection might support Windows targets, and will explicitly mention that in their documentation if they do so.
|
||||
|
||||
## Tested with Ansible
|
||||
|
||||
Tested with the current Ansible 2.9 and 2.10 releases and the current development version of Ansible. Ansible versions before 2.9.10 are not supported.
|
||||
Tested with the current Ansible 2.9, ansible-base 2.10 and ansible-core 2.11 releases and the current development version of ansible-core. Ansible versions before 2.9.10 are not supported.
|
||||
|
||||
## External requirements
|
||||
|
||||
@@ -48,6 +50,8 @@ export COLLECTIONS_PATH=$(pwd)/collections:$COLLECTIONS_PATH
|
||||
|
||||
You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).
|
||||
|
||||
Also for some notes specific to this collection see [our CONTRIBUTING documentation](https://github.com/ansible-collections/community.general/blob/main/CONTRIBUTING.md).
|
||||
|
||||
### Running tests
|
||||
|
||||
See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#testing-collections).
|
||||
@@ -56,10 +60,10 @@ See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collectio
|
||||
|
||||
We have a dedicated Working Group for Ansible development.
|
||||
|
||||
You can find other people interested on the following Freenode IRC channels -
|
||||
You can find other people interested on the following [Libera.chat](https://libera.chat/) IRC channels -
|
||||
- `#ansible` - For general use questions and support.
|
||||
- `#ansible-devel` - For discussions on developer topics and code related to features or bugs.
|
||||
- `#ansible-community` - For discussions on community topics and community meetings.
|
||||
- `#ansible-devel` - For discussions on developer topics and code related to features or bugs in ansible-core.
|
||||
- `#ansible-community` - For discussions on community topics and community meetings, and for general development questions for community collections.
|
||||
|
||||
For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community).
|
||||
|
||||
|
||||
@@ -1661,3 +1661,297 @@ releases:
|
||||
name: loganalytics
|
||||
namespace: null
|
||||
release_date: '2021-03-30'
|
||||
2.5.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- dimensiondata_network - bug when formatting message, instead of % a simple
|
||||
comma was used (https://github.com/ansible-collections/community.general/pull/2139).
|
||||
- github_repo - PyGithub bug does not allow explicit port in ``base_url``. Specifying
|
||||
port is not required (https://github.com/PyGithub/PyGithub/issues/1913).
|
||||
- haproxy - fix a bug preventing haproxy from properly entering ``DRAIN`` mode
|
||||
(https://github.com/ansible-collections/community.general/issues/1913).
|
||||
- ipa_user - allow ``sshpubkey`` to permit multiple word comments (https://github.com/ansible-collections/community.general/pull/2159).
|
||||
- 'java_cert - allow setting ``state: absent`` by providing just the ``cert_alias``
|
||||
(https://github.com/ansible/ansible/issues/27982).'
|
||||
- java_cert - properly handle proxy arguments when the scheme is provided (https://github.com/ansible/ansible/issues/54481).
|
||||
- java_keystore - improve error handling and return ``cmd`` as documented. Force
|
||||
``LANG``, ``LC_ALL`` and ``LC_MESSAGES`` environment variables to ``C`` to
|
||||
rely on ``keytool`` output parsing. Fix pylint's ``unused-variable`` and ``no-else-return``
|
||||
hints (https://github.com/ansible-collections/community.general/pull/2183).
|
||||
- java_keystore - use tempfile lib to create temporary files with randomized
|
||||
names, and remove the temporary PKCS#12 keystore as well as other materials
|
||||
(https://github.com/ansible-collections/community.general/issues/1667).
|
||||
- jira - fixed fields' update in ticket transitions (https://github.com/ansible-collections/community.general/issues/818).
|
||||
- kibana_plugin - added missing parameters to ``remove_plugin`` when using ``state=present
|
||||
force=true``, and fix potential quoting errors when invoking ``kibana`` (https://github.com/ansible-collections/community.general/pull/2143).
|
||||
- module_helper module utils - fixed decorator ``cause_changes`` (https://github.com/ansible-collections/community.general/pull/2203).
|
||||
- pkgutil - fixed calls to ``list.extend()`` (https://github.com/ansible-collections/community.general/pull/2161).
|
||||
- vmadm - correct type of list elements in ``resolvers`` parameter (https://github.com/ansible-collections/community.general/issues/2135).
|
||||
- xfconf - module was not honoring check mode when ``state`` was ``absent``
|
||||
(https://github.com/ansible-collections/community.general/pull/2185).
|
||||
minor_changes:
|
||||
- apache2_mod_proxy - refactored/cleaned-up part of the code (https://github.com/ansible-collections/community.general/pull/2142).
|
||||
- atomic_container - using ``get_bin_path()`` before calling ``run_command()``
|
||||
(https://github.com/ansible-collections/community.general/pull/2144).
|
||||
- atomic_host - using ``get_bin_path()`` before calling ``run_command()`` (https://github.com/ansible-collections/community.general/pull/2144).
|
||||
- atomic_image - using ``get_bin_path()`` before calling ``run_command()`` (https://github.com/ansible-collections/community.general/pull/2144).
|
||||
- beadm - minor refactor converting multiple statements to a single list literal
|
||||
(https://github.com/ansible-collections/community.general/pull/2160).
|
||||
- bitbucket_pipeline_variable - removed unreachable code (https://github.com/ansible-collections/community.general/pull/2157).
|
||||
- hiera lookup - minor refactor converting multiple statements to a single list
|
||||
literal (https://github.com/ansible-collections/community.general/pull/2160).
|
||||
- ipa_config - add new options ``ipaconfigstring``, ``ipadefaultprimarygroup``,
|
||||
``ipagroupsearchfields``, ``ipahomesrootdir``, ``ipabrkauthzdata``, ``ipamaxusernamelength``,
|
||||
``ipapwdexpadvnotify``, ``ipasearchrecordslimit``, ``ipasearchtimelimit``,
|
||||
``ipauserauthtype``, and ``ipausersearchfields`` (https://github.com/ansible-collections/community.general/pull/2116).
|
||||
- ipa_user - fix ``userauthtype`` option to take in list of strings for the
|
||||
multi-select field instead of single string (https://github.com/ansible-collections/community.general/pull/2174).
|
||||
- ipwcli_dns - minor refactor converting multiple statements to a single list
|
||||
literal (https://github.com/ansible-collections/community.general/pull/2160).
|
||||
- 'java_cert - change ``state: present`` to check certificates by hash, not
|
||||
just alias name (https://github.com/ansible/ansible/issues/43249).'
|
||||
- jira - added ``attach`` operation, which allows a user to attach a file to
|
||||
an issue (https://github.com/ansible-collections/community.general/pull/2192).
|
||||
- jira - added parameter ``account_id`` for compatibility with recent versions
|
||||
of JIRA (https://github.com/ansible-collections/community.general/issues/818,
|
||||
https://github.com/ansible-collections/community.general/pull/1978).
|
||||
- known_hosts module utils - minor refactor converting multiple statements to
|
||||
a single list literal (https://github.com/ansible-collections/community.general/pull/2160).
|
||||
- module_helper module utils - added management of facts and adhoc setting of
|
||||
the initial value for variables (https://github.com/ansible-collections/community.general/pull/2188).
|
||||
- module_helper module utils - added mechanism to manage variables, providing
|
||||
automatic output of variables, change status and diff information (https://github.com/ansible-collections/community.general/pull/2162).
|
||||
- nictagadm - minor refactor converting multiple statements to a single list
|
||||
literal (https://github.com/ansible-collections/community.general/pull/2160).
|
||||
- npm - add ``no_bin_links`` option (https://github.com/ansible-collections/community.general/issues/2128).
|
||||
- ovh_ip_failover - removed unreachable code (https://github.com/ansible-collections/community.general/pull/2157).
|
||||
- proxmox inventory plugin - added ``Constructable`` class to the inventory
|
||||
to provide options ``strict``, ``keyed_groups``, ``groups``, and ``compose``
|
||||
(https://github.com/ansible-collections/community.general/pull/2180).
|
||||
- proxmox inventory plugin - added ``proxmox_agent_interfaces`` fact describing
|
||||
network interfaces returned from a QEMU guest agent (https://github.com/ansible-collections/community.general/pull/2148).
|
||||
- rhevm - removed unreachable code (https://github.com/ansible-collections/community.general/pull/2157).
|
||||
- smartos_image_info - minor refactor converting multiple statements to a single
|
||||
list literal (https://github.com/ansible-collections/community.general/pull/2160).
|
||||
- svr4pkg - minor refactor converting multiple statements to a single list literal
|
||||
(https://github.com/ansible-collections/community.general/pull/2160).
|
||||
- xattr - minor refactor converting multiple statements to a single list literal
|
||||
(https://github.com/ansible-collections/community.general/pull/2160).
|
||||
- xfconf - changed implementation to use ``ModuleHelper`` new features (https://github.com/ansible-collections/community.general/pull/2188).
|
||||
- zfs_facts - minor refactor converting multiple statements to a single list
|
||||
literal (https://github.com/ansible-collections/community.general/pull/2160).
|
||||
- zpool_facts - minor refactor converting multiple statements to a single list
|
||||
literal (https://github.com/ansible-collections/community.general/pull/2160).
|
||||
release_summary: Regular feature release. Will be the last 2.x.0 minor release.
|
||||
security_fixes:
|
||||
- java_cert - remove password from ``run_command`` arguments (https://github.com/ansible-collections/community.general/pull/2008).
|
||||
- java_keystore - pass secret to keytool through an environment variable to
|
||||
not expose it as a commandline argument (https://github.com/ansible-collections/community.general/issues/1668).
|
||||
fragments:
|
||||
- 1978-jira-transition-logic.yml
|
||||
- 1993-haproxy-fix-draining.yml
|
||||
- 2.5.0.yml
|
||||
- 2008-update-java-cert-replace-cert-when-changed.yml
|
||||
- 2116-add-fields-to-ipa-config-module.yml
|
||||
- 2135-vmadm-resolvers-type-fix.yml
|
||||
- 2139-dimensiondata_network-str-format.yml
|
||||
- 2142-apache2_mod_proxy-cleanup.yml
|
||||
- 2143-kibana_plugin-fixed-function-calls.yml
|
||||
- 2144-atomic_get_bin_path.yml
|
||||
- 2146-npm-add_no_bin_links_option.yaml
|
||||
- 2148-proxmox-inventory-agent-interfaces.yml
|
||||
- 2157-unreachable-code.yml
|
||||
- 2159-ipa-user-sshpubkey-multi-word-comments.yaml
|
||||
- 2160-list-literals.yml
|
||||
- 2161-pkgutil-list-extend.yml
|
||||
- 2162-modhelper-variables.yml
|
||||
- 2162-proxmox-constructable.yml
|
||||
- 2163-java_keystore_1667_improve_temp_files_storage.yml
|
||||
- 2174-ipa-user-userauthtype-multiselect.yml
|
||||
- 2177-java_keystore_1668_dont_expose_secrets_on_cmdline.yml
|
||||
- 2183-java_keystore_improve_error_handling.yml
|
||||
- 2185-xfconf-absent-check-mode.yml
|
||||
- 2188-xfconf-modhelper-variables.yml
|
||||
- 2192-add-jira-attach.yml
|
||||
- 2203-modhelper-cause-changes-deco.yml
|
||||
- 2204-github_repo-fix-baseurl_port.yml
|
||||
- dict-filter.yml
|
||||
- path_join-shim-filter.yml
|
||||
modules:
|
||||
- description: Manage FreeIPA OTP Configuration Settings
|
||||
name: ipa_otpconfig
|
||||
namespace: identity.ipa
|
||||
- description: Manage FreeIPA OTPs
|
||||
name: ipa_otptoken
|
||||
namespace: identity.ipa
|
||||
- description: Manages Pritunl Organizations using the Pritunl API
|
||||
name: pritunl_org
|
||||
namespace: net_tools.pritunl
|
||||
- description: List Pritunl Organizations using the Pritunl API
|
||||
name: pritunl_org_info
|
||||
namespace: net_tools.pritunl
|
||||
- description: Enforce a model's attributes in CA Spectrum.
|
||||
name: spectrum_model_attrs
|
||||
namespace: monitoring
|
||||
plugins:
|
||||
filter:
|
||||
- description: 'The ``dict`` function as a filter: converts a list of tuples
|
||||
to a dictionary'
|
||||
name: dict
|
||||
namespace: null
|
||||
- description: Redirects to ansible.builtin.path_join for ansible-base 2.10
|
||||
or newer, and provides a compatible implementation for Ansible 2.9
|
||||
name: path_join
|
||||
namespace: null
|
||||
release_date: '2021-04-13'
|
||||
2.5.1:
|
||||
changes:
|
||||
bugfixes:
|
||||
- funcd connection plugin - can now load (https://github.com/ansible-collections/community.general/pull/2235).
|
||||
- jira - fixed calling of ``isinstance`` (https://github.com/ansible-collections/community.general/issues/2234).
|
||||
release_summary: Bugfix release for some bugs discovered right after the 2.5.0
|
||||
release.
|
||||
fragments:
|
||||
- 2.5.1.yml
|
||||
- 2236-jira-isinstance.yml
|
||||
- allow_funcd_to_load.yml
|
||||
release_date: '2021-04-14'
|
||||
2.5.2:
|
||||
changes:
|
||||
bugfixes:
|
||||
- composer - use ``no-interaction`` option when discovering available options
|
||||
to avoid an issue where composer hangs (https://github.com/ansible-collections/community.general/pull/2348).
|
||||
- hiera lookup plugin - converts the return type of plugin to unicode string
|
||||
(https://github.com/ansible-collections/community.general/pull/2329).
|
||||
- influxdb_retention_policy - ensure idempotent module execution with different
|
||||
duration and shard duration parameter values (https://github.com/ansible-collections/community.general/issues/2281).
|
||||
- influxdb_retention_policy - fix bug where ``INF`` duration values failed parsing
|
||||
(https://github.com/ansible-collections/community.general/pull/2385).
|
||||
- inventory and vault scripts - change file permissions to make vendored inventory
|
||||
and vault scripts executable (https://github.com/ansible-collections/community.general/pull/2337).
|
||||
- jenkins_plugin - fixes Python 2 compatibility issue (https://github.com/ansible-collections/community.general/pull/2340).
|
||||
- jira - fixed error when loading base64-encoded content as attachment (https://github.com/ansible-collections/community.general/pull/2349).
|
||||
- linode_v4 - changed the error message to point to the correct bugtracker URL
|
||||
(https://github.com/ansible-collections/community.general/pull/2430).
|
||||
- nmap inventory plugin - fix cache and constructed group support (https://github.com/ansible-collections/community.general/issues/2242).
|
||||
- nmcli - compare MAC addresses case insensitively to fix idempotency issue
|
||||
(https://github.com/ansible-collections/community.general/issues/2409).
|
||||
- nmcli - if type is ``bridge-slave`` add ``slave-type bridge`` to ``nmcli``
|
||||
command (https://github.com/ansible-collections/community.general/issues/2408).
|
||||
- one_vm - Allow missing NIC keys (https://github.com/ansible-collections/community.general/pull/2435).
|
||||
- ovirt* modules - remove bad unnecessary import for current ansible-core development
|
||||
version (https://github.com/ansible-collections/community.general/pull/2381).
|
||||
- proxmox inventory - added handling of commas in KVM agent configuration string
|
||||
(https://github.com/ansible-collections/community.general/pull/2245).
|
||||
- puppet - replace ``console`` with ``stdout`` in ``logdest`` option when ``all``
|
||||
has been chosen (https://github.com/ansible-collections/community.general/issues/1190).
|
||||
- stackpath_compute inventory script - fix broken validation checks for client
|
||||
ID and client secret (https://github.com/ansible-collections/community.general/pull/2448).
|
||||
- svr4pkg - convert string to a bytes-like object to avoid ``TypeError`` with
|
||||
Python 3 (https://github.com/ansible-collections/community.general/issues/2373).
|
||||
- terraform - fix issue that cause the destroy to fail because from Terraform
|
||||
0.15 on, the ``terraform destroy -force`` option is replaced with ``terraform
|
||||
destroy -auto-approve`` (https://github.com/ansible-collections/community.general/issues/2247).
|
||||
- terraform - fix issue that cause the execution fail because from Terraform
|
||||
0.15 on, the ``-var`` and ``-var-file`` options are no longer available on
|
||||
``terraform validate`` (https://github.com/ansible-collections/community.general/pull/2246).
|
||||
- terraform - remove uses of ``use_unsafe_shell=True`` (https://github.com/ansible-collections/community.general/pull/2246).
|
||||
- zfs - certain ZFS properties, especially sizes, would lead to a task being
|
||||
falsely marked as "changed" even when no actual change was made (https://github.com/ansible-collections/community.general/issues/975,
|
||||
https://github.com/ansible-collections/community.general/pull/2454).
|
||||
release_summary: Regular bugfix release.
|
||||
fragments:
|
||||
- 2.5.2.yml
|
||||
- 2245-proxmox_fix_agent_string_handling.yml
|
||||
- 2246-terraform.yaml
|
||||
- 2282-nmap-fix-cache-support.yml
|
||||
- 2284-influxdb_retention_policy-fix_duration_parsing.yml
|
||||
- 2284-influxdb_retention_policy-idempotence.yml
|
||||
- 2329-hiera-lookup-plugin-return-type.yaml
|
||||
- 2337-mark-inventory-scripts-executable.yml
|
||||
- 2340-jenkins_plugin-py2.yml
|
||||
- 2348-composer-no-interaction-option-discovery-to-avoid-hang.yaml
|
||||
- 2349-jira-bugfix-b64decode.yml
|
||||
- 2373-svr4pkg-fix-typeerror.yml
|
||||
- 2407-puppet-change_stdout_to_console.yaml
|
||||
- 2409-nmcli_add_slave-type_bridge_to_nmcli_command_if_type_is_bridge-slave.yml
|
||||
- 2416-nmcli_compare_mac_addresses_case_insensitively.yml
|
||||
- 2430-linodev4-error-message.yml
|
||||
- 2435-one_vm-fix_missing_keys.yml
|
||||
- 2448-stackpath_compute-fix.yml
|
||||
- 2454-detect_zfs_changed.yml
|
||||
- ovirt-fixup.yml
|
||||
release_date: '2021-05-11'
|
||||
2.5.3:
|
||||
changes:
|
||||
bugfixes:
|
||||
- consul_acl - update the hcl allowlist to include all supported options (https://github.com/ansible-collections/community.general/pull/2495).
|
||||
- consul_kv lookup plugin - allow to set ``recurse``, ``index``, ``datacenter``
|
||||
and ``token`` as keyword arguments (https://github.com/ansible-collections/community.general/issues/2124).
|
||||
- influxdb_user - allow creation of admin users when InfluxDB authentication
|
||||
is enabled but no other user exists on the database. In this scenario, InfluxDB
|
||||
1.x allows only ``CREATE USER`` queries and rejects any other query (https://github.com/ansible-collections/community.general/issues/2364).
|
||||
- influxdb_user - fix bug where an influxdb user has no privileges for 2 or
|
||||
more databases (https://github.com/ansible-collections/community.general/pull/2499).
|
||||
- influxdb_user - fix bug which removed current privileges instead of appending
|
||||
them to existing ones (https://github.com/ansible-collections/community.general/issues/2609,
|
||||
https://github.com/ansible-collections/community.general/pull/2614).
|
||||
- iptables_state - call ``async_status`` action plugin rather than its module
|
||||
(https://github.com/ansible-collections/community.general/issues/2700).
|
||||
- iptables_state - fix a 'FutureWarning' in a regex and do some basic code clean
|
||||
up (https://github.com/ansible-collections/community.general/pull/2525).
|
||||
- iptables_state - fix a broken query of ``async_status`` result with current
|
||||
ansible-core development version (https://github.com/ansible-collections/community.general/issues/2627,
|
||||
https://github.com/ansible-collections/community.general/pull/2671).
|
||||
- iptables_state - fix initialization of iptables from null state when addressing
|
||||
more than one table (https://github.com/ansible-collections/community.general/issues/2523).
|
||||
- java_cert - fix issue with incorrect alias used on PKCS#12 certificate import
|
||||
(https://github.com/ansible-collections/community.general/pull/2560).
|
||||
- jenkins_plugin - use POST method for sending request to jenkins API when ``state``
|
||||
option is one of ``enabled``, ``disabled``, ``pinned``, ``unpinned``, or ``absent``
|
||||
(https://github.com/ansible-collections/community.general/issues/2510).
|
||||
- json_query filter plugin - avoid 'unknown type' errors for more Ansible internal
|
||||
types (https://github.com/ansible-collections/community.general/pull/2607).
|
||||
- module_helper module utils - ``CmdMixin`` must also use ``LC_ALL`` to enforce
|
||||
locale choice (https://github.com/ansible-collections/community.general/pull/2731).
|
||||
- netcup_dns - use ``str(ex)`` instead of unreliable ``ex.message`` in exception
|
||||
handling to fix ``AttributeError`` in error cases (https://github.com/ansible-collections/community.general/pull/2590).
|
||||
- nmap inventory plugin - fix local variable error when cache is disabled (https://github.com/ansible-collections/community.general/issues/2512).
|
||||
- ovir4 inventory script - improve configparser creation to avoid crashes for
|
||||
options without values (https://github.com/ansible-collections/community.general/issues/674).
|
||||
- proxmox_kvm - fixed ``vmid`` return value when VM with ``name`` already exists
|
||||
(https://github.com/ansible-collections/community.general/issues/2648).
|
||||
- redis cache - improved connection string parsing (https://github.com/ansible-collections/community.general/issues/497).
|
||||
- rhsm_release - fix the issue that module considers 8, 7Client and 7Workstation
|
||||
as invalid releases (https://github.com/ansible-collections/community.general/pull/2571).
|
||||
- ssh_config - reduce stormssh searches based on host (https://github.com/ansible-collections/community.general/pull/2568/).
|
||||
- terraform - ensure the workspace is set back to its previous value when the
|
||||
apply fails (https://github.com/ansible-collections/community.general/pull/2634).
|
||||
- xfconf - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/issues/2715).
|
||||
- zypper_repository - fix idempotency on adding repository with ``$releasever``
|
||||
and ``$basearch`` variables (https://github.com/ansible-collections/community.general/issues/1985).
|
||||
release_summary: Regular bugfix release.
|
||||
fragments:
|
||||
- 1085-consul-acl-hcl-whitelist-update.yml
|
||||
- 2.5.3.yml
|
||||
- 2126-consul_kv-pass-token.yml
|
||||
- 2364-influxdb_user-first_user.yml
|
||||
- 2461-ovirt4-fix-configparser.yml
|
||||
- 2499-influxdb_user-fix-multiple-no-privileges.yml
|
||||
- 2510-jenkins_plugin_use_post_method.yml
|
||||
- 2518-nmap-fix-cache-disabled.yml
|
||||
- 2525-iptables_state-fix-initialization-command.yml
|
||||
- 2560-java_cert-pkcs12-alias-bugfix.yml
|
||||
- 2568-ssh_config-reduce-stormssh-searches-based-on-host.yml
|
||||
- 2571-rhsm_release-fix-release_matcher.yaml
|
||||
- 2579-redis-cache-ipv6.yml
|
||||
- 2590-netcup_dns-exception-no-message-attr.yml
|
||||
- 2614-influxdb_user-fix-issue-introduced-in-PR#2499.yml
|
||||
- 2634-terraform-switch-workspace.yml
|
||||
- 2648-proxmox_kvm-fix-vmid-return-value.yml
|
||||
- 2671-fix-broken-query-of-async_status-result.yml
|
||||
- 2711-fix-iptables_state-2700-async_status-call.yml
|
||||
- 2722-zypper_repository-fix_idempotency_on_adding_repo_with_releasever.yml
|
||||
- 2731-mh-cmd-locale.yml
|
||||
- json_query_more_types.yml
|
||||
release_date: '2021-06-08'
|
||||
|
||||
74
commit-rights.md
Normal file
74
commit-rights.md
Normal file
@@ -0,0 +1,74 @@
|
||||
Committers Guidelines for community.general
|
||||
===========================================
|
||||
|
||||
This document is based on the [Ansible committer guidelines](https://github.com/ansible/ansible/blob/b57444af14062ec96e0af75fdfc2098c74fe2d9a/docs/docsite/rst/community/committer_guidelines.rst) ([latest version](https://docs.ansible.com/ansible/devel/community/committer_guidelines.html)).
|
||||
|
||||
These are the guidelines for people with commit privileges on the Ansible Community General Collection GitHub repository. Please read the guidelines before you commit.
|
||||
|
||||
These guidelines apply to everyone. At the same time, this is NOT a process document. So just use good judgment. You have been given commit access because we trust your judgment.
|
||||
|
||||
That said, use the trust wisely.
|
||||
|
||||
If you abuse the trust and break components and builds, and so on, the trust level falls and you may be asked not to commit or you may lose your commit privileges.
|
||||
|
||||
Our workflow on GitHub
|
||||
----------------------
|
||||
|
||||
As a committer, you may already know this, but our workflow forms a lot of our team policies. Please ensure you are aware of the following workflow steps:
|
||||
|
||||
* Fork the repository upon which you want to do some work to your own personal repository
|
||||
* Work on the specific branch upon which you need to commit
|
||||
* Create a Pull Request back to the collection repository and await reviews
|
||||
* Adjust code as necessary based on the Comments provided
|
||||
* Ask someone from the other committers to do a final review and merge
|
||||
|
||||
Sometimes, committers merge their own pull requests. This section is a set of guidelines. If you are changing a comma in a doc or making a very minor change, you can use your best judgement. This is another trust thing. The process is critical for any major change, but for little things or getting something done quickly, use your best judgement and make sure people on the team are aware of your work.
|
||||
|
||||
Roles
|
||||
-----
|
||||
* Release managers: Merge pull requests to `stable-X` branches, create tags to do releases.
|
||||
* Committers: Fine to do PRs for most things, but we should have a timebox. Hanging PRs may merge on the judgement of these devs.
|
||||
* Module maintainers: Module maintainers own specific modules and have indirect commit access through the current module PR mechanisms. This is primarily [ansibullbot](https://github.com/ansibullbot)'s `shipit` mechanism.
|
||||
|
||||
General rules
|
||||
-------------
|
||||
Individuals with direct commit access to this collection repository are entrusted with powers that allow them to do a broad variety of things--probably more than we can write down. Rather than rules, treat these as general *guidelines*, individuals with this power are expected to use their best judgement.
|
||||
|
||||
* Do NOTs:
|
||||
|
||||
- Do not commit directly.
|
||||
- Do not merge your own PRs. Someone else should have a chance to review and approve the PR merge. You have a small amount of leeway here for very minor changes.
|
||||
- Do not forget about non-standard / alternate environments. Consider the alternatives. Yes, people have bad/unusual/strange environments (like binaries from multiple init systems installed), but they are the ones who need us the most.
|
||||
- Do not drag your community team members down. Discuss the technical merits of any pull requests you review. Avoid negativity and personal comments. For more guidance on being a good community member, read the [Ansible Community Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
|
||||
- Do not forget about the maintenance burden. High-maintenance features may not be worth adding.
|
||||
- Do not break playbooks. Always keep backwards compatibility in mind.
|
||||
- Do not forget to keep it simple. Complexity breeds all kinds of problems.
|
||||
- Do not merge to branches other than `main`, especially not to `stable-X`, if you do not have explicit permission to do so.
|
||||
- Do not create tags. Tags are used in the release process, and should only be created by the people responsible for managing the stable branches.
|
||||
|
||||
* Do:
|
||||
|
||||
- Squash, avoid merges whenever possible, use GitHub's squash commits or cherry pick if needed (bisect thanks you).
|
||||
- Be active. Committers who have no activity on the project (through merges, triage, commits, and so on) will have their permissions suspended.
|
||||
- Consider backwards compatibility (goes back to "do not break existing playbooks").
|
||||
- Write tests. PRs with tests are looked at with more priority than PRs without tests that should have them included. While not all changes require tests, be sure to add them for bug fixes or functionality changes.
|
||||
- Discuss with other committers, especially when you are unsure of something.
|
||||
- Document! If your PR is a new feature or a change to behavior, make sure you've updated all associated documentation or have notified the right people to do so.
|
||||
- Consider scope, sometimes a fix can be generalized.
|
||||
- Keep it simple, then things are maintainable, debuggable and intelligible.
|
||||
|
||||
Committers are expected to continue to follow the same community and contribution guidelines followed by the rest of the Ansible community.
|
||||
|
||||
|
||||
People
|
||||
------
|
||||
|
||||
Individuals who have been asked to become a part of this group have generally been contributing in significant ways to the community.general collection for some time. Should they agree, they are requested to add their names and GitHub IDs to this file, in the section below, through a pull request. Doing so indicates that these individuals agree to act in the ways that their fellow committers trust that they will act.
|
||||
|
||||
| Name | GitHub ID | IRC Nick | Other |
|
||||
| ------------------- | -------------------- | ------------------ | -------------------- |
|
||||
| Alexei Znamensky | russoz | russoz | |
|
||||
| Amin Vakil | aminvakil | aminvakil | |
|
||||
| Andrew Klychkov | andersson007 | andersson007_ | |
|
||||
| Felix Fontein | felixfontein | felixfontein | |
|
||||
| John R Barker | gundalow | gundalow | |
|
||||
@@ -1,6 +1,6 @@
|
||||
namespace: community
|
||||
name: general
|
||||
version: 2.4.0
|
||||
version: 2.5.3
|
||||
readme: README.md
|
||||
authors:
|
||||
- Ansible (https://github.com/ansible)
|
||||
|
||||
@@ -601,3 +601,10 @@ plugin_routing:
|
||||
redirect: community.docker.docker_swarm
|
||||
kubevirt:
|
||||
redirect: community.kubevirt.kubevirt
|
||||
filter:
|
||||
path_join:
|
||||
# The ansible.builtin.path_join filter has been added in ansible-base 2.10.
|
||||
# Since plugin routing is only available since ansible-base 2.10, this
|
||||
# redirect will be used for ansible-base 2.10 or later, and the included
|
||||
# path_join filter will be used for Ansible 2.9 or earlier.
|
||||
redirect: ansible.builtin.path_join
|
||||
|
||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
||||
import time
|
||||
|
||||
from ansible.plugins.action import ActionBase
|
||||
from ansible.errors import AnsibleError, AnsibleActionFail, AnsibleConnectionFailure
|
||||
from ansible.errors import AnsibleActionFail, AnsibleConnectionFailure
|
||||
from ansible.utils.vars import merge_hash
|
||||
from ansible.utils.display import Display
|
||||
|
||||
@@ -40,19 +40,27 @@ class ActionModule(ActionBase):
|
||||
"(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
|
||||
"'ansible_timeout' (=%s) (recommended).")
|
||||
|
||||
def _async_result(self, module_args, task_vars, timeout):
|
||||
def _async_result(self, async_status_args, task_vars, timeout):
|
||||
'''
|
||||
Retrieve results of the asynchonous task, and display them in place of
|
||||
the async wrapper results (those with the ansible_job_id key).
|
||||
'''
|
||||
async_status = self._task.copy()
|
||||
async_status.args = async_status_args
|
||||
async_status.action = 'ansible.builtin.async_status'
|
||||
async_status.async_val = 0
|
||||
async_action = self._shared_loader_obj.action_loader.get(
|
||||
async_status.action, task=async_status, connection=self._connection,
|
||||
play_context=self._play_context, loader=self._loader, templar=self._templar,
|
||||
shared_loader_obj=self._shared_loader_obj)
|
||||
|
||||
if async_status.args['mode'] == 'cleanup':
|
||||
return async_action.run(task_vars=task_vars)
|
||||
|
||||
# At least one iteration is required, even if timeout is 0.
|
||||
for i in range(max(1, timeout)):
|
||||
async_result = self._execute_module(
|
||||
module_name='ansible.builtin.async_status',
|
||||
module_args=module_args,
|
||||
task_vars=task_vars,
|
||||
wrap_async=False)
|
||||
if async_result['finished'] == 1:
|
||||
for dummy in range(max(1, timeout)):
|
||||
async_result = async_action.run(task_vars=task_vars)
|
||||
if async_result.get('finished', 0) == 1:
|
||||
break
|
||||
time.sleep(min(1, timeout))
|
||||
|
||||
@@ -76,7 +84,6 @@ class ActionModule(ActionBase):
|
||||
task_async = self._task.async_val
|
||||
check_mode = self._play_context.check_mode
|
||||
max_timeout = self._connection._play_context.timeout
|
||||
module_name = self._task.action
|
||||
module_args = self._task.args
|
||||
|
||||
if module_args.get('state', None) == 'restored':
|
||||
@@ -107,7 +114,7 @@ class ActionModule(ActionBase):
|
||||
# longer on the controller); and set a backup file path.
|
||||
module_args['_timeout'] = task_async
|
||||
module_args['_back'] = '%s/iptables.state' % async_dir
|
||||
async_status_args = dict(_async_dir=async_dir)
|
||||
async_status_args = dict(mode='status')
|
||||
confirm_cmd = 'rm -f %s' % module_args['_back']
|
||||
starter_cmd = 'touch %s.starter' % module_args['_back']
|
||||
remaining_time = max(task_async, max_timeout)
|
||||
@@ -133,7 +140,7 @@ class ActionModule(ActionBase):
|
||||
# The module is aware to not process the main iptables-restore
|
||||
# command before finding (and deleting) the 'starter' cookie on
|
||||
# the host, so the previous query will not reach ssh timeout.
|
||||
garbage = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE)
|
||||
dummy = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE)
|
||||
|
||||
# As the main command is not yet executed on the target, here
|
||||
# 'finished' means 'failed before main command be executed'.
|
||||
@@ -143,7 +150,7 @@ class ActionModule(ActionBase):
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
for x in range(max_timeout):
|
||||
for dummy in range(max_timeout):
|
||||
time.sleep(1)
|
||||
remaining_time -= 1
|
||||
# - AnsibleConnectionFailure covers rejected requests (i.e.
|
||||
@@ -151,7 +158,7 @@ class ActionModule(ActionBase):
|
||||
# - ansible_timeout is able to cover dropped requests (due
|
||||
# to a rule or policy DROP) if not lower than async_val.
|
||||
try:
|
||||
garbage = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE)
|
||||
dummy = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE)
|
||||
break
|
||||
except AnsibleConnectionFailure:
|
||||
continue
|
||||
@@ -164,16 +171,12 @@ class ActionModule(ActionBase):
|
||||
del result[key]
|
||||
|
||||
if result.get('invocation', {}).get('module_args'):
|
||||
if '_timeout' in result['invocation']['module_args']:
|
||||
del result['invocation']['module_args']['_back']
|
||||
del result['invocation']['module_args']['_timeout']
|
||||
for key in ('_back', '_timeout', '_async_dir', 'jid'):
|
||||
if result['invocation']['module_args'].get(key):
|
||||
del result['invocation']['module_args'][key]
|
||||
|
||||
async_status_args['mode'] = 'cleanup'
|
||||
garbage = self._execute_module(
|
||||
module_name='ansible.builtin.async_status',
|
||||
module_args=async_status_args,
|
||||
task_vars=task_vars,
|
||||
wrap_async=False)
|
||||
dummy = self._async_result(async_status_args, task_vars, 0)
|
||||
|
||||
if not wrap_async:
|
||||
# remove a temporary path we created
|
||||
|
||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = """
|
||||
become: sudosu
|
||||
name: sudosu
|
||||
short_description: Run tasks using sudo su -
|
||||
description:
|
||||
- This become plugins allows your remote/login user to execute commands as another user via the C(sudo) and C(su) utilities combined.
|
||||
|
||||
14
plugins/cache/redis.py
vendored
14
plugins/cache/redis.py
vendored
@@ -61,6 +61,7 @@ DOCUMENTATION = '''
|
||||
type: integer
|
||||
'''
|
||||
|
||||
import re
|
||||
import time
|
||||
import json
|
||||
|
||||
@@ -91,6 +92,8 @@ class CacheModule(BaseCacheModule):
|
||||
performance.
|
||||
"""
|
||||
_sentinel_service_name = None
|
||||
re_url_conn = re.compile(r'^([^:]+|\[[^]]+\]):(\d+):(\d+)(?::(.*))?$')
|
||||
re_sent_conn = re.compile(r'^(.*):(\d+)$')
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
uri = ''
|
||||
@@ -130,11 +133,18 @@ class CacheModule(BaseCacheModule):
|
||||
self._db = self._get_sentinel_connection(uri, kw)
|
||||
# normal connection
|
||||
else:
|
||||
connection = uri.split(':')
|
||||
connection = self._parse_connection(self.re_url_conn, uri)
|
||||
self._db = StrictRedis(*connection, **kw)
|
||||
|
||||
display.vv('Redis connection: %s' % self._db)
|
||||
|
||||
@staticmethod
|
||||
def _parse_connection(re_patt, uri):
|
||||
match = re_patt.match(uri)
|
||||
if not match:
|
||||
raise AnsibleError("Unable to parse connection string")
|
||||
return match.groups()
|
||||
|
||||
def _get_sentinel_connection(self, uri, kw):
|
||||
"""
|
||||
get sentinel connection details from _uri
|
||||
@@ -158,7 +168,7 @@ class CacheModule(BaseCacheModule):
|
||||
except IndexError:
|
||||
pass # password is optional
|
||||
|
||||
sentinels = [tuple(shost.split(':')) for shost in connections]
|
||||
sentinels = [self._parse_connection(self.re_sent_conn, shost) for shost in connections]
|
||||
display.vv('\nUsing redis sentinels: %s' % sentinels)
|
||||
scon = Sentinel(sentinels, **kw)
|
||||
try:
|
||||
|
||||
@@ -4,7 +4,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
callback: loganalytics
|
||||
name: loganalytics
|
||||
type: aggregate
|
||||
short_description: Posts task results to Azure Log Analytics
|
||||
author: "Cyrus Li (@zhcli) <cyrus1006@gmail.com>"
|
||||
|
||||
@@ -37,12 +37,13 @@ import tempfile
|
||||
import shutil
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.connection import ConnectionBase
|
||||
from ansible.utils.display import Display
|
||||
|
||||
display = Display()
|
||||
|
||||
|
||||
class Connection(object):
|
||||
class Connection(ConnectionBase):
|
||||
''' Func-based connections '''
|
||||
|
||||
has_pipelining = False
|
||||
|
||||
24
plugins/filter/dict.py
Normal file
24
plugins/filter/dict.py
Normal file
@@ -0,0 +1,24 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
def dict_filter(sequence):
|
||||
'''Convert a list of tuples to a dictionary.
|
||||
|
||||
Example: ``[[1, 2], ['a', 'b']] | community.general.dict`` results in ``{1: 2, 'a': 'b'}``
|
||||
'''
|
||||
return dict(sequence)
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
'''Ansible jinja2 filters'''
|
||||
|
||||
def filters(self):
|
||||
return {
|
||||
'dict': dict_filter,
|
||||
}
|
||||
@@ -35,9 +35,11 @@ def json_query(data, expr):
|
||||
raise AnsibleError('You need to install "jmespath" prior to running '
|
||||
'json_query filter')
|
||||
|
||||
# Hack to handle Ansible String Types
|
||||
# Hack to handle Ansible Unsafe text, AnsibleMapping and AnsibleSequence
|
||||
# See issue: https://github.com/ansible-collections/community.general/issues/320
|
||||
jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ('AnsibleUnicode', 'AnsibleUnsafeText', )
|
||||
jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + ('AnsibleSequence', )
|
||||
jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + ('AnsibleMapping', )
|
||||
try:
|
||||
return jmespath.search(expr, data)
|
||||
except jmespath.exceptions.JMESPathError as e:
|
||||
|
||||
28
plugins/filter/path_join_shim.py
Normal file
28
plugins/filter/path_join_shim.py
Normal file
@@ -0,0 +1,28 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2020-2021, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
import os.path
|
||||
|
||||
|
||||
def path_join(list):
|
||||
'''Join list of paths.
|
||||
|
||||
This is a minimal shim for ansible.builtin.path_join included in ansible-base 2.10.
|
||||
This should only be called by Ansible 2.9 or earlier. See meta/runtime.yml for details.
|
||||
'''
|
||||
return os.path.join(*list)
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
'''Ansible jinja2 filters'''
|
||||
|
||||
def filters(self):
|
||||
return {
|
||||
'path_join': path_join,
|
||||
}
|
||||
@@ -71,6 +71,25 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
self._nmap = None
|
||||
super(InventoryModule, self).__init__()
|
||||
|
||||
def _populate(self, hosts):
|
||||
# Use constructed if applicable
|
||||
strict = self.get_option('strict')
|
||||
|
||||
for host in hosts:
|
||||
hostname = host['name']
|
||||
self.inventory.add_host(hostname)
|
||||
for var, value in host.items():
|
||||
self.inventory.set_variable(hostname, var, value)
|
||||
|
||||
# Composed variables
|
||||
self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
|
||||
|
||||
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
|
||||
self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
|
||||
|
||||
# Create groups based on variable values and add the corresponding hosts to it
|
||||
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
|
||||
|
||||
def verify_file(self, path):
|
||||
|
||||
valid = False
|
||||
@@ -82,7 +101,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
|
||||
return valid
|
||||
|
||||
def parse(self, inventory, loader, path, cache=False):
|
||||
def parse(self, inventory, loader, path, cache=True):
|
||||
|
||||
try:
|
||||
self._nmap = get_bin_path('nmap')
|
||||
@@ -93,75 +112,102 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
|
||||
self._read_config_data(path)
|
||||
|
||||
# setup command
|
||||
cmd = [self._nmap]
|
||||
if not self._options['ports']:
|
||||
cmd.append('-sP')
|
||||
cache_key = self.get_cache_key(path)
|
||||
|
||||
if self._options['ipv4'] and not self._options['ipv6']:
|
||||
cmd.append('-4')
|
||||
elif self._options['ipv6'] and not self._options['ipv4']:
|
||||
cmd.append('-6')
|
||||
elif not self._options['ipv6'] and not self._options['ipv4']:
|
||||
raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin')
|
||||
# cache may be True or False at this point to indicate if the inventory is being refreshed
|
||||
# get the user's cache option too to see if we should save the cache if it is changing
|
||||
user_cache_setting = self.get_option('cache')
|
||||
|
||||
if self._options['exclude']:
|
||||
cmd.append('--exclude')
|
||||
cmd.append(','.join(self._options['exclude']))
|
||||
|
||||
cmd.append(self._options['address'])
|
||||
try:
|
||||
# execute
|
||||
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
|
||||
stdout, stderr = p.communicate()
|
||||
if p.returncode != 0:
|
||||
raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr)))
|
||||
|
||||
# parse results
|
||||
host = None
|
||||
ip = None
|
||||
ports = []
|
||||
# read if the user has caching enabled and the cache isn't being refreshed
|
||||
attempt_to_read_cache = user_cache_setting and cache
|
||||
# update if the user has caching enabled and the cache is being refreshed; update this value to True if the cache has expired below
|
||||
cache_needs_update = user_cache_setting and not cache
|
||||
|
||||
if attempt_to_read_cache:
|
||||
try:
|
||||
t_stdout = to_text(stdout, errors='surrogate_or_strict')
|
||||
except UnicodeError as e:
|
||||
raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e))
|
||||
results = self._cache[cache_key]
|
||||
except KeyError:
|
||||
# This occurs if the cache_key is not in the cache or if the cache_key expired, so the cache needs to be updated
|
||||
cache_needs_update = True
|
||||
|
||||
for line in t_stdout.splitlines():
|
||||
hits = self.find_host.match(line)
|
||||
if hits:
|
||||
if host is not None:
|
||||
self.inventory.set_variable(host, 'ports', ports)
|
||||
if not user_cache_setting or cache_needs_update:
|
||||
# setup command
|
||||
cmd = [self._nmap]
|
||||
if not self._options['ports']:
|
||||
cmd.append('-sP')
|
||||
|
||||
# if dns only shows arpa, just use ip instead as hostname
|
||||
if hits.group(1).endswith('.in-addr.arpa'):
|
||||
host = hits.group(2)
|
||||
else:
|
||||
host = hits.group(1)
|
||||
if self._options['ipv4'] and not self._options['ipv6']:
|
||||
cmd.append('-4')
|
||||
elif self._options['ipv6'] and not self._options['ipv4']:
|
||||
cmd.append('-6')
|
||||
elif not self._options['ipv6'] and not self._options['ipv4']:
|
||||
raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin')
|
||||
|
||||
# if no reverse dns exists, just use ip instead as hostname
|
||||
if hits.group(2) is not None:
|
||||
ip = hits.group(2)
|
||||
else:
|
||||
ip = hits.group(1)
|
||||
if self._options['exclude']:
|
||||
cmd.append('--exclude')
|
||||
cmd.append(','.join(self._options['exclude']))
|
||||
|
||||
if host is not None:
|
||||
# update inventory
|
||||
self.inventory.add_host(host)
|
||||
self.inventory.set_variable(host, 'ip', ip)
|
||||
ports = []
|
||||
continue
|
||||
cmd.append(self._options['address'])
|
||||
try:
|
||||
# execute
|
||||
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
|
||||
stdout, stderr = p.communicate()
|
||||
if p.returncode != 0:
|
||||
raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr)))
|
||||
|
||||
host_ports = self.find_port.match(line)
|
||||
if host is not None and host_ports:
|
||||
ports.append({'port': host_ports.group(1), 'protocol': host_ports.group(2), 'state': host_ports.group(3), 'service': host_ports.group(4)})
|
||||
continue
|
||||
# parse results
|
||||
host = None
|
||||
ip = None
|
||||
ports = []
|
||||
results = []
|
||||
|
||||
# TODO: parse more data, OS?
|
||||
try:
|
||||
t_stdout = to_text(stdout, errors='surrogate_or_strict')
|
||||
except UnicodeError as e:
|
||||
raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e))
|
||||
|
||||
# if any leftovers
|
||||
if host and ports:
|
||||
self.inventory.set_variable(host, 'ports', ports)
|
||||
for line in t_stdout.splitlines():
|
||||
hits = self.find_host.match(line)
|
||||
if hits:
|
||||
if host is not None and ports:
|
||||
results[-1]['ports'] = ports
|
||||
|
||||
except Exception as e:
|
||||
raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)))
|
||||
# if dns only shows arpa, just use ip instead as hostname
|
||||
if hits.group(1).endswith('.in-addr.arpa'):
|
||||
host = hits.group(2)
|
||||
else:
|
||||
host = hits.group(1)
|
||||
|
||||
# if no reverse dns exists, just use ip instead as hostname
|
||||
if hits.group(2) is not None:
|
||||
ip = hits.group(2)
|
||||
else:
|
||||
ip = hits.group(1)
|
||||
|
||||
if host is not None:
|
||||
# update inventory
|
||||
results.append(dict())
|
||||
results[-1]['name'] = host
|
||||
results[-1]['ip'] = ip
|
||||
ports = []
|
||||
continue
|
||||
|
||||
host_ports = self.find_port.match(line)
|
||||
if host is not None and host_ports:
|
||||
ports.append({'port': host_ports.group(1),
|
||||
'protocol': host_ports.group(2),
|
||||
'state': host_ports.group(3),
|
||||
'service': host_ports.group(4)})
|
||||
continue
|
||||
|
||||
# if any leftovers
|
||||
if host and ports:
|
||||
results[-1]['ports'] = ports
|
||||
|
||||
except Exception as e:
|
||||
raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)))
|
||||
|
||||
if cache_needs_update:
|
||||
self._cache[cache_key] = results
|
||||
|
||||
self._populate(results)
|
||||
|
||||
@@ -19,6 +19,7 @@ DOCUMENTATION = '''
|
||||
- Will retrieve the first network interface with an IP for Proxmox nodes.
|
||||
- Can retrieve LXC/QEMU configuration as facts.
|
||||
extends_documentation_fragment:
|
||||
- constructed
|
||||
- inventory_cache
|
||||
options:
|
||||
plugin:
|
||||
@@ -69,6 +70,14 @@ DOCUMENTATION = '''
|
||||
description: Gather LXC/QEMU configuration facts.
|
||||
default: no
|
||||
type: bool
|
||||
strict:
|
||||
version_added: 2.5.0
|
||||
compose:
|
||||
version_added: 2.5.0
|
||||
groups:
|
||||
version_added: 2.5.0
|
||||
keyed_groups:
|
||||
version_added: 2.5.0
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
@@ -78,6 +87,15 @@ url: http://localhost:8006
|
||||
user: ansible@pve
|
||||
password: secure
|
||||
validate_certs: no
|
||||
keyed_groups:
|
||||
- key: proxmox_tags_parsed
|
||||
separator: ""
|
||||
prefix: group
|
||||
groups:
|
||||
webservers: "'web' in (proxmox_tags_parsed|list)"
|
||||
mailservers: "'mail' in (proxmox_tags_parsed|list)"
|
||||
compose:
|
||||
ansible_port: 2222
|
||||
'''
|
||||
|
||||
import re
|
||||
@@ -86,7 +104,7 @@ from ansible.module_utils.common._collections_compat import MutableMapping
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlencode
|
||||
|
||||
# 3rd party imports
|
||||
@@ -99,7 +117,7 @@ except ImportError:
|
||||
HAS_REQUESTS = False
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Cacheable):
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
''' Host inventory parser for ansible using Proxmox as source. '''
|
||||
|
||||
NAME = 'community.general.proxmox'
|
||||
@@ -206,9 +224,36 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
def _get_agent_network_interfaces(self, node, vmid, vmtype):
|
||||
result = []
|
||||
|
||||
try:
|
||||
ifaces = self._get_json(
|
||||
"%s/api2/json/nodes/%s/%s/%s/agent/network-get-interfaces" % (
|
||||
self.proxmox_url, node, vmtype, vmid
|
||||
)
|
||||
)['result']
|
||||
|
||||
for iface in ifaces:
|
||||
result.append({
|
||||
'name': iface['name'],
|
||||
'mac-address': iface['hardware-address'],
|
||||
'ip-addresses': [
|
||||
"%s/%s" % (ip['ip-address'], ip['prefix']) for ip in iface['ip-addresses']
|
||||
]
|
||||
})
|
||||
except requests.HTTPError:
|
||||
pass
|
||||
|
||||
return result
|
||||
|
||||
def _get_vm_config(self, node, vmid, vmtype, name):
|
||||
ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/config" % (self.proxmox_url, node, vmtype, vmid))
|
||||
|
||||
node_key = 'node'
|
||||
node_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), node_key.lower()))
|
||||
self.inventory.set_variable(name, node_key, node)
|
||||
|
||||
vmid_key = 'vmid'
|
||||
vmid_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), vmid_key.lower()))
|
||||
self.inventory.set_variable(name, vmid_key, vmid)
|
||||
@@ -236,6 +281,14 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
|
||||
parsed_value = [tag.strip() for tag in value.split(",")]
|
||||
self.inventory.set_variable(name, parsed_key, parsed_value)
|
||||
|
||||
# The first field in the agent string tells you whether the agent is enabled
|
||||
# the rest of the comma separated string is extra config for the agent
|
||||
if config == 'agent' and int(value.split(',')[0]):
|
||||
agent_iface_key = self.to_safe('%s%s' % (key, "_interfaces"))
|
||||
agent_iface_value = self._get_agent_network_interfaces(node, vmid, vmtype)
|
||||
if agent_iface_value:
|
||||
self.inventory.set_variable(name, agent_iface_key, agent_iface_value)
|
||||
|
||||
if not (isinstance(value, int) or ',' not in value):
|
||||
# split off strings with commas to a dict
|
||||
# skip over any keys that cannot be processed
|
||||
@@ -264,6 +317,12 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
|
||||
regex = r"[^A-Za-z0-9\_]"
|
||||
return re.sub(regex, "_", word.replace(" ", ""))
|
||||
|
||||
def _apply_constructable(self, name, variables):
|
||||
strict = self.get_option('strict')
|
||||
self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=strict)
|
||||
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=strict)
|
||||
self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict)
|
||||
|
||||
def _populate(self):
|
||||
|
||||
self._get_auth()
|
||||
@@ -318,6 +377,8 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
|
||||
if self.get_option('want_facts'):
|
||||
self._get_vm_config(node['node'], lxc['vmid'], 'lxc', lxc['name'])
|
||||
|
||||
self._apply_constructable(lxc["name"], self.inventory.get_host(lxc['name']).get_vars())
|
||||
|
||||
# get QEMU vm's for this node
|
||||
node_qemu_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_qemu' % node['node']).lower()))
|
||||
self.inventory.add_group(node_qemu_group)
|
||||
@@ -340,6 +401,8 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
|
||||
if self.get_option('want_facts'):
|
||||
self._get_vm_config(node['node'], qemu['vmid'], 'qemu', qemu['name'])
|
||||
|
||||
self._apply_constructable(qemu["name"], self.inventory.get_host(qemu['name']).get_vars())
|
||||
|
||||
# gather vm's in pools
|
||||
for pool in self._get_pools():
|
||||
if pool.get('poolid'):
|
||||
|
||||
@@ -10,6 +10,8 @@ DOCUMENTATION = '''
|
||||
name: stackpath_compute
|
||||
short_description: StackPath Edge Computing inventory source
|
||||
version_added: 1.2.0
|
||||
author:
|
||||
- UNKNOWN (@shayrybak)
|
||||
extends_documentation_fragment:
|
||||
- inventory_cache
|
||||
- constructed
|
||||
@@ -102,13 +104,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
raise AnsibleError("plugin doesn't match this plugin")
|
||||
try:
|
||||
client_id = config['client_id']
|
||||
if client_id != 32:
|
||||
if len(client_id) != 32:
|
||||
raise AnsibleError("client_id must be 32 characters long")
|
||||
except KeyError:
|
||||
raise AnsibleError("config missing client_id, a required option")
|
||||
try:
|
||||
client_secret = config['client_secret']
|
||||
if client_secret != 64:
|
||||
if len(client_secret) != 64:
|
||||
raise AnsibleError("client_secret must be 64 characters long")
|
||||
except KeyError:
|
||||
raise AnsibleError("config missing client_id, a required option")
|
||||
|
||||
@@ -171,10 +171,10 @@ class LookupModule(LookupBase):
|
||||
|
||||
paramvals = {
|
||||
'key': params[0],
|
||||
'token': None,
|
||||
'recurse': False,
|
||||
'index': None,
|
||||
'datacenter': None
|
||||
'token': self.get_option('token'),
|
||||
'recurse': self.get_option('recurse'),
|
||||
'index': self.get_option('index'),
|
||||
'datacenter': self.get_option('datacenter')
|
||||
}
|
||||
|
||||
# parameters specified?
|
||||
|
||||
@@ -31,7 +31,9 @@ EXAMPLES = r"""
|
||||
- name: Template files (explicitly skip directories in order to use the 'src' attribute)
|
||||
ansible.builtin.template:
|
||||
src: '{{ item.src }}'
|
||||
dest: /web/{{ item.path }}
|
||||
# Your template files should be stored with a .j2 file extension,
|
||||
# but should not be deployed with it. splitext|first removes it.
|
||||
dest: /web/{{ item.path | splitext | first }}
|
||||
mode: '{{ item.mode }}'
|
||||
with_community.general.filetree: web/
|
||||
when: item.state == 'file'
|
||||
@@ -41,6 +43,7 @@ EXAMPLES = r"""
|
||||
src: '{{ item.src }}'
|
||||
dest: /web/{{ item.path }}
|
||||
state: link
|
||||
follow: false # avoid corrupting target files if the link already exists
|
||||
force: yes
|
||||
mode: '{{ item.mode }}'
|
||||
with_community.general.filetree: web/
|
||||
|
||||
@@ -63,6 +63,7 @@ import os
|
||||
|
||||
from ansible.plugins.lookup import LookupBase
|
||||
from ansible.utils.cmd_functions import run_cmd
|
||||
from ansible.module_utils._text import to_text
|
||||
|
||||
ANSIBLE_HIERA_CFG = os.getenv('ANSIBLE_HIERA_CFG', '/etc/hiera.yaml')
|
||||
ANSIBLE_HIERA_BIN = os.getenv('ANSIBLE_HIERA_BIN', '/usr/bin/hiera')
|
||||
@@ -78,13 +79,11 @@ class Hiera(object):
|
||||
rc, output, err = run_cmd("{0} -c {1} {2}".format(
|
||||
ANSIBLE_HIERA_BIN, ANSIBLE_HIERA_CFG, hiera_key[0]))
|
||||
|
||||
return output.strip()
|
||||
return to_text(output.strip())
|
||||
|
||||
|
||||
class LookupModule(LookupBase):
|
||||
def run(self, terms, variables=''):
|
||||
hiera = Hiera()
|
||||
ret = []
|
||||
|
||||
ret.append(hiera.get(terms))
|
||||
ret = [hiera.get(terms)]
|
||||
return ret
|
||||
|
||||
@@ -103,6 +103,14 @@ EXAMPLES = r"""
|
||||
| items2dict(key_name='slug',
|
||||
value_name='itemValue'))['password']
|
||||
}}
|
||||
|
||||
- hosts: localhost
|
||||
vars:
|
||||
secret_password: >-
|
||||
{{ ((lookup('community.general.tss', 1) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password'] }}"
|
||||
tasks:
|
||||
- ansible.builtin.debug:
|
||||
msg: the password is {{ secret_password }}
|
||||
"""
|
||||
|
||||
from ansible.errors import AnsibleError, AnsibleOptionsError
|
||||
|
||||
@@ -119,9 +119,9 @@ class IPAClient(object):
|
||||
data = dict(method=method)
|
||||
|
||||
# TODO: We should probably handle this a little better.
|
||||
if method in ('ping', 'config_show'):
|
||||
if method in ('ping', 'config_show', 'otpconfig_show'):
|
||||
data['params'] = [[], {}]
|
||||
elif method == 'config_mod':
|
||||
elif method in ('config_mod', 'otpconfig_mod'):
|
||||
data['params'] = [[], item]
|
||||
else:
|
||||
data['params'] = [[name], item]
|
||||
|
||||
@@ -87,11 +87,12 @@ def not_in_host_file(self, host):
|
||||
user_host_file = "~/.ssh/known_hosts"
|
||||
user_host_file = os.path.expanduser(user_host_file)
|
||||
|
||||
host_file_list = []
|
||||
host_file_list.append(user_host_file)
|
||||
host_file_list.append("/etc/ssh/ssh_known_hosts")
|
||||
host_file_list.append("/etc/ssh/ssh_known_hosts2")
|
||||
host_file_list.append("/etc/openssh/ssh_known_hosts")
|
||||
host_file_list = [
|
||||
user_host_file,
|
||||
"/etc/ssh/ssh_known_hosts",
|
||||
"/etc/ssh/ssh_known_hosts2",
|
||||
"/etc/openssh/ssh_known_hosts",
|
||||
]
|
||||
|
||||
hfiles_not_found = 0
|
||||
for hf in host_file_list:
|
||||
|
||||
@@ -10,6 +10,7 @@ from functools import partial, wraps
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.common.dict_transformations import dict_merge
|
||||
|
||||
|
||||
class ModuleHelperException(Exception):
|
||||
@@ -24,12 +25,12 @@ class ModuleHelperException(Exception):
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.msg = self._get_remove('msg', kwargs) or "Module failed with exception: {0}".format(self)
|
||||
self.update_output = self._get_remove('update_output', kwargs) or {}
|
||||
super(ModuleHelperException, self).__init__(*args, **kwargs)
|
||||
super(ModuleHelperException, self).__init__(*args)
|
||||
|
||||
|
||||
class ArgFormat(object):
|
||||
"""
|
||||
Argument formatter
|
||||
Argument formatter for use as a command line parameter. Used in CmdMixin.
|
||||
"""
|
||||
BOOLEAN = 0
|
||||
PRINTF = 1
|
||||
@@ -50,7 +51,8 @@ class ArgFormat(object):
|
||||
|
||||
def __init__(self, name, fmt=None, style=FORMAT, stars=0):
|
||||
"""
|
||||
Creates a new formatter
|
||||
Creates a CLI-formatter for one specific argument. The argument may be a module parameter or just a named parameter for
|
||||
the CLI command execution.
|
||||
:param name: Name of the argument to be formatted
|
||||
:param fmt: Either a str to be formatted (using or not printf-style) or a callable that does that
|
||||
:param style: Whether arg_format (as str) should use printf-style formatting.
|
||||
@@ -99,18 +101,27 @@ class ArgFormat(object):
|
||||
return [str(p) for p in func(value)]
|
||||
|
||||
|
||||
def cause_changes(func, on_success=True, on_failure=False):
|
||||
@wraps(func)
|
||||
def wrapper(self, *args, **kwargs):
|
||||
try:
|
||||
func(*args, **kwargs)
|
||||
if on_success:
|
||||
self.changed = True
|
||||
except Exception as e:
|
||||
if on_failure:
|
||||
self.changed = True
|
||||
raise
|
||||
return wrapper
|
||||
def cause_changes(on_success=None, on_failure=None):
|
||||
|
||||
def deco(func):
|
||||
if on_success is None and on_failure is None:
|
||||
return func
|
||||
|
||||
@wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
try:
|
||||
self = args[0]
|
||||
func(*args, **kwargs)
|
||||
if on_success is not None:
|
||||
self.changed = on_success
|
||||
except Exception:
|
||||
if on_failure is not None:
|
||||
self.changed = on_failure
|
||||
raise
|
||||
|
||||
return wrapper
|
||||
|
||||
return deco
|
||||
|
||||
|
||||
def module_fails_on_exception(func):
|
||||
@@ -123,11 +134,12 @@ def module_fails_on_exception(func):
|
||||
except ModuleHelperException as e:
|
||||
if e.update_output:
|
||||
self.update_output(e.update_output)
|
||||
self.module.fail_json(changed=False, msg=e.msg, exception=traceback.format_exc(), output=self.output, vars=self.vars)
|
||||
self.module.fail_json(msg=e.msg, exception=traceback.format_exc(),
|
||||
output=self.output, vars=self.vars.output(), **self.output)
|
||||
except Exception as e:
|
||||
self.vars.msg = "Module failed with exception: {0}".format(str(e).strip())
|
||||
self.vars.exception = traceback.format_exc()
|
||||
self.module.fail_json(changed=False, msg=self.vars.msg, exception=self.vars.exception, output=self.output, vars=self.vars)
|
||||
msg = "Module failed with exception: {0}".format(str(e).strip())
|
||||
self.module.fail_json(msg=msg, exception=traceback.format_exc(),
|
||||
output=self.output, vars=self.vars.output(), **self.output)
|
||||
return wrapper
|
||||
|
||||
|
||||
@@ -141,7 +153,7 @@ class DependencyCtxMgr(object):
|
||||
self.exc_tb = None
|
||||
|
||||
def __enter__(self):
|
||||
pass
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
self.has_it = exc_type is None
|
||||
@@ -155,32 +167,157 @@ class DependencyCtxMgr(object):
|
||||
return self.msg or str(self.exc_val)
|
||||
|
||||
|
||||
class ModuleHelper(object):
|
||||
_dependencies = []
|
||||
module = {}
|
||||
facts_name = None
|
||||
class VarMeta(object):
|
||||
NOTHING = object()
|
||||
|
||||
def __init__(self, diff=False, output=True, change=None, fact=False):
|
||||
self.init = False
|
||||
self.initial_value = None
|
||||
self.value = None
|
||||
|
||||
self.diff = diff
|
||||
self.change = diff if change is None else change
|
||||
self.output = output
|
||||
self.fact = fact
|
||||
|
||||
def set(self, diff=None, output=None, change=None, fact=None, initial_value=NOTHING):
|
||||
if diff is not None:
|
||||
self.diff = diff
|
||||
if output is not None:
|
||||
self.output = output
|
||||
if change is not None:
|
||||
self.change = change
|
||||
if fact is not None:
|
||||
self.fact = fact
|
||||
if initial_value is not self.NOTHING:
|
||||
self.initial_value = initial_value
|
||||
|
||||
def set_value(self, value):
|
||||
if not self.init:
|
||||
self.initial_value = value
|
||||
self.init = True
|
||||
self.value = value
|
||||
return self
|
||||
|
||||
@property
|
||||
def has_changed(self):
|
||||
return self.change and (self.initial_value != self.value)
|
||||
|
||||
@property
|
||||
def diff_result(self):
|
||||
return None if not (self.diff and self.has_changed) else {
|
||||
'before': self.initial_value,
|
||||
'after': self.value,
|
||||
}
|
||||
|
||||
def __str__(self):
|
||||
return "<VarMeta: value={0}, initial={1}, diff={2}, output={3}, change={4}>".format(
|
||||
self.value, self.initial_value, self.diff, self.output, self.change
|
||||
)
|
||||
|
||||
|
||||
class ModuleHelper(object):
|
||||
_output_conflict_list = ('msg', 'exception', 'output', 'vars', 'changed')
|
||||
_dependencies = []
|
||||
module = None
|
||||
facts_name = None
|
||||
output_params = ()
|
||||
diff_params = ()
|
||||
change_params = ()
|
||||
facts_params = ()
|
||||
|
||||
class VarDict(object):
|
||||
def __init__(self):
|
||||
self._data = dict()
|
||||
self._meta = dict()
|
||||
|
||||
def __getitem__(self, item):
|
||||
return self._data[item]
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
self.set(key, value)
|
||||
|
||||
class AttrDict(dict):
|
||||
def __getattr__(self, item):
|
||||
return self[item]
|
||||
try:
|
||||
return self._data[item]
|
||||
except KeyError:
|
||||
return getattr(self._data, item)
|
||||
|
||||
def __setattr__(self, key, value):
|
||||
if key in ('_data', '_meta'):
|
||||
super(ModuleHelper.VarDict, self).__setattr__(key, value)
|
||||
else:
|
||||
self.set(key, value)
|
||||
|
||||
def meta(self, name):
|
||||
return self._meta[name]
|
||||
|
||||
def set_meta(self, name, **kwargs):
|
||||
self.meta(name).set(**kwargs)
|
||||
|
||||
def set(self, name, value, **kwargs):
|
||||
if name in ('_data', '_meta'):
|
||||
raise ValueError("Names _data and _meta are reserved for use by ModuleHelper")
|
||||
self._data[name] = value
|
||||
if name in self._meta:
|
||||
meta = self.meta(name)
|
||||
else:
|
||||
meta = VarMeta(**kwargs)
|
||||
meta.set_value(value)
|
||||
self._meta[name] = meta
|
||||
|
||||
def output(self):
|
||||
return dict((k, v) for k, v in self._data.items() if self.meta(k).output)
|
||||
|
||||
def diff(self):
|
||||
diff_results = [(k, self.meta(k).diff_result) for k in self._data]
|
||||
diff_results = [dr for dr in diff_results if dr[1] is not None]
|
||||
if diff_results:
|
||||
before = dict((dr[0], dr[1]['before']) for dr in diff_results)
|
||||
after = dict((dr[0], dr[1]['after']) for dr in diff_results)
|
||||
return {'before': before, 'after': after}
|
||||
return None
|
||||
|
||||
def facts(self):
|
||||
facts_result = dict((k, v) for k, v in self._data.items() if self._meta[k].fact)
|
||||
return facts_result if facts_result else None
|
||||
|
||||
def change_vars(self):
|
||||
return [v for v in self._data if self.meta(v).change]
|
||||
|
||||
def has_changed(self, v):
|
||||
return self._meta[v].has_changed
|
||||
|
||||
def __init__(self, module=None):
|
||||
self.vars = ModuleHelper.AttrDict()
|
||||
self.output_dict = dict()
|
||||
self.facts_dict = dict()
|
||||
self.vars = ModuleHelper.VarDict()
|
||||
self._changed = False
|
||||
|
||||
if module:
|
||||
self.module = module
|
||||
|
||||
if isinstance(self.module, dict):
|
||||
if not isinstance(self.module, AnsibleModule):
|
||||
self.module = AnsibleModule(**self.module)
|
||||
|
||||
for name, value in self.module.params.items():
|
||||
self.vars.set(
|
||||
name, value,
|
||||
diff=name in self.diff_params,
|
||||
output=name in self.output_params,
|
||||
change=None if not self.change_params else name in self.change_params,
|
||||
fact=name in self.facts_params,
|
||||
)
|
||||
|
||||
def update_vars(self, meta=None, **kwargs):
|
||||
if meta is None:
|
||||
meta = {}
|
||||
for k, v in kwargs.items():
|
||||
self.vars.set(k, v, **meta)
|
||||
|
||||
def update_output(self, **kwargs):
|
||||
self.output_dict.update(kwargs)
|
||||
self.update_vars(meta={"output": True}, **kwargs)
|
||||
|
||||
def update_facts(self, **kwargs):
|
||||
self.facts_dict.update(kwargs)
|
||||
self.update_vars(meta={"fact": True}, **kwargs)
|
||||
|
||||
def __init_module__(self):
|
||||
pass
|
||||
@@ -191,6 +328,9 @@ class ModuleHelper(object):
|
||||
def __quit_module__(self):
|
||||
pass
|
||||
|
||||
def _vars_changed(self):
|
||||
return any(self.vars.has_changed(v) for v in self.vars.change_vars())
|
||||
|
||||
@property
|
||||
def changed(self):
|
||||
return self._changed
|
||||
@@ -199,12 +339,25 @@ class ModuleHelper(object):
|
||||
def changed(self, value):
|
||||
self._changed = value
|
||||
|
||||
def has_changed(self):
|
||||
return self.changed or self._vars_changed()
|
||||
|
||||
@property
|
||||
def output(self):
|
||||
result = dict(self.vars)
|
||||
result.update(self.output_dict)
|
||||
result = dict(self.vars.output())
|
||||
if self.facts_name:
|
||||
result['ansible_facts'] = {self.facts_name: self.facts_dict}
|
||||
facts = self.vars.facts()
|
||||
if facts is not None:
|
||||
result['ansible_facts'] = {self.facts_name: facts}
|
||||
if self.module._diff:
|
||||
diff = result.get('diff', {})
|
||||
vars_diff = self.vars.diff() or {}
|
||||
result['diff'] = dict_merge(dict(diff), vars_diff)
|
||||
|
||||
for varname in result:
|
||||
if varname in self._output_conflict_list:
|
||||
result["_" + varname] = result[varname]
|
||||
del result[varname]
|
||||
return result
|
||||
|
||||
@module_fails_on_exception
|
||||
@@ -213,7 +366,7 @@ class ModuleHelper(object):
|
||||
self.__init_module__()
|
||||
self.__run__()
|
||||
self.__quit_module__()
|
||||
self.module.exit_json(changed=self.changed, **self.output_dict)
|
||||
self.module.exit_json(changed=self.has_changed(), **self.output)
|
||||
|
||||
@classmethod
|
||||
def dependency(cls, name, msg):
|
||||
@@ -224,9 +377,9 @@ class ModuleHelper(object):
|
||||
for d in self._dependencies:
|
||||
if not d.has_it:
|
||||
self.module.fail_json(changed=False,
|
||||
exception=d.exc_val.__traceback__.format_exc(),
|
||||
exception="\n".join(traceback.format_exception(d.exc_type, d.exc_val, d.exc_tb)),
|
||||
msg=d.text,
|
||||
**self.output_dict)
|
||||
**self.output)
|
||||
|
||||
|
||||
class StateMixin(object):
|
||||
@@ -332,16 +485,19 @@ class CmdMixin(object):
|
||||
return rc, out, err
|
||||
|
||||
def run_command(self, extra_params=None, params=None, *args, **kwargs):
|
||||
self.vars['cmd_args'] = self._calculate_args(extra_params, params)
|
||||
self.vars.cmd_args = self._calculate_args(extra_params, params)
|
||||
options = dict(self.run_command_fixed_options)
|
||||
env_update = dict(options.get('environ_update', {}))
|
||||
options['check_rc'] = options.get('check_rc', self.check_rc)
|
||||
options.update(kwargs)
|
||||
env_update = dict(options.get('environ_update', {}))
|
||||
if self.force_lang:
|
||||
env_update.update({'LANGUAGE': self.force_lang})
|
||||
env_update.update({
|
||||
'LANGUAGE': self.force_lang,
|
||||
'LC_ALL': self.force_lang,
|
||||
})
|
||||
self.update_output(force_lang=self.force_lang)
|
||||
options['environ_update'] = env_update
|
||||
options.update(kwargs)
|
||||
rc, out, err = self.module.run_command(self.vars['cmd_args'], *args, **options)
|
||||
rc, out, err = self.module.run_command(self.vars.cmd_args, *args, **options)
|
||||
self.update_output(rc=rc, stdout=out, stderr=err)
|
||||
return self.process_command_output(rc, out, err)
|
||||
|
||||
|
||||
@@ -57,6 +57,34 @@ def _get_pritunl_organizations(api_token, api_secret, base_url, validate_certs=T
|
||||
)
|
||||
|
||||
|
||||
def _delete_pritunl_organization(
|
||||
api_token, api_secret, base_url, organization_id, validate_certs=True
|
||||
):
|
||||
return pritunl_auth_request(
|
||||
base_url=base_url,
|
||||
api_token=api_token,
|
||||
api_secret=api_secret,
|
||||
method="DELETE",
|
||||
path="/organization/%s" % (organization_id),
|
||||
validate_certs=validate_certs,
|
||||
)
|
||||
|
||||
|
||||
def _post_pritunl_organization(
|
||||
api_token, api_secret, base_url, organization_data, validate_certs=True
|
||||
):
|
||||
return pritunl_auth_request(
|
||||
api_token=api_token,
|
||||
api_secret=api_secret,
|
||||
base_url=base_url,
|
||||
method="POST",
|
||||
path="/organization/%s",
|
||||
headers={"Content-Type": "application/json"},
|
||||
data=json.dumps(organization_data),
|
||||
validate_certs=validate_certs,
|
||||
)
|
||||
|
||||
|
||||
def _get_pritunl_users(
|
||||
api_token, api_secret, base_url, organization_id, validate_certs=True
|
||||
):
|
||||
@@ -179,6 +207,29 @@ def list_pritunl_users(
|
||||
return users
|
||||
|
||||
|
||||
def post_pritunl_organization(
|
||||
api_token,
|
||||
api_secret,
|
||||
base_url,
|
||||
organization_name,
|
||||
validate_certs=True,
|
||||
):
|
||||
response = _post_pritunl_organization(
|
||||
api_token=api_token,
|
||||
api_secret=api_secret,
|
||||
base_url=base_url,
|
||||
organization_data={"name": organization_name},
|
||||
validate_certs=True,
|
||||
)
|
||||
|
||||
if response.getcode() != 200:
|
||||
raise PritunlException(
|
||||
"Could not add organization %s to Pritunl" % (organization_name)
|
||||
)
|
||||
# The user PUT request returns the updated user object
|
||||
return json.loads(response.read())
|
||||
|
||||
|
||||
def post_pritunl_user(
|
||||
api_token,
|
||||
api_secret,
|
||||
@@ -227,6 +278,25 @@ def post_pritunl_user(
|
||||
return json.loads(response.read())
|
||||
|
||||
|
||||
def delete_pritunl_organization(
|
||||
api_token, api_secret, base_url, organization_id, validate_certs=True
|
||||
):
|
||||
response = _delete_pritunl_organization(
|
||||
api_token=api_token,
|
||||
api_secret=api_secret,
|
||||
base_url=base_url,
|
||||
organization_id=organization_id,
|
||||
validate_certs=True,
|
||||
)
|
||||
|
||||
if response.getcode() != 200:
|
||||
raise PritunlException(
|
||||
"Could not remove organization %s from Pritunl" % (organization_id)
|
||||
)
|
||||
|
||||
return json.loads(response.read())
|
||||
|
||||
|
||||
def delete_pritunl_user(
|
||||
api_token, api_secret, base_url, organization_id, user_id, validate_certs=True
|
||||
):
|
||||
|
||||
@@ -20,7 +20,6 @@ except ImportError:
|
||||
XENAPI_IMP_ERR = traceback.format_exc()
|
||||
|
||||
from ansible.module_utils.basic import env_fallback, missing_required_lib
|
||||
from ansible.module_utils.common.network import is_mac
|
||||
from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
|
||||
|
||||
|
||||
|
||||
@@ -102,7 +102,8 @@ def do_install(module, mode, rootfs, container, image, values_list, backend):
|
||||
system_list = ["--system"] if mode == 'system' else []
|
||||
user_list = ["--user"] if mode == 'user' else []
|
||||
rootfs_list = ["--rootfs=%s" % rootfs] if rootfs else []
|
||||
args = ['atomic', 'install', "--storage=%s" % backend, '--name=%s' % container] + system_list + user_list + rootfs_list + values_list + [image]
|
||||
atomic_bin = module.get_bin_path('atomic')
|
||||
args = [atomic_bin, 'install', "--storage=%s" % backend, '--name=%s' % container] + system_list + user_list + rootfs_list + values_list + [image]
|
||||
rc, out, err = module.run_command(args, check_rc=False)
|
||||
if rc != 0:
|
||||
module.fail_json(rc=rc, msg=err)
|
||||
@@ -112,7 +113,8 @@ def do_install(module, mode, rootfs, container, image, values_list, backend):
|
||||
|
||||
|
||||
def do_update(module, container, image, values_list):
|
||||
args = ['atomic', 'containers', 'update', "--rebase=%s" % image] + values_list + [container]
|
||||
atomic_bin = module.get_bin_path('atomic')
|
||||
args = [atomic_bin, 'containers', 'update', "--rebase=%s" % image] + values_list + [container]
|
||||
rc, out, err = module.run_command(args, check_rc=False)
|
||||
if rc != 0:
|
||||
module.fail_json(rc=rc, msg=err)
|
||||
@@ -122,7 +124,8 @@ def do_update(module, container, image, values_list):
|
||||
|
||||
|
||||
def do_uninstall(module, name, backend):
|
||||
args = ['atomic', 'uninstall', "--storage=%s" % backend, name]
|
||||
atomic_bin = module.get_bin_path('atomic')
|
||||
args = [atomic_bin, 'uninstall', "--storage=%s" % backend, name]
|
||||
rc, out, err = module.run_command(args, check_rc=False)
|
||||
if rc != 0:
|
||||
module.fail_json(rc=rc, msg=err)
|
||||
@@ -130,7 +133,8 @@ def do_uninstall(module, name, backend):
|
||||
|
||||
|
||||
def do_rollback(module, name):
|
||||
args = ['atomic', 'containers', 'rollback', name]
|
||||
atomic_bin = module.get_bin_path('atomic')
|
||||
args = [atomic_bin, 'containers', 'rollback', name]
|
||||
rc, out, err = module.run_command(args, check_rc=False)
|
||||
if rc != 0:
|
||||
module.fail_json(rc=rc, msg=err)
|
||||
@@ -148,14 +152,12 @@ def core(module):
|
||||
backend = module.params['backend']
|
||||
state = module.params['state']
|
||||
|
||||
atomic_bin = module.get_bin_path('atomic')
|
||||
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
|
||||
out = {}
|
||||
err = {}
|
||||
rc = 0
|
||||
|
||||
values_list = ["--set=%s" % x for x in values] if values else []
|
||||
|
||||
args = ['atomic', 'containers', 'list', '--no-trunc', '-n', '--all', '-f', 'backend=%s' % backend, '-f', 'container=%s' % name]
|
||||
args = [atomic_bin, 'containers', 'list', '--no-trunc', '-n', '--all', '-f', 'backend=%s' % backend, '-f', 'container=%s' % name]
|
||||
rc, out, err = module.run_command(args, check_rc=False)
|
||||
if rc != 0:
|
||||
module.fail_json(rc=rc, msg=err)
|
||||
@@ -194,9 +196,7 @@ def main():
|
||||
module.fail_json(msg="values is supported only with user or system mode")
|
||||
|
||||
# Verify that the platform supports atomic command
|
||||
rc, out, err = module.run_command('atomic -v', check_rc=False)
|
||||
if rc != 0:
|
||||
module.fail_json(msg="Error in running atomic command", err=err)
|
||||
dummy = module.get_bin_path('atomic', required=True)
|
||||
|
||||
try:
|
||||
core(module)
|
||||
|
||||
@@ -57,18 +57,14 @@ from ansible.module_utils._text import to_native
|
||||
|
||||
def core(module):
|
||||
revision = module.params['revision']
|
||||
args = []
|
||||
atomic_bin = module.get_bin_path('atomic', required=True)
|
||||
|
||||
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
|
||||
|
||||
if revision == 'latest':
|
||||
args = ['atomic', 'host', 'upgrade']
|
||||
args = [atomic_bin, 'host', 'upgrade']
|
||||
else:
|
||||
args = ['atomic', 'host', 'deploy', revision]
|
||||
|
||||
out = {}
|
||||
err = {}
|
||||
rc = 0
|
||||
args = [atomic_bin, 'host', 'deploy', revision]
|
||||
|
||||
rc, out, err = module.run_command(args, check_rc=False)
|
||||
|
||||
|
||||
@@ -73,7 +73,8 @@ from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
def do_upgrade(module, image):
|
||||
args = ['atomic', 'update', '--force', image]
|
||||
atomic_bin = module.get_bin_path('atomic')
|
||||
args = [atomic_bin, 'update', '--force', image]
|
||||
rc, out, err = module.run_command(args, check_rc=False)
|
||||
if rc != 0: # something went wrong emit the msg
|
||||
module.fail_json(rc=rc, msg=err)
|
||||
@@ -91,20 +92,21 @@ def core(module):
|
||||
is_upgraded = False
|
||||
|
||||
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
|
||||
atomic_bin = module.get_bin_path('atomic')
|
||||
out = {}
|
||||
err = {}
|
||||
rc = 0
|
||||
|
||||
if backend:
|
||||
if state == 'present' or state == 'latest':
|
||||
args = ['atomic', 'pull', "--storage=%s" % backend, image]
|
||||
args = [atomic_bin, 'pull', "--storage=%s" % backend, image]
|
||||
rc, out, err = module.run_command(args, check_rc=False)
|
||||
if rc < 0:
|
||||
module.fail_json(rc=rc, msg=err)
|
||||
else:
|
||||
out_run = ""
|
||||
if started:
|
||||
args = ['atomic', 'run', "--storage=%s" % backend, image]
|
||||
args = [atomic_bin, 'run', "--storage=%s" % backend, image]
|
||||
rc, out_run, err = module.run_command(args, check_rc=False)
|
||||
if rc < 0:
|
||||
module.fail_json(rc=rc, msg=err)
|
||||
@@ -112,7 +114,7 @@ def core(module):
|
||||
changed = "Extracting" in out or "Copying blob" in out
|
||||
module.exit_json(msg=(out + out_run), changed=changed)
|
||||
elif state == 'absent':
|
||||
args = ['atomic', 'images', 'delete', "--storage=%s" % backend, image]
|
||||
args = [atomic_bin, 'images', 'delete', "--storage=%s" % backend, image]
|
||||
rc, out, err = module.run_command(args, check_rc=False)
|
||||
if rc < 0:
|
||||
module.fail_json(rc=rc, msg=err)
|
||||
@@ -126,11 +128,11 @@ def core(module):
|
||||
is_upgraded = do_upgrade(module, image)
|
||||
|
||||
if started:
|
||||
args = ['atomic', 'run', image]
|
||||
args = [atomic_bin, 'run', image]
|
||||
else:
|
||||
args = ['atomic', 'install', image]
|
||||
args = [atomic_bin, 'install', image]
|
||||
elif state == 'absent':
|
||||
args = ['atomic', 'uninstall', image]
|
||||
args = [atomic_bin, 'uninstall', image]
|
||||
|
||||
rc, out, err = module.run_command(args, check_rc=False)
|
||||
|
||||
@@ -155,9 +157,7 @@ def main():
|
||||
)
|
||||
|
||||
# Verify that the platform supports atomic command
|
||||
rc, out, err = module.run_command('atomic -v', check_rc=False)
|
||||
if rc != 0:
|
||||
module.fail_json(msg="Error in running atomic command", err=err)
|
||||
dummy = module.get_bin_path('atomic', required=True)
|
||||
|
||||
try:
|
||||
core(module)
|
||||
|
||||
@@ -260,7 +260,7 @@ class DimensionDataNetworkModule(DimensionDataModule):
|
||||
)
|
||||
|
||||
self.module.fail_json(
|
||||
"Unexpected failure deleting network with id %s", network.id
|
||||
"Unexpected failure deleting network with id %s" % network.id
|
||||
)
|
||||
|
||||
except DimensionDataAPIException as e:
|
||||
|
||||
@@ -28,7 +28,6 @@ options:
|
||||
- The region of the instance. This is a required parameter only when
|
||||
creating Linode instances. See
|
||||
U(https://www.linode.com/docs/api/regions/).
|
||||
required: false
|
||||
type: str
|
||||
image:
|
||||
description:
|
||||
@@ -36,14 +35,12 @@ options:
|
||||
creating Linode instances. See
|
||||
U(https://www.linode.com/docs/api/images/).
|
||||
type: str
|
||||
required: false
|
||||
type:
|
||||
description:
|
||||
- The type of the instance. This is a required parameter only when
|
||||
creating Linode instances. See
|
||||
U(https://www.linode.com/docs/api/linode-types/).
|
||||
type: str
|
||||
required: false
|
||||
label:
|
||||
description:
|
||||
- The instance label. This label is used as the main determiner for
|
||||
@@ -56,12 +53,10 @@ options:
|
||||
group labelling is deprecated but still supported. The encouraged
|
||||
method for marking instances is to use tags.
|
||||
type: str
|
||||
required: false
|
||||
tags:
|
||||
description:
|
||||
- The tags that the instance should be marked under. See
|
||||
U(https://www.linode.com/docs/api/tags/).
|
||||
required: false
|
||||
type: list
|
||||
elements: str
|
||||
root_pass:
|
||||
@@ -69,12 +64,10 @@ options:
|
||||
- The password for the root user. If not specified, one will be
|
||||
generated. This generated password will be available in the task
|
||||
success JSON.
|
||||
required: false
|
||||
type: str
|
||||
authorized_keys:
|
||||
description:
|
||||
- A list of SSH public key parts to deploy for the root user.
|
||||
required: false
|
||||
type: list
|
||||
elements: str
|
||||
state:
|
||||
@@ -208,9 +201,8 @@ def create_linode(module, client, **kwargs):
|
||||
else:
|
||||
return response._raw_json
|
||||
except TypeError:
|
||||
module.fail_json(msg='Unable to parse Linode instance creation'
|
||||
' response. Please raise a bug against this'
|
||||
' module on https://github.com/ansible/ansible/issues'
|
||||
module.fail_json(msg='Unable to parse Linode instance creation response. Please raise a bug against this'
|
||||
' module on https://github.com/ansible-collections/community.general/issues'
|
||||
)
|
||||
|
||||
|
||||
@@ -242,15 +234,15 @@ def initialise_module():
|
||||
no_log=True,
|
||||
fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']),
|
||||
),
|
||||
authorized_keys=dict(type='list', elements='str', required=False, no_log=False),
|
||||
group=dict(type='str', required=False),
|
||||
image=dict(type='str', required=False),
|
||||
region=dict(type='str', required=False),
|
||||
root_pass=dict(type='str', required=False, no_log=True),
|
||||
tags=dict(type='list', elements='str', required=False),
|
||||
type=dict(type='str', required=False),
|
||||
stackscript_id=dict(type='int', required=False),
|
||||
stackscript_data=dict(type='dict', required=False),
|
||||
authorized_keys=dict(type='list', elements='str', no_log=False),
|
||||
group=dict(type='str'),
|
||||
image=dict(type='str'),
|
||||
region=dict(type='str'),
|
||||
root_pass=dict(type='str', no_log=True),
|
||||
tags=dict(type='list', elements='str'),
|
||||
type=dict(type='str'),
|
||||
stackscript_id=dict(type='int'),
|
||||
stackscript_data=dict(type='dict'),
|
||||
),
|
||||
supports_check_mode=False,
|
||||
required_one_of=(
|
||||
|
||||
@@ -243,7 +243,6 @@ except ImportError:
|
||||
HAS_OVIRTSDK = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
|
||||
|
||||
# ------------------------------------------------------------------- #
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: Tristan Le Guern (@Aversiste) <tleguern at bouledef.eu>
|
||||
# Copyright: Tristan Le Guern (@tleguern) <tleguern at bouledef.eu>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
@@ -21,7 +21,7 @@ options:
|
||||
- Restrict results to a specific authentication realm.
|
||||
aliases: ['realm', 'name']
|
||||
type: str
|
||||
author: Tristan Le Guern (@Aversiste)
|
||||
author: Tristan Le Guern (@tleguern)
|
||||
extends_documentation_fragment: community.general.proxmox.documentation
|
||||
'''
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@ options:
|
||||
- Restrict results to a specific group.
|
||||
aliases: ['groupid', 'name']
|
||||
type: str
|
||||
author: Tristan Le Guern (@Aversiste)
|
||||
author: Tristan Le Guern (@tleguern)
|
||||
extends_documentation_fragment: community.general.proxmox.documentation
|
||||
'''
|
||||
|
||||
|
||||
@@ -1226,7 +1226,7 @@ def main():
|
||||
if get_vm(proxmox, vmid) and not (update or clone):
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM with vmid <%s> already exists" % vmid)
|
||||
elif get_vmid(proxmox, name) and not (update or clone):
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM with name <%s> already exists" % name)
|
||||
module.exit_json(changed=False, vmid=get_vmid(proxmox, name)[0], msg="VM with name <%s> already exists" % name)
|
||||
elif not (node, name):
|
||||
module.fail_json(msg='node, name is mandatory for creating/updating vm')
|
||||
elif not node_check(proxmox, node):
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: Tristan Le Guern (@Aversiste) <tleguern at bouledef.eu>
|
||||
# Copyright: Tristan Le Guern (@tleguern) <tleguern at bouledef.eu>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
@@ -25,7 +25,7 @@ options:
|
||||
description:
|
||||
- Filter on a specifc storage type.
|
||||
type: str
|
||||
author: Tristan Le Guern (@Aversiste)
|
||||
author: Tristan Le Guern (@tleguern)
|
||||
extends_documentation_fragment: community.general.proxmox.documentation
|
||||
notes:
|
||||
- Storage specific options can be returned by this module, please look at the documentation at U(https://pve.proxmox.com/wiki/Storage).
|
||||
|
||||
@@ -30,7 +30,7 @@ options:
|
||||
description:
|
||||
- Restrict results to a specific user ID, which is a concatenation of a user and domain parts.
|
||||
type: str
|
||||
author: Tristan Le Guern (@Aversiste)
|
||||
author: Tristan Le Guern (@tleguern)
|
||||
extends_documentation_fragment: community.general.proxmox.documentation
|
||||
'''
|
||||
|
||||
|
||||
@@ -1229,24 +1229,6 @@ class RHEV(object):
|
||||
self.__get_conn()
|
||||
return self.conn.set_VM_Host(vmname, vmhost)
|
||||
|
||||
# pylint: disable=unreachable
|
||||
VM = self.conn.get_VM(vmname)
|
||||
HOST = self.conn.get_Host(vmhost)
|
||||
|
||||
if VM.placement_policy.host is None:
|
||||
self.conn.set_VM_Host(vmname, vmhost)
|
||||
elif str(VM.placement_policy.host.id) != str(HOST.id):
|
||||
self.conn.set_VM_Host(vmname, vmhost)
|
||||
else:
|
||||
setMsg("VM's startup host was already set to " + vmhost)
|
||||
checkFail()
|
||||
|
||||
if str(VM.status.state) == "up":
|
||||
self.conn.migrate_VM(vmname, vmhost)
|
||||
checkFail()
|
||||
|
||||
return True
|
||||
|
||||
def setHost(self, hostname, cluster, ifaces):
|
||||
self.__get_conn()
|
||||
return self.conn.set_Host(hostname, cluster, ifaces)
|
||||
|
||||
@@ -8,7 +8,7 @@ from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: terraform
|
||||
short_description: Manages a Terraform deployment (and plans)
|
||||
@@ -177,24 +177,31 @@ command:
|
||||
import os
|
||||
import json
|
||||
import tempfile
|
||||
from distutils.version import LooseVersion
|
||||
from ansible.module_utils.six.moves import shlex_quote
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
DESTROY_ARGS = ('destroy', '-no-color', '-force')
|
||||
APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true')
|
||||
module = None
|
||||
|
||||
|
||||
def preflight_validation(bin_path, project_path, variables_args=None, plan_file=None):
|
||||
def get_version(bin_path):
|
||||
extract_version = module.run_command([bin_path, 'version', '-json'])
|
||||
terraform_version = (json.loads(extract_version[1]))['terraform_version']
|
||||
return terraform_version
|
||||
|
||||
|
||||
def preflight_validation(bin_path, project_path, version, variables_args=None, plan_file=None):
|
||||
if project_path in [None, ''] or '/' not in project_path:
|
||||
module.fail_json(msg="Path for Terraform project can not be None or ''.")
|
||||
if not os.path.exists(bin_path):
|
||||
module.fail_json(msg="Path for Terraform binary '{0}' doesn't exist on this host - check the path and try again please.".format(bin_path))
|
||||
if not os.path.isdir(project_path):
|
||||
module.fail_json(msg="Path for Terraform project '{0}' doesn't exist on this host - check the path and try again please.".format(project_path))
|
||||
|
||||
rc, out, err = module.run_command([bin_path, 'validate'] + variables_args, check_rc=True, cwd=project_path, use_unsafe_shell=True)
|
||||
if LooseVersion(version) < LooseVersion('0.15.0'):
|
||||
rc, out, err = module.run_command([bin_path, 'validate'] + variables_args, check_rc=True, cwd=project_path)
|
||||
else:
|
||||
rc, out, err = module.run_command([bin_path, 'validate'], check_rc=True, cwd=project_path)
|
||||
|
||||
|
||||
def _state_args(state_file):
|
||||
@@ -267,7 +274,7 @@ def build_plan(command, project_path, variables_args, state_file, targets, state
|
||||
|
||||
plan_command.extend(_state_args(state_file))
|
||||
|
||||
rc, out, err = module.run_command(plan_command + variables_args, cwd=project_path, use_unsafe_shell=True)
|
||||
rc, out, err = module.run_command(plan_command + variables_args, cwd=project_path)
|
||||
|
||||
if rc == 0:
|
||||
# no changes
|
||||
@@ -326,6 +333,15 @@ def main():
|
||||
else:
|
||||
command = [module.get_bin_path('terraform', required=True)]
|
||||
|
||||
checked_version = get_version(command[0])
|
||||
|
||||
if LooseVersion(checked_version) < LooseVersion('0.15.0'):
|
||||
DESTROY_ARGS = ('destroy', '-no-color', '-force')
|
||||
APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true')
|
||||
else:
|
||||
DESTROY_ARGS = ('destroy', '-no-color', '-auto-approve')
|
||||
APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve')
|
||||
|
||||
if force_init:
|
||||
init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure)
|
||||
|
||||
@@ -351,7 +367,7 @@ def main():
|
||||
for f in variables_files:
|
||||
variables_args.extend(['-var-file', f])
|
||||
|
||||
preflight_validation(command[0], project_path, variables_args)
|
||||
preflight_validation(command[0], project_path, checked_version, variables_args)
|
||||
|
||||
if module.params.get('lock') is not None:
|
||||
if module.params.get('lock'):
|
||||
@@ -382,7 +398,14 @@ def main():
|
||||
command.append(plan_file)
|
||||
|
||||
if needs_application and not module.check_mode and not state == 'planned':
|
||||
rc, out, err = module.run_command(command, check_rc=True, cwd=project_path)
|
||||
rc, out, err = module.run_command(command, check_rc=False, cwd=project_path)
|
||||
if rc != 0:
|
||||
if workspace_ctx["current"] != workspace:
|
||||
select_workspace(command[0], project_path, workspace_ctx["current"])
|
||||
module.fail_json(msg=err.rstrip(), rc=rc, stdout=out,
|
||||
stdout_lines=out.splitlines(), stderr=err,
|
||||
stderr_lines=err.splitlines(),
|
||||
cmd=' '.join(command))
|
||||
# checks out to decide if changes were made during execution
|
||||
if ' 0 added, 0 changed' not in out and not state == "absent" or ' 0 destroyed' not in out:
|
||||
changed = True
|
||||
|
||||
@@ -752,11 +752,20 @@ def get_vm_info(client, vm):
|
||||
if 'NIC' in vm.TEMPLATE:
|
||||
if isinstance(vm.TEMPLATE['NIC'], list):
|
||||
for nic in vm.TEMPLATE['NIC']:
|
||||
networks_info.append({'ip': nic['IP'], 'mac': nic['MAC'], 'name': nic['NETWORK'], 'security_groups': nic['SECURITY_GROUPS']})
|
||||
networks_info.append({
|
||||
'ip': nic.get('IP', ''),
|
||||
'mac': nic.get('MAC', ''),
|
||||
'name': nic.get('NETWORK', ''),
|
||||
'security_groups': nic.get('SECURITY_GROUPS', '')
|
||||
})
|
||||
else:
|
||||
networks_info.append(
|
||||
{'ip': vm.TEMPLATE['NIC']['IP'], 'mac': vm.TEMPLATE['NIC']['MAC'],
|
||||
'name': vm.TEMPLATE['NIC']['NETWORK'], 'security_groups': vm.TEMPLATE['NIC']['SECURITY_GROUPS']})
|
||||
networks_info.append({
|
||||
'ip': vm.TEMPLATE['NIC'].get('IP', ''),
|
||||
'mac': vm.TEMPLATE['NIC'].get('MAC', ''),
|
||||
'name': vm.TEMPLATE['NIC'].get('NETWORK', ''),
|
||||
'security_groups':
|
||||
vm.TEMPLATE['NIC'].get('SECURITY_GROUPS', '')
|
||||
})
|
||||
import time
|
||||
|
||||
current_time = time.localtime()
|
||||
|
||||
@@ -162,7 +162,6 @@ def waitForTaskDone(client, name, taskId, timeout):
|
||||
currentTimeout -= 5
|
||||
if currentTimeout < 0:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
@@ -111,7 +111,6 @@ ovirt_affinity_labels:
|
||||
import fnmatch
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -52,7 +52,6 @@ ovirt_api:
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -73,7 +73,6 @@ ovirt_clusters:
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -56,7 +56,6 @@ ovirt_datacenters:
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -72,7 +72,6 @@ ovirt_disks:
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -103,7 +103,6 @@ ovirt_events:
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -87,7 +87,6 @@ ovirt_external_providers:
|
||||
import fnmatch
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -71,7 +71,6 @@ ovirt_groups:
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -78,7 +78,6 @@ ovirt_hosts:
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -92,7 +92,6 @@ try:
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -73,7 +73,6 @@ ovirt_networks:
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -76,7 +76,6 @@ ovirt_nics:
|
||||
import fnmatch
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -88,7 +88,6 @@ try:
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -76,7 +76,6 @@ ovirt_quotas:
|
||||
import fnmatch
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -65,7 +65,6 @@ ovirt_snapshots:
|
||||
import fnmatch
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -74,7 +74,6 @@ ovirt_storage_domains:
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -79,7 +79,6 @@ ovirt_storage_templates:
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -79,7 +79,6 @@ ovirt_storage_vms:
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -95,7 +95,6 @@ ovirt_tags:
|
||||
import fnmatch
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -72,7 +72,6 @@ ovirt_templates:
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -71,7 +71,6 @@ ovirt_users:
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -102,7 +102,6 @@ ovirt_vms:
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -71,7 +71,6 @@ ovirt_vm_pools:
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.removed import removed_module
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils._ovirt import (
|
||||
check_sdk,
|
||||
|
||||
@@ -119,20 +119,13 @@ class NicTag(object):
|
||||
return is_mac(self.mac.lower())
|
||||
|
||||
def nictag_exists(self):
|
||||
cmd = [self.nictagadm_bin]
|
||||
|
||||
cmd.append('exists')
|
||||
cmd.append(self.name)
|
||||
|
||||
cmd = [self.nictagadm_bin, 'exists', self.name]
|
||||
(rc, dummy, dummy) = self.module.run_command(cmd)
|
||||
|
||||
return rc == 0
|
||||
|
||||
def add_nictag(self):
|
||||
cmd = [self.nictagadm_bin]
|
||||
|
||||
cmd.append('-v')
|
||||
cmd.append('add')
|
||||
cmd = [self.nictagadm_bin, '-v', 'add']
|
||||
|
||||
if self.etherstub:
|
||||
cmd.append('-l')
|
||||
@@ -150,10 +143,7 @@ class NicTag(object):
|
||||
return self.module.run_command(cmd)
|
||||
|
||||
def delete_nictag(self):
|
||||
cmd = [self.nictagadm_bin]
|
||||
|
||||
cmd.append('-v')
|
||||
cmd.append('delete')
|
||||
cmd = [self.nictagadm_bin, '-v', 'delete']
|
||||
|
||||
if self.force:
|
||||
cmd.append('-f')
|
||||
|
||||
@@ -72,10 +72,7 @@ class ImageFacts(object):
|
||||
self.filters = module.params['filters']
|
||||
|
||||
def return_all_installed_images(self):
|
||||
cmd = [self.module.get_bin_path('imgadm')]
|
||||
|
||||
cmd.append('list')
|
||||
cmd.append('-j')
|
||||
cmd = [self.module.get_bin_path('imgadm'), 'list', '-j']
|
||||
|
||||
if self.filters:
|
||||
cmd.append(self.filters)
|
||||
|
||||
@@ -233,7 +233,7 @@ options:
|
||||
description:
|
||||
- List of resolvers to be put into C(/etc/resolv.conf).
|
||||
type: list
|
||||
elements: dict
|
||||
elements: str
|
||||
routes:
|
||||
required: false
|
||||
description:
|
||||
@@ -702,7 +702,7 @@ def main():
|
||||
vnc_password=dict(type='str', no_log=True),
|
||||
disks=dict(type='list', elements='dict'),
|
||||
nics=dict(type='list', elements='dict'),
|
||||
resolvers=dict(type='list', elements='dict'),
|
||||
resolvers=dict(type='list', elements='str'),
|
||||
filesystems=dict(type='list', elements='dict'),
|
||||
)
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
@@ -24,14 +24,14 @@ notes:
|
||||
Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
|
||||
Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub:
|
||||
U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
|
||||
- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
|
||||
- 'If no scheme is specified in I(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
|
||||
accessing XenServer host in trusted environment or use C(https://) scheme explicitly.'
|
||||
- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no)
|
||||
- 'To use C(https://) scheme for I(hostname) you have to either import host certificate to your OS certificate store or use I(validate_certs): C(no)
|
||||
which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
|
||||
- 'Network configuration inside a guest OS, by using C(networks.type), C(networks.ip), C(networks.gateway) etc. parameters, is supported on
|
||||
- 'Network configuration inside a guest OS, by using I(networks.type), I(networks.ip), I(networks.gateway) etc. parameters, is supported on
|
||||
XenServer 7.0 or newer for Windows guests by using official XenServer Guest agent support for network configuration. The module will try to
|
||||
detect if such support is available and utilize it, else it will use a custom method of configuration via xenstore. Since XenServer Guest
|
||||
agent only support None and Static types of network configuration, where None means DHCP configured interface, C(networks.type) and C(networks.type6)
|
||||
agent only support None and Static types of network configuration, where None means DHCP configured interface, I(networks.type) and I(networks.type6)
|
||||
values C(none) and C(dhcp) have same effect. More info here:
|
||||
U(https://www.citrix.com/community/citrix-developer/citrix-hypervisor-developer/citrix-hypervisor-developing-products/citrix-hypervisor-staticip.html)'
|
||||
- 'On platforms without official support for network configuration inside a guest OS, network parameters will be written to xenstore
|
||||
@@ -49,10 +49,10 @@ options:
|
||||
state:
|
||||
description:
|
||||
- Specify the state VM should be in.
|
||||
- If C(state) is set to C(present) and VM exists, ensure the VM configuration conforms to given parameters.
|
||||
- If C(state) is set to C(present) and VM does not exist, then VM is deployed with given parameters.
|
||||
- If C(state) is set to C(absent) and VM exists, then VM is removed with its associated components.
|
||||
- If C(state) is set to C(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on automatically.
|
||||
- If I(state) is set to C(present) and VM exists, ensure the VM configuration conforms to given parameters.
|
||||
- If I(state) is set to C(present) and VM does not exist, then VM is deployed with given parameters.
|
||||
- If I(state) is set to C(absent) and VM exists, then VM is removed with its associated components.
|
||||
- If I(state) is set to C(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on automatically.
|
||||
type: str
|
||||
default: present
|
||||
choices: [ present, absent, poweredon ]
|
||||
@@ -60,10 +60,9 @@ options:
|
||||
description:
|
||||
- Name of the VM to work with.
|
||||
- VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found.
|
||||
- In case of multiple VMs with same name, use C(uuid) to uniquely specify VM to manage.
|
||||
- In case of multiple VMs with same name, use I(uuid) to uniquely specify VM to manage.
|
||||
- This parameter is case sensitive.
|
||||
type: str
|
||||
required: yes
|
||||
aliases: [ name_label ]
|
||||
name_desc:
|
||||
description:
|
||||
@@ -79,7 +78,7 @@ options:
|
||||
description:
|
||||
- Name of a template, an existing VM (must be shut down) or a snapshot that should be used to create VM.
|
||||
- Templates/VMs/snapshots on XenServer do not necessarily have unique names. The module will fail if multiple templates with same name are found.
|
||||
- In case of multiple templates/VMs/snapshots with same name, use C(template_uuid) to uniquely specify source template.
|
||||
- In case of multiple templates/VMs/snapshots with same name, use I(template_uuid) to uniquely specify source template.
|
||||
- If VM already exists, this setting will be ignored.
|
||||
- This parameter is case sensitive.
|
||||
type: str
|
||||
@@ -104,56 +103,138 @@ options:
|
||||
hardware:
|
||||
description:
|
||||
- Manage VM's hardware parameters. VM needs to be shut down to reconfigure these parameters.
|
||||
- 'Valid parameters are:'
|
||||
- ' - C(num_cpus) (integer): Number of CPUs.'
|
||||
- ' - C(num_cpu_cores_per_socket) (integer): Number of Cores Per Socket. C(num_cpus) has to be a multiple of C(num_cpu_cores_per_socket).'
|
||||
- ' - C(memory_mb) (integer): Amount of memory in MB.'
|
||||
type: dict
|
||||
suboptions:
|
||||
num_cpus:
|
||||
description:
|
||||
- Number of CPUs.
|
||||
type: int
|
||||
num_cpu_cores_per_socket:
|
||||
description:
|
||||
- Number of Cores Per Socket. I(num_cpus) has to be a multiple of I(num_cpu_cores_per_socket).
|
||||
type: int
|
||||
memory_mb:
|
||||
description:
|
||||
- Amount of memory in MB.
|
||||
type: int
|
||||
disks:
|
||||
description:
|
||||
- A list of disks to add to VM.
|
||||
- All parameters are case sensitive.
|
||||
- Removing or detaching existing disks of VM is not supported.
|
||||
- 'Required parameters per entry:'
|
||||
- ' - C(size_[tb,gb,mb,kb,b]) (integer): Disk storage size in specified unit. VM needs to be shut down to reconfigure this parameter.'
|
||||
- 'Optional parameters per entry:'
|
||||
- ' - C(name) (string): Disk name. You can also use C(name_label) as an alias.'
|
||||
- ' - C(name_desc) (string): Disk description.'
|
||||
- ' - C(sr) (string): Storage Repository to create disk on. If not specified, will use default SR. Cannot be used for moving disk to other SR.'
|
||||
- ' - C(sr_uuid) (string): UUID of a SR to create disk on. Use if SR name is not unique.'
|
||||
- New disks are required to have either a I(size) or one of I(size_[tb,gb,mb,kb,b]) parameters specified.
|
||||
- VM needs to be shut down to reconfigure disk size.
|
||||
type: list
|
||||
elements: dict
|
||||
aliases: [ disk ]
|
||||
suboptions:
|
||||
size:
|
||||
description:
|
||||
- 'Disk size with unit. Unit must be: C(b), C(kb), C(mb), C(gb), C(tb). VM needs to be shut down to reconfigure this parameter.'
|
||||
- If no unit is specified, size is assumed to be in bytes.
|
||||
type: str
|
||||
size_b:
|
||||
description:
|
||||
- Disk size in bytes.
|
||||
type: str
|
||||
size_kb:
|
||||
description:
|
||||
- Disk size in kilobytes.
|
||||
type: str
|
||||
size_mb:
|
||||
description:
|
||||
- Disk size in megabytes.
|
||||
type: str
|
||||
size_gb:
|
||||
description:
|
||||
- Disk size in gigabytes.
|
||||
type: str
|
||||
size_tb:
|
||||
description:
|
||||
- Disk size in terabytes.
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- Disk name.
|
||||
type: str
|
||||
aliases: [ name_label ]
|
||||
name_desc:
|
||||
description:
|
||||
- Disk description.
|
||||
type: str
|
||||
sr:
|
||||
description:
|
||||
- Storage Repository to create disk on. If not specified, will use default SR. Cannot be used for moving disk to other SR.
|
||||
type: str
|
||||
sr_uuid:
|
||||
description:
|
||||
- UUID of a SR to create disk on. Use if SR name is not unique.
|
||||
type: str
|
||||
cdrom:
|
||||
description:
|
||||
- A CD-ROM configuration for the VM.
|
||||
- All parameters are case sensitive.
|
||||
- 'Valid parameters are:'
|
||||
- ' - C(type) (string): The type of CD-ROM, valid options are C(none) or C(iso). With C(none) the CD-ROM device will be present but empty.'
|
||||
- ' - C(iso_name) (string): The file name of an ISO image from one of the XenServer ISO Libraries (implies C(type: iso)).
|
||||
Required if C(type) is set to C(iso).'
|
||||
type: dict
|
||||
suboptions:
|
||||
type:
|
||||
description:
|
||||
- The type of CD-ROM. With C(none) the CD-ROM device will be present but empty.
|
||||
type: str
|
||||
choices: [ none, iso ]
|
||||
iso_name:
|
||||
description:
|
||||
- 'The file name of an ISO image from one of the XenServer ISO Libraries (implies I(type): C(iso)).'
|
||||
- Required if I(type) is set to C(iso).
|
||||
type: str
|
||||
networks:
|
||||
description:
|
||||
- A list of networks (in the order of the NICs).
|
||||
- All parameters are case sensitive.
|
||||
- 'Required parameters per entry:'
|
||||
- ' - C(name) (string): Name of a XenServer network to attach the network interface to. You can also use C(name_label) as an alias.'
|
||||
- 'Optional parameters per entry (used for VM hardware):'
|
||||
- ' - C(mac) (string): Customize MAC address of the interface.'
|
||||
- 'Optional parameters per entry (used for OS customization):'
|
||||
- ' - C(type) (string): Type of IPv4 assignment, valid options are C(none), C(dhcp) or C(static). Value C(none) means whatever is default for OS.
|
||||
On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux).'
|
||||
- ' - C(ip) (string): Static IPv4 address (implies C(type: static)). Can include prefix in format <IPv4 address>/<prefix> instead of using C(netmask).'
|
||||
- ' - C(netmask) (string): Static IPv4 netmask required for C(ip) if prefix is not specified.'
|
||||
- ' - C(gateway) (string): Static IPv4 gateway.'
|
||||
- ' - C(type6) (string): Type of IPv6 assignment, valid options are C(none), C(dhcp) or C(static). Value C(none) means whatever is default for OS.
|
||||
On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux).'
|
||||
- ' - C(ip6) (string): Static IPv6 address (implies C(type6: static)) with prefix in format <IPv6 address>/<prefix>.'
|
||||
- ' - C(gateway6) (string): Static IPv6 gateway.'
|
||||
- Name is required for new NICs. Other parameters are optional in all cases.
|
||||
type: list
|
||||
elements: dict
|
||||
aliases: [ network ]
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- Name of a XenServer network to attach the network interface to.
|
||||
type: str
|
||||
aliases: [ name_label ]
|
||||
mac:
|
||||
description:
|
||||
- Customize MAC address of the interface.
|
||||
type: str
|
||||
type:
|
||||
description:
|
||||
- Type of IPv4 assignment. Value C(none) means whatever is default for OS.
|
||||
- On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux).
|
||||
type: str
|
||||
choices: [ none, dhcp, static ]
|
||||
ip:
|
||||
description:
|
||||
- 'Static IPv4 address (implies I(type): C(static)). Can include prefix in format C(<IPv4 address>/<prefix>) instead of using C(netmask).'
|
||||
type: str
|
||||
netmask:
|
||||
description:
|
||||
- Static IPv4 netmask required for I(ip) if prefix is not specified.
|
||||
type: str
|
||||
gateway:
|
||||
description:
|
||||
- Static IPv4 gateway.
|
||||
type: str
|
||||
type6:
|
||||
description:
|
||||
- Type of IPv6 assignment. Value C(none) means whatever is default for OS.
|
||||
type: str
|
||||
choices: [ none, dhcp, static ]
|
||||
ip6:
|
||||
description:
|
||||
- 'Static IPv6 address (implies I(type6): C(static)) with prefix in format C(<IPv6 address>/<prefix>).'
|
||||
type: str
|
||||
gateway6:
|
||||
description:
|
||||
- Static IPv6 gateway.
|
||||
type: str
|
||||
home_server:
|
||||
description:
|
||||
- Name of a XenServer host that will be a Home Server for the VM.
|
||||
@@ -163,18 +244,29 @@ options:
|
||||
description:
|
||||
- Define a list of custom VM params to set on VM.
|
||||
- Useful for advanced users familiar with managing VM params trough xe CLI.
|
||||
- A custom value object takes two fields C(key) and C(value) (see example below).
|
||||
- A custom value object takes two fields I(key) and I(value) (see example below).
|
||||
type: list
|
||||
elements: dict
|
||||
suboptions:
|
||||
key:
|
||||
description:
|
||||
- VM param name.
|
||||
type: str
|
||||
required: yes
|
||||
value:
|
||||
description:
|
||||
- VM param value.
|
||||
type: raw
|
||||
required: yes
|
||||
wait_for_ip_address:
|
||||
description:
|
||||
- Wait until XenServer detects an IP address for the VM. If C(state) is set to C(absent), this parameter is ignored.
|
||||
- Wait until XenServer detects an IP address for the VM. If I(state) is set to C(absent), this parameter is ignored.
|
||||
- This requires XenServer Tools to be preinstalled on the VM to work properly.
|
||||
type: bool
|
||||
default: no
|
||||
state_change_timeout:
|
||||
description:
|
||||
- 'By default, module will wait indefinitely for VM to accquire an IP address if C(wait_for_ip_address: yes).'
|
||||
- 'By default, module will wait indefinitely for VM to accquire an IP address if I(wait_for_ip_address): C(yes).'
|
||||
- If this parameter is set to positive value, the module will instead wait specified number of seconds for the state change.
|
||||
- In case of timeout, module will generate an error message.
|
||||
type: int
|
||||
@@ -441,11 +533,12 @@ except ImportError:
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.common.network import is_mac
|
||||
from ansible.module_utils import six
|
||||
from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
|
||||
gather_vm_params, gather_vm_facts, set_vm_power_state,
|
||||
wait_for_vm_ip_address, is_valid_ip_addr, is_valid_ip_netmask,
|
||||
is_valid_ip_prefix, ip_prefix_to_netmask, ip_netmask_to_prefix,
|
||||
is_valid_ip6_addr, is_valid_ip6_prefix)
|
||||
from ansible_collections.community.general.plugins.module_utils.xenserver import (
|
||||
xenserver_common_argument_spec, XenServerObject, get_object_ref,
|
||||
gather_vm_params, gather_vm_facts, set_vm_power_state,
|
||||
wait_for_vm_ip_address, is_valid_ip_addr, is_valid_ip_netmask,
|
||||
is_valid_ip_prefix, ip_prefix_to_netmask, ip_netmask_to_prefix,
|
||||
is_valid_ip6_addr, is_valid_ip6_prefix)
|
||||
|
||||
|
||||
class XenServerVM(XenServerObject):
|
||||
|
||||
@@ -189,7 +189,24 @@ from collections import defaultdict
|
||||
from ansible.module_utils.basic import to_text, AnsibleModule
|
||||
|
||||
|
||||
RULE_SCOPES = ["agent", "event", "key", "keyring", "node", "operator", "query", "service", "session"]
|
||||
RULE_SCOPES = [
|
||||
"agent",
|
||||
"agent_prefix",
|
||||
"event",
|
||||
"event_prefix",
|
||||
"key",
|
||||
"key_prefix",
|
||||
"keyring",
|
||||
"node",
|
||||
"node_prefix",
|
||||
"operator",
|
||||
"query",
|
||||
"query_prefix",
|
||||
"service",
|
||||
"service_prefix",
|
||||
"session",
|
||||
"session_prefix",
|
||||
]
|
||||
|
||||
MANAGEMENT_PARAMETER_NAME = "mgmt_token"
|
||||
HOST_PARAMETER_NAME = "host"
|
||||
|
||||
@@ -36,13 +36,13 @@ seealso:
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get info for job awx
|
||||
community.general.nomad_job:
|
||||
community.general.nomad_job_info:
|
||||
host: localhost
|
||||
name: awx
|
||||
register: result
|
||||
|
||||
- name: List Nomad jobs
|
||||
community.general.nomad_job:
|
||||
community.general.nomad_job_info:
|
||||
host: localhost
|
||||
register: result
|
||||
|
||||
|
||||
@@ -31,7 +31,9 @@ options:
|
||||
type: str
|
||||
duration:
|
||||
description:
|
||||
- Determines how long InfluxDB should keep the data.
|
||||
- Determines how long InfluxDB should keep the data. If specified, it
|
||||
should be C(INF) or at least one hour. If not specified, C(INF) is
|
||||
assumed. Supports complex duration expressions with multiple units.
|
||||
required: true
|
||||
type: str
|
||||
replication:
|
||||
@@ -46,9 +48,10 @@ options:
|
||||
default: false
|
||||
shard_group_duration:
|
||||
description:
|
||||
- Determines the size of a shard group.
|
||||
- Value needs to be integer literal followed immediately (with no spaces) by a duration unit.
|
||||
Supported duration units are C(h) for hours, C(d) for days, and C(w) for weeks. For example C(10d), C(1h), C(2w).
|
||||
- Determines the time range covered by a shard group. If specified it
|
||||
must be at least one hour. If none, it's determined by InfluxDB by
|
||||
the rentention policy's duration. Supports complex duration expressions
|
||||
with multiple units.
|
||||
type: str
|
||||
version_added: '2.0.0'
|
||||
extends_documentation_fragment:
|
||||
@@ -96,6 +99,17 @@ EXAMPLES = r'''
|
||||
ssl: no
|
||||
validate_certs: no
|
||||
shard_group_duration: 1w
|
||||
|
||||
- name: Create retention policy with complex durations
|
||||
community.general.influxdb_retention_policy:
|
||||
hostname: "{{influxdb_ip_address}}"
|
||||
database_name: "{{influxdb_database_name}}"
|
||||
policy_name: test
|
||||
duration: 5d1h30m
|
||||
replication: 1
|
||||
ssl: no
|
||||
validate_certs: no
|
||||
shard_group_duration: 1d10h30m
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
@@ -115,6 +129,51 @@ from ansible_collections.community.general.plugins.module_utils.influxdb import
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
VALID_DURATION_REGEX = re.compile(r'^(INF|(\d+(ns|u|µ|ms|s|m|h|d|w)))+$')
|
||||
|
||||
DURATION_REGEX = re.compile(r'(\d+)(ns|u|µ|ms|s|m|h|d|w)')
|
||||
EXTENDED_DURATION_REGEX = re.compile(r'(?:(\d+)(ns|u|µ|ms|m|h|d|w)|(\d+(?:\.\d+)?)(s))')
|
||||
|
||||
|
||||
def check_duration_literal(value):
|
||||
return VALID_DURATION_REGEX.search(value) is not None
|
||||
|
||||
|
||||
def parse_duration_literal(value, extended=False):
|
||||
duration = 0.0
|
||||
|
||||
if value == "INF":
|
||||
return duration
|
||||
|
||||
lookup = (EXTENDED_DURATION_REGEX if extended else DURATION_REGEX).findall(value)
|
||||
|
||||
for duration_literal in lookup:
|
||||
if extended and duration_literal[3] == 's':
|
||||
duration_val = float(duration_literal[2])
|
||||
duration += duration_val * 1000 * 1000 * 1000
|
||||
else:
|
||||
duration_val = int(duration_literal[0])
|
||||
|
||||
if duration_literal[1] == 'ns':
|
||||
duration += duration_val
|
||||
elif duration_literal[1] == 'u' or duration_literal[1] == 'µ':
|
||||
duration += duration_val * 1000
|
||||
elif duration_literal[1] == 'ms':
|
||||
duration += duration_val * 1000 * 1000
|
||||
elif duration_literal[1] == 's':
|
||||
duration += duration_val * 1000 * 1000 * 1000
|
||||
elif duration_literal[1] == 'm':
|
||||
duration += duration_val * 1000 * 1000 * 1000 * 60
|
||||
elif duration_literal[1] == 'h':
|
||||
duration += duration_val * 1000 * 1000 * 1000 * 60 * 60
|
||||
elif duration_literal[1] == 'd':
|
||||
duration += duration_val * 1000 * 1000 * 1000 * 60 * 60 * 24
|
||||
elif duration_literal[1] == 'w':
|
||||
duration += duration_val * 1000 * 1000 * 1000 * 60 * 60 * 24 * 7
|
||||
|
||||
return duration
|
||||
|
||||
|
||||
def find_retention_policy(module, client):
|
||||
database_name = module.params['database_name']
|
||||
policy_name = module.params['policy_name']
|
||||
@@ -129,6 +188,11 @@ def find_retention_policy(module, client):
|
||||
break
|
||||
except requests.exceptions.ConnectionError as e:
|
||||
module.fail_json(msg="Cannot connect to database %s on %s : %s" % (database_name, hostname, to_native(e)))
|
||||
|
||||
if retention_policy is not None:
|
||||
retention_policy["duration"] = parse_duration_literal(retention_policy["duration"], extended=True)
|
||||
retention_policy["shardGroupDuration"] = parse_duration_literal(retention_policy["shardGroupDuration"], extended=True)
|
||||
|
||||
return retention_policy
|
||||
|
||||
|
||||
@@ -140,6 +204,21 @@ def create_retention_policy(module, client):
|
||||
default = module.params['default']
|
||||
shard_group_duration = module.params['shard_group_duration']
|
||||
|
||||
if not check_duration_literal(duration):
|
||||
module.fail_json(msg="Failed to parse value of duration")
|
||||
|
||||
influxdb_duration_format = parse_duration_literal(duration)
|
||||
if influxdb_duration_format != 0 and influxdb_duration_format < 3600000000000:
|
||||
module.fail_json(msg="duration value must be at least 1h")
|
||||
|
||||
if shard_group_duration is not None:
|
||||
if not check_duration_literal(shard_group_duration):
|
||||
module.fail_json(msg="Failed to parse value of shard_group_duration")
|
||||
|
||||
influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration)
|
||||
if influxdb_shard_group_duration_format < 3600000000000:
|
||||
module.fail_json(msg="shard_group_duration value must be finite and at least 1h")
|
||||
|
||||
if not module.check_mode:
|
||||
try:
|
||||
if shard_group_duration:
|
||||
@@ -159,38 +238,30 @@ def alter_retention_policy(module, client, retention_policy):
|
||||
replication = module.params['replication']
|
||||
default = module.params['default']
|
||||
shard_group_duration = module.params['shard_group_duration']
|
||||
duration_regexp = re.compile(r'(\d+)([hdw]{1})|(^INF$){1}')
|
||||
|
||||
changed = False
|
||||
|
||||
duration_lookup = duration_regexp.search(duration)
|
||||
if not check_duration_literal(duration):
|
||||
module.fail_json(msg="Failed to parse value of duration")
|
||||
|
||||
if duration_lookup.group(2) == 'h':
|
||||
influxdb_duration_format = '%s0m0s' % duration
|
||||
elif duration_lookup.group(2) == 'd':
|
||||
influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24)
|
||||
elif duration_lookup.group(2) == 'w':
|
||||
influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24 * 7)
|
||||
elif duration == 'INF':
|
||||
influxdb_duration_format = '0'
|
||||
influxdb_duration_format = parse_duration_literal(duration)
|
||||
if influxdb_duration_format != 0 and influxdb_duration_format < 3600000000000:
|
||||
module.fail_json(msg="duration value must be at least 1h")
|
||||
|
||||
if shard_group_duration:
|
||||
shard_group_duration_lookup = duration_regexp.search(shard_group_duration)
|
||||
if not shard_group_duration_lookup:
|
||||
module.fail_json(
|
||||
msg="Failed to parse value of shard_group_duration. Please see the documentation for valid values")
|
||||
if shard_group_duration_lookup.group(2) == 'h':
|
||||
influxdb_shard_group_duration_format = '%s0m0s' % duration
|
||||
elif shard_group_duration_lookup.group(2) == 'd':
|
||||
influxdb_shard_group_duration_format = '%sh0m0s' % (int(shard_group_duration_lookup.group(1)) * 24)
|
||||
elif shard_group_duration_lookup.group(2) == 'w':
|
||||
influxdb_shard_group_duration_format = '%sh0m0s' % (int(shard_group_duration_lookup.group(1)) * 24 * 7)
|
||||
if shard_group_duration is None:
|
||||
influxdb_shard_group_duration_format = retention_policy["shardGroupDuration"]
|
||||
else:
|
||||
influxdb_shard_group_duration_format = retention_policy['shardGroupDuration']
|
||||
if not check_duration_literal(shard_group_duration):
|
||||
module.fail_json(msg="Failed to parse value of shard_group_duration")
|
||||
|
||||
if (not retention_policy['duration'] == influxdb_duration_format or
|
||||
not retention_policy['replicaN'] == int(replication) or
|
||||
not retention_policy['shardGroupDuration'] == influxdb_shard_group_duration_format or
|
||||
not retention_policy['default'] == default):
|
||||
influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration)
|
||||
if influxdb_shard_group_duration_format < 3600000000000:
|
||||
module.fail_json(msg="shard_group_duration value must be finite and at least 1h")
|
||||
|
||||
if (retention_policy['duration'] != influxdb_duration_format or
|
||||
retention_policy['shardGroupDuration'] != influxdb_shard_group_duration_format or
|
||||
retention_policy['replicaN'] != int(replication) or
|
||||
retention_policy['default'] != default):
|
||||
if not module.check_mode:
|
||||
try:
|
||||
client.alter_retention_policy(policy_name, database_name, duration, replication, default,
|
||||
|
||||
@@ -100,6 +100,8 @@ RETURN = r'''
|
||||
#only defaults
|
||||
'''
|
||||
|
||||
import json
|
||||
|
||||
from ansible.module_utils.urls import ConnectionError
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
@@ -115,7 +117,7 @@ def find_user(module, client, user_name):
|
||||
if user['user'] == user_name:
|
||||
user_result = user
|
||||
break
|
||||
except (ConnectionError, influx.exceptions.InfluxDBClientError) as e:
|
||||
except ConnectionError as e:
|
||||
module.fail_json(msg=to_native(e))
|
||||
return user_result
|
||||
|
||||
@@ -166,16 +168,16 @@ def set_user_grants(module, client, user_name, grants):
|
||||
|
||||
try:
|
||||
current_grants = client.get_list_privileges(user_name)
|
||||
parsed_grants = []
|
||||
# Fix privileges wording
|
||||
for i, v in enumerate(current_grants):
|
||||
if v['privilege'] == 'ALL PRIVILEGES':
|
||||
v['privilege'] = 'ALL'
|
||||
current_grants[i] = v
|
||||
elif v['privilege'] == 'NO PRIVILEGES':
|
||||
del(current_grants[i])
|
||||
if v['privilege'] != 'NO PRIVILEGES':
|
||||
if v['privilege'] == 'ALL PRIVILEGES':
|
||||
v['privilege'] = 'ALL'
|
||||
parsed_grants.append(v)
|
||||
|
||||
# check if the current grants are included in the desired ones
|
||||
for current_grant in current_grants:
|
||||
for current_grant in parsed_grants:
|
||||
if current_grant not in grants:
|
||||
if not module.check_mode:
|
||||
client.revoke_privilege(current_grant['privilege'],
|
||||
@@ -185,7 +187,7 @@ def set_user_grants(module, client, user_name, grants):
|
||||
|
||||
# check if the desired grants are included in the current ones
|
||||
for grant in grants:
|
||||
if grant not in current_grants:
|
||||
if grant not in parsed_grants:
|
||||
if not module.check_mode:
|
||||
client.grant_privilege(grant['privilege'],
|
||||
grant['database'],
|
||||
@@ -198,6 +200,9 @@ def set_user_grants(module, client, user_name, grants):
|
||||
return changed
|
||||
|
||||
|
||||
INFLUX_AUTH_FIRST_USER_REQUIRED = "error authorizing query: create admin user first or disable authentication"
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = influx.InfluxDb.influxdb_argument_spec()
|
||||
argument_spec.update(
|
||||
@@ -219,7 +224,23 @@ def main():
|
||||
grants = module.params['grants']
|
||||
influxdb = influx.InfluxDb(module)
|
||||
client = influxdb.connect_to_influxdb()
|
||||
user = find_user(module, client, user_name)
|
||||
|
||||
user = None
|
||||
try:
|
||||
user = find_user(module, client, user_name)
|
||||
except influx.exceptions.InfluxDBClientError as e:
|
||||
if e.code == 403:
|
||||
reason = None
|
||||
try:
|
||||
msg = json.loads(e.content)
|
||||
reason = msg["error"]
|
||||
except (KeyError, ValueError):
|
||||
module.fail_json(msg=to_native(e))
|
||||
|
||||
if reason != INFLUX_AUTH_FIRST_USER_REQUIRED:
|
||||
module.fail_json(msg=to_native(e))
|
||||
else:
|
||||
module.fail_json(msg=to_native(e))
|
||||
|
||||
changed = False
|
||||
|
||||
|
||||
@@ -170,25 +170,23 @@ def install_plugin(module, plugin_bin, plugin_name, url, timeout, allow_root, ki
|
||||
cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name]
|
||||
|
||||
if url:
|
||||
cmd_args.append("--url %s" % url)
|
||||
cmd_args.extend(["--url", url])
|
||||
|
||||
if timeout:
|
||||
cmd_args.append("--timeout %s" % timeout)
|
||||
cmd_args.extend(["--timeout", timeout])
|
||||
|
||||
if allow_root:
|
||||
cmd_args.append('--allow-root')
|
||||
|
||||
cmd = " ".join(cmd_args)
|
||||
|
||||
if module.check_mode:
|
||||
return True, cmd, "check mode", ""
|
||||
return True, " ".join(cmd_args), "check mode", ""
|
||||
|
||||
rc, out, err = module.run_command(cmd)
|
||||
rc, out, err = module.run_command(cmd_args)
|
||||
if rc != 0:
|
||||
reason = parse_error(out)
|
||||
module.fail_json(msg=reason)
|
||||
|
||||
return True, cmd, out, err
|
||||
return True, " ".join(cmd_args), out, err
|
||||
|
||||
|
||||
def remove_plugin(module, plugin_bin, plugin_name, allow_root, kibana_version='4.6'):
|
||||
@@ -201,17 +199,15 @@ def remove_plugin(module, plugin_bin, plugin_name, allow_root, kibana_version='4
|
||||
if allow_root:
|
||||
cmd_args.append('--allow-root')
|
||||
|
||||
cmd = " ".join(cmd_args)
|
||||
|
||||
if module.check_mode:
|
||||
return True, cmd, "check mode", ""
|
||||
return True, " ".join(cmd_args), "check mode", ""
|
||||
|
||||
rc, out, err = module.run_command(cmd)
|
||||
rc, out, err = module.run_command(cmd_args)
|
||||
if rc != 0:
|
||||
reason = parse_error(out)
|
||||
module.fail_json(msg=reason)
|
||||
|
||||
return True, cmd, out, err
|
||||
return True, " ".join(cmd_args), out, err
|
||||
|
||||
|
||||
def get_kibana_version(module, plugin_bin, allow_root):
|
||||
@@ -220,8 +216,7 @@ def get_kibana_version(module, plugin_bin, allow_root):
|
||||
if allow_root:
|
||||
cmd_args.append('--allow-root')
|
||||
|
||||
cmd = " ".join(cmd_args)
|
||||
rc, out, err = module.run_command(cmd)
|
||||
rc, out, err = module.run_command(cmd_args)
|
||||
if rc != 0:
|
||||
module.fail_json(msg="Failed to get Kibana version : %s" % err)
|
||||
|
||||
@@ -269,7 +264,7 @@ def main():
|
||||
|
||||
if state == "present":
|
||||
if force:
|
||||
remove_plugin(module, plugin_bin, name)
|
||||
remove_plugin(module, plugin_bin, name, allow_root, kibana_version)
|
||||
changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout, allow_root, kibana_version)
|
||||
|
||||
elif state == "absent":
|
||||
|
||||
@@ -98,9 +98,8 @@ from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
def get_xattr_keys(module, path, follow):
|
||||
cmd = [module.get_bin_path('getfattr', True)]
|
||||
# prevents warning and not sure why it's not default
|
||||
cmd.append('--absolute-names')
|
||||
cmd = [module.get_bin_path('getfattr', True), '--absolute-names']
|
||||
|
||||
if not follow:
|
||||
cmd.append('-h')
|
||||
cmd.append(path)
|
||||
@@ -109,10 +108,8 @@ def get_xattr_keys(module, path, follow):
|
||||
|
||||
|
||||
def get_xattr(module, path, key, follow):
|
||||
cmd = [module.get_bin_path('getfattr', True), '--absolute-names']
|
||||
|
||||
cmd = [module.get_bin_path('getfattr', True)]
|
||||
# prevents warning and not sure why it's not default
|
||||
cmd.append('--absolute-names')
|
||||
if not follow:
|
||||
cmd.append('-h')
|
||||
if key is None:
|
||||
|
||||
@@ -285,6 +285,39 @@ EXAMPLES = r'''
|
||||
z: http://z.test
|
||||
attribute: z:my_namespaced_attribute
|
||||
value: 'false'
|
||||
|
||||
- name: Adding building nodes with floor subnodes from a YAML variable
|
||||
community.general.xml:
|
||||
path: /foo/bar.xml
|
||||
xpath: /business
|
||||
add_children:
|
||||
- building:
|
||||
# Attributes
|
||||
name: Scumm bar
|
||||
location: Monkey island
|
||||
# Subnodes
|
||||
_:
|
||||
- floor: Pirate hall
|
||||
- floor: Grog storage
|
||||
- construction_date: "1990" # Only strings are valid
|
||||
- building: Grog factory
|
||||
|
||||
# Consider this XML for following example -
|
||||
#
|
||||
# <config>
|
||||
# <element name="test1">
|
||||
# <text>part to remove</text>
|
||||
# </element>
|
||||
# <element name="test2">
|
||||
# <text>part to keep</text>
|
||||
# </element>
|
||||
# </config>
|
||||
|
||||
- name: Delete element node based upon attribute
|
||||
community.general.xml:
|
||||
path: bar.xml
|
||||
xpath: /config/element[@name='test1']
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
|
||||
@@ -14,6 +14,13 @@ short_description: Manage Global FreeIPA Configuration Settings
|
||||
description:
|
||||
- Modify global configuration settings of a FreeIPA Server.
|
||||
options:
|
||||
ipaconfigstring:
|
||||
description: Extra hashes to generate in password plug-in.
|
||||
aliases: ["configstring"]
|
||||
type: list
|
||||
elements: str
|
||||
choices: ["AllowNThash", "KDC:Disable Last Success", "KDC:Disable Lockout", "KDC:Disable Default Preauth for SPNs"]
|
||||
version_added: '2.5.0'
|
||||
ipadefaultloginshell:
|
||||
description: Default shell for new users.
|
||||
aliases: ["loginshell"]
|
||||
@@ -22,25 +29,158 @@ options:
|
||||
description: Default e-mail domain for new users.
|
||||
aliases: ["emaildomain"]
|
||||
type: str
|
||||
ipadefaultprimarygroup:
|
||||
description: Default group for new users.
|
||||
aliases: ["primarygroup"]
|
||||
type: str
|
||||
version_added: '2.5.0'
|
||||
ipagroupsearchfields:
|
||||
description: A list of fields to search in when searching for groups.
|
||||
aliases: ["groupsearchfields"]
|
||||
type: list
|
||||
elements: str
|
||||
version_added: '2.5.0'
|
||||
ipahomesrootdir:
|
||||
description: Default location of home directories.
|
||||
aliases: ["homesrootdir"]
|
||||
type: str
|
||||
version_added: '2.5.0'
|
||||
ipakrbauthzdata:
|
||||
description: Default types of PAC supported for services.
|
||||
aliases: ["krbauthzdata"]
|
||||
type: list
|
||||
elements: str
|
||||
choices: ["MS-PAC", "PAD", "nfs:NONE"]
|
||||
version_added: '2.5.0'
|
||||
ipamaxusernamelength:
|
||||
description: Maximum length of usernames.
|
||||
aliases: ["maxusernamelength"]
|
||||
type: int
|
||||
version_added: '2.5.0'
|
||||
ipapwdexpadvnotify:
|
||||
description: Notice of impending password expiration, in days.
|
||||
aliases: ["pwdexpadvnotify"]
|
||||
type: int
|
||||
version_added: '2.5.0'
|
||||
ipasearchrecordslimit:
|
||||
description: Maximum number of records to search (-1 or 0 is unlimited).
|
||||
aliases: ["searchrecordslimit"]
|
||||
type: int
|
||||
version_added: '2.5.0'
|
||||
ipasearchtimelimit:
|
||||
description: Maximum amount of time (seconds) for a search (-1 or 0 is unlimited).
|
||||
aliases: ["searchtimelimit"]
|
||||
type: int
|
||||
version_added: '2.5.0'
|
||||
ipauserauthtype:
|
||||
description: The authentication type to use by default.
|
||||
aliases: ["userauthtype"]
|
||||
choices: ["password", "radius", "otp", "pkinit", "hardened", "disabled"]
|
||||
type: list
|
||||
elements: str
|
||||
version_added: '2.5.0'
|
||||
ipausersearchfields:
|
||||
description: A list of fields to search in when searching for users.
|
||||
aliases: ["usersearchfields"]
|
||||
type: list
|
||||
elements: str
|
||||
version_added: '2.5.0'
|
||||
extends_documentation_fragment:
|
||||
- community.general.ipa.documentation
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Ensure the default login shell is bash.
|
||||
- name: Ensure password plugin features DC:Disable Last Success and KDC:Disable Lockout are enabled
|
||||
community.general.ipa_config:
|
||||
ipaconfigstring: ["KDC:Disable Last Success", "KDC:Disable Lockout"]
|
||||
ipa_host: localhost
|
||||
ipa_user: admin
|
||||
ipa_pass: supersecret
|
||||
|
||||
- name: Ensure the default login shell is bash
|
||||
community.general.ipa_config:
|
||||
ipadefaultloginshell: /bin/bash
|
||||
ipa_host: localhost
|
||||
ipa_user: admin
|
||||
ipa_pass: supersecret
|
||||
|
||||
- name: Ensure the default e-mail domain is ansible.com.
|
||||
- name: Ensure the default e-mail domain is ansible.com
|
||||
community.general.ipa_config:
|
||||
ipadefaultemaildomain: ansible.com
|
||||
ipa_host: localhost
|
||||
ipa_user: admin
|
||||
ipa_pass: supersecret
|
||||
|
||||
- name: Ensure the default primary group is set to ipausers
|
||||
community.general.ipa_config:
|
||||
ipadefaultprimarygroup: ipausers
|
||||
ipa_host: localhost
|
||||
ipa_user: admin
|
||||
ipa_pass: supersecret
|
||||
|
||||
- name: Ensure the group search fields are set to 'cn,description'
|
||||
community.general.ipa_config:
|
||||
ipagroupsearchfields: ['cn', 'description']
|
||||
ipa_host: localhost
|
||||
ipa_user: admin
|
||||
ipa_pass: supersecret
|
||||
|
||||
- name: Ensure the home directory location is set to /home
|
||||
community.general.ipa_config:
|
||||
ipahomesrootdir: /home
|
||||
ipa_host: localhost
|
||||
ipa_user: admin
|
||||
ipa_pass: supersecret
|
||||
|
||||
- name: Ensure the default types of PAC supported for services is set to MS-PAC and PAD
|
||||
community.general.ipa_config:
|
||||
ipakrbauthzdata: ["MS-PAC", "PAD"]
|
||||
ipa_host: localhost
|
||||
ipa_user: admin
|
||||
ipa_pass: supersecret
|
||||
|
||||
- name: Ensure the maximum user name length is set to 32
|
||||
community.general.ipa_config:
|
||||
ipamaxusernamelength: 32
|
||||
ipa_host: localhost
|
||||
ipa_user: admin
|
||||
ipa_pass: supersecret
|
||||
|
||||
- name: Ensure the password expiration notice is set to 4 days
|
||||
community.general.ipa_config:
|
||||
ipapwdexpadvnotify: 4
|
||||
ipa_host: localhost
|
||||
ipa_user: admin
|
||||
ipa_pass: supersecret
|
||||
|
||||
- name: Ensure the search record limit is set to 100
|
||||
community.general.ipa_config:
|
||||
ipasearchrecordslimit: 100
|
||||
ipa_host: localhost
|
||||
ipa_user: admin
|
||||
ipa_pass: supersecret
|
||||
|
||||
- name: Ensure the search time limit is set to 2 seconds
|
||||
community.general.ipa_config:
|
||||
ipasearchtimelimit: 2
|
||||
ipa_host: localhost
|
||||
ipa_user: admin
|
||||
ipa_pass: supersecret
|
||||
|
||||
- name: Ensure the default user auth type is password
|
||||
community.general.ipa_config:
|
||||
ipauserauthtype: ['password']
|
||||
ipa_host: localhost
|
||||
ipa_user: admin
|
||||
ipa_pass: supersecret
|
||||
|
||||
- name: Ensure the user search fields is set to 'uid,givenname,sn,ou,title'
|
||||
community.general.ipa_config:
|
||||
ipausersearchfields: ['uid', 'givenname', 'sn', 'ou', 'title']
|
||||
ipa_host: localhost
|
||||
ipa_user: admin
|
||||
ipa_pass: supersecret
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
@@ -68,12 +208,40 @@ class ConfigIPAClient(IPAClient):
|
||||
return self._post_json(method='config_mod', name=name, item=item)
|
||||
|
||||
|
||||
def get_config_dict(ipadefaultloginshell=None, ipadefaultemaildomain=None):
|
||||
def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None,
|
||||
ipadefaultemaildomain=None, ipadefaultprimarygroup=None,
|
||||
ipagroupsearchfields=None, ipahomesrootdir=None,
|
||||
ipakrbauthzdata=None, ipamaxusernamelength=None,
|
||||
ipapwdexpadvnotify=None, ipasearchrecordslimit=None,
|
||||
ipasearchtimelimit=None, ipauserauthtype=None,
|
||||
ipausersearchfields=None):
|
||||
config = {}
|
||||
if ipaconfigstring is not None:
|
||||
config['ipaconfigstring'] = ipaconfigstring
|
||||
if ipadefaultloginshell is not None:
|
||||
config['ipadefaultloginshell'] = ipadefaultloginshell
|
||||
if ipadefaultemaildomain is not None:
|
||||
config['ipadefaultemaildomain'] = ipadefaultemaildomain
|
||||
if ipadefaultprimarygroup is not None:
|
||||
config['ipadefaultprimarygroup'] = ipadefaultprimarygroup
|
||||
if ipagroupsearchfields is not None:
|
||||
config['ipagroupsearchfields'] = ','.join(ipagroupsearchfields)
|
||||
if ipahomesrootdir is not None:
|
||||
config['ipahomesrootdir'] = ipahomesrootdir
|
||||
if ipakrbauthzdata is not None:
|
||||
config['ipakrbauthzdata'] = ipakrbauthzdata
|
||||
if ipamaxusernamelength is not None:
|
||||
config['ipamaxusernamelength'] = str(ipamaxusernamelength)
|
||||
if ipapwdexpadvnotify is not None:
|
||||
config['ipapwdexpadvnotify'] = str(ipapwdexpadvnotify)
|
||||
if ipasearchrecordslimit is not None:
|
||||
config['ipasearchrecordslimit'] = str(ipasearchrecordslimit)
|
||||
if ipasearchtimelimit is not None:
|
||||
config['ipasearchtimelimit'] = str(ipasearchtimelimit)
|
||||
if ipauserauthtype is not None:
|
||||
config['ipauserauthtype'] = ipauserauthtype
|
||||
if ipausersearchfields is not None:
|
||||
config['ipausersearchfields'] = ','.join(ipausersearchfields)
|
||||
|
||||
return config
|
||||
|
||||
@@ -84,8 +252,19 @@ def get_config_diff(client, ipa_config, module_config):
|
||||
|
||||
def ensure(module, client):
|
||||
module_config = get_config_dict(
|
||||
ipaconfigstring=module.params.get('ipaconfigstring'),
|
||||
ipadefaultloginshell=module.params.get('ipadefaultloginshell'),
|
||||
ipadefaultemaildomain=module.params.get('ipadefaultemaildomain'),
|
||||
ipadefaultprimarygroup=module.params.get('ipadefaultprimarygroup'),
|
||||
ipagroupsearchfields=module.params.get('ipagroupsearchfields'),
|
||||
ipahomesrootdir=module.params.get('ipahomesrootdir'),
|
||||
ipakrbauthzdata=module.params.get('ipakrbauthzdata'),
|
||||
ipamaxusernamelength=module.params.get('ipamaxusernamelength'),
|
||||
ipapwdexpadvnotify=module.params.get('ipapwdexpadvnotify'),
|
||||
ipasearchrecordslimit=module.params.get('ipasearchrecordslimit'),
|
||||
ipasearchtimelimit=module.params.get('ipasearchtimelimit'),
|
||||
ipauserauthtype=module.params.get('ipauserauthtype'),
|
||||
ipausersearchfields=module.params.get('ipausersearchfields'),
|
||||
)
|
||||
ipa_config = client.config_show()
|
||||
diff = get_config_diff(client, ipa_config, module_config)
|
||||
@@ -106,8 +285,31 @@ def ensure(module, client):
|
||||
def main():
|
||||
argument_spec = ipa_argument_spec()
|
||||
argument_spec.update(
|
||||
ipaconfigstring=dict(type='list', elements='str',
|
||||
choices=['AllowNThash',
|
||||
'KDC:Disable Last Success',
|
||||
'KDC:Disable Lockout',
|
||||
'KDC:Disable Default Preauth for SPNs'],
|
||||
aliases=['configstring']),
|
||||
ipadefaultloginshell=dict(type='str', aliases=['loginshell']),
|
||||
ipadefaultemaildomain=dict(type='str', aliases=['emaildomain']),
|
||||
ipadefaultprimarygroup=dict(type='str', aliases=['primarygroup']),
|
||||
ipagroupsearchfields=dict(type='list', elements='str',
|
||||
aliases=['groupsearchfields']),
|
||||
ipahomesrootdir=dict(type='str', aliases=['homesrootdir']),
|
||||
ipakrbauthzdata=dict(type='list', elements='str',
|
||||
choices=['MS-PAC', 'PAD', 'nfs:NONE'],
|
||||
aliases=['krbauthzdata']),
|
||||
ipamaxusernamelength=dict(type='int', aliases=['maxusernamelength']),
|
||||
ipapwdexpadvnotify=dict(type='int', aliases=['pwdexpadvnotify']),
|
||||
ipasearchrecordslimit=dict(type='int', aliases=['searchrecordslimit']),
|
||||
ipasearchtimelimit=dict(type='int', aliases=['searchtimelimit']),
|
||||
ipauserauthtype=dict(type='list', elements='str',
|
||||
aliases=['userauthtype'],
|
||||
choices=["password", "radius", "otp", "pkinit",
|
||||
"hardened", "disabled"]),
|
||||
ipausersearchfields=dict(type='list', elements='str',
|
||||
aliases=['usersearchfields']),
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
|
||||
172
plugins/modules/identity/ipa/ipa_otpconfig.py
Normal file
172
plugins/modules/identity/ipa/ipa_otpconfig.py
Normal file
@@ -0,0 +1,172 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2021, Ansible Project
|
||||
# Heavily influenced from Fran Fitzpatrick <francis.x.fitzpatrick@gmail.com> ipa_config module
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: ipa_otpconfig
|
||||
author: justchris1 (@justchris1)
|
||||
short_description: Manage FreeIPA OTP Configuration Settings
|
||||
version_added: 2.5.0
|
||||
description:
|
||||
- Modify global configuration settings of a FreeIPA Server with respect to OTP (One Time Passwords).
|
||||
options:
|
||||
ipatokentotpauthwindow:
|
||||
description: TOTP authentication window in seconds.
|
||||
aliases: ["totpauthwindow"]
|
||||
type: int
|
||||
ipatokentotpsyncwindow:
|
||||
description: TOTP synchronization window in seconds.
|
||||
aliases: ["totpsyncwindow"]
|
||||
type: int
|
||||
ipatokenhotpauthwindow:
|
||||
description: HOTP authentication window in number of hops.
|
||||
aliases: ["hotpauthwindow"]
|
||||
type: int
|
||||
ipatokenhotpsyncwindow:
|
||||
description: HOTP synchronization window in hops.
|
||||
aliases: ["hotpsyncwindow"]
|
||||
type: int
|
||||
extends_documentation_fragment:
|
||||
- community.general.ipa.documentation
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Ensure the TOTP authentication window is set to 300 seconds
|
||||
community.general.ipa_otpconfig:
|
||||
ipatokentotpauthwindow: '300'
|
||||
ipa_host: localhost
|
||||
ipa_user: admin
|
||||
ipa_pass: supersecret
|
||||
|
||||
- name: Ensure the TOTP syncronization window is set to 86400 seconds
|
||||
community.general.ipa_otpconfig:
|
||||
ipatokentotpsyncwindow: '86400'
|
||||
ipa_host: localhost
|
||||
ipa_user: admin
|
||||
ipa_pass: supersecret
|
||||
|
||||
- name: Ensure the HOTP authentication window is set to 10 hops
|
||||
community.general.ipa_otpconfig:
|
||||
ipatokenhotpauthwindow: '10'
|
||||
ipa_host: localhost
|
||||
ipa_user: admin
|
||||
ipa_pass: supersecret
|
||||
|
||||
- name: Ensure the HOTP syncronization window is set to 100 hops
|
||||
community.general.ipa_otpconfig:
|
||||
ipatokenhotpsyncwindow: '100'
|
||||
ipa_host: localhost
|
||||
ipa_user: admin
|
||||
ipa_pass: supersecret
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
otpconfig:
|
||||
description: OTP configuration as returned by IPA API.
|
||||
returned: always
|
||||
type: dict
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
class OTPConfigIPAClient(IPAClient):
|
||||
def __init__(self, module, host, port, protocol):
|
||||
super(OTPConfigIPAClient, self).__init__(module, host, port, protocol)
|
||||
|
||||
def otpconfig_show(self):
|
||||
return self._post_json(method='otpconfig_show', name=None)
|
||||
|
||||
def otpconfig_mod(self, name, item):
|
||||
return self._post_json(method='otpconfig_mod', name=name, item=item)
|
||||
|
||||
|
||||
def get_otpconfig_dict(ipatokentotpauthwindow=None, ipatokentotpsyncwindow=None,
|
||||
ipatokenhotpauthwindow=None, ipatokenhotpsyncwindow=None):
|
||||
|
||||
config = {}
|
||||
if ipatokentotpauthwindow is not None:
|
||||
config['ipatokentotpauthwindow'] = str(ipatokentotpauthwindow)
|
||||
if ipatokentotpsyncwindow is not None:
|
||||
config['ipatokentotpsyncwindow'] = str(ipatokentotpsyncwindow)
|
||||
if ipatokenhotpauthwindow is not None:
|
||||
config['ipatokenhotpauthwindow'] = str(ipatokenhotpauthwindow)
|
||||
if ipatokenhotpsyncwindow is not None:
|
||||
config['ipatokenhotpsyncwindow'] = str(ipatokenhotpsyncwindow)
|
||||
|
||||
return config
|
||||
|
||||
|
||||
def get_otpconfig_diff(client, ipa_config, module_config):
|
||||
return client.get_diff(ipa_data=ipa_config, module_data=module_config)
|
||||
|
||||
|
||||
def ensure(module, client):
|
||||
module_otpconfig = get_otpconfig_dict(
|
||||
ipatokentotpauthwindow=module.params.get('ipatokentotpauthwindow'),
|
||||
ipatokentotpsyncwindow=module.params.get('ipatokentotpsyncwindow'),
|
||||
ipatokenhotpauthwindow=module.params.get('ipatokenhotpauthwindow'),
|
||||
ipatokenhotpsyncwindow=module.params.get('ipatokenhotpsyncwindow'),
|
||||
)
|
||||
ipa_otpconfig = client.otpconfig_show()
|
||||
diff = get_otpconfig_diff(client, ipa_otpconfig, module_otpconfig)
|
||||
|
||||
changed = False
|
||||
new_otpconfig = {}
|
||||
for module_key in diff:
|
||||
if module_otpconfig.get(module_key) != ipa_otpconfig.get(module_key, None):
|
||||
changed = True
|
||||
new_otpconfig.update({module_key: module_otpconfig.get(module_key)})
|
||||
|
||||
if changed and not module.check_mode:
|
||||
client.otpconfig_mod(name=None, item=new_otpconfig)
|
||||
|
||||
return changed, client.otpconfig_show()
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = ipa_argument_spec()
|
||||
argument_spec.update(
|
||||
ipatokentotpauthwindow=dict(type='int', aliases=['totpauthwindow'], no_log=False),
|
||||
ipatokentotpsyncwindow=dict(type='int', aliases=['totpsyncwindow'], no_log=False),
|
||||
ipatokenhotpauthwindow=dict(type='int', aliases=['hotpauthwindow'], no_log=False),
|
||||
ipatokenhotpsyncwindow=dict(type='int', aliases=['hotpsyncwindow'], no_log=False),
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
client = OTPConfigIPAClient(
|
||||
module=module,
|
||||
host=module.params['ipa_host'],
|
||||
port=module.params['ipa_port'],
|
||||
protocol=module.params['ipa_prot']
|
||||
)
|
||||
|
||||
try:
|
||||
client.login(
|
||||
username=module.params['ipa_user'],
|
||||
password=module.params['ipa_pass']
|
||||
)
|
||||
changed, otpconfig = ensure(module, client)
|
||||
except Exception as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
|
||||
module.exit_json(changed=changed, otpconfig=otpconfig)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
527
plugins/modules/identity/ipa/ipa_otptoken.py
Normal file
527
plugins/modules/identity/ipa/ipa_otptoken.py
Normal file
@@ -0,0 +1,527 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2017, Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: ipa_otptoken
|
||||
author: justchris1 (@justchris1)
|
||||
short_description: Manage FreeIPA OTPs
|
||||
version_added: 2.5.0
|
||||
description:
|
||||
- Add, modify, and delete One Time Passwords in IPA.
|
||||
options:
|
||||
uniqueid:
|
||||
description: Unique ID of the token in IPA.
|
||||
required: true
|
||||
aliases: ["name"]
|
||||
type: str
|
||||
newuniqueid:
|
||||
description: If specified, the unique id specified will be changed to this.
|
||||
type: str
|
||||
otptype:
|
||||
description:
|
||||
- Type of OTP.
|
||||
- "B(Note:) Cannot be modified after OTP is created."
|
||||
type: str
|
||||
choices: [ totp, hotp ]
|
||||
secretkey:
|
||||
description:
|
||||
- Token secret (Base64).
|
||||
- If OTP is created and this is not specified, a random secret will be generated by IPA.
|
||||
- "B(Note:) Cannot be modified after OTP is created."
|
||||
type: str
|
||||
description:
|
||||
description: Description of the token (informational only).
|
||||
type: str
|
||||
owner:
|
||||
description: Assigned user of the token.
|
||||
type: str
|
||||
enabled:
|
||||
description: Mark the token as enabled (default C(true)).
|
||||
default: true
|
||||
type: bool
|
||||
notbefore:
|
||||
description:
|
||||
- First date/time the token can be used.
|
||||
- In the format C(YYYYMMddHHmmss).
|
||||
- For example, C(20180121182022) will allow the token to be used starting on 21 January 2018 at 18:20:22.
|
||||
type: str
|
||||
notafter:
|
||||
description:
|
||||
- Last date/time the token can be used.
|
||||
- In the format C(YYYYMMddHHmmss).
|
||||
- For example, C(20200121182022) will allow the token to be used until 21 January 2020 at 18:20:22.
|
||||
type: str
|
||||
vendor:
|
||||
description: Token vendor name (informational only).
|
||||
type: str
|
||||
model:
|
||||
description: Token model (informational only).
|
||||
type: str
|
||||
serial:
|
||||
description: Token serial (informational only).
|
||||
type: str
|
||||
state:
|
||||
description: State to ensure.
|
||||
choices: ['present', 'absent']
|
||||
default: 'present'
|
||||
type: str
|
||||
algorithm:
|
||||
description:
|
||||
- Token hash algorithm.
|
||||
- "B(Note:) Cannot be modified after OTP is created."
|
||||
choices: ['sha1', 'sha256', 'sha384', 'sha512']
|
||||
type: str
|
||||
digits:
|
||||
description:
|
||||
- Number of digits each token code will have.
|
||||
- "B(Note:) Cannot be modified after OTP is created."
|
||||
choices: [ 6, 8 ]
|
||||
type: int
|
||||
offset:
|
||||
description:
|
||||
- TOTP token / IPA server time difference.
|
||||
- "B(Note:) Cannot be modified after OTP is created."
|
||||
type: int
|
||||
interval:
|
||||
description:
|
||||
- Length of TOTP token code validity in seconds.
|
||||
- "B(Note:) Cannot be modified after OTP is created."
|
||||
type: int
|
||||
counter:
|
||||
description:
|
||||
- Initial counter for the HOTP token.
|
||||
- "B(Note:) Cannot be modified after OTP is created."
|
||||
type: int
|
||||
extends_documentation_fragment:
|
||||
- community.general.ipa.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Create a totp for pinky, allowing the IPA server to generate using defaults
|
||||
community.general.ipa_otptoken:
|
||||
uniqueid: Token123
|
||||
otptype: totp
|
||||
owner: pinky
|
||||
ipa_host: ipa.example.com
|
||||
ipa_user: admin
|
||||
ipa_pass: topsecret
|
||||
|
||||
- name: Create a 8 digit hotp for pinky with sha256 with specified validity times
|
||||
community.general.ipa_otptoken:
|
||||
uniqueid: Token123
|
||||
enabled: true
|
||||
otptype: hotp
|
||||
digits: 8
|
||||
secretkey: UMKSIER00zT2T2tWMUlTRmNlekRCbFQvWFBVZUh2dElHWGR6T3VUR3IzK2xjaFk9
|
||||
algorithm: sha256
|
||||
notbefore: 20180121182123
|
||||
notafter: 20220121182123
|
||||
owner: pinky
|
||||
ipa_host: ipa.example.com
|
||||
ipa_user: admin
|
||||
ipa_pass: topsecret
|
||||
|
||||
- name: Update Token123 to indicate a vendor, model, serial number (info only), and description
|
||||
community.general.ipa_otptoken:
|
||||
uniqueid: Token123
|
||||
vendor: Acme
|
||||
model: acme101
|
||||
serial: SerialNumber1
|
||||
description: Acme OTP device
|
||||
ipa_host: ipa.example.com
|
||||
ipa_user: admin
|
||||
ipa_pass: topsecret
|
||||
|
||||
- name: Disable Token123
|
||||
community.general.ipa_otptoken:
|
||||
uniqueid: Token123
|
||||
enabled: false
|
||||
ipa_host: ipa.example.com
|
||||
ipa_user: admin
|
||||
ipa_pass: topsecret
|
||||
|
||||
- name: Rename Token123 to TokenABC and enable it
|
||||
community.general.ipa_otptoken:
|
||||
uniqueid: Token123
|
||||
newuniqueid: TokenABC
|
||||
enabled: true
|
||||
ipa_host: ipa.example.com
|
||||
ipa_user: admin
|
||||
ipa_pass: topsecret
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
otptoken:
|
||||
description: OTP Token as returned by IPA API
|
||||
returned: always
|
||||
type: dict
|
||||
'''
|
||||
|
||||
import base64
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, sanitize_keys
|
||||
from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
class OTPTokenIPAClient(IPAClient):
    """IPA client specialised for the otptoken_* JSON-RPC endpoints."""

    def __init__(self, module, host, port, protocol):
        super(OTPTokenIPAClient, self).__init__(module, host, port, protocol)

    def otptoken_find(self, name):
        """Search for a token by its unique id and return the IPA result."""
        # The find RPC takes the id inside the search criteria rather than
        # as the positional name argument.
        criteria = {
            'all': True,
            'ipatokenuniqueid': name,
            'timelimit': '0',
            'sizelimit': '0',
        }
        return self._post_json(method='otptoken_find', name=None, item=criteria)

    def otptoken_add(self, name, item):
        """Create a new token called ``name`` with attributes ``item``."""
        return self._post_json(method='otptoken_add', name=name, item=item)

    def otptoken_mod(self, name, item):
        """Modify the token called ``name`` with attributes ``item``."""
        return self._post_json(method='otptoken_mod', name=name, item=item)

    def otptoken_del(self, name):
        """Delete the token called ``name``."""
        return self._post_json(method='otptoken_del', name=name)
|
||||
|
||||
|
||||
def base64_to_base32(base64_string):
    """Re-encode a base64-encoded value as a base32 string."""
    raw_bytes = base64.b64decode(base64_string)
    return base64.b32encode(raw_bytes).decode('ascii')
|
||||
|
||||
|
||||
def base32_to_base64(base32_string):
    """Re-encode a base32-encoded value as a base64 string."""
    raw_bytes = base64.b32decode(base32_string)
    return base64.b64encode(raw_bytes).decode('ascii')
|
||||
|
||||
|
||||
def get_otptoken_dict(ansible_to_ipa, uniqueid=None, newuniqueid=None, otptype=None, secretkey=None, description=None, owner=None,
                      enabled=None, notbefore=None, notafter=None, vendor=None,
                      model=None, serial=None, algorithm=None, digits=None, offset=None,
                      interval=None, counter=None):
    """Build the IPA-keyed dict of token settings from the given parameters.

    Only parameters that are not None end up in the result. Keys are
    translated via ``ansible_to_ipa``, and a few values get converted:

    - ``otptype`` is upper-cased (IPA expects TOTP/HOTP).
    - ``secretkey``: IPA returns the secret in base64 but, for some unknown
      reason, wants it passed in as base32. To keep the module's interface
      consistent (base64 in and out), the parameter is converted here.
    - ``enabled`` maps to the inverted ``ipatokendisabled`` flag
      ('FALSE' when enabled, 'TRUE' when disabled).
    - ``notbefore``/``notafter`` get a trailing 'Z' appended.
    - The numeric options are passed to IPA as strings.

    :param ansible_to_ipa: mapping of ansible option names to IPA attribute names
    :return: dict of IPA attribute name -> value
    """
    # (ansible option name, value, optional transform). The transforms are
    # wrapped in lambdas so nothing is evaluated for values that are None.
    settings = [
        ('uniqueid', uniqueid, None),
        ('newuniqueid', newuniqueid, None),
        ('otptype', otptype, lambda v: v.upper()),
        ('secretkey', secretkey, lambda v: base64_to_base32(v)),
        ('description', description, None),
        ('owner', owner, None),
        # IPA stores the *disabled* flag, hence the inversion.
        ('enabled', enabled, lambda v: 'FALSE' if v else 'TRUE'),
        ('notbefore', notbefore, lambda v: v + 'Z'),
        ('notafter', notafter, lambda v: v + 'Z'),
        ('vendor', vendor, None),
        ('model', model, None),
        ('serial', serial, None),
        ('algorithm', algorithm, None),
        ('digits', digits, str),
        ('offset', offset, str),
        ('interval', interval, str),
        ('counter', counter, str),
    ]

    otptoken = {}
    for option_name, value, transform in settings:
        if value is None:
            continue
        otptoken[ansible_to_ipa[option_name]] = transform(value) if transform else value
    return otptoken
|
||||
|
||||
|
||||
def transform_output(ipa_otptoken, ansible_to_ipa, ipa_to_ansible):
    """Transform the output received by IPA to a format more friendly
    before it is returned to the user.  IPA returns even simple
    strings as a list of strings.  It also returns bools and
    int as string.  This function cleans that up before return.

    :param ipa_otptoken: token dict as returned by IPA (IPA attribute names)
    :param ansible_to_ipa: mapping of ansible option names to IPA names
    :param ipa_to_ansible: inverse mapping of ``ansible_to_ipa``
    :return: tuple of (cleaned-up token dict keyed by ansible names,
             set of secret strings that must be treated as no_log)

    NOTE(review): ``updated_otptoken`` is an alias of ``ipa_otptoken`` —
    the input dict is mutated in place, not copied.
    """
    updated_otptoken = ipa_otptoken

    # Used to hold values that will be sanitized from output as no_log.
    # For the case where secretkey is not specified at the module, but
    # is passed back from IPA.
    sanitize_strings = set()

    # Rename the IPA parameters to the more friendly ansible module names for them
    for ipa_parameter in ipa_to_ansible:
        if ipa_parameter in ipa_otptoken:
            updated_otptoken[ipa_to_ansible[ipa_parameter]] = ipa_otptoken[ipa_parameter]
            updated_otptoken.pop(ipa_parameter)

    # Change the type from IPA's list of string to the appropriate return value type
    # based on field. By default, assume they should be strings.
    for ansible_parameter in ansible_to_ipa:
        if ansible_parameter in updated_otptoken:
            if isinstance(updated_otptoken[ansible_parameter], list) and len(updated_otptoken[ansible_parameter]) == 1:
                if ansible_parameter in ['digits', 'offset', 'interval', 'counter']:
                    updated_otptoken[ansible_parameter] = int(updated_otptoken[ansible_parameter][0])
                elif ansible_parameter == 'enabled':
                    # NOTE(review): bool() of a non-empty string is always
                    # True, so any string value here (including 'FALSE')
                    # yields True — confirm whether the raw ipatokendisabled
                    # string should instead be parsed and inverted.
                    updated_otptoken[ansible_parameter] = bool(updated_otptoken[ansible_parameter][0])
                else:
                    updated_otptoken[ansible_parameter] = updated_otptoken[ansible_parameter][0]

    # Normalize the secret to base64 and record every representation of it
    # so it can be sanitized from logs.
    if 'secretkey' in updated_otptoken:
        if isinstance(updated_otptoken['secretkey'], dict):
            if '__base64__' in updated_otptoken['secretkey']:
                sanitize_strings.add(updated_otptoken['secretkey']['__base64__'])
                b64key = updated_otptoken['secretkey']['__base64__']
                updated_otptoken.pop('secretkey')
                updated_otptoken['secretkey'] = b64key
                sanitize_strings.add(b64key)
            elif '__base32__' in updated_otptoken['secretkey']:
                sanitize_strings.add(updated_otptoken['secretkey']['__base32__'])
                b32key = updated_otptoken['secretkey']['__base32__']
                # Standardize module output on base64 (see get_otptoken_dict).
                b64key = base32_to_base64(b32key)
                updated_otptoken.pop('secretkey')
                updated_otptoken['secretkey'] = b64key
                sanitize_strings.add(b32key)
                sanitize_strings.add(b64key)

    return updated_otptoken, sanitize_strings
|
||||
|
||||
|
||||
def validate_modifications(ansible_to_ipa, module, ipa_otptoken,
                           module_otptoken, unmodifiable_after_creation):
    """Checks to see if the requested modifications are valid.  Some elements
    cannot be modified after initial creation.  However, we still want to
    validate arguments that are specified, but are not different than what
    is currently set on the server.

    :param ansible_to_ipa: mapping of ansible option names to IPA attribute names
    :param module: AnsibleModule instance (used to fail on invalid requests)
    :param ipa_otptoken: token attributes currently set on the IPA server
    :param module_otptoken: token attributes requested by the playbook (IPA-keyed)
    :param unmodifiable_after_creation: ansible option names IPA refuses to change
    :return: True when every requested unmodifiable attribute matches the
             server; otherwise module.fail_json() is invoked
    """
    modifications_valid = True

    for parameter in unmodifiable_after_creation:
        if ansible_to_ipa[parameter] in module_otptoken and ansible_to_ipa[parameter] in ipa_otptoken:
            mod_value = module_otptoken[ansible_to_ipa[parameter]]

            # For some unknown reason, the returns from IPA put almost all
            # values in a list, even though passing them in a list (even of
            # length 1) will be rejected. The module values for all elements
            # other than type (totp or hotp) have this happen.
            if parameter == 'otptype':
                ipa_value = ipa_otptoken[ansible_to_ipa[parameter]]
            else:
                if len(ipa_otptoken[ansible_to_ipa[parameter]]) != 1:
                    module.fail_json(msg=("Invariant fail: Return value from IPA is not a list " +
                                          "of length 1. Please open a bug report for the module."))
                if parameter == 'secretkey':
                    # We stored the secret key in base32 since we had assumed that would need to
                    # be the format if we were contacting IPA to create it. However, we are
                    # now comparing it against what is already set in the IPA server, so convert
                    # back to base64 for comparison.
                    mod_value = base32_to_base64(mod_value)

                    # The key is returned inside a dict in the list, keyed by
                    # its encoding. BUG FIX: the original code indexed
                    # '__base64__' unconditionally before checking for it,
                    # which raised KeyError whenever IPA returned the key
                    # under '__base32__' instead.
                    if '__base64__' in ipa_otptoken[ansible_to_ipa[parameter]][0]:
                        ipa_value = ipa_otptoken[ansible_to_ipa[parameter]][0]['__base64__']
                    elif '__base32__' in ipa_otptoken[ansible_to_ipa[parameter]][0]:
                        b32key = ipa_otptoken[ansible_to_ipa[parameter]][0]['__base32__']
                        ipa_value = base32_to_base64(b32key)
                    else:
                        ipa_value = None
                else:
                    ipa_value = ipa_otptoken[ansible_to_ipa[parameter]][0]

            if mod_value != ipa_value:
                modifications_valid = False
                fail_message = ("Parameter '" + parameter + "' cannot be changed once " +
                                "the OTP is created and the requested value specified here (" +
                                str(mod_value) +
                                ") differs from what is set in the IPA server ("
                                + str(ipa_value) + ")")
                module.fail_json(msg=fail_message)

    return modifications_valid
|
||||
|
||||
|
||||
def ensure(module, client):
    """Ensure the token described by the module parameters exists
    (state=present, creating or modifying as needed) or is removed
    (state=absent).

    :param module: AnsibleModule instance
    :param client: OTPTokenIPAClient used to talk to the IPA server
    :return: tuple of (changed flag, sanitized token dict keyed by
             ansible option names)
    """
    # dict to map from ansible parameter names to attribute names
    # used by IPA (which are not so friendly).
    ansible_to_ipa = {'uniqueid': 'ipatokenuniqueid',
                      'newuniqueid': 'rename',
                      'otptype': 'type',
                      'secretkey': 'ipatokenotpkey',
                      'description': 'description',
                      'owner': 'ipatokenowner',
                      'enabled': 'ipatokendisabled',
                      'notbefore': 'ipatokennotbefore',
                      'notafter': 'ipatokennotafter',
                      'vendor': 'ipatokenvendor',
                      'model': 'ipatokenmodel',
                      'serial': 'ipatokenserial',
                      'algorithm': 'ipatokenotpalgorithm',
                      'digits': 'ipatokenotpdigits',
                      'offset': 'ipatokentotpclockoffset',
                      'interval': 'ipatokentotptimestep',
                      'counter': 'ipatokenhotpcounter'}

    # Create inverse dictionary for mapping return values
    ipa_to_ansible = {}
    for (k, v) in ansible_to_ipa.items():
        ipa_to_ansible[v] = k

    # Attributes that IPA refuses to change once the token exists.
    unmodifiable_after_creation = ['otptype', 'secretkey', 'algorithm',
                                   'digits', 'offset', 'interval', 'counter']
    state = module.params['state']
    uniqueid = module.params['uniqueid']

    # Requested settings, translated to IPA attribute names.
    module_otptoken = get_otptoken_dict(ansible_to_ipa=ansible_to_ipa,
                                        uniqueid=module.params.get('uniqueid'),
                                        newuniqueid=module.params.get('newuniqueid'),
                                        otptype=module.params.get('otptype'),
                                        secretkey=module.params.get('secretkey'),
                                        description=module.params.get('description'),
                                        owner=module.params.get('owner'),
                                        enabled=module.params.get('enabled'),
                                        notbefore=module.params.get('notbefore'),
                                        notafter=module.params.get('notafter'),
                                        vendor=module.params.get('vendor'),
                                        model=module.params.get('model'),
                                        serial=module.params.get('serial'),
                                        algorithm=module.params.get('algorithm'),
                                        digits=module.params.get('digits'),
                                        offset=module.params.get('offset'),
                                        interval=module.params.get('interval'),
                                        counter=module.params.get('counter'))

    # Current state of the token on the server (empty result if absent).
    ipa_otptoken = client.otptoken_find(name=uniqueid)

    if ansible_to_ipa['newuniqueid'] in module_otptoken:
        # Check to see if the new unique id is already in use
        ipa_otptoken_new = client.otptoken_find(name=module_otptoken[ansible_to_ipa['newuniqueid']])
        if ipa_otptoken_new:
            module.fail_json(msg=("Requested rename through newuniqueid to " +
                                  module_otptoken[ansible_to_ipa['newuniqueid']] +
                                  " failed because the new unique id is already in use"))

    changed = False
    if state == 'present':
        if not ipa_otptoken:
            # Token does not exist yet: create it.
            changed = True
            if not module.check_mode:
                # It would not make sense to have a rename after creation, so if the user
                # specified a newuniqueid, just replace the uniqueid with the updated one
                # before creation
                if ansible_to_ipa['newuniqueid'] in module_otptoken:
                    module_otptoken[ansible_to_ipa['uniqueid']] = module_otptoken[ansible_to_ipa['newuniqueid']]
                    uniqueid = module_otptoken[ansible_to_ipa['newuniqueid']]
                    module_otptoken.pop(ansible_to_ipa['newuniqueid'])

                # IPA wants the unique id in the first position and not as a key/value pair.
                # Get rid of it from the otptoken dict and just specify it in the name field
                # for otptoken_add.
                if ansible_to_ipa['uniqueid'] in module_otptoken:
                    module_otptoken.pop(ansible_to_ipa['uniqueid'])

                module_otptoken['all'] = True
                ipa_otptoken = client.otptoken_add(name=uniqueid, item=module_otptoken)
        else:
            # Token exists: validate, then modify only if something differs.
            if not(validate_modifications(ansible_to_ipa, module, ipa_otptoken,
                                          module_otptoken, unmodifiable_after_creation)):
                module.fail_json(msg="Modifications requested in module are not valid")

            # IPA will reject 'modifications' that do not actually modify anything
            # if any of the unmodifiable elements are specified. Explicitly
            # get rid of them here. They were not different, or else
            # we would have failed out in validate_modifications.
            for x in unmodifiable_after_creation:
                if ansible_to_ipa[x] in module_otptoken:
                    module_otptoken.pop(ansible_to_ipa[x])

            diff = client.get_diff(ipa_data=ipa_otptoken, module_data=module_otptoken)
            if len(diff) > 0:
                changed = True
                if not module.check_mode:

                    # IPA wants the unique id in the first position and not as a key/value pair.
                    # Get rid of it from the otptoken dict and just specify it in the name field
                    # for otptoken_mod.
                    if ansible_to_ipa['uniqueid'] in module_otptoken:
                        module_otptoken.pop(ansible_to_ipa['uniqueid'])

                    module_otptoken['all'] = True
                    ipa_otptoken = client.otptoken_mod(name=uniqueid, item=module_otptoken)
    else:
        # state == 'absent': delete the token if it exists.
        if ipa_otptoken:
            changed = True
            if not module.check_mode:
                client.otptoken_del(name=uniqueid)

    # Transform the output to use ansible keywords (not the IPA keywords) and
    # sanitize any key values in the output.
    ipa_otptoken, sanitize_strings = transform_output(ipa_otptoken, ansible_to_ipa, ipa_to_ansible)
    module.no_log_values = module.no_log_values.union(sanitize_strings)
    sanitized_otptoken = sanitize_keys(obj=ipa_otptoken, no_log_strings=module.no_log_values)
    return changed, sanitized_otptoken
|
||||
|
||||
|
||||
def main():
    """Module entry point: build the argument spec, log in to IPA and
    apply the requested token state."""
    # Token-specific options layered on top of the shared ipa_* options.
    otp_options = {
        'uniqueid': dict(type='str', aliases=['name'], required=True),
        'newuniqueid': dict(type='str'),
        'otptype': dict(type='str', choices=['totp', 'hotp']),
        'secretkey': dict(type='str', no_log=True),
        'description': dict(type='str'),
        'owner': dict(type='str'),
        'enabled': dict(type='bool', default=True),
        'notbefore': dict(type='str'),
        'notafter': dict(type='str'),
        'vendor': dict(type='str'),
        'model': dict(type='str'),
        'serial': dict(type='str'),
        'state': dict(type='str', choices=['present', 'absent'], default='present'),
        'algorithm': dict(type='str', choices=['sha1', 'sha256', 'sha384', 'sha512']),
        'digits': dict(type='int', choices=[6, 8]),
        'offset': dict(type='int'),
        'interval': dict(type='int'),
        'counter': dict(type='int'),
    }
    argument_spec = ipa_argument_spec()
    argument_spec.update(otp_options)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    client = OTPTokenIPAClient(module=module,
                               host=module.params['ipa_host'],
                               port=module.params['ipa_port'],
                               protocol=module.params['ipa_prot'])

    try:
        client.login(username=module.params['ipa_user'],
                     password=module.params['ipa_pass'])
        changed, otptoken = ensure(module, client)
    except Exception as e:
        # Surface any IPA/communication failure with a traceback for debugging.
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())

    module.exit_json(changed=changed, otptoken=otptoken)
|
||||
|
||||
|
||||
# Standard Ansible module entry point: run only when executed as a script.
if __name__ == '__main__':
    main()
|
||||
@@ -94,7 +94,8 @@ options:
|
||||
description:
|
||||
- The authentication type to use for the user.
|
||||
choices: ["password", "radius", "otp", "pkinit", "hardened"]
|
||||
type: str
|
||||
type: list
|
||||
elements: str
|
||||
version_added: '1.2.0'
|
||||
extends_documentation_fragment:
|
||||
- community.general.ipa.documentation
|
||||
@@ -146,11 +147,13 @@ EXAMPLES = r'''
|
||||
ipa_pass: topsecret
|
||||
update_password: on_create
|
||||
|
||||
- name: Ensure pinky is present and using one time password authentication
|
||||
- name: Ensure pinky is present and using one time password and RADIUS authentication
|
||||
community.general.ipa_user:
|
||||
name: pinky
|
||||
state: present
|
||||
userauthtype: otp
|
||||
userauthtype:
|
||||
- otp
|
||||
- radius
|
||||
ipa_host: ipa.example.com
|
||||
ipa_user: admin
|
||||
ipa_pass: topsecret
|
||||
@@ -269,16 +272,18 @@ def get_user_diff(client, ipa_user, module_user):
|
||||
def get_ssh_key_fingerprint(ssh_key, hash_algo='sha256'):
|
||||
"""
|
||||
Return the public key fingerprint of a given public SSH key
|
||||
in format "[fp] [user@host] (ssh-rsa)" where fp is of the format:
|
||||
in format "[fp] [comment] (ssh-rsa)" where fp is of the format:
|
||||
FB:0C:AC:0A:07:94:5B:CE:75:6E:63:32:13:AD:AD:D7
|
||||
for md5 or
|
||||
SHA256:[base64]
|
||||
for sha256
|
||||
Comments are assumed to be all characters past the second
|
||||
whitespace character in the sshpubkey string.
|
||||
:param ssh_key:
|
||||
:param hash_algo:
|
||||
:return:
|
||||
"""
|
||||
parts = ssh_key.strip().split()
|
||||
parts = ssh_key.strip().split(None, 2)
|
||||
if len(parts) == 0:
|
||||
return None
|
||||
key_type = parts[0]
|
||||
@@ -293,8 +298,8 @@ def get_ssh_key_fingerprint(ssh_key, hash_algo='sha256'):
|
||||
if len(parts) < 3:
|
||||
return "%s (%s)" % (key_fp, key_type)
|
||||
else:
|
||||
user_host = parts[2]
|
||||
return "%s %s (%s)" % (key_fp, user_host, key_type)
|
||||
comment = parts[2]
|
||||
return "%s %s (%s)" % (key_fp, comment, key_type)
|
||||
|
||||
|
||||
def ensure(module, client):
|
||||
@@ -361,7 +366,7 @@ def main():
|
||||
telephonenumber=dict(type='list', elements='str'),
|
||||
title=dict(type='str'),
|
||||
homedirectory=dict(type='str'),
|
||||
userauthtype=dict(type='str',
|
||||
userauthtype=dict(type='list', elements='str',
|
||||
choices=['password', 'radius', 'otp', 'pkinit', 'hardened']))
|
||||
|
||||
module = AnsibleModule(argument_spec=argument_spec,
|
||||
|
||||
1
plugins/modules/ipa_otpconfig.py
Symbolic link
1
plugins/modules/ipa_otpconfig.py
Symbolic link
@@ -0,0 +1 @@
|
||||
./identity/ipa/ipa_otpconfig.py
|
||||
1
plugins/modules/ipa_otptoken.py
Symbolic link
1
plugins/modules/ipa_otptoken.py
Symbolic link
@@ -0,0 +1 @@
|
||||
./identity/ipa/ipa_otptoken.py
|
||||
528
plugins/modules/monitoring/spectrum_model_attrs.py
Normal file
528
plugins/modules/monitoring/spectrum_model_attrs.py
Normal file
@@ -0,0 +1,528 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# (c) 2021, Tyler Gates <tgates81@gmail.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: spectrum_model_attrs
|
||||
short_description: Enforce a model's attributes in CA Spectrum
|
||||
description:
|
||||
- This module can be used to enforce a model's attributes in CA Spectrum.
|
||||
version_added: 2.5.0
|
||||
author:
|
||||
- Tyler Gates (@tgates81)
|
||||
notes:
|
||||
- Tested on CA Spectrum version 10.4.2.0.189.
|
||||
- Model creation and deletion are not possible with this module. For that use M(community.general.spectrum_device) instead.
|
||||
requirements:
|
||||
- 'python >= 2.7'
|
||||
options:
|
||||
url:
|
||||
description:
|
||||
- URL of OneClick server.
|
||||
type: str
|
||||
required: true
|
||||
url_username:
|
||||
description:
|
||||
- OneClick username.
|
||||
type: str
|
||||
required: true
|
||||
aliases: [username]
|
||||
url_password:
|
||||
description:
|
||||
- OneClick password.
|
||||
type: str
|
||||
required: true
|
||||
aliases: [password]
|
||||
use_proxy:
|
||||
description:
|
||||
- If C(no), it will not use a proxy, even if one is defined in
|
||||
an environment variable on the target hosts.
|
||||
default: yes
|
||||
required: false
|
||||
type: bool
|
||||
name:
|
||||
description:
|
||||
- Model name.
|
||||
type: str
|
||||
required: true
|
||||
type:
|
||||
description:
|
||||
- Model type.
|
||||
type: str
|
||||
required: true
|
||||
validate_certs:
|
||||
description:
|
||||
- Validate SSL certificates. Only change this to C(false) if you can guarantee that you are talking to the correct endpoint and there is no
|
||||
man-in-the-middle attack happening.
|
||||
type: bool
|
||||
default: yes
|
||||
required: false
|
||||
attributes:
|
||||
description:
|
||||
- A list of attribute names and values to enforce.
|
||||
- All values and parameters are case sensitive and must be provided as strings only.
|
||||
required: true
|
||||
type: list
|
||||
elements: dict
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- Attribute name OR hex ID.
|
||||
- 'Currently defined names are:'
|
||||
- ' C(App_Manufacturer) (C(0x230683))'
|
||||
- ' C(CollectionsModelNameString) (C(0x12adb))'
|
||||
- ' C(Condition) (C(0x1000a))'
|
||||
- ' C(Criticality) (C(0x1290c))'
|
||||
- ' C(DeviceType) (C(0x23000e))'
|
||||
- ' C(isManaged) (C(0x1295d))'
|
||||
- ' C(Model_Class) (C(0x11ee8))'
|
||||
- ' C(Model_Handle) (C(0x129fa))'
|
||||
- ' C(Model_Name) (C(0x1006e))'
|
||||
- ' C(Modeltype_Handle) (C(0x10001))'
|
||||
- ' C(Modeltype_Name) (C(0x10000))'
|
||||
- ' C(Network_Address) (C(0x12d7f))'
|
||||
- ' C(Notes) (C(0x11564))'
|
||||
- ' C(ServiceDesk_Asset_ID) (C(0x12db9))'
|
||||
- ' C(TopologyModelNameString) (C(0x129e7))'
|
||||
- ' C(sysDescr) (C(0x10052))'
|
||||
- ' C(sysName) (C(0x10b5b))'
|
||||
- ' C(Vendor_Name) (C(0x11570))'
|
||||
- ' C(Description) (C(0x230017))'
|
||||
- Hex IDs are the direct identifiers in Spectrum and will always work.
|
||||
- 'To lookup hex IDs go to the UI: Locator -> Devices -> By Model Name -> <enter any model> -> Attributes tab.'
|
||||
type: str
|
||||
required: true
|
||||
value:
|
||||
description:
|
||||
- Attribute value. Empty strings should be C("") or C(null).
|
||||
type: str
|
||||
required: true
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Enforce maintenance mode for modelxyz01 with a note about why
|
||||
community.general.spectrum_model_attrs:
|
||||
url: "http://oneclick.url.com"
|
||||
username: "{{ oneclick_username }}"
|
||||
password: "{{ oneclick_password }}"
|
||||
name: "modelxyz01"
|
||||
type: "Host_Device"
|
||||
validate_certs: true
|
||||
attributes:
|
||||
- name: "isManaged"
|
||||
value: "false"
|
||||
- name: "Notes"
|
||||
value: "MM set on {{ ansible_date_time.iso8601 }} via CO {{ CO }} by {{ tower_user_name | default(ansible_user_id) }}"
|
||||
delegate_to: localhost
|
||||
register: spectrum_model_attrs_status
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
msg:
|
||||
description: Informational message on the job result.
|
||||
type: str
|
||||
returned: always
|
||||
sample: 'Success'
|
||||
changed_attrs:
|
||||
description: Dictionary of changed name or hex IDs (whichever was specified) to their new corresponding values.
|
||||
type: dict
|
||||
returned: always
|
||||
sample: {
|
||||
"Notes": "MM set on 2021-02-03T22:04:02Z via CO CO9999 by tgates",
|
||||
"isManaged": "true"
|
||||
}
|
||||
'''
|
||||
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.urls import fetch_url
|
||||
from ansible.module_utils.six.moves.urllib.parse import quote
|
||||
import json
|
||||
import re
|
||||
import xml.etree.ElementTree as ET
|
||||
|
||||
|
||||
class spectrum_model_attrs:
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
self.url = module.params['url']
|
||||
# If the user did not define a full path to the restul space in url:
|
||||
# params, add what we believe it to be.
|
||||
if not re.search('\\/.+', self.url.split('://')[1]):
|
||||
self.url = "%s/spectrum/restful" % self.url.rstrip('/')
|
||||
# Align these with what is defined in OneClick's UI under:
|
||||
# Locator -> Devices -> By Model Name -> <enter any model> ->
|
||||
# Attributes tab.
|
||||
self.attr_map = dict(App_Manufacturer=hex(0x230683),
|
||||
CollectionsModelNameString=hex(0x12adb),
|
||||
Condition=hex(0x1000a),
|
||||
Criticality=hex(0x1290c),
|
||||
DeviceType=hex(0x23000e),
|
||||
isManaged=hex(0x1295d),
|
||||
Model_Class=hex(0x11ee8),
|
||||
Model_Handle=hex(0x129fa),
|
||||
Model_Name=hex(0x1006e),
|
||||
Modeltype_Handle=hex(0x10001),
|
||||
Modeltype_Name=hex(0x10000),
|
||||
Network_Address=hex(0x12d7f),
|
||||
Notes=hex(0x11564),
|
||||
ServiceDesk_Asset_ID=hex(0x12db9),
|
||||
TopologyModelNameString=hex(0x129e7),
|
||||
sysDescr=hex(0x10052),
|
||||
sysName=hex(0x10b5b),
|
||||
Vendor_Name=hex(0x11570),
|
||||
Description=hex(0x230017))
|
||||
self.search_qualifiers = [
|
||||
"and", "or", "not", "greater-than", "greater-than-or-equals",
|
||||
"less-than", "less-than-or-equals", "equals", "equals-ignore-case",
|
||||
"does-not-equal", "does-not-equal-ignore-case", "has-prefix",
|
||||
"does-not-have-prefix", "has-prefix-ignore-case",
|
||||
"does-not-have-prefix-ignore-case", "has-substring",
|
||||
"does-not-have-substring", "has-substring-ignore-case",
|
||||
"does-not-have-substring-ignore-case", "has-suffix",
|
||||
"does-not-have-suffix", "has-suffix-ignore-case",
|
||||
"does-not-have-suffix-ignore-case", "has-pcre",
|
||||
"has-pcre-ignore-case", "has-wildcard", "has-wildcard-ignore-case",
|
||||
"is-derived-from", "not-is-derived-from"]
|
||||
|
||||
self.resp_namespace = dict(ca="http://www.ca.com/spectrum/restful/schema/response")
|
||||
|
||||
self.result = dict(msg="", changed_attrs=dict())
|
||||
self.success_msg = "Success"
|
||||
|
||||
def build_url(self, path):
|
||||
"""
|
||||
Build a sane Spectrum restful API URL
|
||||
:param path: The path to append to the restful base
|
||||
:type path: str
|
||||
:returns: Complete restful API URL
|
||||
:rtype: str
|
||||
"""
|
||||
|
||||
return "%s/%s" % (self.url.rstrip('/'), path.lstrip('/'))
|
||||
|
||||
def attr_id(self, name):
|
||||
"""
|
||||
Get attribute hex ID
|
||||
:param name: The name of the attribute to retrieve the hex ID for
|
||||
:type name: str
|
||||
:returns: Translated hex ID of name, or None if no translation found
|
||||
:rtype: str or None
|
||||
"""
|
||||
|
||||
try:
|
||||
return self.attr_map[name]
|
||||
except KeyError:
|
||||
return None
|
||||
|
||||
def attr_name(self, _id):
|
||||
"""
|
||||
Get attribute name from hex ID
|
||||
:param _id: The hex ID to lookup a name for
|
||||
:type _id: str
|
||||
:returns: Translated name of hex ID, or None if no translation found
|
||||
:rtype: str or None
|
||||
"""
|
||||
|
||||
for name, m_id in list(self.attr_map.items()):
|
||||
if _id == m_id:
|
||||
return name
|
||||
return None
|
||||
|
||||
def urlencode(self, string):
|
||||
"""
|
||||
URL Encode a string
|
||||
:param: string: The string to URL encode
|
||||
:type string: str
|
||||
:returns: URL encode version of supplied string
|
||||
:rtype: str
|
||||
"""
|
||||
|
||||
return quote(string, "<>%-_.!*'():?#/@&+,;=")
|
||||
|
||||
def update_model(self, model_handle, attrs):
    """
    Update a model's attributes
    :param model_handle: The model's handle ID
    :type model_handle: str
    :param attrs: Model's attributes to update. {'<name/id>': '<attr>'}
    :type attrs: dict
    :returns: Nothing; exits on error or updates self.results
    :rtype: None
    """

    # Build the update URL: each attribute becomes an attr=<id>&val=<value>
    # query pair appended to /model/<handle>.
    update_url = self.build_url("/model/%s?" % model_handle)
    for name, val in list(attrs.items()):
        if val is None:
            # None values should be converted to empty strings
            val = ""
        val = self.urlencode(str(val))
        if not update_url.endswith('?'):
            update_url += "&"

        # Attribute keys may be friendly names or raw hex IDs; fall back
        # to the name itself when no hex translation exists.
        update_url += "attr=%s&val=%s" % (self.attr_id(name) or name, val)

    # PUT to /model to update the attributes, or fail.
    resp, info = fetch_url(self.module, update_url, method="PUT",
                           headers={"Content-Type": "application/json",
                                    "Accept": "application/json"},
                           use_proxy=self.module.params['use_proxy'])
    status_code = info["status"]
    # On HTTP errors fetch_url puts the payload in info['body']; otherwise
    # read it from the response object (which may be None).
    if status_code >= 400:
        body = info['body']
    else:
        body = "" if resp is None else resp.read()
    if status_code != 200:
        self.result['msg'] = "HTTP PUT error %s: %s: %s" % (status_code, update_url, body)
        self.module.fail_json(**self.result)

    # Load and parse the JSON response and either fail or set results.
    json_resp = json.loads(body)
    """
    Example success response:
        {'model-update-response-list':{'model-responses':{'model':{'@error':'Success','@mh':'0x1010e76','attribute':{'@error':'Success','@id':'0x1295d'}}}}}"
    Example failure response:
        {'model-update-response-list': {'model-responses': {'model': {'@error': 'PartialFailure', '@mh': '0x1010e76', 'attribute': {'@error-message': 'brn0vlappua001: You do not have permission to set attribute Network_Address for this model.', '@error': 'Error', '@id': '0x12d7f'}}}}}
    """  # noqa
    # NOTE(review): assumes the response always nests under
    # model-update-response-list/model-responses/model — confirm against
    # the OneClick API for batch/partial responses.
    model_resp = json_resp['model-update-response-list']['model-responses']['model']
    if model_resp['@error'] != "Success":
        # I'm not 100% confident on the expected failure structure so just
        # dump all of ['attribute'].
        self.result['msg'] = str(model_resp['attribute'])
        self.module.fail_json(**self.result)

    # Should be OK if we get to here, set results.
    self.result['msg'] = self.success_msg
    self.result['changed_attrs'].update(attrs)
    self.result['changed'] = True
|
||||
|
||||
def find_model(self, search_criteria, ret_attrs=None):
|
||||
"""
|
||||
Search for a model in /models
|
||||
:param search_criteria: The XML <rs:search-criteria>
|
||||
:type search_criteria: str
|
||||
:param ret_attrs: List of attributes by name or ID to return back
|
||||
(default is Model_Handle)
|
||||
:type ret_attrs: list
|
||||
returns: Dictionary mapping of ret_attrs to values: {ret_attr: ret_val}
|
||||
rtype: dict
|
||||
"""
|
||||
|
||||
# If no return attributes were asked for, return Model_Handle.
|
||||
if ret_attrs is None:
|
||||
ret_attrs = ['Model_Handle']
|
||||
|
||||
# Set the XML <rs:requested-attribute id=<id>> tags. If no hex ID
|
||||
# is found for the name, assume it is already in hex. {name: hex ID}
|
||||
rqstd_attrs = ""
|
||||
for ra in ret_attrs:
|
||||
_id = self.attr_id(ra) or ra
|
||||
rqstd_attrs += '<rs:requested-attribute id="%s" />' % (self.attr_id(ra) or ra)
|
||||
|
||||
# Build the complete XML search query for HTTP POST.
|
||||
xml = """<?xml version="1.0" encoding="UTF-8"?>
|
||||
<rs:model-request throttlesize="5"
|
||||
xmlns:rs="http://www.ca.com/spectrum/restful/schema/request"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://www.ca.com/spectrum/restful/schema/request ../../../xsd/Request.xsd">
|
||||
<rs:target-models>
|
||||
<rs:models-search>
|
||||
<rs:search-criteria xmlns="http://www.ca.com/spectrum/restful/schema/filter">
|
||||
{0}
|
||||
</rs:search-criteria>
|
||||
</rs:models-search>
|
||||
</rs:target-models>
|
||||
{1}
|
||||
</rs:model-request>
|
||||
""".format(search_criteria, rqstd_attrs)
|
||||
|
||||
# POST to /models and fail on errors.
|
||||
url = self.build_url("/models")
|
||||
resp, info = fetch_url(self.module, url, data=xml, method="POST",
|
||||
use_proxy=self.module.params['use_proxy'],
|
||||
headers={"Content-Type": "application/xml",
|
||||
"Accept": "application/xml"})
|
||||
status_code = info["status"]
|
||||
if status_code >= 400:
|
||||
body = info['body']
|
||||
else:
|
||||
body = "" if resp is None else resp.read()
|
||||
if status_code != 200:
|
||||
self.result['msg'] = "HTTP POST error %s: %s: %s" % (status_code, url, body)
|
||||
self.module.fail_json(**self.result)
|
||||
|
||||
# Parse through the XML response and fail on any detected errors.
|
||||
root = ET.fromstring(body)
|
||||
total_models = int(root.attrib['total-models'])
|
||||
error = root.attrib['error']
|
||||
model_responses = root.find('ca:model-responses', self.resp_namespace)
|
||||
if total_models < 1:
|
||||
self.result['msg'] = "No models found matching search criteria `%s'" % search_criteria
|
||||
self.module.fail_json(**self.result)
|
||||
elif total_models > 1:
|
||||
self.result['msg'] = "More than one model found (%s): `%s'" % (total_models, ET.tostring(model_responses,
|
||||
encoding='unicode'))
|
||||
self.module.fail_json(**self.result)
|
||||
if error != "EndOfResults":
|
||||
self.result['msg'] = "Unexpected search response `%s': %s" % (error, ET.tostring(model_responses,
|
||||
encoding='unicode'))
|
||||
self.module.fail_json(**self.result)
|
||||
model = model_responses.find('ca:model', self.resp_namespace)
|
||||
attrs = model.findall('ca:attribute', self.resp_namespace)
|
||||
if not attrs:
|
||||
self.result['msg'] = "No attributes returned."
|
||||
self.module.fail_json(**self.result)
|
||||
|
||||
# XML response should be successful. Iterate and set each returned
|
||||
# attribute ID/name and value for return.
|
||||
ret = dict()
|
||||
for attr in attrs:
|
||||
attr_id = attr.get('id')
|
||||
attr_name = self.attr_name(attr_id)
|
||||
# Note: all values except empty strings (None) are strings only!
|
||||
attr_val = attr.text
|
||||
key = attr_name if attr_name in ret_attrs else attr_id
|
||||
ret[key] = attr_val
|
||||
ret_attrs.remove(key)
|
||||
return ret
|
||||
|
||||
def find_model_by_name_type(self, mname, mtype, ret_attrs=None):
|
||||
"""
|
||||
Find a model by name and type
|
||||
:param mname: Model name
|
||||
:type mname: str
|
||||
:param mtype: Model type
|
||||
:type mtype: str
|
||||
:param ret_attrs: List of attributes by name or ID to return back
|
||||
(default is Model_Handle)
|
||||
:type ret_attrs: list
|
||||
returns: find_model(): Dictionary mapping of ret_attrs to values:
|
||||
{ret_attr: ret_val}
|
||||
rtype: dict
|
||||
"""
|
||||
|
||||
# If no return attributes were asked for, return Model_Handle.
|
||||
if ret_attrs is None:
|
||||
ret_attrs = ['Model_Handle']
|
||||
|
||||
"""This is basically as follows:
|
||||
<filtered-models>
|
||||
<and>
|
||||
<equals>
|
||||
<attribute id=...>
|
||||
<value>...</value>
|
||||
</attribute>
|
||||
</equals>
|
||||
<equals>
|
||||
<attribute...>
|
||||
</equals>
|
||||
</and>
|
||||
</filtered-models>
|
||||
"""
|
||||
|
||||
# Parent filter tag
|
||||
filtered_models = ET.Element('filtered-models')
|
||||
# Logically and
|
||||
_and = ET.SubElement(filtered_models, 'and')
|
||||
|
||||
# Model Name
|
||||
MN_equals = ET.SubElement(_and, 'equals')
|
||||
Model_Name = ET.SubElement(MN_equals, 'attribute',
|
||||
{'id': self.attr_map['Model_Name']})
|
||||
MN_value = ET.SubElement(Model_Name, 'value')
|
||||
MN_value.text = mname
|
||||
|
||||
# Model Type Name
|
||||
MTN_equals = ET.SubElement(_and, 'equals')
|
||||
Modeltype_Name = ET.SubElement(MTN_equals, 'attribute',
|
||||
{'id': self.attr_map['Modeltype_Name']})
|
||||
MTN_value = ET.SubElement(Modeltype_Name, 'value')
|
||||
MTN_value.text = mtype
|
||||
|
||||
return self.find_model(ET.tostring(filtered_models,
|
||||
encoding='unicode'),
|
||||
ret_attrs)
|
||||
|
||||
def ensure_model_attrs(self):
|
||||
|
||||
# Get a list of all requested attribute names/IDs plus Model_Handle and
|
||||
# use them to query the values currently set. Store finding in a
|
||||
# dictionary.
|
||||
req_attrs = []
|
||||
for attr in self.module.params['attributes']:
|
||||
req_attrs.append(attr['name'])
|
||||
if 'Model_Handle' not in req_attrs:
|
||||
req_attrs.append('Model_Handle')
|
||||
|
||||
# Survey attributes currently set and store in a dict.
|
||||
cur_attrs = self.find_model_by_name_type(self.module.params['name'],
|
||||
self.module.params['type'],
|
||||
req_attrs)
|
||||
|
||||
# Iterate through the requested attributes names/IDs values pair and
|
||||
# compare with those currently set. If different, attempt to change.
|
||||
Model_Handle = cur_attrs.pop("Model_Handle")
|
||||
for attr in self.module.params['attributes']:
|
||||
req_name = attr['name']
|
||||
req_val = attr['value']
|
||||
if req_val == "":
|
||||
# The API will return None on empty string
|
||||
req_val = None
|
||||
if cur_attrs[req_name] != req_val:
|
||||
if self.module.check_mode:
|
||||
self.result['changed_attrs'][req_name] = req_val
|
||||
self.result['msg'] = self.success_msg
|
||||
self.result['changed'] = True
|
||||
continue
|
||||
resp = self.update_model(Model_Handle, {req_name: req_val})
|
||||
|
||||
self.module.exit_json(**self.result)
|
||||
|
||||
|
||||
def run_module():
|
||||
argument_spec = dict(
|
||||
url=dict(type='str', required=True),
|
||||
url_username=dict(type='str', required=True, aliases=['username']),
|
||||
url_password=dict(type='str', required=True, aliases=['password'],
|
||||
no_log=True),
|
||||
validate_certs=dict(type='bool', default=True),
|
||||
use_proxy=dict(type='bool', default=True),
|
||||
name=dict(type='str', required=True),
|
||||
type=dict(type='str', required=True),
|
||||
attributes=dict(type='list',
|
||||
required=True,
|
||||
elements='dict',
|
||||
options=dict(
|
||||
name=dict(type='str', required=True),
|
||||
value=dict(type='str', required=True)
|
||||
)),
|
||||
)
|
||||
module = AnsibleModule(
|
||||
supports_check_mode=True,
|
||||
argument_spec=argument_spec,
|
||||
)
|
||||
|
||||
try:
|
||||
sm = spectrum_model_attrs(module)
|
||||
sm.ensure_model_attrs()
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Failed to ensure attribute(s) on `%s' with "
|
||||
"exception: %s" % (module.params['name'],
|
||||
to_native(e)))
|
||||
|
||||
|
||||
def main():
|
||||
run_module()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -150,7 +150,7 @@ EXAMPLES = r'''
|
||||
backend: www
|
||||
wait: yes
|
||||
drain: yes
|
||||
wait_interval: 1
|
||||
wait_interval: 60
|
||||
wait_retries: 60
|
||||
|
||||
- name: Disable backend server in 'www' backend pool and drop open sessions to it
|
||||
@@ -367,10 +367,9 @@ class HAProxy(object):
|
||||
# We can assume there will only be 1 element in state because both svname and pxname are always set when we get here
|
||||
# When using track we get a status like this: MAINT (via pxname/svname) so we need to do substring matching
|
||||
if status in state[0]['status']:
|
||||
if not self._drain or (state[0]['scur'] == '0' and 'MAINT' in state):
|
||||
if not self._drain or state[0]['scur'] == '0':
|
||||
return True
|
||||
else:
|
||||
time.sleep(self.wait_interval)
|
||||
time.sleep(self.wait_interval)
|
||||
|
||||
self.module.fail_json(msg="server %s/%s not status '%s' after %d retries. Aborting." %
|
||||
(pxname, svname, status, self.wait_retries))
|
||||
@@ -409,15 +408,17 @@ class HAProxy(object):
|
||||
def drain(self, host, backend, status='DRAIN'):
|
||||
"""
|
||||
Drain action, sets the server to DRAIN mode.
|
||||
In this mode mode, the server will not accept any new connections
|
||||
In this mode, the server will not accept any new connections
|
||||
other than those that are accepted via persistence.
|
||||
"""
|
||||
haproxy_version = self.discover_version()
|
||||
|
||||
# check if haproxy version suppots DRAIN state (starting with 1.5)
|
||||
# check if haproxy version supports DRAIN state (starting with 1.5)
|
||||
if haproxy_version and (1, 5) <= haproxy_version:
|
||||
cmd = "set server $pxname/$svname state drain"
|
||||
self.execute_for_backends(cmd, backend, host, status)
|
||||
self.execute_for_backends(cmd, backend, host, "DRAIN")
|
||||
if status == "MAINT":
|
||||
self.disabled(host, backend, self.shutdown_sessions)
|
||||
|
||||
def act(self):
|
||||
"""
|
||||
@@ -426,7 +427,7 @@ class HAProxy(object):
|
||||
# Get the state before the run
|
||||
self.command_results['state_before'] = self.get_state_for(self.backend, self.host)
|
||||
|
||||
# toggle enable/disbale server
|
||||
# toggle enable/disable server
|
||||
if self.state == 'enabled':
|
||||
self.enabled(self.host, self.backend, self.weight)
|
||||
elif self.state == 'disabled' and self._drain:
|
||||
|
||||
@@ -205,9 +205,11 @@ class ResourceRecord(object):
|
||||
def list_record(self, record):
|
||||
# check if the record exists via list on ipwcli
|
||||
search = 'list %s' % (record.replace(';', '&&').replace('set', 'where'))
|
||||
cmd = [self.module.get_bin_path('ipwcli', True)]
|
||||
cmd.append('-user=%s' % (self.user))
|
||||
cmd.append('-password=%s' % (self.password))
|
||||
cmd = [
|
||||
self.module.get_bin_path('ipwcli', True),
|
||||
'-user=%s' % self.user,
|
||||
'-password=%s' % self.password,
|
||||
]
|
||||
rc, out, err = self.module.run_command(cmd, data=search)
|
||||
|
||||
if 'Invalid username or password' in out:
|
||||
@@ -222,9 +224,11 @@ class ResourceRecord(object):
|
||||
def deploy_record(self, record):
|
||||
# check what happens if create fails on ipworks
|
||||
stdin = 'create %s' % (record)
|
||||
cmd = [self.module.get_bin_path('ipwcli', True)]
|
||||
cmd.append('-user=%s' % (self.user))
|
||||
cmd.append('-password=%s' % (self.password))
|
||||
cmd = [
|
||||
self.module.get_bin_path('ipwcli', True),
|
||||
'-user=%s' % self.user,
|
||||
'-password=%s' % self.password,
|
||||
]
|
||||
rc, out, err = self.module.run_command(cmd, data=stdin)
|
||||
|
||||
if 'Invalid username or password' in out:
|
||||
@@ -238,9 +242,11 @@ class ResourceRecord(object):
|
||||
def delete_record(self, record):
|
||||
# check what happens if create fails on ipworks
|
||||
stdin = 'delete %s' % (record.replace(';', '&&').replace('set', 'where'))
|
||||
cmd = [self.module.get_bin_path('ipwcli', True)]
|
||||
cmd.append('-user=%s' % (self.user))
|
||||
cmd.append('-password=%s' % (self.password))
|
||||
cmd = [
|
||||
self.module.get_bin_path('ipwcli', True),
|
||||
'-user=%s' % self.user,
|
||||
'-password=%s' % self.password,
|
||||
]
|
||||
rc, out, err = self.module.run_command(cmd, data=stdin)
|
||||
|
||||
if 'Invalid username or password' in out:
|
||||
|
||||
@@ -255,7 +255,7 @@ def main():
|
||||
has_changed = True
|
||||
|
||||
except Exception as ex:
|
||||
module.fail_json(msg=ex.message)
|
||||
module.fail_json(msg=str(ex))
|
||||
|
||||
module.exit_json(changed=has_changed, result={"records": [record_data(r) for r in all_records]})
|
||||
|
||||
|
||||
@@ -752,6 +752,7 @@ class Nmcli(object):
|
||||
})
|
||||
elif self.type == 'bridge-slave':
|
||||
options.update({
|
||||
'connection.slave-type': 'bridge',
|
||||
'bridge-port.path-cost': self.path_cost,
|
||||
'bridge-port.hairpin-mode': self.hairpin,
|
||||
'bridge-port.priority': self.slavepriority,
|
||||
@@ -1005,7 +1006,6 @@ class Nmcli(object):
|
||||
'con-name': 'connection.id',
|
||||
'autoconnect': 'connection.autoconnect',
|
||||
'ifname': 'connection.interface-name',
|
||||
'mac': self.mac_setting,
|
||||
'master': 'connection.master',
|
||||
'slave-type': 'connection.slave-type',
|
||||
'zone': 'connection.zone',
|
||||
@@ -1029,6 +1029,11 @@ class Nmcli(object):
|
||||
current_value = [re.sub(r'^{\s*ip\s*=\s*([^, ]+),\s*nh\s*=\s*([^} ]+),\s*mt\s*=\s*([^} ]+)\s*}', r'\1 \2 \3',
|
||||
route) for route in current_value]
|
||||
current_value = [re.sub(r'^{\s*ip\s*=\s*([^, ]+),\s*nh\s*=\s*([^} ]+)\s*}', r'\1 \2', route) for route in current_value]
|
||||
if key == self.mac_setting:
|
||||
# MAC addresses are case insensitive, nmcli always reports them in uppercase
|
||||
value = value.upper()
|
||||
# ensure current_value is also converted to uppercase in case nmcli changes behaviour
|
||||
current_value = current_value.upper()
|
||||
elif key in param_alias:
|
||||
real_key = param_alias[key]
|
||||
if real_key in conn_info:
|
||||
|
||||
0
plugins/modules/net_tools/pritunl/__init__.py
Normal file
0
plugins/modules/net_tools/pritunl/__init__.py
Normal file
199
plugins/modules/net_tools/pritunl/pritunl_org.py
Normal file
199
plugins/modules/net_tools/pritunl/pritunl_org.py
Normal file
@@ -0,0 +1,199 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2021, Florian Dambrine <android.florian@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: pritunl_org
|
||||
author: Florian Dambrine (@Lowess)
|
||||
version_added: 2.5.0
|
||||
short_description: Manages Pritunl Organizations using the Pritunl API
|
||||
description:
|
||||
- A module to manage Pritunl organizations using the Pritunl API.
|
||||
extends_documentation_fragment:
|
||||
- community.general.pritunl
|
||||
options:
|
||||
name:
|
||||
type: str
|
||||
required: true
|
||||
aliases:
|
||||
- org
|
||||
description:
|
||||
- The name of the organization to manage in Pritunl.
|
||||
|
||||
force:
|
||||
type: bool
|
||||
default: false
|
||||
description:
|
||||
- If I(force) is C(true) and I(state) is C(absent), the module
|
||||
will delete the organization, no matter if it contains users
|
||||
or not. By default I(force) is C(false), which will cause the
|
||||
module to fail the deletion of the organization when it contains
|
||||
users.
|
||||
|
||||
state:
|
||||
type: str
|
||||
default: 'present'
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
description:
|
||||
- If C(present), the module adds organization I(name) to
|
||||
Pritunl. If C(absent), attempt to delete the organization
|
||||
from Pritunl (please read about I(force) usage).
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
- name: Ensure the organization named MyOrg exists
|
||||
community.general.pritunl_org:
|
||||
state: present
|
||||
name: MyOrg
|
||||
|
||||
- name: Ensure the organization named MyOrg does not exist
|
||||
community.general.pritunl_org:
|
||||
state: absent
|
||||
name: MyOrg
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
response:
|
||||
description: JSON representation of a Pritunl Organization.
|
||||
returned: success
|
||||
type: dict
|
||||
sample:
|
||||
{
|
||||
"auth_api": False,
|
||||
"name": "Foo",
|
||||
"auth_token": None,
|
||||
"user_count": 0,
|
||||
"auth_secret": None,
|
||||
"id": "csftwlu6uhralzi2dpmhekz3",
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.common.dict_transformations import dict_merge
|
||||
from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import (
|
||||
PritunlException,
|
||||
delete_pritunl_organization,
|
||||
post_pritunl_organization,
|
||||
list_pritunl_organizations,
|
||||
get_pritunl_settings,
|
||||
pritunl_argument_spec,
|
||||
)
|
||||
|
||||
|
||||
def add_pritunl_organization(module):
|
||||
result = {}
|
||||
|
||||
org_name = module.params.get("name")
|
||||
|
||||
org_obj_list = list_pritunl_organizations(
|
||||
**dict_merge(
|
||||
get_pritunl_settings(module),
|
||||
{"filters": {"name": org_name}},
|
||||
)
|
||||
)
|
||||
|
||||
# If the organization already exists
|
||||
if len(org_obj_list) > 0:
|
||||
result["changed"] = False
|
||||
result["response"] = org_obj_list[0]
|
||||
else:
|
||||
# Otherwise create it
|
||||
response = post_pritunl_organization(
|
||||
**dict_merge(
|
||||
get_pritunl_settings(module),
|
||||
{"organization_name": org_name},
|
||||
)
|
||||
)
|
||||
result["changed"] = True
|
||||
result["response"] = response
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
def remove_pritunl_organization(module):
|
||||
result = {}
|
||||
|
||||
org_name = module.params.get("name")
|
||||
force = module.params.get("force")
|
||||
|
||||
org_obj_list = []
|
||||
|
||||
org_obj_list = list_pritunl_organizations(
|
||||
**dict_merge(
|
||||
get_pritunl_settings(module),
|
||||
{
|
||||
"filters": {"name": org_name},
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
# No organization found
|
||||
if len(org_obj_list) == 0:
|
||||
result["changed"] = False
|
||||
result["response"] = {}
|
||||
|
||||
else:
|
||||
# Otherwise attempt to delete it
|
||||
org = org_obj_list[0]
|
||||
|
||||
# Only accept deletion under specific conditions
|
||||
if force or org["user_count"] == 0:
|
||||
response = delete_pritunl_organization(
|
||||
**dict_merge(
|
||||
get_pritunl_settings(module),
|
||||
{"organization_id": org["id"]},
|
||||
)
|
||||
)
|
||||
result["changed"] = True
|
||||
result["response"] = response
|
||||
else:
|
||||
module.fail_json(
|
||||
msg=(
|
||||
"Can not remove organization '%s' with %d attached users. "
|
||||
"Either set 'force' option to true or remove active users "
|
||||
"from the organization"
|
||||
)
|
||||
% (org_name, org["user_count"])
|
||||
)
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = pritunl_argument_spec()
|
||||
|
||||
argument_spec.update(
|
||||
dict(
|
||||
name=dict(required=True, type="str", aliases=["org"]),
|
||||
force=dict(required=False, type="bool", default=False),
|
||||
state=dict(
|
||||
required=False, choices=["present", "absent"], default="present"
|
||||
),
|
||||
)
|
||||
),
|
||||
|
||||
module = AnsibleModule(argument_spec=argument_spec)
|
||||
|
||||
state = module.params.get("state")
|
||||
|
||||
try:
|
||||
if state == "present":
|
||||
add_pritunl_organization(module)
|
||||
elif state == "absent":
|
||||
remove_pritunl_organization(module)
|
||||
except PritunlException as e:
|
||||
module.fail_json(msg=to_native(e))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
129
plugins/modules/net_tools/pritunl/pritunl_org_info.py
Normal file
129
plugins/modules/net_tools/pritunl/pritunl_org_info.py
Normal file
@@ -0,0 +1,129 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2021, Florian Dambrine <android.florian@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: pritunl_org_info
|
||||
author: Florian Dambrine (@Lowess)
|
||||
version_added: 2.5.0
|
||||
short_description: List Pritunl Organizations using the Pritunl API
|
||||
description:
|
||||
- A module to list Pritunl organizations using the Pritunl API.
|
||||
extends_documentation_fragment:
|
||||
- community.general.pritunl
|
||||
options:
|
||||
organization:
|
||||
type: str
|
||||
required: false
|
||||
aliases:
|
||||
- org
|
||||
default: null
|
||||
description:
|
||||
- Name of the Pritunl organization to search for.
|
||||
If none provided, the module will return all Pritunl
|
||||
organizations.
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
- name: List all existing Pritunl organizations
|
||||
community.general.pritunl_org_info:
|
||||
|
||||
- name: Search for an organization named MyOrg
|
||||
community.general.pritunl_user_info:
|
||||
organization: MyOrg
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
organizations:
|
||||
description: List of Pritunl organizations.
|
||||
returned: success
|
||||
type: list
|
||||
elements: dict
|
||||
sample:
|
||||
[
|
||||
{
|
||||
"auth_api": False,
|
||||
"name": "FooOrg",
|
||||
"auth_token": None,
|
||||
"user_count": 0,
|
||||
"auth_secret": None,
|
||||
"id": "csftwlu6uhralzi2dpmhekz3",
|
||||
},
|
||||
{
|
||||
"auth_api": False,
|
||||
"name": "MyOrg",
|
||||
"auth_token": None,
|
||||
"user_count": 3,
|
||||
"auth_secret": None,
|
||||
"id": "58070daee63f3b2e6e472c36",
|
||||
},
|
||||
{
|
||||
"auth_api": False,
|
||||
"name": "BarOrg",
|
||||
"auth_token": None,
|
||||
"user_count": 0,
|
||||
"auth_secret": None,
|
||||
"id": "v1sncsxxybnsylc8gpqg85pg",
|
||||
}
|
||||
]
|
||||
"""
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.common.dict_transformations import dict_merge
|
||||
from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import (
|
||||
PritunlException,
|
||||
get_pritunl_settings,
|
||||
list_pritunl_organizations,
|
||||
pritunl_argument_spec,
|
||||
)
|
||||
|
||||
|
||||
def get_pritunl_organizations(module):
|
||||
org_name = module.params.get("organization")
|
||||
|
||||
organizations = []
|
||||
|
||||
organizations = list_pritunl_organizations(
|
||||
**dict_merge(
|
||||
get_pritunl_settings(module),
|
||||
{"filters": {"name": org_name} if org_name else None},
|
||||
)
|
||||
)
|
||||
|
||||
if org_name and len(organizations) == 0:
|
||||
# When an org_name is provided but no organization match return an error
|
||||
module.fail_json(msg="Organization '%s' does not exist" % org_name)
|
||||
|
||||
result = {}
|
||||
result["changed"] = False
|
||||
result["organizations"] = organizations
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = pritunl_argument_spec()
|
||||
|
||||
argument_spec.update(
|
||||
dict(
|
||||
organization=dict(required=False, type="str", default=None, aliases=["org"])
|
||||
)
|
||||
),
|
||||
|
||||
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
|
||||
|
||||
try:
|
||||
get_pritunl_organizations(module)
|
||||
except PritunlException as e:
|
||||
module.fail_json(msg=to_native(e))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -169,7 +169,7 @@ def has_changed(string):
|
||||
|
||||
def get_available_options(module, command='install'):
|
||||
# get all available options from a composer command using composer help to json
|
||||
rc, out, err = composer_command(module, "help %s --format=json" % command)
|
||||
rc, out, err = composer_command(module, "help %s" % command, arguments="--no-interaction --format=json")
|
||||
if rc != 0:
|
||||
output = parse_out(err)
|
||||
module.fail_json(msg=output)
|
||||
|
||||
@@ -82,6 +82,12 @@ options:
|
||||
type: bool
|
||||
default: no
|
||||
version_added: 2.0.0
|
||||
no_bin_links:
|
||||
description:
|
||||
- Use the C(--no-bin-links) flag when installing.
|
||||
type: bool
|
||||
default: no
|
||||
version_added: 2.5.0
|
||||
requirements:
|
||||
- npm installed in bin path (recommended /usr/local/bin)
|
||||
'''
|
||||
@@ -151,6 +157,7 @@ class Npm(object):
|
||||
self.unsafe_perm = kwargs['unsafe_perm']
|
||||
self.state = kwargs['state']
|
||||
self.no_optional = kwargs['no_optional']
|
||||
self.no_bin_links = kwargs['no_bin_links']
|
||||
|
||||
if kwargs['executable']:
|
||||
self.executable = kwargs['executable'].split(' ')
|
||||
@@ -181,6 +188,8 @@ class Npm(object):
|
||||
cmd.append(self.registry)
|
||||
if self.no_optional:
|
||||
cmd.append('--no-optional')
|
||||
if self.no_bin_links:
|
||||
cmd.append('--no-bin-links')
|
||||
|
||||
# If path is specified, cd into that path and run the command.
|
||||
cwd = None
|
||||
@@ -259,6 +268,7 @@ def main():
|
||||
unsafe_perm=dict(default=False, type='bool'),
|
||||
ci=dict(default=False, type='bool'),
|
||||
no_optional=dict(default=False, type='bool'),
|
||||
no_bin_links=dict(default=False, type='bool'),
|
||||
)
|
||||
arg_spec['global'] = dict(default=False, type='bool')
|
||||
module = AnsibleModule(
|
||||
@@ -278,6 +288,7 @@ def main():
|
||||
unsafe_perm = module.params['unsafe_perm']
|
||||
ci = module.params['ci']
|
||||
no_optional = module.params['no_optional']
|
||||
no_bin_links = module.params['no_bin_links']
|
||||
|
||||
if not path and not glbl:
|
||||
module.fail_json(msg='path must be specified when not using global')
|
||||
@@ -286,7 +297,7 @@ def main():
|
||||
|
||||
npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production,
|
||||
executable=executable, registry=registry, ignore_scripts=ignore_scripts,
|
||||
unsafe_perm=unsafe_perm, state=state, no_optional=no_optional)
|
||||
unsafe_perm=unsafe_perm, state=state, no_optional=no_optional, no_bin_links=no_bin_links)
|
||||
|
||||
changed = False
|
||||
if ci:
|
||||
|
||||
@@ -127,6 +127,11 @@ EXAMPLES = '''
|
||||
state: present
|
||||
install_options: with-baz,enable-debug
|
||||
|
||||
- name: Install formula foo with 'brew' from cask
|
||||
community.general.homebrew:
|
||||
name: homebrew/cask/foo
|
||||
state: present
|
||||
|
||||
- name: Use ignored-pinned option while upgrading all
|
||||
community.general.homebrew:
|
||||
upgrade_all: yes
|
||||
|
||||
@@ -130,7 +130,7 @@ def packages_not_latest(module, names, site, update_catalog):
|
||||
cmd.append('-U')
|
||||
cmd.append('-c')
|
||||
if site is not None:
|
||||
cmd.extend('-t', site)
|
||||
cmd.extend(['-t', site])
|
||||
if names != ['*']:
|
||||
cmd.extend(names)
|
||||
rc, out, err = run_command(module, cmd)
|
||||
@@ -159,7 +159,7 @@ def package_install(module, state, pkgs, site, update_catalog, force):
|
||||
if update_catalog:
|
||||
cmd.append('-U')
|
||||
if site is not None:
|
||||
cmd.extend('-t', site)
|
||||
cmd.extend(['-t', site])
|
||||
if force:
|
||||
cmd.append('-f')
|
||||
cmd.extend(pkgs)
|
||||
@@ -174,7 +174,7 @@ def package_upgrade(module, pkgs, site, update_catalog, force):
|
||||
if update_catalog:
|
||||
cmd.append('-U')
|
||||
if site is not None:
|
||||
cmd.extend('-t', site)
|
||||
cmd.extend(['-t', site])
|
||||
if force:
|
||||
cmd.append('-f')
|
||||
cmd += pkgs
|
||||
|
||||
@@ -56,9 +56,9 @@ from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
import re
|
||||
|
||||
# Matches release-like values such as 7.2, 6.10, 10Server,
|
||||
# but rejects unlikely values, like 100Server, 100.0, 1.100, etc.
|
||||
release_matcher = re.compile(r'\b\d{1,2}(?:\.\d{1,2}|Server)\b')
|
||||
# Matches release-like values such as 7.2, 5.10, 6Server, 8
|
||||
# but rejects unlikely values, like 100Server, 1.100, 7server etc.
|
||||
release_matcher = re.compile(r'\b\d{1,2}(?:\.\d{1,2}|Server|Client|Workstation|)\b')
|
||||
|
||||
|
||||
def _sm_release(module, *args):
|
||||
|
||||
@@ -108,8 +108,7 @@ from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
def package_installed(module, name, category):
|
||||
cmd = [module.get_bin_path('pkginfo', True)]
|
||||
cmd.append('-q')
|
||||
cmd = [module.get_bin_path('pkginfo', True), '-q']
|
||||
if category:
|
||||
cmd.append('-c')
|
||||
cmd.append(name)
|
||||
@@ -122,7 +121,7 @@ def package_installed(module, name, category):
|
||||
|
||||
def create_admin_file():
|
||||
(desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True)
|
||||
fullauto = '''
|
||||
fullauto = b'''
|
||||
mail=
|
||||
instance=unique
|
||||
partial=nocheck
|
||||
|
||||
@@ -175,7 +175,7 @@ def _parse_repos(module):
|
||||
module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd), rc=rc, stdout=stdout, stderr=stderr)
|
||||
|
||||
|
||||
def _repo_changes(realrepo, repocmp):
|
||||
def _repo_changes(module, realrepo, repocmp):
|
||||
"Check whether the 2 given repos have different settings."
|
||||
for k in repocmp:
|
||||
if repocmp[k] and k not in realrepo:
|
||||
@@ -186,6 +186,16 @@ def _repo_changes(realrepo, repocmp):
|
||||
valold = str(repocmp[k] or "")
|
||||
valnew = v or ""
|
||||
if k == "url":
|
||||
if '$releasever' in valold or '$releasever' in valnew:
|
||||
cmd = ['rpm', '-q', '--qf', '%{version}', '-f', '/etc/os-release']
|
||||
rc, stdout, stderr = module.run_command(cmd, check_rc=True)
|
||||
valnew = valnew.replace('$releasever', stdout)
|
||||
valold = valold.replace('$releasever', stdout)
|
||||
if '$basearch' in valold or '$basearch' in valnew:
|
||||
cmd = ['rpm', '-q', '--qf', '%{arch}', '-f', '/etc/os-release']
|
||||
rc, stdout, stderr = module.run_command(cmd, check_rc=True)
|
||||
valnew = valnew.replace('$basearch', stdout)
|
||||
valold = valold.replace('$basearch', stdout)
|
||||
valold, valnew = valold.rstrip("/"), valnew.rstrip("/")
|
||||
if valold != valnew:
|
||||
return True
|
||||
@@ -215,7 +225,7 @@ def repo_exists(module, repodata, overwrite_multiple):
|
||||
return (False, False, None)
|
||||
elif len(repos) == 1:
|
||||
# Found an existing repo, look for changes
|
||||
has_changes = _repo_changes(repos[0], repodata)
|
||||
has_changes = _repo_changes(module, repos[0], repodata)
|
||||
return (True, has_changes, repos)
|
||||
elif len(repos) >= 2:
|
||||
if overwrite_multiple:
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user