mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-04-30 18:36:28 +00:00
Compare commits
109 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a1fd642008 | ||
|
|
8298b2c7c1 | ||
|
|
25ff8d4179 | ||
|
|
145ceb693b | ||
|
|
40d094e63a | ||
|
|
6988ea052d | ||
|
|
f26883f45f | ||
|
|
03b312c0ae | ||
|
|
a634cc2928 | ||
|
|
14f23fbebe | ||
|
|
77aabcd8f5 | ||
|
|
3a1f23323c | ||
|
|
6ccb9a9813 | ||
|
|
e6e4260926 | ||
|
|
7111edd631 | ||
|
|
a84a004308 | ||
|
|
ac5bbe666e | ||
|
|
0273420e70 | ||
|
|
ce8d9d56ca | ||
|
|
7f6623657f | ||
|
|
ccdec10c67 | ||
|
|
09abc09b52 | ||
|
|
c87ba3a626 | ||
|
|
cda6248cea | ||
|
|
2cbd8ba71e | ||
|
|
eb2b8f4409 | ||
|
|
a27b1a135d | ||
|
|
db6cb07028 | ||
|
|
6ccffc3de5 | ||
|
|
17cc574b04 | ||
|
|
1f2c352b83 | ||
|
|
b90f87f3d8 | ||
|
|
b23fdc3be3 | ||
|
|
bc83586c15 | ||
|
|
b765938b79 | ||
|
|
4ed5177d60 | ||
|
|
88ac419c0e | ||
|
|
cc63dd884c | ||
|
|
d817fc7215 | ||
|
|
64897d762c | ||
|
|
64cbf1900b | ||
|
|
7e23ef3801 | ||
|
|
27fc80895c | ||
|
|
0754449d60 | ||
|
|
460cd523fe | ||
|
|
ad2d899713 | ||
|
|
3182be1a2f | ||
|
|
b726110f1f | ||
|
|
24a4d6e685 | ||
|
|
1badcffe1c | ||
|
|
b87196348a | ||
|
|
21423ca6a0 | ||
|
|
4926f15d86 | ||
|
|
9466103a4a | ||
|
|
b26df2a008 | ||
|
|
cba4fa2fe8 | ||
|
|
833530ab47 | ||
|
|
1f0361a1c6 | ||
|
|
4df53dbacf | ||
|
|
f25519e308 | ||
|
|
1bb47ad73e | ||
|
|
fd3e84fcd6 | ||
|
|
f75471e7d2 | ||
|
|
aadf1d4f6a | ||
|
|
f5ff54979a | ||
|
|
957a74b525 | ||
|
|
29afed337a | ||
|
|
e748acdd51 | ||
|
|
eed45fe6aa | ||
|
|
cce52e1812 | ||
|
|
0bcce340ed | ||
|
|
da8a11b8d8 | ||
|
|
9ed0603072 | ||
|
|
e2fa11b381 | ||
|
|
36f7ff15e9 | ||
|
|
8eac491057 | ||
|
|
607f3d83a0 | ||
|
|
d6cd90838f | ||
|
|
22e0a6dac7 | ||
|
|
3751e188ca | ||
|
|
cd4f3ca445 | ||
|
|
1d05f81e53 | ||
|
|
4ef80ecd46 | ||
|
|
68e184eba8 | ||
|
|
5dcd2c7df5 | ||
|
|
134a0dc7e2 | ||
|
|
13e3e176fb | ||
|
|
eb98be580d | ||
|
|
49ef8b1900 | ||
|
|
19d22d605a | ||
|
|
f17b10bfa2 | ||
|
|
258eb68022 | ||
|
|
264c98189c | ||
|
|
7aec01190a | ||
|
|
00fd2847e4 | ||
|
|
94ea18f1cb | ||
|
|
0b42aca72f | ||
|
|
2658bf31cd | ||
|
|
869e1a1eab | ||
|
|
d25b6e7681 | ||
|
|
8beb5d70c5 | ||
|
|
f9fecf12e7 | ||
|
|
b165337bbe | ||
|
|
6572f46998 | ||
|
|
b4ae2ce44d | ||
|
|
baec510c40 | ||
|
|
96cda3a48a | ||
|
|
9dc2e2d032 | ||
|
|
86c0af6cbb |
@@ -30,13 +30,12 @@ schedules:
|
||||
branches:
|
||||
include:
|
||||
- stable-5
|
||||
- stable-4
|
||||
- cron: 0 11 * * 0
|
||||
displayName: Weekly (old stable branches)
|
||||
always: true
|
||||
branches:
|
||||
include:
|
||||
- stable-3
|
||||
- stable-4
|
||||
|
||||
variables:
|
||||
- name: checkoutPath
|
||||
@@ -73,6 +72,19 @@ stages:
|
||||
- test: 3
|
||||
- test: 4
|
||||
- test: extra
|
||||
- stage: Sanity_2_14
|
||||
displayName: Sanity 2.14
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Test {0}
|
||||
testFormat: 2.14/sanity/{0}
|
||||
targets:
|
||||
- test: 1
|
||||
- test: 2
|
||||
- test: 3
|
||||
- test: 4
|
||||
- stage: Sanity_2_13
|
||||
displayName: Sanity 2.13
|
||||
dependsOn: []
|
||||
@@ -129,6 +141,18 @@ stages:
|
||||
- test: 3.8
|
||||
- test: 3.9
|
||||
- test: '3.10'
|
||||
- test: '3.11'
|
||||
- stage: Units_2_14
|
||||
displayName: Units 2.14
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.14/units/{0}/1
|
||||
targets:
|
||||
- test: 2.7
|
||||
- test: 3.9
|
||||
- stage: Units_2_13
|
||||
displayName: Units 2.13
|
||||
dependsOn: []
|
||||
@@ -139,9 +163,7 @@ stages:
|
||||
testFormat: 2.13/units/{0}/1
|
||||
targets:
|
||||
- test: 2.7
|
||||
- test: 3.6
|
||||
- test: 3.8
|
||||
- test: 3.9
|
||||
- stage: Units_2_12
|
||||
displayName: Units 2.12
|
||||
dependsOn: []
|
||||
@@ -152,7 +174,6 @@ stages:
|
||||
testFormat: 2.12/units/{0}/1
|
||||
targets:
|
||||
- test: 2.6
|
||||
- test: 3.5
|
||||
- test: 3.8
|
||||
- stage: Units_2_11
|
||||
displayName: Units 2.11
|
||||
@@ -163,11 +184,8 @@ stages:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.11/units/{0}/1
|
||||
targets:
|
||||
- test: 2.6
|
||||
- test: 2.7
|
||||
- test: 3.5
|
||||
- test: 3.6
|
||||
- test: 3.9
|
||||
|
||||
## Remote
|
||||
- stage: Remote_devel
|
||||
@@ -192,6 +210,22 @@ stages:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Remote_2_14
|
||||
displayName: Remote 2.14
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: 2.14/{0}
|
||||
targets:
|
||||
- name: RHEL 9.0
|
||||
test: rhel/9.0
|
||||
- name: FreeBSD 13.1
|
||||
test: freebsd/13.1
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Remote_2_13
|
||||
displayName: Remote 2.13
|
||||
dependsOn: []
|
||||
@@ -238,8 +272,6 @@ stages:
|
||||
test: rhel/7.9
|
||||
- name: RHEL 8.3
|
||||
test: rhel/8.3
|
||||
#- name: FreeBSD 12.2
|
||||
# test: freebsd/12.2
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
@@ -270,6 +302,20 @@ stages:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Docker_2_14
|
||||
displayName: Docker 2.14
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: 2.14/linux/{0}
|
||||
targets:
|
||||
- name: Ubuntu 20.04
|
||||
test: ubuntu2004
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Docker_2_13
|
||||
displayName: Docker 2.13
|
||||
dependsOn: []
|
||||
@@ -339,52 +385,62 @@ stages:
|
||||
- name: ArchLinux
|
||||
test: archlinux/3.10
|
||||
- name: CentOS Stream 8
|
||||
test: centos-stream8/3.8
|
||||
test: centos-stream8/3.9
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
|
||||
### Cloud
|
||||
- stage: Cloud_devel
|
||||
displayName: Cloud devel
|
||||
### Generic
|
||||
- stage: Generic_devel
|
||||
displayName: Generic devel
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: devel/cloud/{0}/1
|
||||
testFormat: devel/generic/{0}/1
|
||||
targets:
|
||||
- test: 2.7
|
||||
- test: '3.10'
|
||||
- stage: Cloud_2_13
|
||||
displayName: Cloud 2.13
|
||||
- test: '3.11'
|
||||
- stage: Generic_2_14
|
||||
displayName: Generic 2.14
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.13/cloud/{0}/1
|
||||
testFormat: 2.14/generic/{0}/1
|
||||
targets:
|
||||
- test: '3.10'
|
||||
- stage: Generic_2_13
|
||||
displayName: Generic 2.13
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.13/generic/{0}/1
|
||||
targets:
|
||||
- test: 3.9
|
||||
- stage: Cloud_2_12
|
||||
displayName: Cloud 2.12
|
||||
- stage: Generic_2_12
|
||||
displayName: Generic 2.12
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.12/cloud/{0}/1
|
||||
testFormat: 2.12/generic/{0}/1
|
||||
targets:
|
||||
- test: 3.8
|
||||
- stage: Cloud_2_11
|
||||
displayName: Cloud 2.11
|
||||
- stage: Generic_2_11
|
||||
displayName: Generic 2.11
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.11/cloud/{0}/1
|
||||
testFormat: 2.11/generic/{0}/1
|
||||
targets:
|
||||
- test: 2.7
|
||||
- test: 3.5
|
||||
@@ -396,22 +452,27 @@ stages:
|
||||
- Sanity_2_11
|
||||
- Sanity_2_12
|
||||
- Sanity_2_13
|
||||
- Sanity_2_14
|
||||
- Units_devel
|
||||
- Units_2_11
|
||||
- Units_2_12
|
||||
- Units_2_13
|
||||
- Units_2_14
|
||||
- Remote_devel
|
||||
- Remote_2_11
|
||||
- Remote_2_12
|
||||
- Remote_2_13
|
||||
- Remote_2_14
|
||||
- Docker_devel
|
||||
- Docker_2_11
|
||||
- Docker_2_12
|
||||
- Docker_2_13
|
||||
- Docker_2_14
|
||||
- Docker_community_devel
|
||||
- Cloud_devel
|
||||
- Cloud_2_11
|
||||
- Cloud_2_12
|
||||
- Cloud_2_13
|
||||
- Generic_devel
|
||||
- Generic_2_11
|
||||
- Generic_2_12
|
||||
- Generic_2_13
|
||||
- Generic_2_14
|
||||
jobs:
|
||||
- template: templates/coverage.yml
|
||||
|
||||
139
.github/BOTMETA.yml
vendored
139
.github/BOTMETA.yml
vendored
@@ -327,13 +327,13 @@ files:
|
||||
$module_utils/xfconf.py:
|
||||
maintainers: russoz
|
||||
labels: xfconf
|
||||
$modules/cloud/alicloud/:
|
||||
$modules/cloud/alicloud/ali_:
|
||||
maintainers: xiaozhu36
|
||||
$modules/cloud/atomic/atomic_container.py:
|
||||
maintainers: giuseppe krsacme
|
||||
$modules/cloud/atomic/:
|
||||
$modules/cloud/atomic/atomic_:
|
||||
maintainers: krsacme
|
||||
$modules/cloud/centurylink/:
|
||||
$modules/cloud/centurylink/clc_:
|
||||
maintainers: clc-runner
|
||||
$modules/cloud/dimensiondata/dimensiondata_network.py:
|
||||
maintainers: aimonb tintoy
|
||||
@@ -342,22 +342,22 @@ files:
|
||||
maintainers: tintoy
|
||||
$modules/cloud/heroku/heroku_collaborator.py:
|
||||
maintainers: marns93
|
||||
$modules/cloud/huawei/:
|
||||
$modules/cloud/huawei/hwc_:
|
||||
maintainers: $team_huawei huaweicloud
|
||||
keywords: cloud huawei hwc
|
||||
$modules/cloud/linode/:
|
||||
$modules/cloud/linode/linode:
|
||||
maintainers: $team_linode
|
||||
$modules/cloud/linode/linode.py:
|
||||
maintainers: zbal
|
||||
$modules/cloud/lxc/lxc_container.py:
|
||||
maintainers: cloudnull
|
||||
$modules/cloud/lxd/:
|
||||
$modules/cloud/lxd/lxd_:
|
||||
ignore: hnakamur
|
||||
$modules/cloud/lxd/lxd_profile.py:
|
||||
maintainers: conloos
|
||||
$modules/cloud/lxd/lxd_project.py:
|
||||
maintainers: we10710aa
|
||||
$modules/cloud/memset/:
|
||||
$modules/cloud/memset/memset_:
|
||||
maintainers: glitchcrab
|
||||
$modules/cloud/misc/cloud_init_data_facts.py:
|
||||
maintainers: resmo
|
||||
@@ -378,46 +378,49 @@ files:
|
||||
$modules/cloud/misc/proxmox_template.py:
|
||||
maintainers: UnderGreen
|
||||
ignore: skvidal
|
||||
$modules/cloud/misc/proxmox_disk.py:
|
||||
maintainers: castorsky
|
||||
$modules/cloud/misc/rhevm.py:
|
||||
maintainers: $team_virt TimothyVandenbrande
|
||||
labels: rhevm virt
|
||||
ignore: skvidal
|
||||
keywords: kvm libvirt proxmox qemu
|
||||
$modules/cloud/misc/:
|
||||
$modules/cloud/misc/serverless.py:
|
||||
ignore: ryansb
|
||||
$modules/cloud/misc/terraform.py:
|
||||
maintainers: m-yosefpor rainerleber
|
||||
ignore: ryansb
|
||||
$modules/cloud/misc/xenserver_facts.py:
|
||||
maintainers: caphrim007 cheese
|
||||
labels: xenserver_facts
|
||||
ignore: andyhky
|
||||
$modules/cloud/oneandone/:
|
||||
ignore: andyhky ryansb
|
||||
$modules/cloud/oneandone/oneandone_:
|
||||
maintainers: aajdinov edevenport
|
||||
$modules/cloud/online/:
|
||||
$modules/cloud/online/online_:
|
||||
maintainers: remyleone
|
||||
$modules/cloud/opennebula/:
|
||||
$modules/cloud/opennebula/one_:
|
||||
maintainers: $team_opennebula
|
||||
$modules/cloud/opennebula/one_host.py:
|
||||
maintainers: rvalle
|
||||
$modules/cloud/oracle/oci_vcn.py:
|
||||
maintainers: $team_oracle rohitChaware
|
||||
$modules/cloud/ovh/:
|
||||
$modules/cloud/ovh/ovh_:
|
||||
maintainers: pascalheraud
|
||||
$modules/cloud/ovh/ovh_monthly_billing.py:
|
||||
maintainers: fraff
|
||||
$modules/cloud/packet/packet_device.py:
|
||||
maintainers: baldwinSPC t0mk teebes
|
||||
$modules/cloud/packet/:
|
||||
$modules/cloud/packet/packet_:
|
||||
maintainers: nurfet-becirevic t0mk
|
||||
$modules/cloud/packet/packet_sshkey.py:
|
||||
maintainers: t0mk
|
||||
$modules/cloud/profitbricks/:
|
||||
$modules/cloud/profitbricks/profitbricks:
|
||||
maintainers: baldwinSPC
|
||||
$modules/cloud/pubnub/pubnub_blocks.py:
|
||||
maintainers: parfeon pubnub
|
||||
$modules/cloud/rackspace/rax.py:
|
||||
maintainers: omgjlk sivel
|
||||
$modules/cloud/rackspace/:
|
||||
$modules/cloud/rackspace/rax:
|
||||
ignore: ryansb sivel
|
||||
$modules/cloud/rackspace/rax_cbs.py:
|
||||
maintainers: claco
|
||||
@@ -455,10 +458,14 @@ files:
|
||||
maintainers: smashwilson
|
||||
$modules/cloud/rackspace/rax_queue.py:
|
||||
maintainers: claco
|
||||
$modules/cloud/scaleway/:
|
||||
$modules/cloud/scaleway/scaleway_:
|
||||
maintainers: $team_scaleway
|
||||
$modules/cloud/scaleway/scaleway_compute_private_network.py:
|
||||
maintainers: pastral
|
||||
$modules/cloud/scaleway/scaleway_container_registry.py:
|
||||
maintainers: Lunik
|
||||
$modules/cloud/scaleway/scaleway_container_registry_info.py:
|
||||
maintainers: Lunik
|
||||
$modules/cloud/scaleway/scaleway_database_backup.py:
|
||||
maintainers: guillaume_ro_fr
|
||||
$modules/cloud/scaleway/scaleway_image_info.py:
|
||||
@@ -484,29 +491,39 @@ files:
|
||||
ignore: hekonsek
|
||||
$modules/cloud/scaleway/scaleway_volume_info.py:
|
||||
maintainers: Spredzy
|
||||
$modules/cloud/smartos/:
|
||||
$modules/cloud/smartos/imgadm.py:
|
||||
maintainers: $team_solaris
|
||||
labels: solaris
|
||||
keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
|
||||
$modules/cloud/smartos/nictagadm.py:
|
||||
maintainers: SmithX10
|
||||
maintainers: $team_solaris SmithX10
|
||||
labels: solaris
|
||||
keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
|
||||
$modules/cloud/smartos/smartos_image_info.py:
|
||||
maintainers: $team_solaris
|
||||
labels: solaris
|
||||
keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
|
||||
$modules/cloud/smartos/vmadm.py:
|
||||
maintainers: $team_solaris
|
||||
labels: solaris
|
||||
keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
|
||||
$modules/cloud/softlayer/sl_vm.py:
|
||||
maintainers: mcltn
|
||||
$modules/cloud/spotinst/spotinst_aws_elastigroup.py:
|
||||
maintainers: talzur
|
||||
$modules/cloud/univention/:
|
||||
$modules/cloud/univention/udm_:
|
||||
maintainers: keachi
|
||||
$modules/cloud/webfaction/:
|
||||
$modules/cloud/webfaction/webfaction_:
|
||||
maintainers: quentinsf
|
||||
$modules/cloud/xenserver/:
|
||||
$modules/cloud/xenserver/xenserver_:
|
||||
maintainers: bvitnik
|
||||
$modules/clustering/consul/:
|
||||
$modules/clustering/consul/consul:
|
||||
maintainers: $team_consul
|
||||
ignore: colin-nolan
|
||||
$modules/clustering/etcd3.py:
|
||||
maintainers: evrardjp
|
||||
ignore: vfauth
|
||||
$modules/clustering/nomad/:
|
||||
$modules/clustering/nomad/nomad_:
|
||||
maintainers: chris93111
|
||||
$modules/clustering/pacemaker_cluster.py:
|
||||
maintainers: matbu
|
||||
@@ -514,7 +531,7 @@ files:
|
||||
maintainers: treyperry
|
||||
$modules/database/aerospike/aerospike_migrations.py:
|
||||
maintainers: Alb0t
|
||||
$modules/database/influxdb/:
|
||||
$modules/database/influxdb/influxdb_:
|
||||
maintainers: kamsz
|
||||
$modules/database/influxdb/influxdb_query.py:
|
||||
maintainers: resmo
|
||||
@@ -548,7 +565,7 @@ files:
|
||||
labels: mssql_script
|
||||
$modules/database/saphana/hana_query.py:
|
||||
maintainers: rainerleber
|
||||
$modules/database/vertica/:
|
||||
$modules/database/vertica/vertica_:
|
||||
maintainers: dareko
|
||||
$modules/files/archive.py:
|
||||
maintainers: bendoh
|
||||
@@ -558,6 +575,8 @@ files:
|
||||
maintainers: jpmens noseka1
|
||||
$modules/files/iso_create.py:
|
||||
maintainers: Tomorrow9
|
||||
$modules/files/iso_customize.py:
|
||||
maintainers: ZouYuhua
|
||||
$modules/files/iso_extract.py:
|
||||
maintainers: dagwieers jhoekx ribbons
|
||||
$modules/files/read_csv.py:
|
||||
@@ -571,7 +590,7 @@ files:
|
||||
maintainers: dagwieers magnus919 tbielawa cmprescott sm4rk0
|
||||
labels: m:xml xml
|
||||
ignore: magnus919
|
||||
$modules/identity/ipa/:
|
||||
$modules/identity/ipa/ipa_:
|
||||
maintainers: $team_ipa
|
||||
$modules/identity/ipa/ipa_pwpolicy.py:
|
||||
maintainers: adralioh
|
||||
@@ -579,7 +598,7 @@ files:
|
||||
maintainers: cprh
|
||||
$modules/identity/ipa/ipa_vault.py:
|
||||
maintainers: jparrill
|
||||
$modules/identity/keycloak/:
|
||||
$modules/identity/keycloak/keycloak_:
|
||||
maintainers: $team_keycloak
|
||||
$modules/identity/keycloak/keycloak_authentication.py:
|
||||
maintainers: elfelip Gaetan2907
|
||||
@@ -587,6 +606,8 @@ files:
|
||||
maintainers: Gaetan2907
|
||||
$modules/identity/keycloak/keycloak_client_rolemapping.py:
|
||||
maintainers: Gaetan2907
|
||||
$modules/identity/keycloak/keycloak_user_rolemapping.py:
|
||||
maintainers: bratwurzt
|
||||
$modules/identity/keycloak/keycloak_group.py:
|
||||
maintainers: adamgoossens
|
||||
$modules/identity/keycloak/keycloak_identity_provider.py:
|
||||
@@ -655,10 +676,10 @@ files:
|
||||
maintainers: thaumos
|
||||
$modules/monitoring/rollbar_deployment.py:
|
||||
maintainers: kavu
|
||||
$modules/monitoring/sensu/sensu_:
|
||||
maintainers: dmsimard
|
||||
$modules/monitoring/sensu/sensu_check.py:
|
||||
maintainers: andsens
|
||||
$modules/monitoring/sensu/:
|
||||
maintainers: dmsimard
|
||||
$modules/monitoring/sensu/sensu_silence.py:
|
||||
maintainers: smbambling
|
||||
$modules/monitoring/sensu/sensu_subscription.py:
|
||||
@@ -715,7 +736,7 @@ files:
|
||||
maintainers: nerzhul
|
||||
$modules/net_tools/omapi_host.py:
|
||||
maintainers: amasolov nerzhul
|
||||
$modules/net_tools/pritunl/:
|
||||
$modules/net_tools/pritunl/pritunl_:
|
||||
maintainers: Lowess
|
||||
$modules/net_tools/nmcli.py:
|
||||
maintainers: alcamie101
|
||||
@@ -764,7 +785,8 @@ files:
|
||||
labels: rocketchat
|
||||
ignore: ramondelafuente
|
||||
$modules/notification/say.py:
|
||||
maintainers: $team_ansible_core mpdehaan
|
||||
maintainers: $team_ansible_core
|
||||
ignore: mpdehaan
|
||||
$modules/notification/sendgrid.py:
|
||||
maintainers: makaimc
|
||||
$modules/notification/slack.py:
|
||||
@@ -951,21 +973,24 @@ files:
|
||||
maintainers: $team_suse
|
||||
labels: zypper
|
||||
ignore: matze
|
||||
$modules/remote_management/cobbler/:
|
||||
$modules/remote_management/cobbler/cobbler_:
|
||||
maintainers: dagwieers
|
||||
$modules/remote_management/hpilo/:
|
||||
$modules/remote_management/hpilo/hpilo_:
|
||||
maintainers: haad
|
||||
ignore: dagwieers
|
||||
$modules/remote_management/hpilo/hponcfg.py:
|
||||
maintainers: haad
|
||||
ignore: dagwieers
|
||||
$modules/remote_management/imc/imc_rest.py:
|
||||
maintainers: dagwieers
|
||||
labels: cisco
|
||||
$modules/remote_management/ipmi/:
|
||||
$modules/remote_management/ipmi/ipmi_:
|
||||
maintainers: bgaifullin cloudnull
|
||||
$modules/remote_management/lenovoxcc/:
|
||||
$modules/remote_management/lenovoxcc/xcc_:
|
||||
maintainers: panyy3 renxulei
|
||||
$modules/remote_management/lxca/:
|
||||
$modules/remote_management/lxca/lxca_:
|
||||
maintainers: navalkp prabhosa
|
||||
$modules/remote_management/manageiq/:
|
||||
$modules/remote_management/manageiq/manageiq_:
|
||||
labels: manageiq
|
||||
maintainers: $team_manageiq
|
||||
$modules/remote_management/manageiq/manageiq_alert_profiles.py:
|
||||
@@ -974,9 +999,13 @@ files:
|
||||
maintainers: elad661
|
||||
$modules/remote_management/manageiq/manageiq_group.py:
|
||||
maintainers: evertmulder
|
||||
$modules/remote_management/manageiq/manageiq_policies_info.py:
|
||||
maintainers: russoz $team_manageiq
|
||||
$modules/remote_management/manageiq/manageiq_tags_info.py:
|
||||
maintainers: russoz $team_manageiq
|
||||
$modules/remote_management/manageiq/manageiq_tenant.py:
|
||||
maintainers: evertmulder
|
||||
$modules/remote_management/oneview/:
|
||||
$modules/remote_management/oneview/oneview_:
|
||||
maintainers: adriane-cardozo fgbulsoni tmiotto
|
||||
$modules/remote_management/oneview/oneview_datacenter_info.py:
|
||||
maintainers: aalexmonteiro madhav-bharadwaj ricardogpsf soodpr
|
||||
@@ -984,7 +1013,16 @@ files:
|
||||
maintainers: fgbulsoni
|
||||
$modules/remote_management/oneview/oneview_fcoe_network.py:
|
||||
maintainers: fgbulsoni
|
||||
$modules/remote_management/redfish/:
|
||||
$modules/remote_management/redfish/idrac_:
|
||||
maintainers: $team_redfish
|
||||
ignore: jose-delarosa
|
||||
$modules/remote_management/redfish/ilo_:
|
||||
maintainers: $team_redfish
|
||||
ignore: jose-delarosa
|
||||
$modules/remote_management/redfish/redfish_:
|
||||
maintainers: $team_redfish
|
||||
ignore: jose-delarosa
|
||||
$modules/remote_management/redfish/wdc_:
|
||||
maintainers: $team_redfish
|
||||
ignore: jose-delarosa
|
||||
$modules/remote_management/redfish/wdc_redfish_command.py:
|
||||
@@ -996,7 +1034,7 @@ files:
|
||||
labels: stacki_host
|
||||
$modules/remote_management/wakeonlan.py:
|
||||
maintainers: dagwieers
|
||||
$modules/source_control/bitbucket/:
|
||||
$modules/source_control/bitbucket/bitbucket_:
|
||||
maintainers: catcombo
|
||||
$modules/source_control/bzr.py:
|
||||
maintainers: andreparames
|
||||
@@ -1014,9 +1052,9 @@ files:
|
||||
maintainers: adrianmoisey
|
||||
$modules/source_control/github/github_repo.py:
|
||||
maintainers: atorrescogollo
|
||||
$modules/source_control/github/:
|
||||
$modules/source_control/github/github_:
|
||||
maintainers: stpierre
|
||||
$modules/source_control/gitlab/:
|
||||
$modules/source_control/gitlab/gitlab_:
|
||||
notify: jlozadad
|
||||
maintainers: $team_gitlab
|
||||
keywords: gitlab source_control
|
||||
@@ -1034,13 +1072,13 @@ files:
|
||||
maintainers: remixtj
|
||||
$modules/storage/hpe3par/ss_3par_cpg.py:
|
||||
maintainers: farhan7500 gautamphegde
|
||||
$modules/storage/ibm/:
|
||||
$modules/storage/ibm/ibm_sa_:
|
||||
maintainers: tzure
|
||||
$modules/storage/pmem/pmem.py:
|
||||
maintainers: mizumm
|
||||
$modules/storage/vexata/:
|
||||
$modules/storage/vexata/vexata_:
|
||||
maintainers: vexata
|
||||
$modules/storage/zfs/:
|
||||
$modules/storage/zfs/zfs:
|
||||
maintainers: $team_solaris
|
||||
labels: solaris
|
||||
keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
|
||||
@@ -1048,6 +1086,10 @@ files:
|
||||
maintainers: johanwiren
|
||||
$modules/storage/zfs/zfs_delegate_admin.py:
|
||||
maintainers: natefoo
|
||||
$modules/storage/zfs/zpool_facts:
|
||||
maintainers: $team_solaris
|
||||
labels: solaris
|
||||
keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
|
||||
$modules/system/aix:
|
||||
maintainers: $team_aix
|
||||
labels: aix
|
||||
@@ -1129,7 +1171,8 @@ files:
|
||||
$modules/system/nosh.py:
|
||||
maintainers: tacatac
|
||||
$modules/system/ohai.py:
|
||||
maintainers: $team_ansible_core mpdehaan
|
||||
maintainers: $team_ansible_core
|
||||
ignore: mpdehaan
|
||||
labels: ohai
|
||||
$modules/system/open_iscsi.py:
|
||||
maintainers: srvg
|
||||
@@ -1245,7 +1288,7 @@ files:
|
||||
maintainers: phsmith
|
||||
$modules/web_infrastructure/rundeck_job_executions_info.py:
|
||||
maintainers: phsmith
|
||||
$modules/web_infrastructure/sophos_utm/:
|
||||
$modules/web_infrastructure/sophos_utm/utm_:
|
||||
maintainers: $team_e_spirit
|
||||
keywords: sophos utm
|
||||
$modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py:
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -509,3 +509,6 @@ $RECYCLE.BIN/
|
||||
*.lnk
|
||||
|
||||
# End of https://www.toptal.com/developers/gitignore/api/vim,git,macos,linux,pydev,emacs,dotenv,python,windows,webstorm,pycharm+all,jupyternotebooks
|
||||
|
||||
# Integration tests cloud configs
|
||||
tests/integration/cloud-config-*.ini
|
||||
|
||||
215
CHANGELOG.rst
215
CHANGELOG.rst
@@ -6,6 +6,221 @@ Community General Release Notes
|
||||
|
||||
This changelog describes changes after version 4.0.0.
|
||||
|
||||
v5.8.4
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular bugfix release.
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- gconftool2 - fix crash that prevents setting a key (https://github.com/ansible-collections/community.general/issues/5591, https://github.com/ansible-collections/community.general/pull/5687).
|
||||
- gitlab_group_variables - fix dropping variables accidentally when GitLab introduced new properties (https://github.com/ansible-collections/community.general/pull/5667).
|
||||
- gitlab_project_variables - fix dropping variables accidentally when GitLab introduced new properties (https://github.com/ansible-collections/community.general/pull/5667).
|
||||
- lxc_container - fix the arguments of the lxc command which broke the creation and cloning of containers (https://github.com/ansible-collections/community.general/issues/5578).
|
||||
- opkg - fix issue that ``force=reinstall`` would not reinstall an existing package (https://github.com/ansible-collections/community.general/pull/5705).
|
||||
- proxmox_disk - fixed possible issues with redundant ``vmid`` parameter (https://github.com/ansible-collections/community.general/issues/5492, https://github.com/ansible-collections/community.general/pull/5672).
|
||||
- proxmox_nic - fixed possible issues with redundant ``vmid`` parameter (https://github.com/ansible-collections/community.general/issues/5492, https://github.com/ansible-collections/community.general/pull/5672).
|
||||
- unixy callback plugin - fix typo introduced when updating to use Ansible's configuration manager for handling options (https://github.com/ansible-collections/community.general/issues/5600).
|
||||
|
||||
v5.8.3
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular bugfix release.
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- keycloak_client_rolemapping - calculate ``proposed`` and ``after`` return values properly (https://github.com/ansible-collections/community.general/pull/5619).
|
||||
- keycloak_client_rolemapping - remove only listed mappings with ``state=absent`` (https://github.com/ansible-collections/community.general/pull/5619).
|
||||
- proxmox inventory plugin - handle tags delimited by semicolon instead of comma, which happens from Proxmox 7.3 on (https://github.com/ansible-collections/community.general/pull/5602).
|
||||
- vdo - now uses ``yaml.safe_load()`` to parse command output instead of the deprecated ``yaml.load()`` which is potentially unsafe. Using ``yaml.load()`` without explicitely setting a ``Loader=`` is also an error in pyYAML 6.0 (https://github.com/ansible-collections/community.general/pull/5632).
|
||||
- vmadm - fix for index out of range error in ``get_vm_uuid`` (https://github.com/ansible-collections/community.general/pull/5628).
|
||||
|
||||
v5.8.2
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular bugfix release.
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- chroot connection plugin - add ``inventory_hostname`` to vars under ``remote_addr``. This is needed for compatibility with ansible-core 2.13 (https://github.com/ansible-collections/community.general/pull/5570).
|
||||
- cmd_runner module utils - fixed bug when handling default cases in ``cmd_runner_fmt.as_map()`` (https://github.com/ansible-collections/community.general/pull/5538).
|
||||
- cmd_runner module utils - formatting arguments ``cmd_runner_fmt.as_fixed()`` was expecting an non-existing argument (https://github.com/ansible-collections/community.general/pull/5538).
|
||||
- unixy callback plugin - fix plugin to work with ansible-core 2.14 by using Ansible's configuration manager for handling options (https://github.com/ansible-collections/community.general/issues/5600).
|
||||
|
||||
v5.8.1
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular bugfix release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- passwordstore lookup plugin - improve error messages to include stderr (https://github.com/ansible-collections/community.general/pull/5436)
|
||||
|
||||
Deprecated Features
|
||||
-------------------
|
||||
|
||||
- Please note that some tools, like the VScode plugin (https://github.com/ansible/vscode-ansible/issues/573), or ``ansible-doc --list --type module``, suggest to replace the correct FQCNs for modules and actions in community.general with internal names that have more than three components. For example, ``community.general.ufw`` is suggested to be replaced by ``community.general.system.ufw``. While these longer names do work, they are considered **internal names** by the collection and are subject to change and be removed at all time. They **will** be removed in community.general 6.0.0 and result in deprecation messages. Avoid using these internal names, and use general three-component FQCNs (``community.general.<name_of_module>``) instead (https://github.com/ansible-collections/community.general/pull/5373).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- dependent lookup plugin - avoid warning on deprecated parameter for ``Templar.template()`` (https://github.com/ansible-collections/community.general/pull/5543).
|
||||
- iso_create - the module somtimes failed to add folders for Joliet and UDF formats (https://github.com/ansible-collections/community.general/issues/5275).
|
||||
- ldap_attrs - fix bug which caused a ``Bad search filter`` error. The error was occuring when the ldap attribute value contained special characters such as ``(`` or ``*`` (https://github.com/ansible-collections/community.general/issues/5434, https://github.com/ansible-collections/community.general/pull/5435).
|
||||
- nmcli - fix int options idempotence (https://github.com/ansible-collections/community.general/issues/4998).
|
||||
- nsupdate - fix silent failures when updating ``NS`` entries from Bind9 managed DNS zones (https://github.com/ansible-collections/community.general/issues/4657).
|
||||
- one_vm - avoid splitting labels that are ``None`` (https://github.com/ansible-collections/community.general/pull/5489).
|
||||
- proxmox_disk - avoid duplicate ``vmid`` reference (https://github.com/ansible-collections/community.general/issues/5492, https://github.com/ansible-collections/community.general/pull/5493).
|
||||
- snap - allow values in the ``options`` parameter to contain whitespaces (https://github.com/ansible-collections/community.general/pull/5475).
|
||||
|
||||
v5.8.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular feature and bugfix release.
|
||||
|
||||
Major Changes
|
||||
-------------
|
||||
|
||||
- newrelic_deployment - removed New Relic v1 API, added support for v2 API (https://github.com/ansible-collections/community.general/pull/5341).
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- consul - minor refactoring (https://github.com/ansible-collections/community.general/pull/5367).
|
||||
- lxc_container - minor refactoring (https://github.com/ansible-collections/community.general/pull/5358).
|
||||
- nmcli - add ``transport_mode`` configuration for Infiniband devices (https://github.com/ansible-collections/community.general/pull/5361).
|
||||
- opentelemetry callback plugin - send logs. This can be disabled by setting ``disable_logs=false`` (https://github.com/ansible-collections/community.general/pull/4175).
|
||||
- portage - add knobs for Portage's ``--backtrack`` and ``--with-bdeps`` options (https://github.com/ansible-collections/community.general/pull/5349).
|
||||
- portage - use Portage's python module instead of calling gentoolkit-provided program in shell (https://github.com/ansible-collections/community.general/pull/5349).
|
||||
- znode - possibility to use ZooKeeper ACL authentication (https://github.com/ansible-collections/community.general/pull/5306).
|
||||
|
||||
Breaking Changes / Porting Guide
|
||||
--------------------------------
|
||||
|
||||
- newrelic_deployment - ``revision`` is required for v2 API (https://github.com/ansible-collections/community.general/pull/5341).
|
||||
|
||||
Deprecated Features
|
||||
-------------------
|
||||
|
||||
- ArgFormat module utils - deprecated along ``CmdMixin``, in favor of the ``cmd_runner_fmt`` module util (https://github.com/ansible-collections/community.general/pull/5370).
|
||||
- CmdMixin module utils - deprecated in favor of the ``CmdRunner`` module util (https://github.com/ansible-collections/community.general/pull/5370).
|
||||
- CmdModuleHelper module utils - deprecated in favor of the ``CmdRunner`` module util (https://github.com/ansible-collections/community.general/pull/5370).
|
||||
- CmdStateModuleHelper module utils - deprecated in favor of the ``CmdRunner`` module util (https://github.com/ansible-collections/community.general/pull/5370).
|
||||
- django_manage - support for Django releases older than 4.1 has been deprecated and will be removed in community.general 9.0.0 (https://github.com/ansible-collections/community.general/pull/5400).
|
||||
- django_manage - support for the commands ``cleanup``, ``syncdb`` and ``validate`` that have been deprecated in Django a long time ago will be removed in community.general 9.0.0 (https://github.com/ansible-collections/community.general/pull/5400).
|
||||
- django_manage - the behavior of "creating the virtual environment when missing" is being deprecated and will be removed in community.general version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5405).
|
||||
- newrelic_deployment - ``appname`` and ``environment`` are no longer valid options in the v2 API. They will be removed in community.general 7.0.0 (https://github.com/ansible-collections/community.general/pull/5341).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- archive - avoid crash when ``lzma`` is not present and ``format`` is not ``xz`` (https://github.com/ansible-collections/community.general/pull/5393).
|
||||
- ldap_attrs - fix ordering issue by ignoring the ``{x}`` prefix on attribute values (https://github.com/ansible-collections/community.general/issues/977, https://github.com/ansible-collections/community.general/pull/5385).
|
||||
- opentelemetry callback plugin - support opentelemetry-api 1.13.0 that removed support for ``_time_ns`` (https://github.com/ansible-collections/community.general/pull/5342).
|
||||
- pfexec become plugin - remove superfluous quotes preventing exe wrap from working as expected (https://github.com/ansible-collections/community.general/issues/3671, https://github.com/ansible-collections/community.general/pull/3889).
|
||||
- pkgng - fix case when ``pkg`` fails when trying to upgrade all packages (https://github.com/ansible-collections/community.general/issues/5363).
|
||||
- proxmox_kvm - fix ``agent`` parameter when boolean value is specified (https://github.com/ansible-collections/community.general/pull/5198).
|
||||
- virtualbox inventory plugin - skip parsing values with keys that have both a value and nested data. Skip parsing values that are nested more than two keys deep (https://github.com/ansible-collections/community.general/issues/5332, https://github.com/ansible-collections/community.general/pull/5348).
|
||||
- xenserver_facts - fix broken ``AnsibleModule`` call that prevented the module from working at all (https://github.com/ansible-collections/community.general/pull/5383).
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
Cloud
|
||||
~~~~~
|
||||
|
||||
scaleway
|
||||
^^^^^^^^
|
||||
|
||||
- scaleway_container_registry - Scaleway Container registry management module
|
||||
- scaleway_container_registry_info - Scaleway Container registry info module
|
||||
|
||||
Files
|
||||
~~~~~
|
||||
|
||||
- iso_customize - Add/remove/change files in ISO file
|
||||
|
||||
Remote Management
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
manageiq
|
||||
^^^^^^^^
|
||||
|
||||
- manageiq_policies_info - Listing of resource policy_profiles in ManageIQ
|
||||
- manageiq_tags_info - Retrieve resource tags in ManageIQ
|
||||
|
||||
v5.7.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular feature and bugfix release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- bitwarden lookup plugin - add option ``search`` to search for other attributes than name (https://github.com/ansible-collections/community.general/pull/5297).
|
||||
- machinectl become plugin - combine the success command when building the become command to be consistent with other become plugins (https://github.com/ansible-collections/community.general/pull/5287).
|
||||
- netcup_dnsapi - add ``timeout`` parameter (https://github.com/ansible-collections/community.general/pull/5301).
|
||||
- proxmox module utils, the proxmox* modules - add ``api_task_ok`` helper to standardize API task status checks across all proxmox modules (https://github.com/ansible-collections/community.general/pull/5274).
|
||||
- proxmox_snap - add ``unbind`` param to support snapshotting containers with configured mountpoints (https://github.com/ansible-collections/community.general/pull/5274).
|
||||
- redfish_config - add ``SetSessionService`` to set default session timeout policy (https://github.com/ansible-collections/community.general/issues/5008).
|
||||
- terraform - adds capability to handle complex variable structures for ``variables`` parameter in the module. This must be enabled with the new ``complex_vars`` parameter (https://github.com/ansible-collections/community.general/pull/4797).
|
||||
- terraform - run ``terraform init`` with ``-no-color`` not to mess up the stdout of the task (https://github.com/ansible-collections/community.general/pull/5147).
|
||||
|
||||
Deprecated Features
|
||||
-------------------
|
||||
|
||||
- lxc_container - the module will no longer make any effort to support Python 2 (https://github.com/ansible-collections/community.general/pull/5304).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- ini_file - minor refactor fixing a python lint error (https://github.com/ansible-collections/community.general/pull/5307).
|
||||
- locale_gen - fix support for Ubuntu (https://github.com/ansible-collections/community.general/issues/5281).
|
||||
- lxc_container - the module has been updated to support Python 3 (https://github.com/ansible-collections/community.general/pull/5304).
|
||||
- nmcli - fix error when setting previously unset MAC address, ``gsm.apn`` or ``vpn.data``: current values were being normalized without checking if they might be ``None`` (https://github.com/ansible-collections/community.general/pull/5291).
|
||||
- redhat_subscription - make module idempotent when ``pool_ids`` are used (https://github.com/ansible-collections/community.general/issues/5313).
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
Cloud
|
||||
~~~~~
|
||||
|
||||
misc
|
||||
^^^^
|
||||
|
||||
- proxmox_disk - Management of a disk of a Qemu(KVM) VM in a Proxmox VE cluster.
|
||||
|
||||
Identity
|
||||
~~~~~~~~
|
||||
|
||||
keycloak
|
||||
^^^^^^^^
|
||||
|
||||
- keycloak_user_rolemapping - Allows administration of Keycloak user_rolemapping with the Keycloak API
|
||||
|
||||
v5.6.0
|
||||
======
|
||||
|
||||
|
||||
@@ -31,7 +31,7 @@ Also, consider taking up a valuable, reviewed, but abandoned pull request which
|
||||
* Try committing your changes with an informative but short commit message.
|
||||
* Do not squash your commits and force-push to your branch if not needed. Reviews of your pull request are much easier with individual commits to comprehend the pull request history. All commits of your pull request branch will be squashed into one commit by GitHub upon merge.
|
||||
* Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the repository checkout.
|
||||
* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#changelogs-how-to). (You must not include a fragment for new modules or new plugins, except for test and filter plugins. Also you shouldn't include one for docs-only changes. If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) )
|
||||
* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#creating-changelog-fragments). (You must not include a fragment for new modules or new plugins, except for test and filter plugins. Also you shouldn't include one for docs-only changes. If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) )
|
||||
* Avoid reformatting unrelated parts of the codebase in your PR. These types of changes will likely be requested for reversion, create additional work for reviewers, and may cause approval to be delayed.
|
||||
|
||||
You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst).
|
||||
|
||||
@@ -23,7 +23,7 @@ If you encounter abusive behavior violating the [Ansible Code of Conduct](https:
|
||||
|
||||
## Tested with Ansible
|
||||
|
||||
Tested with the current ansible-core 2.11, ansible-core 2.12, ansible-core 2.13 releases and the current development version of ansible-core. Ansible-core versions before 2.11.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases.
|
||||
Tested with the current ansible-core 2.11, ansible-core 2.12, ansible-core 2.13, ansible-core 2.14 releases and the current development version of ansible-core. Ansible-core versions before 2.11.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases.
|
||||
|
||||
Parts of this collection will not work with ansible-core 2.11 on Python 3.12+.
|
||||
|
||||
@@ -64,6 +64,10 @@ ansible-galaxy collection install community.general:==X.Y.Z
|
||||
|
||||
See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details.
|
||||
|
||||
### FQCNs for modules and actions
|
||||
|
||||
⚠️ The collection uses a similar directory structure for modules as the Ansible repository used for Ansible 2.9 and before. This directory structure was never exposed to the user. Due to changes in community.general 5.0.0 (using `meta/runtime.yml` redirects instead of symbolic links) some tooling started exposing the internal module names to end-users. These **internal names**, like `community.general.system.ufw` for the UFW firewall managing module, do work, but should be avoided since they are treated as an implementation detail that can change at any time, even in bugfix releases. Always use the three-component FQCN form, for example `community.general.ufw` for the UFW module. ⚠️
|
||||
|
||||
## Contributing to this collection
|
||||
|
||||
The content of this collection is made by good people just like you, a community of individuals collaborating on making the world better through developing automation software.
|
||||
|
||||
@@ -1160,3 +1160,263 @@ releases:
|
||||
name: pipx_info
|
||||
namespace: packaging.language
|
||||
release_date: '2022-09-13'
|
||||
5.7.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- ini_file - minor refactor fixing a python lint error (https://github.com/ansible-collections/community.general/pull/5307).
|
||||
- locale_gen - fix support for Ubuntu (https://github.com/ansible-collections/community.general/issues/5281).
|
||||
- lxc_container - the module has been updated to support Python 3 (https://github.com/ansible-collections/community.general/pull/5304).
|
||||
- 'nmcli - fix error when setting previously unset MAC address, ``gsm.apn``
|
||||
or ``vpn.data``: current values were being normalized without checking if
|
||||
they might be ``None`` (https://github.com/ansible-collections/community.general/pull/5291).'
|
||||
- redhat_subscription - make module idempotent when ``pool_ids`` are used (https://github.com/ansible-collections/community.general/issues/5313).
|
||||
deprecated_features:
|
||||
- lxc_container - the module will no longer make any effort to support Python
|
||||
2 (https://github.com/ansible-collections/community.general/pull/5304).
|
||||
minor_changes:
|
||||
- bitwarden lookup plugin - add option ``search`` to search for other attributes
|
||||
than name (https://github.com/ansible-collections/community.general/pull/5297).
|
||||
- machinectl become plugin - combine the success command when building the become
|
||||
command to be consistent with other become plugins (https://github.com/ansible-collections/community.general/pull/5287).
|
||||
- netcup_dnsapi - add ``timeout`` parameter (https://github.com/ansible-collections/community.general/pull/5301).
|
||||
- proxmox module utils, the proxmox* modules - add ``api_task_ok`` helper to
|
||||
standardize API task status checks across all proxmox modules (https://github.com/ansible-collections/community.general/pull/5274).
|
||||
- proxmox_snap - add ``unbind`` param to support snapshotting containers with
|
||||
configured mountpoints (https://github.com/ansible-collections/community.general/pull/5274).
|
||||
- redfish_config - add ``SetSessionService`` to set default session timeout
|
||||
policy (https://github.com/ansible-collections/community.general/issues/5008).
|
||||
- terraform - adds capability to handle complex variable structures for ``variables``
|
||||
parameter in the module. This must be enabled with the new ``complex_vars``
|
||||
parameter (https://github.com/ansible-collections/community.general/pull/4797).
|
||||
- terraform - run ``terraform init`` with ``-no-color`` not to mess up the stdout
|
||||
of the task (https://github.com/ansible-collections/community.general/pull/5147).
|
||||
release_summary: Regular feature and bugfix release.
|
||||
fragments:
|
||||
- 4797-terraform-complex-variables.yml
|
||||
- 5.7.0.yml
|
||||
- 5008-addSetSessionService.yml
|
||||
- 5147-terraform-init-no-color.yml
|
||||
- 5274-proxmox-snap-container-with-mountpoints.yml
|
||||
- 5280-lxc_container-py3.yaml
|
||||
- 5282-locale_gen.yaml
|
||||
- 5287-machinectl-become-success.yml
|
||||
- 5291-fix-nmcli-error-when-setting-unset-mac-address.yaml
|
||||
- 5297-bitwarden-add-search-field.yml
|
||||
- 5301-netcup_dnsapi-timeout.yml
|
||||
- 5307-ini_file-lint.yaml
|
||||
- 5313-fix-redhat_subscription-idempotency-pool_ids.yml
|
||||
modules:
|
||||
- description: Allows administration of Keycloak user_rolemapping with the Keycloak
|
||||
API
|
||||
name: keycloak_user_rolemapping
|
||||
namespace: identity.keycloak
|
||||
- description: Management of a disk of a Qemu(KVM) VM in a Proxmox VE cluster.
|
||||
name: proxmox_disk
|
||||
namespace: cloud.misc
|
||||
release_date: '2022-10-04'
|
||||
5.8.0:
|
||||
changes:
|
||||
breaking_changes:
|
||||
- newrelic_deployment - ``revision`` is required for v2 API (https://github.com/ansible-collections/community.general/pull/5341).
|
||||
bugfixes:
|
||||
- archive - avoid crash when ``lzma`` is not present and ``format`` is not ``xz``
|
||||
(https://github.com/ansible-collections/community.general/pull/5393).
|
||||
- ldap_attrs - fix ordering issue by ignoring the ``{x}`` prefix on attribute
|
||||
values (https://github.com/ansible-collections/community.general/issues/977,
|
||||
https://github.com/ansible-collections/community.general/pull/5385).
|
||||
- opentelemetry callback plugin - support opentelemetry-api 1.13.0 that removed
|
||||
support for ``_time_ns`` (https://github.com/ansible-collections/community.general/pull/5342).
|
||||
- pfexec become plugin - remove superfluous quotes preventing exe wrap from working
|
||||
as expected (https://github.com/ansible-collections/community.general/issues/3671,
|
||||
https://github.com/ansible-collections/community.general/pull/3889).
|
||||
- pkgng - fix case when ``pkg`` fails when trying to upgrade all packages (https://github.com/ansible-collections/community.general/issues/5363).
|
||||
- proxmox_kvm - fix ``agent`` parameter when boolean value is specified (https://github.com/ansible-collections/community.general/pull/5198).
|
||||
- virtualbox inventory plugin - skip parsing values with keys that have both
|
||||
a value and nested data. Skip parsing values that are nested more than two
|
||||
keys deep (https://github.com/ansible-collections/community.general/issues/5332,
|
||||
https://github.com/ansible-collections/community.general/pull/5348).
|
||||
- xenserver_facts - fix broken ``AnsibleModule`` call that prevented the module
|
||||
from working at all (https://github.com/ansible-collections/community.general/pull/5383).
|
||||
deprecated_features:
|
||||
- ArgFormat module utils - deprecated along ``CmdMixin``, in favor of the ``cmd_runner_fmt``
|
||||
module util (https://github.com/ansible-collections/community.general/pull/5370).
|
||||
- CmdMixin module utils - deprecated in favor of the ``CmdRunner`` module util
|
||||
(https://github.com/ansible-collections/community.general/pull/5370).
|
||||
- CmdModuleHelper module utils - deprecated in favor of the ``CmdRunner`` module
|
||||
util (https://github.com/ansible-collections/community.general/pull/5370).
|
||||
- CmdStateModuleHelper module utils - deprecated in favor of the ``CmdRunner``
|
||||
module util (https://github.com/ansible-collections/community.general/pull/5370).
|
||||
- django_manage - support for Django releases older than 4.1 has been deprecated
|
||||
and will be removed in community.general 9.0.0 (https://github.com/ansible-collections/community.general/pull/5400).
|
||||
- django_manage - support for the commands ``cleanup``, ``syncdb`` and ``validate``
|
||||
that have been deprecated in Django a long time ago will be removed in community.general
|
||||
9.0.0 (https://github.com/ansible-collections/community.general/pull/5400).
|
||||
- django_manage - the behavior of "creating the virtual environment when missing"
|
||||
is being deprecated and will be removed in community.general version 9.0.0
|
||||
(https://github.com/ansible-collections/community.general/pull/5405).
|
||||
- newrelic_deployment - ``appname`` and ``environment`` are no longer valid
|
||||
options in the v2 API. They will be removed in community.general 7.0.0 (https://github.com/ansible-collections/community.general/pull/5341).
|
||||
major_changes:
|
||||
- newrelic_deployment - removed New Relic v1 API, added support for v2 API (https://github.com/ansible-collections/community.general/pull/5341).
|
||||
minor_changes:
|
||||
- consul - minor refactoring (https://github.com/ansible-collections/community.general/pull/5367).
|
||||
- lxc_container - minor refactoring (https://github.com/ansible-collections/community.general/pull/5358).
|
||||
- nmcli - add ``transport_mode`` configuration for Infiniband devices (https://github.com/ansible-collections/community.general/pull/5361).
|
||||
- opentelemetry callback plugin - send logs. This can be disabled by setting
|
||||
``disable_logs=true`` (https://github.com/ansible-collections/community.general/pull/4175).
|
||||
- portage - add knobs for Portage's ``--backtrack`` and ``--with-bdeps`` options
|
||||
(https://github.com/ansible-collections/community.general/pull/5349).
|
||||
- portage - use Portage's python module instead of calling gentoolkit-provided
|
||||
program in shell (https://github.com/ansible-collections/community.general/pull/5349).
|
||||
- znode - possibility to use ZooKeeper ACL authentication (https://github.com/ansible-collections/community.general/pull/5306).
|
||||
release_summary: Regular feature and bugfix release.
|
||||
fragments:
|
||||
- 3671-illumos-pfexec.yml
|
||||
- 4175-opentelemetry_logs.yml
|
||||
- 5.8.0.yml
|
||||
- 5198-proxmox.yml
|
||||
- 5306-add-options-for-authentication.yml
|
||||
- 5341-newrelic-v2-api-changes.yml
|
||||
- 5342-opentelemetry_bug_fix_opentelemetry-api-1.13.yml
|
||||
- 5348-fix-vbox-deeply-nested-hostvars.yml
|
||||
- 5349-drop-gentoolkit-more-knobs.yml
|
||||
- 5358-lxc-container-refactor.yml
|
||||
- 5361-nmcli-add-infiniband-transport-mode.yaml
|
||||
- 5367-consul-refactor.yaml
|
||||
- 5369-pkgng-fix-update-all.yaml
|
||||
- 5370-mh-cmdmixin-deprecation.yaml
|
||||
- 5383-xenserver_facts.yml
|
||||
- 5385-search_s-based-_is_value_present.yaml
|
||||
- 5393-archive.yml
|
||||
- 5400-django-manage-deprecations.yml
|
||||
- 5404-django-manage-venv-deprecation.yml
|
||||
modules:
|
||||
- description: Add/remove/change files in ISO file
|
||||
name: iso_customize
|
||||
namespace: files
|
||||
- description: Listing of resource policy_profiles in ManageIQ
|
||||
name: manageiq_policies_info
|
||||
namespace: remote_management.manageiq
|
||||
- description: Retrieve resource tags in ManageIQ
|
||||
name: manageiq_tags_info
|
||||
namespace: remote_management.manageiq
|
||||
- description: Scaleway Container registry management module
|
||||
name: scaleway_container_registry
|
||||
namespace: cloud.scaleway
|
||||
- description: Scaleway Container registry info module
|
||||
name: scaleway_container_registry_info
|
||||
namespace: cloud.scaleway
|
||||
release_date: '2022-10-25'
|
||||
5.8.1:
|
||||
changes:
|
||||
bugfixes:
|
||||
- dependent lookup plugin - avoid warning on deprecated parameter for ``Templar.template()``
|
||||
(https://github.com/ansible-collections/community.general/pull/5543).
|
||||
- iso_create - the module sometimes failed to add folders for Joliet and UDF
|
||||
formats (https://github.com/ansible-collections/community.general/issues/5275).
|
||||
- ldap_attrs - fix bug which caused a ``Bad search filter`` error. The error
|
||||
was occurring when the ldap attribute value contained special characters such
|
||||
as ``(`` or ``*`` (https://github.com/ansible-collections/community.general/issues/5434,
|
||||
https://github.com/ansible-collections/community.general/pull/5435).
|
||||
- nmcli - fix int options idempotence (https://github.com/ansible-collections/community.general/issues/4998).
|
||||
- nsupdate - fix silent failures when updating ``NS`` entries from Bind9 managed
|
||||
DNS zones (https://github.com/ansible-collections/community.general/issues/4657).
|
||||
- one_vm - avoid splitting labels that are ``None`` (https://github.com/ansible-collections/community.general/pull/5489).
|
||||
- proxmox_disk - avoid duplicate ``vmid`` reference (https://github.com/ansible-collections/community.general/issues/5492,
|
||||
https://github.com/ansible-collections/community.general/pull/5493).
|
||||
- snap - allow values in the ``options`` parameter to contain whitespaces (https://github.com/ansible-collections/community.general/pull/5475).
|
||||
deprecated_features:
|
||||
- Please note that some tools, like the VScode plugin (https://github.com/ansible/vscode-ansible/issues/573),
|
||||
or ``ansible-doc --list --type module``, suggest to replace the correct FQCNs
|
||||
for modules and actions in community.general with internal names that have
|
||||
more than three components. For example, ``community.general.ufw`` is suggested
|
||||
to be replaced by ``community.general.system.ufw``. While these longer names
|
||||
do work, they are considered **internal names** by the collection and are
|
||||
subject to change and be removed at all time. They **will** be removed in
|
||||
community.general 6.0.0 and result in deprecation messages. Avoid using these
|
||||
internal names, and use general three-component FQCNs (``community.general.<name_of_module>``)
|
||||
instead (https://github.com/ansible-collections/community.general/pull/5373).
|
||||
minor_changes:
|
||||
- passwordstore lookup plugin - improve error messages to include stderr (https://github.com/ansible-collections/community.general/pull/5436)
|
||||
release_summary: Regular bugfix release.
|
||||
fragments:
|
||||
- 4998-nmcli-fix-int-options-idempotence.yml
|
||||
- 5.8.1.yml
|
||||
- 5377-nsupdate-ns-records-with-bind.yml
|
||||
- 5435-escape-ldap-param.yml
|
||||
- 5436-passwordstore-errors.yml
|
||||
- 5468-iso-create-not-add-folders.yml
|
||||
- 5475-snap-option-value-whitespace.yml
|
||||
- 5489-nonetype-in-get-vm-by-label.yml
|
||||
- 5493-proxmox.yml
|
||||
- 5543-dependent-template.yml
|
||||
- fqcn-warnings.yml
|
||||
release_date: '2022-11-15'
|
||||
5.8.2:
|
||||
changes:
|
||||
bugfixes:
|
||||
- chroot connection plugin - add ``inventory_hostname`` to vars under ``remote_addr``.
|
||||
This is needed for compatibility with ansible-core 2.13 (https://github.com/ansible-collections/community.general/pull/5570).
|
||||
- cmd_runner module utils - fixed bug when handling default cases in ``cmd_runner_fmt.as_map()``
|
||||
(https://github.com/ansible-collections/community.general/pull/5538).
|
||||
- cmd_runner module utils - formatting arguments ``cmd_runner_fmt.as_fixed()``
|
||||
was expecting a non-existing argument (https://github.com/ansible-collections/community.general/pull/5538).
|
||||
- unixy callback plugin - fix plugin to work with ansible-core 2.14 by using
|
||||
Ansible's configuration manager for handling options (https://github.com/ansible-collections/community.general/issues/5600).
|
||||
release_summary: Regular bugfix release.
|
||||
fragments:
|
||||
- 5.8.2.yml
|
||||
- 5538-cmd-runner-as-fixed.yml
|
||||
- 5570-chroot-plugin-fix-default-inventory_hostname.yml
|
||||
- 5601-unixy-callback-use-config-manager.yml
|
||||
release_date: '2022-11-29'
|
||||
5.8.3:
|
||||
changes:
|
||||
bugfixes:
|
||||
- keycloak_client_rolemapping - calculate ``proposed`` and ``after`` return
|
||||
values properly (https://github.com/ansible-collections/community.general/pull/5619).
|
||||
- keycloak_client_rolemapping - remove only listed mappings with ``state=absent``
|
||||
(https://github.com/ansible-collections/community.general/pull/5619).
|
||||
- proxmox inventory plugin - handle tags delimited by semicolon instead of comma,
|
||||
which happens from Proxmox 7.3 on (https://github.com/ansible-collections/community.general/pull/5602).
|
||||
- vdo - now uses ``yaml.safe_load()`` to parse command output instead of the
|
||||
deprecated ``yaml.load()`` which is potentially unsafe. Using ``yaml.load()``
|
||||
without explicitly setting a ``Loader=`` is also an error in pyYAML 6.0 (https://github.com/ansible-collections/community.general/pull/5632).
|
||||
- vmadm - fix for index out of range error in ``get_vm_uuid`` (https://github.com/ansible-collections/community.general/pull/5628).
|
||||
release_summary: Regular bugfix release.
|
||||
fragments:
|
||||
- 5.8.3.yml
|
||||
- 5602-proxmox-tags.yml
|
||||
- 5619-keycloak-improvements.yml
|
||||
- 5628-fix-vmadm-off-by-one.yml
|
||||
- 5632-vdo-Use-yaml-safe-load-instead-of-yaml-load.yml
|
||||
release_date: '2022-12-05'
|
||||
5.8.4:
|
||||
changes:
|
||||
bugfixes:
|
||||
- gconftool2 - fix crash that prevents setting a key (https://github.com/ansible-collections/community.general/issues/5591,
|
||||
https://github.com/ansible-collections/community.general/pull/5687).
|
||||
- gitlab_group_variables - fix dropping variables accidentally when GitLab introduced
|
||||
new properties (https://github.com/ansible-collections/community.general/pull/5667).
|
||||
- gitlab_project_variables - fix dropping variables accidentally when GitLab
|
||||
introduced new properties (https://github.com/ansible-collections/community.general/pull/5667).
|
||||
- lxc_container - fix the arguments of the lxc command which broke the creation
|
||||
and cloning of containers (https://github.com/ansible-collections/community.general/issues/5578).
|
||||
- opkg - fix issue that ``force=reinstall`` would not reinstall an existing
|
||||
package (https://github.com/ansible-collections/community.general/pull/5705).
|
||||
- proxmox_disk - fixed possible issues with redundant ``vmid`` parameter (https://github.com/ansible-collections/community.general/issues/5492,
|
||||
https://github.com/ansible-collections/community.general/pull/5672).
|
||||
- proxmox_nic - fixed possible issues with redundant ``vmid`` parameter (https://github.com/ansible-collections/community.general/issues/5492,
|
||||
https://github.com/ansible-collections/community.general/pull/5672).
|
||||
- unixy callback plugin - fix typo introduced when updating to use Ansible's
|
||||
configuration manager for handling options (https://github.com/ansible-collections/community.general/issues/5600).
|
||||
release_summary: Regular bugfix release.
|
||||
fragments:
|
||||
- 5.8.4.yml
|
||||
- 5659-fix-lxc_container-command.yml
|
||||
- 5666-gitlab-variables.yml
|
||||
- 5672-proxmox.yml
|
||||
- 5687-gconftool2.yml
|
||||
- 5705-opkg-fix-force-reinstall.yml
|
||||
- 5744-unixy-callback-fix-config-manager-typo.yml
|
||||
release_date: '2023-01-04'
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
|
||||
namespace: community
|
||||
name: general
|
||||
version: 5.6.0
|
||||
version: 5.8.4
|
||||
readme: README.md
|
||||
authors:
|
||||
- Ansible (https://github.com/ansible)
|
||||
|
||||
@@ -560,6 +560,8 @@ plugin_routing:
|
||||
redirect: community.general.files.iso_create
|
||||
iso_extract:
|
||||
redirect: community.general.files.iso_extract
|
||||
iso_customize:
|
||||
redirect: community.general.files.iso_customize
|
||||
jabber:
|
||||
redirect: community.general.notification.jabber
|
||||
java_cert:
|
||||
@@ -612,6 +614,8 @@ plugin_routing:
|
||||
redirect: community.general.identity.keycloak.keycloak_role
|
||||
keycloak_user_federation:
|
||||
redirect: community.general.identity.keycloak.keycloak_user_federation
|
||||
keycloak_user_rolemapping:
|
||||
redirect: community.general.identity.keycloak.keycloak_user_rolemapping
|
||||
keyring:
|
||||
redirect: community.general.system.keyring
|
||||
keyring_info:
|
||||
@@ -706,10 +710,14 @@ plugin_routing:
|
||||
redirect: community.general.remote_management.manageiq.manageiq_group
|
||||
manageiq_policies:
|
||||
redirect: community.general.remote_management.manageiq.manageiq_policies
|
||||
manageiq_policies_info:
|
||||
redirect: community.general.remote_management.manageiq.manageiq_policies_info
|
||||
manageiq_provider:
|
||||
redirect: community.general.remote_management.manageiq.manageiq_provider
|
||||
manageiq_tags:
|
||||
redirect: community.general.remote_management.manageiq.manageiq_tags
|
||||
manageiq_tags_info:
|
||||
redirect: community.general.remote_management.manageiq.manageiq_tags_info
|
||||
manageiq_tenant:
|
||||
redirect: community.general.remote_management.manageiq.manageiq_tenant
|
||||
manageiq_user:
|
||||
@@ -1213,6 +1221,8 @@ plugin_routing:
|
||||
redirect: community.general.cloud.profitbricks.profitbricks_volume_attachments
|
||||
proxmox:
|
||||
redirect: community.general.cloud.misc.proxmox
|
||||
proxmox_disk:
|
||||
redirect: community.general.cloud.misc.proxmox_disk
|
||||
proxmox_domain_info:
|
||||
redirect: community.general.cloud.misc.proxmox_domain_info
|
||||
proxmox_group_info:
|
||||
@@ -1369,6 +1379,10 @@ plugin_routing:
|
||||
redirect: community.general.cloud.scaleway.scaleway_compute
|
||||
scaleway_compute_private_network:
|
||||
redirect: community.general.cloud.scaleway.scaleway_compute_private_network
|
||||
scaleway_container_registry:
|
||||
redirect: community.general.cloud.scaleway.scaleway_container_registry
|
||||
scaleway_container_registry_info:
|
||||
redirect: community.general.cloud.scaleway.scaleway_container_registry_info
|
||||
scaleway_database_backup:
|
||||
redirect: community.general.cloud.scaleway.scaleway_database_backup
|
||||
scaleway_image_facts:
|
||||
|
||||
@@ -117,7 +117,7 @@ class BecomeModule(BecomeBase):
|
||||
|
||||
flags = self.get_option('become_flags')
|
||||
user = self.get_option('become_user')
|
||||
return '%s -q shell %s %s@ %s' % (become, flags, user, cmd)
|
||||
return '%s -q shell %s %s@ %s' % (become, flags, user, self._build_success_command(cmd, shell))
|
||||
|
||||
def check_success(self, b_output):
|
||||
b_output = self.remove_ansi_codes(b_output)
|
||||
|
||||
@@ -102,4 +102,4 @@ class BecomeModule(BecomeBase):
|
||||
|
||||
flags = self.get_option('become_flags')
|
||||
noexe = not self.get_option('wrap_exe')
|
||||
return '%s %s "%s"' % (exe, flags, self._build_success_command(cmd, shell, noexe=noexe))
|
||||
return '%s %s %s' % (exe, flags, self._build_success_command(cmd, shell, noexe=noexe))
|
||||
|
||||
@@ -62,6 +62,17 @@ DOCUMENTATION = '''
|
||||
- The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header).
|
||||
env:
|
||||
- name: TRACEPARENT
|
||||
disable_logs:
|
||||
default: false
|
||||
type: bool
|
||||
description:
|
||||
- Disable sending logs.
|
||||
env:
|
||||
- name: ANSIBLE_OPENTELEMETRY_DISABLE_LOGS
|
||||
ini:
|
||||
- section: callback_opentelemetry
|
||||
key: disable_logs
|
||||
version_added: 5.8.0
|
||||
requirements:
|
||||
- opentelemetry-api (Python library)
|
||||
- opentelemetry-exporter-otlp (Python library)
|
||||
@@ -110,13 +121,32 @@ try:
|
||||
from opentelemetry.sdk.trace.export import (
|
||||
BatchSpanProcessor
|
||||
)
|
||||
from opentelemetry.util._time import _time_ns
|
||||
|
||||
# Support for opentelemetry-api <= 1.12
|
||||
try:
|
||||
from opentelemetry.util._time import _time_ns
|
||||
except ImportError as imp_exc:
|
||||
OTEL_LIBRARY_TIME_NS_ERROR = imp_exc
|
||||
else:
|
||||
OTEL_LIBRARY_TIME_NS_ERROR = None
|
||||
|
||||
except ImportError as imp_exc:
|
||||
OTEL_LIBRARY_IMPORT_ERROR = imp_exc
|
||||
OTEL_LIBRARY_TIME_NS_ERROR = imp_exc
|
||||
else:
|
||||
OTEL_LIBRARY_IMPORT_ERROR = None
|
||||
|
||||
|
||||
if sys.version_info >= (3, 7):
|
||||
time_ns = time.time_ns
|
||||
elif not OTEL_LIBRARY_TIME_NS_ERROR:
|
||||
time_ns = _time_ns
|
||||
else:
|
||||
def time_ns():
|
||||
# Support versions older than 3.7 with opentelemetry-api > 1.12
|
||||
return int(time.time() * 1e9)
|
||||
|
||||
|
||||
class TaskData:
|
||||
"""
|
||||
Data about an individual task.
|
||||
@@ -128,12 +158,10 @@ class TaskData:
|
||||
self.path = path
|
||||
self.play = play
|
||||
self.host_data = OrderedDict()
|
||||
if sys.version_info >= (3, 7):
|
||||
self.start = time.time_ns()
|
||||
else:
|
||||
self.start = _time_ns()
|
||||
self.start = time_ns()
|
||||
self.action = action
|
||||
self.args = args
|
||||
self.dump = None
|
||||
|
||||
def add_host(self, host):
|
||||
if host.uuid in self.host_data:
|
||||
@@ -156,10 +184,7 @@ class HostData:
|
||||
self.name = name
|
||||
self.status = status
|
||||
self.result = result
|
||||
if sys.version_info >= (3, 7):
|
||||
self.finish = time.time_ns()
|
||||
else:
|
||||
self.finish = _time_ns()
|
||||
self.finish = time_ns()
|
||||
|
||||
|
||||
class OpenTelemetrySource(object):
|
||||
@@ -199,7 +224,7 @@ class OpenTelemetrySource(object):
|
||||
|
||||
tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args)
|
||||
|
||||
def finish_task(self, tasks_data, status, result):
|
||||
def finish_task(self, tasks_data, status, result, dump):
|
||||
""" record the results of a task for a single host """
|
||||
|
||||
task_uuid = result._task._uuid
|
||||
@@ -216,9 +241,10 @@ class OpenTelemetrySource(object):
|
||||
if self.ansible_version is None and hasattr(result, '_task_fields') and result._task_fields['args'].get('_ansible_version'):
|
||||
self.ansible_version = result._task_fields['args'].get('_ansible_version')
|
||||
|
||||
task.dump = dump
|
||||
task.add_host(HostData(host_uuid, host_name, status, result))
|
||||
|
||||
def generate_distributed_traces(self, otel_service_name, ansible_playbook, tasks_data, status, traceparent):
|
||||
def generate_distributed_traces(self, otel_service_name, ansible_playbook, tasks_data, status, traceparent, disable_logs):
|
||||
""" generate distributed traces from the collected TaskData and HostData """
|
||||
|
||||
tasks = []
|
||||
@@ -254,9 +280,9 @@ class OpenTelemetrySource(object):
|
||||
for task in tasks:
|
||||
for host_uuid, host_data in task.host_data.items():
|
||||
with tracer.start_as_current_span(task.name, start_time=task.start, end_on_exit=False) as span:
|
||||
self.update_span_data(task, host_data, span)
|
||||
self.update_span_data(task, host_data, span, disable_logs)
|
||||
|
||||
def update_span_data(self, task_data, host_data, span):
|
||||
def update_span_data(self, task_data, host_data, span, disable_logs):
|
||||
""" update the span with the given TaskData and HostData """
|
||||
|
||||
name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
|
||||
@@ -302,6 +328,9 @@ class OpenTelemetrySource(object):
|
||||
self.set_span_attribute(span, "ansible.task.host.status", host_data.status)
|
||||
# This will allow to enrich the service map
|
||||
self.add_attributes_for_service_map_if_possible(span, task_data)
|
||||
# Send logs
|
||||
if not disable_logs:
|
||||
span.add_event(task_data.dump)
|
||||
span.end(end_time=host_data.finish)
|
||||
|
||||
def set_span_attribute(self, span, attributeName, attributeValue):
|
||||
@@ -405,6 +434,7 @@ class CallbackModule(CallbackBase):
|
||||
def __init__(self, display=None):
|
||||
super(CallbackModule, self).__init__(display=display)
|
||||
self.hide_task_arguments = None
|
||||
self.disable_logs = None
|
||||
self.otel_service_name = None
|
||||
self.ansible_playbook = None
|
||||
self.play_name = None
|
||||
@@ -435,6 +465,8 @@ class CallbackModule(CallbackBase):
|
||||
|
||||
self.hide_task_arguments = self.get_option('hide_task_arguments')
|
||||
|
||||
self.disable_logs = self.get_option('disable_logs')
|
||||
|
||||
self.otel_service_name = self.get_option('otel_service_name')
|
||||
|
||||
if not self.otel_service_name:
|
||||
@@ -491,28 +523,32 @@ class CallbackModule(CallbackBase):
|
||||
self.opentelemetry.finish_task(
|
||||
self.tasks_data,
|
||||
status,
|
||||
result
|
||||
result,
|
||||
self._dump_results(result._result)
|
||||
)
|
||||
|
||||
def v2_runner_on_ok(self, result):
|
||||
self.opentelemetry.finish_task(
|
||||
self.tasks_data,
|
||||
'ok',
|
||||
result
|
||||
result,
|
||||
self._dump_results(result._result)
|
||||
)
|
||||
|
||||
def v2_runner_on_skipped(self, result):
|
||||
self.opentelemetry.finish_task(
|
||||
self.tasks_data,
|
||||
'skipped',
|
||||
result
|
||||
result,
|
||||
self._dump_results(result._result)
|
||||
)
|
||||
|
||||
def v2_playbook_on_include(self, included_file):
|
||||
self.opentelemetry.finish_task(
|
||||
self.tasks_data,
|
||||
'included',
|
||||
included_file
|
||||
included_file,
|
||||
""
|
||||
)
|
||||
|
||||
def v2_playbook_on_stats(self, stats):
|
||||
@@ -525,7 +561,8 @@ class CallbackModule(CallbackBase):
|
||||
self.ansible_playbook,
|
||||
self.tasks_data,
|
||||
status,
|
||||
self.traceparent
|
||||
self.traceparent,
|
||||
self.disable_logs
|
||||
)
|
||||
|
||||
def v2_runner_on_async_failed(self, result, **kwargs):
|
||||
|
||||
@@ -63,7 +63,7 @@ class CallbackModule(CallbackModule_default):
|
||||
|
||||
def _preprocess_result(self, result):
|
||||
self.delegated_vars = result._result.get('_ansible_delegated_vars', None)
|
||||
self._handle_exception(result._result, use_stderr=self.display_failed_stderr)
|
||||
self._handle_exception(result._result, use_stderr=self.get_option('display_failed_stderr'))
|
||||
self._handle_warnings(result._result)
|
||||
|
||||
def _process_result_output(self, result, msg):
|
||||
@@ -109,7 +109,7 @@ class CallbackModule(CallbackModule_default):
|
||||
self._display.display(msg)
|
||||
|
||||
def v2_runner_on_skipped(self, result, ignore_errors=False):
|
||||
if self.display_skipped_hosts:
|
||||
if self.get_option('display_skipped_hosts'):
|
||||
self._preprocess_result(result)
|
||||
display_color = C.COLOR_SKIP
|
||||
msg = "skipped"
|
||||
@@ -128,7 +128,7 @@ class CallbackModule(CallbackModule_default):
|
||||
msg += " | item: %s" % (item_value,)
|
||||
|
||||
task_result = self._process_result_output(result, msg)
|
||||
self._display.display(" " + task_result, display_color, stderr=self.display_failed_stderr)
|
||||
self._display.display(" " + task_result, display_color, stderr=self.get_option('display_failed_stderr'))
|
||||
|
||||
def v2_runner_on_ok(self, result, msg="ok", display_color=C.COLOR_OK):
|
||||
self._preprocess_result(result)
|
||||
@@ -142,7 +142,7 @@ class CallbackModule(CallbackModule_default):
|
||||
display_color = C.COLOR_CHANGED
|
||||
task_result = self._process_result_output(result, msg)
|
||||
self._display.display(" " + task_result, display_color)
|
||||
elif self.display_ok_hosts:
|
||||
elif self.get_option('display_ok_hosts'):
|
||||
task_result = self._process_result_output(result, msg)
|
||||
self._display.display(" " + task_result, display_color)
|
||||
|
||||
@@ -162,7 +162,7 @@ class CallbackModule(CallbackModule_default):
|
||||
display_color = C.COLOR_UNREACHABLE
|
||||
task_result = self._process_result_output(result, msg)
|
||||
|
||||
self._display.display(" " + task_result, display_color, stderr=self.display_failed_stderr)
|
||||
self._display.display(" " + task_result, display_color, stderr=self.get_option('display_failed_stderr'))
|
||||
|
||||
def v2_on_file_diff(self, result):
|
||||
if result._task.loop and 'results' in result._result:
|
||||
@@ -205,7 +205,7 @@ class CallbackModule(CallbackModule_default):
|
||||
colorize(u'ignored', t['ignored'], None)),
|
||||
log_only=True
|
||||
)
|
||||
if stats.custom and self.show_custom_stats:
|
||||
if stats.custom and self.get_option('show_custom_stats'):
|
||||
self._display.banner("CUSTOM STATS: ")
|
||||
# per host
|
||||
# TODO: come up with 'pretty format'
|
||||
|
||||
@@ -22,6 +22,7 @@ DOCUMENTATION = '''
|
||||
- The path of the chroot you want to access.
|
||||
default: inventory_hostname
|
||||
vars:
|
||||
- name: inventory_hostname
|
||||
- name: ansible_host
|
||||
executable:
|
||||
description:
|
||||
|
||||
@@ -43,6 +43,7 @@ options:
|
||||
- The path on which InfluxDB server is accessible
|
||||
- Only available when using python-influxdb >= 5.1.0
|
||||
type: str
|
||||
default: ''
|
||||
version_added: '0.2.0'
|
||||
validate_certs:
|
||||
description:
|
||||
@@ -80,4 +81,5 @@ options:
|
||||
description:
|
||||
- HTTP(S) proxy to use for Requests to connect to InfluxDB server.
|
||||
type: dict
|
||||
default: {}
|
||||
'''
|
||||
|
||||
@@ -23,6 +23,7 @@ options:
|
||||
description:
|
||||
- The password to use with I(bind_dn).
|
||||
type: str
|
||||
default: ''
|
||||
dn:
|
||||
required: true
|
||||
description:
|
||||
|
||||
33
plugins/doc_fragments/scaleway_waitable_resource.py
Normal file
33
plugins/doc_fragments/scaleway_waitable_resource.py
Normal file
@@ -0,0 +1,33 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
class ModuleDocFragment(object):
|
||||
|
||||
# Standard documentation fragment
|
||||
DOCUMENTATION = r'''
|
||||
options:
|
||||
wait:
|
||||
description:
|
||||
- Wait for the resource to reach its desired state before returning.
|
||||
type: bool
|
||||
default: true
|
||||
wait_timeout:
|
||||
type: int
|
||||
description:
|
||||
- Time to wait for the resource to reach the expected state.
|
||||
required: false
|
||||
default: 300
|
||||
wait_sleep_time:
|
||||
type: int
|
||||
description:
|
||||
- Time to wait before every attempt to check the state of the resource.
|
||||
required: false
|
||||
default: 3
|
||||
'''
|
||||
@@ -17,6 +17,7 @@ options:
|
||||
- Is needed for some modules
|
||||
type: dict
|
||||
required: false
|
||||
default: {}
|
||||
utm_host:
|
||||
description:
|
||||
- The REST Endpoint of the Sophos UTM.
|
||||
|
||||
@@ -26,6 +26,7 @@ DOCUMENTATION = '''
|
||||
description:
|
||||
- The correct parser for the input data.
|
||||
- For example C(ifconfig).
|
||||
- "Note: use underscores instead of dashes (if any) in the parser module name."
|
||||
- See U(https://github.com/kellyjonbrazil/jc#parsers) for the latest list of parsers.
|
||||
type: string
|
||||
required: true
|
||||
@@ -38,10 +39,16 @@ DOCUMENTATION = '''
|
||||
type: boolean
|
||||
default: false
|
||||
requirements:
|
||||
- jc (https://github.com/kellyjonbrazil/jc)
|
||||
- jc installed as a Python library (U(https://pypi.org/project/jc/))
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Install the prereqs of the jc filter (jc Python package) on the Ansible controller
|
||||
delegate_to: localhost
|
||||
ansible.builtin.pip:
|
||||
name: jc
|
||||
state: present
|
||||
|
||||
- name: Run command
|
||||
ansible.builtin.command: uname -a
|
||||
register: result
|
||||
@@ -94,15 +101,19 @@ def jc(data, parser, quiet=True, raw=False):
|
||||
dictionary or list of dictionaries
|
||||
|
||||
Example:
|
||||
|
||||
- name: run date command
|
||||
hosts: ubuntu
|
||||
tasks:
|
||||
- shell: date
|
||||
- name: install the prereqs of the jc filter (jc Python package) on the Ansible controller
|
||||
delegate_to: localhost
|
||||
ansible.builtin.pip:
|
||||
name: jc
|
||||
state: present
|
||||
- ansible.builtin.shell: date
|
||||
register: result
|
||||
- set_fact:
|
||||
- ansible.builtin.set_fact:
|
||||
myvar: "{{ result.stdout | community.general.jc('date') }}"
|
||||
- debug:
|
||||
- ansible.builtin.debug:
|
||||
msg: "{{ myvar }}"
|
||||
|
||||
produces:
|
||||
@@ -124,7 +135,7 @@ def jc(data, parser, quiet=True, raw=False):
|
||||
"""
|
||||
|
||||
if not HAS_LIB:
|
||||
raise AnsibleError('You need to install "jc" prior to running jc filter')
|
||||
raise AnsibleError('You need to install "jc" as a Python library on the Ansible controller prior to running jc filter')
|
||||
|
||||
try:
|
||||
jc_parser = importlib.import_module('jc.parsers.' + parser)
|
||||
|
||||
@@ -410,7 +410,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
stripped_value = value.strip()
|
||||
if stripped_value:
|
||||
parsed_key = key + "_parsed"
|
||||
properties[parsed_key] = [tag.strip() for tag in stripped_value.split(",")]
|
||||
properties[parsed_key] = [tag.strip() for tag in stripped_value.replace(',', ';').split(";")]
|
||||
|
||||
# The first field in the agent string tells you whether the agent is enabled
|
||||
# the rest of the comma separated string is extra config for the agent.
|
||||
|
||||
@@ -186,10 +186,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
else:
|
||||
# found vars, accumulate in hostvars for clean inventory set
|
||||
pref_k = 'vbox_' + k.strip().replace(' ', '_')
|
||||
if k.startswith(' '):
|
||||
if prevkey not in hostvars[current_host]:
|
||||
leading_spaces = len(k) - len(k.lstrip(' '))
|
||||
if 0 < leading_spaces <= 2:
|
||||
if prevkey not in hostvars[current_host] or not isinstance(hostvars[current_host][prevkey], dict):
|
||||
hostvars[current_host][prevkey] = {}
|
||||
hostvars[current_host][prevkey][pref_k] = v
|
||||
elif leading_spaces > 2:
|
||||
continue
|
||||
else:
|
||||
if v != '':
|
||||
hostvars[current_host][pref_k] = v
|
||||
|
||||
@@ -22,6 +22,11 @@ DOCUMENTATION = """
|
||||
required: true
|
||||
type: list
|
||||
elements: str
|
||||
search:
|
||||
description: Field to retrieve, for example C(name) or C(id).
|
||||
type: str
|
||||
default: name
|
||||
version_added: 5.7.0
|
||||
field:
|
||||
description: Field to fetch; leave unset to fetch whole response.
|
||||
type: str
|
||||
@@ -33,6 +38,11 @@ EXAMPLES = """
|
||||
msg: >-
|
||||
{{ lookup('community.general.bitwarden', 'a_test', field='password') }}
|
||||
|
||||
- name: "Get 'password' from Bitwarden record with id 'bafba515-af11-47e6-abe3-af1200cd18b2'"
|
||||
ansible.builtin.debug:
|
||||
msg: >-
|
||||
{{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') }}
|
||||
|
||||
- name: "Get full Bitwarden record named 'a_test'"
|
||||
ansible.builtin.debug:
|
||||
msg: >-
|
||||
@@ -81,7 +91,7 @@ class Bitwarden(object):
|
||||
raise BitwardenException(err)
|
||||
return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')
|
||||
|
||||
def _get_matches(self, search_value, search_field="name"):
|
||||
def _get_matches(self, search_value, search_field):
|
||||
"""Return matching records whose search_field is equal to key.
|
||||
"""
|
||||
out, err = self._run(['list', 'items', '--search', search_value])
|
||||
@@ -97,7 +107,7 @@ class Bitwarden(object):
|
||||
|
||||
If field is None, return the whole record for each match.
|
||||
"""
|
||||
matches = self._get_matches(search_value)
|
||||
matches = self._get_matches(search_value, search_field)
|
||||
|
||||
if field:
|
||||
return [match['login'][field] for match in matches]
|
||||
@@ -110,10 +120,11 @@ class LookupModule(LookupBase):
|
||||
def run(self, terms, variables=None, **kwargs):
|
||||
self.set_options(var_options=variables, direct=kwargs)
|
||||
field = self.get_option('field')
|
||||
search_field = self.get_option('search')
|
||||
if not _bitwarden.logged_in:
|
||||
raise AnsibleError("Not logged into Bitwarden. Run 'bw login'.")
|
||||
|
||||
return [_bitwarden.get_field(field, term) for term in terms]
|
||||
return [_bitwarden.get_field(field, term, search_field) for term in terms]
|
||||
|
||||
|
||||
_bitwarden = Bitwarden()
|
||||
|
||||
@@ -125,8 +125,16 @@ from ansible.errors import AnsibleLookupError
|
||||
from ansible.module_utils.common._collections_compat import Mapping, Sequence
|
||||
from ansible.module_utils.six import string_types
|
||||
from ansible.plugins.lookup import LookupBase
|
||||
from ansible.release import __version__ as ansible_version
|
||||
from ansible.template import Templar
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
|
||||
# Whether Templar has a cache, which can be controlled by Templar.template()'s cache option.
|
||||
# The cache was removed for ansible-core 2.14 (https://github.com/ansible/ansible/pull/78419)
|
||||
_TEMPLAR_HAS_TEMPLATE_CACHE = LooseVersion(ansible_version) < LooseVersion('2.14.0')
|
||||
|
||||
|
||||
class LookupModule(LookupBase):
|
||||
def __evaluate(self, expression, templar, variables):
|
||||
@@ -136,7 +144,10 @@ class LookupModule(LookupBase):
|
||||
``variables`` are the variables to use.
|
||||
"""
|
||||
templar.available_variables = variables or {}
|
||||
return templar.template("{0}{1}{2}".format("{{", expression, "}}"), cache=False)
|
||||
expression = "{0}{1}{2}".format("{{", expression, "}}")
|
||||
if _TEMPLAR_HAS_TEMPLATE_CACHE:
|
||||
return templar.template(expression, cache=False)
|
||||
return templar.template(expression)
|
||||
|
||||
def __process(self, result, terms, index, current, templar, variables):
|
||||
"""Fills ``result`` list with evaluated items.
|
||||
|
||||
@@ -268,7 +268,7 @@ class LookupModule(LookupBase):
|
||||
)
|
||||
self.realpass = 'pass: the standard unix password manager' in passoutput
|
||||
except (subprocess.CalledProcessError) as e:
|
||||
raise AnsibleError(e)
|
||||
raise AnsibleError('exit code {0} while running {1}. Error output: {2}'.format(e.returncode, e.cmd, e.output))
|
||||
|
||||
return self.realpass
|
||||
|
||||
@@ -354,7 +354,7 @@ class LookupModule(LookupBase):
|
||||
except (subprocess.CalledProcessError) as e:
|
||||
# 'not in password store' is the expected error if a password wasn't found
|
||||
if 'not in the password store' not in e.output:
|
||||
raise AnsibleError(e)
|
||||
raise AnsibleError('exit code {0} while running {1}. Error output: {2}'.format(e.returncode, e.cmd, e.output))
|
||||
|
||||
if self.paramvals['missing'] == 'error':
|
||||
raise AnsibleError('passwordstore: passname {0} not found and missing=error is set'.format(self.passname))
|
||||
@@ -387,7 +387,7 @@ class LookupModule(LookupBase):
|
||||
try:
|
||||
check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env)
|
||||
except (subprocess.CalledProcessError) as e:
|
||||
raise AnsibleError(e)
|
||||
raise AnsibleError('exit code {0} while running {1}. Error output: {2}'.format(e.returncode, e.cmd, e.output))
|
||||
return newpass
|
||||
|
||||
def generate_password(self):
|
||||
@@ -399,7 +399,7 @@ class LookupModule(LookupBase):
|
||||
try:
|
||||
check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env)
|
||||
except (subprocess.CalledProcessError) as e:
|
||||
raise AnsibleError(e)
|
||||
raise AnsibleError('exit code {0} while running {1}. Error output: {2}'.format(e.returncode, e.cmd, e.output))
|
||||
return newpass
|
||||
|
||||
def get_passresult(self):
|
||||
|
||||
@@ -88,9 +88,10 @@ class FormatError(CmdRunnerException):
|
||||
|
||||
|
||||
class _ArgFormat(object):
|
||||
def __init__(self, func, ignore_none=None):
|
||||
def __init__(self, func, ignore_none=None, ignore_missing_value=False):
|
||||
self.func = func
|
||||
self.ignore_none = ignore_none
|
||||
self.ignore_missing_value = ignore_missing_value
|
||||
|
||||
def __call__(self, value, ctx_ignore_none):
|
||||
ignore_none = self.ignore_none if self.ignore_none is not None else ctx_ignore_none
|
||||
@@ -127,7 +128,7 @@ class _Format(object):
|
||||
|
||||
@staticmethod
|
||||
def as_fixed(args):
|
||||
return _ArgFormat(lambda value: _ensure_list(args), ignore_none=False)
|
||||
return _ArgFormat(lambda value: _ensure_list(args), ignore_none=False, ignore_missing_value=True)
|
||||
|
||||
@staticmethod
|
||||
def as_func(func, ignore_none=None):
|
||||
@@ -135,14 +136,15 @@ class _Format(object):
|
||||
|
||||
@staticmethod
|
||||
def as_map(_map, default=None, ignore_none=None):
|
||||
if default is None:
|
||||
default = []
|
||||
return _ArgFormat(lambda value: _ensure_list(_map.get(value, default)), ignore_none=ignore_none)
|
||||
|
||||
@staticmethod
|
||||
def as_default_type(_type, arg="", ignore_none=None):
|
||||
fmt = _Format
|
||||
if _type == "dict":
|
||||
return fmt.as_func(lambda d: ["--{0}={1}".format(*a) for a in iteritems(d)],
|
||||
ignore_none=ignore_none)
|
||||
return fmt.as_func(lambda d: ["--{0}={1}".format(*a) for a in iteritems(d)], ignore_none=ignore_none)
|
||||
if _type == "list":
|
||||
return fmt.as_func(lambda value: ["--{0}".format(x) for x in value], ignore_none=ignore_none)
|
||||
if _type == "bool":
|
||||
@@ -261,10 +263,13 @@ class _CmdRunnerContext(object):
|
||||
for arg_name in self.args_order:
|
||||
value = None
|
||||
try:
|
||||
value = named_args[arg_name]
|
||||
if arg_name in named_args:
|
||||
value = named_args[arg_name]
|
||||
elif not runner.arg_formats[arg_name].ignore_missing_value:
|
||||
raise MissingArgumentValue(self.args_order, arg_name)
|
||||
self.cmd.extend(runner.arg_formats[arg_name](value, ctx_ignore_none=self.ignore_value_none))
|
||||
except KeyError:
|
||||
raise MissingArgumentValue(self.args_order, arg_name)
|
||||
except MissingArgumentValue:
|
||||
raise
|
||||
except Exception as e:
|
||||
raise FormatError(arg_name, value, runner.arg_formats[arg_name], e)
|
||||
|
||||
|
||||
@@ -110,3 +110,14 @@ def gitlab_authentication(module):
|
||||
GitLab remove Session API now that private tokens are removed from user API endpoints since version 10.2." % to_native(e))
|
||||
|
||||
return gitlab_instance
|
||||
|
||||
|
||||
def filter_returned_variables(gitlab_variables):
|
||||
# pop properties we don't know
|
||||
existing_variables = [dict(x.attributes) for x in gitlab_variables]
|
||||
KNOWN = ['key', 'value', 'masked', 'protected', 'variable_type', 'environment_scope']
|
||||
for item in existing_variables:
|
||||
for key in list(item.keys()):
|
||||
if key not in KNOWN:
|
||||
item.pop(key)
|
||||
return existing_variables
|
||||
|
||||
@@ -29,8 +29,15 @@ URL_CLIENT_ROLE_COMPOSITES = "{url}/admin/realms/{realm}/clients/{id}/roles/{nam
|
||||
|
||||
URL_REALM_ROLES = "{url}/admin/realms/{realm}/roles"
|
||||
URL_REALM_ROLE = "{url}/admin/realms/{realm}/roles/{name}"
|
||||
URL_REALM_ROLEMAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/realm"
|
||||
URL_REALM_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/realm/available"
|
||||
URL_REALM_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/realm/composite"
|
||||
URL_REALM_ROLE_COMPOSITES = "{url}/admin/realms/{realm}/roles/{name}/composites"
|
||||
|
||||
URL_ROLES_BY_ID = "{url}/admin/realms/{realm}/roles-by-id/{id}"
|
||||
URL_ROLES_BY_ID_COMPOSITES_CLIENTS = "{url}/admin/realms/{realm}/roles-by-id/{id}/composites/clients/{cid}"
|
||||
URL_ROLES_BY_ID_COMPOSITES = "{url}/admin/realms/{realm}/roles-by-id/{id}/composites"
|
||||
|
||||
URL_CLIENTTEMPLATE = "{url}/admin/realms/{realm}/client-templates/{id}"
|
||||
URL_CLIENTTEMPLATES = "{url}/admin/realms/{realm}/client-templates"
|
||||
URL_GROUPS = "{url}/admin/realms/{realm}/groups"
|
||||
@@ -41,9 +48,15 @@ URL_CLIENTSCOPE = "{url}/admin/realms/{realm}/client-scopes/{id}"
|
||||
URL_CLIENTSCOPE_PROTOCOLMAPPERS = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models"
|
||||
URL_CLIENTSCOPE_PROTOCOLMAPPER = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models/{mapper_id}"
|
||||
|
||||
URL_CLIENT_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}"
|
||||
URL_CLIENT_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/available"
|
||||
URL_CLIENT_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/composite"
|
||||
URL_CLIENT_GROUP_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}"
|
||||
URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/available"
|
||||
URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/composite"
|
||||
|
||||
URL_USERS = "{url}/admin/realms/{realm}/users"
|
||||
URL_CLIENT_SERVICE_ACCOUNT_USER = "{url}/admin/realms/{realm}/clients/{id}/service-account-user"
|
||||
URL_CLIENT_USER_ROLEMAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}"
|
||||
URL_CLIENT_USER_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}/available"
|
||||
URL_CLIENT_USER_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}/composite"
|
||||
|
||||
URL_AUTHENTICATION_FLOWS = "{url}/admin/realms/{realm}/authentication/flows"
|
||||
URL_AUTHENTICATION_FLOW = "{url}/admin/realms/{realm}/authentication/flows/{id}"
|
||||
@@ -446,10 +459,9 @@ class KeycloakAPI(object):
|
||||
self.module.fail_json(msg="Could not fetch rolemappings for client %s in realm %s: %s"
|
||||
% (cid, realm, str(e)))
|
||||
|
||||
def get_client_role_by_name(self, gid, cid, name, realm="master"):
|
||||
def get_client_role_id_by_name(self, cid, name, realm="master"):
|
||||
""" Get the role ID of a client.
|
||||
|
||||
:param gid: ID of the group from which to obtain the rolemappings.
|
||||
:param cid: ID of the client from which to obtain the rolemappings.
|
||||
:param name: Name of the role.
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
@@ -461,7 +473,7 @@ class KeycloakAPI(object):
|
||||
return role['id']
|
||||
return None
|
||||
|
||||
def get_client_rolemapping_by_id(self, gid, cid, rid, realm='master'):
|
||||
def get_client_group_rolemapping_by_id(self, gid, cid, rid, realm='master'):
|
||||
""" Obtain client representation by id
|
||||
|
||||
:param gid: ID of the group from which to obtain the rolemappings.
|
||||
@@ -470,7 +482,7 @@ class KeycloakAPI(object):
|
||||
:param realm: client from this realm
|
||||
:return: dict of rolemapping representation or None if none matching exist
|
||||
"""
|
||||
rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
|
||||
rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
|
||||
try:
|
||||
rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
@@ -483,7 +495,7 @@ class KeycloakAPI(object):
|
||||
% (cid, gid, realm, str(e)))
|
||||
return None
|
||||
|
||||
def get_client_available_rolemappings(self, gid, cid, realm="master"):
|
||||
def get_client_group_available_rolemappings(self, gid, cid, realm="master"):
|
||||
""" Fetch the available role of a client in a specified goup on the Keycloak server.
|
||||
|
||||
:param gid: ID of the group from which to obtain the rolemappings.
|
||||
@@ -491,7 +503,7 @@ class KeycloakAPI(object):
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: The rollemappings of specified group and client of the realm (default "master").
|
||||
"""
|
||||
available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=gid, client=cid)
|
||||
available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=gid, client=cid)
|
||||
try:
|
||||
return json.loads(to_native(open_url(available_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
@@ -500,7 +512,7 @@ class KeycloakAPI(object):
|
||||
self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
|
||||
% (cid, gid, realm, str(e)))
|
||||
|
||||
def get_client_composite_rolemappings(self, gid, cid, realm="master"):
|
||||
def get_client_group_composite_rolemappings(self, gid, cid, realm="master"):
|
||||
""" Fetch the composite role of a client in a specified group on the Keycloak server.
|
||||
|
||||
:param gid: ID of the group from which to obtain the rolemappings.
|
||||
@@ -508,15 +520,64 @@ class KeycloakAPI(object):
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: The rollemappings of specified group and client of the realm (default "master").
|
||||
"""
|
||||
available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=gid, client=cid)
|
||||
composite_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=gid, client=cid)
|
||||
try:
|
||||
return json.loads(to_native(open_url(available_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
return json.loads(to_native(open_url(composite_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
|
||||
% (cid, gid, realm, str(e)))
|
||||
|
||||
def get_role_by_id(self, rid, realm="master"):
|
||||
""" Fetch a role by its id on the Keycloak server.
|
||||
|
||||
:param rid: ID of the role.
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: The role.
|
||||
"""
|
||||
client_roles_url = URL_ROLES_BY_ID.format(url=self.baseurl, realm=realm, id=rid)
|
||||
try:
|
||||
return json.loads(to_native(open_url(client_roles_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not fetch role for id %s in realm %s: %s"
|
||||
% (rid, realm, str(e)))
|
||||
|
||||
def get_client_roles_by_id_composite_rolemappings(self, rid, cid, realm="master"):
|
||||
""" Fetch a role by its id on the Keycloak server.
|
||||
|
||||
:param rid: ID of the composite role.
|
||||
:param cid: ID of the client from which to obtain the rolemappings.
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: The role.
|
||||
"""
|
||||
client_roles_url = URL_ROLES_BY_ID_COMPOSITES_CLIENTS.format(url=self.baseurl, realm=realm, id=rid, cid=cid)
|
||||
try:
|
||||
return json.loads(to_native(open_url(client_roles_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not fetch role for id %s and cid %s in realm %s: %s"
|
||||
% (rid, cid, realm, str(e)))
|
||||
|
||||
def add_client_roles_by_id_composite_rolemapping(self, rid, roles_rep, realm="master"):
|
||||
""" Assign roles to composite role
|
||||
|
||||
:param rid: ID of the composite role.
|
||||
:param roles_rep: Representation of the roles to assign.
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: None.
|
||||
"""
|
||||
available_rolemappings_url = URL_ROLES_BY_ID_COMPOSITES.format(url=self.baseurl, realm=realm, id=rid)
|
||||
try:
|
||||
open_url(available_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(roles_rep),
|
||||
validate_certs=self.validate_certs, timeout=self.connection_timeout)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not assign roles to composite role %s and realm %s: %s"
|
||||
% (rid, realm, str(e)))
|
||||
|
||||
def add_group_rolemapping(self, gid, cid, role_rep, realm="master"):
|
||||
""" Fetch the composite role of a client in a specified goup on the Keycloak server.
|
||||
|
||||
@@ -526,7 +587,7 @@ class KeycloakAPI(object):
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: None.
|
||||
"""
|
||||
available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
|
||||
available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
|
||||
try:
|
||||
open_url(available_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
|
||||
validate_certs=self.validate_certs, timeout=self.connection_timeout)
|
||||
@@ -543,14 +604,214 @@ class KeycloakAPI(object):
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: None.
|
||||
"""
|
||||
available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
|
||||
available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
|
||||
try:
|
||||
open_url(available_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders,
|
||||
open_url(available_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
|
||||
validate_certs=self.validate_certs, timeout=self.connection_timeout)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not delete available rolemappings for client %s in group %s, realm %s: %s"
|
||||
% (cid, gid, realm, str(e)))
|
||||
|
||||
def get_client_user_rolemapping_by_id(self, uid, cid, rid, realm='master'):
|
||||
""" Obtain client representation by id
|
||||
|
||||
:param uid: ID of the user from which to obtain the rolemappings.
|
||||
:param cid: ID of the client from which to obtain the rolemappings.
|
||||
:param rid: ID of the role.
|
||||
:param realm: client from this realm
|
||||
:return: dict of rolemapping representation or None if none matching exist
|
||||
"""
|
||||
rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid)
|
||||
try:
|
||||
rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
for role in rolemappings:
|
||||
if rid == role['id']:
|
||||
return role
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not fetch rolemappings for client %s and user %s, realm %s: %s"
|
||||
% (cid, uid, realm, str(e)))
|
||||
return None
|
||||
|
||||
def get_client_user_available_rolemappings(self, uid, cid, realm="master"):
|
||||
""" Fetch the available role of a client for a specified user on the Keycloak server.
|
||||
|
||||
:param uid: ID of the user from which to obtain the rolemappings.
|
||||
:param cid: ID of the client from which to obtain the rolemappings.
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: The effective rollemappings of specified client and user of the realm (default "master").
|
||||
"""
|
||||
available_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=uid, client=cid)
|
||||
try:
|
||||
return json.loads(to_native(open_url(available_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not fetch effective rolemappings for client %s and user %s, realm %s: %s"
|
||||
% (cid, uid, realm, str(e)))
|
||||
|
||||
def get_client_user_composite_rolemappings(self, uid, cid, realm="master"):
|
||||
""" Fetch the composite role of a client for a specified user on the Keycloak server.
|
||||
|
||||
:param uid: ID of the user from which to obtain the rolemappings.
|
||||
:param cid: ID of the client from which to obtain the rolemappings.
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: The rollemappings of specified group and client of the realm (default "master").
|
||||
"""
|
||||
composite_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=uid, client=cid)
|
||||
try:
|
||||
return json.loads(to_native(open_url(composite_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not fetch available rolemappings for user %s of realm %s: %s"
|
||||
% (uid, realm, str(e)))
|
||||
|
||||
def get_realm_user_rolemapping_by_id(self, uid, rid, realm='master'):
|
||||
""" Obtain role representation by id
|
||||
|
||||
:param uid: ID of the user from which to obtain the rolemappings.
|
||||
:param rid: ID of the role.
|
||||
:param realm: client from this realm
|
||||
:return: dict of rolemapping representation or None if none matching exist
|
||||
"""
|
||||
rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid)
|
||||
try:
|
||||
rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
for role in rolemappings:
|
||||
if rid == role['id']:
|
||||
return role
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not fetch rolemappings for user %s, realm %s: %s"
|
||||
% (uid, realm, str(e)))
|
||||
return None
|
||||
|
||||
def get_realm_user_available_rolemappings(self, uid, realm="master"):
|
||||
""" Fetch the available role of a realm for a specified user on the Keycloak server.
|
||||
|
||||
:param uid: ID of the user from which to obtain the rolemappings.
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: The rollemappings of specified group and client of the realm (default "master").
|
||||
"""
|
||||
available_rolemappings_url = URL_REALM_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=uid)
|
||||
try:
|
||||
return json.loads(to_native(open_url(available_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not fetch available rolemappings for user %s of realm %s: %s"
|
||||
% (uid, realm, str(e)))
|
||||
|
||||
def get_realm_user_composite_rolemappings(self, uid, realm="master"):
|
||||
""" Fetch the composite role of a realm for a specified user on the Keycloak server.
|
||||
|
||||
:param uid: ID of the user from which to obtain the rolemappings.
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: The effective rollemappings of specified client and user of the realm (default "master").
|
||||
"""
|
||||
composite_rolemappings_url = URL_REALM_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=uid)
|
||||
try:
|
||||
return json.loads(to_native(open_url(composite_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not fetch effective rolemappings for user %s, realm %s: %s"
|
||||
% (uid, realm, str(e)))
|
||||
|
||||
def get_user_by_username(self, username, realm="master"):
|
||||
""" Fetch a keycloak user within a realm based on its username.
|
||||
|
||||
If the user does not exist, None is returned.
|
||||
:param username: Username of the user to fetch.
|
||||
:param realm: Realm in which the user resides; default 'master'
|
||||
"""
|
||||
users_url = URL_USERS.format(url=self.baseurl, realm=realm)
|
||||
users_url += '?username=%s&exact=true' % username
|
||||
try:
|
||||
return json.loads(to_native(open_url(users_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except ValueError as e:
|
||||
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain the user for realm %s and username %s: %s'
|
||||
% (realm, username, str(e)))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Could not obtain the user for realm %s and username %s: %s'
|
||||
% (realm, username, str(e)))
|
||||
|
||||
def get_service_account_user_by_client_id(self, client_id, realm="master"):
|
||||
""" Fetch a keycloak service account user within a realm based on its client_id.
|
||||
|
||||
If the user does not exist, None is returned.
|
||||
:param client_id: clientId of the service account user to fetch.
|
||||
:param realm: Realm in which the user resides; default 'master'
|
||||
"""
|
||||
cid = self.get_client_id(client_id, realm=realm)
|
||||
|
||||
service_account_user_url = URL_CLIENT_SERVICE_ACCOUNT_USER.format(url=self.baseurl, realm=realm, id=cid)
|
||||
try:
|
||||
return json.loads(to_native(open_url(service_account_user_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except ValueError as e:
|
||||
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain the service-account-user for realm %s and client_id %s: %s'
|
||||
% (realm, client_id, str(e)))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Could not obtain the service-account-user for realm %s and client_id %s: %s'
|
||||
% (realm, client_id, str(e)))
|
||||
|
||||
def add_user_rolemapping(self, uid, cid, role_rep, realm="master"):
|
||||
""" Assign a realm or client role to a specified user on the Keycloak server.
|
||||
|
||||
:param uid: ID of the user roles are assigned to.
|
||||
:param cid: ID of the client from which to obtain the rolemappings. If empty, roles are from the realm
|
||||
:param role_rep: Representation of the role to assign.
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: None.
|
||||
"""
|
||||
if cid is None:
|
||||
user_realm_rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid)
|
||||
try:
|
||||
open_url(user_realm_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
|
||||
validate_certs=self.validate_certs, timeout=self.connection_timeout)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not map roles to userId %s for realm %s and roles %s: %s"
|
||||
% (uid, realm, json.dumps(role_rep), str(e)))
|
||||
else:
|
||||
user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid)
|
||||
try:
|
||||
open_url(user_client_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
|
||||
validate_certs=self.validate_certs, timeout=self.connection_timeout)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not map roles to userId %s for client %s, realm %s and roles %s: %s"
|
||||
% (cid, uid, realm, json.dumps(role_rep), str(e)))
|
||||
|
||||
def delete_user_rolemapping(self, uid, cid, role_rep, realm="master"):
|
||||
""" Delete the rolemapping of a client in a specified user on the Keycloak server.
|
||||
|
||||
:param uid: ID of the user from which to remove the rolemappings.
|
||||
:param cid: ID of the client from which to remove the rolemappings.
|
||||
:param role_rep: Representation of the role to remove from rolemappings.
|
||||
:param realm: Realm from which to remove the rolemappings.
|
||||
:return: None.
|
||||
"""
|
||||
if cid is None:
|
||||
user_realm_rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid)
|
||||
try:
|
||||
open_url(user_realm_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
|
||||
validate_certs=self.validate_certs, timeout=self.connection_timeout)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not remove roles %s from userId %s, realm %s: %s"
|
||||
% (json.dumps(role_rep), uid, realm, str(e)))
|
||||
else:
|
||||
user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid)
|
||||
try:
|
||||
open_url(user_client_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
|
||||
validate_certs=self.validate_certs, timeout=self.connection_timeout)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not remove roles %s for client %s from userId %s, realm %s: %s"
|
||||
% (json.dumps(role_rep), cid, uid, realm, str(e)))
|
||||
|
||||
def get_client_templates(self, realm='master'):
|
||||
""" Obtains client template representations for client templates in a realm
|
||||
|
||||
@@ -930,7 +1191,6 @@ class KeycloakAPI(object):
|
||||
return json.loads(to_native(open_url(groups_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
|
||||
except HTTPError as e:
|
||||
if e.code == 404:
|
||||
return None
|
||||
|
||||
@@ -156,3 +156,315 @@ class ManageIQ(object):
|
||||
msg = "{collection_name} where {params} does not exist in manageiq".format(
|
||||
collection_name=collection_name, params=str(params))
|
||||
self.module.fail_json(msg=msg)
|
||||
|
||||
def policies(self, resource_id, resource_type, resource_name):
|
||||
manageiq = ManageIQ(self.module)
|
||||
|
||||
# query resource id, fail if resource does not exist
|
||||
if resource_id is None:
|
||||
resource_id = manageiq.find_collection_resource_or_fail(resource_type, name=resource_name)['id']
|
||||
|
||||
return ManageIQPolicies(manageiq, resource_type, resource_id)
|
||||
|
||||
def query_resource_id(self, resource_type, resource_name):
|
||||
""" Query the resource name in ManageIQ.
|
||||
|
||||
Returns:
|
||||
the resource ID if it exists in ManageIQ, Fail otherwise.
|
||||
"""
|
||||
resource = self.find_collection_resource_by(resource_type, name=resource_name)
|
||||
if resource:
|
||||
return resource["id"]
|
||||
else:
|
||||
msg = "{resource_name} {resource_type} does not exist in manageiq".format(
|
||||
resource_name=resource_name, resource_type=resource_type)
|
||||
self.module.fail_json(msg=msg)
|
||||
|
||||
|
||||
class ManageIQPolicies(object):
|
||||
"""
|
||||
Object to execute policies management operations of manageiq resources.
|
||||
"""
|
||||
|
||||
def __init__(self, manageiq, resource_type, resource_id):
|
||||
self.manageiq = manageiq
|
||||
|
||||
self.module = self.manageiq.module
|
||||
self.api_url = self.manageiq.api_url
|
||||
self.client = self.manageiq.client
|
||||
|
||||
self.resource_type = resource_type
|
||||
self.resource_id = resource_id
|
||||
self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format(
|
||||
api_url=self.api_url,
|
||||
resource_type=resource_type,
|
||||
resource_id=resource_id)
|
||||
|
||||
def query_profile_href(self, profile):
|
||||
""" Add or Update the policy_profile href field
|
||||
|
||||
Example:
|
||||
{name: STR, ...} => {name: STR, href: STR}
|
||||
"""
|
||||
resource = self.manageiq.find_collection_resource_or_fail(
|
||||
"policy_profiles", **profile)
|
||||
return dict(name=profile['name'], href=resource['href'])
|
||||
|
||||
def query_resource_profiles(self):
|
||||
""" Returns a set of the profile objects objects assigned to the resource
|
||||
"""
|
||||
url = '{resource_url}/policy_profiles?expand=resources'
|
||||
try:
|
||||
response = self.client.get(url.format(resource_url=self.resource_url))
|
||||
except Exception as e:
|
||||
msg = "Failed to query {resource_type} policies: {error}".format(
|
||||
resource_type=self.resource_type,
|
||||
error=e)
|
||||
self.module.fail_json(msg=msg)
|
||||
|
||||
resources = response.get('resources', [])
|
||||
|
||||
# clean the returned rest api profile object to look like:
|
||||
# {profile_name: STR, profile_description: STR, policies: ARR<POLICIES>}
|
||||
profiles = [self.clean_profile_object(profile) for profile in resources]
|
||||
|
||||
return profiles
|
||||
|
||||
def query_profile_policies(self, profile_id):
|
||||
""" Returns a set of the policy objects assigned to the resource
|
||||
"""
|
||||
url = '{api_url}/policy_profiles/{profile_id}?expand=policies'
|
||||
try:
|
||||
response = self.client.get(url.format(api_url=self.api_url, profile_id=profile_id))
|
||||
except Exception as e:
|
||||
msg = "Failed to query {resource_type} policies: {error}".format(
|
||||
resource_type=self.resource_type,
|
||||
error=e)
|
||||
self.module.fail_json(msg=msg)
|
||||
|
||||
resources = response.get('policies', [])
|
||||
|
||||
# clean the returned rest api policy object to look like:
|
||||
# {name: STR, description: STR, active: BOOL}
|
||||
policies = [self.clean_policy_object(policy) for policy in resources]
|
||||
|
||||
return policies
|
||||
|
||||
def clean_policy_object(self, policy):
|
||||
""" Clean a policy object to have human readable form of:
|
||||
{
|
||||
name: STR,
|
||||
description: STR,
|
||||
active: BOOL
|
||||
}
|
||||
"""
|
||||
name = policy.get('name')
|
||||
description = policy.get('description')
|
||||
active = policy.get('active')
|
||||
|
||||
return dict(
|
||||
name=name,
|
||||
description=description,
|
||||
active=active)
|
||||
|
||||
def clean_profile_object(self, profile):
|
||||
""" Clean a profile object to have human readable form of:
|
||||
{
|
||||
profile_name: STR,
|
||||
profile_description: STR,
|
||||
policies: ARR<POLICIES>
|
||||
}
|
||||
"""
|
||||
profile_id = profile['id']
|
||||
name = profile.get('name')
|
||||
description = profile.get('description')
|
||||
policies = self.query_profile_policies(profile_id)
|
||||
|
||||
return dict(
|
||||
profile_name=name,
|
||||
profile_description=description,
|
||||
policies=policies)
|
||||
|
||||
def profiles_to_update(self, profiles, action):
|
||||
""" Create a list of policies we need to update in ManageIQ.
|
||||
|
||||
Returns:
|
||||
Whether or not a change took place and a message describing the
|
||||
operation executed.
|
||||
"""
|
||||
profiles_to_post = []
|
||||
assigned_profiles = self.query_resource_profiles()
|
||||
|
||||
# make a list of assigned full profile names strings
|
||||
# e.g. ['openscap profile', ...]
|
||||
assigned_profiles_set = set([profile['profile_name'] for profile in assigned_profiles])
|
||||
|
||||
for profile in profiles:
|
||||
assigned = profile.get('name') in assigned_profiles_set
|
||||
|
||||
if (action == 'unassign' and assigned) or (action == 'assign' and not assigned):
|
||||
# add/update the policy profile href field
|
||||
# {name: STR, ...} => {name: STR, href: STR}
|
||||
profile = self.query_profile_href(profile)
|
||||
profiles_to_post.append(profile)
|
||||
|
||||
return profiles_to_post
|
||||
|
||||
def assign_or_unassign_profiles(self, profiles, action):
|
||||
""" Perform assign/unassign action
|
||||
"""
|
||||
# get a list of profiles needed to be changed
|
||||
profiles_to_post = self.profiles_to_update(profiles, action)
|
||||
if not profiles_to_post:
|
||||
return dict(
|
||||
changed=False,
|
||||
msg="Profiles {profiles} already {action}ed, nothing to do".format(
|
||||
action=action,
|
||||
profiles=profiles))
|
||||
|
||||
# try to assign or unassign profiles to resource
|
||||
url = '{resource_url}/policy_profiles'.format(resource_url=self.resource_url)
|
||||
try:
|
||||
response = self.client.post(url, action=action, resources=profiles_to_post)
|
||||
except Exception as e:
|
||||
msg = "Failed to {action} profile: {error}".format(
|
||||
action=action,
|
||||
error=e)
|
||||
self.module.fail_json(msg=msg)
|
||||
|
||||
# check all entities in result to be successful
|
||||
for result in response['results']:
|
||||
if not result['success']:
|
||||
msg = "Failed to {action}: {message}".format(
|
||||
action=action,
|
||||
message=result['message'])
|
||||
self.module.fail_json(msg=msg)
|
||||
|
||||
# successfully changed all needed profiles
|
||||
return dict(
|
||||
changed=True,
|
||||
msg="Successfully {action}ed profiles: {profiles}".format(
|
||||
action=action,
|
||||
profiles=profiles))
|
||||
|
||||
|
||||
class ManageIQTags(object):
|
||||
"""
|
||||
Object to execute tags management operations of manageiq resources.
|
||||
"""
|
||||
|
||||
def __init__(self, manageiq, resource_type, resource_id):
|
||||
self.manageiq = manageiq
|
||||
|
||||
self.module = self.manageiq.module
|
||||
self.api_url = self.manageiq.api_url
|
||||
self.client = self.manageiq.client
|
||||
|
||||
self.resource_type = resource_type
|
||||
self.resource_id = resource_id
|
||||
self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format(
|
||||
api_url=self.api_url,
|
||||
resource_type=resource_type,
|
||||
resource_id=resource_id)
|
||||
|
||||
def full_tag_name(self, tag):
|
||||
""" Returns the full tag name in manageiq
|
||||
"""
|
||||
return '/managed/{tag_category}/{tag_name}'.format(
|
||||
tag_category=tag['category'],
|
||||
tag_name=tag['name'])
|
||||
|
||||
def clean_tag_object(self, tag):
|
||||
""" Clean a tag object to have human readable form of:
|
||||
{
|
||||
full_name: STR,
|
||||
name: STR,
|
||||
display_name: STR,
|
||||
category: STR
|
||||
}
|
||||
"""
|
||||
full_name = tag.get('name')
|
||||
categorization = tag.get('categorization', {})
|
||||
|
||||
return dict(
|
||||
full_name=full_name,
|
||||
name=categorization.get('name'),
|
||||
display_name=categorization.get('display_name'),
|
||||
category=categorization.get('category', {}).get('name'))
|
||||
|
||||
def query_resource_tags(self):
|
||||
""" Returns a set of the tag objects assigned to the resource
|
||||
"""
|
||||
url = '{resource_url}/tags?expand=resources&attributes=categorization'
|
||||
try:
|
||||
response = self.client.get(url.format(resource_url=self.resource_url))
|
||||
except Exception as e:
|
||||
msg = "Failed to query {resource_type} tags: {error}".format(
|
||||
resource_type=self.resource_type,
|
||||
error=e)
|
||||
self.module.fail_json(msg=msg)
|
||||
|
||||
resources = response.get('resources', [])
|
||||
|
||||
# clean the returned rest api tag object to look like:
|
||||
# {full_name: STR, name: STR, display_name: STR, category: STR}
|
||||
tags = [self.clean_tag_object(tag) for tag in resources]
|
||||
|
||||
return tags
|
||||
|
||||
def tags_to_update(self, tags, action):
|
||||
""" Create a list of tags we need to update in ManageIQ.
|
||||
|
||||
Returns:
|
||||
Whether or not a change took place and a message describing the
|
||||
operation executed.
|
||||
"""
|
||||
tags_to_post = []
|
||||
assigned_tags = self.query_resource_tags()
|
||||
|
||||
# make a list of assigned full tag names strings
|
||||
# e.g. ['/managed/environment/prod', ...]
|
||||
assigned_tags_set = set([tag['full_name'] for tag in assigned_tags])
|
||||
|
||||
for tag in tags:
|
||||
assigned = self.full_tag_name(tag) in assigned_tags_set
|
||||
|
||||
if assigned and action == 'unassign':
|
||||
tags_to_post.append(tag)
|
||||
elif (not assigned) and action == 'assign':
|
||||
tags_to_post.append(tag)
|
||||
|
||||
return tags_to_post
|
||||
|
||||
def assign_or_unassign_tags(self, tags, action):
|
||||
""" Perform assign/unassign action
|
||||
"""
|
||||
# get a list of tags needed to be changed
|
||||
tags_to_post = self.tags_to_update(tags, action)
|
||||
if not tags_to_post:
|
||||
return dict(
|
||||
changed=False,
|
||||
msg="Tags already {action}ed, nothing to do".format(action=action))
|
||||
|
||||
# try to assign or unassign tags to resource
|
||||
url = '{resource_url}/tags'.format(resource_url=self.resource_url)
|
||||
try:
|
||||
response = self.client.post(url, action=action, resources=tags)
|
||||
except Exception as e:
|
||||
msg = "Failed to {action} tag: {error}".format(
|
||||
action=action,
|
||||
error=e)
|
||||
self.module.fail_json(msg=msg)
|
||||
|
||||
# check all entities in result to be successful
|
||||
for result in response['results']:
|
||||
if not result['success']:
|
||||
msg = "Failed to {action}: {message}".format(
|
||||
action=action,
|
||||
message=result['message'])
|
||||
self.module.fail_json(msg=msg)
|
||||
|
||||
# successfully changed all needed tags
|
||||
return dict(
|
||||
changed=True,
|
||||
msg="Successfully {action}ed tags".format(action=action))
|
||||
|
||||
@@ -34,6 +34,10 @@ class ArgFormat(object):
|
||||
|
||||
def __init__(self, name, fmt=None, style=FORMAT, stars=0):
|
||||
"""
|
||||
THIS CLASS IS BEING DEPRECATED.
|
||||
It was never meant to be used outside the scope of CmdMixin, and CmdMixin is being deprecated.
|
||||
See the deprecation notice in ``CmdMixin.__init__()`` below.
|
||||
|
||||
Creates a CLI-formatter for one specific argument. The argument may be a module parameter or just a named parameter for
|
||||
the CLI command execution.
|
||||
:param name: Name of the argument to be formatted
|
||||
@@ -88,6 +92,9 @@ class ArgFormat(object):
|
||||
|
||||
class CmdMixin(object):
|
||||
"""
|
||||
THIS CLASS IS BEING DEPRECATED.
|
||||
See the deprecation notice in ``CmdMixin.__init__()`` below.
|
||||
|
||||
Mixin for mapping module options to running a CLI command with its arguments.
|
||||
"""
|
||||
command = None
|
||||
@@ -110,6 +117,15 @@ class CmdMixin(object):
|
||||
result[param] = ArgFormat(param, **fmt_spec)
|
||||
return result
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(CmdMixin, self).__init__(*args, **kwargs)
|
||||
self.module.deprecate(
|
||||
'The CmdMixin used in classes CmdModuleHelper and CmdStateModuleHelper is being deprecated. '
|
||||
'Modules should use community.general.plugins.module_utils.cmd_runner.CmdRunner instead.',
|
||||
version='8.0.0',
|
||||
collection_name='community.general',
|
||||
)
|
||||
|
||||
def _calculate_args(self, extra_params=None, params=None):
|
||||
def add_arg_formatted_param(_cmd_args, arg_format, _value):
|
||||
args = list(arg_format.to_text(_value))
|
||||
|
||||
@@ -84,8 +84,16 @@ class StateModuleHelper(StateMixin, ModuleHelper):
|
||||
|
||||
|
||||
class CmdModuleHelper(CmdMixin, ModuleHelper):
|
||||
"""
|
||||
THIS CLASS IS BEING DEPRECATED.
|
||||
See the deprecation notice in ``CmdMixin.__init__()``.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
class CmdStateModuleHelper(CmdMixin, StateMixin, ModuleHelper):
|
||||
"""
|
||||
THIS CLASS IS BEING DEPRECATED.
|
||||
See the deprecation notice in ``CmdMixin.__init__()``.
|
||||
"""
|
||||
pass
|
||||
|
||||
@@ -6,7 +6,6 @@
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.module_utils.parsing.convert_bool import boolean
|
||||
from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt
|
||||
|
||||
|
||||
|
||||
@@ -137,3 +137,7 @@ class ProxmoxAnsible(object):
|
||||
return None
|
||||
|
||||
self.module.fail_json(msg='VM with vmid %s does not exist in cluster' % vmid)
|
||||
|
||||
def api_task_ok(self, node, taskid):
|
||||
status = self.proxmox_api.nodes(node).tasks(taskid).status.get()
|
||||
return status['status'] == 'stopped' and status['exitstatus'] == 'OK'
|
||||
|
||||
@@ -240,6 +240,7 @@ class RedfishUtils(object):
|
||||
return {'ret': False, 'msg': "SessionService resource not found"}
|
||||
else:
|
||||
session_service = data["SessionService"]["@odata.id"]
|
||||
self.session_service_uri = session_service
|
||||
response = self.get_request(self.root_uri + session_service)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
@@ -3081,3 +3082,60 @@ class RedfishUtils(object):
|
||||
|
||||
def get_multi_manager_inventory(self):
|
||||
return self.aggregate_managers(self.get_manager_inventory)
|
||||
|
||||
def set_session_service(self, sessions_config):
|
||||
result = {}
|
||||
response = self.get_request(self.root_uri + self.session_service_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
current_sessions_config = response['data']
|
||||
payload = {}
|
||||
for property, value in sessions_config.items():
|
||||
value = sessions_config[property]
|
||||
if property not in current_sessions_config:
|
||||
return {'ret': False, 'msg': "Property %s in sessions_config is invalid" % property}
|
||||
if isinstance(value, dict):
|
||||
if isinstance(current_sessions_config[property], dict):
|
||||
payload[property] = value
|
||||
elif isinstance(current_sessions_config[property], list):
|
||||
payload[property] = [value]
|
||||
else:
|
||||
return {'ret': False, 'msg': "Value of property %s in sessions_config is invalid" % property}
|
||||
else:
|
||||
payload[property] = value
|
||||
|
||||
need_change = False
|
||||
for property, set_value in payload.items():
|
||||
cur_value = current_sessions_config[property]
|
||||
if not isinstance(set_value, (dict, list)):
|
||||
if set_value != cur_value:
|
||||
need_change = True
|
||||
if isinstance(set_value, dict):
|
||||
for subprop in set_value.keys():
|
||||
if subprop not in current_sessions_config[property]:
|
||||
need_change = True
|
||||
break
|
||||
sub_set_value = set_value[subprop]
|
||||
sub_cur_value = current_sessions_config[property][subprop]
|
||||
if sub_set_value != sub_cur_value:
|
||||
need_change = True
|
||||
if isinstance(set_value, list):
|
||||
if len(set_value) != len(cur_value):
|
||||
need_change = True
|
||||
continue
|
||||
for i in range(len(set_value)):
|
||||
for subprop in set_value[i].keys():
|
||||
if subprop not in current_sessions_config[property][i]:
|
||||
need_change = True
|
||||
break
|
||||
sub_set_value = set_value[i][subprop]
|
||||
sub_cur_value = current_sessions_config[property][i][subprop]
|
||||
if sub_set_value != sub_cur_value:
|
||||
need_change = True
|
||||
if not need_change:
|
||||
return {'ret': True, 'changed': False, 'msg': "SessionService already configured"}
|
||||
|
||||
response = self.patch_request(self.root_uri + self.session_service_uri, payload)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
return {'ret': True, 'changed': True, 'msg': "Modified SessionService"}
|
||||
|
||||
@@ -9,6 +9,8 @@ __metaclass__ = type
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
import datetime
|
||||
import time
|
||||
|
||||
from ansible.module_utils.basic import env_fallback
|
||||
from ansible.module_utils.urls import fetch_url
|
||||
@@ -26,6 +28,14 @@ def scaleway_argument_spec():
|
||||
)
|
||||
|
||||
|
||||
def scaleway_waitable_resource_argument_spec():
|
||||
return dict(
|
||||
wait=dict(type="bool", default=True),
|
||||
wait_timeout=dict(type="int", default=300),
|
||||
wait_sleep_time=dict(type="int", default=3),
|
||||
)
|
||||
|
||||
|
||||
def payload_from_object(scw_object):
|
||||
return dict(
|
||||
(k, v)
|
||||
@@ -63,6 +73,25 @@ def parse_pagination_link(header):
|
||||
return parsed_relations
|
||||
|
||||
|
||||
def filter_sensitive_attributes(container, attributes):
|
||||
for attr in attributes:
|
||||
container[attr] = "SENSITIVE_VALUE"
|
||||
|
||||
return container
|
||||
|
||||
|
||||
def resource_attributes_should_be_changed(target, wished, verifiable_mutable_attributes, mutable_attributes):
|
||||
diff = dict()
|
||||
for attr in verifiable_mutable_attributes:
|
||||
if wished[attr] is not None and target[attr] != wished[attr]:
|
||||
diff[attr] = wished[attr]
|
||||
|
||||
if diff:
|
||||
return dict((attr, wished[attr]) for attr in mutable_attributes)
|
||||
else:
|
||||
return diff
|
||||
|
||||
|
||||
class Response(object):
|
||||
|
||||
def __init__(self, resp, info):
|
||||
@@ -169,6 +198,78 @@ class Scaleway(object):
|
||||
def warn(self, x):
|
||||
self.module.warn(str(x))
|
||||
|
||||
def fetch_state(self, resource):
|
||||
self.module.debug("fetch_state of resource: %s" % resource["id"])
|
||||
response = self.get(path=self.api_path + "/%s" % resource["id"])
|
||||
|
||||
if response.status_code == 404:
|
||||
return "absent"
|
||||
|
||||
if not response.ok:
|
||||
msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json)
|
||||
self.module.fail_json(msg=msg)
|
||||
|
||||
try:
|
||||
self.module.debug("Resource %s in state: %s" % (resource["id"], response.json["status"]))
|
||||
return response.json["status"]
|
||||
except KeyError:
|
||||
self.module.fail_json(msg="Could not fetch state in %s" % response.json)
|
||||
|
||||
def fetch_paginated_resources(self, resource_key, **pagination_kwargs):
|
||||
response = self.get(
|
||||
path=self.api_path,
|
||||
params=pagination_kwargs)
|
||||
|
||||
status_code = response.status_code
|
||||
if not response.ok:
|
||||
self.module.fail_json(msg='Error getting {0} [{1}: {2}]'.format(
|
||||
resource_key,
|
||||
response.status_code, response.json['message']))
|
||||
|
||||
return response.json[resource_key]
|
||||
|
||||
def fetch_all_resources(self, resource_key, **pagination_kwargs):
|
||||
resources = []
|
||||
|
||||
result = [None]
|
||||
while len(result) != 0:
|
||||
result = self.fetch_paginated_resources(resource_key, **pagination_kwargs)
|
||||
resources += result
|
||||
if 'page' in pagination_kwargs:
|
||||
pagination_kwargs['page'] += 1
|
||||
else:
|
||||
pagination_kwargs['page'] = 2
|
||||
|
||||
return resources
|
||||
|
||||
def wait_to_complete_state_transition(self, resource, stable_states, force_wait=False):
|
||||
wait = self.module.params["wait"]
|
||||
|
||||
if not (wait or force_wait):
|
||||
return
|
||||
|
||||
wait_timeout = self.module.params["wait_timeout"]
|
||||
wait_sleep_time = self.module.params["wait_sleep_time"]
|
||||
|
||||
# Prevent requesting the ressource status too soon
|
||||
time.sleep(wait_sleep_time)
|
||||
|
||||
start = datetime.datetime.utcnow()
|
||||
end = start + datetime.timedelta(seconds=wait_timeout)
|
||||
|
||||
while datetime.datetime.utcnow() < end:
|
||||
self.module.debug("We are going to wait for the resource to finish its transition")
|
||||
|
||||
state = self.fetch_state(resource)
|
||||
if state in stable_states:
|
||||
self.module.debug("It seems that the resource is not in transition anymore.")
|
||||
self.module.debug("load-balancer in state: %s" % self.fetch_state(resource))
|
||||
break
|
||||
|
||||
time.sleep(wait_sleep_time)
|
||||
else:
|
||||
self.module.fail_json(msg="Server takes too long to finish its transition")
|
||||
|
||||
|
||||
SCALEWAY_LOCATION = {
|
||||
'par1': {
|
||||
|
||||
@@ -27,7 +27,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ali_instance
|
||||
short_description: Create, Start, Stop, Restart or Terminate an Instance in ECS. Add or Remove Instance to/from a Security Group.
|
||||
short_description: Create, Start, Stop, Restart or Terminate an Instance in ECS; Add or Remove Instance to/from a Security Group
|
||||
description:
|
||||
- Create, start, stop, restart, modify or terminate ecs instances.
|
||||
- Add or remove ecs instances to/from security group.
|
||||
|
||||
@@ -27,7 +27,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ali_instance_info
|
||||
short_description: Gather information on instances of Alibaba Cloud ECS.
|
||||
short_description: Gather information on instances of Alibaba Cloud ECS
|
||||
description:
|
||||
- This module fetches data from the Open API in Alicloud.
|
||||
The module must be called from within the ECS instance itself.
|
||||
|
||||
@@ -61,6 +61,7 @@ options:
|
||||
- The values specified here will be used at installation time as --set arguments for atomic install.
|
||||
type: list
|
||||
elements: str
|
||||
default: []
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: clc_aa_policy
|
||||
short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud.
|
||||
short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud
|
||||
description:
|
||||
- An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud.
|
||||
options:
|
||||
|
||||
@@ -12,7 +12,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: clc_alert_policy
|
||||
short_description: Create or Delete Alert Policies at CenturyLink Cloud.
|
||||
short_description: Create or Delete Alert Policies at CenturyLink Cloud
|
||||
description:
|
||||
- An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud.
|
||||
options:
|
||||
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: clc_blueprint_package
|
||||
short_description: deploys a blue print package on a set of servers in CenturyLink Cloud.
|
||||
short_description: Deploys a blue print package on a set of servers in CenturyLink Cloud
|
||||
description:
|
||||
- An Ansible module to deploy blue print package on a set of servers in CenturyLink Cloud.
|
||||
options:
|
||||
|
||||
@@ -12,7 +12,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: clc_loadbalancer
|
||||
short_description: Create, Delete shared loadbalancers in CenturyLink Cloud.
|
||||
short_description: Create, Delete shared loadbalancers in CenturyLink Cloud
|
||||
description:
|
||||
- An Ansible module to Create, Delete shared loadbalancers in CenturyLink Cloud.
|
||||
options:
|
||||
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: clc_modify_server
|
||||
short_description: modify servers in CenturyLink Cloud.
|
||||
short_description: Modify servers in CenturyLink Cloud
|
||||
description:
|
||||
- An Ansible module to modify servers in CenturyLink Cloud.
|
||||
options:
|
||||
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: clc_publicip
|
||||
short_description: Add and Delete public ips on servers in CenturyLink Cloud.
|
||||
short_description: Add and Delete public ips on servers in CenturyLink Cloud
|
||||
description:
|
||||
- An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud.
|
||||
options:
|
||||
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: clc_server
|
||||
short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud.
|
||||
short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud
|
||||
description:
|
||||
- An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud.
|
||||
options:
|
||||
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: clc_server_snapshot
|
||||
short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud.
|
||||
short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud
|
||||
description:
|
||||
- An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud.
|
||||
options:
|
||||
|
||||
@@ -13,7 +13,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: dimensiondata_vlan
|
||||
short_description: Manage a VLAN in a Cloud Control network domain.
|
||||
short_description: Manage a VLAN in a Cloud Control network domain
|
||||
extends_documentation_fragment:
|
||||
- community.general.dimensiondata
|
||||
- community.general.dimensiondata_wait
|
||||
@@ -31,6 +31,7 @@ options:
|
||||
description:
|
||||
- A description of the VLAN.
|
||||
type: str
|
||||
default: ''
|
||||
network_domain:
|
||||
description:
|
||||
- The Id or name of the target network domain.
|
||||
@@ -40,11 +41,13 @@ options:
|
||||
description:
|
||||
- The base address for the VLAN's IPv4 network (e.g. 192.168.1.0).
|
||||
type: str
|
||||
default: ''
|
||||
private_ipv4_prefix_size:
|
||||
description:
|
||||
- The size of the IPv4 address space, e.g 24.
|
||||
- Required, if C(private_ipv4_base_address) is specified.
|
||||
type: int
|
||||
default: 0
|
||||
state:
|
||||
description:
|
||||
- The desired state for the target VLAN.
|
||||
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: heroku_collaborator
|
||||
short_description: "Add or delete app collaborators on Heroku"
|
||||
short_description: Add or delete app collaborators on Heroku
|
||||
description:
|
||||
- Manages collaborators for Heroku apps.
|
||||
- If set to C(present) and heroku user is already collaborator, then do nothing.
|
||||
|
||||
@@ -33,6 +33,7 @@ options:
|
||||
description:
|
||||
- The timeouts for each operations.
|
||||
type: dict
|
||||
default: {}
|
||||
suboptions:
|
||||
create:
|
||||
description:
|
||||
|
||||
@@ -33,6 +33,7 @@ options:
|
||||
description:
|
||||
- The timeouts for each operations.
|
||||
type: dict
|
||||
default: {}
|
||||
suboptions:
|
||||
create:
|
||||
description:
|
||||
|
||||
@@ -33,6 +33,7 @@ options:
|
||||
description:
|
||||
- The timeouts for each operations.
|
||||
type: dict
|
||||
default: {}
|
||||
suboptions:
|
||||
create:
|
||||
description:
|
||||
|
||||
@@ -33,6 +33,7 @@ options:
|
||||
description:
|
||||
- The timeouts for each operations.
|
||||
type: dict
|
||||
default: {}
|
||||
suboptions:
|
||||
create:
|
||||
description:
|
||||
|
||||
@@ -34,6 +34,7 @@ options:
|
||||
description:
|
||||
- The timeouts for each operations.
|
||||
type: dict
|
||||
default: {}
|
||||
suboptions:
|
||||
create:
|
||||
description:
|
||||
|
||||
@@ -33,6 +33,7 @@ options:
|
||||
description:
|
||||
- The timeouts for each operations.
|
||||
type: dict
|
||||
default: {}
|
||||
suboptions:
|
||||
create:
|
||||
description:
|
||||
|
||||
@@ -33,6 +33,7 @@ options:
|
||||
description:
|
||||
- The timeouts for each operations.
|
||||
type: dict
|
||||
default: {}
|
||||
suboptions:
|
||||
create:
|
||||
description:
|
||||
|
||||
@@ -37,6 +37,7 @@ options:
|
||||
description:
|
||||
- Add the instance to a Display Group in Linode Manager.
|
||||
type: str
|
||||
default: ''
|
||||
linode_id:
|
||||
description:
|
||||
- Unique ID of a linode server. This value is read-only in the sense that
|
||||
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: linode_v4
|
||||
short_description: Manage instances on the Linode cloud.
|
||||
short_description: Manage instances on the Linode cloud
|
||||
description: Manage instances on the Linode cloud.
|
||||
requirements:
|
||||
- python >= 2.7
|
||||
|
||||
@@ -85,7 +85,7 @@ options:
|
||||
type: str
|
||||
lxc_path:
|
||||
description:
|
||||
- Place container under PATH.
|
||||
- Place container under C(PATH).
|
||||
type: path
|
||||
container_log:
|
||||
description:
|
||||
@@ -104,7 +104,7 @@ options:
|
||||
- debug
|
||||
- DEBUG
|
||||
description:
|
||||
- Set the log level for a container where *container_log* was set.
|
||||
- Set the log level for a container where I(container_log) was set.
|
||||
type: str
|
||||
required: false
|
||||
default: INFO
|
||||
@@ -164,30 +164,30 @@ options:
|
||||
type: list
|
||||
elements: str
|
||||
requirements:
|
||||
- 'lxc >= 1.0 # OS package'
|
||||
- 'python >= 2.6 # OS Package'
|
||||
- 'lxc-python2 >= 0.1 # PIP Package from https://github.com/lxc/python2-lxc'
|
||||
- 'lxc >= 2.0 # OS package'
|
||||
- 'python3 >= 3.5 # OS Package'
|
||||
- 'python3-lxc # OS Package'
|
||||
notes:
|
||||
- Containers must have a unique name. If you attempt to create a container
|
||||
with a name that already exists in the users namespace the module will
|
||||
simply return as "unchanged".
|
||||
- The "container_command" can be used with any state except "absent". If
|
||||
used with state "stopped" the container will be "started", the command
|
||||
executed, and then the container "stopped" again. Likewise if the state
|
||||
is "stopped" and the container does not exist it will be first created,
|
||||
"started", the command executed, and then "stopped". If you use a "|"
|
||||
- The I(container_command) can be used with any state except C(absent). If
|
||||
used with state C(stopped) the container will be C(started), the command
|
||||
executed, and then the container C(stopped) again. Likewise if I(state=stopped)
|
||||
and the container does not exist it will be first created,
|
||||
C(started), the command executed, and then C(stopped). If you use a "|"
|
||||
in the variable you can use common script formatting within the variable
|
||||
itself The "container_command" option will always execute as BASH.
|
||||
When using "container_command" a log file is created in the /tmp/ directory
|
||||
which contains both stdout and stderr of any command executed.
|
||||
- If "archive" is **true** the system will attempt to create a compressed
|
||||
tarball of the running container. The "archive" option supports LVM backed
|
||||
itself. The I(container_command) option will always execute as BASH.
|
||||
When using I(container_command), a log file is created in the C(/tmp/) directory
|
||||
which contains both C(stdout) and C(stderr) of any command executed.
|
||||
- If I(archive=true) the system will attempt to create a compressed
|
||||
tarball of the running container. The I(archive) option supports LVM backed
|
||||
containers and will create a snapshot of the running container when
|
||||
creating the archive.
|
||||
- If your distro does not have a package for "python2-lxc", which is a
|
||||
- If your distro does not have a package for C(python3-lxc), which is a
|
||||
requirement for this module, it can be installed from source at
|
||||
"https://github.com/lxc/python2-lxc" or installed via pip using the package
|
||||
name lxc-python2.
|
||||
U(https://github.com/lxc/python3-lxc) or installed via pip using the
|
||||
package name C(lxc).
|
||||
'''
|
||||
|
||||
EXAMPLES = r"""
|
||||
@@ -433,8 +433,7 @@ else:
|
||||
HAS_LXC = True
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE
|
||||
from ansible.module_utils.six.moves import xrange
|
||||
from ansible.module_utils.parsing.convert_bool import boolean, BOOLEANS_FALSE
|
||||
from ansible.module_utils.common.text.converters import to_text, to_bytes
|
||||
|
||||
|
||||
@@ -559,7 +558,7 @@ popd
|
||||
def create_script(command):
|
||||
"""Write out a script onto a target.
|
||||
|
||||
This method should be backward compatible with Python 2.4+ when executing
|
||||
This method should be backward compatible with Python when executing
|
||||
from within the container.
|
||||
|
||||
:param command: command to run, this can be a script and can use spacing
|
||||
@@ -608,10 +607,10 @@ class LxcContainerManagement(object):
|
||||
:type module: ``object``
|
||||
"""
|
||||
self.module = module
|
||||
self.state = self.module.params.get('state', None)
|
||||
self.state = self.module.params['state']
|
||||
self.state_change = False
|
||||
self.lxc_vg = None
|
||||
self.lxc_path = self.module.params.get('lxc_path', None)
|
||||
self.lxc_path = self.module.params['lxc_path']
|
||||
self.container_name = self.module.params['name']
|
||||
self.container = self.get_container_bind()
|
||||
self.archive_info = None
|
||||
@@ -644,10 +643,7 @@ class LxcContainerManagement(object):
|
||||
:returns: True or False if the container is found.
|
||||
:rtype: ``bol``
|
||||
"""
|
||||
if [i for i in lxc.list_containers(config_path=lxc_path) if i == container_name]:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
return any(c == container_name for c in lxc.list_containers(config_path=lxc_path))
|
||||
|
||||
@staticmethod
|
||||
def _add_variables(variables_dict, build_command):
|
||||
@@ -679,13 +675,13 @@ class LxcContainerManagement(object):
|
||||
for v in LXC_BACKING_STORE[self.module.params['backing_store']]:
|
||||
variables.pop(v, None)
|
||||
|
||||
return_dict = dict()
|
||||
false_values = BOOLEANS_FALSE.union([None, ''])
|
||||
for k, v in variables.items():
|
||||
_var = self.module.params.get(k)
|
||||
if _var not in false_values:
|
||||
return_dict[v] = _var
|
||||
return return_dict
|
||||
result = dict(
|
||||
(v, self.module.params[k])
|
||||
for k, v in variables.items()
|
||||
if self.module.params[k] not in false_values
|
||||
)
|
||||
return result
|
||||
|
||||
def _config(self):
|
||||
"""Configure an LXC container.
|
||||
@@ -695,7 +691,7 @@ class LxcContainerManagement(object):
|
||||
restart the container upon completion.
|
||||
"""
|
||||
|
||||
_container_config = self.module.params.get('container_config')
|
||||
_container_config = self.module.params['container_config']
|
||||
if not _container_config:
|
||||
return False
|
||||
|
||||
@@ -785,12 +781,12 @@ class LxcContainerManagement(object):
|
||||
)
|
||||
|
||||
# Load logging for the instance when creating it.
|
||||
if self.module.params.get('clone_snapshot') in BOOLEANS_TRUE:
|
||||
if self.module.params['clone_snapshot']:
|
||||
build_command.append('--snapshot')
|
||||
# Check for backing_store == overlayfs if so force the use of snapshot
|
||||
# If overlay fs is used and snapshot is unset the clone command will
|
||||
# fail with an unsupported type.
|
||||
elif self.module.params.get('backing_store') == 'overlayfs':
|
||||
elif self.module.params['backing_store'] == 'overlayfs':
|
||||
build_command.append('--snapshot')
|
||||
|
||||
rc, return_data, err = self.module.run_command(build_command)
|
||||
@@ -838,7 +834,7 @@ class LxcContainerManagement(object):
|
||||
)
|
||||
|
||||
# Load logging for the instance when creating it.
|
||||
if self.module.params.get('container_log') in BOOLEANS_TRUE:
|
||||
if self.module.params['container_log']:
|
||||
# Set the logging path to the /var/log/lxc if uid is root. else
|
||||
# set it to the home folder of the user executing.
|
||||
try:
|
||||
@@ -863,7 +859,7 @@ class LxcContainerManagement(object):
|
||||
])
|
||||
|
||||
# Add the template commands to the end of the command if there are any
|
||||
template_options = self.module.params.get('template_options', None)
|
||||
template_options = self.module.params['template_options']
|
||||
if template_options:
|
||||
build_command.append('--')
|
||||
build_command += shlex.split(template_options)
|
||||
@@ -920,7 +916,7 @@ class LxcContainerManagement(object):
|
||||
def _execute_command(self):
|
||||
"""Execute a shell command."""
|
||||
|
||||
container_command = self.module.params.get('container_command')
|
||||
container_command = self.module.params['container_command']
|
||||
if container_command:
|
||||
container_state = self._get_state()
|
||||
if container_state == 'frozen':
|
||||
@@ -939,18 +935,17 @@ class LxcContainerManagement(object):
|
||||
"""
|
||||
|
||||
self.container = self.get_container_bind()
|
||||
for dummy in xrange(timeout):
|
||||
if self._get_state() != 'running':
|
||||
self.container.start()
|
||||
self.state_change = True
|
||||
# post startup sleep for 1 second.
|
||||
time.sleep(1)
|
||||
else:
|
||||
for dummy in range(timeout):
|
||||
if self._get_state() == 'running':
|
||||
return True
|
||||
|
||||
self.container.start()
|
||||
self.state_change = True
|
||||
# post startup sleep for 1 second.
|
||||
time.sleep(1)
|
||||
self.failure(
|
||||
lxc_container=self._container_data(),
|
||||
error='Failed to start container'
|
||||
' [ %s ]' % self.container_name,
|
||||
error='Failed to start container [ %s ]' % self.container_name,
|
||||
rc=1,
|
||||
msg='The container [ %s ] failed to start. Check to lxc is'
|
||||
' available and that the container is in a functional'
|
||||
@@ -963,7 +958,7 @@ class LxcContainerManagement(object):
|
||||
This will store archive_info in as self.archive_info
|
||||
"""
|
||||
|
||||
if self.module.params.get('archive') in BOOLEANS_TRUE:
|
||||
if self.module.params['archive']:
|
||||
self.archive_info = {
|
||||
'archive': self._container_create_tar()
|
||||
}
|
||||
@@ -974,7 +969,7 @@ class LxcContainerManagement(object):
|
||||
This will store archive_info in as self.archive_info
|
||||
"""
|
||||
|
||||
clone_name = self.module.params.get('clone_name')
|
||||
clone_name = self.module.params['clone_name']
|
||||
if clone_name:
|
||||
if not self._container_exists(container_name=clone_name, lxc_path=self.lxc_path):
|
||||
self.clone_info = {
|
||||
@@ -992,7 +987,7 @@ class LxcContainerManagement(object):
|
||||
:type timeout: ``int``
|
||||
"""
|
||||
|
||||
for dummy in xrange(timeout):
|
||||
for dummy in range(timeout):
|
||||
if not self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
|
||||
break
|
||||
|
||||
@@ -1340,11 +1335,11 @@ class LxcContainerManagement(object):
|
||||
|
||||
old_umask = os.umask(int('0077', 8))
|
||||
|
||||
archive_path = self.module.params.get('archive_path')
|
||||
archive_path = self.module.params['archive_path']
|
||||
if not os.path.isdir(archive_path):
|
||||
os.makedirs(archive_path)
|
||||
|
||||
archive_compression = self.module.params.get('archive_compression')
|
||||
archive_compression = self.module.params['archive_compression']
|
||||
compression_type = LXC_COMPRESSION_MAP[archive_compression]
|
||||
|
||||
# remove trailing / if present.
|
||||
@@ -1358,9 +1353,7 @@ class LxcContainerManagement(object):
|
||||
|
||||
build_command = [
|
||||
self.module.get_bin_path('tar', True),
|
||||
'--directory=%s' % os.path.realpath(
|
||||
os.path.expanduser(source_dir)
|
||||
),
|
||||
'--directory=%s' % os.path.realpath(source_dir),
|
||||
compression_type['argument'],
|
||||
archive_name,
|
||||
'.'
|
||||
@@ -1703,7 +1696,6 @@ def main():
|
||||
),
|
||||
clone_name=dict(
|
||||
type='str',
|
||||
required=False
|
||||
),
|
||||
clone_snapshot=dict(
|
||||
type='bool',
|
||||
@@ -1732,9 +1724,8 @@ def main():
|
||||
msg='The `lxc` module is not importable. Check the requirements.'
|
||||
)
|
||||
|
||||
lv_name = module.params.get('lv_name')
|
||||
if not lv_name:
|
||||
module.params['lv_name'] = module.params.get('name')
|
||||
if not module.params['lv_name']:
|
||||
module.params['lv_name'] = module.params['name']
|
||||
|
||||
lxc_manage = LxcContainerManagement(module=module)
|
||||
lxc_manage.run()
|
||||
|
||||
@@ -12,7 +12,7 @@ DOCUMENTATION = '''
|
||||
---
|
||||
module: memset_memstore_info
|
||||
author: "Simon Weald (@glitchcrab)"
|
||||
short_description: Retrieve Memstore product usage information.
|
||||
short_description: Retrieve Memstore product usage information
|
||||
notes:
|
||||
- An API key generated via the Memset customer control panel is needed with the
|
||||
following minimum scope - I(memstore.usage).
|
||||
|
||||
@@ -12,7 +12,7 @@ DOCUMENTATION = '''
|
||||
---
|
||||
module: memset_server_info
|
||||
author: "Simon Weald (@glitchcrab)"
|
||||
short_description: Retrieve server information.
|
||||
short_description: Retrieve server information
|
||||
notes:
|
||||
- An API key generated via the Memset customer control panel is needed with the
|
||||
following minimum scope - I(server.info).
|
||||
|
||||
@@ -12,7 +12,7 @@ DOCUMENTATION = '''
|
||||
---
|
||||
module: memset_zone
|
||||
author: "Simon Weald (@glitchcrab)"
|
||||
short_description: Creates and deletes Memset DNS zones.
|
||||
short_description: Creates and deletes Memset DNS zones
|
||||
notes:
|
||||
- Zones can be thought of as a logical group of domains, all of which share the
|
||||
same DNS records (i.e. they point to the same IP). An API key generated via the
|
||||
@@ -44,6 +44,7 @@ options:
|
||||
- The default TTL for all records created in the zone. This must be a
|
||||
valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_create).
|
||||
type: int
|
||||
default: 0
|
||||
choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ]
|
||||
force:
|
||||
required: false
|
||||
|
||||
@@ -12,7 +12,7 @@ DOCUMENTATION = '''
|
||||
---
|
||||
module: memset_zone_domain
|
||||
author: "Simon Weald (@glitchcrab)"
|
||||
short_description: Create and delete domains in Memset DNS zones.
|
||||
short_description: Create and delete domains in Memset DNS zones
|
||||
notes:
|
||||
- Zone domains can be thought of as a collection of domains, all of which share the
|
||||
same DNS records (i.e. they point to the same IP). An API key generated via the
|
||||
|
||||
@@ -12,7 +12,7 @@ DOCUMENTATION = '''
|
||||
---
|
||||
module: memset_zone_record
|
||||
author: "Simon Weald (@glitchcrab)"
|
||||
short_description: Create and delete records in Memset DNS zones.
|
||||
short_description: Create and delete records in Memset DNS zones
|
||||
notes:
|
||||
- Zones can be thought of as a logical group of domains, all of which share the
|
||||
same DNS records (i.e. they point to the same IP). An API key generated via the
|
||||
@@ -44,11 +44,13 @@ options:
|
||||
description:
|
||||
- C(SRV) and C(TXT) record priority, in the range 0 > 999 (inclusive).
|
||||
type: int
|
||||
default: 0
|
||||
record:
|
||||
required: false
|
||||
description:
|
||||
- The subdomain to create.
|
||||
type: str
|
||||
default: ''
|
||||
type:
|
||||
required: true
|
||||
description:
|
||||
@@ -65,6 +67,7 @@ options:
|
||||
description:
|
||||
- The record's TTL in seconds (will inherit zone's TTL if not explicitly set). This must be a
|
||||
valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create).
|
||||
default: 0
|
||||
choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ]
|
||||
type: int
|
||||
zone:
|
||||
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: cloud_init_data_facts
|
||||
short_description: Retrieve facts of cloud-init.
|
||||
short_description: Retrieve facts of cloud-init
|
||||
description:
|
||||
- Gathers facts by reading the status.json and result.json of cloud-init.
|
||||
author: René Moser (@resmo)
|
||||
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: proxmox
|
||||
short_description: management of instances in Proxmox VE cluster
|
||||
short_description: Management of instances in Proxmox VE cluster
|
||||
description:
|
||||
- allows you to create/delete/stop instances in Proxmox VE cluster
|
||||
- Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older)
|
||||
@@ -482,8 +482,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
|
||||
taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
|
||||
|
||||
while timeout:
|
||||
if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
if self.api_task_ok(node, taskid):
|
||||
return True
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
@@ -496,8 +495,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
|
||||
def start_instance(self, vm, vmid, timeout):
|
||||
taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.start.post()
|
||||
while timeout:
|
||||
if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
if self.api_task_ok(vm['node'], taskid):
|
||||
return True
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
@@ -513,8 +511,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
|
||||
else:
|
||||
taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post()
|
||||
while timeout:
|
||||
if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
if self.api_task_ok(vm['node'], taskid):
|
||||
return True
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
@@ -527,8 +524,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
|
||||
def umount_instance(self, vm, vmid, timeout):
|
||||
taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.umount.post()
|
||||
while timeout:
|
||||
if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
if self.api_task_ok(vm['node'], taskid):
|
||||
return True
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
@@ -775,8 +771,7 @@ def main():
|
||||
taskid = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE).delete(vmid, **delete_params)
|
||||
|
||||
while timeout:
|
||||
task_status = proxmox.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()
|
||||
if (task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK'):
|
||||
if proxmox.api_task_ok(vm['node'], taskid):
|
||||
module.exit_json(changed=True, msg="VM %s removed" % vmid)
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
|
||||
744
plugins/modules/cloud/misc/proxmox_disk.py
Normal file
744
plugins/modules/cloud/misc/proxmox_disk.py
Normal file
@@ -0,0 +1,744 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (c) 2022, Castor Sky (@castorsky) <csky57@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: proxmox_disk
|
||||
short_description: Management of a disk of a Qemu(KVM) VM in a Proxmox VE cluster
|
||||
version_added: 5.7.0
|
||||
description:
|
||||
- Allows you to perform some supported operations on a disk in Qemu(KVM) Virtual Machines in a Proxmox VE cluster.
|
||||
author: "Castor Sky (@castorsky) <csky57@gmail.com>"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The unique name of the VM.
|
||||
- You can specify either I(name) or I(vmid) or both of them.
|
||||
type: str
|
||||
vmid:
|
||||
description:
|
||||
- The unique ID of the VM.
|
||||
- You can specify either I(vmid) or I(name) or both of them.
|
||||
type: int
|
||||
disk:
|
||||
description:
|
||||
- The disk key (C(unused[n]), C(ide[n]), C(sata[n]), C(scsi[n]) or C(virtio[n])) you want to operate on.
|
||||
- Disk buses (IDE, SATA and so on) have fixed ranges of C(n) that accepted by Proxmox API.
|
||||
- >
|
||||
For IDE: 0-3;
|
||||
for SCSI: 0-30;
|
||||
for SATA: 0-5;
|
||||
for VirtIO: 0-15;
|
||||
for Unused: 0-255.
|
||||
type: str
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Indicates desired state of the disk.
|
||||
- >
|
||||
I(state=present) can be used to create, replace disk or update options in existing disk. It will create missing
|
||||
disk or update options in existing one by default. See the I(create) parameter description to control behavior
|
||||
of this option.
|
||||
- Some updates on options (like I(cache)) are not being applied instantly and require VM restart.
|
||||
- >
|
||||
Use I(state=detached) to detach existing disk from VM but do not remove it entirely.
|
||||
When I(state=detached) and disk is C(unused[n]) it will be left in same state (not removed).
|
||||
- >
|
||||
I(state=moved) may be used to change backing storage for the disk in bounds of the same VM
|
||||
or to send the disk to another VM (using the same backing storage).
|
||||
- >
|
||||
I(state=resized) intended to change the disk size. As of Proxmox 7.2 you can only increase the disk size
|
||||
because shrinking disks is not supported by the PVE API and has to be done manually.
|
||||
- To entirely remove the disk from backing storage use I(state=absent).
|
||||
type: str
|
||||
choices: ['present', 'resized', 'detached', 'moved', 'absent']
|
||||
default: present
|
||||
create:
|
||||
description:
|
||||
- With I(create) flag you can control behavior of I(state=present).
|
||||
- When I(create=disabled) it will not create new disk (if not exists) but will update options in existing disk.
|
||||
- When I(create=regular) it will either create new disk (if not exists) or update options in existing disk.
|
||||
- When I(create=forced) it will always create new disk (if disk exists it will be detached and left unused).
|
||||
type: str
|
||||
choices: ['disabled', 'regular', 'forced']
|
||||
default: regular
|
||||
storage:
|
||||
description:
|
||||
- The drive's backing storage.
|
||||
- Used only when I(state) is C(present).
|
||||
type: str
|
||||
size:
|
||||
description:
|
||||
- Desired volume size in GB to allocate when I(state=present) (specify I(size) without suffix).
|
||||
- >
|
||||
New (or additional) size of volume when I(state=resized). With the C(+) sign
|
||||
the value is added to the actual size of the volume
|
||||
and without it, the value is taken as an absolute one.
|
||||
type: str
|
||||
bwlimit:
|
||||
description:
|
||||
- Override I/O bandwidth limit (in KB/s).
|
||||
- Used only when I(state=moved).
|
||||
type: int
|
||||
delete_moved:
|
||||
description:
|
||||
- Delete the original disk after successful copy.
|
||||
- By default the original disk is kept as unused disk.
|
||||
- Used only when I(state=moved).
|
||||
type: bool
|
||||
target_disk:
|
||||
description:
|
||||
- The config key the disk will be moved to on the target VM (for example, C(ide0) or C(scsi1)).
|
||||
- Default is the source disk key.
|
||||
- Used only when I(state=moved).
|
||||
type: str
|
||||
target_storage:
|
||||
description:
|
||||
- Move the disk to this storage when I(state=moved).
|
||||
- You can move between storages only in scope of one VM.
|
||||
- Mutually exclusive with I(target_vmid).
|
||||
type: str
|
||||
target_vmid:
|
||||
description:
|
||||
- The (unique) ID of the VM where disk will be placed when I(state=moved).
|
||||
- You can move disk between VMs only when the same storage is used.
|
||||
- Mutually exclusive with I(target_vmid).
|
||||
type: int
|
||||
timeout:
|
||||
description:
|
||||
- Timeout in seconds to wait when moving disk.
|
||||
- Used only when I(state=moved).
|
||||
type: int
|
||||
default: 600
|
||||
aio:
|
||||
description:
|
||||
- AIO type to use.
|
||||
type: str
|
||||
choices: ['native', 'threads', 'io_uring']
|
||||
backup:
|
||||
description:
|
||||
- Whether the drive should be included when making backups.
|
||||
type: bool
|
||||
bps_max_length:
|
||||
description:
|
||||
- Maximum length of total r/w I/O bursts in seconds.
|
||||
type: int
|
||||
bps_rd_max_length:
|
||||
description:
|
||||
- Maximum length of read I/O bursts in seconds.
|
||||
type: int
|
||||
bps_wr_max_length:
|
||||
description:
|
||||
- Maximum length of write I/O bursts in seconds.
|
||||
type: int
|
||||
cache:
|
||||
description:
|
||||
- The drive's cache mode.
|
||||
type: str
|
||||
choices: ['none', 'writethrough', 'writeback', 'unsafe', 'directsync']
|
||||
cyls:
|
||||
description:
|
||||
- Force the drive's physical geometry to have a specific cylinder count.
|
||||
type: int
|
||||
detect_zeroes:
|
||||
description:
|
||||
- Control whether to detect and try to optimize writes of zeroes.
|
||||
type: bool
|
||||
discard:
|
||||
description:
|
||||
- Control whether to pass discard/trim requests to the underlying storage.
|
||||
type: str
|
||||
choices: ['ignore', 'on']
|
||||
format:
|
||||
description:
|
||||
- The drive's backing file's data format.
|
||||
type: str
|
||||
choices: ['raw', 'cow', 'qcow', 'qed', 'qcow2', 'vmdk', 'cloop']
|
||||
heads:
|
||||
description:
|
||||
- Force the drive's physical geometry to have a specific head count.
|
||||
type: int
|
||||
import_from:
|
||||
description:
|
||||
- Import volume from this existing one.
|
||||
- Volume string format
|
||||
- C(<STORAGE>:<VMID>/<FULL_NAME>) or C(<ABSOLUTE_PATH>/<FULL_NAME>)
|
||||
- Attention! Only root can use absolute paths.
|
||||
- This parameter is mutually exclusive with I(size).
|
||||
type: str
|
||||
iops:
|
||||
description:
|
||||
- Maximum total r/w I/O in operations per second.
|
||||
- You can specify either total limit or per operation (mutually exclusive with I(iops_rd) and I(iops_wr)).
|
||||
type: int
|
||||
iops_max:
|
||||
description:
|
||||
- Maximum unthrottled total r/w I/O pool in operations per second.
|
||||
type: int
|
||||
iops_max_length:
|
||||
description:
|
||||
- Maximum length of total r/w I/O bursts in seconds.
|
||||
type: int
|
||||
iops_rd:
|
||||
description:
|
||||
- Maximum read I/O in operations per second.
|
||||
- You can specify either read or total limit (mutually exclusive with I(iops)).
|
||||
type: int
|
||||
iops_rd_max:
|
||||
description:
|
||||
- Maximum unthrottled read I/O pool in operations per second.
|
||||
type: int
|
||||
iops_rd_max_length:
|
||||
description:
|
||||
- Maximum length of read I/O bursts in seconds.
|
||||
type: int
|
||||
iops_wr:
|
||||
description:
|
||||
- Maximum write I/O in operations per second.
|
||||
- You can specify either write or total limit (mutually exclusive with I(iops)).
|
||||
type: int
|
||||
iops_wr_max:
|
||||
description:
|
||||
- Maximum unthrottled write I/O pool in operations per second.
|
||||
type: int
|
||||
iops_wr_max_length:
|
||||
description:
|
||||
- Maximum length of write I/O bursts in seconds.
|
||||
type: int
|
||||
iothread:
|
||||
description:
|
||||
- Whether to use iothreads for this drive (only for SCSI and VirtIO)
|
||||
type: bool
|
||||
mbps:
|
||||
description:
|
||||
- Maximum total r/w speed in megabytes per second.
|
||||
- Can be fractional but use with caution - fractionals less than 1 are not supported officially.
|
||||
- You can specify either total limit or per operation (mutually exclusive with I(mbps_rd) and I(mbps_wr)).
|
||||
type: float
|
||||
mbps_max:
|
||||
description:
|
||||
- Maximum unthrottled total r/w pool in megabytes per second.
|
||||
type: float
|
||||
mbps_rd:
|
||||
description:
|
||||
- Maximum read speed in megabytes per second.
|
||||
- You can specify either read or total limit (mutually exclusive with I(mbps)).
|
||||
type: float
|
||||
mbps_rd_max:
|
||||
description:
|
||||
- Maximum unthrottled read pool in megabytes per second.
|
||||
type: float
|
||||
mbps_wr:
|
||||
description:
|
||||
- Maximum write speed in megabytes per second.
|
||||
- You can specify either write or total limit (mutually exclusive with I(mbps)).
|
||||
type: float
|
||||
mbps_wr_max:
|
||||
description:
|
||||
- Maximum unthrottled write pool in megabytes per second.
|
||||
type: float
|
||||
media:
|
||||
description:
|
||||
- The drive's media type.
|
||||
type: str
|
||||
choices: ['cdrom', 'disk']
|
||||
queues:
|
||||
description:
|
||||
- Number of queues (SCSI only).
|
||||
type: int
|
||||
replicate:
|
||||
description:
|
||||
- Whether the drive should considered for replication jobs.
|
||||
type: bool
|
||||
rerror:
|
||||
description:
|
||||
- Read error action.
|
||||
type: str
|
||||
choices: ['ignore', 'report', 'stop']
|
||||
ro:
|
||||
description:
|
||||
- Whether the drive is read-only.
|
||||
type: bool
|
||||
scsiblock:
|
||||
description:
|
||||
- Whether to use scsi-block for full passthrough of host block device.
|
||||
- Can lead to I/O errors in combination with low memory or high memory fragmentation on host.
|
||||
type: bool
|
||||
secs:
|
||||
description:
|
||||
- Force the drive's physical geometry to have a specific sector count.
|
||||
type: int
|
||||
serial:
|
||||
description:
|
||||
- The drive's reported serial number, url-encoded, up to 20 bytes long.
|
||||
type: str
|
||||
shared:
|
||||
description:
|
||||
- Mark this locally-managed volume as available on all nodes.
|
||||
- This option does not share the volume automatically, it assumes it is shared already!
|
||||
type: bool
|
||||
snapshot:
|
||||
description:
|
||||
- Control qemu's snapshot mode feature.
|
||||
- If activated, changes made to the disk are temporary and will be discarded when the VM is shutdown.
|
||||
type: bool
|
||||
ssd:
|
||||
description:
|
||||
- Whether to expose this drive as an SSD, rather than a rotational hard disk.
|
||||
type: bool
|
||||
trans:
|
||||
description:
|
||||
- Force disk geometry bios translation mode.
|
||||
type: str
|
||||
choices: ['auto', 'lba', 'none']
|
||||
werror:
|
||||
description:
|
||||
- Write error action.
|
||||
type: str
|
||||
choices: ['enospc', 'ignore', 'report', 'stop']
|
||||
wwn:
|
||||
description:
|
||||
- The drive's worldwide name, encoded as 16 bytes hex string, prefixed by C(0x).
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- community.general.proxmox.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create new disk in VM (do not rewrite in case it exists already)
|
||||
community.general.proxmox_disk:
|
||||
api_host: node1
|
||||
api_user: root@pam
|
||||
api_token_id: token1
|
||||
api_token_secret: some-token-data
|
||||
name: vm-name
|
||||
disk: scsi3
|
||||
backup: true
|
||||
cache: none
|
||||
storage: local-zfs
|
||||
size: 5
|
||||
state: present
|
||||
|
||||
- name: Create new disk in VM (force rewrite in case it exists already)
|
||||
community.general.proxmox_disk:
|
||||
api_host: node1
|
||||
api_user: root@pam
|
||||
api_token_id: token1
|
||||
api_token_secret: some-token-data
|
||||
vmid: 101
|
||||
disk: scsi3
|
||||
format: qcow2
|
||||
storage: local
|
||||
size: 16
|
||||
create: forced
|
||||
state: present
|
||||
|
||||
- name: Update existing disk
|
||||
community.general.proxmox_disk:
|
||||
api_host: node1
|
||||
api_user: root@pam
|
||||
api_token_id: token1
|
||||
api_token_secret: some-token-data
|
||||
vmid: 101
|
||||
disk: ide0
|
||||
backup: false
|
||||
ro: true
|
||||
aio: native
|
||||
state: present
|
||||
|
||||
- name: Grow existing disk
|
||||
community.general.proxmox_disk:
|
||||
api_host: node1
|
||||
api_user: root@pam
|
||||
api_token_id: token1
|
||||
api_token_secret: some-token-data
|
||||
vmid: 101
|
||||
disk: sata4
|
||||
size: +5G
|
||||
state: resized
|
||||
|
||||
- name: Detach disk (leave it unused)
|
||||
community.general.proxmox_disk:
|
||||
api_host: node1
|
||||
api_user: root@pam
|
||||
api_token_id: token1
|
||||
api_token_secret: some-token-data
|
||||
name: vm-name
|
||||
disk: virtio0
|
||||
state: detached
|
||||
|
||||
- name: Move disk to another storage
|
||||
community.general.proxmox_disk:
|
||||
api_host: node1
|
||||
api_user: root@pam
|
||||
api_password: secret
|
||||
vmid: 101
|
||||
disk: scsi7
|
||||
target_storage: local
|
||||
format: qcow2
|
||||
state: moved
|
||||
|
||||
- name: Move disk from one VM to another
|
||||
community.general.proxmox_disk:
|
||||
api_host: node1
|
||||
api_user: root@pam
|
||||
api_token_id: token1
|
||||
api_token_secret: some-token-data
|
||||
vmid: 101
|
||||
disk: scsi7
|
||||
target_vmid: 201
|
||||
state: moved
|
||||
|
||||
- name: Remove disk permanently
|
||||
community.general.proxmox_disk:
|
||||
api_host: node1
|
||||
api_user: root@pam
|
||||
api_password: secret
|
||||
vmid: 101
|
||||
disk: scsi4
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
vmid:
|
||||
description: The VM vmid.
|
||||
returned: success
|
||||
type: int
|
||||
sample: 101
|
||||
msg:
|
||||
description: A short message on what the module did.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "Disk scsi3 created in VM 101"
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec,
|
||||
ProxmoxAnsible)
|
||||
from re import compile, match, sub
|
||||
from time import sleep
|
||||
|
||||
|
||||
def disk_conf_str_to_dict(config_string):
|
||||
config = config_string.split(',')
|
||||
storage_volume = config.pop(0).split(':')
|
||||
config.sort()
|
||||
storage_name = storage_volume[0]
|
||||
volume_name = storage_volume[1]
|
||||
config_current = dict(
|
||||
volume='%s:%s' % (storage_name, volume_name),
|
||||
storage_name=storage_name,
|
||||
volume_name=volume_name
|
||||
)
|
||||
|
||||
for option in config:
|
||||
k, v = option.split('=')
|
||||
config_current[k] = v
|
||||
|
||||
return config_current
|
||||
|
||||
|
||||
class ProxmoxDiskAnsible(ProxmoxAnsible):
|
||||
create_update_fields = [
|
||||
'aio', 'backup', 'bps_max_length', 'bps_rd_max_length', 'bps_wr_max_length',
|
||||
'cache', 'cyls', 'detect_zeroes', 'discard', 'format', 'heads', 'import_from', 'iops', 'iops_max',
|
||||
'iops_max_length', 'iops_rd', 'iops_rd_max', 'iops_rd_max_length', 'iops_wr', 'iops_wr_max',
|
||||
'iops_wr_max_length', 'iothread', 'mbps', 'mbps_max', 'mbps_rd', 'mbps_rd_max', 'mbps_wr', 'mbps_wr_max',
|
||||
'media', 'queues', 'replicate', 'rerror', 'ro', 'scsiblock', 'secs', 'serial', 'shared', 'snapshot',
|
||||
'ssd', 'trans', 'werror', 'wwn'
|
||||
]
|
||||
supported_bus_num_ranges = dict(
|
||||
ide=range(0, 4),
|
||||
scsi=range(0, 31),
|
||||
sata=range(0, 6),
|
||||
virtio=range(0, 16),
|
||||
unused=range(0, 256)
|
||||
)
|
||||
|
||||
def get_create_attributes(self):
|
||||
# Sanitize parameters dictionary:
|
||||
# - Remove not defined args
|
||||
# - Ensure True and False converted to int.
|
||||
# - Remove unnecessary parameters
|
||||
params = dict((k, v) for k, v in self.module.params.items() if v is not None and k in self.create_update_fields)
|
||||
params.update(dict((k, int(v)) for k, v in params.items() if isinstance(v, bool)))
|
||||
return params
|
||||
|
||||
def create_disk(self, disk, vmid, vm, vm_config):
|
||||
create = self.module.params['create']
|
||||
if create == 'disabled' and disk not in vm_config:
|
||||
# NOOP
|
||||
return False, "Disk %s not found in VM %s and creation was disabled in parameters." % (disk, vmid)
|
||||
|
||||
if (create == 'regular' and disk not in vm_config) or (create == 'forced'):
|
||||
# CREATE
|
||||
attributes = self.get_create_attributes()
|
||||
import_string = attributes.pop('import_from', None)
|
||||
|
||||
if import_string:
|
||||
config_str = "%s:%s,import-from=%s" % (self.module.params["storage"], "0", import_string)
|
||||
else:
|
||||
config_str = "%s:%s" % (self.module.params["storage"], self.module.params["size"])
|
||||
|
||||
for k, v in attributes.items():
|
||||
config_str += ',%s=%s' % (k, v)
|
||||
|
||||
create_disk = {self.module.params["disk"]: config_str}
|
||||
self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(**create_disk)
|
||||
return True, "Disk %s created in VM %s" % (disk, vmid)
|
||||
|
||||
if create in ['disabled', 'regular'] and disk in vm_config:
|
||||
# UPDATE
|
||||
disk_config = disk_conf_str_to_dict(vm_config[disk])
|
||||
config_str = disk_config["volume"]
|
||||
attributes = self.get_create_attributes()
|
||||
# 'import_from' fails on disk updates
|
||||
attributes.pop('import_from', None)
|
||||
|
||||
for k, v in attributes.items():
|
||||
config_str += ',%s=%s' % (k, v)
|
||||
|
||||
# Now compare old and new config to detect if changes are needed
|
||||
for option in ['size', 'storage_name', 'volume', 'volume_name']:
|
||||
attributes.update({option: disk_config[option]})
|
||||
# Values in params are numbers, but strings are needed to compare with disk_config
|
||||
attributes = dict((k, str(v)) for k, v in attributes.items())
|
||||
if disk_config == attributes:
|
||||
return False, "Disk %s is up to date in VM %s" % (disk, vmid)
|
||||
|
||||
update_disk = {self.module.params["disk"]: config_str}
|
||||
self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(**update_disk)
|
||||
return True, "Disk %s updated in VM %s" % (disk, vmid)
|
||||
|
||||
def move_disk(self, disk, vmid, vm, vm_config):
|
||||
params = dict()
|
||||
params['disk'] = disk
|
||||
params['vmid'] = vmid
|
||||
params['bwlimit'] = self.module.params['bwlimit']
|
||||
params['storage'] = self.module.params['target_storage']
|
||||
params['target-disk'] = self.module.params['target_disk']
|
||||
params['target-vmid'] = self.module.params['target_vmid']
|
||||
params['format'] = self.module.params['format']
|
||||
params['delete'] = 1 if self.module.params.get('delete_moved', False) else 0
|
||||
# Remove not defined args
|
||||
params = dict((k, v) for k, v in params.items() if v is not None)
|
||||
|
||||
if params.get('storage', False):
|
||||
disk_config = disk_conf_str_to_dict(vm_config[disk])
|
||||
if params['storage'] == disk_config['storage_name']:
|
||||
return False
|
||||
|
||||
taskid = self.proxmox_api.nodes(vm['node']).qemu(vmid).move_disk.post(**params)
|
||||
timeout = self.module.params['timeout']
|
||||
while timeout:
|
||||
status_data = self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()
|
||||
if status_data['status'] == 'stopped' and status_data['exitstatus'] == 'OK':
|
||||
return True
|
||||
if timeout <= 0:
|
||||
self.module.fail_json(
|
||||
msg='Reached timeout while waiting for moving VM disk. Last line in task before timeout: %s' %
|
||||
self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
|
||||
|
||||
sleep(1)
|
||||
timeout -= 1
|
||||
return True
|
||||
|
||||
|
||||
def main():
|
||||
module_args = proxmox_auth_argument_spec()
|
||||
disk_args = dict(
|
||||
# Proxmox native parameters
|
||||
aio=dict(type='str', choices=['native', 'threads', 'io_uring']),
|
||||
backup=dict(type='bool'),
|
||||
bps_max_length=dict(type='int'),
|
||||
bps_rd_max_length=dict(type='int'),
|
||||
bps_wr_max_length=dict(type='int'),
|
||||
cache=dict(type='str', choices=['none', 'writethrough', 'writeback', 'unsafe', 'directsync']),
|
||||
cyls=dict(type='int'),
|
||||
detect_zeroes=dict(type='bool'),
|
||||
discard=dict(type='str', choices=['ignore', 'on']),
|
||||
format=dict(type='str', choices=['raw', 'cow', 'qcow', 'qed', 'qcow2', 'vmdk', 'cloop']),
|
||||
heads=dict(type='int'),
|
||||
import_from=dict(type='str'),
|
||||
iops=dict(type='int'),
|
||||
iops_max=dict(type='int'),
|
||||
iops_max_length=dict(type='int'),
|
||||
iops_rd=dict(type='int'),
|
||||
iops_rd_max=dict(type='int'),
|
||||
iops_rd_max_length=dict(type='int'),
|
||||
iops_wr=dict(type='int'),
|
||||
iops_wr_max=dict(type='int'),
|
||||
iops_wr_max_length=dict(type='int'),
|
||||
iothread=dict(type='bool'),
|
||||
mbps=dict(type='float'),
|
||||
mbps_max=dict(type='float'),
|
||||
mbps_rd=dict(type='float'),
|
||||
mbps_rd_max=dict(type='float'),
|
||||
mbps_wr=dict(type='float'),
|
||||
mbps_wr_max=dict(type='float'),
|
||||
media=dict(type='str', choices=['cdrom', 'disk']),
|
||||
queues=dict(type='int'),
|
||||
replicate=dict(type='bool'),
|
||||
rerror=dict(type='str', choices=['ignore', 'report', 'stop']),
|
||||
ro=dict(type='bool'),
|
||||
scsiblock=dict(type='bool'),
|
||||
secs=dict(type='int'),
|
||||
serial=dict(type='str'),
|
||||
shared=dict(type='bool'),
|
||||
snapshot=dict(type='bool'),
|
||||
ssd=dict(type='bool'),
|
||||
trans=dict(type='str', choices=['auto', 'lba', 'none']),
|
||||
werror=dict(type='str', choices=['enospc', 'ignore', 'report', 'stop']),
|
||||
wwn=dict(type='str'),
|
||||
|
||||
# Disk moving relates parameters
|
||||
bwlimit=dict(type='int'),
|
||||
target_storage=dict(type='str'),
|
||||
target_disk=dict(type='str'),
|
||||
target_vmid=dict(type='int'),
|
||||
delete_moved=dict(type='bool'),
|
||||
timeout=dict(type='int', default='600'),
|
||||
|
||||
# Module related parameters
|
||||
name=dict(type='str'),
|
||||
vmid=dict(type='int'),
|
||||
disk=dict(type='str', required=True),
|
||||
storage=dict(type='str'),
|
||||
size=dict(type='str'),
|
||||
state=dict(type='str', choices=['present', 'resized', 'detached', 'moved', 'absent'],
|
||||
default='present'),
|
||||
create=dict(type='str', choices=['disabled', 'regular', 'forced'], default='regular'),
|
||||
)
|
||||
|
||||
module_args.update(disk_args)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=module_args,
|
||||
required_together=[('api_token_id', 'api_token_secret')],
|
||||
required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')],
|
||||
required_if=[
|
||||
('create', 'forced', ['storage']),
|
||||
('state', 'resized', ['size']),
|
||||
],
|
||||
required_by={
|
||||
'target_disk': 'target_vmid',
|
||||
'mbps_max': 'mbps',
|
||||
'mbps_rd_max': 'mbps_rd',
|
||||
'mbps_wr_max': 'mbps_wr',
|
||||
'bps_max_length': 'mbps_max',
|
||||
'bps_rd_max_length': 'mbps_rd_max',
|
||||
'bps_wr_max_length': 'mbps_wr_max',
|
||||
'iops_max': 'iops',
|
||||
'iops_rd_max': 'iops_rd',
|
||||
'iops_wr_max': 'iops_wr',
|
||||
'iops_max_length': 'iops_max',
|
||||
'iops_rd_max_length': 'iops_rd_max',
|
||||
'iops_wr_max_length': 'iops_wr_max',
|
||||
},
|
||||
supports_check_mode=False,
|
||||
mutually_exclusive=[
|
||||
('target_vmid', 'target_storage'),
|
||||
('mbps', 'mbps_rd'),
|
||||
('mbps', 'mbps_wr'),
|
||||
('iops', 'iops_rd'),
|
||||
('iops', 'iops_wr'),
|
||||
('import_from', 'size'),
|
||||
]
|
||||
)
|
||||
|
||||
proxmox = ProxmoxDiskAnsible(module)
|
||||
|
||||
disk = module.params['disk']
|
||||
# Verify disk name has appropriate name
|
||||
disk_regex = compile(r'^([a-z]+)([0-9]+)$')
|
||||
disk_bus = sub(disk_regex, r'\1', disk)
|
||||
disk_number = int(sub(disk_regex, r'\2', disk))
|
||||
if disk_bus not in proxmox.supported_bus_num_ranges:
|
||||
proxmox.module.fail_json(msg='Unsupported disk bus: %s' % disk_bus)
|
||||
elif disk_number not in proxmox.supported_bus_num_ranges[disk_bus]:
|
||||
bus_range = proxmox.supported_bus_num_ranges[disk_bus]
|
||||
proxmox.module.fail_json(msg='Disk %s number not in range %s..%s ' % (disk, bus_range[0], bus_range[-1]))
|
||||
|
||||
name = module.params['name']
|
||||
state = module.params['state']
|
||||
vmid = module.params['vmid'] or proxmox.get_vmid(name)
|
||||
|
||||
# Ensure VM id exists and retrieve its config
|
||||
vm = None
|
||||
vm_config = None
|
||||
try:
|
||||
vm = proxmox.get_vm(vmid)
|
||||
vm_config = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).config.get()
|
||||
except Exception as e:
|
||||
proxmox.module.fail_json(msg='Getting information for VM %s failed with exception: %s' % (vmid, str(e)))
|
||||
|
||||
# Do not try to perform actions on missing disk
|
||||
if disk not in vm_config and state in ['resized', 'moved']:
|
||||
module.fail_json(vmid=vmid, msg='Unable to process missing disk %s in VM %s' % (disk, vmid))
|
||||
|
||||
if state == 'present':
|
||||
try:
|
||||
success, message = proxmox.create_disk(disk, vmid, vm, vm_config)
|
||||
if success:
|
||||
module.exit_json(changed=True, vmid=vmid, msg=message)
|
||||
else:
|
||||
module.exit_json(changed=False, vmid=vmid, msg=message)
|
||||
except Exception as e:
|
||||
module.fail_json(vmid=vmid, msg='Unable to create/update disk %s in VM %s: %s' % (disk, vmid, str(e)))
|
||||
|
||||
elif state == 'detached':
|
||||
try:
|
||||
if disk_bus == 'unused':
|
||||
module.exit_json(changed=False, vmid=vmid, msg='Disk %s already detached in VM %s' % (disk, vmid))
|
||||
if disk not in vm_config:
|
||||
module.exit_json(changed=False, vmid=vmid, msg="Disk %s not present in VM %s config" % (disk, vmid))
|
||||
proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).unlink.put(idlist=disk, force=0)
|
||||
module.exit_json(changed=True, vmid=vmid, msg="Disk %s detached from VM %s" % (disk, vmid))
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Failed to detach disk %s from VM %s with exception: %s" % (disk, vmid, str(e)))
|
||||
|
||||
elif state == 'moved':
|
||||
try:
|
||||
disk_config = disk_conf_str_to_dict(vm_config[disk])
|
||||
disk_storage = disk_config["storage_name"]
|
||||
if proxmox.move_disk(disk, vmid, vm, vm_config):
|
||||
module.exit_json(changed=True, vmid=vmid,
|
||||
msg="Disk %s moved from VM %s storage %s" % (disk, vmid, disk_storage))
|
||||
else:
|
||||
module.exit_json(changed=False, vmid=vmid, msg="Disk %s already at %s storage" % (disk, disk_storage))
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Failed to move disk %s in VM %s with exception: %s" % (disk, vmid, str(e)))
|
||||
|
||||
elif state == 'resized':
|
||||
try:
|
||||
size = module.params['size']
|
||||
if not match(r'^\+?\d+(\.\d+)?[KMGT]?$', size):
|
||||
module.fail_json(msg="Unrecognized size pattern for disk %s: %s" % (disk, size))
|
||||
disk_config = disk_conf_str_to_dict(vm_config[disk])
|
||||
actual_size = disk_config['size']
|
||||
if size == actual_size:
|
||||
module.exit_json(changed=False, vmid=vmid, msg="Disk %s is already %s size" % (disk, size))
|
||||
proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).resize.set(disk=disk, size=size)
|
||||
module.exit_json(changed=True, vmid=vmid, msg="Disk %s resized in VM %s" % (disk, vmid))
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Failed to resize disk %s in VM %s with exception: %s" % (disk, vmid, str(e)))
|
||||
|
||||
elif state == 'absent':
|
||||
try:
|
||||
if disk not in vm_config:
|
||||
module.exit_json(changed=False, vmid=vmid, msg="Disk %s is already absent in VM %s" % (disk, vmid))
|
||||
proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).unlink.put(idlist=disk, force=1)
|
||||
module.exit_json(changed=True, vmid=vmid, msg="Disk %s removed from VM %s" % (disk, vmid))
|
||||
except Exception as e:
|
||||
module.fail_json(vmid=vmid, msg='Unable to remove disk %s from VM %s: %s' % (disk, vmid, str(e)))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: proxmox_kvm
|
||||
short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster.
|
||||
short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster
|
||||
description:
|
||||
- Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster.
|
||||
- Since community.general 4.0.0 on, there are no more default values, see I(proxmox_default_behavior).
|
||||
@@ -866,8 +866,7 @@ class ProxmoxKvmAnsible(ProxmoxAnsible):
|
||||
timeout = self.module.params['timeout']
|
||||
|
||||
while timeout:
|
||||
task = self.proxmox_api.nodes(node).tasks(taskid).status.get()
|
||||
if task['status'] == 'stopped' and task['exitstatus'] == 'OK':
|
||||
if self.api_task_ok(node, taskid):
|
||||
# Wait an extra second as the API can be a ahead of the hypervisor
|
||||
time.sleep(1)
|
||||
return True
|
||||
@@ -964,7 +963,7 @@ class ProxmoxKvmAnsible(ProxmoxAnsible):
|
||||
if 'agent' in kwargs:
|
||||
try:
|
||||
# The API also allows booleans instead of e.g. `enabled=1` for backward-compatibility.
|
||||
kwargs['agent'] = boolean(kwargs['agent'], strict=True)
|
||||
kwargs['agent'] = int(boolean(kwargs['agent'], strict=True))
|
||||
except TypeError:
|
||||
# Not something that Ansible would parse as a boolean.
|
||||
pass
|
||||
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: proxmox_nic
|
||||
short_description: Management of a NIC of a Qemu(KVM) VM in a Proxmox VE cluster.
|
||||
short_description: Management of a NIC of a Qemu(KVM) VM in a Proxmox VE cluster
|
||||
version_added: 3.1.0
|
||||
description:
|
||||
- Allows you to create/update/delete a NIC on Qemu(KVM) Virtual Machines in a Proxmox VE cluster.
|
||||
@@ -223,7 +223,7 @@ class ProxmoxNicAnsible(ProxmoxAnsible):
|
||||
|
||||
if interface in vminfo:
|
||||
if not self.module.check_mode:
|
||||
self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(vmid=vmid, delete=interface)
|
||||
self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(delete=interface)
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
@@ -38,6 +38,17 @@ options:
|
||||
- For removal from config file, even if removing disk snapshot fails.
|
||||
default: false
|
||||
type: bool
|
||||
unbind:
|
||||
description:
|
||||
- This option only applies to LXC containers.
|
||||
- Allows to snapshot a container even if it has configured mountpoints.
|
||||
- Temporarily disables all configured mountpoints, takes snapshot, and finally restores original configuration.
|
||||
- If running, the container will be stopped and restarted to apply config changes.
|
||||
- Due to restrictions in the Proxmox API this option can only be used authenticating as C(root@pam) with I(api_password), API tokens do not work either.
|
||||
- See U(https://pve.proxmox.com/pve-docs/api-viewer/#/nodes/{node}/lxc/{vmid}/config) (PUT tab) for more details.
|
||||
default: false
|
||||
type: bool
|
||||
version_added: 5.7.0
|
||||
vmstate:
|
||||
description:
|
||||
- Snapshot includes RAM.
|
||||
@@ -78,6 +89,16 @@ EXAMPLES = r'''
|
||||
state: present
|
||||
snapname: pre-updates
|
||||
|
||||
- name: Create new snapshot for a container with configured mountpoints
|
||||
community.general.proxmox_snap:
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
vmid: 100
|
||||
state: present
|
||||
unbind: true # requires root@pam+password auth, API tokens are not supported
|
||||
snapname: pre-updates
|
||||
|
||||
- name: Remove container snapshot
|
||||
community.general.proxmox_snap:
|
||||
api_user: root@pam
|
||||
@@ -110,17 +131,89 @@ class ProxmoxSnapAnsible(ProxmoxAnsible):
|
||||
def snapshot(self, vm, vmid):
|
||||
return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).snapshot
|
||||
|
||||
def snapshot_create(self, vm, vmid, timeout, snapname, description, vmstate):
|
||||
def vmconfig(self, vm, vmid):
|
||||
return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).config
|
||||
|
||||
def vmstatus(self, vm, vmid):
|
||||
return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).status
|
||||
|
||||
def _container_mp_get(self, vm, vmid):
|
||||
cfg = self.vmconfig(vm, vmid).get()
|
||||
mountpoints = {}
|
||||
for key, value in cfg.items():
|
||||
if key.startswith('mp'):
|
||||
mountpoints[key] = value
|
||||
return mountpoints
|
||||
|
||||
def _container_mp_disable(self, vm, vmid, timeout, unbind, mountpoints, vmstatus):
|
||||
# shutdown container if running
|
||||
if vmstatus == 'running':
|
||||
self.shutdown_instance(vm, vmid, timeout)
|
||||
# delete all mountpoints configs
|
||||
self.vmconfig(vm, vmid).put(delete=' '.join(mountpoints))
|
||||
|
||||
def _container_mp_restore(self, vm, vmid, timeout, unbind, mountpoints, vmstatus):
|
||||
# NOTE: requires auth as `root@pam`, API tokens are not supported
|
||||
# see https://pve.proxmox.com/pve-docs/api-viewer/#/nodes/{node}/lxc/{vmid}/config
|
||||
# restore original config
|
||||
self.vmconfig(vm, vmid).put(**mountpoints)
|
||||
# start container (if was running before snap)
|
||||
if vmstatus == 'running':
|
||||
self.start_instance(vm, vmid, timeout)
|
||||
|
||||
def start_instance(self, vm, vmid, timeout):
|
||||
taskid = self.vmstatus(vm, vmid).start.post()
|
||||
while timeout:
|
||||
if self.api_task_ok(vm['node'], taskid):
|
||||
return True
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
self.module.fail_json(msg='Reached timeout while waiting for VM to start. Last line in task before timeout: %s' %
|
||||
self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
def shutdown_instance(self, vm, vmid, timeout):
|
||||
taskid = self.vmstatus(vm, vmid).shutdown.post()
|
||||
while timeout:
|
||||
if self.api_task_ok(vm['node'], taskid):
|
||||
return True
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
self.module.fail_json(msg='Reached timeout while waiting for VM to stop. Last line in task before timeout: %s' %
|
||||
self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
def snapshot_create(self, vm, vmid, timeout, snapname, description, vmstate, unbind):
|
||||
if self.module.check_mode:
|
||||
return True
|
||||
|
||||
if vm['type'] == 'lxc':
|
||||
if unbind is True:
|
||||
# check if credentials will work
|
||||
# WARN: it is crucial this check runs here!
|
||||
# The correct permissions are required only to reconfig mounts.
|
||||
# Not checking now would allow to remove the configuration BUT
|
||||
# fail later, leaving the container in a misconfigured state.
|
||||
if (
|
||||
self.module.params['api_user'] != 'root@pam'
|
||||
or not self.module.params['api_password']
|
||||
):
|
||||
self.module.fail_json(msg='`unbind=True` requires authentication as `root@pam` with `api_password`, API tokens are not supported.')
|
||||
return False
|
||||
mountpoints = self._container_mp_get(vm, vmid)
|
||||
vmstatus = self.vmstatus(vm, vmid).current().get()['status']
|
||||
if mountpoints:
|
||||
self._container_mp_disable(vm, vmid, timeout, unbind, mountpoints, vmstatus)
|
||||
taskid = self.snapshot(vm, vmid).post(snapname=snapname, description=description)
|
||||
else:
|
||||
taskid = self.snapshot(vm, vmid).post(snapname=snapname, description=description, vmstate=int(vmstate))
|
||||
|
||||
while timeout:
|
||||
status_data = self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()
|
||||
if status_data['status'] == 'stopped' and status_data['exitstatus'] == 'OK':
|
||||
if self.api_task_ok(vm['node'], taskid):
|
||||
if vm['type'] == 'lxc' and unbind is True and mountpoints:
|
||||
self._container_mp_restore(vm, vmid, timeout, unbind, mountpoints, vmstatus)
|
||||
return True
|
||||
if timeout == 0:
|
||||
self.module.fail_json(msg='Reached timeout while waiting for creating VM snapshot. Last line in task before timeout: %s' %
|
||||
@@ -128,6 +221,8 @@ class ProxmoxSnapAnsible(ProxmoxAnsible):
|
||||
|
||||
time.sleep(1)
|
||||
timeout -= 1
|
||||
if vm['type'] == 'lxc' and unbind is True and mountpoints:
|
||||
self._container_mp_restore(vm, vmid, timeout, unbind, mountpoints, vmstatus)
|
||||
return False
|
||||
|
||||
def snapshot_remove(self, vm, vmid, timeout, snapname, force):
|
||||
@@ -136,8 +231,7 @@ class ProxmoxSnapAnsible(ProxmoxAnsible):
|
||||
|
||||
taskid = self.snapshot(vm, vmid).delete(snapname, force=int(force))
|
||||
while timeout:
|
||||
status_data = self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()
|
||||
if status_data['status'] == 'stopped' and status_data['exitstatus'] == 'OK':
|
||||
if self.api_task_ok(vm['node'], taskid):
|
||||
return True
|
||||
if timeout == 0:
|
||||
self.module.fail_json(msg='Reached timeout while waiting for removing VM snapshot. Last line in task before timeout: %s' %
|
||||
@@ -153,8 +247,7 @@ class ProxmoxSnapAnsible(ProxmoxAnsible):
|
||||
|
||||
taskid = self.snapshot(vm, vmid)(snapname).post("rollback")
|
||||
while timeout:
|
||||
status_data = self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()
|
||||
if status_data['status'] == 'stopped' and status_data['exitstatus'] == 'OK':
|
||||
if self.api_task_ok(vm['node'], taskid):
|
||||
return True
|
||||
if timeout == 0:
|
||||
self.module.fail_json(msg='Reached timeout while waiting for rolling back VM snapshot. Last line in task before timeout: %s' %
|
||||
@@ -175,6 +268,7 @@ def main():
|
||||
description=dict(type='str'),
|
||||
snapname=dict(type='str', default='ansible_snap'),
|
||||
force=dict(type='bool', default=False),
|
||||
unbind=dict(type='bool', default=False),
|
||||
vmstate=dict(type='bool', default=False),
|
||||
)
|
||||
module_args.update(snap_args)
|
||||
@@ -193,6 +287,7 @@ def main():
|
||||
snapname = module.params['snapname']
|
||||
timeout = module.params['timeout']
|
||||
force = module.params['force']
|
||||
unbind = module.params['unbind']
|
||||
vmstate = module.params['vmstate']
|
||||
|
||||
# If hostname is set get the VM id from ProxmoxAPI
|
||||
@@ -209,7 +304,7 @@ def main():
|
||||
if i['name'] == snapname:
|
||||
module.exit_json(changed=False, msg="Snapshot %s is already present" % snapname)
|
||||
|
||||
if proxmox.snapshot_create(vm, vmid, timeout, snapname, description, vmstate):
|
||||
if proxmox.snapshot_create(vm, vmid, timeout, snapname, description, vmstate, unbind):
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=False, msg="Snapshot %s would be created" % snapname)
|
||||
else:
|
||||
|
||||
@@ -12,7 +12,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: proxmox_template
|
||||
short_description: management of OS templates in Proxmox VE cluster
|
||||
short_description: Management of OS templates in Proxmox VE cluster
|
||||
description:
|
||||
- allows you to upload/delete templates in Proxmox VE cluster
|
||||
options:
|
||||
@@ -131,8 +131,7 @@ class ProxmoxTemplateAnsible(ProxmoxAnsible):
|
||||
Check the task status and wait until the task is completed or the timeout is reached.
|
||||
"""
|
||||
while timeout:
|
||||
task_status = self.proxmox_api.nodes(node).tasks(taskid).status.get()
|
||||
if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
|
||||
if self.api_task_ok(node, taskid):
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
|
||||
@@ -54,6 +54,7 @@ options:
|
||||
description:
|
||||
- The RHEV/oVirt cluster in which you want you VM to start.
|
||||
type: str
|
||||
default: ''
|
||||
datacenter:
|
||||
description:
|
||||
- The RHEV/oVirt datacenter in which you want you VM to start.
|
||||
|
||||
@@ -35,11 +35,13 @@ options:
|
||||
- The name of the serverless framework project stage to deploy to.
|
||||
- This uses the serverless framework default "dev".
|
||||
type: str
|
||||
default: ''
|
||||
region:
|
||||
description:
|
||||
- AWS region to deploy the service to.
|
||||
- This parameter defaults to C(us-east-1).
|
||||
type: str
|
||||
default: ''
|
||||
deploy:
|
||||
description:
|
||||
- Whether or not to deploy artifacts after building them.
|
||||
|
||||
@@ -80,15 +80,32 @@ options:
|
||||
aliases: [ 'variables_file' ]
|
||||
variables:
|
||||
description:
|
||||
- A group of key-values to override template variables or those in
|
||||
variables files.
|
||||
- A group of key-values pairs to override template variables or those in variables files.
|
||||
By default, only string and number values are allowed, which are passed on unquoted.
|
||||
- Support complex variable structures (lists, dictionaries, numbers, and booleans) to reflect terraform variable syntax when I(complex_vars=true).
|
||||
- Ansible integers or floats are mapped to terraform numbers.
|
||||
- Ansible strings are mapped to terraform strings.
|
||||
- Ansible dictionaries are mapped to terraform objects.
|
||||
- Ansible lists are mapped to terraform lists.
|
||||
- Ansible booleans are mapped to terraform booleans.
|
||||
- "B(Note) passwords passed as variables will be visible in the log output. Make sure to use I(no_log=true) in production!"
|
||||
type: dict
|
||||
complex_vars:
|
||||
description:
|
||||
- Enable/disable capability to handle complex variable structures for C(terraform).
|
||||
- If C(true) the I(variables) also accepts dictionaries, lists, and booleans to be passed to C(terraform).
|
||||
Strings that are passed are correctly quoted.
|
||||
- When disabled, supports only simple variables (strings, integers, and floats), and passes them on unquoted.
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 5.7.0
|
||||
targets:
|
||||
description:
|
||||
- A list of specific resources to target in this plan/application. The
|
||||
resources selected here will also auto-include any dependencies.
|
||||
type: list
|
||||
elements: str
|
||||
default: []
|
||||
lock:
|
||||
description:
|
||||
- Enable statefile locking, if you use a service that accepts locks (such
|
||||
@@ -188,6 +205,26 @@ EXAMPLES = """
|
||||
- /path/to/plugins_dir_1
|
||||
- /path/to/plugins_dir_2
|
||||
|
||||
- name: Complex variables example
|
||||
community.general.terraform:
|
||||
project_path: '{{ project_dir }}'
|
||||
state: present
|
||||
camplex_vars: true
|
||||
variables:
|
||||
vm_name: "{{ inventory_hostname }}"
|
||||
vm_vcpus: 2
|
||||
vm_mem: 2048
|
||||
vm_additional_disks:
|
||||
- label: "Third Disk"
|
||||
size: 40
|
||||
thin_provisioned: true
|
||||
unit_number: 2
|
||||
- label: "Fourth Disk"
|
||||
size: 22
|
||||
thin_provisioned: true
|
||||
unit_number: 3
|
||||
force_init: true
|
||||
|
||||
### Example directory structure for plugin_paths example
|
||||
# $ tree /path/to/plugins_dir_1
|
||||
# /path/to/plugins_dir_1/
|
||||
@@ -237,6 +274,7 @@ import os
|
||||
import json
|
||||
import tempfile
|
||||
from ansible.module_utils.six.moves import shlex_quote
|
||||
from ansible.module_utils.six import integer_types
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
@@ -273,7 +311,7 @@ def _state_args(state_file):
|
||||
|
||||
|
||||
def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths):
|
||||
command = [bin_path, 'init', '-input=false']
|
||||
command = [bin_path, 'init', '-input=false', '-no-color']
|
||||
if backend_config:
|
||||
for key, val in backend_config.items():
|
||||
command.extend([
|
||||
@@ -298,7 +336,7 @@ def get_workspace_context(bin_path, project_path):
|
||||
command = [bin_path, 'workspace', 'list', '-no-color']
|
||||
rc, out, err = module.run_command(command, cwd=project_path)
|
||||
if rc != 0:
|
||||
module.warn("Failed to list Terraform workspaces:\r\n{0}".format(err))
|
||||
module.warn("Failed to list Terraform workspaces:\n{0}".format(err))
|
||||
for item in out.split('\n'):
|
||||
stripped_item = item.strip()
|
||||
if not stripped_item:
|
||||
@@ -360,12 +398,25 @@ def build_plan(command, project_path, variables_args, state_file, targets, state
|
||||
return plan_path, False, out, err, plan_command if state == 'planned' else command
|
||||
elif rc == 1:
|
||||
# failure to plan
|
||||
module.fail_json(msg='Terraform plan could not be created\r\nSTDOUT: {0}\r\n\r\nSTDERR: {1}'.format(out, err))
|
||||
module.fail_json(
|
||||
msg='Terraform plan could not be created\nSTDOUT: {out}\nSTDERR: {err}\nCOMMAND: {cmd} {args}'.format(
|
||||
out=out,
|
||||
err=err,
|
||||
cmd=' '.join(plan_command),
|
||||
args=' '.join([shlex_quote(arg) for arg in variables_args])
|
||||
)
|
||||
)
|
||||
elif rc == 2:
|
||||
# changes, but successful
|
||||
return plan_path, True, out, err, plan_command if state == 'planned' else command
|
||||
|
||||
module.fail_json(msg='Terraform plan failed with unexpected exit code {0}. \r\nSTDOUT: {1}\r\n\r\nSTDERR: {2}'.format(rc, out, err))
|
||||
module.fail_json(msg='Terraform plan failed with unexpected exit code {rc}.\nSTDOUT: {out}\nSTDERR: {err}\nCOMMAND: {cmd} {args}'.format(
|
||||
rc=rc,
|
||||
out=out,
|
||||
err=err,
|
||||
cmd=' '.join(plan_command),
|
||||
args=' '.join([shlex_quote(arg) for arg in variables_args])
|
||||
))
|
||||
|
||||
|
||||
def main():
|
||||
@@ -379,6 +430,7 @@ def main():
|
||||
purge_workspace=dict(type='bool', default=False),
|
||||
state=dict(default='present', choices=['present', 'absent', 'planned']),
|
||||
variables=dict(type='dict'),
|
||||
complex_vars=dict(type='bool', default=False),
|
||||
variables_files=dict(aliases=['variables_file'], type='list', elements='path'),
|
||||
plan_file=dict(type='path'),
|
||||
state_file=dict(type='path'),
|
||||
@@ -405,6 +457,7 @@ def main():
|
||||
purge_workspace = module.params.get('purge_workspace')
|
||||
state = module.params.get('state')
|
||||
variables = module.params.get('variables') or {}
|
||||
complex_vars = module.params.get('complex_vars')
|
||||
variables_files = module.params.get('variables_files')
|
||||
plan_file = module.params.get('plan_file')
|
||||
state_file = module.params.get('state_file')
|
||||
@@ -449,12 +502,77 @@ def main():
|
||||
if state == 'present' and module.params.get('parallelism') is not None:
|
||||
command.append('-parallelism=%d' % module.params.get('parallelism'))
|
||||
|
||||
def format_args(vars):
|
||||
if isinstance(vars, str):
|
||||
return '"{string}"'.format(string=vars.replace('\\', '\\\\').replace('"', '\\"'))
|
||||
elif isinstance(vars, bool):
|
||||
if vars:
|
||||
return 'true'
|
||||
else:
|
||||
return 'false'
|
||||
return str(vars)
|
||||
|
||||
def process_complex_args(vars):
|
||||
ret_out = []
|
||||
if isinstance(vars, dict):
|
||||
for k, v in vars.items():
|
||||
if isinstance(v, dict):
|
||||
ret_out.append('{0}={{{1}}}'.format(k, process_complex_args(v)))
|
||||
elif isinstance(v, list):
|
||||
ret_out.append("{0}={1}".format(k, process_complex_args(v)))
|
||||
elif isinstance(v, (integer_types, float, str, bool)):
|
||||
ret_out.append('{0}={1}'.format(k, format_args(v)))
|
||||
else:
|
||||
# only to handle anything unforeseen
|
||||
module.fail_json(msg="Supported types are, dictionaries, lists, strings, integer_types, boolean and float.")
|
||||
if isinstance(vars, list):
|
||||
l_out = []
|
||||
for item in vars:
|
||||
if isinstance(item, dict):
|
||||
l_out.append("{{{0}}}".format(process_complex_args(item)))
|
||||
elif isinstance(item, list):
|
||||
l_out.append("{0}".format(process_complex_args(item)))
|
||||
elif isinstance(item, (str, integer_types, float, bool)):
|
||||
l_out.append(format_args(item))
|
||||
else:
|
||||
# only to handle anything unforeseen
|
||||
module.fail_json(msg="Supported types are, dictionaries, lists, strings, integer_types, boolean and float.")
|
||||
|
||||
ret_out.append("[{0}]".format(",".join(l_out)))
|
||||
return ",".join(ret_out)
|
||||
|
||||
variables_args = []
|
||||
for k, v in variables.items():
|
||||
variables_args.extend([
|
||||
'-var',
|
||||
'{0}={1}'.format(k, v)
|
||||
])
|
||||
if complex_vars:
|
||||
for k, v in variables.items():
|
||||
if isinstance(v, dict):
|
||||
variables_args.extend([
|
||||
'-var',
|
||||
'{0}={{{1}}}'.format(k, process_complex_args(v))
|
||||
])
|
||||
elif isinstance(v, list):
|
||||
variables_args.extend([
|
||||
'-var',
|
||||
'{0}={1}'.format(k, process_complex_args(v))
|
||||
])
|
||||
# on the top-level we need to pass just the python string with necessary
|
||||
# terraform string escape sequences
|
||||
elif isinstance(v, str):
|
||||
variables_args.extend([
|
||||
'-var',
|
||||
"{0}={1}".format(k, v)
|
||||
])
|
||||
else:
|
||||
variables_args.extend([
|
||||
'-var',
|
||||
'{0}={1}'.format(k, format_args(v))
|
||||
])
|
||||
else:
|
||||
for k, v in variables.items():
|
||||
variables_args.extend([
|
||||
'-var',
|
||||
'{0}={1}'.format(k, v)
|
||||
])
|
||||
|
||||
if variables_files:
|
||||
for f in variables_files:
|
||||
variables_args.extend(['-var-file', f])
|
||||
|
||||
@@ -12,7 +12,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: xenserver_facts
|
||||
short_description: get facts reported on xenserver
|
||||
short_description: Get facts reported on xenserver
|
||||
description:
|
||||
- Reads data out of XenAPI, can be used instead of multiple xe commands.
|
||||
author:
|
||||
@@ -162,9 +162,7 @@ def get_srs(session):
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
supports_check_mode=True,
|
||||
)
|
||||
module = AnsibleModule({}, supports_check_mode=True)
|
||||
|
||||
if not HAVE_XENAPI:
|
||||
module.fail_json(changed=False, msg="python xen api required for this module")
|
||||
|
||||
@@ -10,7 +10,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: oneandone_firewall_policy
|
||||
short_description: Configure 1&1 firewall policy.
|
||||
short_description: Configure 1&1 firewall policy
|
||||
description:
|
||||
- Create, remove, reconfigure, update firewall policies.
|
||||
This module has a dependency on 1and1 >= 1.0
|
||||
@@ -48,6 +48,7 @@ options:
|
||||
(port_from, port_to, and source)
|
||||
type: list
|
||||
elements: dict
|
||||
default: []
|
||||
add_server_ips:
|
||||
description:
|
||||
- A list of server identifiers (id or name) to be assigned to a firewall policy.
|
||||
@@ -55,12 +56,14 @@ options:
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
default: []
|
||||
remove_server_ips:
|
||||
description:
|
||||
- A list of server IP ids to be unassigned from a firewall policy. Used in combination with update state.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
default: []
|
||||
add_rules:
|
||||
description:
|
||||
- A list of rules that will be added to an existing firewall policy.
|
||||
@@ -68,12 +71,14 @@ options:
|
||||
type: list
|
||||
elements: dict
|
||||
required: false
|
||||
default: []
|
||||
remove_rules:
|
||||
description:
|
||||
- A list of rule ids that will be removed from an existing firewall policy. Used in combination with update state.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
default: []
|
||||
description:
|
||||
description:
|
||||
- Firewall policy description. maxLength=256
|
||||
|
||||
@@ -10,7 +10,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: oneandone_load_balancer
|
||||
short_description: Configure 1&1 load balancer.
|
||||
short_description: Configure 1&1 load balancer
|
||||
description:
|
||||
- Create, remove, update load balancers.
|
||||
This module has a dependency on 1and1 >= 1.0
|
||||
@@ -86,6 +86,7 @@ options:
|
||||
port_balancer, and port_server parameters, in addition to source parameter, which is optional.
|
||||
type: list
|
||||
elements: dict
|
||||
default: []
|
||||
description:
|
||||
description:
|
||||
- Description of the load balancer. maxLength=256
|
||||
@@ -98,12 +99,14 @@ options:
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
default: []
|
||||
remove_server_ips:
|
||||
description:
|
||||
- A list of server IP ids to be unassigned from a load balancer. Used in combination with update state.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
default: []
|
||||
add_rules:
|
||||
description:
|
||||
- A list of rules that will be added to an existing load balancer.
|
||||
@@ -111,12 +114,14 @@ options:
|
||||
type: list
|
||||
elements: dict
|
||||
required: false
|
||||
default: []
|
||||
remove_rules:
|
||||
description:
|
||||
- A list of rule ids that will be removed from an existing load balancer. Used in combination with update state.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
default: []
|
||||
wait:
|
||||
description:
|
||||
- wait for the instance to be in state 'running' before returning
|
||||
|
||||
@@ -10,7 +10,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: oneandone_monitoring_policy
|
||||
short_description: Configure 1&1 monitoring policy.
|
||||
short_description: Configure 1&1 monitoring policy
|
||||
description:
|
||||
- Create, remove, update monitoring policies
|
||||
(and add/remove ports, processes, and servers).
|
||||
@@ -62,6 +62,7 @@ options:
|
||||
and value is used to advise when the value is exceeded.
|
||||
type: list
|
||||
elements: dict
|
||||
default: []
|
||||
suboptions:
|
||||
cpu:
|
||||
description:
|
||||
@@ -88,6 +89,7 @@ options:
|
||||
- Array of ports that will be monitoring.
|
||||
type: list
|
||||
elements: dict
|
||||
default: []
|
||||
suboptions:
|
||||
protocol:
|
||||
description:
|
||||
@@ -112,6 +114,7 @@ options:
|
||||
- Array of processes that will be monitoring.
|
||||
type: list
|
||||
elements: dict
|
||||
default: []
|
||||
suboptions:
|
||||
process:
|
||||
description:
|
||||
@@ -128,48 +131,56 @@ options:
|
||||
type: list
|
||||
elements: dict
|
||||
required: false
|
||||
default: []
|
||||
add_processes:
|
||||
description:
|
||||
- Processes to add to the monitoring policy.
|
||||
type: list
|
||||
elements: dict
|
||||
required: false
|
||||
default: []
|
||||
add_servers:
|
||||
description:
|
||||
- Servers to add to the monitoring policy.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
default: []
|
||||
remove_ports:
|
||||
description:
|
||||
- Ports to remove from the monitoring policy.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
default: []
|
||||
remove_processes:
|
||||
description:
|
||||
- Processes to remove from the monitoring policy.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
default: []
|
||||
remove_servers:
|
||||
description:
|
||||
- Servers to remove from the monitoring policy.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
default: []
|
||||
update_ports:
|
||||
description:
|
||||
- Ports to be updated on the monitoring policy.
|
||||
type: list
|
||||
elements: dict
|
||||
required: false
|
||||
default: []
|
||||
update_processes:
|
||||
description:
|
||||
- Processes to be updated on the monitoring policy.
|
||||
type: list
|
||||
elements: dict
|
||||
required: false
|
||||
default: []
|
||||
wait:
|
||||
description:
|
||||
- wait for the instance to be in state 'running' before returning
|
||||
|
||||
@@ -10,7 +10,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: oneandone_private_network
|
||||
short_description: Configure 1&1 private networking.
|
||||
short_description: Configure 1&1 private networking
|
||||
description:
|
||||
- Create, remove, reconfigure, update a private network.
|
||||
This module has a dependency on 1and1 >= 1.0
|
||||
@@ -62,11 +62,13 @@ options:
|
||||
- List of server identifiers (name or id) to be added to the private network.
|
||||
type: list
|
||||
elements: str
|
||||
default: []
|
||||
remove_members:
|
||||
description:
|
||||
- List of server identifiers (name or id) to be removed from the private network.
|
||||
type: list
|
||||
elements: str
|
||||
default: []
|
||||
wait:
|
||||
description:
|
||||
- wait for the instance to be in state 'running' before returning
|
||||
|
||||
@@ -10,7 +10,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: oneandone_public_ip
|
||||
short_description: Configure 1&1 public IPs.
|
||||
short_description: Configure 1&1 public IPs
|
||||
description:
|
||||
- Create, update, and remove public IPs.
|
||||
This module has a dependency on 1and1 >= 1.0
|
||||
|
||||
@@ -10,7 +10,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: oneandone_server
|
||||
short_description: Create, destroy, start, stop, and reboot a 1&1 Host server.
|
||||
short_description: Create, destroy, start, stop, and reboot a 1&1 Host server
|
||||
description:
|
||||
- Create, destroy, update, start, stop, and reboot a 1&1 Host server.
|
||||
When the server is created it can optionally wait for it to be 'running' before returning.
|
||||
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: online_server_info
|
||||
short_description: Gather information about Online servers.
|
||||
short_description: Gather information about Online servers
|
||||
description:
|
||||
- Gather information about the servers.
|
||||
- U(https://www.online.net/en/dedicated-server)
|
||||
|
||||
@@ -9,7 +9,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
module: online_user_info
|
||||
short_description: Gather information about Online user.
|
||||
short_description: Gather information about Online user
|
||||
description:
|
||||
- Gather information about the user.
|
||||
author:
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) Ansible Project
|
||||
# Copyright (c) 2018, Milan Ilic <milani@nordeus.com>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
@@ -8,25 +8,6 @@
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
"""
|
||||
(c) 2018, Milan Ilic <milani@nordeus.com>
|
||||
|
||||
This file is part of Ansible
|
||||
|
||||
Ansible is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
Ansible is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a clone of the GNU General Public License
|
||||
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: one_image
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) Ansible Project
|
||||
# Copyright (c) 2018, Milan Ilic <milani@nordeus.com>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
@@ -8,25 +8,6 @@
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
"""
|
||||
(c) 2018, Milan Ilic <milani@nordeus.com>
|
||||
|
||||
This file is part of Ansible
|
||||
|
||||
Ansible is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
Ansible is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a clone of the GNU General Public License
|
||||
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: one_image_info
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) Ansible Project
|
||||
# Copyright (c) 2017, Milan Ilic <milani@nordeus.com>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
@@ -8,25 +8,6 @@
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
"""
|
||||
(c) 2017, Milan Ilic <milani@nordeus.com>
|
||||
|
||||
This file is part of Ansible
|
||||
|
||||
Ansible is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
Ansible is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: one_service
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) Ansible Project
|
||||
# Copyright (c) 2017, Milan Ilic <milani@nordeus.com>
|
||||
# Copyright (c) 2019, Jan Meerkamp <meerkamp@dvv.de>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
@@ -8,26 +9,6 @@
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
"""
|
||||
(c) 2017, Milan Ilic <milani@nordeus.com>
|
||||
(c) 2019, Jan Meerkamp <meerkamp@dvv.de>
|
||||
|
||||
This file is part of Ansible
|
||||
|
||||
Ansible is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
Ansible is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: one_vm
|
||||
@@ -989,7 +970,7 @@ def get_vm_labels_and_attributes_dict(client, vm_id):
|
||||
if key != 'LABELS':
|
||||
attrs_dict[key] = value
|
||||
else:
|
||||
if key is not None:
|
||||
if key is not None and value is not None:
|
||||
labels_list = value.split(',')
|
||||
|
||||
return labels_list, attrs_dict
|
||||
|
||||
@@ -14,7 +14,7 @@ DOCUMENTATION = '''
|
||||
---
|
||||
module: packet_device
|
||||
|
||||
short_description: Manage a bare metal server in the Packet Host.
|
||||
short_description: Manage a bare metal server in the Packet Host
|
||||
|
||||
description:
|
||||
- Manage a bare metal server in the Packet Host (a "device" in the API terms).
|
||||
@@ -136,6 +136,7 @@ options:
|
||||
- URL of custom iPXE script for provisioning.
|
||||
- More about custom iPXE for Packet devices at U(https://help.packet.net/technical/infrastructure/custom-ipxe).
|
||||
type: str
|
||||
default: ''
|
||||
|
||||
always_pxe:
|
||||
description:
|
||||
|
||||
@@ -14,7 +14,7 @@ DOCUMENTATION = '''
|
||||
---
|
||||
module: packet_ip_subnet
|
||||
|
||||
short_description: Assign IP subnet to a bare metal server.
|
||||
short_description: Assign IP subnet to a bare metal server
|
||||
|
||||
description:
|
||||
- Assign or unassign IPv4 or IPv6 subnets to or from a device in the Packet host.
|
||||
|
||||
@@ -14,7 +14,7 @@ DOCUMENTATION = '''
|
||||
---
|
||||
module: packet_project
|
||||
|
||||
short_description: Create/delete a project in Packet host.
|
||||
short_description: Create/delete a project in Packet host
|
||||
|
||||
description:
|
||||
- Create/delete a project in Packet host.
|
||||
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: packet_sshkey
|
||||
short_description: Create/delete an SSH key in Packet host.
|
||||
short_description: Create/delete an SSH key in Packet host
|
||||
description:
|
||||
- Create/delete an SSH key in Packet host.
|
||||
- API is documented at U(https://www.packet.net/help/api/#page:ssh-keys,header:ssh-keys-ssh-keys-post).
|
||||
|
||||
@@ -13,7 +13,7 @@ DOCUMENTATION = '''
|
||||
---
|
||||
module: packet_volume
|
||||
|
||||
short_description: Create/delete a volume in Packet host.
|
||||
short_description: Create/delete a volume in Packet host
|
||||
|
||||
description:
|
||||
- Create/delete a volume in Packet host.
|
||||
|
||||
@@ -14,7 +14,7 @@ DOCUMENTATION = '''
|
||||
---
|
||||
module: packet_volume_attachment
|
||||
|
||||
short_description: Attach/detach a volume to a device in the Packet host.
|
||||
short_description: Attach/detach a volume to a device in the Packet host
|
||||
|
||||
description:
|
||||
- Attach/detach a volume to a device in the Packet host.
|
||||
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: profitbricks
|
||||
short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine.
|
||||
short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine
|
||||
description:
|
||||
- Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created it can optionally wait
|
||||
for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0
|
||||
@@ -38,6 +38,7 @@ options:
|
||||
- Public SSH keys allowing access to the virtual machine.
|
||||
type: list
|
||||
elements: str
|
||||
default: []
|
||||
datacenter:
|
||||
description:
|
||||
- The datacenter to provision this virtual machine.
|
||||
@@ -74,6 +75,7 @@ options:
|
||||
- list of instance ids, currently only used when state='absent' to remove instances.
|
||||
type: list
|
||||
elements: str
|
||||
default: []
|
||||
count:
|
||||
description:
|
||||
- The number of virtual machines to create.
|
||||
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: profitbricks_datacenter
|
||||
short_description: Create or destroy a ProfitBricks Virtual Datacenter.
|
||||
short_description: Create or destroy a ProfitBricks Virtual Datacenter
|
||||
description:
|
||||
- This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency
|
||||
on profitbricks >= 1.0.0
|
||||
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: profitbricks_nic
|
||||
short_description: Create or Remove a NIC.
|
||||
short_description: Create or Remove a NIC
|
||||
description:
|
||||
- This module allows you to create or restore a volume snapshot. This module has a dependency on profitbricks >= 1.0.0
|
||||
options:
|
||||
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: profitbricks_volume
|
||||
short_description: Create or destroy a volume.
|
||||
short_description: Create or destroy a volume
|
||||
description:
|
||||
- Allows you to create or remove a volume from a ProfitBricks datacenter. This module has a dependency on profitbricks >= 1.0.0
|
||||
options:
|
||||
@@ -50,7 +50,7 @@ options:
|
||||
- Public SSH keys allowing access to the virtual machine.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
default: []
|
||||
disk_type:
|
||||
description:
|
||||
- The disk type of the volume.
|
||||
@@ -81,7 +81,7 @@ options:
|
||||
- list of instance ids, currently only used when state='absent' to remove instances.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
default: []
|
||||
subscription_user:
|
||||
description:
|
||||
- The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
|
||||
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: profitbricks_volume_attachments
|
||||
short_description: Attach or detach a volume.
|
||||
short_description: Attach or detach a volume
|
||||
description:
|
||||
- Allows you to attach or detach a volume from a ProfitBricks server. This module has a dependency on profitbricks >= 1.0.0
|
||||
options:
|
||||
|
||||
@@ -17,7 +17,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: pubnub_blocks
|
||||
short_description: PubNub blocks management module.
|
||||
short_description: PubNub blocks management module
|
||||
description:
|
||||
- "This module allows Ansible to interface with the PubNub BLOCKS
|
||||
infrastructure by providing the following operations: create / remove,
|
||||
@@ -37,6 +37,7 @@ options:
|
||||
same play)."
|
||||
required: false
|
||||
type: str
|
||||
default: ''
|
||||
password:
|
||||
description:
|
||||
- Password which match to account to which specified C(email) belong.
|
||||
@@ -44,6 +45,7 @@ options:
|
||||
same play)."
|
||||
required: false
|
||||
type: str
|
||||
default: ''
|
||||
cache:
|
||||
description: >
|
||||
In case if single play use blocks management module few times it is
|
||||
@@ -58,7 +60,7 @@ options:
|
||||
manage blocks."
|
||||
- "User's account will be used if value not set or empty."
|
||||
type: str
|
||||
required: false
|
||||
default: ''
|
||||
application:
|
||||
description:
|
||||
- "Name of target PubNub application for which blocks configuration on
|
||||
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rax
|
||||
short_description: create / delete an instance in Rackspace Public Cloud
|
||||
short_description: Create / delete an instance in Rackspace Public Cloud
|
||||
description:
|
||||
- creates / deletes a Rackspace Public Cloud instance and optionally
|
||||
waits for it to be 'running'.
|
||||
@@ -82,17 +82,20 @@ options:
|
||||
default: false
|
||||
extra_client_args:
|
||||
type: dict
|
||||
default: {}
|
||||
description:
|
||||
- A hash of key/value pairs to be used when creating the cloudservers
|
||||
client. This is considered an advanced option, use it wisely and
|
||||
with caution.
|
||||
extra_create_args:
|
||||
type: dict
|
||||
default: {}
|
||||
description:
|
||||
- A hash of key/value pairs to be used when creating a new server.
|
||||
This is considered an advanced option, use it wisely and with caution.
|
||||
files:
|
||||
type: dict
|
||||
default: {}
|
||||
description:
|
||||
- Files to insert into the instance. remotefilename:localcontent
|
||||
flavor:
|
||||
@@ -124,6 +127,7 @@ options:
|
||||
- keypair
|
||||
meta:
|
||||
type: dict
|
||||
default: {}
|
||||
description:
|
||||
- A hash of metadata to associate with the instance
|
||||
name:
|
||||
|
||||
@@ -26,6 +26,7 @@ options:
|
||||
C(name). This option requires C(pyrax>=1.9.3).
|
||||
meta:
|
||||
type: dict
|
||||
default: {}
|
||||
description:
|
||||
- A hash of metadata to associate with the volume.
|
||||
name:
|
||||
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rax_cdb
|
||||
short_description: create/delete or resize a Rackspace Cloud Databases instance
|
||||
short_description: Create/delete or resize a Rackspace Cloud Databases instance
|
||||
description:
|
||||
- creates / deletes or resize a Rackspace Cloud Databases instance
|
||||
and optionally waits for it to be 'running'. The name option needs to be
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user