Mirror of https://github.com/ansible-collections/community.general.git
Synced 2026-04-29 18:06:53 +00:00

Compare commits (149 commits)
| Author | SHA1 | Date |
|---|---|---|
| | a3c711491a | |
| | d6a57882d2 | |
| | edbef2266d | |
| | 2b88ee01d3 | |
| | afd24ccd35 | |
| | 26ada26df1 | |
| | 59999a89f1 | |
| | e1f4be1e01 | |
| | e7e2f095ee | |
| | 45200fc233 | |
| | 154d1b7024 | |
| | 541fcec900 | |
| | dc6ccbea63 | |
| | 6cb044ac13 | |
| | af78b2068a | |
| | 73569b1c36 | |
| | 5914c1df8e | |
| | f847531a35 | |
| | df01cde23d | |
| | cdd9ced441 | |
| | dfb61b283d | |
| | fe7b151a26 | |
| | 9b983fba86 | |
| | 2d27dbd9ea | |
| | 55cbccf0fc | |
| | 675860c392 | |
| | 5e8914e00c | |
| | c0230342b4 | |
| | ebf15447f0 | |
| | 86d10f53fd | |
| | d679f51018 | |
| | e9e494e1ff | |
| | bf90b4e88a | |
| | 874d7f7050 | |
| | 3a19fbc89c | |
| | 2d8e1339d1 | |
| | 4892f954fa | |
| | 6fd58ba388 | |
| | 49967547df | |
| | 492170b414 | |
| | 25b9391fb4 | |
| | 1e30afa92f | |
| | 7d4e69cdf7 | |
| | ff23e41c21 | |
| | 3ce7d2fc7e | |
| | 20a9d120aa | |
| | f4a40592a1 | |
| | 3d5145e924 | |
| | 1ae5a2cd05 | |
| | 75aa353281 | |
| | 5fe10915a8 | |
| | eca6494503 | |
| | b12d422fcc | |
| | 33809395ab | |
| | 60fe6ebc3c | |
| | 957f3e6eca | |
| | dbd918865f | |
| | 46a83df85e | |
| | 57f262504d | |
| | 29671cb54c | |
| | fd91e94279 | |
| | b001e36fb3 | |
| | 5f63476404 | |
| | eff452e4a5 | |
| | d393b16064 | |
| | 40b5967fc3 | |
| | ccabc342b9 | |
| | 570f6a8791 | |
| | b10cf1e357 | |
| | acfe464a31 | |
| | f32a8dc740 | |
| | ea8f109056 | |
| | 7e7b84348b | |
| | baf552337d | |
| | 3ff9161ab0 | |
| | aa8728b22c | |
| | 590ff351b4 | |
| | a5824a2a9d | |
| | 71975be3c1 | |
| | 564f87c775 | |
| | 6750a866c6 | |
| | c6316c1153 | |
| | 2c825f04e7 | |
| | 8a33e070be | |
| | d2a5fdfc71 | |
| | b6b2419206 | |
| | 4c399f1c01 | |
| | 923d335646 | |
| | be96c33257 | |
| | e68b1017e3 | |
| | 44e4b1d202 | |
| | 24378fd944 | |
| | c3e1715233 | |
| | ec86ebed98 | |
| | 5aa2779a48 | |
| | f6d15ec818 | |
| | f0320b5ac9 | |
| | 79578e5db3 | |
| | 43beaf4b00 | |
| | 6b0cc3c1de | |
| | 81a95a347d | |
| | 8026337c44 | |
| | 8fb1aaaf53 | |
| | fda610cc23 | |
| | 8ecb322816 | |
| | 1260a63241 | |
| | f6aad230c2 | |
| | f3a4e5ea27 | |
| | e26eaa0565 | |
| | 3432be3a2f | |
| | 4f78e7979e | |
| | 8a0b7dcdc9 | |
| | 9cd3f37686 | |
| | 5e1508e0df | |
| | be4d294b61 | |
| | 1d36db1806 | |
| | 16a18d1456 | |
| | 493fa405e2 | |
| | 25464602d9 | |
| | 8a670a3929 | |
| | 47b8df8019 | |
| | 9c411586ea | |
| | e0465d1f48 | |
| | ad6c28069a | |
| | 31a1c8e185 | |
| | 20bd0d130c | |
| | e5ecaffa1a | |
| | 8fd89721cd | |
| | b1231c1315 | |
| | 48b20894b3 | |
| | 1676b09573 | |
| | 94efecaf67 | |
| | e9bc32528e | |
| | 8e84b3ef8e | |
| | 944bc78360 | |
| | a2a81f11c3 | |
| | 1a72aea4c5 | |
| | 36eff2fd99 | |
| | 47514e1401 | |
| | 186b4200f6 | |
| | 6057c3c7c4 | |
| | b12113c182 | |
| | 4860c20108 | |
| | 8efbd8172c | |
| | 8c1b7e3ddb | |
| | ec228bf32f | |
| | 1d39bbefd9 | |
| | 59fb0cf506 | |
| | 6c4080e78d | |
```diff
@@ -29,14 +29,14 @@ schedules:
     always: true
     branches:
       include:
         - stable-7
         - stable-6
-        - stable-5
   - cron: 0 11 * * 0
     displayName: Weekly (old stable branches)
     always: true
     branches:
       include:
         - stable-4
+        - stable-5

 variables:
   - name: checkoutPath
@@ -73,6 +73,19 @@ stages:
             - test: 3
             - test: 4
             - test: extra
+  - stage: Sanity_2_15
+    displayName: Sanity 2.15
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          nameFormat: Test {0}
+          testFormat: 2.15/sanity/{0}
+          targets:
+            - test: 1
+            - test: 2
+            - test: 3
+            - test: 4
   - stage: Sanity_2_14
     displayName: Sanity 2.14
     dependsOn: []
@@ -99,19 +112,6 @@ stages:
             - test: 2
             - test: 3
             - test: 4
-  - stage: Sanity_2_12
-    displayName: Sanity 2.12
-    dependsOn: []
-    jobs:
-      - template: templates/matrix.yml
-        parameters:
-          nameFormat: Test {0}
-          testFormat: 2.12/sanity/{0}
-          targets:
-            - test: 1
-            - test: 2
-            - test: 3
-            - test: 4
 ### Units
   - stage: Units_devel
     displayName: Units devel
@@ -123,13 +123,23 @@ stages:
           testFormat: devel/units/{0}/1
           targets:
             - test: 2.7
-            - test: 3.5
             - test: 3.6
            - test: 3.7
             - test: 3.8
             - test: 3.9
             - test: '3.10'
             - test: '3.11'
+  - stage: Units_2_15
+    displayName: Units 2.15
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          nameFormat: Python {0}
+          testFormat: 2.15/units/{0}/1
+          targets:
+            - test: 3.5
+            - test: "3.10"
   - stage: Units_2_14
     displayName: Units 2.14
     dependsOn: []
@@ -139,7 +149,6 @@ stages:
           nameFormat: Python {0}
           testFormat: 2.14/units/{0}/1
           targets:
             - test: 2.7
             - test: 3.9
   - stage: Units_2_13
     displayName: Units 2.13
@@ -152,17 +161,6 @@ stages:
           targets:
             - test: 2.7
             - test: 3.8
-  - stage: Units_2_12
-    displayName: Units 2.12
-    dependsOn: []
-    jobs:
-      - template: templates/matrix.yml
-        parameters:
-          nameFormat: Python {0}
-          testFormat: 2.12/units/{0}/1
-          targets:
-            - test: 2.6
-            - test: 3.8

 ## Remote
   - stage: Remote_devel_extra_vms
@@ -177,8 +175,6 @@ stages:
               test: alpine/3.17
             # - name: Fedora 37
             #   test: fedora/37
             # - name: Ubuntu 20.04
             #   test: ubuntu/20.04
             - name: Ubuntu 22.04
               test: ubuntu/22.04
           groups:
@@ -191,20 +187,34 @@ stages:
         parameters:
           testFormat: devel/{0}
           targets:
-            - name: macOS 12.0
-              test: macos/12.0
-            - name: RHEL 7.9
-              test: rhel/7.9
+            - name: macOS 13.2
+              test: macos/13.2
             - name: RHEL 9.1
               test: rhel/9.1
-            - name: FreeBSD 13.1
-              test: freebsd/13.1
+            - name: FreeBSD 13.2
+              test: freebsd/13.2
             - name: FreeBSD 12.4
               test: freebsd/12.4
           groups:
             - 1
             - 2
             - 3
+  - stage: Remote_2_15
+    displayName: Remote 2.15
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          testFormat: 2.15/{0}
+          targets:
+            - name: RHEL 7.9
+              test: rhel/7.9
+            - name: FreeBSD 13.1
+              test: freebsd/13.1
+          groups:
+            - 1
+            - 2
+            - 3
   - stage: Remote_2_14
     displayName: Remote 2.14
     dependsOn: []
@@ -233,22 +243,6 @@ stages:
               test: macos/12.0
             - name: RHEL 8.5
               test: rhel/8.5
           groups:
             - 1
             - 2
             - 3
-  - stage: Remote_2_12
-    displayName: Remote 2.12
-    dependsOn: []
-    jobs:
-      - template: templates/matrix.yml
-        parameters:
-          testFormat: 2.12/{0}
-          targets:
-            - name: macOS 11.1
-              test: macos/11.1
-            - name: RHEL 8.4
-              test: rhel/8.4
-            - name: FreeBSD 13.0
-              test: freebsd/13.0
-          groups:
@@ -265,8 +259,6 @@ stages:
         parameters:
          testFormat: devel/linux/{0}
           targets:
             - name: CentOS 7
               test: centos7
-            - name: Fedora 37
-              test: fedora37
             - name: openSUSE 15
@@ -281,6 +273,20 @@ stages:
             - 1
             - 2
             - 3
+  - stage: Docker_2_15
+    displayName: Docker 2.15
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          testFormat: 2.15/linux/{0}
+          targets:
+            - name: CentOS 7
+              test: centos7
+          groups:
+            - 1
+            - 2
+            - 3
   - stage: Docker_2_14
     displayName: Docker 2.14
     dependsOn: []
@@ -313,24 +319,6 @@ stages:
             - 1
             - 2
             - 3
-  - stage: Docker_2_12
-    displayName: Docker 2.12
-    dependsOn: []
-    jobs:
-      - template: templates/matrix.yml
-        parameters:
-          testFormat: 2.12/linux/{0}
-          targets:
-            - name: CentOS 6
-              test: centos6
-            - name: Fedora 34
-              test: fedora34
-            - name: Ubuntu 18.04
-              test: ubuntu1804
-          groups:
-            - 1
-            - 2
-            - 3

 ### Community Docker
   - stage: Docker_community_devel
@@ -344,7 +332,7 @@ stages:
             - name: Debian Bullseye
               test: debian-bullseye/3.9
             - name: ArchLinux
-              test: archlinux/3.10
+              test: archlinux/3.11
             - name: CentOS Stream 8
               test: centos-stream8/3.9
           groups:
@@ -364,6 +352,16 @@ stages:
           targets:
             - test: 2.7
             - test: '3.11'
+  - stage: Generic_2_15
+    displayName: Generic 2.15
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          nameFormat: Python {0}
+          testFormat: 2.15/generic/{0}/1
+          targets:
+            - test: 3.9
   - stage: Generic_2_14
     displayName: Generic 2.14
     dependsOn: []
@@ -384,42 +382,32 @@ stages:
           testFormat: 2.13/generic/{0}/1
           targets:
             - test: 3.9
-  - stage: Generic_2_12
-    displayName: Generic 2.12
-    dependsOn: []
-    jobs:
-      - template: templates/matrix.yml
-        parameters:
-          nameFormat: Python {0}
-          testFormat: 2.12/generic/{0}/1
-          targets:
-            - test: 3.8

   - stage: Summary
     condition: succeededOrFailed()
     dependsOn:
       - Sanity_devel
-      - Sanity_2_12
       - Sanity_2_13
       - Sanity_2_14
+      - Sanity_2_15
       - Units_devel
-      - Units_2_12
       - Units_2_13
       - Units_2_14
+      - Units_2_15
       - Remote_devel_extra_vms
       - Remote_devel
-      - Remote_2_12
       - Remote_2_13
       - Remote_2_14
+      - Remote_2_15
       - Docker_devel
-      - Docker_2_12
       - Docker_2_13
       - Docker_2_14
+      - Docker_2_15
       - Docker_community_devel
       # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
       # - Generic_devel
-      # - Generic_2_12
       # - Generic_2_13
       # - Generic_2_14
+      # - Generic_2_15
     jobs:
       - template: templates/coverage.yml
```
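The `schedules:` hunk above moves stable-5 from the weekly "new stable" run into the weekly "old stable" run. A minimal sketch of the scheduled-trigger shape this file uses is below; the cron line, display name, and branch names come from the hunk, while the enclosing `schedules:` key is assumed from standard Azure Pipelines syntax.

```yaml
# Minimal sketch (values from the hunk above; `schedules:` key assumed):
# run every Sunday at 11:00 UTC against the listed branches, even when
# nothing has changed (always: true).
schedules:
  - cron: 0 11 * * 0
    displayName: Weekly (old stable branches)
    always: true
    branches:
      include:
        - stable-4
        - stable-5
```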
.github/BOTMETA.yml (vendored): 33 lines changed
```diff
@@ -241,6 +241,8 @@ files:
   $lookups/manifold.py:
     labels: manifold
     maintainers: galanoff
+  $lookups/merge_variables.py:
+    maintainers: rlenferink m-a-r-k-e
   $lookups/onepass:
     labels: onepassword
     maintainers: samdoran
@@ -265,6 +267,8 @@ files:
     maintainers: delineaKrehl tylerezimmerman
   $module_utils/:
     labels: module_utils
+  $module_utils/btrfs.py:
+    maintainers: gnfzdz
   $module_utils/deps.py:
     maintainers: russoz
   $module_utils/gconftool2.py:
@@ -294,7 +298,6 @@ files:
     maintainers: $team_manageiq
   $module_utils/memset.py:
     labels: cloud memset
-    maintainers: glitchcrab
   $module_utils/mh/:
     labels: module_helper
     maintainers: russoz
@@ -394,6 +397,8 @@ files:
     maintainers: catcombo
   $modules/bower.py:
     maintainers: mwarkentin
+  $modules/btrfs_:
+    maintainers: gnfzdz
   $modules/bundler.py:
     maintainers: thoiberg
   $modules/bzr.py:
@@ -588,7 +593,7 @@ files:
     ignore: jose-delarosa
     maintainers: $team_redfish
   $modules/ilo_:
-    ignore: jose-delarosa
+    ignore: jose-delarosa varini-hp
     maintainers: $team_redfish
   $modules/imc_rest.py:
     labels: cisco
@@ -667,16 +672,22 @@ files:
     ignore: DWSR
     labels: jira
     maintainers: Slezhuk tarka pertoft
+  $modules/kdeconfig.py:
+    maintainers: smeso
   $modules/kernel_blacklist.py:
     maintainers: matze
   $modules/keycloak_:
     maintainers: $team_keycloak
   $modules/keycloak_authentication.py:
     maintainers: elfelip Gaetan2907
+  $modules/keycloak_authz_authorization_scope.py:
+    maintainers: mattock
   $modules/keycloak_client_rolemapping.py:
     maintainers: Gaetan2907
   $modules/keycloak_clientscope.py:
     maintainers: Gaetan2907
+  $modules/keycloak_clientscope_type.py:
+    maintainers: simonpahl
   $modules/keycloak_clientsecret_info.py:
     maintainers: fynncfchen johncant
   $modules/keycloak_clientsecret_regenerate.py:
@@ -785,7 +796,7 @@ files:
     labels: maven_artifact
     maintainers: tumbl3w33d turb
   $modules/memset_:
-    maintainers: glitchcrab
+    ignore: glitchcrab
   $modules/mksysb.py:
     labels: aix mksysb
     maintainers: $team_aix
@@ -918,7 +929,7 @@ files:
   $modules/pamd.py:
     maintainers: kevensen
   $modules/parted.py:
-    maintainers: ColOfAbRiX rosowiecki jake2184
+    maintainers: ColOfAbRiX jake2184
   $modules/pear.py:
     ignore: jle64
     labels: pear
@@ -987,7 +998,7 @@ files:
     maintainers: sysadmind
   $modules/puppet.py:
     labels: puppet
-    maintainers: nibalizer emonty
+    maintainers: emonty
   $modules/pushbullet.py:
     maintainers: willybarro
   $modules/pushover.py:
@@ -1042,7 +1053,8 @@ files:
     maintainers: $team_redfish TSKushal
   $modules/redhat_subscription.py:
     labels: redhat_subscription
-    maintainers: barnabycourt alikins kahowell
+    maintainers: $team_rhsm
+    ignore: barnabycourt alikins kahowell
   $modules/redis.py:
     maintainers: slok
   $modules/redis_data.py:
@@ -1065,9 +1077,9 @@ files:
     labels: rhn_register
     maintainers: jlaska $team_rhn
   $modules/rhsm_release.py:
-    maintainers: seandst
+    maintainers: seandst $team_rhsm
   $modules/rhsm_repository.py:
-    maintainers: giovannisciortino
+    maintainers: giovannisciortino $team_rhsm
   $modules/riak.py:
     maintainers: drewkerrigan jsmartin
   $modules/rocketchat.py:
@@ -1320,7 +1332,7 @@ files:
     labels: m:xml xml
     maintainers: dagwieers magnus919 tbielawa cmprescott sm4rk0
   $modules/yarn.py:
-    maintainers: chrishoffman verkaufer
+    ignore: chrishoffman verkaufer
   $modules/yum_versionlock.py:
     maintainers: gyptazy aminvakil
   $modules/zfs:
@@ -1386,7 +1398,7 @@ macros:
   team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2
   team_ipa: Akasurde Nosmoht fxfitz justchris1
   team_jboss: Wolfant jairojunior wbrefvem
-  team_keycloak: eikef ndclt
+  team_keycloak: eikef ndclt mattock
   team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber
   team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr
   team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder
@@ -1396,6 +1408,7 @@ macros:
   team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
   team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06 jyundt
   team_rhn: FlossWare alikins barnabycourt vritant
+  team_rhsm: cnsnyder ptoscano
   team_scaleway: remyleone abarbare
   team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
   team_suse: commel evrardjp lrupp toabctl AnderEnder alxgu andytom sealor
```
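The `$team_*` values in the hunks above are BOTMETA macros: a macro defined under `macros:` expands to its member list wherever it is referenced under `files:`. A small sketch using only names that appear in this diff:

```yaml
# Sketch of BOTMETA macro expansion, with entries taken from the hunks above.
macros:
  team_rhsm: cnsnyder ptoscano
files:
  $modules/rhsm_release.py:
    maintainers: seandst $team_rhsm
    # equivalent to: maintainers: seandst cnsnyder ptoscano
```

This is why the redhat_subscription/rhsm hunks can swap three individual names for `$team_rhsm` without losing anyone.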
.github/workflows/ansible-test.yml (vendored): 55 lines changed
```diff
@@ -30,6 +30,7 @@ jobs:
       matrix:
         ansible:
           - '2.11'
+          - '2.12'
           # Ansible-test on various stable branches does not yet work well with cgroups v2.
           # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
           # image for these stable branches. The list of branches where this is necessary will
@@ -43,7 +44,7 @@ jobs:
       - name: Perform sanity testing
         uses: felixfontein/ansible-test-gh-action@main
         with:
-          ansible-core-github-repository-slug: felixfontein/ansible
+          ansible-core-github-repository-slug: ${{ contains(fromJson('["2.10", "2.11"]'), matrix.ansible) && 'felixfontein/ansible' || 'ansible/ansible' }}
           ansible-core-version: stable-${{ matrix.ansible }}
           coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
           pull-request-change-detection: 'true'
@@ -75,6 +76,10 @@ jobs:
             python: '2.7'
           - ansible: '2.11'
             python: '3.5'
+          - ansible: '2.12'
+            python: '2.6'
+          - ansible: '2.12'
+            python: '3.8'

     steps:
       - name: >-
@@ -82,7 +87,7 @@ jobs:
           Ansible version ${{ matrix.ansible }}
         uses: felixfontein/ansible-test-gh-action@main
         with:
-          ansible-core-github-repository-slug: felixfontein/ansible
+          ansible-core-github-repository-slug: ${{ contains(fromJson('["2.10", "2.11"]'), matrix.ansible) && 'felixfontein/ansible' || 'ansible/ansible' }}
           ansible-core-version: stable-${{ matrix.ansible }}
           coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
           pre-test-cmd: >-
@@ -163,7 +168,49 @@ jobs:
           # - ansible: '2.11'
           #   docker: default
           #   python: '3.5'
-          #   target: azp/generic/2/
+          #   target: azp/generic/1/
+          # 2.12
+          - ansible: '2.12'
+            docker: centos6
+            python: ''
+            target: azp/posix/1/
+          - ansible: '2.12'
+            docker: centos6
+            python: ''
+            target: azp/posix/2/
+          - ansible: '2.12'
+            docker: centos6
+            python: ''
+            target: azp/posix/3/
+          - ansible: '2.12'
+            docker: fedora34
+            python: ''
+            target: azp/posix/1/
+          - ansible: '2.12'
+            docker: fedora34
+            python: ''
+            target: azp/posix/2/
+          - ansible: '2.12'
+            docker: fedora34
+            python: ''
+            target: azp/posix/3/
+          - ansible: '2.12'
+            docker: ubuntu1804
+            python: ''
+            target: azp/posix/1/
+          - ansible: '2.12'
+            docker: ubuntu1804
+            python: ''
+            target: azp/posix/2/
+          - ansible: '2.12'
+            docker: ubuntu1804
+            python: ''
+            target: azp/posix/3/
+          # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
+          # - ansible: '2.12'
+          #   docker: default
+          #   python: '3.8'
+          #   target: azp/generic/1/

     steps:
       - name: >-
@@ -172,7 +219,7 @@ jobs:
           under Python ${{ matrix.python }}
         uses: felixfontein/ansible-test-gh-action@main
         with:
-          ansible-core-github-repository-slug: felixfontein/ansible
+          ansible-core-github-repository-slug: ${{ contains(fromJson('["2.10", "2.11"]'), matrix.ansible) && 'felixfontein/ansible' || 'ansible/ansible' }}
           ansible-core-version: stable-${{ matrix.ansible }}
           coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
           docker-image: ${{ matrix.docker }}
```
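The repository-slug change above relies on the usual GitHub Actions substitute for a ternary operator: `cond && 'a' || 'b'` evaluates to `'a'` when `cond` is true (safe here because `'a'` is never falsy), otherwise `'b'`. A self-contained sketch of the same idiom; the step name and the echo are illustrative only:

```yaml
# Hypothetical step demonstrating the `cond && a || b` idiom used above.
# GitHub Actions expressions have no ternary operator, so this selects
# the felixfontein/ansible fork for EOL branches 2.10/2.11 and upstream
# ansible/ansible for everything else.
- name: Show which ansible-core repository would be used
  env:
    SLUG: ${{ contains(fromJson('["2.10", "2.11"]'), matrix.ansible) && 'felixfontein/ansible' || 'ansible/ansible' }}
  run: echo "ansible-core comes from $SLUG"
```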
CHANGELOG.rst: 165 lines changed
```diff
@@ -6,6 +6,171 @@ Community General Release Notes
 
 This changelog describes changes after version 5.0.0.
 
+v6.6.2
+======
+
+Release Summary
+---------------
+
+Regular bugfix release.
+
+Bugfixes
+--------
+
+- csv module utils - detects and remove unicode BOM markers from incoming CSV content (https://github.com/ansible-collections/community.general/pull/6662).
+- gitlab_group - the module passed parameters to the API call even when not set. The module is now filtering out ``None`` values to remediate this (https://github.com/ansible-collections/community.general/pull/6712).
+- ini_file - fix a bug where the inactive options were not used when possible (https://github.com/ansible-collections/community.general/pull/6575).
+- keycloak module utils - fix ``is_struct_included`` handling of lists of lists/dictionaries (https://github.com/ansible-collections/community.general/pull/6688).
+- keycloak module utils - the function ``get_user_by_username`` now return the user representation or ``None`` as stated in the documentation (https://github.com/ansible-collections/community.general/pull/6758).
+
+v6.6.1
+======
+
+Release Summary
+---------------
+
+Regular bugfix release.
+
+Minor Changes
+-------------
+
+- dconf - if ``gi.repository.GLib`` is missing, try to respawn in a Python interpreter that has it (https://github.com/ansible-collections/community.general/pull/6491).
+
+Bugfixes
+--------
+
+- deps module utils - do not fail when dependency cannot be found (https://github.com/ansible-collections/community.general/pull/6479).
+- nmcli - fix bond option ``xmit_hash_policy`` (https://github.com/ansible-collections/community.general/pull/6527).
+- passwordstore lookup plugin - make compatible with ansible-core 2.16 (https://github.com/ansible-collections/community.general/pull/6447).
+- portage - fix ``changed_use`` and ``newuse`` not triggering rebuilds (https://github.com/ansible-collections/community.general/issues/6008, https://github.com/ansible-collections/community.general/pull/6548).
+- portage - update the logic for generating the emerge command arguments to ensure that ``withbdeps: false`` results in a passing an ``n`` argument with the ``--with-bdeps`` emerge flag (https://github.com/ansible-collections/community.general/issues/6451, https://github.com/ansible-collections/community.general/pull/6456).
+- proxmox_tasks_info - remove ``api_user`` + ``api_password`` constraint from ``required_together`` as it causes to require ``api_password`` even when API token param is used (https://github.com/ansible-collections/community.general/issues/6201).
+- puppet - handling ``noop`` parameter was not working at all, now it is has been fixed (https://github.com/ansible-collections/community.general/issues/6452, https://github.com/ansible-collections/community.general/issues/6458).
+- terraform - fix broken ``warn()`` call (https://github.com/ansible-collections/community.general/pull/6497).
+- xfs_quota - in case of a project quota, the call to ``xfs_quota`` did not initialize/reset the project (https://github.com/ansible-collections/community.general/issues/5143).
+- zypper - added handling of zypper exitcode 102. Changed state is set correctly now and rc 102 is still preserved to be evaluated by the playbook (https://github.com/ansible-collections/community.general/pull/6534).
+
+v6.6.0
+======
+
+Release Summary
+---------------
+
+Bugfix and feature release.
+
+Minor Changes
+-------------
+
+- cpanm - minor change, use feature from ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/6385).
+- dconf - be forgiving about boolean values: convert them to GVariant booleans automatically (https://github.com/ansible-collections/community.general/pull/6206).
+- dconf - minor refactoring improving parameters and dependencies validation (https://github.com/ansible-collections/community.general/pull/6336).
+- deps module utils - add function ``failed()`` providing the ability to check the dependency check result without triggering an exception (https://github.com/ansible-collections/community.general/pull/6383).
+- dig lookup plugin - Support multiple domains to be queried as indicated in docs (https://github.com/ansible-collections/community.general/pull/6334).
+- gitlab_project - add new option ``topics`` for adding topics to GitLab projects (https://github.com/ansible-collections/community.general/pull/6278).
+- homebrew_cask - allows passing ``--greedy`` option to ``upgrade_all`` (https://github.com/ansible-collections/community.general/pull/6267).
+- idrac_redfish_command - add ``job_id`` to ``CreateBiosConfigJob`` response (https://github.com/ansible-collections/community.general/issues/5603).
+- ipa_hostgroup - add ``append`` parameter for adding a new hosts to existing hostgroups without changing existing hostgroup members (https://github.com/ansible-collections/community.general/pull/6203).
+- keycloak_authentication - add flow type option to sub flows to allow the creation of 'form-flow' sub flows like in Keycloak's built-in registration flow (https://github.com/ansible-collections/community.general/pull/6318).
+- mksysb - improved the output of the module in case of errors (https://github.com/ansible-collections/community.general/issues/6263).
+- nmap inventory plugin - added environment variables for configure ``address`` and ``exclude`` (https://github.com/ansible-collections/community.general/issues/6351).
+- nmcli - add ``macvlan`` connection type (https://github.com/ansible-collections/community.general/pull/6312).
+- pipx - add ``system_site_packages`` parameter to give application access to system-wide packages (https://github.com/ansible-collections/community.general/pull/6308).
+- pipx - ensure ``include_injected`` parameter works with ``state=upgrade`` and ``state=latest`` (https://github.com/ansible-collections/community.general/pull/6212).
+- puppet - add new options ``skip_tags`` to exclude certain tagged resources during a puppet agent or apply (https://github.com/ansible-collections/community.general/pull/6293).
+- terraform - remove state file check condition and error block, because in the native implementation of terraform will not cause errors due to the non-existent file (https://github.com/ansible-collections/community.general/pull/6296).
+- udm_dns_record - minor refactor to the code (https://github.com/ansible-collections/community.general/pull/6382).
+
+Bugfixes
+--------
+
+- archive - reduce RAM usage by generating CRC32 checksum over chunks (https://github.com/ansible-collections/community.general/pull/6274).
+- flatpak - fixes idempotency detection issues. In some cases the module could fail to properly detect already existing Flatpaks because of a parameter witch only checks the installed apps (https://github.com/ansible-collections/community.general/pull/6289).
+- icinga2_host - fix the data structure sent to Icinga to make use of host templates and template vars (https://github.com/ansible-collections/community.general/pull/6286).
+- idrac_redfish_command - allow user to specify ``resource_id`` for ``CreateBiosConfigJob`` to specify an exact manager (https://github.com/ansible-collections/community.general/issues/2090).
+- ini_file - make ``section`` parameter not required so it is possible to pass ``null`` as a value. This only was possible in the past due to a bug in ansible-core that now has been fixed (https://github.com/ansible-collections/community.general/pull/6404).
+- keycloak - improve error messages (https://github.com/ansible-collections/community.general/pull/6318).
+- one_vm - fix syntax error when creating VMs with a more complex template (https://github.com/ansible-collections/community.general/issues/6225).
+- pipx - fixed handling of ``install_deps=true`` with ``state=latest`` and ``state=upgrade`` (https://github.com/ansible-collections/community.general/pull/6303).
+- redhat_subscription - do not use D-Bus for registering when ``environment`` is specified, so it possible to specify again the environment names for registering, as the D-Bus APIs work only with IDs (https://github.com/ansible-collections/community.general/pull/6319).
+- redhat_subscription - try to unregister only when already registered when ``force_register`` is specified (https://github.com/ansible-collections/community.general/issues/6258, https://github.com/ansible-collections/community.general/pull/6259).
+- redhat_subscription - use the right D-Bus options for environments when registering a CentOS Stream 8 system and using ``environment`` (https://github.com/ansible-collections/community.general/pull/6275).
+- rhsm_release - make ``release`` parameter not required so it is possible to pass ``null`` as a value. This only was possible in the past due to a bug in ansible-core that now has been fixed (https://github.com/ansible-collections/community.general/pull/6401).
+- rundeck module utils - fix errors caused by the API empty responses (https://github.com/ansible-collections/community.general/pull/6300)
+- rundeck_acl_policy - fix ``TypeError - byte indices must be integers or slices, not str`` error caused by empty API response. Update the module to use ``module_utils.rundeck`` functions (https://github.com/ansible-collections/community.general/pull/5887, https://github.com/ansible-collections/community.general/pull/6300).
+- rundeck_project - update the module to use ``module_utils.rundeck`` functions (https://github.com/ansible-collections/community.general/issues/5742) (https://github.com/ansible-collections/community.general/pull/6300)
+- snap_alias - module would only recognize snap names containing letter, numbers or the underscore character, failing to identify valid snap names such as ``lxd.lxc`` (https://github.com/ansible-collections/community.general/pull/6361).
+
+New Modules
+-----------
+
+- btrfs_info - Query btrfs filesystem info
+- btrfs_subvolume - Manage btrfs subvolumes
+- ilo_redfish_command - Manages Out-Of-Band controllers using Redfish APIs
+- keycloak_authz_authorization_scope - Allows administration of Keycloak client authorization scopes via Keycloak API
+- keycloak_clientscope_type - Set the type of aclientscope in realm or client via Keycloak API
+
+v6.5.0
+======
+
+Release Summary
+---------------
+
+Feature and bugfix release.
+
+Minor Changes
+-------------
+
+- apt_rpm - adds ``clean``, ``dist_upgrade`` and ``update_kernel`` parameters for clear caches, complete upgrade system, and upgrade kernel packages (https://github.com/ansible-collections/community.general/pull/5867).
+- dconf - parse GVariants for equality comparison when the Python module ``gi.repository`` is available (https://github.com/ansible-collections/community.general/pull/6049).
+- gitlab_runner - allow to register group runner (https://github.com/ansible-collections/community.general/pull/3935).
+- jira - add worklog functionality (https://github.com/ansible-collections/community.general/issues/6209, https://github.com/ansible-collections/community.general/pull/6210).
+- ldap modules - add ``ca_path`` option (https://github.com/ansible-collections/community.general/pull/6185).
+- make - add ``command`` return value to the module output (https://github.com/ansible-collections/community.general/pull/6160).
+- nmap inventory plugin - add new option ``open`` for only returning open ports (https://github.com/ansible-collections/community.general/pull/6200).
+- nmap inventory plugin - add new option ``port`` for port specific scan (https://github.com/ansible-collections/community.general/pull/6165).
+- nmcli - add ``default`` and ``default-or-eui64`` to the list of valid choices for ``addr_gen_mode6`` parameter (https://github.com/ansible-collections/community.general/pull/5974).
+- nmcli - add support for ``team.runner-fast-rate`` parameter for ``team`` connections (https://github.com/ansible-collections/community.general/issues/6065).
+- openbsd_pkg - set ``TERM`` to ``'dumb'`` in ``execute_command()`` to make module less dependant on the ``TERM`` environment variable set on the Ansible controller (https://github.com/ansible-collections/community.general/pull/6149).
+- pipx - optional ``install_apps`` parameter added to install applications from injected packages (https://github.com/ansible-collections/community.general/pull/6198).
+- proxmox_kvm - add new ``archive`` parameter. This is needed to create a VM from an archive (backup) (https://github.com/ansible-collections/community.general/pull/6159).
+- redfish_info - adds commands to retrieve the HPE ThermalConfiguration and FanPercentMinimum settings from iLO (https://github.com/ansible-collections/community.general/pull/6208).
+- redhat_subscription - credentials (``username``, ``activationkey``, and so on) are required now only if a system needs to be registered, or ``force_register`` is specified (https://github.com/ansible-collections/community.general/pull/5664).
+- redhat_subscription - the registration is done using the D-Bus ``rhsm`` service instead of spawning a ``subscription-manager register`` command, if possible; this avoids passing plain-text credentials as arguments to ``subscription-manager register``, which can be seen while that command runs (https://github.com/ansible-collections/community.general/pull/6122).
+- ssh_config - add ``proxyjump`` option (https://github.com/ansible-collections/community.general/pull/5970).
+- ssh_config - vendored StormSSH's config parser to avoid having to install StormSSH to use the module (https://github.com/ansible-collections/community.general/pull/6117).
+- znode module - optional ``use_tls`` parameter added for encrypted communication (https://github.com/ansible-collections/community.general/issues/6154).
+
+Bugfixes
+--------
+
+- archive - avoid deprecated exception class on Python 3 (https://github.com/ansible-collections/community.general/pull/6180).
+- gitlab_runner - fix ``KeyError`` on runner creation and update (https://github.com/ansible-collections/community.general/issues/6112).
+- influxdb_user - fix running in check mode when the user does not exist yet (https://github.com/ansible-collections/community.general/pull/6111).
+- interfaces_file - fix reading options in lines not starting with a space (https://github.com/ansible-collections/community.general/issues/6120).
+- jail connection plugin - add ``inventory_hostname`` to vars under ``remote_addr``. This is needed for compatibility with ansible-core 2.13 (https://github.com/ansible-collections/community.general/pull/6118).
+- memset - fix memset urlerror handling (https://github.com/ansible-collections/community.general/pull/6114).
+- nmcli - fixed idempotency issue for bridge connections. Module forced default value of ``bridge.priority`` to nmcli if not set; if ``bridge.stp`` is disabled nmcli ignores it and keep default (https://github.com/ansible-collections/community.general/issues/3216, https://github.com/ansible-collections/community.general/issues/4683).
+- nmcli - fixed idempotency issue when module params is set to ``may_fail4=false`` and ``method4=disabled``; in this case nmcli ignores change and keeps their own default value ``yes`` (https://github.com/ansible-collections/community.general/pull/6106).
+- nmcli - implemented changing mtu value on vlan interfaces (https://github.com/ansible-collections/community.general/issues/4387).
+- opkg - fixes bug when using ``update_cache=true`` (https://github.com/ansible-collections/community.general/issues/6004).
+- redhat_subscription, rhsm_release, rhsm_repository - cleanly fail when not running as root, rather than hanging on an interactive ``console-helper`` prompt; they all interact with ``subscription-manager``, which already requires to be run as root (https://github.com/ansible-collections/community.general/issues/734, https://github.com/ansible-collections/community.general/pull/6211).
+- xenorchestra inventory plugin - fix failure to receive objects from server due to not checking the id of the response (https://github.com/ansible-collections/community.general/pull/6227).
+- yarn - fix ``global=true`` to not fail when `executable` wasn't specified (https://github.com/ansible-collections/community.general/pull/6132)
+- yarn - fixes bug where yarn module tasks would fail when warnings were emitted from Yarn. The ``yarn.list`` method was not filtering out warnings (https://github.com/ansible-collections/community.general/issues/6127).
+
+New Plugins
+-----------
+
+Lookup
+~~~~~~
+
+- merge_variables - merge variables with a certain suffix
+
+New Modules
+-----------
+
+- kdeconfig - Manage KDE configuration files
+
 v6.4.0
 ======
```
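The v6.5.0 notes above introduce the `merge_variables` lookup ("merge variables with a certain suffix"). A hedged usage sketch follows; the variable names and the suffix are invented for illustration:

```yaml
# Hypothetical playbook snippet for the merge_variables lookup announced
# above; variable names and the __all_packages suffix are illustrative.
- hosts: localhost
  vars:
    alpha__all_packages: [git]
    beta__all_packages: [curl]
  tasks:
    - name: Merge every variable whose name ends in __all_packages
      ansible.builtin.debug:
        msg: "{{ lookup('community.general.merge_variables', '__all_packages') }}"
```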
@@ -1122,3 +1122,305 @@ releases:
|
||||
- 6100-jenkins_plugin.yml
|
||||
- remove-unneeded-imports.yml
|
||||
release_date: '2023-02-27'
|
||||
6.5.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- archive - avoid deprecated exception class on Python 3 (https://github.com/ansible-collections/community.general/pull/6180).
|
||||
- gitlab_runner - fix ``KeyError`` on runner creation and update (https://github.com/ansible-collections/community.general/issues/6112).
|
||||
- influxdb_user - fix running in check mode when the user does not exist yet
|
||||
(https://github.com/ansible-collections/community.general/pull/6111).
|
||||
- interfaces_file - fix reading options in lines not starting with a space (https://github.com/ansible-collections/community.general/issues/6120).
|
||||
- jail connection plugin - add ``inventory_hostname`` to vars under ``remote_addr``.
|
||||
This is needed for compatibility with ansible-core 2.13 (https://github.com/ansible-collections/community.general/pull/6118).
|
||||
- memset - fix memset urlerror handling (https://github.com/ansible-collections/community.general/pull/6114).
|
||||
- nmcli - fixed idempotency issue for bridge connections. Module forced default
|
||||
value of ``bridge.priority`` to nmcli if not set; if ``bridge.stp`` is disabled
|
||||
nmcli ignores it and keep default (https://github.com/ansible-collections/community.general/issues/3216,
|
||||
https://github.com/ansible-collections/community.general/issues/4683).
|
||||
- nmcli - fixed idempotency issue when module params is set to ``may_fail4=false``
|
||||
and ``method4=disabled``; in this case nmcli ignores change and keeps their
|
||||
own default value ``yes`` (https://github.com/ansible-collections/community.general/pull/6106).
|
||||
- nmcli - implemented changing mtu value on vlan interfaces (https://github.com/ansible-collections/community.general/issues/4387).
|
||||
- opkg - fixes bug when using ``update_cache=true`` (https://github.com/ansible-collections/community.general/issues/6004).
|
||||
- redhat_subscription, rhsm_release, rhsm_repository - cleanly fail when not
|
||||
running as root, rather than hanging on an interactive ``console-helper``
|
||||
prompt; they all interact with ``subscription-manager``, which already requires
|
||||
to be run as root (https://github.com/ansible-collections/community.general/issues/734,
|
||||
https://github.com/ansible-collections/community.general/pull/6211).
|
||||
- xenorchestra inventory plugin - fix failure to receive objects from server
|
||||
due to not checking the id of the response (https://github.com/ansible-collections/community.general/pull/6227).
|
||||
- yarn - fix ``global=true`` to not fail when `executable` wasn't specified
|
||||
(https://github.com/ansible-collections/community.general/pull/6132)
|
||||
- yarn - fixes bug where yarn module tasks would fail when warnings were emitted
|
||||
from Yarn. The ``yarn.list`` method was not filtering out warnings (https://github.com/ansible-collections/community.general/issues/6127).
|
||||
minor_changes:
|
||||
- apt_rpm - adds ``clean``, ``dist_upgrade`` and ``update_kernel`` parameters
|
||||
for clear caches, complete upgrade system, and upgrade kernel packages (https://github.com/ansible-collections/community.general/pull/5867).
|
||||
- dconf - parse GVariants for equality comparison when the Python module ``gi.repository``
|
||||
is available (https://github.com/ansible-collections/community.general/pull/6049).
|
||||
- gitlab_runner - allow to register group runner (https://github.com/ansible-collections/community.general/pull/3935).
|
||||
- jira - add worklog functionality (https://github.com/ansible-collections/community.general/issues/6209,
|
||||
https://github.com/ansible-collections/community.general/pull/6210).
|
||||
- ldap modules - add ``ca_path`` option (https://github.com/ansible-collections/community.general/pull/6185).
|
||||
- make - add ``command`` return value to the module output (https://github.com/ansible-collections/community.general/pull/6160).
|
||||
- nmap inventory plugin - add new option ``open`` for only returning open ports
|
||||
(https://github.com/ansible-collections/community.general/pull/6200).
|
||||
- nmap inventory plugin - add new option ``port`` for port specific scan (https://github.com/ansible-collections/community.general/pull/6165).
|
||||
- nmcli - add ``default`` and ``default-or-eui64`` to the list of valid choices
|
||||
for ``addr_gen_mode6`` parameter (https://github.com/ansible-collections/community.general/pull/5974).
|
||||
- nmcli - add support for ``team.runner-fast-rate`` parameter for ``team`` connections
|
||||
(https://github.com/ansible-collections/community.general/issues/6065).
|
||||
- openbsd_pkg - set ``TERM`` to ``'dumb'`` in ``execute_command()`` to make
|
||||
module less dependant on the ``TERM`` environment variable set on the Ansible
|
||||
controller (https://github.com/ansible-collections/community.general/pull/6149).
|
||||
- pipx - optional ``install_apps`` parameter added to install applications from
|
||||
injected packages (https://github.com/ansible-collections/community.general/pull/6198).
|
||||
- proxmox_kvm - add new ``archive`` parameter. This is needed to create a VM
|
||||
from an archive (backup) (https://github.com/ansible-collections/community.general/pull/6159).
|
||||
- redfish_info - adds commands to retrieve the HPE ThermalConfiguration and
|
||||
FanPercentMinimum settings from iLO (https://github.com/ansible-collections/community.general/pull/6208).
|
||||
- redhat_subscription - credentials (``username``, ``activationkey``, and so
|
||||
on) are required now only if a system needs to be registered, or ``force_register``
|
||||
is specified (https://github.com/ansible-collections/community.general/pull/5664).
|
||||
- redhat_subscription - the registration is done using the D-Bus ``rhsm`` service
|
||||
instead of spawning a ``subscription-manager register`` command, if possible;
|
||||
this avoids passing plain-text credentials as arguments to ``subscription-manager
|
||||
register``, which can be seen while that command runs (https://github.com/ansible-collections/community.general/pull/6122).
|
||||
- ssh_config - add ``proxyjump`` option (https://github.com/ansible-collections/community.general/pull/5970).
|
||||
- ssh_config - vendored StormSSH's config parser to avoid having to install
|
||||
StormSSH to use the module (https://github.com/ansible-collections/community.general/pull/6117).
|
||||
- znode module - optional ``use_tls`` parameter added for encrypted communication
|
||||
(https://github.com/ansible-collections/community.general/issues/6154).
|
||||
release_summary: Feature and bugfix release.
|
||||
fragments:
|
||||
- 3216-nmcli-bridge-idempotency-fix.yml
|
||||
- 3935-add-gitlab-group-runner.yml
|
||||
- 4387-nmcli-mtu-for-vlan-connection-fix.yml
|
||||
- 5664-redhat_subscription-credentials-when-needed.yaml
|
||||
- 5867-apt_rpm-add-clean-and-upgrade.yml
|
||||
- 5970-add-proxyjump-option-to-ssh-config.yml
|
||||
- 5974-nmcli_add_new_addr_gen_mode6_options.yml
|
||||
- 6.5.0.yml
|
||||
- 6049-dconf-strings.yml
|
||||
- 6065-nmcli-add-runner-fast-rate-option.yml
|
||||
- 6106-nmcli-ipv4-mayfail-idempotency-fix.yml
|
||||
- 6111-influxdb_user-check-mode.yaml
|
||||
- 6112-fix_key_error_in_gitlab_runner_creation_update.yml
|
||||
- 6114-memset-add-url-error-handling.yml
|
||||
- 6117-remove-stormssh-depend.yml
|
||||
- 6118-jail-plugin-fix-default-inventory_hostname.yml
|
||||
- 6119-opkg-update.yaml
|
||||
- 6122-redhat_subscription-subscribe-via-dbus.yaml
|
||||
- 6127-yarn-ignore-warnings.yml
|
||||
- 6131-fix-interfaces_file-for-no-leading-spaces.yml
|
||||
- 6138-fix-yarn-global.yml
|
||||
- 6149-openbsd_pkg-term.yml
|
||||
- 6154-znode-optional-tls.yml
|
||||
- 6158-create-proxmox-vm-from-archive.yml
|
||||
- 6160-add-command-make-output.yml
|
||||
- 6165-nmap-port.yml
|
||||
- 6180-replace-deprecated-badzipfile.yml
|
||||
- 6198-pipx-inject-install-apps.yml
|
||||
- 6200-adding-open-option-to-nmap.yml
|
||||
- 6208-hpe-thermal-fan-percent.yaml
|
||||
- 6210-add-worklog-functionality-to-jira.yml
|
||||
- 6211-rhsm-require-root.yml
|
||||
- 6227-xen-orchestra-check-response-id.yml
|
||||
- xxxx-ldap-ca-cert-file.yml
|
||||
modules:
|
||||
- description: Manage KDE configuration files
|
||||
name: kdeconfig
|
||||
namespace: ''
|
||||
plugins:
|
||||
lookup:
|
||||
- description: merge variables with a certain suffix
|
||||
name: merge_variables
|
||||
namespace: null
|
||||
release_date: '2023-03-27'
|
||||
6.6.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- archive - reduce RAM usage by generating CRC32 checksum over chunks (https://github.com/ansible-collections/community.general/pull/6274).
|
||||
- flatpak - fixes idempotency detection issues. In some cases the module could
|
||||
fail to properly detect already existing Flatpaks because of a parameter witch
|
||||
only checks the installed apps (https://github.com/ansible-collections/community.general/pull/6289).
|
||||
- icinga2_host - fix the data structure sent to Icinga to make use of host templates
|
||||
and template vars (https://github.com/ansible-collections/community.general/pull/6286).
|
||||
- idrac_redfish_command - allow user to specify ``resource_id`` for ``CreateBiosConfigJob``
|
||||
to specify an exact manager (https://github.com/ansible-collections/community.general/issues/2090).
|
||||
- ini_file - make ``section`` parameter not required so it is possible to pass
|
||||
``null`` as a value. This only was possible in the past due to a bug in ansible-core
|
||||
that now has been fixed (https://github.com/ansible-collections/community.general/pull/6404).
|
||||
- keycloak - improve error messages (https://github.com/ansible-collections/community.general/pull/6318).
|
||||
- one_vm - fix syntax error when creating VMs with a more complex template (https://github.com/ansible-collections/community.general/issues/6225).
|
||||
- pipx - fixed handling of ``install_deps=true`` with ``state=latest`` and ``state=upgrade``
|
||||
(https://github.com/ansible-collections/community.general/pull/6303).
|
||||
- redhat_subscription - do not use D-Bus for registering when ``environment``
|
||||
is specified, so it possible to specify again the environment names for registering,
|
||||
as the D-Bus APIs work only with IDs (https://github.com/ansible-collections/community.general/pull/6319).
|
||||
- redhat_subscription - try to unregister only when already registered when
|
||||
``force_register`` is specified (https://github.com/ansible-collections/community.general/issues/6258,
|
||||
https://github.com/ansible-collections/community.general/pull/6259).
|
||||
- redhat_subscription - use the right D-Bus options for environments when registering
|
||||
a CentOS Stream 8 system and using ``environment`` (https://github.com/ansible-collections/community.general/pull/6275).
|
||||
- rhsm_release - make ``release`` parameter not required so it is possible to
|
||||
pass ``null`` as a value. This only was possible in the past due to a bug
|
||||
in ansible-core that now has been fixed (https://github.com/ansible-collections/community.general/pull/6401).
|
||||
- rundeck module utils - fix errors caused by the API empty responses (https://github.com/ansible-collections/community.general/pull/6300)
|
||||
- rundeck_acl_policy - fix ``TypeError - byte indices must be integers or slices,
|
||||
not str`` error caused by empty API response. Update the module to use ``module_utils.rundeck``
|
||||
functions (https://github.com/ansible-collections/community.general/pull/5887,
|
||||
https://github.com/ansible-collections/community.general/pull/6300).
|
||||
- rundeck_project - update the module to use ``module_utils.rundeck`` functions
|
||||
(https://github.com/ansible-collections/community.general/issues/5742) (https://github.com/ansible-collections/community.general/pull/6300)
|
||||
- snap_alias - module would only recognize snap names containing letter, numbers
|
||||
or the underscore character, failing to identify valid snap names such as
|
||||
``lxd.lxc`` (https://github.com/ansible-collections/community.general/pull/6361).
|
||||
minor_changes:
|
||||
- cpanm - minor change, use feature from ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/6385).
|
||||
- 'dconf - be forgiving about boolean values: convert them to GVariant booleans
|
||||
automatically (https://github.com/ansible-collections/community.general/pull/6206).'
|
||||
- dconf - minor refactoring improving parameters and dependencies validation
|
||||
(https://github.com/ansible-collections/community.general/pull/6336).
|
||||
- deps module utils - add function ``failed()`` providing the ability to check
|
||||
the dependency check result without triggering an exception (https://github.com/ansible-collections/community.general/pull/6383).
|
||||
- dig lookup plugin - Support multiple domains to be queried as indicated in
|
||||
docs (https://github.com/ansible-collections/community.general/pull/6334).
|
||||
- gitlab_project - add new option ``topics`` for adding topics to GitLab projects
|
||||
(https://github.com/ansible-collections/community.general/pull/6278).
|
||||
- homebrew_cask - allows passing ``--greedy`` option to ``upgrade_all`` (https://github.com/ansible-collections/community.general/pull/6267).
|
||||
- idrac_redfish_command - add ``job_id`` to ``CreateBiosConfigJob`` response
|
||||
(https://github.com/ansible-collections/community.general/issues/5603).
|
||||
- ipa_hostgroup - add ``append`` parameter for adding a new hosts to existing
|
||||
hostgroups without changing existing hostgroup members (https://github.com/ansible-collections/community.general/pull/6203).
|
||||
- keycloak_authentication - add flow type option to sub flows to allow the creation
|
||||
of 'form-flow' sub flows like in Keycloak's built-in registration flow (https://github.com/ansible-collections/community.general/pull/6318).
|
||||
- mksysb - improved the output of the module in case of errors (https://github.com/ansible-collections/community.general/issues/6263).
|
||||
- nmap inventory plugin - added environment variables for configure ``address``
|
||||
and ``exclude`` (https://github.com/ansible-collections/community.general/issues/6351).
|
||||
- nmcli - add ``macvlan`` connection type (https://github.com/ansible-collections/community.general/pull/6312).
|
||||
- pipx - add ``system_site_packages`` parameter to give application access to
|
||||
system-wide packages (https://github.com/ansible-collections/community.general/pull/6308).
|
||||
- pipx - ensure ``include_injected`` parameter works with ``state=upgrade``
|
||||
and ``state=latest`` (https://github.com/ansible-collections/community.general/pull/6212).
|
||||
- puppet - add new options ``skip_tags`` to exclude certain tagged resources
|
||||
during a puppet agent or apply (https://github.com/ansible-collections/community.general/pull/6293).
|
||||
- terraform - remove state file check condition and error block, because in
|
||||
the native implementation of terraform will not cause errors due to the non-existent
|
||||
file (https://github.com/ansible-collections/community.general/pull/6296).
|
||||
- udm_dns_record - minor refactor to the code (https://github.com/ansible-collections/community.general/pull/6382).
|
||||
release_summary: Bugfix and feature release.
|
||||
fragments:
|
||||
- 2090-idrac-redfish-resource-id-fix.yml
|
||||
- 5603-redfish-idrac-job-id-in-response.yml
|
||||
- 6.6.0.yml
|
||||
- 6199-archive-generate-checksum-in-chunks.yml
|
||||
- 6203-add-append-option-to-ipa-hostgroup.yml
|
||||
- 6206-dconf-booleans.yml
|
||||
- 6212-pipx-include-injected.yml
|
||||
- 6259-redhat_subscription-fix-force.yaml
|
||||
- 6267-homebrew-cask-upgrade-all-greedy.yml
|
||||
- 6269-mksysb-output.yml
|
||||
- 6275-redhat_subscription-fix-environments-centos.yaml
|
||||
- 6277-add-topics-gitlab-project.yml
|
||||
- 6286-icinga2_host-template-and-template-vars.yml
|
||||
- 6289-bugfix-flatpak-check-if-already-installed.yml
|
||||
- 6293-add-puppet-skip-tags-option.yaml
|
||||
- 6294-fix-one_vm-instantiation.yml
|
||||
- 6296-LanceNero-Terraform_statefile_check.yml
|
||||
- 6300-rundeck-modules-fixes-and-improvements.yml
|
||||
- 6303-pipx-fix-state-latest-and-add-system-site-packages.yml
|
||||
- 6308-pipx-add-system-site-packages.yml
|
||||
- 6312-nmcli-add-macvlan-connection-type.yml
|
||||
- 6318-add-form-flow.yml
|
||||
- 6319-redhat_subscription-fix-environment-parameter.yaml
|
||||
- 6334-dig-support-multiple-domains.yml
|
||||
- 6336-dconf-refactor.yml
|
||||
- 6351-support-env-variables-to-nmap-dynamic-inventoiry.yaml
|
||||
- 6361-snap-alias-regex-bugfix.yml
|
||||
- 6382-udm-dns-record-refactor.yml
|
||||
- 6383-deps-failed.yml
|
||||
- 6385-cpan-mh-feat.yml
|
||||
- 6401-rhsm_release-required.yml
|
||||
- 6404-ini_file-section.yml
|
||||
modules:
|
||||
- description: Query btrfs filesystem info
|
||||
name: btrfs_info
|
||||
namespace: ''
|
||||
- description: Manage btrfs subvolumes
|
||||
name: btrfs_subvolume
|
||||
namespace: ''
|
||||
- description: Manages Out-Of-Band controllers using Redfish APIs
|
||||
name: ilo_redfish_command
|
||||
namespace: ''
|
||||
- description: Allows administration of Keycloak client authorization scopes via
|
||||
Keycloak API
|
||||
name: keycloak_authz_authorization_scope
|
||||
namespace: ''
|
||||
- description: Set the type of aclientscope in realm or client via Keycloak API
|
||||
name: keycloak_clientscope_type
|
||||
namespace: ''
|
||||
release_date: '2023-04-24'
|
||||
  6.6.1:
    changes:
      bugfixes:
      - deps module utils - do not fail when dependency cannot be found (https://github.com/ansible-collections/community.general/pull/6479).
      - nmcli - fix bond option ``xmit_hash_policy`` (https://github.com/ansible-collections/community.general/pull/6527).
      - passwordstore lookup plugin - make compatible with ansible-core 2.16 (https://github.com/ansible-collections/community.general/pull/6447).
      - portage - fix ``changed_use`` and ``newuse`` not triggering rebuilds (https://github.com/ansible-collections/community.general/issues/6008,
        https://github.com/ansible-collections/community.general/pull/6548).
      - 'portage - update the logic for generating the emerge command arguments to
        ensure that ``withbdeps: false`` results in passing an ``n`` argument with
        the ``--with-bdeps`` emerge flag (https://github.com/ansible-collections/community.general/issues/6451,
        https://github.com/ansible-collections/community.general/pull/6456).'
      - proxmox_tasks_info - remove ``api_user`` + ``api_password`` constraint from
        ``required_together``, as it caused ``api_password`` to be required even when
        an API token parameter is used (https://github.com/ansible-collections/community.general/issues/6201).
      - puppet - the ``noop`` parameter was not being handled at all; it now works
        as documented (https://github.com/ansible-collections/community.general/issues/6452,
        https://github.com/ansible-collections/community.general/issues/6458).
      - terraform - fix broken ``warn()`` call (https://github.com/ansible-collections/community.general/pull/6497).
      - xfs_quota - in case of a project quota, the call to ``xfs_quota`` did not
        initialize/reset the project (https://github.com/ansible-collections/community.general/issues/5143).
      - zypper - added handling of zypper exit code 102. The changed state is now
        set correctly, and rc 102 is still preserved so it can be evaluated by the
        playbook (https://github.com/ansible-collections/community.general/pull/6534).
      minor_changes:
      - dconf - if ``gi.repository.GLib`` is missing, try to respawn in a Python interpreter
        that has it (https://github.com/ansible-collections/community.general/pull/6491).
      release_summary: Regular bugfix release.
    fragments:
    - 5143-fix-xfs-quota-project-init.yml
    - 6.6.1.yml
    - 6456-fix-portage-withbdeps-false.yml
    - 6458-puppet-noop.yml
    - 6491-dconf-respawn.yml
    - 6497-terraform-fix.yml
    - 6527-nmcli-bond-fix-xmit_hash_policy.yml
    - 6534-zypper-exitcode-102-handled.yaml
    - 6548-portage-changed_use-newuse.yml
    - 6554-proxmox-tasks-info-fix-required-password.yaml
    - deps.yml
    - passwordstore-lock.yml
    release_date: '2023-05-22'
  6.6.2:
    changes:
      bugfixes:
      - csv module utils - detect and remove Unicode BOM markers from incoming CSV
        content (https://github.com/ansible-collections/community.general/pull/6662).
      - gitlab_group - the module passed parameters to the API call even when they
        were not set. The module now filters out ``None`` values to remediate this
        (https://github.com/ansible-collections/community.general/pull/6712).
      - ini_file - fix a bug where the inactive options were not used when possible
        (https://github.com/ansible-collections/community.general/pull/6575).
      - keycloak module utils - fix ``is_struct_included`` handling of lists of lists/dictionaries
        (https://github.com/ansible-collections/community.general/pull/6688).
      - keycloak module utils - the function ``get_user_by_username`` now returns the
        user representation or ``None``, as stated in the documentation (https://github.com/ansible-collections/community.general/pull/6758).
      release_summary: Regular bugfix release.
    fragments:
    - 6.6.2.yml
    - 6568-fix-get-user-by-username-in-keycloak-module-utils.yml
    - 6662-csv-bom.yml
    - 6688-is-struct-included-bug-in-keycloak-py.yml
    - 6712-gitlab_group-filtered-for-none-values.yml
    - ini_file-use-inactive-options-when-possible.yml
    release_date: '2023-06-19'
@@ -5,7 +5,7 @@
namespace: community
name: general
version: 6.4.0
version: 6.6.2
readme: README.md
authors:
- Ansible (https://github.com/ansible)
@@ -22,6 +22,7 @@ DOCUMENTATION = '''
        - Path to the jail
    default: inventory_hostname
    vars:
      - name: inventory_hostname
      - name: ansible_host
      - name: ansible_jail_host
  remote_user:
@@ -24,6 +24,11 @@ options:
      - The password to use with I(bind_dn).
    type: str
    default: ''
  ca_path:
    description:
      - Set the path to PEM file with CA certs.
    type: path
    version_added: "6.5.0"
  dn:
    required: true
    description:
@@ -30,12 +30,27 @@ DOCUMENTATION = '''
    address:
        description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation.
        required: true
        env:
            - name: ANSIBLE_NMAP_ADDRESS
        version_added: 6.6.0
    exclude:
        description: list of addresses to exclude
        description:
            - List of addresses to exclude.
            - For example C(10.2.2.15-25) or C(10.2.2.15,10.2.2.16).
        type: list
        elements: string
        env:
            - name: ANSIBLE_NMAP_EXCLUDE
        version_added: 6.6.0
    port:
        description:
            - Only scan specific port or port range (C(-p)).
            - For example, you could pass C(22) for a single port, C(1-65535) for a range of ports,
              or C(U:53,137,T:21-25,139,8080,S:9) to check port 53 with UDP, ports 21-25 with TCP, port 9 with SCTP, and ports 137, 139, and 8080 with all.
        type: string
        version_added: 6.5.0
    ports:
        description: Enable/disable scanning for open ports
        description: Enable/disable scanning ports.
        type: boolean
        default: true
    ipv4:
@@ -60,6 +75,11 @@ DOCUMENTATION = '''
        type: boolean
        default: false
        version_added: 6.1.0
    open:
        description: Only scan for open (or possibly open) ports.
        type: boolean
        default: false
        version_added: 6.5.0
    dns_resolve:
        description: Whether to always (C(true)) or never (C(false)) do DNS resolution.
        type: boolean
@@ -81,6 +101,14 @@ plugin: community.general.nmap
sudo: true
strict: false
address: 192.168.0.0/24

# an nmap scan specifying ports and classifying results to an inventory group
plugin: community.general.nmap
address: 192.168.0.0/24
exclude: 192.168.0.1, web.example.com
port: 22, 443
groups:
    web_servers: "ports | selectattr('port', 'equalto', '443')"
'''

import os

@@ -171,6 +199,10 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
        if self._options['sudo']:
            cmd.insert(0, 'sudo')

        if self._options['port']:
            cmd.append('-p')
            cmd.append(self._options['port'])

        if not self._options['ports']:
            cmd.append('-sP')

@@ -194,6 +226,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
        if self._options['icmp_timestamp']:
            cmd.append('-PP')

        if self._options['open']:
            cmd.append('--open')

        cmd.append(self._options['address'])
        try:
            # execute
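The option handling in the two hunks above is what turns the documented plugin options into nmap command-line flags. A standalone sketch of that mapping, for reference (this is not the plugin itself; the flag handling simply mirrors the hunks above):

def build_nmap_cmd(opts):
    # opts mirrors the plugin options documented above
    cmd = ['nmap']
    if opts.get('sudo'):
        cmd.insert(0, 'sudo')
    if opts.get('port'):                    # e.g. '22,443' or '1-65535'
        cmd.extend(['-p', opts['port']])
    if not opts.get('ports', True):
        cmd.append('-sP')                   # host discovery only, no port scan
    if opts.get('open'):
        cmd.append('--open')                # keep only (possibly) open ports
    cmd.append(opts['address'])
    return cmd

# build_nmap_cmd({'address': '192.168.0.0/24', 'port': '22,443', 'open': True})
# -> ['nmap', '-p', '22,443', '--open', '192.168.0.0/24']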
@@ -78,6 +78,7 @@ compose:

import json
import ssl
from time import sleep

from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
@@ -138,21 +139,42 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
        self.conn = create_connection(
            '{0}://{1}/api/'.format(proto, xoa_api_host), sslopt=sslopt)

    CALL_TIMEOUT = 100
    """Number of 1/10ths of a second to wait before method call times out."""

    def call(self, method, params):
        """Calls a method on the XO server with the provided parameters."""
        id = self.pointer
        self.conn.send(json.dumps({
            'id': id,
            'jsonrpc': '2.0',
            'method': method,
            'params': params
        }))

        waited = 0
        while waited < self.CALL_TIMEOUT:
            response = json.loads(self.conn.recv())
            if 'id' in response and response['id'] == id:
                return response
            else:
                sleep(0.1)
                waited += 1

        raise AnsibleError(
            'Method call {method} timed out after {timeout} seconds.'.format(method=method, timeout=self.CALL_TIMEOUT / 10))

    def login(self, user, password):
        payload = {'id': self.pointer, 'jsonrpc': '2.0', 'method': 'session.signIn', 'params': {
            'username': user, 'password': password}}
        self.conn.send(json.dumps(payload))
        result = json.loads(self.conn.recv())
        result = self.call('session.signIn', {
            'username': user, 'password': password
        })

        if 'error' in result:
            raise AnsibleError(
                'Could not connect: {0}'.format(result['error']))

    def get_object(self, name):
        payload = {'id': self.pointer, 'jsonrpc': '2.0',
                   'method': 'xo.getAllObjects', 'params': {'filter': {'type': name}}}
        self.conn.send(json.dumps(payload))
        answer = json.loads(self.conn.recv())
        answer = self.call('xo.getAllObjects', {'filter': {'type': name}})

        if 'error' in answer:
            raise AnsibleError(
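The refactor above funnels login() and get_object() through the new call() helper, which pairs each JSON-RPC request with the response carrying the same id. A minimal standalone sketch of that request/poll pattern (illustrative names; conn is assumed to expose blocking send()/recv() like the websocket connection above):

import json
from time import sleep

def rpc_call(conn, req_id, method, params, ticks=100):
    conn.send(json.dumps({'id': req_id, 'jsonrpc': '2.0', 'method': method, 'params': params}))
    for _ in range(ticks):                 # 100 ticks of 0.1s = 10 seconds
        response = json.loads(conn.recv())
        if response.get('id') == req_id:   # skip unrelated server messages
            return response
        sleep(0.1)
    raise TimeoutError('%s timed out' % method)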
@@ -12,6 +12,8 @@ DOCUMENTATION = """
    requirements:
        - bw (command line utility)
        - be logged into bitwarden
        - bitwarden vault unlocked
        - C(BW_SESSION) environment variable set
    short_description: Retrieve secrets from Bitwarden
    version_added: 5.4.0
    description:
@@ -61,6 +61,7 @@ DOCUMENTATION = '''
        description:
            - Return empty result without empty strings, and return empty list instead of C(NXDOMAIN).
            - The default for this option will likely change to C(true) in the future.
            - This option will be forced to C(true) if multiple domains are queried.
        default: false
        type: bool
        version_added: 6.0.0
@@ -95,6 +96,21 @@ EXAMPLES = """
    msg: "MX record for gmail.com {{ item }}"
  with_items: "{{ lookup('community.general.dig', 'gmail.com./MX', wantlist=true) }}"

- name: Lookup multiple names at once
  ansible.builtin.debug:
    msg: "A record found {{ item }}"
  loop: "{{ query('community.general.dig', 'example.org.', 'example.com.', 'gmail.com.') }}"

- name: Lookup multiple names at once (from list variable)
  ansible.builtin.debug:
    msg: "A record found {{ item }}"
  loop: "{{ query('community.general.dig', *hosts) }}"
  vars:
    hosts:
      - example.org.
      - example.com.
      - gmail.com.

- ansible.builtin.debug:
    msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '192.0.2.5/PTR') }}"
- ansible.builtin.debug:
@@ -308,7 +324,7 @@ class LookupModule(LookupBase):
            edns_size = 4096
            myres.use_edns(0, ednsflags=dns.flags.DO, payload=edns_size)

        domain = None
        domains = []
        qtype = self.get_option('qtype')
        flat = self.get_option('flat')
        fail_on_error = self.get_option('fail_on_error')
@@ -365,63 +381,71 @@ class LookupModule(LookupBase):
            if '/' in t:
                try:
                    domain, qtype = t.split('/')
                    domains.append(domain)
                except Exception:
                    domain = t
                    domains.append(t)
            else:
                domain = t
                domains.append(t)

        # print "--- domain = {0} qtype={1} rdclass={2}".format(domain, qtype, rdclass)

        if qtype.upper() == 'PTR':
            reversed_domains = []
            for domain in domains:
                try:
                    n = dns.reversename.from_address(domain)
                    reversed_domains.append(n.to_text())
                except dns.exception.SyntaxError:
                    pass
                except Exception as e:
                    raise AnsibleError("dns.reversename unhandled exception %s" % to_native(e))
            domains = reversed_domains

        if len(domains) > 1:
            real_empty = True

        ret = []

        if qtype.upper() == 'PTR':
        for domain in domains:
            try:
                n = dns.reversename.from_address(domain)
                domain = n.to_text()
            except dns.exception.SyntaxError:
                pass
            except Exception as e:
                raise AnsibleError("dns.reversename unhandled exception %s" % to_native(e))
        answers = myres.query(domain, qtype, rdclass=rdclass)
        for rdata in answers:
            s = rdata.to_text()
            if qtype.upper() == 'TXT':
                s = s[1:-1]  # Strip outside quotes on TXT rdata

            try:
                answers = myres.query(domain, qtype, rdclass=rdclass)
                for rdata in answers:
                    s = rdata.to_text()
                    if qtype.upper() == 'TXT':
                        s = s[1:-1]  # Strip outside quotes on TXT rdata
                    if flat:
                        ret.append(s)
                    else:
                        try:
                            rd = make_rdata_dict(rdata)
                            rd['owner'] = answers.canonical_name.to_text()
                            rd['type'] = dns.rdatatype.to_text(rdata.rdtype)
                            rd['ttl'] = answers.rrset.ttl
                            rd['class'] = dns.rdataclass.to_text(rdata.rdclass)

            if flat:
                ret.append(s)
            else:
                try:
                    rd = make_rdata_dict(rdata)
                    rd['owner'] = answers.canonical_name.to_text()
                    rd['type'] = dns.rdatatype.to_text(rdata.rdtype)
                    rd['ttl'] = answers.rrset.ttl
                    rd['class'] = dns.rdataclass.to_text(rdata.rdclass)
                            ret.append(rd)
                        except Exception as err:
                            if fail_on_error:
                                raise AnsibleError("Lookup failed: %s" % str(err))
                            ret.append(str(err))

                    ret.append(rd)
                except Exception as err:
                    if fail_on_error:
                        raise AnsibleError("Lookup failed: %s" % str(err))
                    ret.append(str(err))

            except dns.resolver.NXDOMAIN as err:
                if fail_on_error:
                    raise AnsibleError("Lookup failed: %s" % str(err))
                if not real_empty:
                    ret.append('NXDOMAIN')
            except dns.resolver.NoAnswer as err:
                if fail_on_error:
                    raise AnsibleError("Lookup failed: %s" % str(err))
                if not real_empty:
                    ret.append("")
            except dns.resolver.Timeout as err:
                if fail_on_error:
                    raise AnsibleError("Lookup failed: %s" % str(err))
                if not real_empty:
                    ret.append("")
            except dns.exception.DNSException as err:
                raise AnsibleError("dns.resolver unhandled exception %s" % to_native(err))
        except dns.resolver.NXDOMAIN as err:
            if fail_on_error:
                raise AnsibleError("Lookup failed: %s" % str(err))
            if not real_empty:
                ret.append('NXDOMAIN')
        except dns.resolver.NoAnswer as err:
            if fail_on_error:
                raise AnsibleError("Lookup failed: %s" % str(err))
            if not real_empty:
                ret.append("")
        except dns.resolver.Timeout as err:
            if fail_on_error:
                raise AnsibleError("Lookup failed: %s" % str(err))
            if not real_empty:
                ret.append("")
        except dns.exception.DNSException as err:
            raise AnsibleError("dns.resolver unhandled exception %s" % to_native(err))

        return ret
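One behavioural detail of the rewrite above is worth spelling out: as documented, real_empty is forced on as soon as more than one domain is queried, so NXDOMAIN/NoAnswer/Timeout simply contribute nothing to the combined result instead of inserting placeholders. A small standalone illustration of that aggregation (None stands in for a failed resolution):

def combine(per_domain_answers, real_empty=False):
    real_empty = real_empty or len(per_domain_answers) > 1
    ret = []
    for answers in per_domain_answers:
        if answers is None:                # NXDOMAIN / NoAnswer / Timeout
            if not real_empty:
                ret.append('NXDOMAIN')     # single-domain legacy placeholder
            continue
        ret.extend(answers)
    return ret

# combine([['192.0.2.1']]) == ['192.0.2.1']
# combine([['192.0.2.1'], None]) == ['192.0.2.1']   (no placeholder with two domains)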
plugins/lookup/merge_variables.py (new file, 212 lines)
@@ -0,0 +1,212 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Thales Netherlands
# Copyright (c) 2021, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = """
    author:
        - Roy Lenferink (@rlenferink)
        - Mark Ettema (@m-a-r-k-e)
    name: merge_variables
    short_description: merge variables with a certain suffix
    description:
        - This lookup returns the merged result of all variables in scope whose names match the given prefixes,
          suffixes, or regular expressions.
    version_added: 6.5.0
    options:
        _terms:
            description:
                - Depending on the value of I(pattern_type), this is a list of prefixes, suffixes, or regular expressions
                  that will be used to match all variables that should be merged.
            required: true
            type: list
            elements: str
        pattern_type:
            description:
                - Change the way of searching for the specified pattern.
            type: str
            default: 'regex'
            choices:
                - prefix
                - suffix
                - regex
            env:
                - name: ANSIBLE_MERGE_VARIABLES_PATTERN_TYPE
            ini:
                - section: merge_variables_lookup
                  key: pattern_type
        initial_value:
            description:
                - An initial value to start with.
            type: raw
        override:
            description:
                - Return an error, print a warning, or ignore it when a key will be overwritten.
                - The default behavior C(error) makes the plugin fail when a key would be overwritten.
                - When C(warn) or C(ignore) is used, note that the variables are sorted by name before being merged.
                  Keys of later variables in this order overwrite keys of the same name from earlier variables.
                  To avoid potential confusion, it is better to use I(override=error) whenever possible.
            type: str
            default: 'error'
            choices:
                - error
                - warn
                - ignore
            env:
                - name: ANSIBLE_MERGE_VARIABLES_OVERRIDE
            ini:
                - section: merge_variables_lookup
                  key: override
"""

EXAMPLES = """
# Some example variables, they can be defined anywhere as long as they are in scope
test_init_list:
  - "list init item 1"
  - "list init item 2"

testa__test_list:
  - "test a item 1"

testb__test_list:
  - "test b item 1"

testa__test_dict:
  ports:
    - 1

testb__test_dict:
  ports:
    - 3


# Merge variables that end with '__test_dict' and store the result in a variable 'example_a'
example_a: "{{ lookup('community.general.merge_variables', '__test_dict', pattern_type='suffix') }}"

# The variable example_a now contains:
# ports:
#   - 1
#   - 3


# Merge variables that match the '^.+__test_list$' regular expression, starting with an initial value and store the
# result in a variable 'example_b'
example_b: "{{ lookup('community.general.merge_variables', '^.+__test_list$', initial_value=test_init_list) }}"

# The variable example_b now contains:
#   - "list init item 1"
#   - "list init item 2"
#   - "test a item 1"
#   - "test b item 1"
"""

RETURN = """
_raw:
    description: In case the search matches list items, a list will be returned. In case the search matches dicts, a
                 dict will be returned.
    type: raw
    elements: raw
"""
import re

from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.display import Display

display = Display()


def _verify_and_get_type(variable):
    if isinstance(variable, list):
        return "list"
    elif isinstance(variable, dict):
        return "dict"
    else:
        raise AnsibleError("Not supported type detected, variable must be a list or a dict")


class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        self.set_options(direct=kwargs)
        initial_value = self.get_option("initial_value", None)
        self._override = self.get_option('override', 'error')
        self._pattern_type = self.get_option('pattern_type', 'regex')

        ret = []
        for term in terms:
            if not isinstance(term, str):
                raise AnsibleError("Non-string type '{0}' passed, only 'str' types are allowed!".format(type(term)))

            ret.append(self._merge_vars(term, initial_value, variables))

        return ret

    def _var_matches(self, key, search_pattern):
        if self._pattern_type == "prefix":
            return key.startswith(search_pattern)
        elif self._pattern_type == "suffix":
            return key.endswith(search_pattern)
        elif self._pattern_type == "regex":
            matcher = re.compile(search_pattern)
            return matcher.search(key)

        return False

    def _merge_vars(self, search_pattern, initial_value, variables):
        display.vvv("Merge variables with {0}: {1}".format(self._pattern_type, search_pattern))
        var_merge_names = sorted([key for key in variables.keys() if self._var_matches(key, search_pattern)])
        display.vvv("The following variables will be merged: {0}".format(var_merge_names))

        prev_var_type = None
        result = None

        if initial_value is not None:
            prev_var_type = _verify_and_get_type(initial_value)
            result = initial_value

        for var_name in var_merge_names:
            var_value = self._templar.template(variables[var_name])  # Render jinja2 templates
            var_type = _verify_and_get_type(var_value)

            if prev_var_type is None:
                prev_var_type = var_type
            elif prev_var_type != var_type:
                raise AnsibleError("Unable to merge, not all variables are of the same type")

            if result is None:
                result = var_value
                continue

            if var_type == "dict":
                result = self._merge_dict(var_value, result, [var_name])
            else:  # var_type == "list"
                result += var_value

        return result

    def _merge_dict(self, src, dest, path):
        for key, value in src.items():
            if isinstance(value, dict):
                node = dest.setdefault(key, {})
                self._merge_dict(value, node, path + [key])
            elif isinstance(value, list) and key in dest:
                dest[key] += value
            else:
                if (key in dest) and dest[key] != value:
                    msg = "The key '{0}' with value '{1}' will be overwritten with value '{2}' from '{3}.{0}'".format(
                        key, dest[key], value, ".".join(path))

                    if self._override == "error":
                        raise AnsibleError(msg)
                    if self._override == "warn":
                        display.warning(msg)

                dest[key] = value

        return dest
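To make the merge rules above concrete: lists are concatenated, dicts are merged recursively, and conflicting scalar keys follow the override policy (error by default). A standalone illustration of the _merge_dict semantics, with the override handling omitted:

import copy

def merge(src, dest):                      # mirrors _merge_dict above
    for key, value in src.items():
        if isinstance(value, dict):
            merge(value, dest.setdefault(key, {}))
        elif isinstance(value, list) and key in dest:
            dest[key] += value             # lists are concatenated
        else:
            dest[key] = value              # scalars overwrite
    return dest

a = {'ports': [1], 'opts': {'x': 1}}
b = {'ports': [3], 'opts': {'y': 2}}
print(merge(b, copy.deepcopy(a)))          # {'ports': [1, 3], 'opts': {'x': 1, 'y': 2}}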
@@ -209,7 +209,6 @@ import time
import yaml

from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils.common.file import FileLock
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.utils.display import Display
@@ -217,6 +216,8 @@ from ansible.utils.encrypt import random_password
from ansible.plugins.lookup import LookupBase
from ansible import constants as C

from ansible_collections.community.general.plugins.module_utils._filelock import FileLock

display = Display()
plugins/module_utils/_filelock.py (new file, 109 lines)
@@ -0,0 +1,109 @@
# Copyright (c) 2018, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
# SPDX-License-Identifier: BSD-2-Clause

# NOTE:
# This has been vendored from ansible.module_utils.common.file. This code has been removed from there for ansible-core 2.16.

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import stat
import time
import fcntl
import sys

from contextlib import contextmanager


class LockTimeout(Exception):
    pass


class FileLock:
    '''
    Currently FileLock is implemented via fcntl.flock on a lock file, however this
    behaviour may change in the future. Avoid mixing lock types fcntl.flock,
    fcntl.lockf and module_utils.common.file.FileLock as it will certainly cause
    unwanted and/or unexpected behaviour
    '''
    def __init__(self):
        self.lockfd = None

    @contextmanager
    def lock_file(self, path, tmpdir, lock_timeout=None):
        '''
        Context for lock acquisition
        '''
        try:
            self.set_lock(path, tmpdir, lock_timeout)
            yield
        finally:
            self.unlock()

    def set_lock(self, path, tmpdir, lock_timeout=None):
        '''
        Create a lock file based on path with flock to prevent other processes
        using given path.
        Please note that currently file locking only works when it's executed by
        the same user, I.E single user scenarios

        :kw path: Path (file) to lock
        :kw tmpdir: Path where to place the temporary .lock file
        :kw lock_timeout:
            Wait n seconds for lock acquisition, fail if timeout is reached.
            0 = Do not wait, fail if lock cannot be acquired immediately,
            Default is None, wait indefinitely until lock is released.
        :returns: True
        '''
        lock_path = os.path.join(tmpdir, 'ansible-{0}.lock'.format(os.path.basename(path)))
        l_wait = 0.1
        r_exception = IOError
        if sys.version_info[0] == 3:
            r_exception = BlockingIOError

        self.lockfd = open(lock_path, 'w')

        if lock_timeout <= 0:
            fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD)
            return True

        if lock_timeout:
            e_secs = 0
            while e_secs < lock_timeout:
                try:
                    fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                    os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD)
                    return True
                except r_exception:
                    time.sleep(l_wait)
                    e_secs += l_wait
                    continue

            self.lockfd.close()
            raise LockTimeout('{0} sec'.format(lock_timeout))

        fcntl.flock(self.lockfd, fcntl.LOCK_EX)
        os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD)

        return True

    def unlock(self):
        '''
        Make sure lock file is available for everyone and Unlock the file descriptor
        locked by set_lock

        :returns: True
        '''
        if not self.lockfd:
            return True

        try:
            fcntl.flock(self.lockfd, fcntl.LOCK_UN)
            self.lockfd.close()
        except ValueError:  # file wasn't opened, let context manager fail gracefully
            pass

        return True
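Typical use of the vendored class, as the passwordstore plugin does via its import above (paths here are hypothetical):

from ansible_collections.community.general.plugins.module_utils._filelock import FileLock

lock = FileLock()
with lock.lock_file('/var/lib/app/passwords.yml', '/tmp', lock_timeout=5):
    # the ansible-passwords.yml.lock file in /tmp guards this critical section
    pass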
plugins/module_utils/_stormssh.py (new file, 258 lines)
@@ -0,0 +1,258 @@
# -*- coding: utf-8 -*-
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is based on
# the config parser from here: https://github.com/emre/storm/blob/master/storm/parsers/ssh_config_parser.py
# Copyright (C) <2013> <Emre Yilmaz>
# SPDX-License-Identifier: MIT

from __future__ import (absolute_import, division, print_function)
import os
import re
import traceback
from operator import itemgetter

__metaclass__ = type

try:
    from paramiko.config import SSHConfig
except ImportError:
    SSHConfig = object
    HAS_PARAMIKO = False
    PARAMIKO_IMPORT_ERROR = traceback.format_exc()
else:
    HAS_PARAMIKO = True
    PARAMIKO_IMPORT_ERROR = None


class StormConfig(SSHConfig):
    def parse(self, file_obj):
        """
        Read an OpenSSH config from the given file object.
        @param file_obj: a file-like object to read the config file from
        @type file_obj: file
        """
        order = 1
        host = {"host": ['*'], "config": {}, }
        for line in file_obj:
            line = line.rstrip('\n').lstrip()
            if line == '':
                self._config.append({
                    'type': 'empty_line',
                    'value': line,
                    'host': '',
                    'order': order,
                })
                order += 1
                continue

            if line.startswith('#'):
                self._config.append({
                    'type': 'comment',
                    'value': line,
                    'host': '',
                    'order': order,
                })
                order += 1
                continue

            if '=' in line:
                # Ensure ProxyCommand gets properly split
                if line.lower().strip().startswith('proxycommand'):
                    proxy_re = re.compile(r"^(proxycommand)\s*=*\s*(.*)", re.I)
                    match = proxy_re.match(line)
                    key, value = match.group(1).lower(), match.group(2)
                else:
                    key, value = line.split('=', 1)
                    key = key.strip().lower()
            else:
                # find first whitespace, and split there
                i = 0
                while (i < len(line)) and not line[i].isspace():
                    i += 1
                if i == len(line):
                    raise Exception('Unparsable line: %r' % line)
                key = line[:i].lower()
                value = line[i:].lstrip()
            if key == 'host':
                self._config.append(host)
                value = value.split()
                host = {
                    key: value,
                    'config': {},
                    'type': 'entry',
                    'order': order
                }
                order += 1
            elif key in ['identityfile', 'localforward', 'remoteforward']:
                if key in host['config']:
                    host['config'][key].append(value)
                else:
                    host['config'][key] = [value]
            elif key not in host['config']:
                host['config'].update({key: value})
        self._config.append(host)


class ConfigParser(object):
    """
    Config parser for ~/.ssh/config files.
    """

    def __init__(self, ssh_config_file=None):
        if not ssh_config_file:
            ssh_config_file = self.get_default_ssh_config_file()

        self.defaults = {}

        self.ssh_config_file = ssh_config_file

        if not os.path.exists(self.ssh_config_file):
            if not os.path.exists(os.path.dirname(self.ssh_config_file)):
                os.makedirs(os.path.dirname(self.ssh_config_file))
            open(self.ssh_config_file, 'w+').close()
            os.chmod(self.ssh_config_file, 0o600)

        self.config_data = []

    def get_default_ssh_config_file(self):
        return os.path.expanduser("~/.ssh/config")

    def load(self):
        config = StormConfig()

        with open(self.ssh_config_file) as fd:
            config.parse(fd)

        for entry in config.__dict__.get("_config"):
            if entry.get("host") == ["*"]:
                self.defaults.update(entry.get("config"))

            if entry.get("type") in ["comment", "empty_line"]:
                self.config_data.append(entry)
                continue

            host_item = {
                'host': entry["host"][0],
                'options': entry.get("config"),
                'type': 'entry',
                'order': entry.get("order", 0),
            }

            if len(entry["host"]) > 1:
                host_item.update({
                    'host': " ".join(entry["host"]),
                })
            # minor bug in paramiko.SSHConfig that duplicates
            # "Host *" entries.
            if entry.get("config") and len(entry.get("config")) > 0:
                self.config_data.append(host_item)

        return self.config_data

    def add_host(self, host, options):
        self.config_data.append({
            'host': host,
            'options': options,
            'order': self.get_last_index(),
        })

        return self

    def update_host(self, host, options, use_regex=False):
        for index, host_entry in enumerate(self.config_data):
            if host_entry.get("host") == host or \
                    (use_regex and re.match(host, host_entry.get("host"))):

                if 'deleted_fields' in options:
                    deleted_fields = options.pop("deleted_fields")
                    for deleted_field in deleted_fields:
                        del self.config_data[index]["options"][deleted_field]

                self.config_data[index]["options"].update(options)

        return self

    def search_host(self, search_string):
        results = []
        for host_entry in self.config_data:
            if host_entry.get("type") != 'entry':
                continue
            if host_entry.get("host") == "*":
                continue

            searchable_information = host_entry.get("host")
            for key, value in host_entry.get("options").items():
                if isinstance(value, list):
                    value = " ".join(value)
                if isinstance(value, int):
                    value = str(value)

                searchable_information += " " + value

            if search_string in searchable_information:
                results.append(host_entry)

        return results

    def delete_host(self, host):
        found = 0
        for index, host_entry in enumerate(self.config_data):
            if host_entry.get("host") == host:
                del self.config_data[index]
                found += 1

        if found == 0:
            raise ValueError('No host found')
        return self

    def delete_all_hosts(self):
        self.config_data = []
        self.write_to_ssh_config()

        return self

    def dump(self):
        if len(self.config_data) < 1:
            return

        file_content = ""
        self.config_data = sorted(self.config_data, key=itemgetter("order"))

        for host_item in self.config_data:
            if host_item.get("type") in ['comment', 'empty_line']:
                file_content += host_item.get("value") + "\n"
                continue
            host_item_content = "Host {0}\n".format(host_item.get("host"))
            for key, value in host_item.get("options").items():
                if isinstance(value, list):
                    sub_content = ""
                    for value_ in value:
                        sub_content += "  {0} {1}\n".format(
                            key, value_
                        )
                    host_item_content += sub_content
                else:
                    host_item_content += "  {0} {1}\n".format(
                        key, value
                    )
            file_content += host_item_content

        return file_content

    def write_to_ssh_config(self):
        with open(self.ssh_config_file, 'w+') as f:
            data = self.dump()
            if data:
                f.write(data)
        return self

    def get_last_index(self):
        last_index = 0
        indexes = []
        for item in self.config_data:
            if item.get("order"):
                indexes.append(item.get("order"))
        if len(indexes) > 0:
            last_index = max(indexes)

        return last_index
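A short round trip with the vendored parser, for orientation (hypothetical host and file):

parser = ConfigParser('/tmp/demo_ssh_config')   # defaults to ~/.ssh/config when omitted
parser.load()
parser.add_host('db1', {'hostname': 'db1.internal', 'user': 'deploy'})
parser.update_host('db1', {'port': '2222'})
parser.write_to_ssh_config()                    # emits a "Host db1" block with indented options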
plugins/module_utils/btrfs.py (new file, 464 lines)
@@ -0,0 +1,464 @@
# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.module_utils.common.text.converters import to_bytes
import re
import os


def normalize_subvolume_path(path):
    """
    Normalizes btrfs subvolume paths to ensure exactly one leading slash, no trailing slashes and no consecutive slashes.
    In addition, if the path is prefixed with a leading <FS_TREE>, this value is removed.
    """
    fstree_stripped = re.sub(r'^<FS_TREE>', '', path)
    result = re.sub(r'/+$', '', re.sub(r'/+', '/', '/' + fstree_stripped))
    return result if len(result) > 0 else '/'
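The normalization rules above, shown as plain assertions:

assert normalize_subvolume_path('<FS_TREE>/@home//projects/') == '/@home/projects'
assert normalize_subvolume_path('snapshots') == '/snapshots'
assert normalize_subvolume_path('<FS_TREE>') == '/'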
class BtrfsModuleException(Exception):
    pass


class BtrfsCommands(object):
    """
    Provides access to a subset of the Btrfs command line
    """

    def __init__(self, module):
        self.__module = module
        self.__btrfs = self.__module.get_bin_path("btrfs", required=True)

    def filesystem_show(self):
        command = "%s filesystem show -d" % (self.__btrfs)
        result = self.__module.run_command(command, check_rc=True)
        stdout = [x.strip() for x in result[1].splitlines()]
        filesystems = []
        current = None
        for line in stdout:
            if line.startswith('Label'):
                current = self.__parse_filesystem(line)
                filesystems.append(current)
            elif line.startswith('devid'):
                current['devices'].append(self.__parse_filesystem_device(line))
        return filesystems

    def __parse_filesystem(self, line):
        label = re.sub(r'\s*uuid:.*$', '', re.sub(r'^Label:\s*', '', line))
        id = re.sub(r'^.*uuid:\s*', '', line)

        filesystem = {}
        filesystem['label'] = label.strip("'") if label != 'none' else None
        filesystem['uuid'] = id
        filesystem['devices'] = []
        filesystem['mountpoints'] = []
        filesystem['subvolumes'] = []
        filesystem['default_subvolid'] = None
        return filesystem

    def __parse_filesystem_device(self, line):
        return re.sub(r'^.*path\s', '', line)

    def subvolumes_list(self, filesystem_path):
        command = "%s subvolume list -tap %s" % (self.__btrfs, filesystem_path)
        result = self.__module.run_command(command, check_rc=True)
        stdout = [x.split('\t') for x in result[1].splitlines()]
        subvolumes = [{'id': 5, 'parent': None, 'path': '/'}]
        if len(stdout) > 2:
            subvolumes.extend([self.__parse_subvolume_list_record(x) for x in stdout[2:]])
        return subvolumes

    def __parse_subvolume_list_record(self, item):
        return {
            'id': int(item[0]),
            'parent': int(item[2]),
            'path': normalize_subvolume_path(item[5]),
        }

    def subvolume_get_default(self, filesystem_path):
        command = [self.__btrfs, "subvolume", "get-default", to_bytes(filesystem_path)]
        result = self.__module.run_command(command, check_rc=True)
        # ID [n] ...
        return int(result[1].strip().split()[1])

    def subvolume_set_default(self, filesystem_path, subvolume_id):
        command = [self.__btrfs, "subvolume", "set-default", str(subvolume_id), to_bytes(filesystem_path)]
        result = self.__module.run_command(command, check_rc=True)

    def subvolume_create(self, subvolume_path):
        command = [self.__btrfs, "subvolume", "create", to_bytes(subvolume_path)]
        result = self.__module.run_command(command, check_rc=True)

    def subvolume_snapshot(self, snapshot_source, snapshot_destination):
        command = [self.__btrfs, "subvolume", "snapshot", to_bytes(snapshot_source), to_bytes(snapshot_destination)]
        result = self.__module.run_command(command, check_rc=True)

    def subvolume_delete(self, subvolume_path):
        command = [self.__btrfs, "subvolume", "delete", to_bytes(subvolume_path)]
        result = self.__module.run_command(command, check_rc=True)


class BtrfsInfoProvider(object):
    """
    Utility providing details of the currently available btrfs filesystems
    """

    def __init__(self, module):
        self.__module = module
        self.__btrfs_api = BtrfsCommands(module)
        self.__findmnt_path = self.__module.get_bin_path("findmnt", required=True)

    def get_filesystems(self):
        filesystems = self.__btrfs_api.filesystem_show()
        mountpoints = self.__find_mountpoints()
        for filesystem in filesystems:
            device_mountpoints = self.__filter_mountpoints_for_devices(mountpoints, filesystem['devices'])
            filesystem['mountpoints'] = device_mountpoints

            if len(device_mountpoints) > 0:
                # any path within the filesystem can be used to query metadata
                mountpoint = device_mountpoints[0]['mountpoint']
                filesystem['subvolumes'] = self.get_subvolumes(mountpoint)
                filesystem['default_subvolid'] = self.get_default_subvolume_id(mountpoint)

        return filesystems

    def get_mountpoints(self, filesystem_devices):
        mountpoints = self.__find_mountpoints()
        return self.__filter_mountpoints_for_devices(mountpoints, filesystem_devices)

    def get_subvolumes(self, filesystem_path):
        return self.__btrfs_api.subvolumes_list(filesystem_path)

    def get_default_subvolume_id(self, filesystem_path):
        return self.__btrfs_api.subvolume_get_default(filesystem_path)

    def __filter_mountpoints_for_devices(self, mountpoints, devices):
        return [m for m in mountpoints if (m['device'] in devices)]

    def __find_mountpoints(self):
        command = "%s -t btrfs -nvP" % self.__findmnt_path
        result = self.__module.run_command(command)
        mountpoints = []
        if result[0] == 0:
            lines = result[1].splitlines()
            for line in lines:
                mountpoint = self.__parse_mountpoint_pairs(line)
                mountpoints.append(mountpoint)
        return mountpoints

    def __parse_mountpoint_pairs(self, line):
        pattern = re.compile(r'^TARGET="(?P<target>.*)"\s+SOURCE="(?P<source>.*)"\s+FSTYPE="(?P<fstype>.*)"\s+OPTIONS="(?P<options>.*)"\s*$')
        match = pattern.search(line)
        if match is not None:
            groups = match.groupdict()

            return {
                'mountpoint': groups['target'],
                'device': groups['source'],
                'subvolid': self.__extract_mount_subvolid(groups['options']),
            }
        else:
            raise BtrfsModuleException("Failed to parse findmnt result for line: '%s'" % line)

    def __extract_mount_subvolid(self, mount_options):
        for option in mount_options.split(','):
            if option.startswith('subvolid='):
                return int(option[len('subvolid='):])
        raise BtrfsModuleException("Failed to find subvolid for mountpoint in options '%s'" % mount_options)


class BtrfsSubvolume(object):
    """
    Wrapper class providing convenience methods for inspection of a btrfs subvolume
    """

    def __init__(self, filesystem, subvolume_id):
        self.__filesystem = filesystem
        self.__subvolume_id = subvolume_id

    def get_filesystem(self):
        return self.__filesystem

    def is_mounted(self):
        mountpoints = self.get_mountpoints()
        return mountpoints is not None and len(mountpoints) > 0

    def is_filesystem_root(self):
        return 5 == self.__subvolume_id

    def is_filesystem_default(self):
        return self.__filesystem.default_subvolid == self.__subvolume_id

    def get_mounted_path(self):
        mountpoints = self.get_mountpoints()
        if mountpoints is not None and len(mountpoints) > 0:
            return mountpoints[0]
        elif self.parent is not None:
            parent = self.__filesystem.get_subvolume_by_id(self.parent)
            parent_path = parent.get_mounted_path()
            if parent_path is not None:
                return parent_path + os.path.sep + self.name
        else:
            return None

    def get_mountpoints(self):
        return self.__filesystem.get_mountpoints_by_subvolume_id(self.__subvolume_id)

    def get_child_relative_path(self, absolute_child_path):
        """
        Get the relative path from this subvolume to the named child subvolume.
        The provided parameter is expected to be normalized as by normalize_subvolume_path.
        """
        path = self.path
        if absolute_child_path.startswith(path):
            relative = absolute_child_path[len(path):]
            return re.sub(r'^/*', '', relative)
        else:
            raise BtrfsModuleException("Path '%s' doesn't start with '%s'" % (absolute_child_path, path))

    def get_parent_subvolume(self):
        parent_id = self.parent
        return self.__filesystem.get_subvolume_by_id(parent_id) if parent_id is not None else None

    def get_child_subvolumes(self):
        return self.__filesystem.get_subvolume_children(self.__subvolume_id)

    @property
    def __info(self):
        return self.__filesystem.get_subvolume_info_for_id(self.__subvolume_id)

    @property
    def id(self):
        return self.__subvolume_id

    @property
    def name(self):
        return self.path.split('/').pop()

    @property
    def path(self):
        return self.__info['path']

    @property
    def parent(self):
        return self.__info['parent']


class BtrfsFilesystem(object):
    """
    Wrapper class providing convenience methods for inspection of a btrfs filesystem
    """

    def __init__(self, info, provider, module):
        self.__provider = provider

        # constant for module execution
        self.__uuid = info['uuid']
        self.__label = info['label']
        self.__devices = info['devices']

        # refreshable
        self.__default_subvolid = info['default_subvolid'] if 'default_subvolid' in info else None
        self.__update_mountpoints(info['mountpoints'] if 'mountpoints' in info else [])
        self.__update_subvolumes(info['subvolumes'] if 'subvolumes' in info else [])

    @property
    def uuid(self):
        return self.__uuid

    @property
    def label(self):
        return self.__label

    @property
    def default_subvolid(self):
        return self.__default_subvolid

    @property
    def devices(self):
        return list(self.__devices)

    def refresh(self):
        self.refresh_mountpoints()
        self.refresh_subvolumes()
        self.refresh_default_subvolume()

    def refresh_mountpoints(self):
        mountpoints = self.__provider.get_mountpoints(list(self.__devices))
        self.__update_mountpoints(mountpoints)

    def __update_mountpoints(self, mountpoints):
        self.__mountpoints = dict()
        for i in mountpoints:
            subvolid = i['subvolid']
            mountpoint = i['mountpoint']
            if subvolid not in self.__mountpoints:
                self.__mountpoints[subvolid] = []
            self.__mountpoints[subvolid].append(mountpoint)

    def refresh_subvolumes(self):
        filesystem_path = self.get_any_mountpoint()
        if filesystem_path is not None:
            subvolumes = self.__provider.get_subvolumes(filesystem_path)
            self.__update_subvolumes(subvolumes)

    def __update_subvolumes(self, subvolumes):
        # TODO strategy for retaining information on deleted subvolumes?
        self.__subvolumes = dict()
        for subvolume in subvolumes:
            self.__subvolumes[subvolume['id']] = subvolume

    def refresh_default_subvolume(self):
        filesystem_path = self.get_any_mountpoint()
        if filesystem_path is not None:
            self.__default_subvolid = self.__provider.get_default_subvolume_id(filesystem_path)

    def contains_device(self, device):
        return device in self.__devices

    def contains_subvolume(self, subvolume):
        return self.get_subvolume_by_name(subvolume) is not None

    def get_subvolume_by_id(self, subvolume_id):
        return BtrfsSubvolume(self, subvolume_id) if subvolume_id in self.__subvolumes else None

    def get_subvolume_info_for_id(self, subvolume_id):
        return self.__subvolumes[subvolume_id] if subvolume_id in self.__subvolumes else None

    def get_subvolume_by_name(self, subvolume):
        for subvolume_info in self.__subvolumes.values():
            if subvolume_info['path'] == subvolume:
                return BtrfsSubvolume(self, subvolume_info['id'])
        return None

    def get_any_mountpoint(self):
        for subvol_mountpoints in self.__mountpoints.values():
            if len(subvol_mountpoints) > 0:
                return subvol_mountpoints[0]
        # maybe error?
        return None

    def get_any_mounted_subvolume(self):
        for subvolid, subvol_mountpoints in self.__mountpoints.items():
            if len(subvol_mountpoints) > 0:
                return self.get_subvolume_by_id(subvolid)
        return None

    def get_mountpoints_by_subvolume_id(self, subvolume_id):
        return self.__mountpoints[subvolume_id] if subvolume_id in self.__mountpoints else []

    def get_nearest_subvolume(self, subvolume):
        """Return the identified subvolume if existing, else the closest matching parent"""
        subvolumes_by_path = self.__get_subvolumes_by_path()
        while len(subvolume) > 1:
            if subvolume in subvolumes_by_path:
                return BtrfsSubvolume(self, subvolumes_by_path[subvolume]['id'])
            else:
                subvolume = re.sub(r'/[^/]+$', '', subvolume)

        return BtrfsSubvolume(self, 5)

    def get_mountpath_as_child(self, subvolume_name):
        """Find a path to the target subvolume through a mounted ancestor"""
        nearest = self.get_nearest_subvolume(subvolume_name)
        if nearest.path == subvolume_name:
            nearest = nearest.get_parent_subvolume()
        if nearest is None or nearest.get_mounted_path() is None:
            raise BtrfsModuleException("Failed to find a path '%s' through a mounted parent subvolume" % subvolume_name)
        else:
            return nearest.get_mounted_path() + os.path.sep + nearest.get_child_relative_path(subvolume_name)

    def get_subvolume_children(self, subvolume_id):
        return [BtrfsSubvolume(self, x['id']) for x in self.__subvolumes.values() if x['parent'] == subvolume_id]

    def __get_subvolumes_by_path(self):
        result = {}
        for s in self.__subvolumes.values():
            path = s['path']
            result[path] = s
        return result

    def is_mounted(self):
        return self.__mountpoints is not None and len(self.__mountpoints) > 0

    def get_summary(self):
        subvolumes = []
        sources = self.__subvolumes.values() if self.__subvolumes is not None else []
        for subvolume in sources:
            id = subvolume['id']
            subvolumes.append({
                'id': id,
                'path': subvolume['path'],
                'parent': subvolume['parent'],
                'mountpoints': self.get_mountpoints_by_subvolume_id(id),
            })

        return {
            'default_subvolume': self.__default_subvolid,
            'devices': self.__devices,
            'label': self.__label,
            'uuid': self.__uuid,
            'subvolumes': subvolumes,
        }


class BtrfsFilesystemsProvider(object):
    """
    Provides methods to query available btrfs filesystems
    """

    def __init__(self, module):
        self.__module = module
        self.__provider = BtrfsInfoProvider(module)
        self.__filesystems = None

    def get_matching_filesystem(self, criteria):
        if criteria['device'] is not None:
            criteria['device'] = os.path.realpath(criteria['device'])

        self.__check_init()
        matching = [f for f in self.__filesystems.values() if self.__filesystem_matches_criteria(f, criteria)]
        if len(matching) == 1:
            return matching[0]
        else:
            raise BtrfsModuleException("Found %d filesystems matching criteria uuid=%s label=%s device=%s" % (
                len(matching),
                criteria['uuid'],
                criteria['label'],
                criteria['device']
            ))

    def __filesystem_matches_criteria(self, filesystem, criteria):
        return ((criteria['uuid'] is None or filesystem.uuid == criteria['uuid']) and
                (criteria['label'] is None or filesystem.label == criteria['label']) and
                (criteria['device'] is None or filesystem.contains_device(criteria['device'])))

    def get_filesystem_for_device(self, device):
        real_device = os.path.realpath(device)
        self.__check_init()
        for fs in self.__filesystems.values():
            if fs.contains_device(real_device):
                return fs
        return None

    def get_filesystems(self):
        self.__check_init()
        return list(self.__filesystems.values())

    def __check_init(self):
        if self.__filesystems is None:
            self.__filesystems = dict()
            for f in self.__provider.get_filesystems():
                uuid = f['uuid']
                self.__filesystems[uuid] = BtrfsFilesystem(f, self.__provider, self.__module)
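A hedged sketch of how a module might drive these helpers end to end (module is an assumed AnsibleModule instance; the label and subvolume names are hypothetical):

provider = BtrfsFilesystemsProvider(module)
fs = provider.get_matching_filesystem({'uuid': None, 'label': 'data', 'device': None})
subvol = fs.get_subvolume_by_name('/@home')
if subvol is not None and not subvol.is_mounted():
    # reach the subvolume through its nearest mounted ancestor
    path = fs.get_mountpath_as_child(subvol.path)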
@@ -55,8 +55,10 @@ def initialize_dialect(dialect, **kwargs):


def read_csv(data, dialect, fieldnames=None):
    BOM = to_native(u'\ufeff')
    data = to_native(data, errors='surrogate_or_strict')
    if data.startswith(BOM):
        data = data[len(BOM):]

    if PY3:
        fake_fh = StringIO(data)
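The added BOM handling, isolated for clarity (standalone, no Ansible imports):

data = '\ufeffname,id\nfoo,1\n'            # UTF-8-sig input starts with U+FEFF
BOM = '\ufeff'
if data.startswith(BOM):
    data = data[len(BOM):]                 # drop the marker so the header parses cleanly
assert data.splitlines()[0] == 'name,id'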
@@ -50,7 +50,7 @@ class _Dependency(object):
    def failed(self):
        return self.state == 1

    def verify(self, module):
    def validate(self, module):
        if self.failed:
            module.fail_json(msg=self.message, exception=self.trace)

@@ -71,20 +71,28 @@ def declare(name, *args, **kwargs):
    _deps[name] = dep


def validate(module, spec=None):
def _select_names(spec):
    dep_names = sorted(_deps)

    if spec is not None:
    if spec:
        if spec.startswith("-"):
            spec_split = spec[1:].split(":")
            for d in spec_split:
                dep_names.remove(d)
        else:
            spec_split = spec[1:].split(":")
            spec_split = spec.split(":")
            dep_names = []
            for d in spec_split:
                _deps[d]  # ensure it exists
                dep_names.append(d)

    for dep in dep_names:
        _deps[dep].verify(module)
    return dep_names


def validate(module, spec=None):
    for dep in _select_names(spec):
        _deps[dep].validate(module)


def failed(spec=None):
    return any(_deps[d].failed for d in _select_names(spec))
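The spec strings accepted by _select_names come in two flavours: a colon-separated allow list, or a leading - followed by a colon-separated deny list. A simplified standalone version (unlike the original, unknown names are silently skipped here rather than raising):

_deps = {'a': None, 'b': None, 'c': None}  # stand-ins for declared dependencies

def select_names(spec=None):
    names = sorted(_deps)
    if spec:
        if spec.startswith('-'):
            for d in spec[1:].split(':'):
                names.remove(d)
        else:
            names = [d for d in spec.split(':') if d in _deps]
    return names

assert select_names() == ['a', 'b', 'c']
assert select_names('c:a') == ['c', 'a']
assert select_names('-b') == ['a', 'c']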
@@ -49,6 +49,16 @@ URL_CLIENTSCOPE = "{url}/admin/realms/{realm}/client-scopes/{id}"
URL_CLIENTSCOPE_PROTOCOLMAPPERS = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models"
URL_CLIENTSCOPE_PROTOCOLMAPPER = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models/{mapper_id}"

URL_DEFAULT_CLIENTSCOPES = "{url}/admin/realms/{realm}/default-default-client-scopes"
URL_DEFAULT_CLIENTSCOPE = "{url}/admin/realms/{realm}/default-default-client-scopes/{id}"
URL_OPTIONAL_CLIENTSCOPES = "{url}/admin/realms/{realm}/default-optional-client-scopes"
URL_OPTIONAL_CLIENTSCOPE = "{url}/admin/realms/{realm}/default-optional-client-scopes/{id}"

URL_CLIENT_DEFAULT_CLIENTSCOPES = "{url}/admin/realms/{realm}/clients/{cid}/default-client-scopes"
URL_CLIENT_DEFAULT_CLIENTSCOPE = "{url}/admin/realms/{realm}/clients/{cid}/default-client-scopes/{id}"
URL_CLIENT_OPTIONAL_CLIENTSCOPES = "{url}/admin/realms/{realm}/clients/{cid}/optional-client-scopes"
URL_CLIENT_OPTIONAL_CLIENTSCOPE = "{url}/admin/realms/{realm}/clients/{cid}/optional-client-scopes/{id}"

URL_CLIENT_GROUP_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}"
URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/available"
URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/composite"
@@ -80,6 +90,9 @@ URL_IDENTITY_PROVIDER_MAPPER = "{url}/admin/realms/{realm}/identity-provider/ins
URL_COMPONENTS = "{url}/admin/realms/{realm}/components"
URL_COMPONENT = "{url}/admin/realms/{realm}/components/{id}"

URL_AUTHZ_AUTHORIZATION_SCOPE = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/scope/{id}"
URL_AUTHZ_AUTHORIZATION_SCOPES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/scope"


def keycloak_argument_spec():
    """
@@ -194,24 +207,30 @@ def is_struct_included(struct1, struct2, exclude=None):
    Return True if all element of dict 1 are present in dict 2, return false otherwise.
    """
    if isinstance(struct1, list) and isinstance(struct2, list):
        if not struct1 and not struct2:
            return True
        for item1 in struct1:
            if isinstance(item1, (list, dict)):
                for item2 in struct2:
                    if not is_struct_included(item1, item2, exclude):
                        return False
                    if is_struct_included(item1, item2, exclude):
                        break
                else:
                    return False
            else:
                if item1 not in struct2:
                    return False
        return True
    elif isinstance(struct1, dict) and isinstance(struct2, dict):
        if not struct1 and not struct2:
            return True
        try:
            for key in struct1:
                if not (exclude and key in exclude):
                    if not is_struct_included(struct1[key], struct2[key], exclude):
                        return False
            return True
        except KeyError:
            return False
        return True
    elif isinstance(struct1, bool) and isinstance(struct2, bool):
        return struct1 == struct2
    else:
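The corrected list handling means one matching element in struct2 is enough: the for/else only fails when no candidate matched. Illustrative values:

assert is_struct_included({'a': 1}, {'a': 1, 'b': 2})
assert is_struct_included([{'x': 1}], [{'z': 3}, {'x': 1, 'y': 2}])   # second candidate matches
assert not is_struct_included({'a': 1}, {'a': 2})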
@@ -734,8 +753,15 @@ class KeycloakAPI(object):
        users_url = URL_USERS.format(url=self.baseurl, realm=realm)
        users_url += '?username=%s&exact=true' % username
        try:
            return json.loads(to_native(open_url(users_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
                                                 validate_certs=self.validate_certs).read()))
            userrep = None
            users = json.loads(to_native(open_url(users_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
                                                  validate_certs=self.validate_certs).read()))
            for user in users:
                if user['username'] == username:
                    userrep = user
                    break
            return userrep

        except ValueError as e:
            self.module.fail_json(msg='API returned incorrect JSON when trying to obtain the user for realm %s and username %s: %s'
                                  % (realm, username, str(e)))
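The extra filtering loop above exists because the REST endpoint is a search: depending on the Keycloak version, the exact=true query parameter may be ignored and partial matches returned, so the function compares usernames itself before returning. In miniature (hypothetical response data):

users = [{'username': 'jdoe'}, {'username': 'jdoe2'}]   # server returned a partial match too
userrep = next((u for u in users if u['username'] == 'jdoe'), None)
assert userrep == {'username': 'jdoe'}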
@@ -1163,6 +1189,131 @@ class KeycloakAPI(object):
            self.module.fail_json(msg='Could not update protocolmappers for clientscope %s in realm %s: %s'
                                  % (mapper_rep, realm, str(e)))

    def get_default_clientscopes(self, realm, client_id=None):
        """Fetch the name and ID of all clientscopes on the Keycloak server.

        To fetch the full data of the client scope, make a subsequent call to
        get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return.

        :param realm: Realm in which the clientscope resides.
        :param client_id: The client in which the clientscope resides.
        :return: The default clientscopes of this realm or client.
        """
        url = URL_DEFAULT_CLIENTSCOPES if client_id is None else URL_CLIENT_DEFAULT_CLIENTSCOPES
        return self._get_clientscopes_of_type(realm, url, 'default', client_id)

    def get_optional_clientscopes(self, realm, client_id=None):
        """Fetch the name and ID of all clientscopes on the Keycloak server.

        To fetch the full data of the client scope, make a subsequent call to
        get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return.

        :param realm: Realm in which the clientscope resides.
        :param client_id: The client in which the clientscope resides.
        :return: The optional clientscopes of this realm or client.
        """
        url = URL_OPTIONAL_CLIENTSCOPES if client_id is None else URL_CLIENT_OPTIONAL_CLIENTSCOPES
        return self._get_clientscopes_of_type(realm, url, 'optional', client_id)

    def _get_clientscopes_of_type(self, realm, url_template, scope_type, client_id=None):
        """Fetch the name and ID of all clientscopes of the given type on the Keycloak server.

        To fetch the full data of the client scope, make a subsequent call to
        get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return.

        :param realm: Realm in which the clientscope resides.
        :param url_template: The URL template for the given scope type.
        :param scope_type: Either 'optional' or 'default'.
        :param client_id: The client in which the clientscope resides.
        :return: The clientscopes of the specified type of this realm.
        """
        if client_id is None:
            clientscopes_url = url_template.format(url=self.baseurl, realm=realm)
            try:
                return json.loads(to_native(open_url(clientscopes_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
                                                     timeout=self.connection_timeout, validate_certs=self.validate_certs).read()))
            except Exception as e:
                self.module.fail_json(msg="Could not fetch list of %s clientscopes in realm %s: %s" % (scope_type, realm, str(e)))
        else:
            cid = self.get_client_id(client_id=client_id, realm=realm)
            clientscopes_url = url_template.format(url=self.baseurl, realm=realm, cid=cid)
            try:
                return json.loads(to_native(open_url(clientscopes_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
                                                     timeout=self.connection_timeout, validate_certs=self.validate_certs).read()))
            except Exception as e:
                self.module.fail_json(msg="Could not fetch list of %s clientscopes in client %s: %s" % (scope_type, client_id, str(e)))

    def _decide_url_type_clientscope(self, client_id=None, scope_type="default"):
        """Decide which URL template to use.

        :param scope_type: Either 'optional' or 'default'.
        :param client_id: The client in which the clientscope resides.
        """
        if client_id is None:
            if scope_type == "default":
                return URL_DEFAULT_CLIENTSCOPE
            if scope_type == "optional":
                return URL_OPTIONAL_CLIENTSCOPE
        else:
            if scope_type == "default":
                return URL_CLIENT_DEFAULT_CLIENTSCOPE
            if scope_type == "optional":
                return URL_CLIENT_OPTIONAL_CLIENTSCOPE

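The four-way choice above could equally be written as a table lookup; a sketch under the assumption that the four URL constants are in scope (placeholder values used here, the real constants are URL templates):

URL_DEFAULT_CLIENTSCOPE = 'realm-default'          # placeholder
URL_OPTIONAL_CLIENTSCOPE = 'realm-optional'        # placeholder
URL_CLIENT_DEFAULT_CLIENTSCOPE = 'client-default'  # placeholder
URL_CLIENT_OPTIONAL_CLIENTSCOPE = 'client-optional'  # placeholder

def decide_url(client_id=None, scope_type="default"):
    # Key on (is the scope client-level?, scope type) instead of nested ifs.
    table = {
        (False, "default"): URL_DEFAULT_CLIENTSCOPE,
        (False, "optional"): URL_OPTIONAL_CLIENTSCOPE,
        (True, "default"): URL_CLIENT_DEFAULT_CLIENTSCOPE,
        (True, "optional"): URL_CLIENT_OPTIONAL_CLIENTSCOPE,
    }
    return table[(client_id is not None, scope_type)]

assert decide_url(None, "optional") == 'realm-optional'
assert decide_url("myclient", "default") == 'client-default'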
    def add_default_clientscope(self, id, realm="master", client_id=None):
        """Add a client scope as default either on realm or client level.

        :param id: Client scope ID.
        :param realm: Realm in which the clientscope resides.
        :param client_id: The client in which the clientscope resides.
        """
        self._action_type_clientscope(id, client_id, "default", realm, 'add')

    def add_optional_clientscope(self, id, realm="master", client_id=None):
        """Add a client scope as optional either on realm or client level.

        :param id: Client scope ID.
        :param realm: Realm in which the clientscope resides.
        :param client_id: The client in which the clientscope resides.
        """
        self._action_type_clientscope(id, client_id, "optional", realm, 'add')

    def delete_default_clientscope(self, id, realm="master", client_id=None):
        """Remove a client scope as default either on realm or client level.

        :param id: Client scope ID.
        :param realm: Realm in which the clientscope resides.
        :param client_id: The client in which the clientscope resides.
        """
        self._action_type_clientscope(id, client_id, "default", realm, 'delete')

    def delete_optional_clientscope(self, id, realm="master", client_id=None):
        """Remove a client scope as optional either on realm or client level.

        :param id: Client scope ID.
        :param realm: Realm in which the clientscope resides.
        :param client_id: The client in which the clientscope resides.
        """
        self._action_type_clientscope(id, client_id, "optional", realm, 'delete')

    def _action_type_clientscope(self, id=None, client_id=None, scope_type="default", realm="master", action='add'):
        """ Add or delete a clientscope of the given type.

        :param id: ID of the client scope to act on.
        :param client_id: The client on which to act; if None, the action applies at realm level.
        :param scope_type: 'default' or 'optional'.
        :param realm: The realm in which the clientscope resides, default "master".
        :param action: Either 'add' or 'delete'.
        """
        cid = None if client_id is None else self.get_client_id(client_id=client_id, realm=realm)
        # should have a good cid by here.
        clientscope_type_url = self._decide_url_type_clientscope(client_id, scope_type).format(realm=realm, id=id, cid=cid, url=self.baseurl)
        try:
            method = 'PUT' if action == "add" else 'DELETE'
            return open_url(clientscope_type_url, method=method, http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
                            validate_certs=self.validate_certs)

        except Exception as e:
            place = 'realm' if client_id is None else 'client ' + client_id
            self.module.fail_json(msg="Unable to %s %s clientscope %s @ %s : %s" % (action, scope_type, id, place, str(e)))

    def create_clientsecret(self, id, realm="master"):
        """ Generate a new client secret by id

@@ -1795,6 +1946,9 @@ class KeycloakAPI(object):
                            data=json.dumps(updatedExec),
                            timeout=self.connection_timeout,
                            validate_certs=self.validate_certs)
        except HTTPError as e:
            self.module.fail_json(msg="Unable to update execution '%s': %s: %s %s" %
                                  (flowAlias, repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(updatedExec)))
        except Exception as e:
            self.module.fail_json(msg="Unable to update executions %s: %s" % (updatedExec, str(e)))

@@ -1819,7 +1973,7 @@ class KeycloakAPI(object):
        except Exception as e:
            self.module.fail_json(msg="Unable to add authenticationConfig %s: %s" % (executionId, str(e)))

-   def create_subflow(self, subflowName, flowAlias, realm='master'):
+   def create_subflow(self, subflowName, flowAlias, realm='master', flowType='basic-flow'):
        """ Create a new subflow on the flow

        :param subflowName: name of the subflow to create
@@ -1830,7 +1984,7 @@ class KeycloakAPI(object):
        newSubFlow = {}
        newSubFlow["alias"] = subflowName
        newSubFlow["provider"] = "registration-page-form"
-       newSubFlow["type"] = "basic-flow"
+       newSubFlow["type"] = flowType
        open_url(
            URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW.format(
                url=self.baseurl,
@@ -1865,8 +2019,11 @@ class KeycloakAPI(object):
                     data=json.dumps(newExec),
                     timeout=self.connection_timeout,
                     validate_certs=self.validate_certs)
        except HTTPError as e:
            self.module.fail_json(msg="Unable to create new execution '%s' %s: %s: %s %s" %
                                  (flowAlias, execution["providerId"], repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(newExec)))
        except Exception as e:
-           self.module.fail_json(msg="Unable to create new execution %s: %s" % (execution["provider"], str(e)))
+           self.module.fail_json(msg="Unable to create new execution '%s' %s: %s" % (flowAlias, execution["providerId"], repr(e)))

    def change_execution_priority(self, executionId, diff, realm='master'):
        """ Raise or lower an execution's priority by diff steps
@@ -2190,3 +2347,44 @@ class KeycloakAPI(object):
        except Exception as e:
            self.module.fail_json(msg='Unable to delete component %s in realm %s: %s'
                                  % (cid, realm, str(e)))

    def get_authz_authorization_scope_by_name(self, name, client_id, realm):
        url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm)
        search_url = "%s/search?name=%s" % (url, quote(name))

        try:
            return json.loads(to_native(open_url(search_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
                                                 timeout=self.connection_timeout,
                                                 validate_certs=self.validate_certs).read()))
        except Exception:
            return False

    def create_authz_authorization_scope(self, payload, client_id, realm):
        """Create an authorization scope for a Keycloak client"""
        url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm)

        try:
            return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
                            data=json.dumps(payload), validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Could not create authorization scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))

    def update_authz_authorization_scope(self, payload, id, client_id, realm):
        """Update an authorization scope for a Keycloak client"""
        url = URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm)

        try:
            return open_url(url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
                            data=json.dumps(payload), validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Could not update scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))

    def remove_authz_authorization_scope(self, id, client_id, realm):
        """Remove an authorization scope from a Keycloak client"""
        url = URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm)

        try:
            return open_url(url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
                            validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Could not delete scope %s for client %s in realm %s: %s' % (id, client_id, realm, str(e)))

@@ -8,6 +8,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
import time


class iLORedfishUtils(RedfishUtils):
@@ -228,3 +229,79 @@ class iLORedfishUtils(RedfishUtils):
        if not response['ret']:
            return response
        return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgrattr['mgr_attr_name']}

    def get_server_poststate(self):
        # Get server details
        response = self.get_request(self.root_uri + self.systems_uri)
        if not response["ret"]:
            return response
        server_data = response["data"]

        if "Hpe" in server_data["Oem"]:
            return {
                "ret": True,
                "server_poststate": server_data["Oem"]["Hpe"]["PostState"]
            }
        else:
            return {
                "ret": True,
                "server_poststate": server_data["Oem"]["Hp"]["PostState"]
            }

    def wait_for_ilo_reboot_completion(self, polling_interval=60, max_polling_time=1800):
        # This method checks whether the OOB controller reboot has completed
        time.sleep(10)

        # Check server poststate
        state = self.get_server_poststate()
        if not state["ret"]:
            return state

        count = int(max_polling_time / polling_interval)
        times = 0

        # When the server is powered OFF
        pcount = 0
        while state["server_poststate"] in ["PowerOff", "Off"] and pcount < 5:
            time.sleep(10)
            state = self.get_server_poststate()
            if not state["ret"]:
                return state

            if state["server_poststate"] not in ["PowerOff", "Off"]:
                break
            pcount = pcount + 1
        if state["server_poststate"] in ["PowerOff", "Off"]:
            return {
                "ret": False,
                "changed": False,
                "msg": "Server is powered OFF"
            }

        # When the server is not rebooting
        if state["server_poststate"] in ["InPostDiscoveryComplete", "FinishedPost"]:
            return {
                "ret": True,
                "changed": False,
                "msg": "Server is not rebooting"
            }

        while state["server_poststate"] not in ["InPostDiscoveryComplete", "FinishedPost"] and count > times:
            state = self.get_server_poststate()
            if not state["ret"]:
                return state

            if state["server_poststate"] in ["InPostDiscoveryComplete", "FinishedPost"]:
                return {
                    "ret": True,
                    "changed": True,
                    "msg": "Server reboot is completed"
                }
            time.sleep(polling_interval)
            times = times + 1

        return {
            "ret": False,
            "changed": False,
            "msg": "Server reboot has failed, server state: {state}".format(state=state)
        }

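The polling bounds above are worth making explicit: the loop runs at most max_polling_time / polling_interval iterations before giving up. A quick check of the arithmetic with the defaults:

polling_interval = 60     # seconds between polls (default)
max_polling_time = 1800   # overall budget in seconds (default)
count = int(max_polling_time / polling_interval)
assert count == 30        # the reboot is abandoned after at most 30 polls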
@@ -34,6 +34,7 @@ def gen_specs(**specs):
    specs.update({
        'bind_dn': dict(),
        'bind_pw': dict(default='', no_log=True),
        'ca_path': dict(type='path'),
        'dn': dict(required=True),
        'referrals_chasing': dict(type='str', default='anonymous', choices=['disabled', 'anonymous']),
        'server_uri': dict(default='ldapi:///'),
@@ -52,6 +53,7 @@ class LdapGeneric(object):
        self.module = module
        self.bind_dn = self.module.params['bind_dn']
        self.bind_pw = self.module.params['bind_pw']
        self.ca_path = self.module.params['ca_path']
        self.referrals_chasing = self.module.params['referrals_chasing']
        self.server_uri = self.module.params['server_uri']
        self.start_tls = self.module.params['start_tls']
@@ -97,6 +99,9 @@ class LdapGeneric(object):
        if not self.verify_cert:
            ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)

        if self.ca_path:
            ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, self.ca_path)

        connection = ldap.initialize(self.server_uri)

        if self.referrals_chasing == 'disabled':
@@ -26,6 +26,7 @@ class Response(object):
    def __init__(self):
        self.content = None
        self.status_code = None
        self.stderr = None

    def json(self):
        return json.loads(self.content)
@@ -75,6 +76,10 @@ def memset_api_call(api_key, api_method, payload=None):
            msg = "Memset API returned a {0} response ({1}, {2})." . format(response.status_code, response.json()['error_type'], response.json()['error'])
        else:
            msg = "Memset API returned an error ({0}, {1})." . format(response.json()['error_type'], response.json()['error'])
    except urllib_error.URLError as e:
        has_failed = True
        msg = "A URLError occurred ({0})." . format(type(e))
        response.stderr = "{0}" . format(e)

    if msg is None:
        msg = response.json()
@@ -45,6 +45,8 @@ def render(to_render):
    """Converts dictionary to OpenNebula template."""
    def recurse(to_render):
        for key, value in sorted(to_render.items()):
            if value is None:
                continue
            if isinstance(value, dict):
                yield '{0:}=[{1:}]'.format(key, ','.join(recurse(value)))
                continue
@@ -52,6 +54,9 @@ def render(to_render):
                for item in value:
                    yield '{0:}=[{1:}]'.format(key, ','.join(recurse(item)))
                continue
            if isinstance(value, str):
                yield '{0:}="{1:}"'.format(key, value.replace('\\', '\\\\').replace('"', '\\"'))
                continue
            yield '{0:}="{1:}"'.format(key, value)
    return '\n'.join(recurse(to_render))

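A worked example of what render() would produce, assuming the function as written above (keys sorted, None values skipped, backslashes and quotes escaped):

template = {
    'CONTEXT': {'HOSTNAME': 'web-01', 'SSH_PUBLIC_KEY': None},
    'CPU': 1,
    'NAME': 'my "vm"',
}
expected = '\n'.join([
    'CONTEXT=[HOSTNAME="web-01"]',   # nested dict, None entry dropped
    'CPU="1"',                        # non-string scalars are quoted too
    'NAME="my \\"vm\\""',             # embedded quotes are escaped
])
# render(template) == expected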
@@ -32,12 +32,14 @@ def pipx_runner(module, command, **kwargs):
        state=fmt.as_map(_state_map),
        name=fmt.as_list(),
        name_source=fmt.as_func(fmt.unpack_args(lambda n, s: [s] if s else [n])),
        install_apps=fmt.as_bool("--include-apps"),
        install_deps=fmt.as_bool("--include-deps"),
        inject_packages=fmt.as_list(),
        force=fmt.as_bool("--force"),
        include_injected=fmt.as_bool("--include-injected"),
        index_url=fmt.as_opt_val('--index-url'),
        python=fmt.as_opt_val('--python'),
        system_site_packages=fmt.as_bool("--system-site-packages"),
        _list=fmt.as_fixed(['list', '--include-injected', '--json']),
        editable=fmt.as_bool("--editable"),
        pip_args=fmt.as_opt_val('--pip-args'),
@@ -63,11 +63,7 @@ def puppet_runner(module):
        return cmd

    def noop_func(v):
-       _noop = cmd_runner_fmt.as_map({
-           True: "--noop",
-           False: "--no-noop",
-       })
-       return _noop(module.check_mode or v)
+       return ["--noop"] if module.check_mode or v else ["--no-noop"]

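The refactor above trades the cmd_runner_fmt.as_map lookup for a plain conditional; both return the same argument list. A standalone sketch of the equivalence (plain functions, no CmdRunner involved):

def noop_args(check_mode, value):
    return ["--noop"] if check_mode or value else ["--no-noop"]

assert noop_args(True, False) == ["--noop"]     # check mode always forces --noop
assert noop_args(False, True) == ["--noop"]
assert noop_args(False, False) == ["--no-noop"]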
    _logdest_map = {
        "syslog": ["--logdest", "syslog"],
@@ -96,6 +92,7 @@ def puppet_runner(module):
        confdir=cmd_runner_fmt.as_opt_val("--confdir"),
        environment=cmd_runner_fmt.as_opt_val("--environment"),
        tags=cmd_runner_fmt.as_func(lambda v: ["--tags", ",".join(v)]),
        skip_tags=cmd_runner_fmt.as_func(lambda v: ["--skip_tags", ",".join(v)]),
        certname=cmd_runner_fmt.as_opt_eq_val("--certname"),
        noop=cmd_runner_fmt.as_func(noop_func),
        use_srv_records=cmd_runner_fmt.as_map({
@@ -3217,3 +3217,35 @@ class RedfishUtils(object):
            body["SecureBootEnable"] = True

        return self.patch_request(self.root_uri + secure_boot_url, body, check_pyld=True)

    def get_hpe_thermal_config(self):
        result = {}
        key = "Thermal"
        # Go through list
        for chassis_uri in self.chassis_uri_list:
            response = self.get_request(self.root_uri + chassis_uri)
            if response['ret'] is False:
                return response
            result['ret'] = True
            data = response['data']
            oem = data.get('Oem', {})
            hpe = oem.get('Hpe', {})
            thermal_config = hpe.get('ThermalConfiguration')
            result["current_thermal_config"] = thermal_config
        return result

    def get_hpe_fan_percent_min(self):
        result = {}
        key = "Thermal"
        # Go through list
        for chassis_uri in self.chassis_uri_list:
            response = self.get_request(self.root_uri + chassis_uri)
            if response['ret'] is False:
                return response
            result['ret'] = True
            data = response['data']
            oem = data.get('Oem', {})
            hpe = oem.get('Hpe', {})
            fan_percent_min_config = hpe.get('FanPercentMinimum')
            result["fan_percent_min"] = fan_percent_min_config
        return result

@@ -81,12 +81,18 @@ def api_request(module, endpoint, data=None, method="GET"):

    try:
        content = response.read()
-       json_response = json.loads(content)
-       return json_response, info

+       if not content:
+           return None, info
+       else:
+           json_response = json.loads(content)
+           return json_response, info
    except AttributeError as error:
-       module.fail_json(msg="Rundeck API request error",
-                        exception=to_native(error),
-                        execution_info=info)
+       module.fail_json(
+           msg="Rundeck API request error",
+           exception=to_native(error),
+           execution_info=info
+       )
    except ValueError as error:
        module.fail_json(
            msg="No valid JSON response",
@@ -108,7 +108,7 @@ EXAMPLES = r'''
    device: en1
    attributes:
      mtu: 900
-     arp: off
+     arp: 'off'
    state: available

- name: Configure IP, netmask and set en1 up.
@@ -27,8 +27,7 @@ attributes:
options:
  package:
    description:
-     - list of packages to install, upgrade or remove.
-   required: true
+     - List of packages to install, upgrade, or remove.
    aliases: [ name, pkg ]
    type: list
    elements: str
@@ -40,9 +39,30 @@ options:
    type: str
  update_cache:
    description:
-     - update the package database first C(apt-get update).
+     - Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step.
+     - Default is not to update the cache.
    type: bool
    default: false
  clean:
    description:
      - Run the equivalent of C(apt-get clean) to clear out the local repository of retrieved package files. It removes everything but
        the lock file from C(/var/cache/apt/archives/) and C(/var/cache/apt/archives/partial/).
      - Can be run as part of the package installation (clean runs before install) or as a separate step.
    type: bool
    default: false
    version_added: 6.5.0
  dist_upgrade:
    description:
      - If true, performs an C(apt-get dist-upgrade) to upgrade the system.
    type: bool
    default: false
    version_added: 6.5.0
  update_kernel:
    description:
      - If true, performs an C(update-kernel) to upgrade kernel packages.
    type: bool
    default: false
    version_added: 6.5.0
author:
  - Evgenii Terechkov (@evgkrsk)
'''
@@ -76,6 +96,16 @@ EXAMPLES = '''
    name: bar
    state: present
    update_cache: true

- name: Run the equivalent of "apt-get clean" as a separate step
  community.general.apt_rpm:
    clean: true

- name: Perform cache update and complete system upgrade (includes kernel)
  community.general.apt_rpm:
    update_cache: true
    dist_upgrade: true
    update_kernel: true
'''

import os

@@ -84,6 +114,8 @@ from ansible.module_utils.basic import AnsibleModule

APT_PATH = "/usr/bin/apt-get"
RPM_PATH = "/usr/bin/rpm"
APT_GET_ZERO = "\n0 upgraded, 0 newly installed"
UPDATE_KERNEL_ZERO = "\nTry to install new kernel "


def query_package(module, name):
@@ -104,14 +136,39 @@ def query_package_provides(module, name):


def update_package_db(module):
-   rc, out, err = module.run_command("%s update" % APT_PATH)
-   if rc != 0:
-       module.fail_json(msg="could not update package db: %s" % err)
+   rc, update_out, err = module.run_command([APT_PATH, "update"], check_rc=True, environ_update={"LANG": "C"})
+   return (False, update_out)


def dir_size(module, path):
    total_size = 0
    for path, dirs, files in os.walk(path):
        for f in files:
            total_size += os.path.getsize(os.path.join(path, f))
    return total_size


def clean(module):
    t = dir_size(module, "/var/cache/apt/archives")
    rc, out, err = module.run_command([APT_PATH, "clean"], check_rc=True)
    return (t != dir_size(module, "/var/cache/apt/archives"), out)


def dist_upgrade(module):
    rc, out, err = module.run_command([APT_PATH, "-y", "dist-upgrade"], check_rc=True, environ_update={"LANG": "C"})
    return (APT_GET_ZERO not in out, out)


def update_kernel(module):
    rc, out, err = module.run_command(["/usr/sbin/update-kernel", "-y"], check_rc=True, environ_update={"LANG": "C"})
    return (UPDATE_KERNEL_ZERO not in out, out)


def remove_packages(module, packages):

    if packages is None:
        return (False, "Empty package list")

    remove_c = 0
    # Using a for loop in case of error, we can report the package that failed
    for package in packages:
@@ -119,7 +176,7 @@ def remove_packages(module, packages):
        if not query_package(module, package):
            continue

-       rc, out, err = module.run_command("%s -y remove %s" % (APT_PATH, package))
+       rc, out, err = module.run_command("%s -y remove %s" % (APT_PATH, package), environ_update={"LANG": "C"})

        if rc != 0:
            module.fail_json(msg="failed to remove %s: %s" % (package, err))
@@ -127,13 +184,16 @@ def remove_packages(module, packages):
        remove_c += 1

    if remove_c > 0:
-       module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+       return (True, "removed %s package(s)" % remove_c)

-   module.exit_json(changed=False, msg="package(s) already absent")
+   return (False, "package(s) already absent")


def install_packages(module, pkgspec):

    if pkgspec is None:
        return (False, "Empty package list")

    packages = ""
    for package in pkgspec:
        if not query_package_provides(module, package):
@@ -141,7 +201,7 @@ def install_packages(module, pkgspec):

    if len(packages) != 0:

-       rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages))
+       rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages), environ_update={"LANG": "C"})

        installed = True
        for packages in pkgspec:
@@ -152,9 +212,9 @@ def install_packages(module, pkgspec):
        if rc or not installed:
            module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err))
        else:
-           module.exit_json(changed=True, msg="%s present(s)" % packages)
+           return (True, "%s present(s)" % packages)
    else:
-       module.exit_json(changed=False)
+       return (False, "Nothing to install")


def main():
@@ -162,7 +222,10 @@ def main():
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed']),
            update_cache=dict(type='bool', default=False),
-           package=dict(type='list', elements='str', required=True, aliases=['name', 'pkg']),
+           clean=dict(type='bool', default=False),
+           dist_upgrade=dict(type='bool', default=False),
+           update_kernel=dict(type='bool', default=False),
+           package=dict(type='list', elements='str', aliases=['name', 'pkg']),
        ),
    )

@@ -170,17 +233,39 @@ def main():
        module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")

    p = module.params
    modified = False
    output = ""

    if p['update_cache']:
        update_package_db(module)

    if p['clean']:
        (m, out) = clean(module)
        modified = modified or m

    if p['dist_upgrade']:
        (m, out) = dist_upgrade(module)
        modified = modified or m
        output += out

    if p['update_kernel']:
        (m, out) = update_kernel(module)
        modified = modified or m
        output += out

    packages = p['package']

    if p['state'] in ['installed', 'present']:
-       install_packages(module, packages)
+       (m, out) = install_packages(module, packages)
+       modified = modified or m
+       output += out

-   elif p['state'] in ['absent', 'removed']:
-       remove_packages(module, packages)
+   if p['state'] in ['absent', 'removed']:
+       (m, out) = remove_packages(module, packages)
+       modified = modified or m
+       output += out

    # Return total modification status and output of all commands
    module.exit_json(changed=modified, msg=output)


if __name__ == '__main__':
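The control flow above converts each step from exiting the module directly to returning a (changed, output) tuple, so main() can aggregate several steps into one result. A minimal sketch of that aggregation pattern with illustrative values:

steps = [(False, "cache updated\n"), (True, "2 packages installed\n")]
modified = False
output = ""
for m, out in steps:
    modified = modified or m  # any step reporting a change marks the run changed
    output += out
assert modified is True
assert output.endswith("installed\n")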
@@ -198,6 +198,10 @@ from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_bytes, to_native
from ansible.module_utils import six

try:  # python 3.2+
    from zipfile import BadZipFile  # type: ignore[attr-defined]
except ImportError:  # older python
    from zipfile import BadZipfile as BadZipFile

LZMA_IMP_ERR = None
if six.PY3:
@@ -534,7 +538,7 @@ class ZipArchive(Archive):
            archive = zipfile.ZipFile(_to_native_ascii(path), 'r')
            checksums = set((info.filename, info.CRC) for info in archive.infolist())
            archive.close()
-       except zipfile.BadZipfile:
+       except BadZipFile:
            checksums = set()
        return checksums

@@ -604,7 +608,13 @@ class TarArchive(Archive):
            # The python implementations of gzip, bz2, and lzma do not support restoring compressed files
            # to their original names so only file checksum is returned
            f = self._open_compressed_file(_to_native_ascii(path), 'r')
-           checksums = set([(b'', crc32(f.read()))])
+           checksum = 0
+           while True:
+               chunk = f.read(16 * 1024 * 1024)
+               if not chunk:
+                   break
+               checksum = crc32(chunk, checksum)
+           checksums = set([(b'', checksum)])
            f.close()
        except Exception:
            checksums = set()

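The chunked CRC above avoids loading the whole decompressed file into memory: feeding the running checksum back into zlib.crc32 yields the same value as hashing everything in one pass. A standalone sketch:

import io
from zlib import crc32

data = b"x" * (3 * 16 * 1024 + 7)  # stand-in for a large decompressed stream
f = io.BytesIO(data)
checksum = 0
while True:
    chunk = f.read(16 * 1024)  # small chunk size keeps the sketch cheap
    if not chunk:
        break
    checksum = crc32(chunk, checksum)
assert checksum == crc32(data)  # identical to a single whole-buffer pass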
@@ -15,7 +15,7 @@ module: awall
short_description: Manage awall policies
author: Ted Trask (@tdtrask) <ttrask01@yahoo.com>
description:
- - This modules allows for enable/disable/activate of I(awall) policies.
+ - This module allows for enable/disable/activate of C(awall) policies.
  - Alpine Wall (I(awall)) generates a firewall configuration from the enabled policy files
    and activates the configuration on the system.
extends_documentation_fragment:
@@ -41,8 +41,11 @@ options:
    description:
      - Activate the new firewall rules.
      - Can be run with other steps or on its own.
      - Idempotency is affected if I(activate=true), as the module will always report a changed state.
    type: bool
    default: false
notes:
  - At least one of I(name) and I(activate) is required.
'''

EXAMPLES = r'''

109  plugins/modules/btrfs_info.py  Normal file
@@ -0,0 +1,109 @@
#!/usr/bin/python

# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = r'''
---
module: btrfs_info
short_description: Query btrfs filesystem info
version_added: "6.6.0"
description: Query status of available btrfs filesystems, including uuid, label, subvolumes and mountpoints.

author:
  - Gregory Furlong (@gnfzdz)

extends_documentation_fragment:
  - community.general.attributes
  - community.general.attributes.info_module
'''

EXAMPLES = r'''

- name: Query information about mounted btrfs filesystems
  community.general.btrfs_info:
  register: my_btrfs_info

'''

RETURN = r'''

filesystems:
  description: Summaries of the current state for all btrfs filesystems found on the target host.
  type: list
  elements: dict
  returned: success
  contains:
    uuid:
      description: A unique identifier assigned to the filesystem.
      type: str
      sample: 96c9c605-1454-49b8-a63a-15e2584c208e
    label:
      description: An optional label assigned to the filesystem.
      type: str
      sample: Tank
    devices:
      description: A list of devices assigned to the filesystem.
      type: list
      sample:
        - /dev/sda1
        - /dev/sdb1
    default_subvolume:
      description: The id of the filesystem's default subvolume.
      type: int
      sample: 5
    subvolumes:
      description: A list of dicts containing metadata for all of the filesystem's subvolumes.
      type: list
      elements: dict
      contains:
        id:
          description: An identifier assigned to the subvolume, unique within the containing filesystem.
          type: int
          sample: 256
        mountpoints:
          description: Paths where the subvolume is mounted on the targeted host.
          type: list
          sample: ['/home']
        parent:
          description: The identifier of this subvolume's parent.
          type: int
          sample: 5
        path:
          description: The full path of the subvolume relative to the btrfs filesystem's root.
          type: str
          sample: /@home

'''


from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider
from ansible.module_utils.basic import AnsibleModule


def run_module():
    module_args = dict()

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )

    provider = BtrfsFilesystemsProvider(module)
    filesystems = [x.get_summary() for x in provider.get_filesystems()]
    result = {
        "filesystems": filesystems,
    }
    module.exit_json(**result)


def main():
    run_module()


if __name__ == '__main__':
    main()
682  plugins/modules/btrfs_subvolume.py  Normal file
@@ -0,0 +1,682 @@
#!/usr/bin/python

# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = r'''
---
module: btrfs_subvolume
short_description: Manage btrfs subvolumes
version_added: "6.6.0"

description: Creates, updates and deletes btrfs subvolumes and snapshots.

options:
  automount:
    description:
      - Allow the module to temporarily mount the targeted btrfs filesystem in order to validate the current state and make any required changes.
    type: bool
    default: false
  default:
    description:
      - Make the subvolume specified by I(name) the filesystem's default subvolume.
    type: bool
    default: false
  filesystem_device:
    description:
      - A block device contained within the btrfs filesystem to be targeted.
      - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
    type: path
  filesystem_label:
    description:
      - A descriptive label assigned to the btrfs filesystem to be targeted.
      - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
    type: str
  filesystem_uuid:
    description:
      - A unique identifier assigned to the btrfs filesystem to be targeted.
      - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
    type: str
  name:
    description:
      - Name of the subvolume/snapshot to be targeted.
    required: true
    type: str
  recursive:
    description:
      - When true, indicates that parent/child subvolumes should be created/removed as necessary
        to complete the operation (for I(state=present) and I(state=absent) respectively).
    type: bool
    default: false
  snapshot_source:
    description:
      - Identifies the source subvolume for the created snapshot.
      - Infers that the created subvolume is a snapshot.
    type: str
  snapshot_conflict:
    description:
      - Policy defining behavior when a subvolume already exists at the path of the requested snapshot.
      - C(skip) - Create a snapshot only if a subvolume does not yet exist at the target location, otherwise indicate that no change is required.
        Warning, this option does not yet verify that the target subvolume was generated from a snapshot of the requested source.
      - C(clobber) - If a subvolume already exists at the requested location, delete it first.
        This option is not idempotent and will result in a new snapshot being generated on every execution.
      - C(error) - If a subvolume already exists at the requested location, return an error.
        This option is not idempotent and will result in an error on replay of the module.
    type: str
    choices: [ skip, clobber, error ]
    default: skip
  state:
    description:
      - Indicates the current state of the targeted subvolume.
    type: str
    choices: [ absent, present ]
    default: present

notes:
  - If any or all of the options I(filesystem_device), I(filesystem_label) or I(filesystem_uuid) parameters are provided, there is expected
    to be a matching btrfs filesystem. If none are provided and only a single btrfs filesystem exists or only a single
    btrfs filesystem is mounted, that filesystem will be used; otherwise, the module will take no action and return an error.

extends_documentation_fragment:
  - community.general.attributes

attributes:
  check_mode:
    support: partial
    details:
      - In some scenarios it may erroneously report intermediate subvolumes being created.
        After mounting, if a directory-like file is found where the subvolume would have been created, the operation is skipped.
  diff_mode:
    support: none

author:
  - Gregory Furlong (@gnfzdz)
'''

EXAMPLES = r'''

- name: Create a @home subvolume under the root subvolume
  community.general.btrfs_subvolume:
    name: /@home
    device: /dev/vda2

- name: Remove the @home subvolume if it exists
  community.general.btrfs_subvolume:
    name: /@home
    state: absent
    device: /dev/vda2

- name: Create a snapshot of the root subvolume named @
  community.general.btrfs_subvolume:
    name: /@
    snapshot_source: /
    device: /dev/vda2

- name: Create a snapshot of the root subvolume and make it the new default subvolume
  community.general.btrfs_subvolume:
    name: /@
    snapshot_source: /
    default: true
    device: /dev/vda2

- name: Create a snapshot of the /@ subvolume, recursively creating intermediate subvolumes as required
  community.general.btrfs_subvolume:
    name: /@snapshots/@2022_06_09
    snapshot_source: /@
    recursive: true
    device: /dev/vda2

- name: Remove the /@snapshots/@2022_06_09 subvolume, recursively deleting child subvolumes as required
  community.general.btrfs_subvolume:
    name: /@snapshots/@2022_06_09
    state: absent
    recursive: true
    device: /dev/vda2

'''

RETURN = r'''

filesystem:
  description:
    - A summary of the final state of the targeted btrfs filesystem.
  type: dict
  returned: success
  contains:
    uuid:
      description: A unique identifier assigned to the filesystem.
      returned: success
      type: str
      sample: 96c9c605-1454-49b8-a63a-15e2584c208e
    label:
      description: An optional label assigned to the filesystem.
      returned: success
      type: str
      sample: Tank
    devices:
      description: A list of devices assigned to the filesystem.
      returned: success
      type: list
      sample:
        - /dev/sda1
        - /dev/sdb1
    default_subvolume:
      description: The ID of the filesystem's default subvolume.
      returned: success and if filesystem is mounted
      type: int
      sample: 5
    subvolumes:
      description: A list of dicts containing metadata for all of the filesystem's subvolumes.
      returned: success and if filesystem is mounted
      type: list
      elements: dict
      contains:
        id:
          description: An identifier assigned to the subvolume, unique within the containing filesystem.
          type: int
          sample: 256
        mountpoints:
          description: Paths where the subvolume is mounted on the targeted host.
          type: list
          sample: ['/home']
        parent:
          description: The identifier of this subvolume's parent.
          type: int
          sample: 5
        path:
          description: The full path of the subvolume relative to the btrfs filesystem's root.
          type: str
          sample: /@home

modifications:
  description:
    - A list where each element describes a change made to the target btrfs filesystem.
  type: list
  returned: success
  elements: str

target_subvolume_id:
  description:
    - The ID of the subvolume specified with the I(name) parameter, either pre-existing or created as part of module execution.
  type: int
  sample: 257
  returned: success and subvolume exists after module execution
'''

from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider, BtrfsCommands, BtrfsModuleException
from ansible_collections.community.general.plugins.module_utils.btrfs import normalize_subvolume_path
from ansible.module_utils.basic import AnsibleModule
import os
import tempfile


class BtrfsSubvolumeModule(object):

    __BTRFS_ROOT_SUBVOLUME = '/'
    __BTRFS_ROOT_SUBVOLUME_ID = 5
    __BTRFS_SUBVOLUME_INODE_NUMBER = 256

    __CREATE_SUBVOLUME_OPERATION = 'create'
    __CREATE_SNAPSHOT_OPERATION = 'snapshot'
    __DELETE_SUBVOLUME_OPERATION = 'delete'
    __SET_DEFAULT_SUBVOLUME_OPERATION = 'set-default'

    __UNKNOWN_SUBVOLUME_ID = '?'

    def __init__(self, module):
        self.module = module
        self.__btrfs_api = BtrfsCommands(module)
        self.__provider = BtrfsFilesystemsProvider(module)

        # module parameters
        name = self.module.params['name']
        self.__name = normalize_subvolume_path(name) if name is not None else None
        self.__state = self.module.params['state']

        self.__automount = self.module.params['automount']
        self.__default = self.module.params['default']
        self.__filesystem_device = self.module.params['filesystem_device']
        self.__filesystem_label = self.module.params['filesystem_label']
        self.__filesystem_uuid = self.module.params['filesystem_uuid']
        self.__recursive = self.module.params['recursive']
        self.__snapshot_conflict = self.module.params['snapshot_conflict']
        snapshot_source = self.module.params['snapshot_source']
        self.__snapshot_source = normalize_subvolume_path(snapshot_source) if snapshot_source is not None else None

        # execution state
        self.__filesystem = None
        self.__required_mounts = []
        self.__unit_of_work = []
        self.__completed_work = []
        self.__temporary_mounts = dict()

    def run(self):
        error = None
        try:
            self.__load_filesystem()
            self.__prepare_unit_of_work()

            if not self.module.check_mode:
                # check required mounts & mount
                if len(self.__unit_of_work) > 0:
                    self.__execute_unit_of_work()
                    self.__filesystem.refresh()
            else:
                # check required mounts
                self.__completed_work.extend(self.__unit_of_work)
        except Exception as e:
            error = e
        finally:
            self.__cleanup_mounts()
            if self.__filesystem is not None:
                self.__filesystem.refresh_mountpoints()

        return (error, self.get_results())

    # Identify the targeted filesystem and obtain the current state
    def __load_filesystem(self):
        if self.__has_filesystem_criteria():
            filesystem = self.__find_matching_filesystem()
        else:
            filesystem = self.__find_default_filesystem()

        # The filesystem must be mounted to obtain the current state (subvolumes, default, etc)
        if not filesystem.is_mounted():
            if not self.__automount:
                raise BtrfsModuleException(
                    "Target filesystem uuid=%s is not currently mounted and automount=False. "
                    "Mount explicitly before module execution or pass automount=True" % filesystem.uuid)
            elif self.module.check_mode:
                # TODO is failing the module an appropriate outcome in this scenario?
                raise BtrfsModuleException(
                    "Target filesystem uuid=%s is not currently mounted. Unable to validate the current "
                    "state while running with check_mode=True" % filesystem.uuid)
            else:
                self.__mount_subvolume_id_to_tempdir(filesystem, self.__BTRFS_ROOT_SUBVOLUME_ID)
                filesystem.refresh()
        self.__filesystem = filesystem

    def __has_filesystem_criteria(self):
        return self.__filesystem_uuid is not None or self.__filesystem_label is not None or self.__filesystem_device is not None

    def __find_matching_filesystem(self):
        criteria = {
            'uuid': self.__filesystem_uuid,
            'label': self.__filesystem_label,
            'device': self.__filesystem_device,
        }
        return self.__provider.get_matching_filesystem(criteria)

    def __find_default_filesystem(self):
        filesystems = self.__provider.get_filesystems()
        filesystem = None

        if len(filesystems) == 1:
            filesystem = filesystems[0]
        else:
            mounted_filesystems = [x for x in filesystems if x.is_mounted()]
            if len(mounted_filesystems) == 1:
                filesystem = mounted_filesystems[0]

        if filesystem is not None:
            return filesystem
        else:
            raise BtrfsModuleException(
                "Failed to automatically identify targeted filesystem. "
                "No explicit device indicated and found %d available filesystems." % len(filesystems)
            )

    # Prepare unit of work
    def __prepare_unit_of_work(self):
        if self.__state == "present":
            if self.__snapshot_source is None:
                self.__prepare_subvolume_present()
            else:
                self.__prepare_snapshot_present()

            if self.__default:
                self.__prepare_set_default()
        elif self.__state == "absent":
            self.__prepare_subvolume_absent()

    def __prepare_subvolume_present(self):
        subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
        if subvolume is None:
            self.__prepare_before_create_subvolume(self.__name)
            self.__stage_create_subvolume(self.__name)

    def __prepare_before_create_subvolume(self, subvolume_name):
        closest_parent = self.__filesystem.get_nearest_subvolume(subvolume_name)
        self.__stage_required_mount(closest_parent)
        if self.__recursive:
            self.__prepare_create_intermediates(closest_parent, subvolume_name)

    def __prepare_create_intermediates(self, closest_subvolume, subvolume_name):
        relative_path = closest_subvolume.get_child_relative_path(self.__name)
        missing_subvolumes = [x for x in relative_path.split(os.path.sep) if len(x) > 0]
        if len(missing_subvolumes) > 1:
            current = closest_subvolume.path
            for s in missing_subvolumes[:-1]:
                separator = os.path.sep if current[-1] != os.path.sep else ""
                current = current + separator + s
                self.__stage_create_subvolume(current, True)

    def __prepare_snapshot_present(self):
        source_subvolume = self.__filesystem.get_subvolume_by_name(self.__snapshot_source)
        subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
        subvolume_exists = subvolume is not None

        if subvolume_exists:
            if self.__snapshot_conflict == "skip":
                # No change required
                return
            elif self.__snapshot_conflict == "error":
                raise BtrfsModuleException("Target subvolume=%s already exists and snapshot_conflict='error'" % self.__name)

        if source_subvolume is None:
            raise BtrfsModuleException("Source subvolume %s does not exist" % self.__snapshot_source)
        elif subvolume is not None and source_subvolume.id == subvolume.id:
            raise BtrfsModuleException("Snapshot source and target are the same.")
        else:
            self.__stage_required_mount(source_subvolume)

        if subvolume_exists and self.__snapshot_conflict == "clobber":
            self.__prepare_delete_subvolume_tree(subvolume)
        elif not subvolume_exists:
            self.__prepare_before_create_subvolume(self.__name)

        self.__stage_create_snapshot(source_subvolume, self.__name)

    def __prepare_subvolume_absent(self):
        subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
        if subvolume is not None:
            self.__prepare_delete_subvolume_tree(subvolume)

    def __prepare_delete_subvolume_tree(self, subvolume):
        if subvolume.is_filesystem_root():
            raise BtrfsModuleException("Can not delete the filesystem's root subvolume")
        if not self.__recursive and len(subvolume.get_child_subvolumes()) > 0:
            raise BtrfsModuleException("Subvolume targeted for deletion %s has children and recursive=False. "
                                       "Either explicitly delete the child subvolumes first or pass "
                                       "parameter recursive=True." % subvolume.path)

        self.__stage_required_mount(subvolume.get_parent_subvolume())
        queue = self.__prepare_recursive_delete_order(subvolume) if self.__recursive else [subvolume]
        # prepare unit of work
        for s in queue:
            if s.is_mounted():
                # TODO potentially unmount the subvolume if automount=True ?
                raise BtrfsModuleException("Can not delete mounted subvolume=%s" % s.path)
            if s.is_filesystem_default():
                self.__stage_set_default_subvolume(self.__BTRFS_ROOT_SUBVOLUME, self.__BTRFS_ROOT_SUBVOLUME_ID)
            self.__stage_delete_subvolume(s)

    def __prepare_recursive_delete_order(self, subvolume):
        """Return the subvolume and all descendants as a list, ordered so that descendants always occur before their ancestors"""
        pending = [subvolume]
        ordered = []
        while len(pending) > 0:
            next = pending.pop()
            ordered.append(next)
            pending.extend(next.get_child_subvolumes())
        ordered.reverse()  # reverse to ensure children are deleted before their parent
        return ordered

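The traversal above is a stack-based walk followed by a reversal, which guarantees every child precedes its parent in the deletion queue. A toy model of the same ordering, with Node standing in for the module's subvolume objects:

class Node:
    def __init__(self, path, children=()):
        self.path = path
        self.children = list(children)

root = Node("/@", [Node("/@/a", [Node("/@/a/x")]), Node("/@/b")])
pending = [root]
ordered = []
while pending:
    item = pending.pop()          # depth-first walk via an explicit stack
    ordered.append(item)
    pending.extend(item.children)
ordered.reverse()                  # descendants now precede their ancestors
paths = [n.path for n in ordered]
assert paths.index("/@/a/x") < paths.index("/@/a")  # child deleted before parent
assert paths[-1] == "/@"                            # the subtree root goes last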
    def __prepare_set_default(self):
        subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
        subvolume_id = subvolume.id if subvolume is not None else None

        if self.__filesystem.default_subvolid != subvolume_id:
            self.__stage_set_default_subvolume(self.__name, subvolume_id)

    # Stage operations to the unit of work
    def __stage_required_mount(self, subvolume):
        if subvolume.get_mounted_path() is None:
            if self.__automount:
                self.__required_mounts.append(subvolume)
            else:
                raise BtrfsModuleException("The requested changes will require the subvolume '%s' to be mounted, but automount=False" % subvolume.path)

    def __stage_create_subvolume(self, subvolume_path, intermediate=False):
        """
        Add required creation of an intermediate subvolume to the unit of work.
        If intermediate is true, the action will be skipped if a directory-like file is found at the target
        after mounting a parent subvolume.
        """
        self.__unit_of_work.append({
            'action': self.__CREATE_SUBVOLUME_OPERATION,
            'target': subvolume_path,
            'intermediate': intermediate,
        })

    def __stage_create_snapshot(self, source_subvolume, target_subvolume_path):
        """Add creation of a snapshot from source to target to the unit of work"""
        self.__unit_of_work.append({
            'action': self.__CREATE_SNAPSHOT_OPERATION,
            'source': source_subvolume.path,
            'source_id': source_subvolume.id,
            'target': target_subvolume_path,
        })

    def __stage_delete_subvolume(self, subvolume):
        """Add deletion of the target subvolume to the unit of work"""
        self.__unit_of_work.append({
            'action': self.__DELETE_SUBVOLUME_OPERATION,
            'target': subvolume.path,
            'target_id': subvolume.id,
        })

    def __stage_set_default_subvolume(self, subvolume_path, subvolume_id=None):
        """Add update of the filesystem's default subvolume to the unit of work"""
        self.__unit_of_work.append({
            'action': self.__SET_DEFAULT_SUBVOLUME_OPERATION,
            'target': subvolume_path,
            'target_id': subvolume_id,
        })

    # Execute the unit of work
    def __execute_unit_of_work(self):
        self.__check_required_mounts()
        for op in self.__unit_of_work:
            if op['action'] == self.__CREATE_SUBVOLUME_OPERATION:
                self.__execute_create_subvolume(op)
            elif op['action'] == self.__CREATE_SNAPSHOT_OPERATION:
                self.__execute_create_snapshot(op)
            elif op['action'] == self.__DELETE_SUBVOLUME_OPERATION:
                self.__execute_delete_subvolume(op)
            elif op['action'] == self.__SET_DEFAULT_SUBVOLUME_OPERATION:
                self.__execute_set_default_subvolume(op)
            else:
                raise ValueError("Unknown operation type '%s'" % op['action'])

    def __execute_create_subvolume(self, operation):
        target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target'])
        if not self.__is_existing_directory_like(target_mounted_path):
            self.__btrfs_api.subvolume_create(target_mounted_path)
            self.__completed_work.append(operation)

    def __execute_create_snapshot(self, operation):
        source_subvolume = self.__filesystem.get_subvolume_by_name(operation['source'])
        source_mounted_path = source_subvolume.get_mounted_path()
        target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target'])

        self.__btrfs_api.subvolume_snapshot(source_mounted_path, target_mounted_path)
        self.__completed_work.append(operation)

    def __execute_delete_subvolume(self, operation):
        target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target'])
        self.__btrfs_api.subvolume_delete(target_mounted_path)
        self.__completed_work.append(operation)

    def __execute_set_default_subvolume(self, operation):
        target = operation['target']
        target_id = operation['target_id']

        if target_id is None:
            target_subvolume = self.__filesystem.get_subvolume_by_name(target)

            if target_subvolume is None:
                self.__filesystem.refresh()  # the target may have been created earlier in module execution
                target_subvolume = self.__filesystem.get_subvolume_by_name(target)

            if target_subvolume is None:
                raise BtrfsModuleException("Failed to find existing subvolume '%s'" % target)
            else:
                target_id = target_subvolume.id

        self.__btrfs_api.subvolume_set_default(self.__filesystem.get_any_mountpoint(), target_id)
        self.__completed_work.append(operation)

    def __is_existing_directory_like(self, path):
        return os.path.exists(path) and (
            os.path.isdir(path) or
            os.stat(path).st_ino == self.__BTRFS_SUBVOLUME_INODE_NUMBER
        )

    def __check_required_mounts(self):
        filtered = self.__filter_child_subvolumes(self.__required_mounts)
        if len(filtered) > 0:
            for subvolume in filtered:
                self.__mount_subvolume_id_to_tempdir(self.__filesystem, subvolume.id)
            self.__filesystem.refresh_mountpoints()

    def __filter_child_subvolumes(self, subvolumes):
        """Filter the provided list of subvolumes to remove any that are a child of another item in the list"""
        filtered = []
        last = None
        ordered = sorted(subvolumes, key=lambda x: x.path)
        for next in ordered:
            if last is None or not next.path[0:len(last)] == last:
                filtered.append(next)
                last = next.path
        return filtered

# Create/cleanup temporary mountpoints
|
||||
def __mount_subvolume_id_to_tempdir(self, filesystem, subvolid):
|
||||
# this check should be redundant
|
||||
if self.module.check_mode or not self.__automount:
|
||||
raise BtrfsModuleException("Unable to temporarily mount required subvolumes"
|
||||
"with automount=%s and check_mode=%s" % (self.__automount, self.module.check_mode))
|
||||
|
||||
cache_key = "%s:%d" % (filesystem.uuid, subvolid)
|
||||
# The subvolume was already mounted, so return the current path
|
||||
if cache_key in self.__temporary_mounts:
|
||||
return self.__temporary_mounts[cache_key]
|
||||
|
||||
device = filesystem.devices[0]
|
||||
mountpoint = tempfile.mkdtemp(dir="/tmp")
|
||||
self.__temporary_mounts[cache_key] = mountpoint
|
||||
|
||||
mount = self.module.get_bin_path("mount", required=True)
|
||||
command = "%s -o noatime,subvolid=%d %s %s " % (mount,
|
||||
subvolid,
|
||||
device,
|
||||
mountpoint)
|
||||
result = self.module.run_command(command, check_rc=True)
|
||||
|
||||
return mountpoint
|
||||
|
||||
def __cleanup_mounts(self):
|
||||
for key in self.__temporary_mounts.keys():
|
||||
self.__cleanup_mount(self.__temporary_mounts[key])
|
||||
|
||||
def __cleanup_mount(self, mountpoint):
|
||||
umount = self.module.get_bin_path("umount", required=True)
|
||||
result = self.module.run_command("%s %s" % (umount, mountpoint))
|
||||
if result[0] == 0:
|
||||
rmdir = self.module.get_bin_path("rmdir", required=True)
|
||||
self.module.run_command("%s %s" % (rmdir, mountpoint))
|
||||
|
||||
# Format and return results
|
||||
def get_results(self):
|
||||
target = self.__filesystem.get_subvolume_by_name(self.__name)
|
||||
return dict(
|
||||
changed=len(self.__completed_work) > 0,
|
||||
filesystem=self.__filesystem.get_summary(),
|
||||
modifications=self.__get_formatted_modifications(),
|
||||
target_subvolume_id=(target.id if target is not None else None)
|
||||
)
|
||||
|
||||
def __get_formatted_modifications(self):
|
||||
return [self.__format_operation_result(op) for op in self.__completed_work]
|
||||
|
||||
def __format_operation_result(self, operation):
|
||||
action_type = operation['action']
|
||||
if action_type == self.__CREATE_SUBVOLUME_OPERATION:
|
||||
return self.__format_create_subvolume_result(operation)
|
||||
elif action_type == self.__CREATE_SNAPSHOT_OPERATION:
|
||||
return self.__format_create_snapshot_result(operation)
|
||||
elif action_type == self.__DELETE_SUBVOLUME_OPERATION:
|
||||
return self.__format_delete_subvolume_result(operation)
|
||||
elif action_type == self.__SET_DEFAULT_SUBVOLUME_OPERATION:
|
||||
return self.__format_set_default_subvolume_result(operation)
|
||||
else:
|
||||
raise ValueError("Unknown operation type '%s'" % operation['action'])
|
||||
|
||||
def __format_create_subvolume_result(self, operation):
|
||||
target = operation['target']
|
||||
target_subvolume = self.__filesystem.get_subvolume_by_name(target)
|
||||
target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID
|
||||
return "Created subvolume '%s' (%s)" % (target, target_id)
|
||||
|
||||
def __format_create_snapshot_result(self, operation):
|
||||
source = operation['source']
|
||||
source_id = operation['source_id']
|
||||
|
||||
target = operation['target']
|
||||
target_subvolume = self.__filesystem.get_subvolume_by_name(target)
|
||||
target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID
|
||||
return "Created snapshot '%s' (%s) from '%s' (%s)" % (target, target_id, source, source_id)
|
||||
|
||||
def __format_delete_subvolume_result(self, operation):
|
||||
target = operation['target']
|
||||
target_id = operation['target_id']
|
||||
return "Deleted subvolume '%s' (%s)" % (target, target_id)
|
||||
|
||||
def __format_set_default_subvolume_result(self, operation):
|
||||
target = operation['target']
|
||||
if 'target_id' in operation:
|
||||
target_id = operation['target_id']
|
||||
else:
|
||||
target_subvolume = self.__filesystem.get_subvolume_by_name(target)
|
||||
target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID
|
||||
return "Updated default subvolume to '%s' (%s)" % (target, target_id)
|
||||
|
||||
|
||||
def run_module():
|
||||
module_args = dict(
|
||||
automount=dict(type='bool', required=False, default=False),
|
||||
default=dict(type='bool', required=False, default=False),
|
||||
filesystem_device=dict(type='path', required=False),
|
||||
filesystem_label=dict(type='str', required=False),
|
||||
filesystem_uuid=dict(type='str', required=False),
|
||||
name=dict(type='str', required=True),
|
||||
recursive=dict(type='bool', default=False),
|
||||
state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
|
||||
snapshot_source=dict(type='str', required=False),
|
||||
snapshot_conflict=dict(type='str', required=False, default='skip', choices=['skip', 'clobber', 'error'])
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=module_args,
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
subvolume = BtrfsSubvolumeModule(module)
|
||||
error, result = subvolume.run()
|
||||
if error is not None:
|
||||
module.fail_json(str(error), **result)
|
||||
else:
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
def main():
|
||||
run_module()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
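A minimal usage sketch for the module above (not part of the commit; option names follow the argument spec shown, while the filesystem label and subvolume names are illustrative):

- name: Ensure a snapshot of the root subvolume exists
  community.general.btrfs_subvolume:
    filesystem_label: data          # illustrative label
    name: /snapshots/root_backup
    snapshot_source: /root
    snapshot_conflict: skip
    state: present
  register: snapshot_result

- name: Show the id of the target subvolume (returned by get_results above)
  ansible.builtin.debug:
    var: snapshot_result.target_subvolume_id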
@@ -183,7 +183,7 @@ class CPANMinus(ModuleHelper):
        if v.name and v.from_path:
            self.do_raise("Parameters 'name' and 'from_path' are mutually exclusive when 'mode=new'")

        self.command = self.module.get_bin_path(v.executable if v.executable else self.command)
        self.command = self.get_bin_path(v.executable if v.executable else self.command)
        self.vars.set("binary", self.command)

    def _is_package_installed(self, name, locallib, version):

@@ -16,7 +16,7 @@ short_description: Manages Datadog downtimes
version_added: 2.0.0
description:
  - Manages downtimes within Datadog.
  - Options as described on U(https://docs.datadoghq.com/api/v1/downtimes/s).
  - Options as described on U(https://docs.datadoghq.com/api/v1/downtimes/).
author:
  - Datadog (@Datadog)
requirements:

@@ -21,11 +21,23 @@ description:
  - Since C(dconf) requires a running D-Bus session to change values, the module
    will try to detect an existing session and reuse it, or run the tool via
    C(dbus-run-session).
requirements:
  - Optionally the C(gi.repository) Python library (usually included in the OS
    on hosts which have C(dconf)); this will become a non-optional requirement
    in a future major release of community.general.
notes:
  - This module depends on C(psutil) Python library (version 4.0.0 and upwards),
    C(dconf), C(dbus-send), and C(dbus-run-session) binaries. Depending on
    distribution you are using, you may need to install additional packages to
    have these available.
  - This module uses the C(gi.repository) Python library when available for
    accurate comparison of values in C(dconf) to values specified in Ansible
    code. C(gi.repository) is likely to be present on most systems which have
    C(dconf) but may not be present everywhere. When it is missing, a simple
    string comparison between values is used, and there may be false positives,
    that is, Ansible may think that a value is being changed when it is not.
    This fallback will be removed in a future version of this module, at which
    point the module will stop working on hosts without C(gi.repository).
  - Detection of existing, running D-Bus session, required to change settings
    via C(dconf), is not 100% reliable due to implementation details of D-Bus
    daemon itself. This might lead to running applications not picking-up
@@ -37,8 +49,8 @@ notes:
    I(value="'myvalue'") - with single quotes as part of the Ansible parameter
    value.
  - When using loops in combination with a value like
    :code:`"[('xkb', 'us'), ('xkb', 'se')]"`, you need to be aware of possible
    type conversions. Applying a filter :code:`"{{ item.value | string }}"`
    "[('xkb', 'us'), ('xkb', 'se')]", you need to be aware of possible
    type conversions. Applying a filter C({{ item.value | string }})
    to the parameter variable can avoid potential conversion problems.
  - The easiest way to figure out exact syntax/value you need to provide for a
    key is by making the configuration change in application affected by the
@@ -58,13 +70,18 @@ options:
    description:
      - A dconf key to modify or read from the dconf database.
  value:
    type: str
    type: raw
    required: false
    description:
      - Value to set for the specified dconf key. Value should be specified in
        GVariant format. Due to complexity of this format, it is best to have a
        look at existing values in the dconf database.
      - Required for I(state=present).
      - Although the type is specified as "raw", it should typically be
        specified as a string. However, boolean values in particular are
        handled properly even when specified as booleans rather than strings
        (in fact, handling booleans properly is why the type of this parameter
        is "raw").
  state:
    type: str
    required: false
@@ -126,17 +143,27 @@ EXAMPLES = r"""


import os
import traceback
import sys

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.respawn import (
    has_respawned,
    probe_interpreters_for_module,
    respawn_module,
)
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.general.plugins.module_utils import deps

glib_module_name = 'gi.repository.GLib'

PSUTIL_IMP_ERR = None
try:
    import psutil
    HAS_PSUTIL = True
    from gi.repository.GLib import Variant, GError
except ImportError:
    PSUTIL_IMP_ERR = traceback.format_exc()
    HAS_PSUTIL = False
    Variant = None
    GError = AttributeError

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
with deps.declare("psutil"):
    import psutil


class DBusWrapper(object):
@@ -258,6 +285,29 @@ class DconfPreference(object):
        # Check if dconf binary exists
        self.dconf_bin = self.module.get_bin_path('dconf', required=True)

    @staticmethod
    def variants_are_equal(canonical_value, user_value):
        """Compare two string GVariant representations for equality.

        Assumes `canonical_value` is "canonical" in the sense that the type of
        the variant is specified explicitly if it cannot be inferred; this is
        true for textual representations of variants generated by the `dconf`
        command. The type of `canonical_value` is used to parse `user_value`,
        so the latter does not need to be explicitly typed.

        Returns True if the two values are equal.
        """
        if canonical_value is None:
            # It's unset in dconf database, so anything the user is trying to
            # set is a change.
            return False
        try:
            variant1 = Variant.parse(None, canonical_value)
            variant2 = Variant.parse(variant1.get_type(), user_value)
            return variant1 == variant2
        except GError:
            return canonical_value == user_value

    def read(self, key):
        """
        Retrieves current value associated with the dconf key.
@@ -298,7 +348,7 @@ class DconfPreference(object):
        """
        # If no change is needed (or won't be done due to check_mode), notify
        # caller straight away.
        if value == self.read(key):
        if self.variants_are_equal(self.read(key), value):
            return False
        elif self.check_mode:
            return True
@@ -312,7 +362,7 @@ class DconfPreference(object):
        rc, out, err = dbus_wrapper.run_command(command)

        if rc != 0:
            self.module.fail_json(msg='dconf failed while write the value with error: %s' % err,
            self.module.fail_json(msg='dconf failed while writing key %s, value %s with error: %s' % (key, value, err),
                                  out=out,
                                  err=err)

@@ -364,17 +414,62 @@ def main():
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent', 'read']),
            key=dict(required=True, type='str', no_log=False),
            value=dict(required=False, default=None, type='str'),
            # Converted to str below after special handling of bool.
            value=dict(required=False, default=None, type='raw'),
        ),
        supports_check_mode=True
        supports_check_mode=True,
        required_if=[
            ('state', 'present', ['value']),
        ],
    )

    if not HAS_PSUTIL:
        module.fail_json(msg=missing_required_lib("psutil"), exception=PSUTIL_IMP_ERR)
    if Variant is None:
        # This interpreter can't see the GLib module. To try to fix that, we'll
        # look in common locations for system-owned interpreters that can see
        # it; if we find one, we'll respawn under it. Otherwise we'll proceed
        # with degraded performance, without the ability to parse GVariants.
        # Later (in a different PR) we'll actually deprecate this degraded
        # performance level and fail with an error if the library can't be
        # found.

    # If present state was specified, value must be provided.
    if module.params['state'] == 'present' and module.params['value'] is None:
        module.fail_json(msg='State "present" requires "value" to be set.')
        if has_respawned():
            # This shouldn't be possible; short-circuit early if it happens.
            module.fail_json(
                msg="%s must be installed and visible from %s." %
                (glib_module_name, sys.executable))

        interpreters = ['/usr/bin/python3', '/usr/bin/python2',
                        '/usr/bin/python']

        interpreter = probe_interpreters_for_module(
            interpreters, glib_module_name)

        if interpreter:
            # Found the Python bindings; respawn this module under the
            # interpreter where we found them.
            respawn_module(interpreter)
            # This is the end of the line for this process, it will exit here
            # once the respawned module has completed.

    # Try to be forgiving about the user specifying a boolean as the value, or
    # more accurately about the fact that YAML and Ansible are quite insistent
    # about converting strings that look like booleans into booleans. Convert
    # the boolean into a string of the type dconf will understand. Any type for
    # the value other than boolean is just converted into a string directly.
    if module.params['value'] is not None:
        if isinstance(module.params['value'], bool):
            module.params['value'] = 'true' if module.params['value'] else 'false'
        else:
            module.params['value'] = to_native(
                module.params['value'], errors='surrogate_or_strict')

    if Variant is None:
        module.warn(
            'WARNING: The gi.repository Python library is not available; '
            'using string comparison to check value equality. This fallback '
            'will be deprecated in a future version of community.general.')

    deps.validate(module)

    # Create wrapper instance.
    dconf = DconfPreference(module, module.check_mode)

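An illustrative task for the boolean handling described above (not from the commit; the key shown is a common GNOME example): YAML turns an unquoted true into a boolean, and the raw type plus the conversion in main() turns it back into the string form dconf expects.

- name: Enable Num Lock on login (value given as a YAML boolean)
  community.general.dconf:
    key: /org/gnome/desktop/peripherals/keyboard/numlock-state   # illustrative key
    value: true
    state: present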
@@ -215,7 +215,7 @@ def uninstall_flat(module, binary, names, method):

def flatpak_exists(module, binary, names, method):
    """Check if the flatpaks are installed."""
    command = [binary, "list", "--{0}".format(method), "--app"]
    command = [binary, "list", "--{0}".format(method)]
    output = _flatpak_command(module, False, command)
    installed = []
    not_installed = []

@@ -255,7 +255,10 @@ class GitLabGroup(object):
            return True

        try:
            group = self._gitlab.groups.create(arguments)
            # Filter out None values
            filtered = dict((arg_key, arg_value) for arg_key, arg_value in arguments.items() if arg_value is not None)

            group = self._gitlab.groups.create(filtered)
        except (gitlab.exceptions.GitlabCreateError) as e:
            self._module.fail_json(msg="Failed to create group: %s " % to_native(e))


@@ -251,6 +251,13 @@ options:
    type: str
    choices: ["private", "disabled", "enabled"]
    version_added: "6.4.0"
  topics:
    description:
      - A topic or list of topics to be assigned to a project.
      - It is compatible with old GitLab server releases (versions before 14, correspond to C(tag_list)).
    type: list
    elements: str
    version_added: "6.6.0"
'''

EXAMPLES = r'''
@@ -334,6 +341,8 @@ from ansible_collections.community.general.plugins.module_utils.gitlab import (
    auth_argument_spec, find_group, find_project, gitlab_authentication, gitlab, ensure_gitlab_package
)

from ansible_collections.community.general.plugins.module_utils.version import LooseVersion


class GitLabProject(object):
    def __init__(self, module, gitlab_instance):
@@ -376,6 +385,14 @@ class GitLabProject(object):
            'monitor_access_level': options['monitor_access_level'],
            'security_and_compliance_access_level': options['security_and_compliance_access_level'],
        }

        # topics was introduced on gitlab >=14 and replaces tag_list. We get the current
        # gitlab version and check if it is less than 14. If so, we use tag_list instead of topics
        if LooseVersion(self._gitlab.version()[0]) < LooseVersion("14"):
            project_options['tag_list'] = options['topics']
        else:
            project_options['topics'] = options['topics']

        # Because we have already call userExists in main()
        if self.project_object is None:
            project_options.update({
@@ -514,6 +531,7 @@ def main():
        infrastructure_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']),
        monitor_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']),
        security_and_compliance_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']),
        topics=dict(type='list', elements='str'),
    ))

    module = AnsibleModule(
@@ -570,6 +588,7 @@ def main():
    infrastructure_access_level = module.params['infrastructure_access_level']
    monitor_access_level = module.params['monitor_access_level']
    security_and_compliance_access_level = module.params['security_and_compliance_access_level']
    topics = module.params['topics']

    if default_branch and not initialize_with_readme:
        module.fail_json(msg="Param default_branch need param initialize_with_readme set to true")
@@ -648,6 +667,7 @@ def main():
        "infrastructure_access_level": infrastructure_access_level,
        "monitor_access_level": monitor_access_level,
        "security_and_compliance_access_level": security_and_compliance_access_level,
        "topics": topics,
    }):

        module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.project_object._attrs)

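A sketch of the new I(topics) option in use (not from the commit; server URL and token are placeholders). On GitLab servers older than 14 the module sends the same list as C(tag_list), as the version check above shows.

- name: Create a project with topics
  community.general.gitlab_project:
    api_url: https://gitlab.example.com/    # placeholder
    api_token: "{{ gitlab_token }}"         # placeholder
    name: demo-project
    topics:
      - ansible
      - automation
    state: present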
@@ -44,10 +44,17 @@ attributes:
    support: none

options:
  group:
    description:
      - ID or full path of the group in the form group/subgroup.
      - Mutually exclusive with I(owned) and I(project).
    type: str
    version_added: '6.5.0'
  project:
    description:
      - ID or full path of the project in the form of group/name.
      - Mutually exclusive with I(owned) since community.general 4.5.0.
      - Mutually exclusive with I(group).
    type: str
    version_added: '3.7.0'
  description:
@@ -73,6 +80,7 @@ options:
    description:
      - Searches only runners available to the user when searching for existing, when false admin token required.
      - Mutually exclusive with I(project) since community.general 4.5.0.
      - Mutually exclusive with I(group).
    default: false
    type: bool
    version_added: 2.0.0
@@ -209,21 +217,23 @@ except NameError:


class GitLabRunner(object):
    def __init__(self, module, gitlab_instance, project=None):
    def __init__(self, module, gitlab_instance, group=None, project=None):
        self._module = module
        self._gitlab = gitlab_instance
        self.runner_object = None

        # Whether to operate on GitLab-instance-wide or project-wide runners
        # See https://gitlab.com/gitlab-org/gitlab-ce/issues/60774
        # for group runner token access
        if project:
            self._runners_endpoint = project.runners.list
        elif group:
            self._runners_endpoint = group.runners.list
        elif module.params['owned']:
            self._runners_endpoint = gitlab_instance.runners.list
        else:
            self._runners_endpoint = gitlab_instance.runners.all

        self.runner_object = None

    def create_or_update_runner(self, description, options):
        changed = False

@@ -234,9 +244,8 @@ class GitLabRunner(object):
            'maximum_timeout': options['maximum_timeout'],
            'tag_list': options['tag_list'],
        }
        if arguments['access_level'] is not None:
        if options.get('access_level') is not None:
            arguments['access_level'] = options['access_level']

        # Because we have already call userExists in main()
        if self.runner_object is None:
            arguments['description'] = description
@@ -251,7 +260,7 @@ class GitLabRunner(object):
                access_level_on_creation = False

            if not access_level_on_creation:
                del arguments['access_level']
                arguments.pop('access_level', None)

            runner = self.create_runner(arguments)
            changed = True
@@ -361,6 +370,7 @@ def main():
        maximum_timeout=dict(type='int', default=3600),
        registration_token=dict(type='str', no_log=True),
        project=dict(type='str'),
        group=dict(type='str'),
        state=dict(type='str', default="present", choices=["absent", "present"]),
    ))

@@ -373,6 +383,8 @@ def main():
            ['api_token', 'api_oauth_token'],
            ['api_token', 'api_job_token'],
            ['project', 'owned'],
            ['group', 'owned'],
            ['project', 'group'],
        ],
        required_together=[
            ['api_username', 'api_password'],
@@ -397,6 +409,7 @@ def main():
    maximum_timeout = module.params['maximum_timeout']
    registration_token = module.params['registration_token']
    project = module.params['project']
    group = module.params['group']

    if access_level is None:
        message = "The option 'access_level' is unspecified, so 'ref_protected' is assumed. "\
@@ -409,13 +422,20 @@ def main():

    gitlab_instance = gitlab_authentication(module)
    gitlab_project = None
    gitlab_group = None

    if project:
        try:
            gitlab_project = gitlab_instance.projects.get(project)
        except gitlab.exceptions.GitlabGetError as e:
            module.fail_json(msg='No such a project %s' % project, exception=to_native(e))
    elif group:
        try:
            gitlab_group = gitlab_instance.groups.get(group)
        except gitlab.exceptions.GitlabGetError as e:
            module.fail_json(msg='No such a group %s' % group, exception=to_native(e))

    gitlab_runner = GitLabRunner(module, gitlab_instance, gitlab_project)
    gitlab_runner = GitLabRunner(module, gitlab_instance, gitlab_group, gitlab_project)
    runner_exists = gitlab_runner.exists_runner(runner_description)

    if state == 'absent':

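A sketch of registering a group-level runner via the new I(group) option (not from the commit; URL, token, and group path are placeholders). Per the spec above, I(group) is mutually exclusive with I(project) and I(owned).

- name: Register a group runner
  community.general.gitlab_runner:
    api_url: https://gitlab.example.com/              # placeholder
    api_token: "{{ gitlab_token }}"                   # placeholder
    registration_token: "{{ runner_registration_token }}"
    description: docker-runner
    group: my-group/my-subgroup                       # placeholder path
    state: present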
@@ -78,8 +78,9 @@ options:
  greedy:
    description:
      - Upgrade casks that auto update.
      - Passes --greedy to brew cask outdated when checking
        if an installed cask has a newer version available.
      - Passes C(--greedy) to C(brew outdated --cask) when checking
        if an installed cask has a newer version available,
        or to C(brew upgrade --cask) when upgrading all casks.
    type: bool
    default: false
'''
@@ -128,6 +129,11 @@ EXAMPLES = '''
  community.general.homebrew_cask:
    upgrade_all: true

- name: Upgrade all casks with greedy option
  community.general.homebrew_cask:
    upgrade_all: true
    greedy: true

- name: Upgrade given cask with force option
  community.general.homebrew_cask:
    name: alfred
@@ -581,6 +587,9 @@ class HomebrewCask(object):
        else:
            cmd = [self.brew_path, 'cask', 'upgrade']

        if self.greedy:
            cmd = cmd + ['--greedy']

        rc, out, err = '', '', ''

        if self.sudo_password:

@@ -256,9 +256,9 @@ def main():
    state = module.params["state"]
    name = module.params["name"]
    zone = module.params["zone"]
    template = [name]
    template = []
    if module.params["template"]:
        template.append(module.params["template"])
        template = [module.params["template"]]
    check_command = module.params["check_command"]
    ip = module.params["ip"]
    display_name = module.params["display_name"]
@@ -273,20 +273,18 @@ def main():
        module.fail_json(msg="unable to connect to Icinga. Exception message: %s" % (e))

    data = {
        'templates': template,
        'attrs': {
            'address': ip,
            'display_name': display_name,
            'check_command': check_command,
            'zone': zone,
            'vars': {
                'made_by': "ansible",
            },
            'templates': template,
            'vars.made_by': "ansible"
        }
    }

    if variables:
        data['attrs']['vars'].update(variables)
        for key, value in variables.items():
            data['attrs']['vars.' + key] = value

    changed = False
    if icinga.exists(name):

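An illustrative task for the change above (not from the commit; host names and credentials are placeholders): custom variables supplied here are now sent to the Icinga API as flattened C(vars.<key>) attributes rather than a nested C(vars) dict.

- name: Add a host with custom variables
  community.general.icinga2_host:
    url: https://icinga.example.com:5665      # placeholder
    url_username: ansible                     # placeholder
    url_password: "{{ icinga_password }}"     # placeholder
    name: web01.example.com
    ip: 192.0.2.10
    variables:
      os: Linux
      dist: Debian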
@@ -85,6 +85,14 @@ msg:
    returned: always
    type: str
    sample: "Action was successful"
return_values:
    description: Dictionary containing command-specific response data from the action.
    returned: on success
    type: dict
    version_added: 6.6.0
    sample: {
        "job_id": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_471269252011"
    }
'''

import re
@@ -128,10 +136,9 @@ class IdracRedfishUtils(RedfishUtils):
            return response

        response_output = response['resp'].__dict__
        job_id = response_output["headers"]["Location"]
        job_id = re.search("JID_.+", job_id).group()
        # Currently not passing job_id back to user but patch is coming
        return {'ret': True, 'msg': "Config job %s created" % job_id}
        job_id_full = response_output["headers"]["Location"]
        job_id = re.search("JID_.+", job_id_full).group()
        return {'ret': True, 'msg': "Config job %s created" % job_id, 'job_id': job_id_full}


CATEGORY_COMMANDS_ALL = {
@@ -143,6 +150,7 @@ CATEGORY_COMMANDS_ALL = {

def main():
    result = {}
    return_values = {}
    module = AnsibleModule(
        argument_spec=dict(
            category=dict(required=True),
@@ -199,7 +207,20 @@ def main():

    if category == "Systems":
        # execute only if we find a System resource
        # NOTE: Currently overriding the usage of 'data_modification' due to
        # how 'resource_id' is processed. In the case of CreateBiosConfigJob,
        # we interact with BOTH systems and managers, so you currently cannot
        # specify a single 'resource_id' to make both '_find_systems_resource'
        # and '_find_managers_resource' return success. Since
        # CreateBiosConfigJob doesn't use the matched 'resource_id' for a
        # system regardless of what's specified, disabling the 'resource_id'
        # inspection for the next call allows a specific manager to be
        # specified with 'resource_id'. If we ever need to expand the input
        # to inspect a specific system and manager in parallel, this will need
        # updates.
        rf_utils.data_modification = False
        result = rf_utils._find_systems_resource()
        rf_utils.data_modification = True
        if result['ret'] is False:
            module.fail_json(msg=to_native(result['msg']))

@@ -210,11 +231,13 @@ def main():
            if result['ret'] is False:
                module.fail_json(msg=to_native(result['msg']))
            result = rf_utils.create_bios_config_job()
            if 'job_id' in result:
                return_values['job_id'] = result['job_id']

    # Return data back or fail with proper message
    if result['ret'] is True:
        del result['ret']
        module.exit_json(changed=True, msg='Action was successful')
        module.exit_json(changed=True, msg='Action was successful', return_values=return_values)
    else:
        module.fail_json(msg=to_native(result['msg']))

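With the change above, the created job URI can be consumed from the new return_values key; a sketch (not from the commit; credentials are placeholders):

- name: Create a BIOS configuration job and capture its id
  community.general.idrac_redfish_command:
    category: Systems
    command: CreateBiosConfigJob
    baseuri: "{{ idrac_host }}"        # placeholder
    username: "{{ idrac_user }}"       # placeholder
    password: "{{ idrac_password }}"   # placeholder
  register: bios_job

- name: Show the job URI
  ansible.builtin.debug:
    var: bios_job.return_values.job_id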
plugins/modules/ilo_redfish_command.py (new file, 175 lines)
@@ -0,0 +1,175 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: ilo_redfish_command
short_description: Manages Out-Of-Band controllers using Redfish APIs
version_added: 6.6.0
description:
  - Builds Redfish URIs locally and sends them to remote OOB controllers to
    perform an action.
attributes:
  check_mode:
    support: none
  diff_mode:
    support: none
extends_documentation_fragment:
  - community.general.attributes
options:
  category:
    required: true
    description:
      - Category to execute on OOB controller.
    type: str
    choices: ['Systems']
  command:
    required: true
    description:
      - List of commands to execute on OOB controller.
    type: list
    elements: str
  baseuri:
    required: true
    description:
      - Base URI of OOB controller.
    type: str
  username:
    required: false
    description:
      - Username for authenticating to iLO.
    type: str
  password:
    required: false
    description:
      - Password for authenticating to iLO.
    type: str
  auth_token:
    required: false
    description:
      - Security token for authenticating to iLO.
    type: str
  timeout:
    required: false
    description:
      - Timeout in seconds for HTTP requests to iLO.
    default: 60
    type: int
author:
  - Varni H P (@varini-hp)
'''

EXAMPLES = '''
- name: Wait for iLO Reboot Completion
  community.general.ilo_redfish_command:
    category: Systems
    command: WaitforiLORebootCompletion
    baseuri: "{{ baseuri }}"
    username: "{{ username }}"
    password: "{{ password }}"
'''

RETURN = '''
ilo_redfish_command:
    description: Returns the status of the operation performed on the iLO.
    type: dict
    contains:
        WaitforiLORebootCompletion:
            description: Returns the output msg and whether the function executed successfully.
            type: dict
            contains:
                ret:
                    description: Return True/False based on whether the operation was performed successfully.
                    type: bool
                msg:
                    description: Status of the operation performed on the iLO.
                    type: str
    returned: always
'''

# More will be added as module features are expanded
CATEGORY_COMMANDS_ALL = {
    "Systems": ["WaitforiLORebootCompletion"]
}

from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native


def main():
    result = {}
    module = AnsibleModule(
        argument_spec=dict(
            category=dict(required=True, choices=list(CATEGORY_COMMANDS_ALL.keys())),
            command=dict(required=True, type='list', elements='str'),
            baseuri=dict(required=True),
            timeout=dict(type="int", default=60),
            username=dict(),
            password=dict(no_log=True),
            auth_token=dict(no_log=True)
        ),
        required_together=[
            ('username', 'password'),
        ],
        required_one_of=[
            ('username', 'auth_token'),
        ],
        mutually_exclusive=[
            ('username', 'auth_token'),
        ],
        supports_check_mode=False
    )

    category = module.params['category']
    command_list = module.params['command']

    # admin credentials used for authentication
    creds = {'user': module.params['username'],
             'pswd': module.params['password'],
             'token': module.params['auth_token']}

    timeout = module.params['timeout']

    # Build root URI
    root_uri = "https://" + module.params['baseuri']
    rf_utils = iLORedfishUtils(creds, root_uri, timeout, module)

    # Check that Category is valid
    if category not in CATEGORY_COMMANDS_ALL:
        module.fail_json(msg=to_native(
            "Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys()))))

    # Check that all commands are valid
    for cmd in command_list:
        # Fail if even one command given is invalid
        if cmd not in CATEGORY_COMMANDS_ALL[category]:
            module.fail_json(
                msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))

    if category == "Systems":
        # execute only if we find a System resource

        result = rf_utils._find_systems_resource()
        if result['ret'] is False:
            module.fail_json(msg=to_native(result['msg']))

        for command in command_list:
            if command == "WaitforiLORebootCompletion":
                result[command] = rf_utils.wait_for_ilo_reboot_completion()

            # Return data back or fail with proper message
            if not result[command]['ret']:
                module.fail_json(msg=result)

    changed = result[command].get('changed', False)
    module.exit_json(ilo_redfish_command=result, changed=changed)


if __name__ == '__main__':
    main()

@@ -174,8 +174,14 @@ def drop_user(module, client, user_name):
def set_user_grants(module, client, user_name, grants):
    changed = False

    current_grants = []
    try:
        current_grants = client.get_list_privileges(user_name)
    except influx.exceptions.InfluxDBClientError as e:
        if not module.check_mode or 'user not found' not in e.content:
            module.fail_json(msg=e.content)

    try:
        parsed_grants = []
        # Fix privileges wording
        for i, v in enumerate(current_grants):

@@ -42,10 +42,9 @@ options:
    description:
      - Section name in INI file. This is added if I(state=present) automatically when
        a single value is being set.
      - If left empty or set to C(null), the I(option) will be placed before the first I(section).
      - If left empty, being omitted, or being set to C(null), the I(option) will be placed before the first I(section).
      - Using C(null) is also required if the config format does not support sections.
    type: str
    required: true
  option:
    description:
      - If set (required for changing a I(value)), this is the name of the option.
@@ -317,14 +316,14 @@ def do_ini(module, filename, section=None, option=None, values=None,
            # override option with no value to option with value if not allow_no_value
            if len(values) > 0:
                for index, line in enumerate(section_lines):
                    if not changed_lines[index] and match_active_opt(option, line):
                    if not changed_lines[index] and match_opt(option, line):
                        newline = assignment_format % (option, values.pop(0))
                        (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg)
                        if len(values) == 0:
                            break
            # remove all remaining option occurrences from the rest of the section
            for index in range(len(section_lines) - 1, 0, -1):
                if not changed_lines[index] and match_active_opt(option, section_lines[index]):
                if not changed_lines[index] and match_opt(option, section_lines[index]):
                    del section_lines[index]
                    del changed_lines[index]
                    changed = True
@@ -430,7 +429,7 @@ def main():
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='path', required=True, aliases=['dest']),
            section=dict(type='str', required=True),
            section=dict(type='str'),
            option=dict(type='str'),
            value=dict(type='str'),
            values=dict(type='list', elements='str'),

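With I(section) no longer required, sectionless config formats can be managed as the new docs describe; a sketch (not from the commit; the file and option are illustrative):

- name: Set an option in a config format without sections
  community.general.ini_file:
    path: /etc/default/grub     # illustrative sectionless file
    section: null
    option: GRUB_TIMEOUT
    value: '5'
    state: present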
@@ -156,20 +156,22 @@ from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes


def line_dict(line):
def lineDict(line):
    return {'line': line, 'line_type': 'unknown'}


def make_option_dict(line, iface, option, value, address_family):
def optionDict(line, iface, option, value, address_family):
    return {'line': line, 'iface': iface, 'option': option, 'value': value, 'line_type': 'option', 'address_family': address_family}


def get_option_value(line):
    patt = re.compile(r'^\s+(?P<option>\S+)\s+(?P<value>\S?.*\S)\s*$')
    match = patt.match(line)
    if not match:
        return None, None
    return match.group("option"), match.group("value")
def getValueFromLine(s):
    spaceRe = re.compile(r'\s+')
    m = list(spaceRe.finditer(s))[-1]
    valueEnd = m.start()
    option = s.split()[0]
    optionStart = s.find(option)
    optionLen = len(option)
    return s[optionLen + optionStart:].strip()


def read_interfaces_file(module, filename):
@@ -177,27 +179,32 @@ def read_interfaces_file(module, filename):
        return read_interfaces_lines(module, f)


def _is_line_processing_none(first_word):
    return first_word in ("source", "source-dir", "source-directory", "auto", "no-auto-down", "no-scripts") or first_word.startswith("auto-")


def read_interfaces_lines(module, line_strings):
    lines = []
    ifaces = {}
    iface_name = None
    address_family = None
    currif = {}
    currently_processing = None
    for i, line in enumerate(line_strings):
    i = 0
    for line in line_strings:
        i += 1
        words = line.split()
        if not words or words[0].startswith("#"):
            lines.append(line_dict(line))
        if len(words) < 1:
            lines.append(lineDict(line))
            continue
        if words[0][0] == "#":
            lines.append(lineDict(line))
            continue
        if words[0] == "mapping":
            lines.append(line_dict(line))
            # currmap = calloc(1, sizeof *currmap);
            lines.append(lineDict(line))
            currently_processing = "MAPPING"
        elif _is_line_processing_none(words[0]):
            lines.append(line_dict(line))
        elif words[0] == "source":
            lines.append(lineDict(line))
            currently_processing = "NONE"
        elif words[0] == "source-dir":
            lines.append(lineDict(line))
            currently_processing = "NONE"
        elif words[0] == "source-directory":
            lines.append(lineDict(line))
            currently_processing = "NONE"
        elif words[0] == "iface":
            currif = {
@@ -220,25 +227,57 @@ def read_interfaces_lines(module, line_strings):
            ifaces[iface_name] = currif
            lines.append({'line': line, 'iface': iface_name, 'line_type': 'iface', 'params': currif, 'address_family': address_family})
            currently_processing = "IFACE"
        elif words[0] == "auto":
            lines.append(lineDict(line))
            currently_processing = "NONE"
        elif words[0].startswith("allow-"):
            lines.append(lineDict(line))
            currently_processing = "NONE"
        elif words[0] == "no-auto-down":
            lines.append(lineDict(line))
            currently_processing = "NONE"
        elif words[0] == "no-scripts":
            lines.append(lineDict(line))
            currently_processing = "NONE"
        else:
            if currently_processing == "IFACE":
                option_name, value = get_option_value(line)
                # TODO: if option_name in currif.options
                lines.append(make_option_dict(line, iface_name, option_name, value, address_family))
                option_name = words[0]
                value = getValueFromLine(line)
                lines.append(optionDict(line, iface_name, option_name, value, address_family))
                if option_name in ["pre-up", "up", "down", "post-up"]:
                    currif[option_name].append(value)
                else:
                    currif[option_name] = value
            elif currently_processing == "MAPPING":
                lines.append(line_dict(line))
                lines.append(lineDict(line))
            elif currently_processing == "NONE":
                lines.append(line_dict(line))
                lines.append(lineDict(line))
            else:
                module.fail_json(msg="misplaced option %s in line %d" % (line, i + 1))
                module.fail_json(msg="misplaced option %s in line %d" % (line, i))
                return None, None
    return lines, ifaces


def get_interface_options(iface_lines):
    return [i for i in iface_lines if i['line_type'] == 'option']


def get_target_options(iface_options, option):
    return [i for i in iface_options if i['option'] == option]


def update_existing_option_line(target_option, value):
    old_line = target_option['line']
    old_value = target_option['value']
    prefix_start = old_line.find(target_option["option"])
    optionLen = len(target_option["option"])
    old_value_position = re.search(r"\s+".join(map(re.escape, old_value.split())), old_line[prefix_start + optionLen:])
    start = old_value_position.start() + prefix_start + optionLen
    end = old_value_position.end() + prefix_start + optionLen
    line = old_line[:start] + value + old_line[end:]
    return line


def set_interface_option(module, lines, iface, option, raw_value, state, address_family=None):
    value = str(raw_value)
    changed = False
@@ -248,41 +287,35 @@ def set_interface_option(module, lines, iface, option, raw_value, state, address
        iface_lines = [item for item in iface_lines
                       if "address_family" in item and item["address_family"] == address_family]

    if not iface_lines:
    if len(iface_lines) < 1:
        # interface not found
        module.fail_json(msg="Error: interface %s not found" % iface)
        return changed, None

    iface_options = [il for il in iface_lines if il['line_type'] == 'option']
    target_options = [io for io in iface_options if io['option'] == option]
    iface_options = get_interface_options(iface_lines)
    target_options = get_target_options(iface_options, option)

    if state == "present":
        if not target_options:
        if len(target_options) < 1:
            changed = True
            # add new option
            last_line_dict = iface_lines[-1]
            return add_option_after_line(option, value, iface, lines, last_line_dict, iface_options, address_family)

        if option in ["pre-up", "up", "down", "post-up"] and all(ito['value'] != value for ito in target_options):
            return add_option_after_line(option, value, iface, lines, target_options[-1], iface_options, address_family)

        # if more than one option found edit the last one
        if target_options[-1]['value'] != value:
            changed = True
            target_option = target_options[-1]
            old_line = target_option['line']
            old_value = target_option['value']
            address_family = target_option['address_family']
            prefix_start = old_line.find(option)
            option_len = len(option)
            old_value_position = re.search(r"\s+".join(map(re.escape, old_value.split())), old_line[prefix_start + option_len:])
            start = old_value_position.start() + prefix_start + option_len
            end = old_value_position.end() + prefix_start + option_len
            line = old_line[:start] + value + old_line[end:]
            index = len(lines) - lines[::-1].index(target_option) - 1
            lines[index] = make_option_dict(line, iface, option, value, address_family)
            return changed, lines

    if state == "absent":
        if target_options:
            changed, lines = addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family)
        else:
            if option in ["pre-up", "up", "down", "post-up"]:
                if len(list(filter(lambda i: i['value'] == value, target_options))) < 1:
                    changed, lines = addOptionAfterLine(option, value, iface, lines, target_options[-1], iface_options, address_family)
            else:
                # if more than one option found edit the last one
                if target_options[-1]['value'] != value:
                    changed = True
                    target_option = target_options[-1]
                    line = update_existing_option_line(target_option, value)
                    address_family = target_option['address_family']
                    index = len(lines) - lines[::-1].index(target_option) - 1
                    lines[index] = optionDict(line, iface, option, value, address_family)
    elif state == "absent":
        if len(target_options) >= 1:
            if option in ["pre-up", "up", "down", "post-up"] and value is not None and value != "None":
                for target_option in [ito for ito in target_options if ito['value'] == value]:
                    changed = True
@@ -291,11 +324,13 @@ def set_interface_option(module, lines, iface, option, raw_value, state, address
            changed = True
            for target_option in target_options:
                lines = [ln for ln in lines if ln != target_option]
    else:
        module.fail_json(msg="Error: unsupported state %s, has to be either present or absent" % state)

    return changed, lines


def add_option_after_line(option, value, iface, lines, last_line_dict, iface_options, address_family):
def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family):
    # Changing method of interface is not an addition
    if option == 'method':
        changed = False
@@ -311,18 +346,19 @@ def add_option_after_line(option, value, iface, lines, last_line_dict, iface_opt
    suffix_start = last_line.rfind(last_line.split()[-1]) + len(last_line.split()[-1])
    prefix = last_line[:prefix_start]

    if not iface_options:
    if len(iface_options) < 1:
        # interface has no options, ident
        prefix += " "

    line = prefix + "%s %s" % (option, value) + last_line[suffix_start:]
    option_dict = make_option_dict(line, iface, option, value, address_family)
    option_dict = optionDict(line, iface, option, value, address_family)
    index = len(lines) - lines[::-1].index(last_line_dict)
    lines.insert(index, option_dict)
    return True, lines


def write_changes(module, lines, dest):

    tmpfd, tmpfile = tempfile.mkstemp()
    with os.fdopen(tmpfd, 'wb') as f:
        f.write(to_bytes(''.join(lines), errors='surrogate_or_strict'))

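A usage sketch for the module whose internals are reworked above (not from the commit; the interface name and value are illustrative):

- name: Set the mtu option of eth1
  community.general.interfaces_file:
    dest: /etc/network/interfaces
    iface: eth1       # illustrative interface
    option: mtu
    value: '8000'
    state: present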
@@ -59,7 +59,7 @@ options:
  record_values:
    description:
      - Manage DNS record name with this value.
      - Mutually exclusive with I(record_values), and exactly one of I(record_value) and I(record_values) has to be specified.
      - Mutually exclusive with I(record_value), and exactly one of I(record_value) and I(record_values) has to be specified.
      - In the case of 'A' or 'AAAA' record types, this will be the IP address.
      - In the case of 'A6' record type, this will be the A6 Record data.
      - In the case of 'CNAME' record type, this will be the hostname.

@@ -20,6 +20,13 @@ attributes:
  diff_mode:
    support: none
options:
  append:
    description:
      - If C(true), add the listed I(host) to the I(hostgroup).
      - If C(false), only the listed I(host) will be in I(hostgroup), removing any other hosts.
    default: false
    type: bool
    version_added: 6.6.0
  cn:
    description:
      - Name of host-group.
@@ -147,6 +154,7 @@ def ensure(module, client):
    state = module.params['state']
    host = module.params['host']
    hostgroup = module.params['hostgroup']
    append = module.params['append']

    ipa_hostgroup = client.hostgroup_find(name=name)
    module_hostgroup = get_hostgroup_dict(description=module.params['description'])
@@ -168,14 +176,18 @@ def ensure(module, client):
                client.hostgroup_mod(name=name, item=data)

        if host is not None:
            changed = client.modify_if_diff(name, ipa_hostgroup.get('member_host', []), [item.lower() for item in host],
                                            client.hostgroup_add_host, client.hostgroup_remove_host) or changed
            changed = client.modify_if_diff(name, ipa_hostgroup.get('member_host', []),
                                            [item.lower() for item in host],
                                            client.hostgroup_add_host,
                                            client.hostgroup_remove_host,
                                            append=append) or changed

        if hostgroup is not None:
            changed = client.modify_if_diff(name, ipa_hostgroup.get('member_hostgroup', []),
                                            [item.lower() for item in hostgroup],
                                            client.hostgroup_add_hostgroup,
                                            client.hostgroup_remove_hostgroup) or changed
                                            client.hostgroup_remove_hostgroup,
                                            append=append) or changed

    else:
        if ipa_hostgroup:
@@ -192,7 +204,8 @@ def main():
                         description=dict(type='str'),
                         host=dict(type='list', elements='str'),
                         hostgroup=dict(type='list', elements='str'),
                         state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']))
                         state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
                         append=dict(type='bool', default=False))

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

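A sketch of the new I(append) behaviour (not from the commit; host, group, and credentials are placeholders): with append: true the listed host is added without removing existing members.

- name: Add a host to a host-group without touching other members
  community.general.ipa_hostgroup:
    name: databases
    host:
      - db.example.com
    append: true
    state: present
    ipa_host: ipa.example.com          # placeholder
    ipa_user: admin                    # placeholder
    ipa_pass: "{{ ipa_password }}"     # placeholder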
@@ -100,7 +100,7 @@ powerstate:
    description: The current power state of the machine.
    returned: success and I(machine) is not provided
    type: str
    sample: on
    sample: 'on'
status:
    description: The current power state of the machine when the machine option is set.
    returned: success and I(machine) is provided
@@ -132,14 +132,14 @@ EXAMPLES = '''
    name: test.testdomain.com
    user: admin
    password: password
    state: on
    state: 'on'

- name: Ensure machines of which remote target address is 48 and 50 are powered off
  community.general.ipmi_power:
    name: test.testdomain.com
    user: admin
    password: password
    state: off
    state: 'off'
    machine:
      - targetAddress: 48
      - targetAddress: 50
@@ -151,9 +151,9 @@ EXAMPLES = '''
    password: password
    machine:
      - targetAddress: 48
        state: on
        state: 'on'
      - targetAddress: 50
        state: off
        state: 'off'
'''

import traceback

@@ -6,6 +6,7 @@
|
||||
#
|
||||
# Copyright (c) 2020, Per Abildgaard Toft <per@minfejl.dk> Search and update function
|
||||
# Copyright (c) 2021, Brandon McNama <brandonmcnama@outlook.com> Issue attachment functionality
|
||||
# Copyright (c) 2022, Hugo Prudente <hugo.kenshin+oss@gmail.com> Worklog functionality
|
||||
#
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
@@ -40,9 +41,10 @@ options:
|
||||
type: str
|
||||
required: true
|
||||
aliases: [ command ]
|
||||
choices: [ attach, comment, create, edit, fetch, link, search, transition, update ]
|
||||
choices: [ attach, comment, create, edit, fetch, link, search, transition, update, worklog ]
|
||||
description:
|
||||
- The operation to perform.
|
||||
- C(worklog) was added in community.genereal 6.5.0.
|
||||
|
||||
username:
|
||||
type: str
|
||||
@@ -171,7 +173,6 @@ options:
|
||||
- When passed to comment, the data structure is merged at the first level since community.general 4.6.0. Useful to add JIRA properties for example.
|
||||
- Note that JIRA may not allow changing field values on specific transitions or states.
|
||||
default: {}
|
||||
|
||||
jql:
|
||||
required: false
|
||||
description:
|
||||
@@ -287,6 +288,47 @@ EXAMPLES = r"""
|
||||
value:
|
||||
internal: true
|
||||
|
||||
# Add an workog to an existing issue
|
||||
- name: Worklog on issue
|
||||
community.general.jira:
|
||||
uri: '{{ server }}'
|
||||
username: '{{ user }}'
|
||||
password: '{{ pass }}'
|
||||
issue: '{{ issue.meta.key }}'
|
||||
operation: worklog
|
||||
comment: A worklog added by Ansible
|
||||
fields:
|
||||
timeSpentSeconds: 12000
|
||||
|
||||
- name: Workflow on issue with comment restricted visibility
|
||||
community.general.jira:
|
||||
uri: '{{ server }}'
|
||||
username: '{{ user }}'
|
||||
password: '{{ pass }}'
|
||||
issue: '{{ issue.meta.key }}'
|
||||
operation: worklog
|
||||
comment: A worklog added by Ansible
|
||||
comment_visibility:
|
||||
type: role
|
||||
value: Developers
|
||||
fields:
|
||||
timeSpentSeconds: 12000
|
||||
|
||||
- name: Workflow on issue with comment property to mark it internal
|
||||
community.general.jira:
|
||||
uri: '{{ server }}'
|
||||
username: '{{ user }}'
|
||||
password: '{{ pass }}'
|
||||
issue: '{{ issue.meta.key }}'
|
||||
operation: worklog
|
||||
comment: A worklog added by Ansible
|
||||
fields:
|
||||
properties:
|
||||
- key: 'sd.public.comment'
|
||||
value:
|
||||
internal: true
|
||||
timeSpentSeconds: 12000
|
||||
|
||||
# Assign an existing issue using edit
|
||||
- name: Assign an issue using free-form fields
|
||||
community.general.jira:
|
||||
@@ -438,7 +480,7 @@ class JIRA(StateModuleHelper):
|
||||
uri=dict(type='str', required=True),
|
||||
operation=dict(
|
||||
type='str',
|
||||
choices=['attach', 'create', 'comment', 'edit', 'update', 'fetch', 'transition', 'link', 'search'],
|
||||
choices=['attach', 'create', 'comment', 'edit', 'update', 'fetch', 'transition', 'link', 'search', 'worklog'],
|
||||
aliases=['command'], required=True
|
||||
),
|
||||
username=dict(type='str'),
|
||||
@@ -481,6 +523,7 @@ class JIRA(StateModuleHelper):
|
||||
('operation', 'attach', ['issue', 'attachment']),
|
||||
('operation', 'create', ['project', 'issuetype', 'summary']),
|
||||
('operation', 'comment', ['issue', 'comment']),
|
||||
('operation', 'workflow', ['issue', 'comment']),
|
||||
('operation', 'fetch', ['issue']),
|
||||
('operation', 'transition', ['issue', 'status']),
|
||||
('operation', 'link', ['linktype', 'inwardissue', 'outwardissue']),
|
||||
@@ -535,6 +578,22 @@ class JIRA(StateModuleHelper):
|
||||
url = self.vars.restbase + '/issue/' + self.vars.issue + '/comment'
|
||||
self.vars.meta = self.post(url, data)
|
||||
|
||||
@cause_changes(on_success=True)
|
||||
def operation_worklog(self):
|
||||
data = {
|
||||
'comment': self.vars.comment
|
||||
}
|
||||
# if comment_visibility is specified, restrict visibility
|
||||
if self.vars.comment_visibility is not None:
|
||||
data['visibility'] = self.vars.comment_visibility
|
||||
|
||||
# Use 'fields' to merge in any additional data
|
||||
if self.vars.fields:
|
||||
data.update(self.vars.fields)
|
||||
|
||||
url = self.vars.restbase + '/issue/' + self.vars.issue + '/worklog'
|
||||
self.vars.meta = self.post(url, data)
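# For reference, a minimal sketch of the raw REST call this operation ends up
# performing (hypothetical endpoint and values; the module itself goes through
# self.post() rather than the 'requests' library):
#
#   import requests
#   requests.post(
#       'https://jira.example.com/rest/api/2/issue/PROJ-1/worklog',
#       json={'comment': 'A worklog added by Ansible', 'timeSpentSeconds': 12000},
#       auth=('user', 'pass'),
#   )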
|
||||
|
||||
@cause_changes(on_success=True)
|
||||
def operation_edit(self):
|
||||
data = {
|
||||
|
||||
plugins/modules/kdeconfig.py (new file, 277 lines)
@@ -0,0 +1,277 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright (c) 2023, Salvatore Mesoraca <s.mesoraca16@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: kdeconfig
|
||||
short_description: Manage KDE configuration files
|
||||
version_added: "6.5.0"
|
||||
description:
|
||||
- Add or change individual settings in KDE configuration files.
|
||||
- It uses B(kwriteconfig) under the hood.
|
||||
|
||||
options:
|
||||
path:
|
||||
description:
|
||||
- Path to the config file. If the file does not exist it will be created.
|
||||
type: path
|
||||
required: true
|
||||
kwriteconfig_path:
|
||||
description:
|
||||
- Path to the kwriteconfig executable. If not specified, Ansible will try
|
||||
to discover it.
|
||||
type: path
|
||||
values:
|
||||
description:
|
||||
- List of values to set.
|
||||
type: list
|
||||
elements: dict
|
||||
suboptions:
|
||||
group:
|
||||
description:
|
||||
- The option's group. Either this option or I(groups) is required, but not both.
|
||||
type: str
|
||||
groups:
|
||||
description:
|
||||
- List of the option's groups. Either this option or I(group) is required, but not both.
|
||||
type: list
|
||||
elements: str
|
||||
key:
|
||||
description:
|
||||
- The option's name.
|
||||
type: str
|
||||
required: true
|
||||
value:
|
||||
description:
|
||||
- The option's value. Either this option or I(bool_value) is required, but not both.
|
||||
type: str
|
||||
bool_value:
|
||||
description:
|
||||
- Boolean value.
|
||||
- Either this option or I(value) is required, but not both.
|
||||
type: bool
|
||||
required: true
|
||||
backup:
|
||||
description:
|
||||
- Create a backup file.
|
||||
type: bool
|
||||
default: false
|
||||
extends_documentation_fragment:
|
||||
- files
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: full
|
||||
requirements:
|
||||
- kwriteconfig
|
||||
author:
|
||||
- Salvatore Mesoraca (@smeso)
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Ensure "Homepage=https://www.ansible.com/" in group "Branding"
|
||||
community.general.kdeconfig:
|
||||
path: /etc/xdg/kickoffrc
|
||||
values:
|
||||
- group: Branding
|
||||
key: Homepage
|
||||
value: https://www.ansible.com/
|
||||
mode: '0644'
|
||||
|
||||
- name: Ensure "KEY=true" in groups "Group" and "Subgroup", and "KEY=VALUE" in Group2
|
||||
community.general.kdeconfig:
|
||||
path: /etc/xdg/someconfigrc
|
||||
values:
|
||||
- groups: [Group, Subgroup]
|
||||
key: KEY
|
||||
bool_value: true
|
||||
- group: Group2
|
||||
key: KEY
|
||||
value: VALUE
|
||||
backup: true
|
||||
'''
|
||||
|
||||
RETURN = r''' # '''
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.common.text.converters import to_bytes, to_text
|
||||
|
||||
|
||||
class TemporaryDirectory(object):
|
||||
"""Basic backport of tempfile.TemporaryDirectory"""
|
||||
|
||||
def __init__(self, suffix="", prefix="tmp", dir=None):
|
||||
self.name = None
|
||||
self.name = tempfile.mkdtemp(suffix, prefix, dir)
|
||||
|
||||
def __enter__(self):
|
||||
return self.name
|
||||
|
||||
def rm(self):
|
||||
if self.name:
|
||||
shutil.rmtree(self.name, ignore_errors=True)
|
||||
self.name = None
|
||||
|
||||
def __exit__(self, exc, value, tb):
|
||||
self.rm()
|
||||
|
||||
def __del__(self):
|
||||
self.rm()
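# Usage mirrors tempfile.TemporaryDirectory on Python 3, e.g.:
#   with TemporaryDirectory(dir=module.tmpdir) as tmpdir:
#       ...  # directory is removed when the block exits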
|
||||
|
||||
|
||||
def run_kwriteconfig(module, cmd, path, groups, key, value):
|
||||
"""Invoke kwriteconfig with arguments"""
|
||||
args = [cmd, '--file', path, '--key', key]
|
||||
for group in groups:
|
||||
args.extend(['--group', group])
|
||||
if isinstance(value, bool):
|
||||
args.extend(['--type', 'bool'])
|
||||
if value:
|
||||
args.append('true')
|
||||
else:
|
||||
args.append('false')
|
||||
else:
|
||||
args.append(value)
|
||||
module.run_command(args, check_rc=True)
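# Usage sketch with assumed values: the call
#   run_kwriteconfig(module, '/usr/bin/kwriteconfig5', '/tmp/testrc',
#                    ['General', 'Sub'], 'Width', True)
# builds and runs the equivalent of:
#   kwriteconfig5 --file /tmp/testrc --key Width --group General --group Sub --type bool true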
|
||||
|
||||
|
||||
def run_module(module, tmpdir, kwriteconfig):
|
||||
result = dict(changed=False, msg='OK', path=module.params['path'])
|
||||
b_path = to_bytes(module.params['path'])
|
||||
tmpfile = os.path.join(tmpdir, 'file')
|
||||
b_tmpfile = to_bytes(tmpfile)
|
||||
diff = dict(
|
||||
before='',
|
||||
after='',
|
||||
before_header=result['path'],
|
||||
after_header=result['path'],
|
||||
)
|
||||
try:
|
||||
with open(b_tmpfile, 'wb') as dst:
|
||||
try:
|
||||
with open(b_path, 'rb') as src:
|
||||
b_data = src.read()
|
||||
except IOError:
|
||||
result['changed'] = True
|
||||
else:
|
||||
dst.write(b_data)
|
||||
try:
|
||||
diff['before'] = to_text(b_data)
|
||||
except UnicodeError:
|
||||
diff['before'] = repr(b_data)
|
||||
except IOError:
|
||||
module.fail_json(msg='Unable to create temporary file', traceback=traceback.format_exc())
|
||||
|
||||
for row in module.params['values']:
|
||||
groups = row['groups']
|
||||
if groups is None:
|
||||
groups = [row['group']]
|
||||
key = row['key']
|
||||
value = row['bool_value']
|
||||
if value is None:
|
||||
value = row['value']
|
||||
run_kwriteconfig(module, kwriteconfig, tmpfile, groups, key, value)
|
||||
|
||||
with open(b_tmpfile, 'rb') as tmpf:
|
||||
b_data = tmpf.read()
|
||||
try:
|
||||
diff['after'] = to_text(b_data)
|
||||
except UnicodeError:
|
||||
diff['after'] = repr(b_data)
|
||||
|
||||
result['changed'] = result['changed'] or diff['after'] != diff['before']
|
||||
|
||||
file_args = module.load_file_common_arguments(module.params)
|
||||
|
||||
if module.check_mode:
|
||||
if not result['changed']:
|
||||
shutil.copystat(b_path, b_tmpfile)
|
||||
uid, gid = module.user_and_group(b_path)
|
||||
os.chown(b_tmpfile, uid, gid)
|
||||
if module._diff:
|
||||
diff = {}
|
||||
else:
|
||||
diff = None
|
||||
result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'], diff=diff)
|
||||
if module._diff:
|
||||
result['diff'] = diff
|
||||
module.exit_json(**result)
|
||||
|
||||
if result['changed']:
|
||||
if module.params['backup'] and os.path.exists(b_path):
|
||||
result['backup_file'] = module.backup_local(result['path'])
|
||||
try:
|
||||
module.atomic_move(b_tmpfile, b_path)
|
||||
except IOError:
|
||||
module.fail_json(msg='Unable to move temporary file %s to %s, IOError' % (tmpfile, result['path']), traceback=traceback.format_exc())
|
||||
|
||||
if result['changed']:
|
||||
module.set_fs_attributes_if_different(file_args, result['changed'])
|
||||
else:
|
||||
if module._diff:
|
||||
diff = {}
|
||||
else:
|
||||
diff = None
|
||||
result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'], diff=diff)
|
||||
if module._diff:
|
||||
result['diff'] = diff
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
def main():
|
||||
single_value_arg = dict(group=dict(type='str'),
|
||||
groups=dict(type='list', elements='str'),
|
||||
key=dict(type='str', required=True, no_log=False),
|
||||
value=dict(type='str'),
|
||||
bool_value=dict(type='bool'))
|
||||
required_alternatives = [('group', 'groups'), ('value', 'bool_value')]
|
||||
module_args = dict(
|
||||
values=dict(type='list',
|
||||
elements='dict',
|
||||
options=single_value_arg,
|
||||
mutually_exclusive=required_alternatives,
|
||||
required_one_of=required_alternatives,
|
||||
required=True),
|
||||
path=dict(type='path', required=True),
|
||||
kwriteconfig_path=dict(type='path'),
|
||||
backup=dict(type='bool', default=False),
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=module_args,
|
||||
add_file_common_args=True,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
kwriteconfig = None
|
||||
if module.params['kwriteconfig_path'] is not None:
|
||||
kwriteconfig = module.get_bin_path(module.params['kwriteconfig_path'], required=True)
|
||||
else:
|
||||
for progname in ('kwriteconfig5', 'kwriteconfig', 'kwriteconfig4'):
|
||||
kwriteconfig = module.get_bin_path(progname)
|
||||
if kwriteconfig is not None:
|
||||
break
|
||||
if kwriteconfig is None:
|
||||
module.fail_json(msg='kwriteconfig is not installed')
|
||||
for v in module.params['values']:
|
||||
if not v['key']:
|
||||
module.fail_json(msg="'key' cannot be empty")
|
||||
with TemporaryDirectory(dir=module.tmpdir) as tmpdir:
|
||||
run_module(module, tmpdir, kwriteconfig)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -79,6 +79,14 @@ options:
|
||||
description:
|
||||
- Priority order of the execution.
|
||||
type: int
|
||||
subFlowType:
|
||||
description:
|
||||
- For new subflows, optionally specify the type.
|
||||
- Is only used at creation.
|
||||
choices: ["basic-flow", "form-flow"]
|
||||
default: "basic-flow"
|
||||
type: str
|
||||
version_added: 6.6.0
|
||||
state:
|
||||
description:
|
||||
- Control if the authentication flow must exist or not.
|
||||
@@ -264,7 +272,7 @@ def create_or_update_executions(kc, config, realm='master'):
|
||||
exec_index = find_exec_in_executions(new_exec, existing_executions)
|
||||
if exec_index != -1:
|
||||
# Remove key that doesn't need to be compared with existing_exec
|
||||
exclude_key = ["flowAlias"]
|
||||
exclude_key = ["flowAlias", "subFlowType"]
|
||||
for index_key, key in enumerate(new_exec, start=0):
|
||||
if new_exec[key] is None:
|
||||
exclude_key.append(key)
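# keys the task left unset (None) are excluded from the comparison alongside the static ones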
|
||||
@@ -282,7 +290,7 @@ def create_or_update_executions(kc, config, realm='master'):
|
||||
id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"]
|
||||
after += str(new_exec) + '\n'
|
||||
elif new_exec["displayName"] is not None:
|
||||
kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm)
|
||||
kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm, flowType=new_exec["subFlowType"])
|
||||
exec_found = True
|
||||
exec_index = new_exec_index
|
||||
id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"]
|
||||
@@ -299,7 +307,7 @@ def create_or_update_executions(kc, config, realm='master'):
|
||||
kc.add_authenticationConfig_to_execution(updated_exec["id"], new_exec["authenticationConfig"], realm=realm)
|
||||
for key in new_exec:
|
||||
# remove unwanted key for the next API call
|
||||
if key != "flowAlias" and key != "authenticationConfig":
|
||||
if key not in ("flowAlias", "authenticationConfig", "subFlowType"):
|
||||
updated_exec[key] = new_exec[key]
|
||||
if new_exec["requirement"] is not None:
|
||||
kc.update_authentication_executions(flow_alias_parent, updated_exec, realm=realm)
|
||||
@@ -334,6 +342,7 @@ def main():
|
||||
flowAlias=dict(type='str'),
|
||||
authenticationConfig=dict(type='dict'),
|
||||
index=dict(type='int'),
|
||||
subFlowType=dict(choices=["basic-flow", "form-flow"], default='basic-flow', type='str'),
|
||||
)),
|
||||
state=dict(choices=["absent", "present"], default='present'),
|
||||
force=dict(type='bool', default=False),
|
||||
|
||||
plugins/modules/keycloak_authz_authorization_scope.py (new file, 280 lines)
@@ -0,0 +1,280 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2017, Eike Frost <ei@kefro.st>
|
||||
# Copyright (c) 2021, Christophe Gilles <christophe.gilles54@gmail.com>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or
|
||||
# https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: keycloak_authz_authorization_scope
|
||||
|
||||
short_description: Allows administration of Keycloak client authorization scopes via Keycloak API
|
||||
|
||||
version_added: 6.6.0
|
||||
|
||||
description:
|
||||
- This module allows the administration of Keycloak client Authorization Scopes via the Keycloak REST
|
||||
API. Authorization Scopes are only available if a client has Authorization enabled.
|
||||
|
||||
- This module requires access to the REST API via OpenID Connect; the user connecting and the realm
|
||||
being used must have the requisite access rights. In a default Keycloak installation, admin-cli
|
||||
and an admin user would work, as would a separate realm definition with the scope tailored
|
||||
to your needs and a user having the expected roles.
|
||||
|
||||
- The names of module options are snake_cased versions of the camelCase options used by Keycloak.
|
||||
The Authorization Services paths and payloads have not officially been documented by the Keycloak project.
|
||||
U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/)
|
||||
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: full
|
||||
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- State of the authorization scope.
|
||||
- On C(present), the authorization scope will be created (or updated if it exists already).
|
||||
- On C(absent), the authorization scope will be removed if it exists.
|
||||
choices: ['present', 'absent']
|
||||
default: 'present'
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- Name of the authorization scope to create.
|
||||
type: str
|
||||
required: true
|
||||
display_name:
|
||||
description:
|
||||
- The display name of the authorization scope.
|
||||
type: str
|
||||
required: false
|
||||
icon_uri:
|
||||
description:
|
||||
- The icon URI for the authorization scope.
|
||||
type: str
|
||||
required: false
|
||||
client_id:
|
||||
description:
|
||||
- The C(clientId) of the Keycloak client that should have the authorization scope.
|
||||
- This is usually a human-readable name of the Keycloak client.
|
||||
type: str
|
||||
required: true
|
||||
realm:
|
||||
description:
|
||||
- The name of the Keycloak realm the Keycloak client is in.
|
||||
type: str
|
||||
required: true
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.keycloak
|
||||
- community.general.attributes
|
||||
|
||||
author:
|
||||
- Samuli Seppänen (@mattock)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Manage Keycloak file:delete authorization scope
|
||||
keycloak_authz_authorization_scope:
|
||||
name: file:delete
|
||||
state: present
|
||||
display_name: File delete
|
||||
client_id: myclient
|
||||
realm: myrealm
|
||||
auth_keycloak_url: http://localhost:8080/auth
|
||||
auth_username: keycloak
|
||||
auth_password: keycloak
|
||||
auth_realm: master
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
msg:
|
||||
description: Message as to what action was taken.
|
||||
returned: always
|
||||
type: str
|
||||
|
||||
end_state:
|
||||
description: Representation of the authorization scope after module execution.
|
||||
returned: on success
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description: ID of the authorization scope.
|
||||
type: str
|
||||
returned: when I(state=present)
|
||||
sample: a6ab1cf2-1001-40ec-9f39-48f23b6a0a41
|
||||
name:
|
||||
description: Name of the authorization scope.
|
||||
type: str
|
||||
returned: when I(state=present)
|
||||
sample: file:delete
|
||||
display_name:
|
||||
description: Display name of the authorization scope.
|
||||
type: str
|
||||
returned: when I(state=present)
|
||||
sample: File delete
|
||||
icon_uri:
|
||||
description: Icon URI for the authorization scope.
|
||||
type: str
|
||||
returned: when I(state=present)
|
||||
sample: http://localhost/icon.png
|
||||
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
|
||||
keycloak_argument_spec, get_token, KeycloakError
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
Module execution
|
||||
|
||||
:return:
|
||||
"""
|
||||
argument_spec = keycloak_argument_spec()
|
||||
|
||||
meta_args = dict(
|
||||
state=dict(type='str', default='present',
|
||||
choices=['present', 'absent']),
|
||||
name=dict(type='str', required=True),
|
||||
display_name=dict(type='str', required=False),
|
||||
icon_uri=dict(type='str', required=False),
|
||||
client_id=dict(type='str', required=True),
|
||||
realm=dict(type='str', required=True)
|
||||
)
|
||||
|
||||
argument_spec.update(meta_args)
|
||||
|
||||
module = AnsibleModule(argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
required_one_of=(
|
||||
[['token', 'auth_realm', 'auth_username', 'auth_password']]),
|
||||
required_together=([['auth_realm', 'auth_username', 'auth_password']]))
|
||||
|
||||
result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={}))
|
||||
|
||||
# Obtain access token, initialize API
|
||||
try:
|
||||
connection_header = get_token(module.params)
|
||||
except KeycloakError as e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
kc = KeycloakAPI(module, connection_header)
|
||||
|
||||
# Convenience variables
|
||||
state = module.params.get('state')
|
||||
name = module.params.get('name')
|
||||
display_name = module.params.get('display_name')
|
||||
icon_uri = module.params.get('icon_uri')
|
||||
client_id = module.params.get('client_id')
|
||||
realm = module.params.get('realm')
|
||||
|
||||
# Get the "id" of the client based on the usually more human-readable
|
||||
# "clientId"
|
||||
cid = kc.get_client_id(client_id, realm=realm)
|
||||
if not cid:
|
||||
module.fail_json(msg='Invalid client %s for realm %s' %
|
||||
(client_id, realm))
|
||||
|
||||
# Get current state of the Authorization Scope using its name as the search
|
||||
# filter. This returns False if it is not found.
|
||||
before_authz_scope = kc.get_authz_authorization_scope_by_name(
|
||||
name=name, client_id=cid, realm=realm)
|
||||
|
||||
# Generate a JSON payload for Keycloak Admin API. This is needed for
|
||||
# "create" and "update" operations.
|
||||
desired_authz_scope = {}
|
||||
desired_authz_scope['name'] = name
|
||||
desired_authz_scope['displayName'] = display_name
|
||||
desired_authz_scope['iconUri'] = icon_uri
|
||||
|
||||
# Add "id" to payload for modify operations
|
||||
if before_authz_scope:
|
||||
desired_authz_scope['id'] = before_authz_scope['id']
|
||||
|
||||
# Ensure that undefined (null) optional parameters are presented as empty
|
||||
# strings in the desired state. This makes comparisons with current state
|
||||
# much easier.
|
||||
for k, v in desired_authz_scope.items():
|
||||
if not v:
|
||||
desired_authz_scope[k] = ''
|
||||
|
||||
# Do the above for the current state
|
||||
if before_authz_scope:
|
||||
for k in ['displayName', 'iconUri']:
|
||||
if k not in before_authz_scope:
|
||||
before_authz_scope[k] = ''
|
||||
|
||||
if before_authz_scope and state == 'present':
|
||||
changes = False
|
||||
for k, v in desired_authz_scope.items():
|
||||
if before_authz_scope[k] != v:
|
||||
changes = True
|
||||
# At this point we know we have to update the object anyways,
|
||||
# so there's no need to do more work.
|
||||
break
|
||||
|
||||
if changes:
|
||||
if module._diff:
|
||||
result['diff'] = dict(before=before_authz_scope, after=desired_authz_scope)
|
||||
|
||||
if module.check_mode:
|
||||
result['changed'] = True
|
||||
result['msg'] = 'Authorization scope would be updated'
|
||||
module.exit_json(**result)
|
||||
else:
|
||||
kc.update_authz_authorization_scope(
|
||||
payload=desired_authz_scope, id=before_authz_scope['id'], client_id=cid, realm=realm)
|
||||
result['changed'] = True
|
||||
result['msg'] = 'Authorization scope updated'
|
||||
else:
|
||||
result['changed'] = False
|
||||
result['msg'] = 'Authorization scope not updated'
|
||||
|
||||
result['end_state'] = desired_authz_scope
|
||||
elif not before_authz_scope and state == 'present':
|
||||
if module._diff:
|
||||
result['diff'] = dict(before={}, after=desired_authz_scope)
|
||||
|
||||
if module.check_mode:
|
||||
result['changed'] = True
|
||||
result['msg'] = 'Authorization scope would be created'
|
||||
module.exit_json(**result)
|
||||
else:
|
||||
kc.create_authz_authorization_scope(
|
||||
payload=desired_authz_scope, client_id=cid, realm=realm)
|
||||
result['changed'] = True
|
||||
result['msg'] = 'Authorization scope created'
|
||||
result['end_state'] = desired_authz_scope
|
||||
elif before_authz_scope and state == 'absent':
|
||||
if module._diff:
|
||||
result['diff'] = dict(before=before_authz_scope, after={})
|
||||
|
||||
if module.check_mode:
|
||||
result['changed'] = True
|
||||
result['msg'] = 'Authorization scope would be removed'
|
||||
module.exit_json(**result)
|
||||
else:
|
||||
kc.remove_authz_authorization_scope(
|
||||
id=before_authz_scope['id'], client_id=cid, realm=realm)
|
||||
result['changed'] = True
|
||||
result['msg'] = 'Authorization scope removed'
|
||||
elif not before_authz_scope and state == 'absent':
|
||||
result['changed'] = False
|
||||
else:
|
||||
module.fail_json(msg='Unable to determine what to do with authorization scope %s of client %s in realm %s' % (
|
||||
name, client_id, realm))
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
plugins/modules/keycloak_clientscope_type.py (new file, 285 lines)
@@ -0,0 +1,285 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) Ansible project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: keycloak_clientscope_type
|
||||
|
||||
short_description: Set the type of a client scope in realm or client via Keycloak API
|
||||
|
||||
version_added: 6.6.0
|
||||
|
||||
description:
|
||||
- This module allows you to set the type (optional, default) of clientscopes
|
||||
via the Keycloak REST API. It requires access to the REST API via OpenID
|
||||
Connect; the user connecting and the client being used must have the
|
||||
requisite access rights. In a default Keycloak installation, admin-cli and
|
||||
an admin user would work, as would a separate client definition with the
|
||||
scope tailored to your needs and a user having the expected roles.
|
||||
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: full
|
||||
|
||||
options:
|
||||
realm:
|
||||
type: str
|
||||
description:
|
||||
- The Keycloak realm.
|
||||
default: 'master'
|
||||
|
||||
client_id:
|
||||
description:
|
||||
- The I(client_id) of the client. If not set, the clientscope types are set as a default for the realm.
|
||||
aliases:
|
||||
- clientId
|
||||
type: str
|
||||
|
||||
default_clientscopes:
|
||||
description:
|
||||
- Client scopes that should be of type default.
|
||||
type: list
|
||||
elements: str
|
||||
|
||||
optional_clientscopes:
|
||||
description:
|
||||
- Client scopes that should be of type optional.
|
||||
type: list
|
||||
elements: str
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.keycloak
|
||||
- community.general.attributes
|
||||
|
||||
author:
|
||||
- Simon Pahl (@simonpahl)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Set default client scopes on realm level
|
||||
community.general.keycloak_clientscope_type:
|
||||
auth_client_id: admin-cli
|
||||
auth_keycloak_url: https://auth.example.com/auth
|
||||
auth_realm: master
|
||||
auth_username: USERNAME
|
||||
auth_password: PASSWORD
|
||||
realm: "MyCustomRealm"
|
||||
default_clientscopes: ['profile', 'roles']
|
||||
delegate_to: localhost
|
||||
|
||||
|
||||
- name: Set default and optional client scopes on client level with token auth
|
||||
community.general.keycloak_clientscope_type:
|
||||
auth_client_id: admin-cli
|
||||
auth_keycloak_url: https://auth.example.com/auth
|
||||
token: TOKEN
|
||||
realm: "MyCustomRealm"
|
||||
client_id: "MyCustomClient"
|
||||
default_clientscopes: ['profile', 'roles']
|
||||
optional_clientscopes: ['phone']
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
msg:
|
||||
description: Message as to what action was taken.
|
||||
returned: always
|
||||
type: str
|
||||
sample: ""
|
||||
proposed:
|
||||
description: Representation of proposed client-scope types mapping.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
default_clientscopes: ["profile", "role"],
|
||||
optional_clientscopes: []
|
||||
}
|
||||
existing:
|
||||
description:
|
||||
- Representation of client scopes before module execution.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
default_clientscopes: ["profile", "role"],
|
||||
optional_clientscopes: ["phone"]
|
||||
}
|
||||
end_state:
|
||||
description:
|
||||
- Representation of client scopes after module execution.
|
||||
- The sample is truncated.
|
||||
returned: on success
|
||||
type: dict
|
||||
sample: {
|
||||
default_clientscopes: ["profile", "role"],
|
||||
optional_clientscopes: []
|
||||
}
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
|
||||
KeycloakAPI, KeycloakError, get_token)
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import \
|
||||
keycloak_argument_spec
|
||||
|
||||
|
||||
def keycloak_clientscope_type_module():
|
||||
"""
|
||||
Returns an AnsibleModule definition.
|
||||
|
||||
:return: argument_spec dict
|
||||
"""
|
||||
argument_spec = keycloak_argument_spec()
|
||||
|
||||
meta_args = dict(
|
||||
realm=dict(default='master'),
|
||||
client_id=dict(type='str', aliases=['clientId']),
|
||||
default_clientscopes=dict(type='list', elements='str'),
|
||||
optional_clientscopes=dict(type='list', elements='str'),
|
||||
)
|
||||
|
||||
argument_spec.update(meta_args)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
required_one_of=([
|
||||
['token', 'auth_realm', 'auth_username', 'auth_password'],
|
||||
['default_clientscopes', 'optional_clientscopes']
|
||||
]),
|
||||
required_together=([['auth_realm', 'auth_username', 'auth_password']]),
|
||||
mutually_exclusive=[
|
||||
['token', 'auth_realm'],
|
||||
['token', 'auth_username'],
|
||||
['token', 'auth_password']
|
||||
])
|
||||
|
||||
return module
|
||||
|
||||
|
||||
def clientscopes_to_add(existing, proposed):
|
||||
to_add = []
|
||||
existing_clientscope_ids = extract_field(existing, 'id')
|
||||
for clientscope in proposed:
|
||||
if not clientscope['id'] in existing_clientscope_ids:
|
||||
to_add.append(clientscope)
|
||||
return to_add
|
||||
|
||||
|
||||
def clientscopes_to_delete(existing, proposed):
|
||||
to_delete = []
|
||||
proposed_clientscope_ids = extract_field(proposed, 'id')
|
||||
for clientscope in existing:
|
||||
if not clientscope['id'] in proposed_clientscope_ids:
|
||||
to_delete.append(clientscope)
|
||||
return to_delete
|
||||
|
||||
|
||||
def extract_field(dictionary, field='name'):
|
||||
return [cs[field] for cs in dictionary]
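# e.g. extract_field([{'id': '1', 'name': 'profile'}, {'id': '2', 'name': 'roles'}])
# returns ['profile', 'roles']; extract_field(..., 'id') returns ['1', '2'].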
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
Module keycloak_clientscope_type
|
||||
|
||||
:return:
|
||||
"""
|
||||
|
||||
module = keycloak_clientscope_type_module()
|
||||
|
||||
# Obtain access token, initialize API
|
||||
try:
|
||||
connection_header = get_token(module.params)
|
||||
except KeycloakError as e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
kc = KeycloakAPI(module, connection_header)
|
||||
|
||||
realm = module.params.get('realm')
|
||||
client_id = module.params.get('client_id')
|
||||
default_clientscopes = module.params.get('default_clientscopes')
|
||||
optional_clientscopes = module.params.get('optional_clientscopes')
|
||||
|
||||
result = dict(changed=False, msg='', proposed={}, existing={}, end_state={})
|
||||
|
||||
all_clientscopes = kc.get_clientscopes(realm)
|
||||
default_clientscopes_real = []
|
||||
optional_clientscopes_real = []
|
||||
|
||||
for client_scope in all_clientscopes:
|
||||
if default_clientscopes is not None and client_scope["name"] in default_clientscopes:
|
||||
default_clientscopes_real.append(client_scope)
|
||||
if optional_clientscopes is not None and client_scope["name"] in optional_clientscopes:
|
||||
optional_clientscopes_real.append(client_scope)
|
||||
|
||||
if default_clientscopes is not None and len(default_clientscopes_real) != len(default_clientscopes):
|
||||
module.fail_json(msg='At least one of the default_clientscopes does not exist!')
|
||||
|
||||
if optional_clientscopes is not None and len(optional_clientscopes_real) != len(optional_clientscopes):
|
||||
module.fail_json(msg='At least one of the optional_clientscopes does not exist!')
|
||||
|
||||
result['proposed'].update({
|
||||
'default_clientscopes': 'no-change' if default_clientscopes is None else default_clientscopes,
|
||||
'optional_clientscopes': 'no-change' if optional_clientscopes is None else optional_clientscopes
|
||||
})
|
||||
|
||||
default_clientscopes_existing = kc.get_default_clientscopes(realm, client_id)
|
||||
optional_clientscopes_existing = kc.get_optional_clientscopes(realm, client_id)
|
||||
|
||||
result['existing'].update({
|
||||
'default_clientscopes': extract_field(default_clientscopes_existing),
|
||||
'optional_clientscopes': extract_field(optional_clientscopes_existing)
|
||||
})
|
||||
|
||||
if module._diff:
|
||||
result['diff'] = dict(before=result['existing'], after=result['proposed'])
|
||||
|
||||
if module.check_mode:
|
||||
module.exit_json(**result)
|
||||
|
||||
default_clientscopes_add = clientscopes_to_add(default_clientscopes_existing, default_clientscopes_real)
|
||||
optional_clientscopes_add = clientscopes_to_add(optional_clientscopes_existing, optional_clientscopes_real)
|
||||
|
||||
default_clientscopes_delete = clientscopes_to_delete(default_clientscopes_existing, default_clientscopes_real)
|
||||
optional_clientscopes_delete = clientscopes_to_delete(optional_clientscopes_existing, optional_clientscopes_real)
|
||||
|
||||
# first delete so clientscopes can change type
|
||||
for clientscope in default_clientscopes_delete:
|
||||
kc.delete_default_clientscope(clientscope['id'], realm, client_id)
|
||||
for clientscope in optional_clientscopes_delete:
|
||||
kc.delete_optional_clientscope(clientscope['id'], realm, client_id)
|
||||
|
||||
for clientscope in default_clientscopes_add:
|
||||
kc.add_default_clientscope(clientscope['id'], realm, client_id)
|
||||
for clientscope in optional_clientscopes_add:
|
||||
kc.add_optional_clientscope(clientscope['id'], realm, client_id)
|
||||
|
||||
result["changed"] = (
|
||||
len(default_clientscopes_add) > 0
|
||||
or len(optional_clientscopes_add) > 0
|
||||
or len(default_clientscopes_delete) > 0
|
||||
or len(optional_clientscopes_delete) > 0
|
||||
)
|
||||
|
||||
result['end_state'].update({
|
||||
'default_clientscopes': extract_field(kc.get_default_clientscopes(realm, client_id)),
|
||||
'optional_clientscopes': extract_field(kc.get_optional_clientscopes(realm, client_id))
|
||||
})
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -62,8 +62,15 @@ options:
|
||||
required: true
|
||||
type: dict
|
||||
description:
|
||||
- The attribute(s) and value(s) to add or remove. The complex argument format is required in order to pass
|
||||
a list of strings (see examples).
|
||||
- The attribute(s) and value(s) to add or remove.
|
||||
- Each attribute value can be a string for single-valued attributes or
|
||||
a list of strings for multi-valued attributes.
|
||||
- If you specify values for this option in YAML, please note that you can improve
|
||||
readability for long string values by using YAML block modifiers as seen in the
|
||||
examples for this module.
|
||||
- Note that when using values that YAML/ansible-core interprets as other types,
|
||||
like C(yes), C(no) (booleans), or C(2.10) (float), make sure to quote them if
|
||||
these are meant to be strings. Otherwise the wrong values may be sent to LDAP.
|
||||
ordered:
|
||||
required: false
|
||||
type: bool
|
||||
|
||||
@@ -41,6 +41,14 @@ options:
|
||||
- If I(state=present), attributes necessary to create an entry. Existing
|
||||
entries are never modified. To assert specific attribute values on an
|
||||
existing entry, use M(community.general.ldap_attrs) module instead.
|
||||
- Each attribute value can be a string for single-valued attributes or
|
||||
a list of strings for multi-valued attributes.
|
||||
- If you specify values for this option in YAML, please note that you can improve
|
||||
readability for long string values by using YAML block modifiers as seen in the
|
||||
examples for this module.
|
||||
- Note that when using values that YAML/ansible-core interprets as other types,
|
||||
like C(yes), C(no) (booleans), or C(2.10) (float), make sure to quote them if
|
||||
these are meant to be strings. Otherwise the wrong values may be sent to LDAP.
|
||||
type: dict
|
||||
default: {}
|
||||
objectClass:
|
||||
@@ -86,6 +94,29 @@ EXAMPLES = """
|
||||
description: An LDAP administrator
|
||||
userPassword: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
|
||||
|
||||
- name: Set possible values for attributes elements
|
||||
community.general.ldap_entry:
|
||||
dn: cn=admin,dc=example,dc=com
|
||||
objectClass:
|
||||
- simpleSecurityObject
|
||||
- organizationalRole
|
||||
attributes:
|
||||
description: An LDAP Administrator
|
||||
roleOccupant:
|
||||
- cn=Chocs Puddington,ou=Information Technology,dc=example,dc=com
|
||||
- cn=Alice Stronginthebrain,ou=Information Technology,dc=example,dc=com
|
||||
olcAccess:
|
||||
- >-
|
||||
{0}to attrs=userPassword,shadowLastChange
|
||||
by self write
|
||||
by anonymous auth
|
||||
by dn="cn=admin,dc=example,dc=com" write
|
||||
by * none
|
||||
- >-
|
||||
{1}to dn.base="dc=example,dc=com"
|
||||
by dn="cn=admin,dc=example,dc=com" write
|
||||
by * read
|
||||
|
||||
- name: Get rid of an old entry
|
||||
community.general.ldap_entry:
|
||||
dn: ou=stuff,dc=example,dc=com
|
||||
|
||||
@@ -25,15 +25,6 @@ attributes:
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
target:
|
||||
description:
|
||||
- The target to run.
|
||||
- Typically this would be something like C(install),C(test) or C(all)."
|
||||
type: str
|
||||
params:
|
||||
description:
|
||||
- Any extra parameters to pass to make.
|
||||
type: dict
|
||||
chdir:
|
||||
description:
|
||||
- Change to this directory before running make.
|
||||
@@ -43,11 +34,6 @@ options:
|
||||
description:
|
||||
- Use a custom Makefile.
|
||||
type: path
|
||||
make:
|
||||
description:
|
||||
- Use a specific make binary.
|
||||
type: path
|
||||
version_added: '0.2.0'
|
||||
jobs:
|
||||
description:
|
||||
- Set the number of make jobs to run concurrently.
|
||||
@@ -55,6 +41,20 @@ options:
|
||||
- This is not supported by all make implementations.
|
||||
type: int
|
||||
version_added: 2.0.0
|
||||
make:
|
||||
description:
|
||||
- Use a specific make binary.
|
||||
type: path
|
||||
version_added: '0.2.0'
|
||||
params:
|
||||
description:
|
||||
- Any extra parameters to pass to make.
|
||||
type: dict
|
||||
target:
|
||||
description:
|
||||
- The target to run.
|
||||
- Typically this would be something like C(install), C(test), or C(all).
|
||||
type: str
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
@@ -83,9 +83,42 @@ EXAMPLES = r'''
|
||||
file: /some-project/Makefile
|
||||
'''
|
||||
|
||||
RETURN = r'''# '''
|
||||
RETURN = r'''
|
||||
chdir:
|
||||
description:
|
||||
- The value of the module parameter I(chdir).
|
||||
type: str
|
||||
returned: success
|
||||
command:
|
||||
description:
|
||||
- The command built and executed by the module.
|
||||
type: str
|
||||
returned: success
|
||||
version_added: 6.5.0
|
||||
file:
|
||||
description:
|
||||
- The value of the module parameter I(file).
|
||||
type: str
|
||||
returned: success
|
||||
jobs:
|
||||
description:
|
||||
- The value of the module parameter I(jobs).
|
||||
type: int
|
||||
returned: success
|
||||
params:
|
||||
description:
|
||||
- The value of the module parameter I(params).
|
||||
type: dict
|
||||
returned: success
|
||||
target:
|
||||
description:
|
||||
- The value of the module parameter I(target).
|
||||
type: str
|
||||
returned: success
|
||||
'''
|
||||
|
||||
from ansible.module_utils.six import iteritems
|
||||
from ansible.module_utils.six.moves import shlex_quote
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
@@ -192,6 +225,7 @@ def main():
|
||||
chdir=module.params['chdir'],
|
||||
file=module.params['file'],
|
||||
jobs=module.params['jobs'],
|
||||
command=' '.join([shlex_quote(part) for part in base_command]),
|
||||
)
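# Illustrative (hypothetical) output: with target='all' and jobs=4 the returned
# 'command' is a single shell-quotable string such as '/usr/bin/make -j4 all';
# the exact ordering follows however base_command was assembled above.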
|
||||
|
||||
|
||||
|
||||
@@ -139,7 +139,10 @@ def reload_dns(args=None):
|
||||
# manifest themselves at this point so we need to ensure the user is
|
||||
# informed of the reason.
|
||||
retvals['failed'] = has_failed
|
||||
retvals['memset_api'] = response.json()
|
||||
if response.status_code is not None:
|
||||
retvals['memset_api'] = response.json()
|
||||
else:
|
||||
retvals['stderr'] = response.stderr
|
||||
retvals['msg'] = msg
|
||||
return retvals
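# Two failure shapes are distinguished above: an HTTP-level error (status_code
# set, API body exposed via 'memset_api') versus a transport-level failure
# where only the captured stderr of the request is available.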
|
||||
|
||||
|
||||
@@ -134,7 +134,10 @@ def get_facts(args=None):
|
||||
# informed of the reason.
|
||||
retvals['failed'] = has_failed
|
||||
retvals['msg'] = msg
|
||||
retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
|
||||
if response.status_code is not None:
|
||||
retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
|
||||
else:
|
||||
retvals['stderr'] = "{0}" . format(response.stderr)
|
||||
return retvals
|
||||
|
||||
# we don't want to return the same thing twice
|
||||
|
||||
@@ -259,7 +259,10 @@ def get_facts(args=None):
|
||||
# informed of the reason.
|
||||
retvals['failed'] = has_failed
|
||||
retvals['msg'] = msg
|
||||
retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
|
||||
if response.status_code is not None:
|
||||
retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
|
||||
else:
|
||||
retvals['stderr'] = "{0}" . format(response.stderr)
|
||||
return retvals
|
||||
|
||||
# we don't want to return the same thing twice
|
||||
|
||||
@@ -264,6 +264,9 @@ def create_or_delete(args=None):
|
||||
retvals['failed'] = _has_failed
|
||||
retvals['msg'] = _msg
|
||||
|
||||
if response.stderr is not None:
|
||||
retvals['stderr'] = response.stderr
|
||||
|
||||
return retvals
|
||||
|
||||
zone_exists, _msg, counter, _zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json())
|
||||
|
||||
@@ -195,7 +195,10 @@ def create_or_delete_domain(args=None):
|
||||
# informed of the reason.
|
||||
retvals['failed'] = has_failed
|
||||
retvals['msg'] = msg
|
||||
retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
|
||||
if response.status_code is not None:
|
||||
retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
|
||||
else:
|
||||
retvals['stderr'] = response.stderr
|
||||
return retvals
|
||||
|
||||
zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json())
|
||||
|
||||
@@ -313,7 +313,10 @@ def create_or_delete(args=None):
|
||||
# informed of the reason.
|
||||
retvals['failed'] = _has_failed
|
||||
retvals['msg'] = msg
|
||||
retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
|
||||
if response.status_code is not None:
|
||||
retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
|
||||
else:
|
||||
retvals['stderr'] = response.stderr
|
||||
return retvals
|
||||
|
||||
zone_exists, _msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json())
|
||||
|
||||
@@ -146,8 +146,7 @@ class MkSysB(ModuleHelper):
|
||||
def __run__(self):
|
||||
def process(rc, out, err):
|
||||
if rc != 0:
|
||||
self.do_raise("mksysb failed.")
|
||||
self.vars.msg = out
|
||||
self.do_raise("mksysb failed: {0}".format(out))
|
||||
|
||||
runner = CmdRunner(
|
||||
self.module,
|
||||
@@ -158,6 +157,8 @@ class MkSysB(ModuleHelper):
|
||||
'extended_attrs', 'backup_crypt_files', 'backup_dmapi_fs', 'new_image_data', 'combined_path'],
|
||||
output_process=process, check_mode_skip=True) as ctx:
|
||||
ctx.run(combined_path=[self.vars.storage_path, self.vars.name])
|
||||
if self.verbosity >= 4:
|
||||
self.vars.run_info = ctx.run_info
|
||||
|
||||
self.changed = True
|
||||
|
||||
|
||||
@@ -52,7 +52,7 @@ options:
|
||||
- If C(present), adds module name to C(/etc/modules-load.d/) and params to C(/etc/modprobe.d/) so the module will be loaded on next reboot.
|
||||
- If C(absent), will comment out module name from C(/etc/modules-load.d/) and comment out params from C(/etc/modprobe.d/) so the module will not be
|
||||
loaded on next reboot.
|
||||
- If C(disabled), will not toch anything and leave C(/etc/modules-load.d/) and C(/etc/modprobe.d/) as it is.
|
||||
- If C(disabled), will not touch anything and leave C(/etc/modules-load.d/) and C(/etc/modprobe.d/) as it is.
|
||||
- Note that it is usually a better idea to rely on the automatic module loading by PCI IDs, USB IDs, DMI IDs or similar triggers encoded in the
|
||||
kernel modules themselves instead of configuration like this.
|
||||
- In fact, most modern kernel modules are prepared for automatic loading already.
|
||||
|
||||
@@ -63,11 +63,12 @@ options:
|
||||
- Type C(generic) is added in Ansible 2.5.
|
||||
- Type C(infiniband) is added in community.general 2.0.0.
|
||||
- Type C(gsm) is added in community.general 3.7.0.
|
||||
- Type C(macvlan) is added in community.general 6.6.0.
|
||||
- Type C(wireguard) is added in community.general 4.3.0.
|
||||
- Type C(vpn) is added in community.general 5.1.0.
|
||||
type: str
|
||||
choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi, gsm,
|
||||
wireguard, vpn ]
|
||||
choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, macvlan, sit, team, team-slave, vlan, vxlan,
|
||||
wifi, gsm, wireguard, vpn ]
|
||||
mode:
|
||||
description:
|
||||
- This is the type of device or network connection that you wish to create for a bond or bridge.
|
||||
@@ -197,6 +198,7 @@ options:
|
||||
may_fail4:
|
||||
description:
|
||||
- If you need I(ip4) configured before C(network-online.target) is reached, set this option to C(false).
|
||||
- This option applies when C(method4) is not C(disabled).
|
||||
type: bool
|
||||
default: true
|
||||
version_added: 3.3.0
|
||||
@@ -302,8 +304,9 @@ options:
|
||||
addr_gen_mode6:
|
||||
description:
|
||||
- Configure method for creating the address for use with IPv6 Stateless Address Autoconfiguration.
|
||||
- C(default) and C(default-or-eui64) have been added in community.general 6.5.0.
|
||||
type: str
|
||||
choices: [eui64, stable-privacy]
|
||||
choices: [default, default-or-eui64, eui64, stable-privacy]
|
||||
version_added: 4.2.0
|
||||
mtu:
|
||||
description:
|
||||
@@ -411,6 +414,14 @@ options:
|
||||
type: str
|
||||
choices: [ same_all, by_active, only_active ]
|
||||
version_added: 3.4.0
|
||||
runner_fast_rate:
|
||||
description:
|
||||
- Option specifies the rate at which our link partner is asked to transmit LACPDU
|
||||
packets. If this is C(true) then packets will be sent once per second. Otherwise they
|
||||
will be sent every 30 seconds.
|
||||
- Only allowed for C(lacp) runner.
|
||||
type: bool
|
||||
version_added: 6.5.0
|
||||
vlanid:
|
||||
description:
|
||||
- This is only used with VLAN - VLAN ID in range <0-4095>.
|
||||
@@ -869,6 +880,38 @@ options:
|
||||
- The username used to authenticate with the network, if required.
|
||||
- Many providers do not require a username, or accept any username.
|
||||
- But if a username is required, it is specified here.
|
||||
macvlan:
|
||||
description:
|
||||
- The configuration of the MAC VLAN connection.
|
||||
- Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host.
|
||||
- 'An up-to-date list of supported attributes can be found here:
|
||||
U(https://networkmanager.dev/docs/api/latest/settings-macvlan.html).'
|
||||
type: dict
|
||||
version_added: 6.6.0
|
||||
suboptions:
|
||||
mode:
|
||||
description:
|
||||
- The macvlan mode, which specifies the communication mechanism between multiple macvlans on the same lower device.
|
||||
- 'Following choices are allowed: C(1) B(vepa), C(2) B(bridge), C(3) B(private), C(4) B(passthru)
|
||||
and C(5) B(source)'
|
||||
type: int
|
||||
choices: [ 1, 2, 3, 4, 5 ]
|
||||
required: true
|
||||
parent:
|
||||
description:
|
||||
- If given, specifies the parent interface name or parent connection UUID from which this MAC-VLAN interface should
|
||||
be created. If this property is not specified, the connection must contain an "802-3-ethernet" setting with a
|
||||
"mac-address" property.
|
||||
type: str
|
||||
required: true
|
||||
promiscuous:
|
||||
description:
|
||||
- Whether the interface should be put in promiscuous mode.
|
||||
type: bool
|
||||
tap:
|
||||
description:
|
||||
- Whether the interface should be a MACVTAP.
|
||||
type: bool
|
||||
wireguard:
|
||||
description:
|
||||
- The configuration of the Wireguard connection.
|
||||
@@ -968,7 +1011,6 @@ options:
|
||||
- Enable or disable IPSec tunnel to L2TP host.
|
||||
- This option is needed when C(service-type) is C(org.freedesktop.NetworkManager.l2tp).
|
||||
type: bool
|
||||
choices: [ yes, no ]
|
||||
ipsec-psk:
|
||||
description:
|
||||
- The pre-shared key in base64 encoding.
|
||||
@@ -1348,6 +1390,17 @@ EXAMPLES = r'''
|
||||
autoconnect: true
|
||||
state: present
|
||||
|
||||
- name: Create a macvlan connection
|
||||
community.general.nmcli:
|
||||
type: macvlan
|
||||
conn_name: my-macvlan-connection
|
||||
ifname: mymacvlan0
|
||||
macvlan:
|
||||
mode: 2
|
||||
parent: eth1
|
||||
autoconnect: true
|
||||
state: present
|
||||
|
||||
- name: Create a wireguard connection
|
||||
community.general.nmcli:
|
||||
type: wireguard
|
||||
@@ -1472,6 +1525,7 @@ class Nmcli(object):
|
||||
self.mac = module.params['mac']
|
||||
self.runner = module.params['runner']
|
||||
self.runner_hwaddr_policy = module.params['runner_hwaddr_policy']
|
||||
self.runner_fast_rate = module.params['runner_fast_rate']
|
||||
self.vlanid = module.params['vlanid']
|
||||
self.vlandev = module.params['vlandev']
|
||||
self.flags = module.params['flags']
|
||||
@@ -1492,13 +1546,14 @@ class Nmcli(object):
|
||||
self.wifi = module.params['wifi']
|
||||
self.wifi_sec = module.params['wifi_sec']
|
||||
self.gsm = module.params['gsm']
|
||||
self.macvlan = module.params['macvlan']
|
||||
self.wireguard = module.params['wireguard']
|
||||
self.vpn = module.params['vpn']
|
||||
self.transport_mode = module.params['transport_mode']
|
||||
|
||||
if self.method4:
|
||||
self.ipv4_method = self.method4
|
||||
elif self.type in ('dummy', 'wireguard') and not self.ip4:
|
||||
elif self.type in ('dummy', 'macvlan', 'wireguard') and not self.ip4:
|
||||
self.ipv4_method = 'disabled'
|
||||
elif self.ip4:
|
||||
self.ipv4_method = 'manual'
|
||||
@@ -1507,7 +1562,7 @@ class Nmcli(object):
|
||||
|
||||
if self.method6:
|
||||
self.ipv6_method = self.method6
|
||||
elif self.type in ('dummy', 'wireguard') and not self.ip6:
|
||||
elif self.type in ('dummy', 'macvlan', 'wireguard') and not self.ip6:
|
||||
self.ipv6_method = 'disabled'
|
||||
elif self.ip6:
|
||||
self.ipv6_method = 'manual'
|
||||
@@ -1576,6 +1631,10 @@ class Nmcli(object):
|
||||
'ipv6.ip6-privacy': self.ip_privacy6,
|
||||
'ipv6.addr-gen-mode': self.addr_gen_mode6
|
||||
})
|
||||
# when 'method' is disabled, 'may_fail' makes no sense, but nmcli accepts it and keeps 'yes';
# force ignoring it to preserve idempotency
|
||||
if self.ipv4_method and self.ipv4_method != 'disabled':
|
||||
options.update({'ipv4.may-fail': self.may_fail4})
|
||||
|
||||
# Layer 2 options.
|
||||
if self.mac:
|
||||
@@ -1615,11 +1674,19 @@ class Nmcli(object):
|
||||
'bridge.priority': self.priority,
|
||||
'bridge.stp': self.stp,
|
||||
})
|
||||
# priority only makes sense when stp is enabled; otherwise nmcli keeps bridge-priority at 32768 regardless of input.
# force ignoring it to preserve idempotency
|
||||
if self.stp:
|
||||
options.update({'bridge.priority': self.priority})
|
||||
elif self.type == 'team':
|
||||
options.update({
|
||||
'team.runner': self.runner,
|
||||
'team.runner-hwaddr-policy': self.runner_hwaddr_policy,
|
||||
})
|
||||
if self.runner_fast_rate is not None:
|
||||
options.update({
|
||||
'team.runner-fast-rate': self.runner_fast_rate,
|
||||
})
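# team.runner-fast-rate is emitted only when explicitly set, so connections
# that never specified it keep their current value.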
|
||||
elif self.type == 'bridge-slave':
|
||||
options.update({
|
||||
'connection.slave-type': 'bridge',
|
||||
@@ -1678,6 +1745,14 @@ class Nmcli(object):
|
||||
options.update({
|
||||
'gsm.%s' % name: value,
|
||||
})
|
||||
elif self.type == 'macvlan':
|
||||
if self.macvlan:
|
||||
for name, value in self.macvlan.items():
|
||||
options.update({
|
||||
'macvlan.%s' % name: value,
|
||||
})
|
||||
elif self.state == 'present':
|
||||
raise NmcliModuleError('type is macvlan but all of the following are missing: macvlan')
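# For the documented example (mode: 2, parent: eth1) the loop above produces
# {'macvlan.mode': 2, 'macvlan.parent': 'eth1'}, which reach nmcli as
# 'macvlan.mode 2 macvlan.parent eth1'.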
|
||||
elif self.type == 'wireguard':
|
||||
if self.wireguard:
|
||||
for name, value in self.wireguard.items():
|
||||
@@ -1755,6 +1830,7 @@ class Nmcli(object):
|
||||
'wifi',
|
||||
'802-11-wireless',
|
||||
'gsm',
|
||||
'macvlan',
|
||||
'wireguard',
|
||||
'vpn',
|
||||
)
|
||||
@@ -1772,6 +1848,7 @@ class Nmcli(object):
|
||||
'dummy',
|
||||
'ethernet',
|
||||
'team-slave',
|
||||
'vlan',
|
||||
)
|
||||
|
||||
@property
|
||||
@@ -1879,7 +1956,8 @@ class Nmcli(object):
|
||||
'ipv4.may-fail',
|
||||
'ipv6.ignore-auto-dns',
|
||||
'ipv6.ignore-auto-routes',
|
||||
'802-11-wireless.hidden'):
|
||||
'802-11-wireless.hidden',
|
||||
'team.runner-fast-rate'):
|
||||
return bool
|
||||
elif setting in ('ipv4.addresses',
|
||||
'ipv6.addresses',
|
||||
@@ -1968,6 +2046,9 @@ class Nmcli(object):
|
||||
if key in self.SECRET_OPTIONS:
|
||||
self.edit_commands += ['set %s %s' % (key, value)]
|
||||
continue
|
||||
if key == 'xmit_hash_policy':
|
||||
cmd.extend(['+bond.options', 'xmit_hash_policy=%s' % value])
|
||||
continue
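# e.g. xmit_hash_policy='layer3+4' reaches nmcli as
# '+bond.options xmit_hash_policy=layer3+4' rather than as a top-level key.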
|
||||
cmd.extend([key, value])
|
||||
|
||||
return self.execute_command(cmd)
|
||||
@@ -2215,6 +2296,7 @@ def main():
|
||||
'vxlan',
|
||||
'wifi',
|
||||
'gsm',
|
||||
'macvlan',
|
||||
'wireguard',
|
||||
'vpn',
|
||||
]),
|
||||
@@ -2264,7 +2346,7 @@ def main():
|
||||
route_metric6=dict(type='int'),
|
||||
method6=dict(type='str', choices=['ignore', 'auto', 'dhcp', 'link-local', 'manual', 'shared', 'disabled']),
|
||||
ip_privacy6=dict(type='str', choices=['disabled', 'prefer-public-addr', 'prefer-temp-addr', 'unknown']),
|
||||
addr_gen_mode6=dict(type='str', choices=['eui64', 'stable-privacy']),
|
||||
addr_gen_mode6=dict(type='str', choices=['default', 'default-or-eui64', 'eui64', 'stable-privacy']),
|
||||
# Bond Specific vars
|
||||
mode=dict(type='str', default='balance-rr',
|
||||
choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']),
|
||||
@@ -2294,6 +2376,8 @@ def main():
|
||||
choices=['broadcast', 'roundrobin', 'activebackup', 'loadbalance', 'lacp']),
|
||||
# team active-backup runner specific options
|
||||
runner_hwaddr_policy=dict(type='str', choices=['same_all', 'by_active', 'only_active']),
|
||||
# team lacp runner specific options
|
||||
runner_fast_rate=dict(type='bool'),
|
||||
# vlan specific vars
|
||||
vlanid=dict(type='int'),
|
||||
vlandev=dict(type='str'),
|
||||
@@ -2316,6 +2400,11 @@ def main():
|
||||
wifi=dict(type='dict'),
|
||||
wifi_sec=dict(type='dict', no_log=True),
|
||||
gsm=dict(type='dict'),
|
||||
macvlan=dict(type='dict', options=dict(
|
||||
mode=dict(type='int', choices=[1, 2, 3, 4, 5], required=True),
|
||||
parent=dict(type='str', required=True),
|
||||
promiscuous=dict(type='bool'),
|
||||
tap=dict(type='bool'))),
|
||||
wireguard=dict(type='dict'),
|
||||
vpn=dict(type='dict'),
|
||||
transport_mode=dict(type='str', choices=['datagram', 'connected']),
|
||||
@@ -2340,6 +2429,8 @@ def main():
|
||||
if nmcli.type == "team":
|
||||
if nmcli.runner_hwaddr_policy and not nmcli.runner == "activebackup":
|
||||
nmcli.module.fail_json(msg="Runner-hwaddr-policy is only allowed for runner activebackup")
|
||||
if nmcli.runner_fast_rate is not None and nmcli.runner != "lacp":
|
||||
nmcli.module.fail_json(msg="runner-fast-rate is only allowed for runner lacp")
|
||||
# team-slave checks
|
||||
if nmcli.type == 'team-slave':
|
||||
if nmcli.master is None:
|
||||
|
||||
@@ -13,7 +13,8 @@ module: office_365_connector_card
|
||||
short_description: Use webhooks to create Connector Card messages within an Office 365 group
|
||||
description:
|
||||
- Creates Connector Card messages through
|
||||
- Office 365 Connectors U(https://dev.outlook.com/Connectors)
|
||||
Office 365 Connectors
|
||||
U(https://learn.microsoft.com/en-us/microsoftteams/platform/task-modules-and-cards/cards/cards-reference#connector-card-for-microsoft-365-groups).
|
||||
author: "Marc Sensenich (@marc-sensenich)"
|
||||
notes:
|
||||
- This module is not idempotent, therefore if the same task is run twice
|
||||
@@ -62,7 +63,7 @@ options:
|
||||
elements: dict
|
||||
description:
|
||||
- Contains a list of sections to display in the card.
|
||||
- For more information see https://dev.outlook.com/Connectors/reference.
|
||||
- For more information see U(https://learn.microsoft.com/en-us/outlook/actionable-messages/message-card-reference#section-fields).
|
||||
'''
|
||||
|
||||
EXAMPLES = """
|
||||
|
||||
@@ -152,7 +152,11 @@ def execute_command(cmd, module):
|
||||
# This makes run_command() use shell=False which we need to not cause shell
|
||||
# expansion of special characters like '*'.
|
||||
cmd_args = shlex.split(cmd)
|
||||
return module.run_command(cmd_args)
|
||||
|
||||
# We set TERM to 'dumb' to keep pkg_add happy if the machine running
|
||||
# ansible is using a TERM that the managed machine does not know about,
|
||||
# e.g.: "No progress meter: failed termcap lookup on xterm-kitty".
|
||||
return module.run_command(cmd_args, environ_update={'TERM': 'dumb'})
|
||||
|
||||
|
||||
# Function used to find out if a package is currently installed.
|
||||
|
@@ -148,6 +148,11 @@ class Opkg(StateModuleHelper):
            ),
        )

        if self.vars.update_cache:
            rc, dummy, dummy = self.runner("update_cache").run()
            if rc != 0:
                self.do_raise("could not update package db")

    @staticmethod
    def split_name_and_version(package):
        """ Split the name and the version when using the NAME=VERSION syntax """
@@ -164,10 +169,6 @@ class Opkg(StateModuleHelper):
        return want_installed == has_package

    def state_present(self):
        if self.vars.update_cache:
            dummy, rc, dummy = self.runner("update_cache").run()
            if rc != 0:
                self.do_raise("could not update package db")
        with self.runner("state force package") as ctx:
            for package in self.vars.name:
                pkg_name, pkg_version = self.split_name_and_version(package)
@@ -176,16 +177,14 @@ class Opkg(StateModuleHelper):
                if not self._package_in_desired_state(pkg_name, want_installed=True, version=pkg_version):
                    self.do_raise("failed to install %s" % package)
                self.vars.install_c += 1
        if self.verbosity >= 4:
            self.vars.run_info = ctx.run_info
        if self.vars.install_c > 0:
            self.vars.msg = "installed %s package(s)" % (self.vars.install_c)
        else:
            self.vars.msg = "package(s) already present"

    def state_absent(self):
        if self.vars.update_cache:
            dummy, rc, dummy = self.runner("update_cache").run()
            if rc != 0:
                self.do_raise("could not update package db")
        with self.runner("state force package") as ctx:
            for package in self.vars.name:
                package, dummy = self.split_name_and_version(package)
@@ -194,6 +193,8 @@ class Opkg(StateModuleHelper):
                if not self._package_in_desired_state(package, want_installed=False):
                    self.do_raise("failed to remove %s" % package)
                self.vars.remove_c += 1
        if self.verbosity >= 4:
            self.vars.run_info = ctx.run_info
        if self.vars.remove_c > 0:
            self.vars.msg = "removed %s package(s)" % (self.vars.remove_c)
        else:

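split_name_and_version() is called above but its body is not part of these hunks; a plausible sketch of such a helper, assuming the NAME=VERSION convention named in its docstring (the actual implementation may differ):

def split_name_and_version(package):
    """Split "name=version" into (name, version); version is None when absent."""
    name, sep, version = package.partition("=")
    return name, (version if sep else None)
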
@@ -20,10 +20,11 @@ description:
  command line tool. For a full description of the fields and the options
  check the GNU parted manual.
requirements:
  - This module requires parted version 1.8.3 and above
  - align option (except 'undefined') requires parted 2.1 and above
  - If the version of parted is below 3.1, it requires a Linux version running
    the sysfs file system C(/sys/).
  - This module requires C(parted) version 1.8.3 and above.
  - Option I(align) (except C(undefined)) requires C(parted) 2.1 or above.
  - If the version of C(parted) is below 3.1, it requires a Linux version running
    the C(sysfs) file system C(/sys/).
  - Requires the C(resizepart) command when using the I(resize) parameter.
extends_documentation_fragment:
  - community.general.attributes
attributes:
@@ -33,66 +34,70 @@ attributes:
    support: none
options:
  device:
    description: The block device (disk) where to operate.
    description:
      - The block device (disk) where to operate.
      - Regular files can also be partitioned, but it is recommended to create a
        loopback device using C(losetup) to easily access its partitions.
    type: str
    required: true
  align:
    description: Set alignment for newly created partitions. Use 'undefined' for parted default alignment.
    description:
      - Set alignment for newly created partitions. Use C(undefined) for parted default alignment.
    type: str
    choices: [ cylinder, minimal, none, optimal, undefined ]
    default: optimal
  number:
    description:
      - The number of the partition to work with or the number of the partition
        that will be created.
      - Required when performing any action on the disk, except fetching information.
      - The partition number being affected.
      - Required when performing any action on the disk, except fetching information.
    type: int
  unit:
    description:
      - Selects the current default unit that Parted will use to display
        locations and capacities on the disk and to interpret those given by the
        user if they are not suffixed by an unit.
      - When fetching information about a disk, it is always recommended to specify a unit.
      - Selects the current default unit that Parted will use to display
        locations and capacities on the disk and to interpret those given by the
        user if they are not suffixed by an unit.
      - When fetching information about a disk, it is recommended to always specify a unit.
    type: str
    choices: [ s, B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, '%', cyl, chs, compact ]
    default: KiB
  label:
    description:
      - Disk label type to use.
      - If C(device) already contains different label, it will be changed to C(label) and any previous partitions will be lost.
      - Disk label type or partition table to use.
      - If I(device) already contains a different label, it will be changed to I(label)
        and any previous partitions will be lost.
      - A I(name) must be specified for a C(gpt) partition table.
    type: str
    choices: [ aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun ]
    default: msdos
  part_type:
    description:
      - May be specified only with 'msdos' or 'dvh' partition tables.
      - A C(name) must be specified for a 'gpt' partition table.
      - Neither C(part_type) nor C(name) may be used with a 'sun' partition table.
      - May be specified only with I(label=msdos) or I(label=dvh).
      - Neither I(part_type) nor I(name) may be used with I(label=sun).
    type: str
    choices: [ extended, logical, primary ]
    default: primary
  part_start:
    description:
      - Where the partition will start as offset from the beginning of the disk,
        that is, the "distance" from the start of the disk. Negative numbers
        specify distance from the end of the disk.
      - The distance can be specified with all the units supported by parted
        (except compat) and it is case sensitive, e.g. C(10GiB), C(15%).
      - Using negative values may require setting of C(fs_type) (see notes).
      - Where the partition will start as offset from the beginning of the disk,
        that is, the "distance" from the start of the disk. Negative numbers
        specify distance from the end of the disk.
      - The distance can be specified with all the units supported by parted
        (except compat) and it is case sensitive, e.g. C(10GiB), C(15%).
      - Using negative values may require setting of I(fs_type) (see notes).
    type: str
    default: 0%
  part_end:
    description:
      - Where the partition will end as offset from the beginning of the disk,
        that is, the "distance" from the start of the disk. Negative numbers
        specify distance from the end of the disk.
      - The distance can be specified with all the units supported by parted
        (except compat) and it is case sensitive, e.g. C(10GiB), C(15%).
      - Where the partition will end as offset from the beginning of the disk,
        that is, the "distance" from the start of the disk. Negative numbers
        specify distance from the end of the disk.
      - The distance can be specified with all the units supported by parted
        (except compat) and it is case sensitive, e.g. C(10GiB), C(15%).
    type: str
    default: 100%
  name:
    description:
      - Sets the name for the partition number (GPT, Mac, MIPS and PC98 only).
      - Sets the name for the partition number (GPT, Mac, MIPS and PC98 only).
    type: str
  flags:
    description: A list of the flags that has to be set on the partition.
@@ -100,15 +105,15 @@ options:
    elements: str
  state:
    description:
      - Whether to create or delete a partition.
      - If set to C(info) the module will only return the device information.
      - Whether to create or delete a partition.
      - If set to C(info) the module will only return the device information.
    type: str
    choices: [ absent, present, info ]
    default: info
  fs_type:
    description:
      - If specified and the partition does not exist, will set filesystem type to given partition.
      - Parameter optional, but see notes below about negative C(part_start) values.
      - If specified and the partition does not exist, will set filesystem type to given partition.
      - Parameter optional, but see notes below about negative I(part_start) values.
    type: str
    version_added: '0.2.0'
  resize:
@@ -123,9 +128,9 @@ notes:
    installed on the system is before version 3.1, the module queries the kernel
    through C(/sys/) to obtain disk information. In this case the units CHS and
    CYL are not supported.
  - Negative C(part_start) start values were rejected if C(fs_type) was not given.
    This bug was fixed in parted 3.2.153. If you want to use negative C(part_start),
    specify C(fs_type) as well or make sure your system contains newer parted.
  - Negative I(part_start) start values were rejected if I(fs_type) was not given.
    This bug was fixed in parted 3.2.153. If you want to use negative I(part_start),
    specify I(fs_type) as well or make sure your system contains newer parted.
'''

RETURN = r'''

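The notes above encode a version gate: parted older than 3.2.153 rejects negative part_start offsets unless fs_type is also given. A hedged sketch of how a caller might express that rule (the helper name and the version-tuple handling are assumptions for illustration):

def fs_type_required(part_start, parted_version):
    # parted < 3.2.153 rejects negative offsets unless a filesystem type is passed.
    return part_start.strip().startswith('-') and parted_version < (3, 2, 153)

print(fs_type_required('-1GiB', (3, 2, 100)))  # True
print(fs_type_required('0%', (3, 2, 100)))     # False
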
@@ -47,10 +47,17 @@ options:
      If the application source, such as a package with version specifier, or an URL,
      directory or any other accepted specification. See C(pipx) documentation for more details.
    - When specified, the C(pipx) command will use I(source) instead of I(name).
  install_apps:
    description:
      - Add apps from the injected packages.
      - Only used when I(state=inject).
    type: bool
    default: false
    version_added: 6.5.0
  install_deps:
    description:
      - Include applications of dependent packages.
      - Only used when I(state=install) or I(state=upgrade).
      - Only used when I(state=install), I(state=latest), or I(state=inject).
    type: bool
    default: false
  inject_packages:
@@ -62,25 +69,33 @@ options:
  force:
    description:
      - Force modification of the application's virtual environment. See C(pipx) for details.
      - Only used when I(state=install), I(state=upgrade), I(state=upgrade_all), or I(state=inject).
      - Only used when I(state=install), I(state=upgrade), I(state=upgrade_all), I(state=latest), or I(state=inject).
    type: bool
    default: false
  include_injected:
    description:
      - Upgrade the injected packages along with the application.
      - Only used when I(state=upgrade) or I(state=upgrade_all).
      - Only used when I(state=upgrade), I(state=upgrade_all), or I(state=latest).
      - This is used with I(state=upgrade) and I(state=latest) since community.general 6.6.0.
    type: bool
    default: false
  index_url:
    description:
      - Base URL of Python Package Index.
      - Only used when I(state=install), I(state=upgrade), or I(state=inject).
      - Only used when I(state=install), I(state=upgrade), I(state=latest), or I(state=inject).
    type: str
  python:
    description:
      - Python version to be used when creating the application virtual environment. Must be 3.6+.
      - Only used when I(state=install), I(state=reinstall), or I(state=reinstall_all).
      - Only used when I(state=install), I(state=latest), I(state=reinstall), or I(state=reinstall_all).
    type: str
  system_site_packages:
    description:
      - Give application virtual environment access to the system site-packages directory.
      - Only used when I(state=install) or I(state=latest).
    type: bool
    default: false
    version_added: 6.6.0
  executable:
    description:
      - Path to the C(pipx) installed in the system.
@@ -105,6 +120,7 @@ notes:
  - >
    This module will honor C(pipx) environment variables such as but not limited to C(PIPX_HOME) and C(PIPX_BIN_DIR)
    passed using the R(environment Ansible keyword, playbooks_environment).
  - This module requires C(pipx) version 0.16.2.1 or above.
  - Please note that C(pipx) requires Python 3.6 or above.
  - >
    This first implementation does not verify whether a specified version constraint has been installed or not.
@@ -161,12 +177,14 @@ class PipX(StateModuleHelper):
                       'inject', 'upgrade', 'upgrade_all', 'reinstall', 'reinstall_all', 'latest']),
            name=dict(type='str'),
            source=dict(type='str'),
            install_apps=dict(type='bool', default=False),
            install_deps=dict(type='bool', default=False),
            inject_packages=dict(type='list', elements='str'),
            force=dict(type='bool', default=False),
            include_injected=dict(type='bool', default=False),
            index_url=dict(type='str'),
            python=dict(type='str'),
            system_site_packages=dict(type='bool', default=False),
            executable=dict(type='path'),
            editable=dict(type='bool', default=False),
            pip_args=dict(type='str'),
@@ -234,7 +252,7 @@ class PipX(StateModuleHelper):
    def state_install(self):
        if not self.vars.application or self.vars.force:
            self.changed = True
            with self.runner('state index_url install_deps force python editable pip_args name_source', check_mode_skip=True) as ctx:
            with self.runner('state index_url install_deps force python system_site_packages editable pip_args name_source', check_mode_skip=True) as ctx:
                ctx.run(name_source=[self.vars.name, self.vars.source])
                self._capture_results(ctx)

@@ -246,7 +264,7 @@ class PipX(StateModuleHelper):
        if self.vars.force:
            self.changed = True

        with self.runner('state index_url install_deps force editable pip_args name', check_mode_skip=True) as ctx:
        with self.runner('state include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx:
            ctx.run()
            self._capture_results(ctx)

@@ -271,7 +289,7 @@ class PipX(StateModuleHelper):
            self.do_raise("Trying to inject packages into a non-existent application: {0}".format(self.vars.name))
        if self.vars.force:
            self.changed = True
        with self.runner('state index_url force editable pip_args name inject_packages', check_mode_skip=True) as ctx:
        with self.runner('state index_url install_apps install_deps force editable pip_args name inject_packages', check_mode_skip=True) as ctx:
            ctx.run()
            self._capture_results(ctx)

@@ -295,11 +313,11 @@ class PipX(StateModuleHelper):
    def state_latest(self):
        if not self.vars.application or self.vars.force:
            self.changed = True
            with self.runner('state index_url install_deps force python editable pip_args name_source', check_mode_skip=True) as ctx:
            with self.runner('state index_url install_deps force python system_site_packages editable pip_args name_source', check_mode_skip=True) as ctx:
                ctx.run(state='install', name_source=[self.vars.name, self.vars.source])
                self._capture_results(ctx)

        with self.runner('state index_url install_deps force editable pip_args name', check_mode_skip=True) as ctx:
        with self.runner('state include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx:
            ctx.run(state='upgrade')
            self._capture_results(ctx)

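The runner('state index_url ...') strings changed above are ordered option lists: each name selects a formatter that expands one module parameter into CLI arguments, in that order. A simplified sketch of the idea (this illustrates the mechanism, not community.general's actual CmdRunner API):

def build_cmd(executable, order, formatters, params):
    # Expand each named option, in the given order, into CLI arguments.
    cmd = [executable]
    for name in order.split():
        cmd.extend(formatters[name](params.get(name)))
    return cmd

formatters = {
    'state': lambda v: [v] if v else [],
    'index_url': lambda v: ['--index-url', v] if v else [],
    'system_site_packages': lambda v: ['--system-site-packages'] if v else [],
}
print(build_cmd('pipx', 'state index_url system_site_packages',
                formatters, {'state': 'install', 'system_site_packages': True}))
# ['pipx', 'install', '--system-site-packages']
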
@@ -53,6 +53,7 @@ notes:
  - >
    This module will honor C(pipx) environment variables such as but not limited to C(PIPX_HOME) and C(PIPX_BIN_DIR)
    passed using the R(environment Ansible keyword, playbooks_environment).
  - This module requires C(pipx) version 0.16.2.1 or above.
  - Please note that C(pipx) requires Python 3.6 or above.
  - See also the C(pipx) documentation at U(https://pypa.github.io/pipx/).
author:

@@ -333,9 +333,9 @@ def emerge_packages(module, packages):
    """Run emerge command against given list of atoms."""
    p = module.params

    if p['noreplace'] and not (p['update'] or p['state'] == 'latest'):
    if p['noreplace'] and not p['changed_use'] and not p['newuse'] and not (p['update'] or p['state'] == 'latest'):
        for package in packages:
            if p['noreplace'] and not query_package(module, package, 'emerge'):
            if p['noreplace'] and not p['changed_use'] and not p['newuse'] and not query_package(module, package, 'emerge'):
                break
        else:
            module.exit_json(changed=False, msg='Packages already present.')
@@ -383,14 +383,12 @@ def emerge_packages(module, packages):
            """Fallback to default: don't use this argument at all."""
            continue

        if not flag_val:
            """Add the --flag=value pair."""
        if isinstance(flag_val, bool):
            args.extend((arg, to_native('y' if flag_val else 'n')))
        elif not flag_val:
            """If the value is 0 or 0.0: add the flag, but not the value."""
            args.append(arg)
            continue

        """Add the --flag=value pair."""
        if isinstance(p[flag], bool):
            args.extend((arg, to_native('y' if flag_val else 'n')))
        else:
            args.extend((arg, to_native(flag_val)))

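The reordered branches above matter because the bool check must come before the falsiness check: False must become a y/n pair, not a bare flag. A condensed, stdlib-only sketch of the rule:

def flag_args(arg, flag_val):
    # Booleans always become a y/n pair; zero-ish values emit the bare flag;
    # anything else becomes a --flag value pair.
    if isinstance(flag_val, bool):
        return [arg, 'y' if flag_val else 'n']
    if not flag_val:
        return [arg]
    return [arg, str(flag_val)]

print(flag_args('--jobs', False))  # ['--jobs', 'n'], not just ['--jobs']
print(flag_args('--jobs', 0))      # ['--jobs']
print(flag_args('--jobs', 4))      # ['--jobs', '4']
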
@@ -22,6 +22,11 @@ attributes:
  diff_mode:
    support: none
options:
  archive:
    description:
      - Specify a path to an archive to restore (instead of creating or cloning a VM).
    type: str
    version_added: 6.5.0
  acpi:
    description:
      - Specify if ACPI should be enabled/disabled.
@@ -536,6 +541,14 @@ EXAMPLES = '''
    name: spynal
    node: sabrewulf

- name: Create a VM from archive (backup)
  community.general.proxmox_kvm:
    api_user: root@pam
    api_password: secret
    api_host: helldorado
    archive: backup-storage:backup/vm/140/2023-03-08T06:41:23Z
    name: spynal

- name: Create new VM with minimal options and given vmid
  community.general.proxmox_kvm:
    api_user: root@pam
@@ -750,15 +763,6 @@ EXAMPLES = '''
    node: sabrewulf
    state: absent

- name: Get VM current state
  community.general.proxmox_kvm:
    api_user: root@pam
    api_password: secret
    api_host: helldorado
    name: spynal
    node: sabrewulf
    state: current

- name: Update VM configuration
  community.general.proxmox_kvm:
    api_user: root@pam
@@ -1053,6 +1057,7 @@ class ProxmoxKvmAnsible(ProxmoxAnsible):
def main():
    module_args = proxmox_auth_argument_spec()
    kvm_args = dict(
        archive=dict(type='str'),
        acpi=dict(type='bool'),
        agent=dict(type='str'),
        args=dict(type='str'),
@@ -1256,6 +1261,7 @@ def main():
        module.fail_json(msg="node '%s' does not exist in cluster" % node)

    proxmox.create_vm(vmid, newid, node, name, memory, cpu, cores, sockets, update,
                      archive=module.params['archive'],
                      acpi=module.params['acpi'],
                      agent=module.params['agent'],
                      autostart=module.params['autostart'],

@@ -160,8 +160,7 @@ def main():

    module = AnsibleModule(
        argument_spec=module_args,
        required_together=[('api_token_id', 'api_token_secret'),
                           ('api_user', 'api_password')],
        required_together=[('api_token_id', 'api_token_secret')],
        required_one_of=[('api_password', 'api_token_id')],
        supports_check_mode=True)
    result = dict(changed=False)

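The change above relaxes validation: the token halves must still travel together, while either credential style now satisfies the requirement. A minimal illustration of those two AnsibleModule constraints (argument names mirror the hunk; the spec body is trimmed to the relevant options, and this only runs inside an Ansible module context):

from ansible.module_utils.basic import AnsibleModule

module = AnsibleModule(
    argument_spec=dict(
        api_user=dict(type='str'),
        api_password=dict(type='str', no_log=True),
        api_token_id=dict(type='str'),
        api_token_secret=dict(type='str', no_log=True),
    ),
    # Both halves of a token must be given together...
    required_together=[('api_token_id', 'api_token_secret')],
    # ...and at least one authentication style must be present.
    required_one_of=[('api_password', 'api_token_id')],
    supports_check_mode=True,
)
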
@@ -81,6 +81,12 @@ options:
      - A list of puppet tags to be used.
    type: list
    elements: str
  skip_tags:
    description:
      - A list of puppet tags to be excluded.
    type: list
    elements: str
    version_added: 6.6.0
  execute:
    description:
      - Execute a specific piece of Puppet code.
@@ -143,6 +149,8 @@ EXAMPLES = r'''
    tags:
      - update
      - nginx
    skip_tags:
      - service

- name: Run puppet agent in noop mode
  community.general.puppet:
@@ -198,6 +206,7 @@ def main():
            environment=dict(type='str'),
            certname=dict(type='str'),
            tags=dict(type='list', elements='str'),
            skip_tags=dict(type='list', elements='str'),
            execute=dict(type='str'),
            summarize=dict(type='bool', default=False),
            debug=dict(type='bool', default=False),
@@ -232,11 +241,11 @@ def main():
    runner = puppet_utils.puppet_runner(module)

    if not p['manifest'] and not p['execute']:
        args_order = "_agent_fixed puppetmaster show_diff confdir environment tags certname noop use_srv_records"
        args_order = "_agent_fixed puppetmaster show_diff confdir environment tags skip_tags certname noop use_srv_records"
        with runner(args_order) as ctx:
            rc, stdout, stderr = ctx.run()
    else:
        args_order = "_apply_fixed logdest modulepath environment certname tags noop _execute summarize debug verbose"
        args_order = "_apply_fixed logdest modulepath environment certname tags skip_tags noop _execute summarize debug verbose"
        with runner(args_order) as ctx:
            rc, stdout, stderr = ctx.run(_execute=[p['execute'], p['manifest']])

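Adding skip_tags to both args_order strings is what forwards the new option to the CLI. A hedged sketch of the flag expansion (assuming puppet's comma-separated --tags/--skip_tags flags):

def tag_args(tags, skip_tags):
    args = []
    if tags:
        args.extend(['--tags', ','.join(tags)])
    if skip_tags:
        args.extend(['--skip_tags', ','.join(skip_tags)])
    return args

print(tag_args(['update', 'nginx'], ['service']))
# ['--tags', 'update,nginx', '--skip_tags', 'service']
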
@@ -321,6 +321,22 @@ EXAMPLES = '''
      baseuri: "{{ baseuri }}"
      username: "{{ username }}"
      password: "{{ password }}"

  - name: Get HPE Thermal Config
    community.general.redfish_info:
      category: Chassis
      command: GetHPEThermalConfig
      baseuri: "{{ baseuri }}"
      username: "{{ username }}"
      password: "{{ password }}"

  - name: Get HPE Fan Percent Minimum
    community.general.redfish_info:
      category: Chassis
      command: GetHPEFanPercentMin
      baseuri: "{{ baseuri }}"
      username: "{{ username }}"
      password: "{{ password }}"
'''

RETURN = '''
@@ -340,7 +356,7 @@ CATEGORY_COMMANDS_ALL = {
                "GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory",
                "GetBiosAttributes", "GetBootOrder", "GetBootOverride", "GetVirtualMedia"],
    "Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower",
                "GetChassisThermals", "GetChassisInventory", "GetHealthReport"],
                "GetChassisThermals", "GetChassisInventory", "GetHealthReport", "GetHPEThermalConfig", "GetHPEFanPercentMin"],
    "Accounts": ["ListUsers"],
    "Sessions": ["GetSessions"],
    "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory",
@@ -482,6 +498,10 @@ def main():
                    result["chassis"] = rf_utils.get_chassis_inventory()
                elif command == "GetHealthReport":
                    result["health_report"] = rf_utils.get_multi_chassis_health_report()
                elif command == "GetHPEThermalConfig":
                    result["hpe_thermal_config"] = rf_utils.get_hpe_thermal_config()
                elif command == "GetHPEFanPercentMin":
                    result["hpe_fan_percent_min"] = rf_utils.get_hpe_fan_percent_min()

        elif category == "Accounts":
            # execute only if we find an Account service resource

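The module pairs the CATEGORY_COMMANDS_ALL allow-list with an if/elif dispatch; the same shape can also be expressed as a table keyed by (category, command). A sketch with hypothetical handler wiring, shown only to illustrate the pattern:

def dispatch(rf_utils, category, command):
    # Table-driven equivalent of the if/elif chain above (illustrative only).
    handlers = {
        ("Chassis", "GetHPEThermalConfig"): lambda: {"hpe_thermal_config": rf_utils.get_hpe_thermal_config()},
        ("Chassis", "GetHPEFanPercentMin"): lambda: {"hpe_fan_percent_min": rf_utils.get_hpe_fan_percent_min()},
    }
    return handlers[(category, command)]()
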
@@ -13,18 +13,38 @@ __metaclass__ = type
DOCUMENTATION = '''
---
module: redhat_subscription
short_description: Manage registration and subscriptions to RHSM using the C(subscription-manager) command
short_description: Manage registration and subscriptions to RHSM using C(subscription-manager)
description:
  - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command
  - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command,
    registering using D-Bus if possible.
author: "Barnaby Court (@barnabycourt)"
notes:
  - |
    The module tries to use the D-Bus C(rhsm) service (part of C(subscription-manager))
    to register, starting from community.general 6.5.0: this is done so credentials
    (username, password, activation keys) can be passed to C(rhsm) in a secure way.
    C(subscription-manager) itself gets credentials only as arguments of command line
    parameters, which is I(not) secure, as they can be easily stolen by checking the
    process listing on the system. Due to limitations of the D-Bus interface of C(rhsm),
    the module will I(not) use D-Bus for registration when trying either to register
    using I(token), or when specifying I(environment), or when the system is old
    (typically RHEL 6 and older).
  - In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID.
  - Since 2.5 values for I(server_hostname), I(server_insecure), I(rhsm_baseurl),
    I(server_proxy_hostname), I(server_proxy_port), I(server_proxy_user) and
    I(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf)
    config file and default to None.
  - It is possible to interact with C(subscription-manager) only as root,
    so root permissions are required to successfully run this module.
  - Since community.general 6.5.0, credentials (that is, I(username) and I(password),
    I(activationkey), or I(token)) are needed only in case the system is not registered,
    or I(force_register) is specified; this makes it possible to use the module to tweak an
    already registered system, for example attaching pools to it (using I(pool), or I(pool_ids)),
    and modifying the C(syspurpose) attributes (using I(syspurpose)).
requirements:
  - subscription-manager
  - Optionally the C(dbus) Python library; this is usually included in the OS
    as it is used by C(subscription-manager).
extends_documentation_fragment:
  - community.general.attributes
attributes:
@@ -288,7 +308,7 @@ subscribed_pool_ids:
'''

from os.path import isfile
from os import unlink
from os import getuid, unlink
import re
import shutil
import tempfile
@@ -297,6 +317,7 @@ import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six.moves import configparser
from ansible.module_utils import distro


SUBMAN_CMD = None
@@ -410,13 +431,66 @@ class Rhsm(RegistrationBase):
        else:
            return False

    def register(self, username, password, token, auto_attach, activationkey, org_id,
    def _can_connect_to_dbus(self):
        """
        Checks whether it is possible to connect to the system D-Bus bus.

        :returns: bool -- whether it is possible to connect to the system D-Bus bus.
        """

        try:
            # Technically speaking, subscription-manager uses dbus-python
            # as D-Bus library, so this ought to work; better be safe than
            # sorry, I guess...
            import dbus
        except ImportError:
            self.module.debug('dbus Python module not available, will use CLI')
            return False

        try:
            bus = dbus.SystemBus()
            msg = dbus.lowlevel.SignalMessage('/', 'com.example', 'test')
            bus.send_message(msg)
            bus.flush()

        except dbus.exceptions.DBusException as e:
            self.module.debug('Failed to connect to system D-Bus bus, will use CLI: %s' % e)
            return False

        self.module.debug('Verified system D-Bus bus as usable')
        return True

    def register(self, was_registered, username, password, token, auto_attach, activationkey, org_id,
                 consumer_type, consumer_name, consumer_id, force_register, environment,
                 release):
        '''
        Register the current system to the provided RHSM or Red Hat Satellite
        or Katello server

        Raises:
          * Exception - if any error occurs during the registration
        '''
        # There is no support for token-based registration in the D-Bus API
        # of rhsm, so always use the CLI in that case;
        # also, since the specified environments are names, and the D-Bus APIs
        # require IDs for the environments, use the CLI also in that case
        if not token and not environment and self._can_connect_to_dbus():
            self._register_using_dbus(was_registered, username, password, auto_attach,
                                      activationkey, org_id, consumer_type,
                                      consumer_name, consumer_id,
                                      force_register, environment, release)
            return
        self._register_using_cli(username, password, token, auto_attach,
                                 activationkey, org_id, consumer_type,
                                 consumer_name, consumer_id,
                                 force_register, environment, release)

    def _register_using_cli(self, username, password, token, auto_attach,
                            activationkey, org_id, consumer_type, consumer_name,
                            consumer_id, force_register, environment, release):
        '''
        Register using the 'subscription-manager' command

        Raises:
          * Exception - if error occurs while running command
        '''
@@ -459,6 +533,190 @@ class Rhsm(RegistrationBase):

        rc, stderr, stdout = self.module.run_command(args, check_rc=True, expand_user_and_vars=False)

    def _register_using_dbus(self, was_registered, username, password, auto_attach,
                             activationkey, org_id, consumer_type, consumer_name,
                             consumer_id, force_register, environment, release):
        '''
        Register using D-Bus (connecting to the rhsm service)

        Raises:
          * Exception - if error occurs during the D-Bus communication
        '''
        import dbus

        SUBSCRIPTION_MANAGER_LOCALE = 'C'
        # Seconds to wait for Registration to complete over DBus;
        # 10 minutes should be a pretty generous timeout.
        REGISTRATION_TIMEOUT = 600

        def str2int(s, default=0):
            try:
                return int(s)
            except ValueError:
                return default

        distro_id = distro.id()
        distro_version_parts = distro.version_parts()
        distro_version = tuple(str2int(p) for p in distro_version_parts)

        # Stop the rhsm service when using systemd (which means Fedora or
        # RHEL 7+): this is because the service may not use new configuration bits
        # - with subscription-manager < 1.26.5-1 (in RHEL < 8.2);
        #   fixed later by https://github.com/candlepin/subscription-manager/pull/2175
        # - sporadically: https://bugzilla.redhat.com/show_bug.cgi?id=2049296
        if distro_id == 'fedora' or distro_version[0] >= 7:
            cmd = ['systemctl', 'stop', 'rhsm']
            self.module.run_command(cmd, check_rc=True, expand_user_and_vars=False)

        # While there is a 'force' option for the registration, it is actually
        # not implemented (and thus it does not work)
        # - in RHEL 7 and earlier
        # - in RHEL 8 before 8.8: https://bugzilla.redhat.com/show_bug.cgi?id=2118486
        # - in RHEL 9 before 9.2: https://bugzilla.redhat.com/show_bug.cgi?id=2121350
        # Hence, use it only when implemented, manually unregistering otherwise.
        # Match it on RHEL, since we know about it; other distributions
        # will need their own logic.
        dbus_force_option_works = False
        if (distro_id == 'rhel' and
                ((distro_version[0] == 8 and distro_version[1] >= 8) or
                 (distro_version[0] == 9 and distro_version[1] >= 2) or
                 distro_version[0] > 9)):
            dbus_force_option_works = True

        if force_register and not dbus_force_option_works and was_registered:
            self.unregister()

        register_opts = {}
        if consumer_type:
            register_opts['consumer_type'] = consumer_type
        if consumer_name:
            register_opts['name'] = consumer_name
        if consumer_id:
            register_opts['consumerid'] = consumer_id
        if environment:
            # The option for environments used to be 'environment' in versions
            # of RHEL before 8.6, and then it changed to 'environments'; since
            # the Register*() D-Bus functions reject unknown options, we have
            # to pass the right option depending on the version -- funky.
            def supports_option_environments():
                # subscription-manager in any supported Fedora version
                # has the new option.
                if distro_id == 'fedora':
                    return True
                # Check for RHEL 8 >= 8.6, or RHEL >= 9.
                if distro_id == 'rhel' and \
                        ((distro_version[0] == 8 and distro_version[1] >= 6) or
                         distro_version[0] >= 9):
                    return True
                # CentOS: similar checks as for RHEL, with one extra bit:
                # if the 2nd part of the version is empty, it means it is
                # CentOS Stream, and thus we can assume it has the latest
                # version of subscription-manager.
                if distro_id == 'centos' and \
                        ((distro_version[0] == 8 and
                          (distro_version[1] >= 6 or distro_version_parts[1] == '')) or
                         distro_version[0] >= 9):
                    return True
                # Unknown or old distro: assume it does not support
                # the new option.
                return False

            environment_key = 'environment'
            if supports_option_environments():
                environment_key = 'environments'
            register_opts[environment_key] = environment
        if force_register and dbus_force_option_works and was_registered:
            register_opts['force'] = True
        # Wrap it as proper D-Bus dict
        register_opts = dbus.Dictionary(register_opts, signature='sv', variant_level=1)

        connection_opts = {}
        # Wrap it as proper D-Bus dict
        connection_opts = dbus.Dictionary(connection_opts, signature='sv', variant_level=1)

        bus = dbus.SystemBus()
        register_server = bus.get_object('com.redhat.RHSM1',
                                         '/com/redhat/RHSM1/RegisterServer')
        address = register_server.Start(
            SUBSCRIPTION_MANAGER_LOCALE,
            dbus_interface='com.redhat.RHSM1.RegisterServer',
        )

        try:
            # Use the private bus to register the system
            self.module.debug('Connecting to the private DBus')
            private_bus = dbus.connection.Connection(address)

            try:
                if activationkey:
                    args = (
                        org_id,
                        [activationkey],
                        register_opts,
                        connection_opts,
                        SUBSCRIPTION_MANAGER_LOCALE,
                    )
                    private_bus.call_blocking(
                        'com.redhat.RHSM1',
                        '/com/redhat/RHSM1/Register',
                        'com.redhat.RHSM1.Register',
                        'RegisterWithActivationKeys',
                        'sasa{sv}a{sv}s',
                        args,
                        timeout=REGISTRATION_TIMEOUT,
                    )
                else:
                    args = (
                        org_id or '',
                        username,
                        password,
                        register_opts,
                        connection_opts,
                        SUBSCRIPTION_MANAGER_LOCALE,
                    )
                    private_bus.call_blocking(
                        'com.redhat.RHSM1',
                        '/com/redhat/RHSM1/Register',
                        'com.redhat.RHSM1.Register',
                        'Register',
                        'sssa{sv}a{sv}s',
                        args,
                        timeout=REGISTRATION_TIMEOUT,
                    )

            except dbus.exceptions.DBusException as e:
                # Sometimes we get NoReply but the registration has succeeded.
                # Check the registration status before deciding if this is an error.
                if e.get_dbus_name() == 'org.freedesktop.DBus.Error.NoReply':
                    if not self.is_registered():
                        # Host is not registered so re-raise the error
                        raise
                else:
                    raise
                # Host was registered so continue
            finally:
                # Always shut down the private bus
                self.module.debug('Shutting down private DBus instance')
                register_server.Stop(
                    SUBSCRIPTION_MANAGER_LOCALE,
                    dbus_interface='com.redhat.RHSM1.RegisterServer',
                )

        # Make sure to refresh all the local data: this will fetch all the
        # certificates, update redhat.repo, etc.
        self.module.run_command([SUBMAN_CMD, 'refresh'],
                                check_rc=True, expand_user_and_vars=False)

        if auto_attach:
            args = [SUBMAN_CMD, 'attach', '--auto']
            self.module.run_command(args, check_rc=True, expand_user_and_vars=False)

        # There is no support for setting the release via D-Bus, so invoke
        # the CLI for this.
        if release:
            args = [SUBMAN_CMD, 'release', '--set', release]
            self.module.run_command(args, check_rc=True, expand_user_and_vars=False)

    def unsubscribe(self, serials=None):
        '''
        Unsubscribe a system from subscribed channels
@@ -853,9 +1111,14 @@ def main():
            ['activationkey', 'environment'],
            ['activationkey', 'auto_attach'],
            ['pool', 'pool_ids']],
        required_if=[['state', 'present', ['username', 'activationkey', 'token'], True]],
        required_if=[['force_register', True, ['username', 'activationkey', 'token'], True]],
    )

    if getuid() != 0:
        module.fail_json(
            msg="Interacting with subscription-manager requires root permissions ('become: true')"
        )

    rhsm.module = module
    state = module.params['state']
    username = module.params['username']
@@ -907,8 +1170,11 @@ def main():
    # Ensure system is registered
    if state == 'present':

        # Cache the status of the system before the changes
        was_registered = rhsm.is_registered

        # Register system
        if rhsm.is_registered and not force_register:
        if was_registered and not force_register:
            if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True:
                try:
                    rhsm.sync_syspurpose()
@@ -930,10 +1196,12 @@ def main():
            else:
                module.exit_json(changed=False, msg="System already registered.")
        else:
            if not username and not activationkey and not token:
                module.fail_json(msg="state is present but any of the following are missing: username, activationkey, token")
            try:
                rhsm.enable()
                rhsm.configure(**module.params)
                rhsm.register(username, password, token, auto_attach, activationkey, org_id,
                rhsm.register(was_registered, username, password, token, auto_attach, activationkey, org_id,
                              consumer_type, consumer_name, consumer_id, force_register,
                              environment, release)
                if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True:

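Several branches above gate D-Bus behavior on (major, minor) tuples built from distro.version_parts(); the defensive int conversion matters because CentOS Stream reports an empty minor part. A compact sketch of that comparison:

def version_at_least(version_parts, minimum):
    def to_int(part):
        try:
            return int(part)
        except ValueError:
            return 0  # e.g. the empty minor part on CentOS Stream
    return tuple(to_int(p) for p in version_parts) >= minimum

print(version_at_least(('8', '8', ''), (8, 8)))  # True
print(version_at_least(('8', '', ''), (8, 6)))   # False -> treated as 8.0
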
@@ -18,6 +18,8 @@ notes:
  - This module will fail on an unregistered system.
    Use the C(redhat_subscription) module to register a system
    prior to setting the RHSM release.
  - It is possible to interact with C(subscription-manager) only as root,
    so root permissions are required to successfully run this module.
requirements:
  - Red Hat Enterprise Linux 6+ with subscription-manager installed
extends_documentation_fragment:
@@ -30,8 +32,8 @@ attributes:
options:
  release:
    description:
      - RHSM release version to use (use null to unset)
    required: true
      - RHSM release version to use.
      - To unset either pass C(null) for this option, or omit this option.
    type: str
author:
  - Sean Myers (@seandst)
@@ -41,17 +43,17 @@ EXAMPLES = '''
# Set release version to 7.1
- name: Set RHSM release version
  community.general.rhsm_release:
      release: "7.1"
    release: "7.1"

# Set release version to 6Server
- name: Set RHSM release version
  community.general.rhsm_release:
      release: "6Server"
    release: "6Server"

# Unset release version
- name: Unset RHSM release
  community.general.rhsm_release:
      release: null
    release: null
'''

RETURN = '''
@@ -63,6 +65,7 @@ current_release:

from ansible.module_utils.basic import AnsibleModule

import os
import re

# Matches release-like values such as 7.2, 5.10, 6Server, 8
@@ -104,11 +107,16 @@ def set_release(module, release):
def main():
    module = AnsibleModule(
        argument_spec=dict(
            release=dict(type='str', required=True),
            release=dict(type='str'),
        ),
        supports_check_mode=True
    )

    if os.getuid() != 0:
        module.fail_json(
            msg="Interacting with subscription-manager requires root permissions ('become: true')"
        )

    target_release = module.params['release']

    # sanity check: the target release at least looks like a valid release

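The comment above documents the accepted release shapes; a hypothetical pattern consistent with those examples (the module's actual regex may differ):

import re

# Accepts things like 7.2, 5.10, 6Server, 8 -- per the comment above.
release_matcher = re.compile(r'^\d+(\.\d+)?(\w+)?$')

for candidate in ('7.2', '5.10', '6Server', '8'):
    assert release_matcher.match(candidate)
assert release_matcher.match('not-a-release') is None
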
@@ -19,6 +19,8 @@ author: Giovanni Sciortino (@giovannisciortino)
notes:
  - In order to manage RHSM repositories the system must be already registered
    to RHSM manually or using the Ansible C(redhat_subscription) module.
  - It is possible to interact with C(subscription-manager) only as root,
    so root permissions are required to successfully run this module.

requirements:
  - subscription-manager
@@ -100,9 +102,7 @@ def run_subscription_manager(module, arguments):
    lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
    rc, out, err = module.run_command("%s %s" % (rhsm_bin, " ".join(arguments)), environ_update=lang_env)

    if rc == 1 and (err == 'The password you typed is invalid.\nPlease try again.\n' or os.getuid() != 0):
        module.fail_json(msg='The executable file subscription-manager must be run using root privileges')
    elif rc == 0 and out == 'This system has no repositories available through subscriptions.\n':
    if rc == 0 and out == 'This system has no repositories available through subscriptions.\n':
        module.fail_json(msg='This system has no repositories available through subscriptions')
    elif rc == 1:
        module.fail_json(msg='subscription-manager failed with the following error: %s' % err)
@@ -243,6 +243,12 @@ def main():
        ),
        supports_check_mode=True,
    )

    if os.getuid() != 0:
        module.fail_json(
            msg="Interacting with subscription-manager requires root permissions ('become: true')"
        )

    name = module.params['name']
    state = module.params['state']
    purge = module.params['purge']

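The same root check is inserted into several rhsm modules in this set; expressed as a shared helper it would look like this (the helper name is illustrative, the body is exactly the inline check above):

import os

def require_root(module):
    # subscription-manager can only be driven as root.
    if os.getuid() != 0:
        module.fail_json(
            msg="Interacting with subscription-manager requires root permissions ('become: true')"
        )
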
@@ -36,22 +36,10 @@ options:
    description:
      - Sets the project name.
    required: true
  url:
    type: str
    description:
      - Sets the rundeck instance URL.
    required: true
  api_version:
    type: int
    description:
      - Sets the API version used by module.
      - API version must be at least 14.
    default: 14
  token:
    type: str
  api_token:
    description:
      - Sets the token to authenticate against Rundeck API.
    required: true
    aliases: ["token"]
  project:
    type: str
    description:
@@ -82,8 +70,9 @@ options:
  validate_certs:
    version_added: '0.2.0'
extends_documentation_fragment:
- ansible.builtin.url
- community.general.attributes
  - ansible.builtin.url
  - community.general.attributes
  - community.general.rundeck
'''

EXAMPLES = '''
@@ -107,7 +96,7 @@ EXAMPLES = '''

- name: Remove a rundeck system policy
  community.general.rundeck_acl_policy:
    name: "Project_02"
    name: "Project_01"
    url: "https://rundeck.example.org"
    token: "mytoken"
    state: absent
@@ -129,49 +118,25 @@ after:
'''

# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url, url_argument_spec
from ansible.module_utils.common.text.converters import to_text
import json
import re

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.rundeck import (
    api_argument_spec,
    api_request,
)


class RundeckACLManager:
    def __init__(self, module):
        self.module = module

    def handle_http_code_if_needed(self, infos):
        if infos["status"] == 403:
            self.module.fail_json(msg="Token not allowed. Please ensure token is allowed or has the correct "
                                      "permissions.", rundeck_response=infos["body"])
        elif infos["status"] >= 500:
            self.module.fail_json(msg="Fatal Rundeck API error.", rundeck_response=infos["body"])

    def request_rundeck_api(self, query, data=None, method="GET"):
        resp, info = fetch_url(self.module,
                               "%s/api/%d/%s" % (self.module.params["url"], self.module.params["api_version"], query),
                               data=json.dumps(data),
                               method=method,
                               headers={
                                   "Content-Type": "application/json",
                                   "Accept": "application/json",
                                   "X-Rundeck-Auth-Token": self.module.params["token"]
                               })

        self.handle_http_code_if_needed(info)
        if resp is not None:
            resp = resp.read()
            if resp != b"":
                try:
                    json_resp = json.loads(to_text(resp, errors='surrogate_or_strict'))
                    return json_resp, info
                except ValueError as e:
                    self.module.fail_json(msg="Rundeck response was not a valid JSON. Exception was: %s. "
                                              "Object was: %s" % (str(e), resp))
        return resp, info

    def get_acl(self):
        resp, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"])
        resp, info = api_request(
            module=self.module,
            endpoint="system/acl/%s.aclpolicy" % self.module.params["name"],
        )

        return resp

    def create_or_update_acl(self):
@@ -181,9 +146,12 @@ class RundeckACLManager:
        if self.module.check_mode:
            self.module.exit_json(changed=True, before={}, after=self.module.params["policy"])

        dummy, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"],
                                               method="POST",
                                               data={"contents": self.module.params["policy"]})
        resp, info = api_request(
            module=self.module,
            endpoint="system/acl/%s.aclpolicy" % self.module.params["name"],
            method="POST",
            data={"contents": self.module.params["policy"]},
        )

        if info["status"] == 201:
            self.module.exit_json(changed=True, before={}, after=self.get_acl())
@@ -202,9 +170,12 @@ class RundeckACLManager:
        if self.module.check_mode:
            self.module.exit_json(changed=True, before=facts, after=facts)

        dummy, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"],
                                               method="PUT",
                                               data={"contents": self.module.params["policy"]})
        resp, info = api_request(
            module=self.module,
            endpoint="system/acl/%s.aclpolicy" % self.module.params["name"],
            method="PUT",
            data={"contents": self.module.params["policy"]},
        )

        if info["status"] == 200:
            self.module.exit_json(changed=True, before=facts, after=self.get_acl())
@@ -216,34 +187,39 @@ class RundeckACLManager:

    def remove_acl(self):
        facts = self.get_acl()

        if facts is None:
            self.module.exit_json(changed=False, before={}, after={})
        else:
            # If not in check mode, remove the project
            if not self.module.check_mode:
                self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"], method="DELETE")
            self.module.exit_json(changed=True, before=facts, after={})
                api_request(
                    module=self.module,
                    endpoint="system/acl/%s.aclpolicy" % self.module.params["name"],
                    method="DELETE",
                )

            self.module.exit_json(changed=True, before=facts, after={})


def main():
    # Also allow the user to set values for fetch_url
    argument_spec = url_argument_spec()
    argument_spec = api_argument_spec()
    argument_spec.update(dict(
        state=dict(type='str', choices=['present', 'absent'], default='present'),
        name=dict(required=True, type='str'),
        url=dict(required=True, type='str'),
        api_version=dict(type='int', default=14),
        token=dict(required=True, type='str', no_log=True),
        policy=dict(type='str'),
        project=dict(type='str'),
    ))

    argument_spec['api_token']['aliases'] = ['token']

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_if=[
            ['state', 'present', ['policy']],
        ],
        supports_check_mode=True
        supports_check_mode=True,
    )

    if not bool(re.match("[a-zA-Z0-9,.+_-]+", module.params["name"])):

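Both rundeck modules replace their private request_rundeck_api() with the shared module_utils helper. Based on the call sites above, a hedged sketch of what api_request() centralizes (the exact signature and behavior are defined in community.general's module_utils.rundeck):

import json

from ansible.module_utils.urls import fetch_url


def api_request(module, endpoint, method="GET", data=None):
    # One place for URL building, auth headers, and JSON decoding.
    url = "%s/api/%d/%s" % (module.params["url"], module.params["api_version"], endpoint)
    response, info = fetch_url(
        module, url,
        data=json.dumps(data) if data is not None else None,
        method=method,
        headers={
            "Content-Type": "application/json",
            "Accept": "application/json",
            "X-Rundeck-Auth-Token": module.params["api_token"],
        },
    )
    content = response.read() if response is not None else b""
    return (json.loads(content) if content else None), info
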
@@ -38,22 +38,10 @@ options:
    description:
      - Sets the project name.
    required: true
  url:
    type: str
    description:
      - Sets the rundeck instance URL.
    required: true
  api_version:
    type: int
    description:
      - Sets the API version used by module.
      - API version must be at least 14.
    default: 14
  token:
    type: str
  api_token:
    description:
      - Sets the token to authenticate against Rundeck API.
    required: true
    aliases: ["token"]
  client_cert:
    version_added: '0.2.0'
  client_key:
@@ -73,24 +61,27 @@ options:
  validate_certs:
    version_added: '0.2.0'
extends_documentation_fragment:
- ansible.builtin.url
- community.general.attributes
  - ansible.builtin.url
  - community.general.attributes
  - community.general.rundeck
'''

EXAMPLES = '''
- name: Create a rundeck project
  community.general.rundeck_project:
    name: "Project_01"
    api_version: 18
    label: "Project 01"
    description: "My Project 01"
    url: "https://rundeck.example.org"
    token: "mytoken"
    api_version: 39
    api_token: "mytoken"
    state: present

- name: Remove a rundeck project
  community.general.rundeck_project:
    name: "Project_02"
    name: "Project_01"
    url: "https://rundeck.example.org"
    token: "mytoken"
    api_token: "mytoken"
    state: absent
'''

@@ -111,60 +102,47 @@ after:

# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.urls import fetch_url, url_argument_spec
import json
from ansible_collections.community.general.plugins.module_utils.rundeck import (
    api_argument_spec,
    api_request,
)


class RundeckProjectManager(object):
    def __init__(self, module):
        self.module = module

    def handle_http_code_if_needed(self, infos):
        if infos["status"] == 403:
            self.module.fail_json(msg="Token not allowed. Please ensure token is allowed or has the correct "
                                      "permissions.", rundeck_response=infos["body"])
        elif infos["status"] >= 500:
            self.module.fail_json(msg="Fatal Rundeck API error.", rundeck_response=infos["body"])

    def request_rundeck_api(self, query, data=None, method="GET"):
        resp, info = fetch_url(self.module,
                               "%s/api/%d/%s" % (self.module.params["url"], self.module.params["api_version"], query),
                               data=json.dumps(data),
                               method=method,
                               headers={
                                   "Content-Type": "application/json",
                                   "Accept": "application/json",
                                   "X-Rundeck-Auth-Token": self.module.params["token"]
                               })

        self.handle_http_code_if_needed(info)
        if resp is not None:
            resp = resp.read()
            if resp != "":
                try:
                    json_resp = json.loads(resp)
                    return json_resp, info
                except ValueError as e:
                    self.module.fail_json(msg="Rundeck response was not a valid JSON. Exception was: %s. "
                                              "Object was: %s" % (to_native(e), resp))
        return resp, info

    def get_project_facts(self):
        resp, info = self.request_rundeck_api("project/%s" % self.module.params["name"])
        resp, info = api_request(
            module=self.module,
            endpoint="project/%s" % self.module.params["name"],
        )

        return resp

    def create_or_update_project(self):
        facts = self.get_project_facts()

        if facts is None:
            # If in check mode don't create project, simulate a fake project creation
            if self.module.check_mode:
                self.module.exit_json(changed=True, before={}, after={"name": self.module.params["name"]})
                self.module.exit_json(
                    changed=True,
                    before={},
                    after={
                        "name": self.module.params["name"]
                    },
                )

            resp, info = self.request_rundeck_api("projects", method="POST", data={
                "name": self.module.params["name"],
                "config": {}
            })
            resp, info = api_request(
                module=self.module,
                endpoint="projects",
                method="POST",
                data={
                    "name": self.module.params["name"],
                    "config": {},
                }
            )

            if info["status"] == 201:
                self.module.exit_json(changed=True, before={}, after=self.get_project_facts())
@@ -181,21 +159,25 @@ class RundeckProjectManager(object):
        else:
            # If not in check mode, remove the project
            if not self.module.check_mode:
                self.request_rundeck_api("project/%s" % self.module.params["name"], method="DELETE")
                api_request(
                    module=self.module,
                    endpoint="project/%s" % self.module.params["name"],
                    method="DELETE",
                )

            self.module.exit_json(changed=True, before=facts, after={})


def main():
    # Also allow the user to set values for fetch_url
    argument_spec = url_argument_spec()
    argument_spec = api_argument_spec()
    argument_spec.update(dict(
        state=dict(type='str', choices=['present', 'absent'], default='present'),
        name=dict(required=True, type='str'),
        url=dict(required=True, type='str'),
        api_version=dict(type='int', default=14),
        token=dict(required=True, type='str', no_log=True),
    ))

    argument_spec['api_token']['aliases'] = ['token']

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True

@@ -98,7 +98,7 @@ _state_map = dict(


class SnapAlias(StateModuleHelper):
    _RE_ALIAS_LIST = re.compile(r"^(?P<snap>[\w-]+)\s+(?P<alias>[\w-]+)\s+.*$")
    _RE_ALIAS_LIST = re.compile(r"^(?P<snap>\S+)\s+(?P<alias>[\w-]+)\s+.*$")

    module = dict(
        argument_spec={
@@ -142,7 +142,10 @@ class SnapAlias(StateModuleHelper):
            return results

        with self.runner("state name", check_rc=True, output_process=process) as ctx:
            return ctx.run(state="info")
            aliases = ctx.run(state="info")
            if self.verbosity >= 4:
                self.vars.get_aliases_run_info = ctx.run_info
            return aliases

    def _get_aliases_for(self, name):
        return self._get_aliases().get(name, [])

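The loosened snap group (\S+ instead of [\w-]+) accepts names the old pattern rejected, for example ones containing a dot. An illustrative check:

import re

old = re.compile(r"^(?P<snap>[\w-]+)\s+(?P<alias>[\w-]+)\s+.*$")
new = re.compile(r"^(?P<snap>\S+)\s+(?P<alias>[\w-]+)\s+.*$")

line = "firefox.geckodriver  geckodriver  -"
assert old.match(line) is None       # '.' is not in [\w-]
assert new.match(line) is not None   # \S+ takes the full snap name
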
@@ -8,6 +8,7 @@
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function

__metaclass__ = type

DOCUMENTATION = r'''
@@ -83,7 +84,14 @@ options:
  proxycommand:
    description:
      - Sets the C(ProxyCommand) option.
      - Mutually exclusive with I(proxyjump).
    type: str
  proxyjump:
    description:
      - Sets the C(ProxyJump) option.
      - Mutually exclusive with I(proxycommand).
    type: str
    version_added: 6.5.0
  forward_agent:
    description:
      - Sets the C(ForwardAgent) option.
@@ -101,7 +109,7 @@ options:
    type: str
    version_added: 6.1.0
requirements:
  - StormSSH
  - paramiko
'''

EXAMPLES = r'''
@@ -160,26 +168,20 @@ hosts_change_diff:
'''

import os
import traceback

from copy import deepcopy

STORM_IMP_ERR = None
try:
    from storm.parsers.ssh_config_parser import ConfigParser
    HAS_STORM = True
except ImportError:
    HAS_STORM = False
    STORM_IMP_ERR = traceback.format_exc()

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.general.plugins.module_utils._stormssh import ConfigParser, HAS_PARAMIKO, PARAMIKO_IMPORT_ERROR
from ansible_collections.community.general.plugins.module_utils.ssh import determine_config_file


class SSHConfig():
class SSHConfig(object):
    def __init__(self, module):
        self.module = module
        if not HAS_PARAMIKO:
            module.fail_json(msg=missing_required_lib('PARAMIKO'), exception=PARAMIKO_IMPORT_ERROR)
        self.params = module.params
        self.user = self.params.get('user')
        self.group = self.params.get('group') or self.user
@@ -215,6 +217,7 @@ class SSHConfig():
            strict_host_key_checking=self.params.get('strict_host_key_checking'),
            user_known_hosts_file=self.params.get('user_known_hosts_file'),
            proxycommand=self.params.get('proxycommand'),
            proxyjump=self.params.get('proxyjump'),
            host_key_algorithms=self.params.get('host_key_algorithms'),
        )

@@ -265,7 +268,8 @@ class SSHConfig():
        try:
            self.config.write_to_ssh_config()
        except PermissionError as perm_exec:
            self.module.fail_json(msg="Failed to write to %s due to permission issue: %s" % (self.config_file, to_native(perm_exec)))
            self.module.fail_json(
                msg="Failed to write to %s due to permission issue: %s" % (self.config_file, to_native(perm_exec)))
        # Make sure we set the permission
        perm_mode = '0600'
        if self.config_file == '/etc/ssh/ssh_config':
@@ -310,6 +314,7 @@ def main():
        identity_file=dict(type='path'),
        port=dict(type='str'),
        proxycommand=dict(type='str', default=None),
        proxyjump=dict(type='str', default=None),
        forward_agent=dict(type='bool'),
        remote_user=dict(type='str'),
        ssh_config_file=dict(default=None, type='path'),
@@ -324,13 +329,10 @@ def main():
        supports_check_mode=True,
        mutually_exclusive=[
            ['user', 'ssh_config_file'],
            ['proxycommand', 'proxyjump'],
        ],
    )

    if not HAS_STORM:
        module.fail_json(changed=False, msg=missing_required_lib("stormssh"),
                         exception=STORM_IMP_ERR)

    ssh_config_obj = SSHConfig(module)
    ssh_config_obj.ensure_state()


||||
@@ -218,7 +218,7 @@ EXAMPLES = """
   community.general.terraform:
     project_path: '{{ project_dir }}'
     state: present
-    camplex_vars: true
+    complex_vars: true
     variables:
       vm_name: "{{ inventory_hostname }}"
      vm_vcpus: 2
@@ -312,11 +312,11 @@ def preflight_validation(bin_path, project_path, version, variables_args=None, p


 def _state_args(state_file):
-    if state_file and os.path.exists(state_file):
-        return ['-state', state_file]
-    if state_file and not os.path.exists(state_file):
-        module.fail_json(msg='Could not find state_file "{0}", check the path and try again.'.format(state_file))
-    return []
+    if not state_file:
+        return []
+    if not os.path.exists(state_file):
+        module.warn('Could not find state_file "{0}", the process will not destroy any resources, please check your state file path.'.format(state_file))
+    return ['-state', state_file]


 def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths, workspace):
@@ -97,19 +97,9 @@ EXAMPLES = '''

 RETURN = '''#'''

-HAVE_UNIVENTION = False
-HAVE_IPADDRESS = False
-try:
-    from univention.admin.handlers.dns import (
-        forward_zone,
-        reverse_zone,
-    )
-    HAVE_UNIVENTION = True
-except ImportError:
-    pass
-
 from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.basic import missing_required_lib
+from ansible_collections.community.general.plugins.module_utils import deps
 from ansible_collections.community.general.plugins.module_utils.univention_umc import (
     umc_module_for_add,
     umc_module_for_edit,
@@ -118,27 +108,26 @@ from ansible_collections.community.general.plugins.module_utils.univention_umc i
     config,
     uldap,
 )
-try:
+
+with deps.declare("univention", msg="This module requires univention python bindings"):
+    from univention.admin.handlers.dns import (
+        forward_zone,
+        reverse_zone,
+    )
+
+with deps.declare("ipaddress"):
     import ipaddress
-    HAVE_IPADDRESS = True
-except ImportError:
-    pass


 def main():
     module = AnsibleModule(
         argument_spec=dict(
-            type=dict(required=True,
-                      type='str'),
-            zone=dict(required=True,
-                      type='str'),
-            name=dict(required=True,
-                      type='str'),
-            data=dict(default={},
-                      type='dict'),
-            state=dict(default='present',
-                       choices=['present', 'absent'],
-                       type='str')
+            type=dict(required=True, type='str'),
+            zone=dict(required=True, type='str'),
+            name=dict(required=True, type='str'),
+            data=dict(default={}, type='dict'),
+            state=dict(default='present', choices=['present', 'absent'], type='str')
         ),
         supports_check_mode=True,
         required_if=([
@@ -146,8 +135,7 @@ def main():
         ])
     )

-    if not HAVE_UNIVENTION:
-        module.fail_json(msg="This module requires univention python bindings")
+    deps.validate(module, "univention")

     type = module.params['type']
     zone = module.params['zone']
@@ -159,8 +147,8 @@ def main():

     workname = name
     if type == 'ptr_record':
-        if not HAVE_IPADDRESS:
-            module.fail_json(msg=missing_required_lib('ipaddress'))
+        deps.validate(module, "ipaddress")
+
         try:
             if 'arpa' not in zone:
                 raise Exception("Zone must be reversed zone for ptr_record. (e.g. 1.1.192.in-addr.arpa)")
@@ -196,7 +184,7 @@ def main():
             '(zoneName={0})'.format(zone),
             scope='domain',
         )
-        if len(so) == 0:
+        if not so == 0:
             raise Exception("Did not find zone '{0}' in Univention".format(zone))
         obj = umc_module_for_add('dns/{0}'.format(type), container, superordinate=so[0])
     else:
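The deps pattern introduced above defers import failures: the `with deps.declare(...)` block records any exception raised inside it, and `deps.validate(module, ...)` reports it through fail_json() at run time. A minimal sketch built only from the calls visible in this diff:

    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.community.general.plugins.module_utils import deps

    # An ImportError raised in this block is stored under the declared name
    # instead of aborting the module at import time.
    with deps.declare("ipaddress"):
        import ipaddress


    def main():
        module = AnsibleModule(argument_spec=dict())
        deps.validate(module, "ipaddress")  # fail_json()s here if the import failed
        module.exit_json(changed=False)


    if __name__ == '__main__':
        main()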
@@ -58,9 +58,6 @@ options:
       - Should the login data be stripped when proxying the request to the backend host
     type: bool
     default: true
-    choices:
-      - True
-      - False
   backend_user_prefix:
     type: str
     description:
@@ -118,9 +115,6 @@ options:
       - Allow session persistency
     type: bool
     default: false
-    choices:
-      - True
-      - False
   frontend_session_lifetime:
     type: int
     description:
@@ -131,9 +125,6 @@ options:
       - Specifies if limitation of session lifetime is active
     type: bool
     default: true
-    choices:
-      - True
-      - False
   frontend_session_lifetime_scope:
     type: str
     description:
@@ -153,9 +144,6 @@ options:
       - Specifies if session timeout is active
     type: bool
     default: true
-    choices:
-      - True
-      - False
   frontend_session_timeout_scope:
     type: str
     description:
@@ -184,9 +172,6 @@ options:
       - Should a redirect to the requested URL be made
     type: bool
     default: false
-    choices:
-      - True
-      - False

 extends_documentation_fragment:
 - community.general.utm
@@ -336,7 +321,7 @@ def main():
         aaa=dict(type='list', elements='str', required=True),
         basic_prompt=dict(type='str', required=True),
         backend_mode=dict(type='str', required=False, default="None", choices=['Basic', 'None']),
-        backend_strip_basic_auth=dict(type='bool', required=False, default=True, choices=[True, False]),
+        backend_strip_basic_auth=dict(type='bool', required=False, default=True),
         backend_user_prefix=dict(type='str', required=False, default=""),
         backend_user_suffix=dict(type='str', required=False, default=""),
         comment=dict(type='str', required=False, default=""),
@@ -348,16 +333,16 @@ def main():
         frontend_logout=dict(type='str', required=False),
         frontend_mode=dict(type='str', required=False, default="Basic", choices=['Basic', 'Form']),
         frontend_realm=dict(type='str', required=False),
-        frontend_session_allow_persistency=dict(type='bool', required=False, default=False, choices=[True, False]),
+        frontend_session_allow_persistency=dict(type='bool', required=False, default=False),
         frontend_session_lifetime=dict(type='int', required=True),
-        frontend_session_lifetime_limited=dict(type='bool', required=False, default=True, choices=[True, False]),
+        frontend_session_lifetime_limited=dict(type='bool', required=False, default=True),
         frontend_session_lifetime_scope=dict(type='str', required=False, default="hours", choices=['days', 'hours', 'minutes']),
         frontend_session_timeout=dict(type='int', required=True),
-        frontend_session_timeout_enabled=dict(type='bool', required=False, default=True, choices=[True, False]),
+        frontend_session_timeout_enabled=dict(type='bool', required=False, default=True),
         frontend_session_timeout_scope=dict(type='str', required=False, default="minutes", choices=['days', 'hours', 'minutes']),
         logout_delegation_urls=dict(type='list', elements='str', required=False, default=[]),
         logout_mode=dict(type='str', required=False, default="None", choices=['None', 'Delegation']),
-        redirect_to_requested_url=dict(type='bool', required=False, default=False, choices=[True, False])
+        redirect_to_requested_url=dict(type='bool', required=False, default=False)
     )
 )
 try:
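Background for the removals above: for type='bool' options, AnsibleModule already coerces and restricts the usual boolean spellings, so a choices=[True, False] list adds nothing, and the validate-modules sanity test rejects it. The cleaned-up spec entry is simply:

    # type='bool' already handles true/false, yes/no, on/off - no choices needed.
    backend_strip_basic_auth = dict(type='bool', required=False, default=True)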
@@ -300,31 +300,35 @@ def main():
                 prj_set = False
                 break

-    if not prj_set and not module.check_mode:
-        cmd = "project -s"
-        rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint)
-        if rc != 0:
-            result["cmd"] = cmd
-            result["rc"] = rc
-            result["stdout"] = stdout
-            result["stderr"] = stderr
-            module.fail_json(
-                msg="Could not get quota realtime block report.", **result
-            )
+    if state == "present" and not prj_set:
+        if not module.check_mode:
+            cmd = "project -s %s" % name
+            rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint)
+            if rc != 0:
+                result["cmd"] = cmd
+                result["rc"] = rc
+                result["stdout"] = stdout
+                result["stderr"] = stderr
+                module.fail_json(
+                    msg="Could not get quota realtime block report.", **result
+                )
+
+        result["changed"] = True

-    elif not prj_set and module.check_mode:
-        result["changed"] = True
     elif state == "absent" and prj_set and name != quota_default:
         if not module.check_mode:
             cmd = "project -C %s" % name
             rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint)
             if rc != 0:
                 result["cmd"] = cmd
                 result["rc"] = rc
                 result["stdout"] = stdout
                 result["stderr"] = stderr
                 module.fail_json(
                     msg="Failed to clear managed tree from project quota control.", **result
                 )

-    # Set limits
-    if state == "absent":
-        bhard = 0
-        bsoft = 0
-        ihard = 0
-        isoft = 0
-        rtbhard = 0
-        rtbsoft = 0
-        result["changed"] = True

     current_bsoft, current_bhard = quota_report(
         module, xfs_quota_bin, mountpoint, name, quota_type, "b"
@@ -336,6 +340,23 @@ def main():
         module, xfs_quota_bin, mountpoint, name, quota_type, "rtb"
     )

+    # Set limits
+    if state == "absent":
+        bhard = 0
+        bsoft = 0
+        ihard = 0
+        isoft = 0
+        rtbhard = 0
+        rtbsoft = 0
+
+        # Ensure that a non-existing quota does not trigger a change
+        current_bsoft = current_bsoft if current_bsoft is not None else 0
+        current_bhard = current_bhard if current_bhard is not None else 0
+        current_isoft = current_isoft if current_isoft is not None else 0
+        current_ihard = current_ihard if current_ihard is not None else 0
+        current_rtbsoft = current_rtbsoft if current_rtbsoft is not None else 0
+        current_rtbhard = current_rtbhard if current_rtbhard is not None else 0
+
     result["xfs_quota"] = dict(
         bsoft=current_bsoft,
         bhard=current_bhard,
@@ -370,25 +391,23 @@ def main():
         limit.append("rtbhard=%s" % rtbhard)
         result["rtbhard"] = int(rtbhard)

-    if len(limit) > 0 and not module.check_mode:
-        if name == quota_default:
-            cmd = "limit %s -d %s" % (type_arg, " ".join(limit))
-        else:
-            cmd = "limit %s %s %s" % (type_arg, " ".join(limit), name)
+    if len(limit) > 0:
+        if not module.check_mode:
+            if name == quota_default:
+                cmd = "limit %s -d %s" % (type_arg, " ".join(limit))
+            else:
+                cmd = "limit %s %s %s" % (type_arg, " ".join(limit), name)

-        rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint)
-        if rc != 0:
-            result["cmd"] = cmd
-            result["rc"] = rc
-            result["stdout"] = stdout
-            result["stderr"] = stderr
-            module.fail_json(msg="Could not set limits.", **result)
+            rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint)
+            if rc != 0:
+                result["cmd"] = cmd
+                result["rc"] = rc
+                result["stdout"] = stdout
+                result["stderr"] = stderr
+                module.fail_json(msg="Could not set limits.", **result)

         result["changed"] = True

-    elif len(limit) > 0 and module.check_mode:
-        result["changed"] = True
-
     module.exit_json(**result)
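The normalization added in the middle hunk is an idempotency guard: quota_report() yields None for a limit that was never set, while state=absent requests limits of 0. A rough sketch of why coercing None to 0 suppresses the spurious change:

    current_bsoft = None  # what quota_report() returns for a missing quota
    bsoft = 0             # the limit requested by state=absent

    current_bsoft = current_bsoft if current_bsoft is not None else 0
    print(current_bsoft != bsoft)  # False - no change is flagged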
@@ -179,15 +179,11 @@ class Yarn(object):
         self.registry = kwargs['registry']
         self.production = kwargs['production']
         self.ignore_scripts = kwargs['ignore_scripts']
+        self.executable = kwargs['executable']

         # Specify a version of package if version arg passed in
         self.name_version = None

-        if kwargs['executable']:
-            self.executable = kwargs['executable'].split(' ')
-        else:
-            self.executable = [module.get_bin_path('yarn', True)]
-
         if kwargs['version'] and self.name is not None:
             self.name_version = self.name + '@' + str(self.version)
         elif self.name is not None:
@@ -230,6 +226,15 @@ class Yarn(object):

         return None, None

+    def _process_yarn_error(self, err):
+        try:
+            # We need to filter for errors, since Yarn warnings are included in stderr
+            for line in err.splitlines():
+                if json.loads(line)['type'] == 'error':
+                    self.module.fail_json(msg=err)
+        except Exception:
+            self.module.fail_json(msg="Unexpected stderr output from Yarn: %s" % err, stderr=err)
+
     def list(self):
         cmd = ['list', '--depth=0', '--json']
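The new _process_yarn_error() relies on Yarn emitting one JSON object per stderr line, for warnings and errors alike. A sketch with fabricated stderr content showing the filtering it performs:

    import json

    # Hypothetical stderr from a --json Yarn run: warnings are tolerated,
    # anything of type "error" aborts the module.
    stderr = '\n'.join([
        '{"type": "warning", "data": "package.json: No license field"}',
        '{"type": "error", "data": "Received malformed response from registry"}',
    ])

    for line in stderr.splitlines():
        if json.loads(line)['type'] == 'error':
            print("would fail_json with:", json.loads(line)['data'])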
@@ -244,8 +249,7 @@ class Yarn(object):
         # because it only only lists binaries, but `yarn global add` can install libraries too.
         result, error = self._exec(cmd, run_in_check_mode=True, check_rc=False, unsupported_with_global=True)

-        if error:
-            self.module.fail_json(msg=error)
+        self._process_yarn_error(error)

         for json_line in result.strip().split('\n'):
             data = json.loads(json_line)
@@ -283,9 +287,7 @@ class Yarn(object):
         cmd_result, err = self._exec(['outdated', '--json'], True, False, unsupported_with_global=True)

         # the package.json in the global dir is missing a license field, so warnings are expected on stderr
-        for line in err.splitlines():
-            if json.loads(line)['type'] == 'error':
-                self.module.fail_json(msg=err)
+        self._process_yarn_error(err)

         if not cmd_result:
             return outdated
@@ -328,7 +330,6 @@ def main():
     version = module.params['version']
     globally = module.params['global']
     production = module.params['production']
-    executable = module.params['executable']
     registry = module.params['registry']
     state = module.params['state']
     ignore_scripts = module.params['ignore_scripts']
@@ -345,9 +346,14 @@ def main():
     if state == 'latest':
         version = 'latest'

+    if module.params['executable']:
+        executable = module.params['executable'].split(' ')
+    else:
+        executable = [module.get_bin_path('yarn', True)]
+
     # When installing globally, use the defined path for global node_modules
     if globally:
-        _rc, out, _err = module.run_command([executable, 'global', 'dir'], check_rc=True)
+        _rc, out, _err = module.run_command(executable + ['global', 'dir'], check_rc=True)
         path = out.strip()

     yarn = Yarn(module,
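The run_command change in the last hunk is about argument shape: executable is now itself a list (a configured executable string is split on spaces), so it has to be concatenated rather than embedded. For instance, with a hypothetical resolved binary:

    executable = ['/usr/bin/yarn']

    broken = [executable, 'global', 'dir']  # [['/usr/bin/yarn'], 'global', 'dir'] - nested list
    fixed = executable + ['global', 'dir']  # ['/usr/bin/yarn', 'global', 'dir']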
@@ -64,7 +64,7 @@ EXAMPLES = r'''
 - name: Remove lock from Apache / httpd to be updated again
   community.general.yum_versionlock:
     state: absent
-    package: httpd
+    name: httpd
 '''

 RETURN = r'''
@@ -62,7 +62,7 @@ EXAMPLES = '''
     name: rpool/myfs
     state: present
     extra_zfs_properties:
-      setuid: off
+      setuid: 'off'

 - name: Create a new volume called myvol in pool rpool.
   community.general.zfs:
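The quoting here, like the arp: 'off' change further below, matters because these examples are parsed as YAML 1.1, where a bare off is a boolean. A quick check with PyYAML:

    import yaml  # PyYAML follows YAML 1.1 boolean resolution

    print(yaml.safe_load("setuid: off"))    # {'setuid': False} - a boolean, not a string
    print(yaml.safe_load("setuid: 'off'"))  # {'setuid': 'off'}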
@@ -72,6 +72,13 @@ options:
     type: str
     required: false
     version_added: 5.8.0
+  use_tls:
+    description:
+      - Using TLS/SSL or not.
+    type: bool
+    default: false
+    required: false
+    version_added: '6.5.0'
 requirements:
     - kazoo >= 2.1
     - python >= 2.6
@@ -155,6 +162,7 @@ def main():
             recursive=dict(default=False, type='bool'),
             auth_scheme=dict(default='digest', choices=['digest', 'sasl']),
             auth_credential=dict(type='str', no_log=True),
+            use_tls=dict(default=False, type='bool'),
         ),
         supports_check_mode=False
     )
@@ -208,7 +216,7 @@ def check_params(params):
 class KazooCommandProxy():
     def __init__(self, module):
         self.module = module
-        self.zk = KazooClient(module.params['hosts'])
+        self.zk = KazooClient(module.params['hosts'], use_ssl=module.params['use_tls'])

     def absent(self):
         return self._absent(self.module.params['name'])
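The use_ssl flag passed through above is kazoo's client-side TLS switch (available in kazoo 2.7 and later). A minimal standalone sketch with placeholder connection details:

    from kazoo.client import KazooClient

    # Placeholder endpoint; 2281 is a commonly used ZooKeeper TLS port.
    zk = KazooClient(hosts='zk1.example.com:2281', use_ssl=True)
    zk.start()
    zk.ensure_path('/demo')
    zk.stop()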
@@ -324,10 +324,11 @@ def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None):
             m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
         else:
             return {}, rc, stdout, stderr
-    elif rc in [0, 106, 103]:
+    elif rc in [0, 102, 103, 106]:
         # zypper exit codes
         # 0: success
         # 106: signature verification failed
+        # 102: ZYPPER_EXIT_INF_REBOOT_NEEDED - Returned after a successful installation of a patch which requires reboot of computer.
         # 103: zypper was upgraded, run same command again
         if packages is None:
             firstrun = True
@@ -587,12 +588,12 @@ def main():
     elif state in ['installed', 'present', 'latest']:
         packages_changed, retvals = package_present(module, name, state == 'latest')

-    retvals['changed'] = retvals['rc'] == 0 and bool(packages_changed)
+    retvals['changed'] = retvals['rc'] in [0, 102] and bool(packages_changed)

     if module._diff:
         set_diff(module, retvals, packages_changed)

-    if retvals['rc'] != 0:
+    if retvals['rc'] not in [0, 102]:
         module.fail_json(msg="Zypper run failed.", **retvals)

     if not retvals['changed']:
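For reference, a small map of the zypper exit codes parse_zypper_xml() now accepts, with the descriptions used in the hunk above (note that main() still treats only 0 and 102 as overall success):

    ACCEPTABLE_RC = {
        0: "success",
        102: "ZYPPER_EXIT_INF_REBOOT_NEEDED - patch installed, reboot required",
        103: "zypper itself was upgraded, rerun the same command",
        106: "signature verification failed",
    }

    def xml_parse_ok(rc):
        return rc in ACCEPTABLE_RC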
@@ -3,8 +3,7 @@
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later

 integration_tests_dependencies:
 collections:
 - ansible.posix
 - community.crypto
 unit_tests_dependencies:
 - community.internal_test_tools
 - community.docker
@@ -61,7 +61,7 @@
     device: en1
     attributes:
       mtu: 900
-      arp: off
+      arp: 'off'
     state: present

 - name: Configure IP, netmask and set en1 up.
@@ -19,8 +19,8 @@

 - include_tasks: tests.yml
   with_nested:
-    - [ True, False ] # with_link
-    - [ True, False ] # with_alternatives
+    - [ true, false ] # with_link
+    - [ true, false ] # with_alternatives
     - [ 'auto', 'manual' ] # mode
   loop_control:
     loop_var: test_conf
@@ -34,7 +34,7 @@
   - include_tasks: tests_set_priority.yml
     with_sequence: start=3 end=4
     vars:
-      with_alternatives: True
+      with_alternatives: true
       mode: auto

 - block:
@@ -44,7 +44,7 @@
   - include_tasks: tests_set_priority.yml
     with_sequence: start=3 end=4
     vars:
-      with_alternatives: False
+      with_alternatives: false
       mode: auto

 # Test that path is checked: alternatives must fail when path is nonexistent
@@ -8,7 +8,7 @@
     name: dummy
     path: '/non/existent/path/there'
     link: '/usr/bin/dummy'
-  ignore_errors: True
+  ignore_errors: true
   register: alternative

 - name: Check previous task failed
@@ -83,7 +83,7 @@
 - name: Execute the current dummysubcmd command
   command: dummysubcmd
   register: cmd
-  ignore_errors: True
+  ignore_errors: true

 - name: Ensure that the subcommand is gone
   assert:
@@ -166,7 +166,7 @@
 - name: Execute the current dummysubcmd command
   command: dummysubcmd
   register: cmd
-  ignore_errors: True
+  ignore_errors: true

 - name: Ensure that the subcommand is gone
   assert:
Some files were not shown because too many files have changed in this diff.