mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-04-28 09:26:44 +00:00
Compare commits
203 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5914c1df8e | ||
|
|
f847531a35 | ||
|
|
df01cde23d | ||
|
|
cdd9ced441 | ||
|
|
dfb61b283d | ||
|
|
fe7b151a26 | ||
|
|
9b983fba86 | ||
|
|
2d27dbd9ea | ||
|
|
55cbccf0fc | ||
|
|
675860c392 | ||
|
|
5e8914e00c | ||
|
|
c0230342b4 | ||
|
|
ebf15447f0 | ||
|
|
86d10f53fd | ||
|
|
d679f51018 | ||
|
|
e9e494e1ff | ||
|
|
bf90b4e88a | ||
|
|
874d7f7050 | ||
|
|
3a19fbc89c | ||
|
|
2d8e1339d1 | ||
|
|
4892f954fa | ||
|
|
6fd58ba388 | ||
|
|
49967547df | ||
|
|
492170b414 | ||
|
|
25b9391fb4 | ||
|
|
1e30afa92f | ||
|
|
7d4e69cdf7 | ||
|
|
ff23e41c21 | ||
|
|
3ce7d2fc7e | ||
|
|
20a9d120aa | ||
|
|
f4a40592a1 | ||
|
|
3d5145e924 | ||
|
|
1ae5a2cd05 | ||
|
|
75aa353281 | ||
|
|
5fe10915a8 | ||
|
|
eca6494503 | ||
|
|
b12d422fcc | ||
|
|
33809395ab | ||
|
|
60fe6ebc3c | ||
|
|
957f3e6eca | ||
|
|
dbd918865f | ||
|
|
46a83df85e | ||
|
|
57f262504d | ||
|
|
29671cb54c | ||
|
|
fd91e94279 | ||
|
|
b001e36fb3 | ||
|
|
5f63476404 | ||
|
|
eff452e4a5 | ||
|
|
d393b16064 | ||
|
|
40b5967fc3 | ||
|
|
ccabc342b9 | ||
|
|
570f6a8791 | ||
|
|
b10cf1e357 | ||
|
|
acfe464a31 | ||
|
|
f32a8dc740 | ||
|
|
ea8f109056 | ||
|
|
7e7b84348b | ||
|
|
baf552337d | ||
|
|
3ff9161ab0 | ||
|
|
aa8728b22c | ||
|
|
590ff351b4 | ||
|
|
a5824a2a9d | ||
|
|
71975be3c1 | ||
|
|
564f87c775 | ||
|
|
6750a866c6 | ||
|
|
c6316c1153 | ||
|
|
2c825f04e7 | ||
|
|
8a33e070be | ||
|
|
d2a5fdfc71 | ||
|
|
b6b2419206 | ||
|
|
4c399f1c01 | ||
|
|
923d335646 | ||
|
|
be96c33257 | ||
|
|
e68b1017e3 | ||
|
|
44e4b1d202 | ||
|
|
24378fd944 | ||
|
|
c3e1715233 | ||
|
|
ec86ebed98 | ||
|
|
5aa2779a48 | ||
|
|
f6d15ec818 | ||
|
|
f0320b5ac9 | ||
|
|
79578e5db3 | ||
|
|
43beaf4b00 | ||
|
|
6b0cc3c1de | ||
|
|
81a95a347d | ||
|
|
8026337c44 | ||
|
|
8fb1aaaf53 | ||
|
|
fda610cc23 | ||
|
|
8ecb322816 | ||
|
|
1260a63241 | ||
|
|
f6aad230c2 | ||
|
|
f3a4e5ea27 | ||
|
|
e26eaa0565 | ||
|
|
3432be3a2f | ||
|
|
4f78e7979e | ||
|
|
8a0b7dcdc9 | ||
|
|
9cd3f37686 | ||
|
|
5e1508e0df | ||
|
|
be4d294b61 | ||
|
|
1d36db1806 | ||
|
|
16a18d1456 | ||
|
|
493fa405e2 | ||
|
|
25464602d9 | ||
|
|
8a670a3929 | ||
|
|
47b8df8019 | ||
|
|
9c411586ea | ||
|
|
e0465d1f48 | ||
|
|
ad6c28069a | ||
|
|
31a1c8e185 | ||
|
|
20bd0d130c | ||
|
|
e5ecaffa1a | ||
|
|
8fd89721cd | ||
|
|
b1231c1315 | ||
|
|
48b20894b3 | ||
|
|
1676b09573 | ||
|
|
94efecaf67 | ||
|
|
e9bc32528e | ||
|
|
8e84b3ef8e | ||
|
|
944bc78360 | ||
|
|
a2a81f11c3 | ||
|
|
1a72aea4c5 | ||
|
|
36eff2fd99 | ||
|
|
47514e1401 | ||
|
|
186b4200f6 | ||
|
|
6057c3c7c4 | ||
|
|
b12113c182 | ||
|
|
4860c20108 | ||
|
|
8efbd8172c | ||
|
|
8c1b7e3ddb | ||
|
|
ec228bf32f | ||
|
|
1d39bbefd9 | ||
|
|
59fb0cf506 | ||
|
|
6c4080e78d | ||
|
|
1a81760d47 | ||
|
|
c3db6343e5 | ||
|
|
e87c2c9eb4 | ||
|
|
90a1743acf | ||
|
|
eb6ef5ae2e | ||
|
|
89dd500159 | ||
|
|
9bab144d06 | ||
|
|
31eddc0ffe | ||
|
|
bc64c4035e | ||
|
|
3ed65a0a37 | ||
|
|
93008fd41c | ||
|
|
1462ed0b4a | ||
|
|
139fcdba88 | ||
|
|
e9b8692025 | ||
|
|
3d8049190c | ||
|
|
54025a2efc | ||
|
|
94015c2096 | ||
|
|
b1a711633b | ||
|
|
79d15d526a | ||
|
|
3aeaab2708 | ||
|
|
470f4e8c02 | ||
|
|
f7f79defab | ||
|
|
d4d1c847cf | ||
|
|
044c706d64 | ||
|
|
e22667b72f | ||
|
|
4fc0c9a6d8 | ||
|
|
93650233e4 | ||
|
|
a41d1851a5 | ||
|
|
bd4da7f2c0 | ||
|
|
bf853f6f35 | ||
|
|
5dc34829c4 | ||
|
|
061b861211 | ||
|
|
e4ce977079 | ||
|
|
4776ee20e3 | ||
|
|
fdfcd15960 | ||
|
|
b934e06569 | ||
|
|
5589bcb659 | ||
|
|
41af1c3693 | ||
|
|
3802d54922 | ||
|
|
ff4aff0bef | ||
|
|
86b19a2bf4 | ||
|
|
1fc53eea22 | ||
|
|
3ecbadf694 | ||
|
|
2181c2b090 | ||
|
|
0c295d4f61 | ||
|
|
db451bf68a | ||
|
|
91095240f4 | ||
|
|
e92908b66e | ||
|
|
ef09ea519c | ||
|
|
611e024550 | ||
|
|
94b4034fd2 | ||
|
|
1bfdee0830 | ||
|
|
9350954aa3 | ||
|
|
2f05cd3330 | ||
|
|
27906ca76b | ||
|
|
278b0607f5 | ||
|
|
00d1160b56 | ||
|
|
4266163c13 | ||
|
|
ec7f885e2f | ||
|
|
4f71c9384e | ||
|
|
53e5f51e57 | ||
|
|
83ff925417 | ||
|
|
8ff611089b | ||
|
|
4def87bc53 | ||
|
|
3d70bfa1e4 | ||
|
|
554ec94110 | ||
|
|
ab4f96105c | ||
|
|
d8cf32e6c4 | ||
|
|
ead9524dc3 | ||
|
|
5d5d403415 |
@@ -29,14 +29,14 @@ schedules:
|
||||
always: true
|
||||
branches:
|
||||
include:
|
||||
- stable-7
|
||||
- stable-6
|
||||
- stable-5
|
||||
- cron: 0 11 * * 0
|
||||
displayName: Weekly (old stable branches)
|
||||
always: true
|
||||
branches:
|
||||
include:
|
||||
- stable-4
|
||||
- stable-5
|
||||
|
||||
variables:
|
||||
- name: checkoutPath
|
||||
@@ -73,6 +73,19 @@ stages:
|
||||
- test: 3
|
||||
- test: 4
|
||||
- test: extra
|
||||
- stage: Sanity_2_15
|
||||
displayName: Sanity 2.15
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Test {0}
|
||||
testFormat: 2.15/sanity/{0}
|
||||
targets:
|
||||
- test: 1
|
||||
- test: 2
|
||||
- test: 3
|
||||
- test: 4
|
||||
- stage: Sanity_2_14
|
||||
displayName: Sanity 2.14
|
||||
dependsOn: []
|
||||
@@ -112,19 +125,6 @@ stages:
|
||||
- test: 2
|
||||
- test: 3
|
||||
- test: 4
|
||||
- stage: Sanity_2_11
|
||||
displayName: Sanity 2.11
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Test {0}
|
||||
testFormat: 2.11/sanity/{0}
|
||||
targets:
|
||||
- test: 1
|
||||
- test: 2
|
||||
- test: 3
|
||||
- test: 4
|
||||
### Units
|
||||
- stage: Units_devel
|
||||
displayName: Units devel
|
||||
@@ -143,6 +143,16 @@ stages:
|
||||
- test: 3.9
|
||||
- test: '3.10'
|
||||
- test: '3.11'
|
||||
- stage: Units_2_15
|
||||
displayName: Units 2.15
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.15/units/{0}/1
|
||||
targets:
|
||||
- test: "3.10"
|
||||
- stage: Units_2_14
|
||||
displayName: Units 2.14
|
||||
dependsOn: []
|
||||
@@ -152,7 +162,6 @@ stages:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.14/units/{0}/1
|
||||
targets:
|
||||
- test: 2.7
|
||||
- test: 3.9
|
||||
- stage: Units_2_13
|
||||
displayName: Units 2.13
|
||||
@@ -176,17 +185,6 @@ stages:
|
||||
targets:
|
||||
- test: 2.6
|
||||
- test: 3.8
|
||||
- stage: Units_2_11
|
||||
displayName: Units 2.11
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.11/units/{0}/1
|
||||
targets:
|
||||
- test: 2.7
|
||||
- test: 3.5
|
||||
|
||||
## Remote
|
||||
- stage: Remote_devel_extra_vms
|
||||
@@ -215,20 +213,34 @@ stages:
|
||||
parameters:
|
||||
testFormat: devel/{0}
|
||||
targets:
|
||||
- name: macOS 12.0
|
||||
test: macos/12.0
|
||||
- name: RHEL 7.9
|
||||
test: rhel/7.9
|
||||
- name: macOS 13.2
|
||||
test: macos/13.2
|
||||
- name: RHEL 9.1
|
||||
test: rhel/9.1
|
||||
- name: FreeBSD 13.1
|
||||
test: freebsd/13.1
|
||||
- name: FreeBSD 13.2
|
||||
test: freebsd/13.2
|
||||
- name: FreeBSD 12.4
|
||||
test: freebsd/12.4
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Remote_2_15
|
||||
displayName: Remote 2.15
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: 2.15/{0}
|
||||
targets:
|
||||
- name: RHEL 7.9
|
||||
test: rhel/7.9
|
||||
- name: FreeBSD 13.1
|
||||
test: freebsd/13.1
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Remote_2_14
|
||||
displayName: Remote 2.14
|
||||
dependsOn: []
|
||||
@@ -279,22 +291,6 @@ stages:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Remote_2_11
|
||||
displayName: Remote 2.11
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: 2.11/{0}
|
||||
targets:
|
||||
- name: RHEL 7.9
|
||||
test: rhel/7.9
|
||||
- name: RHEL 8.3
|
||||
test: rhel/8.3
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
|
||||
### Docker
|
||||
- stage: Docker_devel
|
||||
@@ -305,8 +301,6 @@ stages:
|
||||
parameters:
|
||||
testFormat: devel/linux/{0}
|
||||
targets:
|
||||
- name: CentOS 7
|
||||
test: centos7
|
||||
- name: Fedora 37
|
||||
test: fedora37
|
||||
- name: openSUSE 15
|
||||
@@ -321,6 +315,20 @@ stages:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Docker_2_15
|
||||
displayName: Docker 2.15
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: 2.15/linux/{0}
|
||||
targets:
|
||||
- name: CentOS 7
|
||||
test: centos7
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Docker_2_14
|
||||
displayName: Docker 2.14
|
||||
dependsOn: []
|
||||
@@ -371,24 +379,6 @@ stages:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Docker_2_11
|
||||
displayName: Docker 2.11
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: 2.11/linux/{0}
|
||||
targets:
|
||||
- name: Fedora 32
|
||||
test: fedora32
|
||||
- name: Fedora 33
|
||||
test: fedora33
|
||||
- name: Alpine 3
|
||||
test: alpine3
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
|
||||
### Community Docker
|
||||
- stage: Docker_community_devel
|
||||
@@ -402,7 +392,7 @@ stages:
|
||||
- name: Debian Bullseye
|
||||
test: debian-bullseye/3.9
|
||||
- name: ArchLinux
|
||||
test: archlinux/3.10
|
||||
test: archlinux/3.11
|
||||
- name: CentOS Stream 8
|
||||
test: centos-stream8/3.9
|
||||
groups:
|
||||
@@ -422,6 +412,16 @@ stages:
|
||||
targets:
|
||||
- test: 2.7
|
||||
- test: '3.11'
|
||||
- stage: Generic_2_15
|
||||
displayName: Generic 2.15
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.15/generic/{0}/1
|
||||
targets:
|
||||
- test: 3.9
|
||||
- stage: Generic_2_14
|
||||
displayName: Generic 2.14
|
||||
dependsOn: []
|
||||
@@ -452,48 +452,37 @@ stages:
|
||||
testFormat: 2.12/generic/{0}/1
|
||||
targets:
|
||||
- test: 3.8
|
||||
- stage: Generic_2_11
|
||||
displayName: Generic 2.11
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.11/generic/{0}/1
|
||||
targets:
|
||||
- test: 2.7
|
||||
- test: 3.5
|
||||
|
||||
- stage: Summary
|
||||
condition: succeededOrFailed()
|
||||
dependsOn:
|
||||
- Sanity_devel
|
||||
- Sanity_2_11
|
||||
- Sanity_2_12
|
||||
- Sanity_2_13
|
||||
- Sanity_2_14
|
||||
- Sanity_2_15
|
||||
- Units_devel
|
||||
- Units_2_11
|
||||
- Units_2_12
|
||||
- Units_2_13
|
||||
- Units_2_14
|
||||
- Units_2_15
|
||||
- Remote_devel_extra_vms
|
||||
- Remote_devel
|
||||
- Remote_2_11
|
||||
- Remote_2_12
|
||||
- Remote_2_13
|
||||
- Remote_2_14
|
||||
- Remote_2_15
|
||||
- Docker_devel
|
||||
- Docker_2_11
|
||||
- Docker_2_12
|
||||
- Docker_2_13
|
||||
- Docker_2_14
|
||||
- Docker_2_15
|
||||
- Docker_community_devel
|
||||
# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
|
||||
# - Generic_devel
|
||||
# - Generic_2_11
|
||||
# - Generic_2_12
|
||||
# - Generic_2_13
|
||||
# - Generic_2_14
|
||||
# - Generic_2_15
|
||||
jobs:
|
||||
- template: templates/coverage.yml
|
||||
|
||||
37
.github/BOTMETA.yml
vendored
37
.github/BOTMETA.yml
vendored
@@ -241,6 +241,8 @@ files:
|
||||
$lookups/manifold.py:
|
||||
labels: manifold
|
||||
maintainers: galanoff
|
||||
$lookups/merge_variables.py:
|
||||
maintainers: rlenferink m-a-r-k-e
|
||||
$lookups/onepass:
|
||||
labels: onepassword
|
||||
maintainers: samdoran
|
||||
@@ -265,6 +267,8 @@ files:
|
||||
maintainers: delineaKrehl tylerezimmerman
|
||||
$module_utils/:
|
||||
labels: module_utils
|
||||
$module_utils/btrfs.py:
|
||||
maintainers: gnfzdz
|
||||
$module_utils/deps.py:
|
||||
maintainers: russoz
|
||||
$module_utils/gconftool2.py:
|
||||
@@ -294,7 +298,6 @@ files:
|
||||
maintainers: $team_manageiq
|
||||
$module_utils/memset.py:
|
||||
labels: cloud memset
|
||||
maintainers: glitchcrab
|
||||
$module_utils/mh/:
|
||||
labels: module_helper
|
||||
maintainers: russoz
|
||||
@@ -394,6 +397,8 @@ files:
|
||||
maintainers: catcombo
|
||||
$modules/bower.py:
|
||||
maintainers: mwarkentin
|
||||
$modules/btrfs_:
|
||||
maintainers: gnfzdz
|
||||
$modules/bundler.py:
|
||||
maintainers: thoiberg
|
||||
$modules/bzr.py:
|
||||
@@ -440,7 +445,7 @@ files:
|
||||
labels: datadog_event
|
||||
maintainers: n0ts
|
||||
$modules/datadog_monitor.py:
|
||||
maintainers: skornehl
|
||||
ignore: skornehl
|
||||
$modules/dconf.py:
|
||||
maintainers: azaghal
|
||||
$modules/deploy_helper.py:
|
||||
@@ -588,7 +593,7 @@ files:
|
||||
ignore: jose-delarosa
|
||||
maintainers: $team_redfish
|
||||
$modules/ilo_:
|
||||
ignore: jose-delarosa
|
||||
ignore: jose-delarosa varini-hp
|
||||
maintainers: $team_redfish
|
||||
$modules/imc_rest.py:
|
||||
labels: cisco
|
||||
@@ -667,16 +672,22 @@ files:
|
||||
ignore: DWSR
|
||||
labels: jira
|
||||
maintainers: Slezhuk tarka pertoft
|
||||
$modules/kdeconfig.py:
|
||||
maintainers: smeso
|
||||
$modules/kernel_blacklist.py:
|
||||
maintainers: matze
|
||||
$modules/keycloak_:
|
||||
maintainers: $team_keycloak
|
||||
$modules/keycloak_authentication.py:
|
||||
maintainers: elfelip Gaetan2907
|
||||
$modules/keycloak_authz_authorization_scope.py:
|
||||
maintainers: mattock
|
||||
$modules/keycloak_client_rolemapping.py:
|
||||
maintainers: Gaetan2907
|
||||
$modules/keycloak_clientscope.py:
|
||||
maintainers: Gaetan2907
|
||||
$modules/keycloak_clientscope_type.py:
|
||||
maintainers: simonpahl
|
||||
$modules/keycloak_clientsecret_info.py:
|
||||
maintainers: fynncfchen johncant
|
||||
$modules/keycloak_clientsecret_regenerate.py:
|
||||
@@ -785,7 +796,7 @@ files:
|
||||
labels: maven_artifact
|
||||
maintainers: tumbl3w33d turb
|
||||
$modules/memset_:
|
||||
maintainers: glitchcrab
|
||||
ignore: glitchcrab
|
||||
$modules/mksysb.py:
|
||||
labels: aix mksysb
|
||||
maintainers: $team_aix
|
||||
@@ -987,7 +998,7 @@ files:
|
||||
maintainers: sysadmind
|
||||
$modules/puppet.py:
|
||||
labels: puppet
|
||||
maintainers: nibalizer emonty
|
||||
maintainers: emonty
|
||||
$modules/pushbullet.py:
|
||||
maintainers: willybarro
|
||||
$modules/pushover.py:
|
||||
@@ -1039,10 +1050,11 @@ files:
|
||||
maintainers: dagwieers
|
||||
$modules/redfish_:
|
||||
ignore: jose-delarosa
|
||||
maintainers: $team_redfish
|
||||
maintainers: $team_redfish TSKushal
|
||||
$modules/redhat_subscription.py:
|
||||
labels: redhat_subscription
|
||||
maintainers: barnabycourt alikins kahowell
|
||||
maintainers: $team_rhsm
|
||||
ignore: barnabycourt alikins kahowell
|
||||
$modules/redis.py:
|
||||
maintainers: slok
|
||||
$modules/redis_data.py:
|
||||
@@ -1065,9 +1077,9 @@ files:
|
||||
labels: rhn_register
|
||||
maintainers: jlaska $team_rhn
|
||||
$modules/rhsm_release.py:
|
||||
maintainers: seandst
|
||||
maintainers: seandst $team_rhsm
|
||||
$modules/rhsm_repository.py:
|
||||
maintainers: giovannisciortino
|
||||
maintainers: giovannisciortino $team_rhsm
|
||||
$modules/riak.py:
|
||||
maintainers: drewkerrigan jsmartin
|
||||
$modules/rocketchat.py:
|
||||
@@ -1320,7 +1332,7 @@ files:
|
||||
labels: m:xml xml
|
||||
maintainers: dagwieers magnus919 tbielawa cmprescott sm4rk0
|
||||
$modules/yarn.py:
|
||||
maintainers: chrishoffman verkaufer
|
||||
ignore: chrishoffman verkaufer
|
||||
$modules/yum_versionlock.py:
|
||||
maintainers: gyptazy aminvakil
|
||||
$modules/zfs:
|
||||
@@ -1386,7 +1398,7 @@ macros:
|
||||
team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2
|
||||
team_ipa: Akasurde Nosmoht fxfitz justchris1
|
||||
team_jboss: Wolfant jairojunior wbrefvem
|
||||
team_keycloak: eikef ndclt
|
||||
team_keycloak: eikef ndclt mattock
|
||||
team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber
|
||||
team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr
|
||||
team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder
|
||||
@@ -1394,8 +1406,9 @@ macros:
|
||||
team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding
|
||||
team_oracle: manojmeda mross22 nalsaber
|
||||
team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
|
||||
team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06
|
||||
team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06 jyundt
|
||||
team_rhn: FlossWare alikins barnabycourt vritant
|
||||
team_rhsm: cnsnyder ptoscano
|
||||
team_scaleway: remyleone abarbare
|
||||
team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
|
||||
team_suse: commel evrardjp lrupp toabctl AnderEnder alxgu andytom sealor
|
||||
|
||||
193
.github/workflows/ansible-test.yml
vendored
Normal file
193
.github/workflows/ansible-test.yml
vendored
Normal file
@@ -0,0 +1,193 @@
|
||||
---
|
||||
# Copyright (c) Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
# For the comprehensive list of the inputs supported by the ansible-community/ansible-test-gh-action GitHub Action, see
|
||||
# https://github.com/marketplace/actions/ansible-test
|
||||
|
||||
name: EOL CI
|
||||
on:
|
||||
# Run EOL CI against all pushes (direct commits, also merged PRs), Pull Requests
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- stable-*
|
||||
pull_request:
|
||||
# Run EOL CI once per day (at 10:00 UTC)
|
||||
schedule:
|
||||
- cron: '0 10 * * *'
|
||||
|
||||
concurrency:
|
||||
# Make sure there is at most one active run per PR, but do not cancel any non-PR runs
|
||||
group: ${{ github.workflow }}-${{ (github.head_ref && github.event.number) || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
sanity:
|
||||
name: EOL Sanity (Ⓐ${{ matrix.ansible }})
|
||||
strategy:
|
||||
matrix:
|
||||
ansible:
|
||||
- '2.11'
|
||||
# Ansible-test on various stable branches does not yet work well with cgroups v2.
|
||||
# Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
|
||||
# image for these stable branches. The list of branches where this is necessary will
|
||||
# shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28
|
||||
# for the latest list.
|
||||
runs-on: >-
|
||||
${{ contains(fromJson(
|
||||
'["2.9", "2.10", "2.11"]'
|
||||
), matrix.ansible) && 'ubuntu-20.04' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Perform sanity testing
|
||||
uses: felixfontein/ansible-test-gh-action@main
|
||||
with:
|
||||
ansible-core-github-repository-slug: felixfontein/ansible
|
||||
ansible-core-version: stable-${{ matrix.ansible }}
|
||||
coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
|
||||
pull-request-change-detection: 'true'
|
||||
testing-type: sanity
|
||||
|
||||
units:
|
||||
# Ansible-test on various stable branches does not yet work well with cgroups v2.
|
||||
# Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
|
||||
# image for these stable branches. The list of branches where this is necessary will
|
||||
# shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28
|
||||
# for the latest list.
|
||||
runs-on: >-
|
||||
${{ contains(fromJson(
|
||||
'["2.9", "2.10", "2.11"]'
|
||||
), matrix.ansible) && 'ubuntu-20.04' || 'ubuntu-latest' }}
|
||||
name: EOL Units (Ⓐ${{ matrix.ansible }}+py${{ matrix.python }})
|
||||
strategy:
|
||||
# As soon as the first unit test fails, cancel the others to free up the CI queue
|
||||
fail-fast: true
|
||||
matrix:
|
||||
ansible:
|
||||
- ''
|
||||
python:
|
||||
- ''
|
||||
exclude:
|
||||
- ansible: ''
|
||||
include:
|
||||
- ansible: '2.11'
|
||||
python: '2.7'
|
||||
- ansible: '2.11'
|
||||
python: '3.5'
|
||||
|
||||
steps:
|
||||
- name: >-
|
||||
Perform unit testing against
|
||||
Ansible version ${{ matrix.ansible }}
|
||||
uses: felixfontein/ansible-test-gh-action@main
|
||||
with:
|
||||
ansible-core-github-repository-slug: felixfontein/ansible
|
||||
ansible-core-version: stable-${{ matrix.ansible }}
|
||||
coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
|
||||
pre-test-cmd: >-
|
||||
mkdir -p ../../ansible
|
||||
;
|
||||
git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools
|
||||
pull-request-change-detection: 'true'
|
||||
target-python-version: ${{ matrix.python }}
|
||||
testing-type: units
|
||||
|
||||
integration:
|
||||
# Ansible-test on various stable branches does not yet work well with cgroups v2.
|
||||
# Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
|
||||
# image for these stable branches. The list of branches where this is necessary will
|
||||
# shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28
|
||||
# for the latest list.
|
||||
runs-on: >-
|
||||
${{ contains(fromJson(
|
||||
'["2.9", "2.10", "2.11"]'
|
||||
), matrix.ansible) && 'ubuntu-20.04' || 'ubuntu-latest' }}
|
||||
name: EOL I (Ⓐ${{ matrix.ansible }}+${{ matrix.docker }}+py${{ matrix.python }}:${{ matrix.target }})
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
ansible:
|
||||
- ''
|
||||
docker:
|
||||
- ''
|
||||
python:
|
||||
- ''
|
||||
target:
|
||||
- ''
|
||||
exclude:
|
||||
- ansible: ''
|
||||
include:
|
||||
# 2.11
|
||||
- ansible: '2.11'
|
||||
docker: fedora32
|
||||
python: ''
|
||||
target: azp/posix/1/
|
||||
- ansible: '2.11'
|
||||
docker: fedora32
|
||||
python: ''
|
||||
target: azp/posix/2/
|
||||
- ansible: '2.11'
|
||||
docker: fedora32
|
||||
python: ''
|
||||
target: azp/posix/3/
|
||||
- ansible: '2.11'
|
||||
docker: fedora33
|
||||
python: ''
|
||||
target: azp/posix/1/
|
||||
- ansible: '2.11'
|
||||
docker: fedora33
|
||||
python: ''
|
||||
target: azp/posix/2/
|
||||
- ansible: '2.11'
|
||||
docker: fedora33
|
||||
python: ''
|
||||
target: azp/posix/3/
|
||||
- ansible: '2.11'
|
||||
docker: alpine3
|
||||
python: ''
|
||||
target: azp/posix/1/
|
||||
- ansible: '2.11'
|
||||
docker: alpine3
|
||||
python: ''
|
||||
target: azp/posix/2/
|
||||
- ansible: '2.11'
|
||||
docker: alpine3
|
||||
python: ''
|
||||
target: azp/posix/3/
|
||||
# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
|
||||
# - ansible: '2.11'
|
||||
# docker: default
|
||||
# python: '2.7'
|
||||
# target: azp/generic/1/
|
||||
# - ansible: '2.11'
|
||||
# docker: default
|
||||
# python: '3.5'
|
||||
# target: azp/generic/2/
|
||||
|
||||
steps:
|
||||
- name: >-
|
||||
Perform integration testing against
|
||||
Ansible version ${{ matrix.ansible }}
|
||||
under Python ${{ matrix.python }}
|
||||
uses: felixfontein/ansible-test-gh-action@main
|
||||
with:
|
||||
ansible-core-github-repository-slug: felixfontein/ansible
|
||||
ansible-core-version: stable-${{ matrix.ansible }}
|
||||
coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
|
||||
docker-image: ${{ matrix.docker }}
|
||||
integration-continue-on-error: 'false'
|
||||
integration-diff: 'false'
|
||||
integration-retry-on-error: 'true'
|
||||
pre-test-cmd: >-
|
||||
mkdir -p ../../ansible
|
||||
;
|
||||
git clone --depth=1 --single-branch https://github.com/ansible-collections/ansible.posix.git ../../ansible/posix
|
||||
;
|
||||
git clone --depth=1 --single-branch https://github.com/ansible-collections/community.crypto.git ../../community/crypto
|
||||
;
|
||||
git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools
|
||||
pull-request-change-detection: 'true'
|
||||
target: ${{ matrix.target }}
|
||||
target-python-version: ${{ matrix.python }}
|
||||
testing-type: integration
|
||||
203
CHANGELOG.rst
203
CHANGELOG.rst
@@ -6,6 +6,209 @@ Community General Release Notes
|
||||
|
||||
This changelog describes changes after version 5.0.0.
|
||||
|
||||
v6.6.1
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular bugfix release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- dconf - if ``gi.repository.GLib`` is missing, try to respawn in a Python interpreter that has it (https://github.com/ansible-collections/community.general/pull/6491).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- deps module utils - do not fail when dependency cannot be found (https://github.com/ansible-collections/community.general/pull/6479).
|
||||
- nmcli - fix bond option ``xmit_hash_policy`` (https://github.com/ansible-collections/community.general/pull/6527).
|
||||
- passwordstore lookup plugin - make compatible with ansible-core 2.16 (https://github.com/ansible-collections/community.general/pull/6447).
|
||||
- portage - fix ``changed_use`` and ``newuse`` not triggering rebuilds (https://github.com/ansible-collections/community.general/issues/6008, https://github.com/ansible-collections/community.general/pull/6548).
|
||||
- portage - update the logic for generating the emerge command arguments to ensure that ``withbdeps: false`` results in a passing an ``n`` argument with the ``--with-bdeps`` emerge flag (https://github.com/ansible-collections/community.general/issues/6451, https://github.com/ansible-collections/community.general/pull/6456).
|
||||
- proxmox_tasks_info - remove ``api_user`` + ``api_password`` constraint from ``required_together`` as it causes to require ``api_password`` even when API token param is used (https://github.com/ansible-collections/community.general/issues/6201).
|
||||
- puppet - handling ``noop`` parameter was not working at all, now it is has been fixed (https://github.com/ansible-collections/community.general/issues/6452, https://github.com/ansible-collections/community.general/issues/6458).
|
||||
- terraform - fix broken ``warn()`` call (https://github.com/ansible-collections/community.general/pull/6497).
|
||||
- xfs_quota - in case of a project quota, the call to ``xfs_quota`` did not initialize/reset the project (https://github.com/ansible-collections/community.general/issues/5143).
|
||||
- zypper - added handling of zypper exitcode 102. Changed state is set correctly now and rc 102 is still preserved to be evaluated by the playbook (https://github.com/ansible-collections/community.general/pull/6534).
|
||||
|
||||
v6.6.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Bugfix and feature release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- cpanm - minor change, use feature from ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/6385).
|
||||
- dconf - be forgiving about boolean values: convert them to GVariant booleans automatically (https://github.com/ansible-collections/community.general/pull/6206).
|
||||
- dconf - minor refactoring improving parameters and dependencies validation (https://github.com/ansible-collections/community.general/pull/6336).
|
||||
- deps module utils - add function ``failed()`` providing the ability to check the dependency check result without triggering an exception (https://github.com/ansible-collections/community.general/pull/6383).
|
||||
- dig lookup plugin - Support multiple domains to be queried as indicated in docs (https://github.com/ansible-collections/community.general/pull/6334).
|
||||
- gitlab_project - add new option ``topics`` for adding topics to GitLab projects (https://github.com/ansible-collections/community.general/pull/6278).
|
||||
- homebrew_cask - allows passing ``--greedy`` option to ``upgrade_all`` (https://github.com/ansible-collections/community.general/pull/6267).
|
||||
- idrac_redfish_command - add ``job_id`` to ``CreateBiosConfigJob`` response (https://github.com/ansible-collections/community.general/issues/5603).
|
||||
- ipa_hostgroup - add ``append`` parameter for adding a new hosts to existing hostgroups without changing existing hostgroup members (https://github.com/ansible-collections/community.general/pull/6203).
|
||||
- keycloak_authentication - add flow type option to sub flows to allow the creation of 'form-flow' sub flows like in Keycloak's built-in registration flow (https://github.com/ansible-collections/community.general/pull/6318).
|
||||
- mksysb - improved the output of the module in case of errors (https://github.com/ansible-collections/community.general/issues/6263).
|
||||
- nmap inventory plugin - added environment variables for configure ``address`` and ``exclude`` (https://github.com/ansible-collections/community.general/issues/6351).
|
||||
- nmcli - add ``macvlan`` connection type (https://github.com/ansible-collections/community.general/pull/6312).
|
||||
- pipx - add ``system_site_packages`` parameter to give application access to system-wide packages (https://github.com/ansible-collections/community.general/pull/6308).
|
||||
- pipx - ensure ``include_injected`` parameter works with ``state=upgrade`` and ``state=latest`` (https://github.com/ansible-collections/community.general/pull/6212).
|
||||
- puppet - add new options ``skip_tags`` to exclude certain tagged resources during a puppet agent or apply (https://github.com/ansible-collections/community.general/pull/6293).
|
||||
- terraform - remove state file check condition and error block, because in the native implementation of terraform will not cause errors due to the non-existent file (https://github.com/ansible-collections/community.general/pull/6296).
|
||||
- udm_dns_record - minor refactor to the code (https://github.com/ansible-collections/community.general/pull/6382).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- archive - reduce RAM usage by generating CRC32 checksum over chunks (https://github.com/ansible-collections/community.general/pull/6274).
|
||||
- flatpak - fixes idempotency detection issues. In some cases the module could fail to properly detect already existing Flatpaks because of a parameter witch only checks the installed apps (https://github.com/ansible-collections/community.general/pull/6289).
|
||||
- icinga2_host - fix the data structure sent to Icinga to make use of host templates and template vars (https://github.com/ansible-collections/community.general/pull/6286).
|
||||
- idrac_redfish_command - allow user to specify ``resource_id`` for ``CreateBiosConfigJob`` to specify an exact manager (https://github.com/ansible-collections/community.general/issues/2090).
|
||||
- ini_file - make ``section`` parameter not required so it is possible to pass ``null`` as a value. This only was possible in the past due to a bug in ansible-core that now has been fixed (https://github.com/ansible-collections/community.general/pull/6404).
|
||||
- keycloak - improve error messages (https://github.com/ansible-collections/community.general/pull/6318).
|
||||
- one_vm - fix syntax error when creating VMs with a more complex template (https://github.com/ansible-collections/community.general/issues/6225).
|
||||
- pipx - fixed handling of ``install_deps=true`` with ``state=latest`` and ``state=upgrade`` (https://github.com/ansible-collections/community.general/pull/6303).
|
||||
- redhat_subscription - do not use D-Bus for registering when ``environment`` is specified, so it possible to specify again the environment names for registering, as the D-Bus APIs work only with IDs (https://github.com/ansible-collections/community.general/pull/6319).
|
||||
- redhat_subscription - try to unregister only when already registered when ``force_register`` is specified (https://github.com/ansible-collections/community.general/issues/6258, https://github.com/ansible-collections/community.general/pull/6259).
|
||||
- redhat_subscription - use the right D-Bus options for environments when registering a CentOS Stream 8 system and using ``environment`` (https://github.com/ansible-collections/community.general/pull/6275).
|
||||
- rhsm_release - make ``release`` parameter not required so it is possible to pass ``null`` as a value. This only was possible in the past due to a bug in ansible-core that now has been fixed (https://github.com/ansible-collections/community.general/pull/6401).
|
||||
- rundeck module utils - fix errors caused by the API empty responses (https://github.com/ansible-collections/community.general/pull/6300)
|
||||
- rundeck_acl_policy - fix ``TypeError - byte indices must be integers or slices, not str`` error caused by empty API response. Update the module to use ``module_utils.rundeck`` functions (https://github.com/ansible-collections/community.general/pull/5887, https://github.com/ansible-collections/community.general/pull/6300).
|
||||
- rundeck_project - update the module to use ``module_utils.rundeck`` functions (https://github.com/ansible-collections/community.general/issues/5742) (https://github.com/ansible-collections/community.general/pull/6300)
|
||||
- snap_alias - module would only recognize snap names containing letter, numbers or the underscore character, failing to identify valid snap names such as ``lxd.lxc`` (https://github.com/ansible-collections/community.general/pull/6361).
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
- btrfs_info - Query btrfs filesystem info
|
||||
- btrfs_subvolume - Manage btrfs subvolumes
|
||||
- ilo_redfish_command - Manages Out-Of-Band controllers using Redfish APIs
|
||||
- keycloak_authz_authorization_scope - Allows administration of Keycloak client authorization scopes via Keycloak API
|
||||
- keycloak_clientscope_type - Set the type of aclientscope in realm or client via Keycloak API
|
||||
|
||||
v6.5.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Feature and bugfix release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- apt_rpm - adds ``clean``, ``dist_upgrade`` and ``update_kernel`` parameters for clear caches, complete upgrade system, and upgrade kernel packages (https://github.com/ansible-collections/community.general/pull/5867).
|
||||
- dconf - parse GVariants for equality comparison when the Python module ``gi.repository`` is available (https://github.com/ansible-collections/community.general/pull/6049).
|
||||
- gitlab_runner - allow to register group runner (https://github.com/ansible-collections/community.general/pull/3935).
|
||||
- jira - add worklog functionality (https://github.com/ansible-collections/community.general/issues/6209, https://github.com/ansible-collections/community.general/pull/6210).
|
||||
- ldap modules - add ``ca_path`` option (https://github.com/ansible-collections/community.general/pull/6185).
|
||||
- make - add ``command`` return value to the module output (https://github.com/ansible-collections/community.general/pull/6160).
|
||||
- nmap inventory plugin - add new option ``open`` for only returning open ports (https://github.com/ansible-collections/community.general/pull/6200).
|
||||
- nmap inventory plugin - add new option ``port`` for port specific scan (https://github.com/ansible-collections/community.general/pull/6165).
|
||||
- nmcli - add ``default`` and ``default-or-eui64`` to the list of valid choices for ``addr_gen_mode6`` parameter (https://github.com/ansible-collections/community.general/pull/5974).
|
||||
- nmcli - add support for ``team.runner-fast-rate`` parameter for ``team`` connections (https://github.com/ansible-collections/community.general/issues/6065).
|
||||
- openbsd_pkg - set ``TERM`` to ``'dumb'`` in ``execute_command()`` to make module less dependant on the ``TERM`` environment variable set on the Ansible controller (https://github.com/ansible-collections/community.general/pull/6149).
|
||||
- pipx - optional ``install_apps`` parameter added to install applications from injected packages (https://github.com/ansible-collections/community.general/pull/6198).
|
||||
- proxmox_kvm - add new ``archive`` parameter. This is needed to create a VM from an archive (backup) (https://github.com/ansible-collections/community.general/pull/6159).
|
||||
- redfish_info - adds commands to retrieve the HPE ThermalConfiguration and FanPercentMinimum settings from iLO (https://github.com/ansible-collections/community.general/pull/6208).
|
||||
- redhat_subscription - credentials (``username``, ``activationkey``, and so on) are required now only if a system needs to be registered, or ``force_register`` is specified (https://github.com/ansible-collections/community.general/pull/5664).
|
||||
- redhat_subscription - the registration is done using the D-Bus ``rhsm`` service instead of spawning a ``subscription-manager register`` command, if possible; this avoids passing plain-text credentials as arguments to ``subscription-manager register``, which can be seen while that command runs (https://github.com/ansible-collections/community.general/pull/6122).
|
||||
- ssh_config - add ``proxyjump`` option (https://github.com/ansible-collections/community.general/pull/5970).
|
||||
- ssh_config - vendored StormSSH's config parser to avoid having to install StormSSH to use the module (https://github.com/ansible-collections/community.general/pull/6117).
|
||||
- znode module - optional ``use_tls`` parameter added for encrypted communication (https://github.com/ansible-collections/community.general/issues/6154).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- archive - avoid deprecated exception class on Python 3 (https://github.com/ansible-collections/community.general/pull/6180).
|
||||
- gitlab_runner - fix ``KeyError`` on runner creation and update (https://github.com/ansible-collections/community.general/issues/6112).
|
||||
- influxdb_user - fix running in check mode when the user does not exist yet (https://github.com/ansible-collections/community.general/pull/6111).
|
||||
- interfaces_file - fix reading options in lines not starting with a space (https://github.com/ansible-collections/community.general/issues/6120).
|
||||
- jail connection plugin - add ``inventory_hostname`` to vars under ``remote_addr``. This is needed for compatibility with ansible-core 2.13 (https://github.com/ansible-collections/community.general/pull/6118).
|
||||
- memset - fix memset urlerror handling (https://github.com/ansible-collections/community.general/pull/6114).
|
||||
- nmcli - fixed idempotency issue for bridge connections. Module forced default value of ``bridge.priority`` to nmcli if not set; if ``bridge.stp`` is disabled nmcli ignores it and keep default (https://github.com/ansible-collections/community.general/issues/3216, https://github.com/ansible-collections/community.general/issues/4683).
|
||||
- nmcli - fixed idempotency issue when module params is set to ``may_fail4=false`` and ``method4=disabled``; in this case nmcli ignores change and keeps their own default value ``yes`` (https://github.com/ansible-collections/community.general/pull/6106).
|
||||
- nmcli - implemented changing mtu value on vlan interfaces (https://github.com/ansible-collections/community.general/issues/4387).
|
||||
- opkg - fixes bug when using ``update_cache=true`` (https://github.com/ansible-collections/community.general/issues/6004).
|
||||
- redhat_subscription, rhsm_release, rhsm_repository - cleanly fail when not running as root, rather than hanging on an interactive ``console-helper`` prompt; they all interact with ``subscription-manager``, which already requires to be run as root (https://github.com/ansible-collections/community.general/issues/734, https://github.com/ansible-collections/community.general/pull/6211).
|
||||
- xenorchestra inventory plugin - fix failure to receive objects from server due to not checking the id of the response (https://github.com/ansible-collections/community.general/pull/6227).
|
||||
- yarn - fix ``global=true`` to not fail when `executable` wasn't specified (https://github.com/ansible-collections/community.general/pull/6132)
|
||||
- yarn - fixes bug where yarn module tasks would fail when warnings were emitted from Yarn. The ``yarn.list`` method was not filtering out warnings (https://github.com/ansible-collections/community.general/issues/6127).
|
||||
|
||||
New Plugins
|
||||
-----------
|
||||
|
||||
Lookup
|
||||
~~~~~~
|
||||
|
||||
- merge_variables - merge variables with a certain suffix
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
- kdeconfig - Manage KDE configuration files
|
||||
|
||||
v6.4.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular feature and bugfix release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- dnsimple - set custom User-Agent for API requests to DNSimple (https://github.com/ansible-collections/community.general/pull/5927).
|
||||
- flatpak_remote - add new boolean option ``enabled``. It controls, whether the remote is enabled or not (https://github.com/ansible-collections/community.general/pull/5926).
|
||||
- gitlab_project - add ``releases_access_level``, ``environments_access_level``, ``feature_flags_access_level``, ``infrastructure_access_level``, ``monitor_access_level``, and ``security_and_compliance_access_level`` options (https://github.com/ansible-collections/community.general/pull/5986).
|
||||
- jc filter plugin - added the ability to use parser plugins (https://github.com/ansible-collections/community.general/pull/6043).
|
||||
- keycloak_group - add new optional module parameter ``parents`` to properly handle keycloak subgroups (https://github.com/ansible-collections/community.general/pull/5814).
|
||||
- keycloak_user_federation - make ``org.keycloak.storage.ldap.mappers.LDAPStorageMapper`` the default value for mappers ``providerType`` (https://github.com/ansible-collections/community.general/pull/5863).
|
||||
- ldap modules - add ``xorder_discovery`` option (https://github.com/ansible-collections/community.general/issues/6045, https://github.com/ansible-collections/community.general/pull/6109).
|
||||
- lxd_container - add diff and check mode (https://github.com/ansible-collections/community.general/pull/5866).
|
||||
- mattermost, rocketchat, slack - replace missing default favicon with docs.ansible.com favicon (https://github.com/ansible-collections/community.general/pull/5928).
|
||||
- modprobe - add ``persistent`` option (https://github.com/ansible-collections/community.general/issues/4028, https://github.com/ansible-collections/community.general/pull/542).
|
||||
- osx_defaults - include stderr in error messages (https://github.com/ansible-collections/community.general/pull/6011).
|
||||
- proxmox - suppress urllib3 ``InsecureRequestWarnings`` when ``validate_certs`` option is ``false`` (https://github.com/ansible-collections/community.general/pull/5931).
|
||||
- redfish_command - adding ``EnableSecureBoot`` functionality (https://github.com/ansible-collections/community.general/pull/5899).
|
||||
- redfish_command - adding ``VerifyBiosAttributes`` functionality (https://github.com/ansible-collections/community.general/pull/5900).
|
||||
- sefcontext - add support for path substitutions (https://github.com/ansible-collections/community.general/issues/1193).
|
||||
|
||||
Deprecated Features
|
||||
-------------------
|
||||
|
||||
- gitlab_runner - the option ``access_level`` will lose its default value in community.general 8.0.0. From that version on, you have set this option to ``ref_protected`` explicitly, if you want to have a protected runner (https://github.com/ansible-collections/community.general/issues/5925).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- cartesian and flattened lookup plugins - adjust to parameter deprecation in ansible-core 2.14's ``listify_lookup_plugin_terms`` helper function (https://github.com/ansible-collections/community.general/pull/6074).
|
||||
- cloudflare_dns - fixed the idempotency for SRV DNS records (https://github.com/ansible-collections/community.general/pull/5972).
|
||||
- cloudflare_dns - fixed the possiblity of setting a root-level SRV DNS record (https://github.com/ansible-collections/community.general/pull/5972).
|
||||
- github_webhook - fix always changed state when no secret is provided (https://github.com/ansible-collections/community.general/pull/5994).
|
||||
- jenkins_plugin - fix error due to undefined variable when updates file is not downloaded (https://github.com/ansible-collections/community.general/pull/6100).
|
||||
- keycloak_client - fix accidental replacement of value for attribute ``saml.signing.private.key`` with ``no_log`` in wrong contexts (https://github.com/ansible-collections/community.general/pull/5934).
|
||||
- lxd_* modules, lxd inventory plugin - fix TLS/SSL certificate validation problems by using the correct purpose when creating the TLS context (https://github.com/ansible-collections/community.general/issues/5616, https://github.com/ansible-collections/community.general/pull/6034).
|
||||
- nmcli - fix change handling of values specified as an integer 0 (https://github.com/ansible-collections/community.general/pull/5431).
|
||||
- nmcli - fix failure to handle WIFI settings when connection type not specified (https://github.com/ansible-collections/community.general/pull/5431).
|
||||
- nmcli - fix improper detection of changes to ``wifi.wake-on-wlan`` (https://github.com/ansible-collections/community.general/pull/5431).
|
||||
- nmcli - order is significant for lists of addresses (https://github.com/ansible-collections/community.general/pull/6048).
|
||||
- onepassword lookup plugin - Changed to ignore errors from "op account get" calls. Previously, errors would prevent auto-signin code from executing (https://github.com/ansible-collections/community.general/pull/5942).
|
||||
- terraform and timezone - slight refactoring to avoid linter reporting potentially undefined variables (https://github.com/ansible-collections/community.general/pull/5933).
|
||||
- various plugins and modules - remove unnecessary imports (https://github.com/ansible-collections/community.general/pull/5940).
|
||||
- yarn - fix ``global=true`` to check for the configured global folder instead of assuming the default (https://github.com/ansible-collections/community.general/pull/5829)
|
||||
- yarn - fix ``state=absent`` not working with ``global=true`` when the package does not include a binary (https://github.com/ansible-collections/community.general/pull/5829)
|
||||
- yarn - fix ``state=latest`` not working with ``global=true`` (https://github.com/ansible-collections/community.general/issues/5712).
|
||||
- zfs_delegate_admin - zfs allow output can now be parsed when uids/gids are not known to the host system (https://github.com/ansible-collections/community.general/pull/5943).
|
||||
- zypper - make package managing work on readonly filesystem of openSUSE MicroOS (https://github.com/ansible-collections/community.general/pull/5615).
|
||||
|
||||
v6.3.0
|
||||
======
|
||||
|
||||
|
||||
@@ -7,6 +7,7 @@ SPDX-License-Identifier: GPL-3.0-or-later
|
||||
# Community General Collection
|
||||
|
||||
[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
|
||||
[](https://github.com/ansible-collections/community.general/actions)
|
||||
[](https://codecov.io/gh/ansible-collections/community.general)
|
||||
|
||||
This repository contains the `community.general` Ansible Collection. The collection is a part of the Ansible package and includes many modules and plugins supported by Ansible community which are not part of more specialized community collections.
|
||||
|
||||
@@ -1024,3 +1024,381 @@ releases:
|
||||
name: ocapi_info
|
||||
namespace: ''
|
||||
release_date: '2023-01-31'
|
||||
6.4.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- cartesian and flattened lookup plugins - adjust to parameter deprecation in
|
||||
ansible-core 2.14's ``listify_lookup_plugin_terms`` helper function (https://github.com/ansible-collections/community.general/pull/6074).
|
||||
- cloudflare_dns - fixed the idempotency for SRV DNS records (https://github.com/ansible-collections/community.general/pull/5972).
|
||||
- cloudflare_dns - fixed the possiblity of setting a root-level SRV DNS record
|
||||
(https://github.com/ansible-collections/community.general/pull/5972).
|
||||
- github_webhook - fix always changed state when no secret is provided (https://github.com/ansible-collections/community.general/pull/5994).
|
||||
- jenkins_plugin - fix error due to undefined variable when updates file is
|
||||
not downloaded (https://github.com/ansible-collections/community.general/pull/6100).
|
||||
- keycloak_client - fix accidental replacement of value for attribute ``saml.signing.private.key``
|
||||
with ``no_log`` in wrong contexts (https://github.com/ansible-collections/community.general/pull/5934).
|
||||
- lxd_* modules, lxd inventory plugin - fix TLS/SSL certificate validation problems
|
||||
by using the correct purpose when creating the TLS context (https://github.com/ansible-collections/community.general/issues/5616,
|
||||
https://github.com/ansible-collections/community.general/pull/6034).
|
||||
- nmcli - fix change handling of values specified as an integer 0 (https://github.com/ansible-collections/community.general/pull/5431).
|
||||
- nmcli - fix failure to handle WIFI settings when connection type not specified
|
||||
(https://github.com/ansible-collections/community.general/pull/5431).
|
||||
- nmcli - fix improper detection of changes to ``wifi.wake-on-wlan`` (https://github.com/ansible-collections/community.general/pull/5431).
|
||||
- nmcli - order is significant for lists of addresses (https://github.com/ansible-collections/community.general/pull/6048).
|
||||
- onepassword lookup plugin - Changed to ignore errors from "op account get"
|
||||
calls. Previously, errors would prevent auto-signin code from executing (https://github.com/ansible-collections/community.general/pull/5942).
|
||||
- terraform and timezone - slight refactoring to avoid linter reporting potentially
|
||||
undefined variables (https://github.com/ansible-collections/community.general/pull/5933).
|
||||
- various plugins and modules - remove unnecessary imports (https://github.com/ansible-collections/community.general/pull/5940).
|
||||
- yarn - fix ``global=true`` to check for the configured global folder instead
|
||||
of assuming the default (https://github.com/ansible-collections/community.general/pull/5829)
|
||||
- yarn - fix ``state=absent`` not working with ``global=true`` when the package
|
||||
does not include a binary (https://github.com/ansible-collections/community.general/pull/5829)
|
||||
- yarn - fix ``state=latest`` not working with ``global=true`` (https://github.com/ansible-collections/community.general/issues/5712).
|
||||
- zfs_delegate_admin - zfs allow output can now be parsed when uids/gids are
|
||||
not known to the host system (https://github.com/ansible-collections/community.general/pull/5943).
|
||||
- zypper - make package managing work on readonly filesystem of openSUSE MicroOS
|
||||
(https://github.com/ansible-collections/community.general/pull/5615).
|
||||
deprecated_features:
|
||||
- gitlab_runner - the option ``access_level`` will lose its default value in
|
||||
community.general 8.0.0. From that version on, you have set this option to
|
||||
``ref_protected`` explicitly, if you want to have a protected runner (https://github.com/ansible-collections/community.general/issues/5925).
|
||||
minor_changes:
|
||||
- dnsimple - set custom User-Agent for API requests to DNSimple (https://github.com/ansible-collections/community.general/pull/5927).
|
||||
- flatpak_remote - add new boolean option ``enabled``. It controls, whether
|
||||
the remote is enabled or not (https://github.com/ansible-collections/community.general/pull/5926).
|
||||
- gitlab_project - add ``releases_access_level``, ``environments_access_level``,
|
||||
``feature_flags_access_level``, ``infrastructure_access_level``, ``monitor_access_level``,
|
||||
and ``security_and_compliance_access_level`` options (https://github.com/ansible-collections/community.general/pull/5986).
|
||||
- jc filter plugin - added the ability to use parser plugins (https://github.com/ansible-collections/community.general/pull/6043).
|
||||
- keycloak_group - add new optional module parameter ``parents`` to properly
|
||||
handle keycloak subgroups (https://github.com/ansible-collections/community.general/pull/5814).
|
||||
- keycloak_user_federation - make ``org.keycloak.storage.ldap.mappers.LDAPStorageMapper``
|
||||
the default value for mappers ``providerType`` (https://github.com/ansible-collections/community.general/pull/5863).
|
||||
- ldap modules - add ``xorder_discovery`` option (https://github.com/ansible-collections/community.general/issues/6045,
|
||||
https://github.com/ansible-collections/community.general/pull/6109).
|
||||
- lxd_container - add diff and check mode (https://github.com/ansible-collections/community.general/pull/5866).
|
||||
- mattermost, rocketchat, slack - replace missing default favicon with docs.ansible.com
|
||||
favicon (https://github.com/ansible-collections/community.general/pull/5928).
|
||||
- modprobe - add ``persistent`` option (https://github.com/ansible-collections/community.general/issues/4028,
|
||||
https://github.com/ansible-collections/community.general/pull/542).
|
||||
- osx_defaults - include stderr in error messages (https://github.com/ansible-collections/community.general/pull/6011).
|
||||
- proxmox - suppress urllib3 ``InsecureRequestWarnings`` when ``validate_certs``
|
||||
option is ``false`` (https://github.com/ansible-collections/community.general/pull/5931).
|
||||
- redfish_command - adding ``EnableSecureBoot`` functionality (https://github.com/ansible-collections/community.general/pull/5899).
|
||||
- redfish_command - adding ``VerifyBiosAttributes`` functionality (https://github.com/ansible-collections/community.general/pull/5900).
|
||||
- sefcontext - add support for path substitutions (https://github.com/ansible-collections/community.general/issues/1193).
|
||||
release_summary: Regular feature and bugfix release.
|
||||
fragments:
|
||||
- 4028-modprobe-persistent-option.yml
|
||||
- 5431-nmcli-wifi.yml
|
||||
- 5615-zypper-transactional-update.yml
|
||||
- 5814-support-keycloak-subgroups.yml
|
||||
- 5829-fix-yarn-global.yml
|
||||
- 5830-sefcontext-path-subs.yml
|
||||
- 5863-providerType-defaulted-keycloak_userfed-mappers.yml
|
||||
- 5866-lxd_container-diff-and-check-mode.yml
|
||||
- 5899-adding-enablesecureboot-functionality-to-redfish-config.yml
|
||||
- 5900-adding-verifybiosattribute-fucntionality-to-redfish-command.yml
|
||||
- 5915-suppress-urllib3-insecure-request-warnings.yml
|
||||
- 5925-align_gitlab_runner_access_level_default_with_gitlab.yml
|
||||
- 5926-flatpak-remote-enabled.yml
|
||||
- 5927-set-user-agent-dnsimple.yml
|
||||
- 5928-fix-favicon-url.yml
|
||||
- 5933-linting.yml
|
||||
- 5934-fix-keycloak-sanitize_cr.yml
|
||||
- 5942-onepassword-ignore-errors-from-op-account-get.yml
|
||||
- 5943-zfs_delegate_admin-fix-zfs-allow-cannot-parse-unknown-uid-gid.yml
|
||||
- 5972-cloudflare-dns-srv-record.yml
|
||||
- 5985-add-new-gitlab-api-features.yml
|
||||
- 5994-github-webhook-secret.yml
|
||||
- 6.4.0.yml
|
||||
- 6011-osx-defaults-errors.yml
|
||||
- 6034-lxd-tls.yml
|
||||
- 6043-jc_plugin_parser_support.yml
|
||||
- 6045-xorder-discovery.yml
|
||||
- 6048-nmcli-addres-order.yml
|
||||
- 6074-loader_in_listify.yml.yml
|
||||
- 6100-jenkins_plugin.yml
|
||||
- remove-unneeded-imports.yml
|
||||
release_date: '2023-02-27'
|
||||
6.5.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- archive - avoid deprecated exception class on Python 3 (https://github.com/ansible-collections/community.general/pull/6180).
|
||||
- gitlab_runner - fix ``KeyError`` on runner creation and update (https://github.com/ansible-collections/community.general/issues/6112).
|
||||
- influxdb_user - fix running in check mode when the user does not exist yet
|
||||
(https://github.com/ansible-collections/community.general/pull/6111).
|
||||
- interfaces_file - fix reading options in lines not starting with a space (https://github.com/ansible-collections/community.general/issues/6120).
|
||||
- jail connection plugin - add ``inventory_hostname`` to vars under ``remote_addr``.
|
||||
This is needed for compatibility with ansible-core 2.13 (https://github.com/ansible-collections/community.general/pull/6118).
|
||||
- memset - fix memset urlerror handling (https://github.com/ansible-collections/community.general/pull/6114).
|
||||
- nmcli - fixed idempotency issue for bridge connections. The module forced the
default value of ``bridge.priority`` onto nmcli even when it was not set; if ``bridge.stp``
is disabled, nmcli ignores it and keeps its default (https://github.com/ansible-collections/community.general/issues/3216,
https://github.com/ansible-collections/community.general/issues/4683).
|
||||
- nmcli - fixed idempotency issue when module parameters are set to ``may_fail4=false``
and ``method4=disabled``; in this case nmcli ignores the change and keeps its
own default value ``yes`` (https://github.com/ansible-collections/community.general/pull/6106).
|
||||
- nmcli - implemented changing mtu value on vlan interfaces (https://github.com/ansible-collections/community.general/issues/4387).
|
||||
- opkg - fixes bug when using ``update_cache=true`` (https://github.com/ansible-collections/community.general/issues/6004).
|
||||
- redhat_subscription, rhsm_release, rhsm_repository - cleanly fail when not
|
||||
running as root, rather than hanging on an interactive ``console-helper``
|
||||
prompt; they all interact with ``subscription-manager``, which already requires
|
||||
to be run as root (https://github.com/ansible-collections/community.general/issues/734,
|
||||
https://github.com/ansible-collections/community.general/pull/6211).
|
||||
- xenorchestra inventory plugin - fix failure to receive objects from server
|
||||
due to not checking the id of the response (https://github.com/ansible-collections/community.general/pull/6227).
|
||||
- yarn - fix ``global=true`` to not fail when ``executable`` was not specified
(https://github.com/ansible-collections/community.general/pull/6132).
|
||||
- yarn - fixes bug where yarn module tasks would fail when warnings were emitted
|
||||
from Yarn. The ``yarn.list`` method was not filtering out warnings (https://github.com/ansible-collections/community.general/issues/6127).
|
||||
minor_changes:
|
||||
- apt_rpm - adds ``clean``, ``dist_upgrade`` and ``update_kernel`` parameters for
clearing caches, upgrading the complete system, and upgrading kernel packages (https://github.com/ansible-collections/community.general/pull/5867).
|
||||
- dconf - parse GVariants for equality comparison when the Python module ``gi.repository``
|
||||
is available (https://github.com/ansible-collections/community.general/pull/6049).
|
||||
- gitlab_runner - allow to register group runner (https://github.com/ansible-collections/community.general/pull/3935).
|
||||
- jira - add worklog functionality (https://github.com/ansible-collections/community.general/issues/6209,
|
||||
https://github.com/ansible-collections/community.general/pull/6210).
|
||||
- ldap modules - add ``ca_path`` option (https://github.com/ansible-collections/community.general/pull/6185).
|
||||
- make - add ``command`` return value to the module output (https://github.com/ansible-collections/community.general/pull/6160).
|
||||
- nmap inventory plugin - add new option ``open`` for only returning open ports
|
||||
(https://github.com/ansible-collections/community.general/pull/6200).
|
||||
- nmap inventory plugin - add new option ``port`` for port specific scan (https://github.com/ansible-collections/community.general/pull/6165).
|
||||
- nmcli - add ``default`` and ``default-or-eui64`` to the list of valid choices
|
||||
for ``addr_gen_mode6`` parameter (https://github.com/ansible-collections/community.general/pull/5974).
|
||||
- nmcli - add support for ``team.runner-fast-rate`` parameter for ``team`` connections
|
||||
(https://github.com/ansible-collections/community.general/issues/6065).
|
||||
- openbsd_pkg - set ``TERM`` to ``'dumb'`` in ``execute_command()`` to make the
module less dependent on the ``TERM`` environment variable set on the Ansible
controller (https://github.com/ansible-collections/community.general/pull/6149).
|
||||
- pipx - optional ``install_apps`` parameter added to install applications from
|
||||
injected packages (https://github.com/ansible-collections/community.general/pull/6198).
|
||||
- proxmox_kvm - add new ``archive`` parameter. This is needed to create a VM
|
||||
from an archive (backup) (https://github.com/ansible-collections/community.general/pull/6159).
|
||||
- redfish_info - adds commands to retrieve the HPE ThermalConfiguration and
|
||||
FanPercentMinimum settings from iLO (https://github.com/ansible-collections/community.general/pull/6208).
|
||||
- redhat_subscription - credentials (``username``, ``activationkey``, and so
|
||||
on) are required now only if a system needs to be registered, or ``force_register``
|
||||
is specified (https://github.com/ansible-collections/community.general/pull/5664).
|
||||
- redhat_subscription - the registration is done using the D-Bus ``rhsm`` service
|
||||
instead of spawning a ``subscription-manager register`` command, if possible;
|
||||
this avoids passing plain-text credentials as arguments to ``subscription-manager
|
||||
register``, which can be seen while that command runs (https://github.com/ansible-collections/community.general/pull/6122).
|
||||
- ssh_config - add ``proxyjump`` option (https://github.com/ansible-collections/community.general/pull/5970).
|
||||
- ssh_config - vendored StormSSH's config parser to avoid having to install
|
||||
StormSSH to use the module (https://github.com/ansible-collections/community.general/pull/6117).
|
||||
- znode module - optional ``use_tls`` parameter added for encrypted communication
|
||||
(https://github.com/ansible-collections/community.general/issues/6154).
|
||||
release_summary: Feature and bugfix release.
|
||||
fragments:
|
||||
- 3216-nmcli-bridge-idempotency-fix.yml
|
||||
- 3935-add-gitlab-group-runner.yml
|
||||
- 4387-nmcli-mtu-for-vlan-connection-fix.yml
|
||||
- 5664-redhat_subscription-credentials-when-needed.yaml
|
||||
- 5867-apt_rpm-add-clean-and-upgrade.yml
|
||||
- 5970-add-proxyjump-option-to-ssh-config.yml
|
||||
- 5974-nmcli_add_new_addr_gen_mode6_options.yml
|
||||
- 6.5.0.yml
|
||||
- 6049-dconf-strings.yml
|
||||
- 6065-nmcli-add-runner-fast-rate-option.yml
|
||||
- 6106-nmcli-ipv4-mayfail-idempotency-fix.yml
|
||||
- 6111-influxdb_user-check-mode.yaml
|
||||
- 6112-fix_key_error_in_gitlab_runner_creation_update.yml
|
||||
- 6114-memset-add-url-error-handling.yml
|
||||
- 6117-remove-stormssh-depend.yml
|
||||
- 6118-jail-plugin-fix-default-inventory_hostname.yml
|
||||
- 6119-opkg-update.yaml
|
||||
- 6122-redhat_subscription-subscribe-via-dbus.yaml
|
||||
- 6127-yarn-ignore-warnings.yml
|
||||
- 6131-fix-interfaces_file-for-no-leading-spaces.yml
|
||||
- 6138-fix-yarn-global.yml
|
||||
- 6149-openbsd_pkg-term.yml
|
||||
- 6154-znode-optional-tls.yml
|
||||
- 6158-create-proxmox-vm-from-archive.yml
|
||||
- 6160-add-command-make-output.yml
|
||||
- 6165-nmap-port.yml
|
||||
- 6180-replace-deprecated-badzipfile.yml
|
||||
- 6198-pipx-inject-install-apps.yml
|
||||
- 6200-adding-open-option-to-nmap.yml
|
||||
- 6208-hpe-thermal-fan-percent.yaml
|
||||
- 6210-add-worklog-functionality-to-jira.yml
|
||||
- 6211-rhsm-require-root.yml
|
||||
- 6227-xen-orchestra-check-response-id.yml
|
||||
- xxxx-ldap-ca-cert-file.yml
|
||||
modules:
|
||||
- description: Manage KDE configuration files
|
||||
name: kdeconfig
|
||||
namespace: ''
|
||||
plugins:
|
||||
lookup:
|
||||
- description: merge variables with a certain suffix
|
||||
name: merge_variables
|
||||
namespace: null
|
||||
release_date: '2023-03-27'
|
||||
6.6.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- archive - reduce RAM usage by generating CRC32 checksum over chunks (https://github.com/ansible-collections/community.general/pull/6274).
|
||||
- flatpak - fixes idempotency detection issues. In some cases the module could
fail to properly detect already existing Flatpaks because of a parameter which
only checks the installed apps (https://github.com/ansible-collections/community.general/pull/6289).
|
||||
- icinga2_host - fix the data structure sent to Icinga to make use of host templates
|
||||
and template vars (https://github.com/ansible-collections/community.general/pull/6286).
|
||||
- idrac_redfish_command - allow user to specify ``resource_id`` for ``CreateBiosConfigJob``
|
||||
to specify an exact manager (https://github.com/ansible-collections/community.general/issues/2090).
|
||||
- ini_file - make ``section`` parameter not required so it is possible to pass
``null`` as a value. This was only possible in the past due to a bug in ansible-core
that has now been fixed (https://github.com/ansible-collections/community.general/pull/6404).
|
||||
- keycloak - improve error messages (https://github.com/ansible-collections/community.general/pull/6318).
|
||||
- one_vm - fix syntax error when creating VMs with a more complex template (https://github.com/ansible-collections/community.general/issues/6225).
|
||||
- pipx - fixed handling of ``install_deps=true`` with ``state=latest`` and ``state=upgrade``
|
||||
(https://github.com/ansible-collections/community.general/pull/6303).
|
||||
- redhat_subscription - do not use D-Bus for registering when ``environment``
is specified, so it is possible again to specify environment names when registering,
as the D-Bus APIs work only with IDs (https://github.com/ansible-collections/community.general/pull/6319).
|
||||
- redhat_subscription - try to unregister only when already registered when
|
||||
``force_register`` is specified (https://github.com/ansible-collections/community.general/issues/6258,
|
||||
https://github.com/ansible-collections/community.general/pull/6259).
|
||||
- redhat_subscription - use the right D-Bus options for environments when registering
|
||||
a CentOS Stream 8 system and using ``environment`` (https://github.com/ansible-collections/community.general/pull/6275).
|
||||
- rhsm_release - make ``release`` parameter not required so it is possible to
pass ``null`` as a value. This was only possible in the past due to a bug
in ansible-core that has now been fixed (https://github.com/ansible-collections/community.general/pull/6401).
|
||||
- rundeck module utils - fix errors caused by empty API responses (https://github.com/ansible-collections/community.general/pull/6300).
|
||||
- rundeck_acl_policy - fix ``TypeError - byte indices must be integers or slices,
|
||||
not str`` error caused by empty API response. Update the module to use ``module_utils.rundeck``
|
||||
functions (https://github.com/ansible-collections/community.general/pull/5887,
|
||||
https://github.com/ansible-collections/community.general/pull/6300).
|
||||
- rundeck_project - update the module to use ``module_utils.rundeck`` functions
|
||||
(https://github.com/ansible-collections/community.general/issues/5742) (https://github.com/ansible-collections/community.general/pull/6300)
|
||||
- snap_alias - module would only recognize snap names containing letters, numbers,
or the underscore character, failing to identify valid snap names such as
``lxd.lxc`` (https://github.com/ansible-collections/community.general/pull/6361).
|
||||
minor_changes:
|
||||
- cpanm - minor change, use feature from ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/6385).
|
||||
- 'dconf - be forgiving about boolean values: convert them to GVariant booleans
|
||||
automatically (https://github.com/ansible-collections/community.general/pull/6206).'
|
||||
- dconf - minor refactoring improving parameters and dependencies validation
|
||||
(https://github.com/ansible-collections/community.general/pull/6336).
|
||||
- deps module utils - add function ``failed()`` providing the ability to check
|
||||
the dependency check result without triggering an exception (https://github.com/ansible-collections/community.general/pull/6383).
|
||||
- dig lookup plugin - support multiple domains to be queried, as indicated in the
documentation (https://github.com/ansible-collections/community.general/pull/6334).
|
||||
- gitlab_project - add new option ``topics`` for adding topics to GitLab projects
|
||||
(https://github.com/ansible-collections/community.general/pull/6278).
|
||||
- homebrew_cask - allows passing ``--greedy`` option to ``upgrade_all`` (https://github.com/ansible-collections/community.general/pull/6267).
|
||||
- idrac_redfish_command - add ``job_id`` to ``CreateBiosConfigJob`` response
|
||||
(https://github.com/ansible-collections/community.general/issues/5603).
|
||||
- ipa_hostgroup - add ``append`` parameter for adding new hosts to existing
hostgroups without changing existing hostgroup members (https://github.com/ansible-collections/community.general/pull/6203).
|
||||
- keycloak_authentication - add flow type option to sub flows to allow the creation
|
||||
of 'form-flow' sub flows like in Keycloak's built-in registration flow (https://github.com/ansible-collections/community.general/pull/6318).
|
||||
- mksysb - improved the output of the module in case of errors (https://github.com/ansible-collections/community.general/issues/6263).
|
||||
- nmap inventory plugin - added environment variables to configure ``address``
and ``exclude`` (https://github.com/ansible-collections/community.general/issues/6351).
|
||||
- nmcli - add ``macvlan`` connection type (https://github.com/ansible-collections/community.general/pull/6312).
|
||||
- pipx - add ``system_site_packages`` parameter to give application access to
|
||||
system-wide packages (https://github.com/ansible-collections/community.general/pull/6308).
|
||||
- pipx - ensure ``include_injected`` parameter works with ``state=upgrade``
|
||||
and ``state=latest`` (https://github.com/ansible-collections/community.general/pull/6212).
|
||||
- puppet - add new option ``skip_tags`` to exclude certain tagged resources
during a ``puppet agent`` or ``puppet apply`` run (https://github.com/ansible-collections/community.general/pull/6293).
|
||||
- terraform - remove the state file check condition and error block, because the
native implementation of terraform does not raise errors for a non-existent
state file (https://github.com/ansible-collections/community.general/pull/6296).
|
||||
- udm_dns_record - minor refactor to the code (https://github.com/ansible-collections/community.general/pull/6382).
|
||||
release_summary: Bugfix and feature release.
|
||||
fragments:
|
||||
- 2090-idrac-redfish-resource-id-fix.yml
|
||||
- 5603-redfish-idrac-job-id-in-response.yml
|
||||
- 6.6.0.yml
|
||||
- 6199-archive-generate-checksum-in-chunks.yml
|
||||
- 6203-add-append-option-to-ipa-hostgroup.yml
|
||||
- 6206-dconf-booleans.yml
|
||||
- 6212-pipx-include-injected.yml
|
||||
- 6259-redhat_subscription-fix-force.yaml
|
||||
- 6267-homebrew-cask-upgrade-all-greedy.yml
|
||||
- 6269-mksysb-output.yml
|
||||
- 6275-redhat_subscription-fix-environments-centos.yaml
|
||||
- 6277-add-topics-gitlab-project.yml
|
||||
- 6286-icinga2_host-template-and-template-vars.yml
|
||||
- 6289-bugfix-flatpak-check-if-already-installed.yml
|
||||
- 6293-add-puppet-skip-tags-option.yaml
|
||||
- 6294-fix-one_vm-instantiation.yml
|
||||
- 6296-LanceNero-Terraform_statefile_check.yml
|
||||
- 6300-rundeck-modules-fixes-and-improvements.yml
|
||||
- 6303-pipx-fix-state-latest-and-add-system-site-packages.yml
|
||||
- 6308-pipx-add-system-site-packages.yml
|
||||
- 6312-nmcli-add-macvlan-connection-type.yml
|
||||
- 6318-add-form-flow.yml
|
||||
- 6319-redhat_subscription-fix-environment-parameter.yaml
|
||||
- 6334-dig-support-multiple-domains.yml
|
||||
- 6336-dconf-refactor.yml
|
||||
- 6351-support-env-variables-to-nmap-dynamic-inventoiry.yaml
|
||||
- 6361-snap-alias-regex-bugfix.yml
|
||||
- 6382-udm-dns-record-refactor.yml
|
||||
- 6383-deps-failed.yml
|
||||
- 6385-cpan-mh-feat.yml
|
||||
- 6401-rhsm_release-required.yml
|
||||
- 6404-ini_file-section.yml
|
||||
modules:
|
||||
- description: Query btrfs filesystem info
|
||||
name: btrfs_info
|
||||
namespace: ''
|
||||
- description: Manage btrfs subvolumes
|
||||
name: btrfs_subvolume
|
||||
namespace: ''
|
||||
- description: Manages Out-Of-Band controllers using Redfish APIs
|
||||
name: ilo_redfish_command
|
||||
namespace: ''
|
||||
- description: Allows administration of Keycloak client authorization scopes via
|
||||
Keycloak API
|
||||
name: keycloak_authz_authorization_scope
|
||||
namespace: ''
|
||||
- description: Set the type of a client scope in a realm or client via Keycloak API
|
||||
name: keycloak_clientscope_type
|
||||
namespace: ''
|
||||
release_date: '2023-04-24'
|
||||
6.6.1:
|
||||
changes:
|
||||
bugfixes:
|
||||
- deps module utils - do not fail when dependency cannot be found (https://github.com/ansible-collections/community.general/pull/6479).
|
||||
- nmcli - fix bond option ``xmit_hash_policy`` (https://github.com/ansible-collections/community.general/pull/6527).
|
||||
- passwordstore lookup plugin - make compatible with ansible-core 2.16 (https://github.com/ansible-collections/community.general/pull/6447).
|
||||
- portage - fix ``changed_use`` and ``newuse`` not triggering rebuilds (https://github.com/ansible-collections/community.general/issues/6008,
|
||||
https://github.com/ansible-collections/community.general/pull/6548).
|
||||
- 'portage - update the logic for generating the emerge command arguments to
|
||||
ensure that ``withbdeps: false`` results in passing an ``n`` argument with
|
||||
the ``--with-bdeps`` emerge flag (https://github.com/ansible-collections/community.general/issues/6451,
|
||||
https://github.com/ansible-collections/community.general/pull/6456).'
|
||||
- proxmox_tasks_info - remove ``api_user`` + ``api_password`` constraint from
``required_together``, as it caused ``api_password`` to be required even when an API
token parameter is used (https://github.com/ansible-collections/community.general/issues/6201).
|
||||
- puppet - handling of the ``noop`` parameter was not working at all; this has
now been fixed (https://github.com/ansible-collections/community.general/issues/6452,
https://github.com/ansible-collections/community.general/issues/6458).
|
||||
- terraform - fix broken ``warn()`` call (https://github.com/ansible-collections/community.general/pull/6497).
|
||||
- xfs_quota - in case of a project quota, the call to ``xfs_quota`` did not
|
||||
initialize/reset the project (https://github.com/ansible-collections/community.general/issues/5143).
|
||||
- zypper - added handling of zypper exit code 102. The changed state is now set
correctly, and rc 102 is still preserved so it can be evaluated by the playbook (https://github.com/ansible-collections/community.general/pull/6534).
|
||||
minor_changes:
|
||||
- dconf - if ``gi.repository.GLib`` is missing, try to respawn in a Python interpreter
|
||||
that has it (https://github.com/ansible-collections/community.general/pull/6491).
|
||||
release_summary: Regular bugfix release.
|
||||
fragments:
|
||||
- 5143-fix-xfs-quota-project-init.yml
|
||||
- 6.6.1.yml
|
||||
- 6456-fix-portage-withbdeps-false.yml
|
||||
- 6458-puppet-noop.yml
|
||||
- 6491-dconf-respawn.yml
|
||||
- 6497-terraform-fix.yml
|
||||
- 6527-nmcli-bond-fix-xmit_hash_policy.yml
|
||||
- 6534-zypper-exitcode-102-handled.yaml
|
||||
- 6548-portage-changed_use-newuse.yml
|
||||
- 6554-proxmox-tasks-info-fix-required-password.yaml
|
||||
- deps.yml
|
||||
- passwordstore-lock.yml
|
||||
release_date: '2023-05-22'
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
|
||||
namespace: community
|
||||
name: general
|
||||
version: 6.3.0
|
||||
version: 6.6.1
|
||||
readme: README.md
|
||||
authors:
|
||||
- Ansible (https://github.com/ansible)
|
||||
|
||||
plugins/cache/memcached.py (vendored)
@@ -52,11 +52,9 @@ import time
|
||||
from multiprocessing import Lock
|
||||
from itertools import chain
|
||||
|
||||
from ansible import constants as C
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils.common._collections_compat import MutableSet
|
||||
from ansible.plugins.cache import BaseCacheModule
|
||||
from ansible.release import __version__ as ansible_base_version
|
||||
from ansible.utils.display import Display
|
||||
|
||||
try:
|
||||
|
||||
plugins/cache/redis.py (vendored)
@@ -67,12 +67,10 @@ import re
|
||||
import time
|
||||
import json
|
||||
|
||||
from ansible import constants as C
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
|
||||
from ansible.plugins.cache import BaseCacheModule
|
||||
from ansible.release import __version__ as ansible_base_version
|
||||
from ansible.utils.display import Display
|
||||
|
||||
try:
|
||||
|
||||
@@ -27,7 +27,6 @@ DOCUMENTATION = '''
|
||||
from ansible import constants as C
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
from ansible.utils.color import colorize, hostcolor
|
||||
from ansible.template import Templar
|
||||
from ansible.playbook.task_include import TaskInclude
|
||||
|
||||
|
||||
|
||||
@@ -786,10 +786,6 @@ playbook.yml: >
|
||||
|
||||
import sys
|
||||
from contextlib import contextmanager
|
||||
from ansible import constants as C
|
||||
from ansible.playbook.task_include import TaskInclude
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
from ansible.utils.color import colorize, hostcolor
|
||||
from ansible.template import Templar
|
||||
from ansible.vars.manager import VariableManager
|
||||
from ansible.plugins.callback.default import CallbackModule as Default
|
||||
|
||||
@@ -54,7 +54,6 @@ examples: |
|
||||
import hashlib
|
||||
import hmac
|
||||
import base64
|
||||
import logging
|
||||
import json
|
||||
import uuid
|
||||
import socket
|
||||
|
||||
@@ -79,7 +79,6 @@ import re
|
||||
import email.utils
|
||||
import smtplib
|
||||
|
||||
from ansible.module_utils.six import string_types
|
||||
from ansible.module_utils.common.text.converters import to_bytes
|
||||
from ansible.parsing.ajson import AnsibleJSONEncoder
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
|
||||
@@ -67,9 +67,6 @@ DOCUMENTATION = '''
|
||||
type: string
|
||||
'''
|
||||
|
||||
import os
|
||||
import json
|
||||
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlencode
|
||||
from ansible.module_utils.common.text.converters import to_bytes
|
||||
from ansible.module_utils.urls import open_url
|
||||
|
||||
@@ -54,9 +54,6 @@ DOCUMENTATION = '''
|
||||
version_added: 4.5.0
|
||||
'''
|
||||
|
||||
import os
|
||||
import json
|
||||
|
||||
import logging
|
||||
import logging.handlers
|
||||
|
||||
|
||||
@@ -25,12 +25,10 @@ import yaml
|
||||
import json
|
||||
import re
|
||||
import string
|
||||
import sys
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_bytes, to_text
|
||||
from ansible.module_utils.six import string_types
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
from ansible.parsing.yaml.dumper import AnsibleDumper
|
||||
from ansible.plugins.callback import CallbackBase, strip_internal_keys, module_response_deepcopy
|
||||
from ansible.plugins.callback import strip_internal_keys, module_response_deepcopy
|
||||
from ansible.plugins.callback.default import CallbackModule as Default
|
||||
|
||||
|
||||
|
||||
@@ -22,6 +22,7 @@ DOCUMENTATION = '''
|
||||
- Path to the jail
|
||||
default: inventory_hostname
|
||||
vars:
|
||||
- name: inventory_hostname
|
||||
- name: ansible_host
|
||||
- name: ansible_jail_host
|
||||
remote_user:
|
||||
|
||||
@@ -29,8 +29,7 @@ options:
|
||||
required: true
|
||||
|
||||
requirements:
|
||||
- hpe3par_sdk >= 1.0.2. Install using 'pip install hpe3par_sdk'
|
||||
- hpe3par_sdk >= 1.0.2. Install using C(pip install hpe3par_sdk).
|
||||
- WSAPI service should be enabled on the 3PAR storage array.
|
||||
notes:
|
||||
- check_mode not supported
|
||||
'''
|
||||
|
||||
@@ -24,6 +24,11 @@ options:
|
||||
- The password to use with I(bind_dn).
|
||||
type: str
|
||||
default: ''
|
||||
ca_path:
|
||||
description:
|
||||
- Set the path to PEM file with CA certs.
|
||||
type: path
|
||||
version_added: "6.5.0"
|
||||
dn:
|
||||
required: true
|
||||
description:
|
||||
@@ -65,4 +70,15 @@ options:
|
||||
choices: ['external', 'gssapi']
|
||||
default: external
|
||||
version_added: "2.0.0"
|
||||
xorder_discovery:
|
||||
description:
|
||||
- Set the behavior on how to process Xordered DNs.
|
||||
- C(enable) will perform a C(ONELEVEL) search below the superior RDN to find the matching DN.
|
||||
- C(disable) will always use the DN unmodified (as passed by the I(dn) parameter).
|
||||
- C(auto) will only perform a search if the first RDN does not contain an index number (C({x})).
|
||||
- Possible choices are C(enable), C(auto), C(disable).
|
||||
type: str
|
||||
choices: ['enable', 'auto', 'disable']
|
||||
default: auto
|
||||
version_added: "6.4.0"
|
||||
'''
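As a side note, the C({x}) index check that I(xorder_discovery=auto) relies on can be illustrated with a few lines of plain Python; the regular expression below is an editorial assumption for illustration only, not the module's actual implementation:

import re

def first_rdn_has_index(dn):
    # An X-ordered RDN carries an index such as {0} or {12} in its first component.
    first_rdn = dn.split(',')[0]
    return re.search(r'\{\d+\}', first_rdn) is not None

print(first_rdn_has_index('cn={0}default,cn=policies,dc=example,dc=com'))  # True: the DN is used unmodified
print(first_rdn_has_index('cn=admin,dc=example,dc=com'))                   # False: a ONELEVEL search would be performed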
|
||||
|
||||
@@ -80,13 +80,13 @@ from ansible.errors import AnsibleError, AnsibleFilterError
|
||||
import importlib
|
||||
|
||||
try:
|
||||
import jc
|
||||
import jc # noqa: F401, pylint: disable=unused-import
|
||||
HAS_LIB = True
|
||||
except ImportError:
|
||||
HAS_LIB = False
|
||||
|
||||
|
||||
def jc(data, parser, quiet=True, raw=False):
|
||||
def jc_filter(data, parser, quiet=True, raw=False):
|
||||
"""Convert returned command output to JSON using the JC library
|
||||
|
||||
Arguments:
|
||||
@@ -138,8 +138,14 @@ def jc(data, parser, quiet=True, raw=False):
|
||||
raise AnsibleError('You need to install "jc" as a Python library on the Ansible controller prior to running jc filter')
|
||||
|
||||
try:
|
||||
jc_parser = importlib.import_module('jc.parsers.' + parser)
|
||||
return jc_parser.parse(data, quiet=quiet, raw=raw)
|
||||
# new API (jc v1.18.0 and higher) allows use of plugin parsers
|
||||
if hasattr(jc, 'parse'):
|
||||
return jc.parse(parser, data, quiet=quiet, raw=raw)
|
||||
|
||||
# old API (jc v1.17.7 and lower)
|
||||
else:
|
||||
jc_parser = importlib.import_module('jc.parsers.' + parser)
|
||||
return jc_parser.parse(data, quiet=quiet, raw=raw)
|
||||
|
||||
except Exception as e:
|
||||
raise AnsibleFilterError('Error in jc filter plugin: %s' % e)
|
||||
@@ -150,5 +156,5 @@ class FilterModule(object):
|
||||
|
||||
def filters(self):
|
||||
return {
|
||||
'jc': jc
|
||||
'jc': jc_filter,
|
||||
}
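For readers unfamiliar with the two jc APIs handled above, here is a minimal standalone sketch of the same version fallback (assuming the third-party ``jc`` library is installed; the sample output string is made up):

import importlib
import jc

raw_output = 'Filesystem     1K-blocks  Used Available Use% Mounted on'   # made-up ``df`` output
parser_name = 'df'

if hasattr(jc, 'parse'):
    # new API (jc v1.18.0 and higher): the top-level parse() also supports plugin parsers
    result = jc.parse(parser_name, raw_output, quiet=True, raw=False)
else:
    # old API (jc v1.17.7 and lower): import the parser module directly
    parser_module = importlib.import_module('jc.parsers.' + parser_name)
    result = parser_module.parse(raw_output, quiet=True, raw=False)

print(result)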
|
||||
|
||||
@@ -102,8 +102,6 @@ from ansible.errors import AnsibleFilterError
|
||||
from ansible.module_utils.six import string_types
|
||||
from ansible.module_utils.common._collections_compat import Mapping, Sequence
|
||||
from ansible.utils.vars import merge_hash
|
||||
from ansible.release import __version__ as ansible_version
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
from collections import defaultdict
|
||||
from operator import itemgetter
|
||||
|
||||
@@ -121,10 +121,7 @@ compose:
|
||||
ansible_host: "ipv4 | community.general.json_query('[?public==`false`].address') | first"
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
from ansible.errors import AnsibleError, AnsibleParserError
|
||||
from ansible.module_utils.six import string_types
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
|
||||
|
||||
|
||||
|
||||
@@ -150,12 +150,10 @@ groupby:
|
||||
attribute: internals
|
||||
'''
|
||||
|
||||
import binascii
|
||||
import json
|
||||
import re
|
||||
import time
|
||||
import os
|
||||
import socket
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin
|
||||
from ansible.module_utils.common.text.converters import to_native, to_text
|
||||
from ansible.module_utils.common.dict_transformations import dict_merge
|
||||
|
||||
@@ -30,12 +30,27 @@ DOCUMENTATION = '''
|
||||
address:
|
||||
description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation.
|
||||
required: true
|
||||
env:
|
||||
- name: ANSIBLE_NMAP_ADDRESS
|
||||
version_added: 6.6.0
|
||||
exclude:
|
||||
description: list of addresses to exclude
|
||||
description:
|
||||
- List of addresses to exclude.
|
||||
- For example C(10.2.2.15-25) or C(10.2.2.15,10.2.2.16).
|
||||
type: list
|
||||
elements: string
|
||||
env:
|
||||
- name: ANSIBLE_NMAP_EXCLUDE
|
||||
version_added: 6.6.0
|
||||
port:
|
||||
description:
|
||||
- Only scan specific port or port range (C(-p)).
|
||||
- For example, you could pass C(22) for a single port, C(1-65535) for a range of ports,
|
||||
or C(U:53,137,T:21-25,139,8080,S:9) to check port 53 with UDP, ports 21-25 with TCP, port 9 with SCTP, and ports 137, 139, and 8080 with all.
|
||||
type: string
|
||||
version_added: 6.5.0
|
||||
ports:
|
||||
description: Enable/disable scanning for open ports
|
||||
description: Enable/disable scanning ports.
|
||||
type: boolean
|
||||
default: true
|
||||
ipv4:
|
||||
@@ -60,6 +75,11 @@ DOCUMENTATION = '''
|
||||
type: boolean
|
||||
default: false
|
||||
version_added: 6.1.0
|
||||
open:
|
||||
description: Only scan for open (or possibly open) ports.
|
||||
type: boolean
|
||||
default: false
|
||||
version_added: 6.5.0
|
||||
dns_resolve:
|
||||
description: Whether to always (C(true)) or never (C(false)) do DNS resolution.
|
||||
type: boolean
|
||||
@@ -81,6 +101,14 @@ plugin: community.general.nmap
|
||||
sudo: true
|
||||
strict: false
|
||||
address: 192.168.0.0/24
|
||||
|
||||
# an nmap scan specifying ports and classifying results to an inventory group
|
||||
plugin: community.general.nmap
|
||||
address: 192.168.0.0/24
|
||||
exclude: 192.168.0.1, web.example.com
|
||||
port: 22, 443
|
||||
groups:
|
||||
web_servers: "ports | selectattr('port', 'equalto', '443')"
|
||||
'''
|
||||
|
||||
import os
|
||||
@@ -171,6 +199,10 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
if self._options['sudo']:
|
||||
cmd.insert(0, 'sudo')
|
||||
|
||||
if self._options['port']:
|
||||
cmd.append('-p')
|
||||
cmd.append(self._options['port'])
|
||||
|
||||
if not self._options['ports']:
|
||||
cmd.append('-sP')
|
||||
|
||||
@@ -194,6 +226,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
if self._options['icmp_timestamp']:
|
||||
cmd.append('-PP')
|
||||
|
||||
if self._options['open']:
|
||||
cmd.append('--open')
|
||||
|
||||
cmd.append(self._options['address'])
|
||||
try:
|
||||
# execute
|
||||
|
||||
@@ -65,7 +65,7 @@ from sys import version as python_version
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils.urls import open_url
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin
|
||||
from ansible.module_utils.common.text.converters import to_native, to_text
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
from ansible.module_utils.ansible_release import __version__ as ansible_version
|
||||
from ansible.module_utils.six.moves.urllib.parse import urljoin
|
||||
|
||||
|
||||
@@ -277,6 +277,11 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password, })
|
||||
|
||||
a = self._get_session()
|
||||
|
||||
if a.verify is False:
|
||||
from requests.packages.urllib3 import disable_warnings
|
||||
disable_warnings()
|
||||
|
||||
ret = a.post('%s/api2/json/access/ticket' % self.proxmox_url, data=credentials)
|
||||
|
||||
json = ret.json()
|
||||
|
||||
@@ -78,6 +78,7 @@ compose:
|
||||
|
||||
import json
|
||||
import ssl
|
||||
from time import sleep
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
|
||||
@@ -138,21 +139,42 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
self.conn = create_connection(
|
||||
'{0}://{1}/api/'.format(proto, xoa_api_host), sslopt=sslopt)
|
||||
|
||||
CALL_TIMEOUT = 100
|
||||
"""Number of 1/10ths of a second to wait before method call times out."""
|
||||
|
||||
def call(self, method, params):
|
||||
"""Calls a method on the XO server with the provided parameters."""
|
||||
id = self.pointer
|
||||
self.conn.send(json.dumps({
|
||||
'id': id,
|
||||
'jsonrpc': '2.0',
|
||||
'method': method,
|
||||
'params': params
|
||||
}))
|
||||
|
||||
waited = 0
|
||||
while waited < self.CALL_TIMEOUT:
|
||||
response = json.loads(self.conn.recv())
|
||||
if 'id' in response and response['id'] == id:
|
||||
return response
|
||||
else:
|
||||
sleep(0.1)
|
||||
waited += 1
|
||||
|
||||
raise AnsibleError(
|
||||
'Method call {method} timed out after {timeout} seconds.'.format(method=method, timeout=self.CALL_TIMEOUT / 10))
|
||||
|
||||
def login(self, user, password):
|
||||
payload = {'id': self.pointer, 'jsonrpc': '2.0', 'method': 'session.signIn', 'params': {
|
||||
'username': user, 'password': password}}
|
||||
self.conn.send(json.dumps(payload))
|
||||
result = json.loads(self.conn.recv())
|
||||
result = self.call('session.signIn', {
|
||||
'username': user, 'password': password
|
||||
})
|
||||
|
||||
if 'error' in result:
|
||||
raise AnsibleError(
|
||||
'Could not connect: {0}'.format(result['error']))
|
||||
|
||||
def get_object(self, name):
|
||||
payload = {'id': self.pointer, 'jsonrpc': '2.0',
|
||||
'method': 'xo.getAllObjects', 'params': {'filter': {'type': name}}}
|
||||
self.conn.send(json.dumps(payload))
|
||||
answer = json.loads(self.conn.recv())
|
||||
answer = self.call('xo.getAllObjects', {'filter': {'type': name}})
|
||||
|
||||
if 'error' in answer:
|
||||
raise AnsibleError(
|
||||
|
||||
@@ -66,7 +66,12 @@ class LookupModule(LookupBase):
|
||||
"""
|
||||
results = []
|
||||
for x in terms:
|
||||
intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader)
|
||||
try:
|
||||
intermediate = listify_lookup_plugin_terms(x, templar=self._templar)
|
||||
except TypeError:
|
||||
# The loader argument is deprecated in ansible-core 2.14+. Fall back to
|
||||
# pre-2.14 behavior for older ansible-core versions.
|
||||
intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader)
|
||||
results.append(intermediate)
|
||||
return results
|
||||
|
||||
|
||||
@@ -105,7 +105,6 @@ RETURN = """
|
||||
type: dict
|
||||
"""
|
||||
|
||||
import os
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlparse
|
||||
from ansible.errors import AnsibleError, AnsibleAssertionError
|
||||
from ansible.plugins.lookup import LookupBase
|
||||
|
||||
@@ -93,8 +93,6 @@ RETURN = """
|
||||
type: str
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.lookup import LookupBase
|
||||
|
||||
|
||||
@@ -80,7 +80,6 @@ from subprocess import Popen
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.lookup import LookupBase
|
||||
from ansible.parsing.splitter import parse_kv
|
||||
from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native
|
||||
from ansible.utils.display import Display
|
||||
|
||||
|
||||
@@ -61,6 +61,7 @@ DOCUMENTATION = '''
|
||||
description:
|
||||
- Return empty result without empty strings, and return empty list instead of C(NXDOMAIN).
|
||||
- The default for this option will likely change to C(true) in the future.
|
||||
- This option will be forced to C(true) if multiple domains to be queried are specified.
|
||||
default: false
|
||||
type: bool
|
||||
version_added: 6.0.0
|
||||
@@ -95,6 +96,21 @@ EXAMPLES = """
|
||||
msg: "MX record for gmail.com {{ item }}"
|
||||
with_items: "{{ lookup('community.general.dig', 'gmail.com./MX', wantlist=true) }}"
|
||||
|
||||
- name: Lookup multiple names at once
|
||||
ansible.builtin.debug:
|
||||
msg: "A record found {{ item }}"
|
||||
loop: "{{ query('community.general.dig', 'example.org.', 'example.com.', 'gmail.com.') }}"
|
||||
|
||||
- name: Lookup multiple names at once (from list variable)
|
||||
ansible.builtin.debug:
|
||||
msg: "A record found {{ item }}"
|
||||
loop: "{{ query('community.general.dig', *hosts) }}"
|
||||
vars:
|
||||
hosts:
|
||||
- example.org.
|
||||
- example.com.
|
||||
- gmail.com.
|
||||
|
||||
- ansible.builtin.debug:
|
||||
msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '192.0.2.5/PTR') }}"
|
||||
- ansible.builtin.debug:
|
||||
@@ -308,7 +324,7 @@ class LookupModule(LookupBase):
|
||||
edns_size = 4096
|
||||
myres.use_edns(0, ednsflags=dns.flags.DO, payload=edns_size)
|
||||
|
||||
domain = None
|
||||
domains = []
|
||||
qtype = self.get_option('qtype')
|
||||
flat = self.get_option('flat')
|
||||
fail_on_error = self.get_option('fail_on_error')
|
||||
@@ -365,63 +381,71 @@ class LookupModule(LookupBase):
|
||||
if '/' in t:
|
||||
try:
|
||||
domain, qtype = t.split('/')
|
||||
domains.append(domain)
|
||||
except Exception:
|
||||
domain = t
|
||||
domains.append(t)
|
||||
else:
|
||||
domain = t
|
||||
domains.append(t)
|
||||
|
||||
# print "--- domain = {0} qtype={1} rdclass={2}".format(domain, qtype, rdclass)
|
||||
|
||||
if qtype.upper() == 'PTR':
|
||||
reversed_domains = []
|
||||
for domain in domains:
|
||||
try:
|
||||
n = dns.reversename.from_address(domain)
|
||||
reversed_domains.append(n.to_text())
|
||||
except dns.exception.SyntaxError:
|
||||
pass
|
||||
except Exception as e:
|
||||
raise AnsibleError("dns.reversename unhandled exception %s" % to_native(e))
|
||||
domains = reversed_domains
|
||||
|
||||
if len(domains) > 1:
|
||||
real_empty = True
|
||||
|
||||
ret = []
|
||||
|
||||
if qtype.upper() == 'PTR':
|
||||
for domain in domains:
|
||||
try:
|
||||
n = dns.reversename.from_address(domain)
|
||||
domain = n.to_text()
|
||||
except dns.exception.SyntaxError:
|
||||
pass
|
||||
except Exception as e:
|
||||
raise AnsibleError("dns.reversename unhandled exception %s" % to_native(e))
|
||||
answers = myres.query(domain, qtype, rdclass=rdclass)
|
||||
for rdata in answers:
|
||||
s = rdata.to_text()
|
||||
if qtype.upper() == 'TXT':
|
||||
s = s[1:-1] # Strip outside quotes on TXT rdata
|
||||
|
||||
try:
|
||||
answers = myres.query(domain, qtype, rdclass=rdclass)
|
||||
for rdata in answers:
|
||||
s = rdata.to_text()
|
||||
if qtype.upper() == 'TXT':
|
||||
s = s[1:-1] # Strip outside quotes on TXT rdata
|
||||
if flat:
|
||||
ret.append(s)
|
||||
else:
|
||||
try:
|
||||
rd = make_rdata_dict(rdata)
|
||||
rd['owner'] = answers.canonical_name.to_text()
|
||||
rd['type'] = dns.rdatatype.to_text(rdata.rdtype)
|
||||
rd['ttl'] = answers.rrset.ttl
|
||||
rd['class'] = dns.rdataclass.to_text(rdata.rdclass)
|
||||
|
||||
if flat:
|
||||
ret.append(s)
|
||||
else:
|
||||
try:
|
||||
rd = make_rdata_dict(rdata)
|
||||
rd['owner'] = answers.canonical_name.to_text()
|
||||
rd['type'] = dns.rdatatype.to_text(rdata.rdtype)
|
||||
rd['ttl'] = answers.rrset.ttl
|
||||
rd['class'] = dns.rdataclass.to_text(rdata.rdclass)
|
||||
ret.append(rd)
|
||||
except Exception as err:
|
||||
if fail_on_error:
|
||||
raise AnsibleError("Lookup failed: %s" % str(err))
|
||||
ret.append(str(err))
|
||||
|
||||
ret.append(rd)
|
||||
except Exception as err:
|
||||
if fail_on_error:
|
||||
raise AnsibleError("Lookup failed: %s" % str(err))
|
||||
ret.append(str(err))
|
||||
|
||||
except dns.resolver.NXDOMAIN as err:
|
||||
if fail_on_error:
|
||||
raise AnsibleError("Lookup failed: %s" % str(err))
|
||||
if not real_empty:
|
||||
ret.append('NXDOMAIN')
|
||||
except dns.resolver.NoAnswer as err:
|
||||
if fail_on_error:
|
||||
raise AnsibleError("Lookup failed: %s" % str(err))
|
||||
if not real_empty:
|
||||
ret.append("")
|
||||
except dns.resolver.Timeout as err:
|
||||
if fail_on_error:
|
||||
raise AnsibleError("Lookup failed: %s" % str(err))
|
||||
if not real_empty:
|
||||
ret.append("")
|
||||
except dns.exception.DNSException as err:
|
||||
raise AnsibleError("dns.resolver unhandled exception %s" % to_native(err))
|
||||
except dns.resolver.NXDOMAIN as err:
|
||||
if fail_on_error:
|
||||
raise AnsibleError("Lookup failed: %s" % str(err))
|
||||
if not real_empty:
|
||||
ret.append('NXDOMAIN')
|
||||
except dns.resolver.NoAnswer as err:
|
||||
if fail_on_error:
|
||||
raise AnsibleError("Lookup failed: %s" % str(err))
|
||||
if not real_empty:
|
||||
ret.append("")
|
||||
except dns.resolver.Timeout as err:
|
||||
if fail_on_error:
|
||||
raise AnsibleError("Lookup failed: %s" % str(err))
|
||||
if not real_empty:
|
||||
ret.append("")
|
||||
except dns.exception.DNSException as err:
|
||||
raise AnsibleError("dns.resolver unhandled exception %s" % to_native(err))
|
||||
|
||||
return ret
|
||||
|
||||
@@ -136,12 +136,11 @@ RETURN = '''
|
||||
|
||||
import re
|
||||
|
||||
from ansible.plugins.lookup import LookupBase
|
||||
from ansible.utils.display import Display
|
||||
from ansible.errors import AnsibleLookupError
|
||||
from ansible.module_utils.basic import missing_required_lib
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
from ansible.plugins.lookup import LookupBase
|
||||
from ansible.errors import AnsibleError, AnsibleLookupError
|
||||
from ansible.utils.display import Display
|
||||
|
||||
try:
|
||||
import etcd3
|
||||
|
||||
@@ -67,7 +67,12 @@ class LookupModule(LookupBase):
|
||||
|
||||
if isinstance(term, string_types):
|
||||
# convert a variable to a list
|
||||
term2 = listify_lookup_plugin_terms(term, templar=self._templar, loader=self._loader)
|
||||
try:
|
||||
term2 = listify_lookup_plugin_terms(term, templar=self._templar)
|
||||
except TypeError:
|
||||
# The loader argument is deprecated in ansible-core 2.14+. Fall back to
|
||||
# pre-2.14 behavior for older ansible-core versions.
|
||||
term2 = listify_lookup_plugin_terms(term, templar=self._templar, loader=self._loader)
|
||||
# but avoid converting a plain string to a list of one string
|
||||
if term2 != [term]:
|
||||
term = term2
|
||||
|
||||
@@ -61,8 +61,6 @@ RETURN = """
|
||||
elements: str
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
from ansible.plugins.lookup import LookupBase
|
||||
from ansible.utils.cmd_functions import run_cmd
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
|
||||
@@ -69,7 +69,6 @@ from ansible.utils.display import Display
|
||||
from traceback import format_exception
|
||||
import json
|
||||
import sys
|
||||
import os
|
||||
|
||||
display = Display()
|
||||
|
||||
|
||||
plugins/lookup/merge_variables.py (new file, 212 lines)
@@ -0,0 +1,212 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2020, Thales Netherlands
|
||||
# Copyright (c) 2021, Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = """
|
||||
author:
|
||||
- Roy Lenferink (@rlenferink)
|
||||
- Mark Ettema (@m-a-r-k-e)
|
||||
name: merge_variables
|
||||
short_description: merge variables with a certain suffix
|
||||
description:
|
||||
- This lookup returns the merged result of all variables in scope that match the given prefixes, suffixes, or
|
||||
regular expressions, optionally.
|
||||
version_added: 6.5.0
|
||||
options:
|
||||
_terms:
|
||||
description:
|
||||
- Depending on the value of I(pattern_type), this is a list of prefixes, suffixes, or regular expressions
|
||||
that will be used to match all variables that should be merged.
|
||||
required: true
|
||||
type: list
|
||||
elements: str
|
||||
pattern_type:
|
||||
description:
|
||||
- Change the way of searching for the specified pattern.
|
||||
type: str
|
||||
default: 'regex'
|
||||
choices:
|
||||
- prefix
|
||||
- suffix
|
||||
- regex
|
||||
env:
|
||||
- name: ANSIBLE_MERGE_VARIABLES_PATTERN_TYPE
|
||||
ini:
|
||||
- section: merge_variables_lookup
|
||||
key: pattern_type
|
||||
initial_value:
|
||||
description:
|
||||
- An initial value to start with.
|
||||
type: raw
|
||||
override:
|
||||
description:
|
||||
- Return an error, print a warning or ignore it when a key will be overwritten.
|
||||
- The default behavior C(error) makes the plugin fail when a key would be overwritten.
|
||||
- When C(warn) and C(ignore) are used, note that it is important to know that the variables
|
||||
are sorted by name before being merged. Keys for later variables in this order will overwrite
|
||||
keys of the same name for variables earlier in this order. To avoid potential confusion,
|
||||
better use I(override=error) whenever possible.
|
||||
type: str
|
||||
default: 'error'
|
||||
choices:
|
||||
- error
|
||||
- warn
|
||||
- ignore
|
||||
env:
|
||||
- name: ANSIBLE_MERGE_VARIABLES_OVERRIDE
|
||||
ini:
|
||||
- section: merge_variables_lookup
|
||||
key: override
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
# Some example variables, they can be defined anywhere as long as they are in scope
|
||||
test_init_list:
|
||||
- "list init item 1"
|
||||
- "list init item 2"
|
||||
|
||||
testa__test_list:
|
||||
- "test a item 1"
|
||||
|
||||
testb__test_list:
|
||||
- "test b item 1"
|
||||
|
||||
testa__test_dict:
|
||||
ports:
|
||||
- 1
|
||||
|
||||
testb__test_dict:
|
||||
ports:
|
||||
- 3
|
||||
|
||||
|
||||
# Merge variables that end with '__test_dict' and store the result in a variable 'example_a'
|
||||
example_a: "{{ lookup('community.general.merge_variables', '__test_dict', pattern_type='suffix') }}"
|
||||
|
||||
# The variable example_a now contains:
|
||||
# ports:
|
||||
# - 1
|
||||
# - 3
|
||||
|
||||
|
||||
# Merge variables that match the '^.+__test_list$' regular expression, starting with an initial value and store the
|
||||
# result in a variable 'example_b'
|
||||
example_b: "{{ lookup('community.general.merge_variables', '^.+__test_list$', initial_value=test_init_list) }}"
|
||||
|
||||
# The variable example_b now contains:
|
||||
# - "list init item 1"
|
||||
# - "list init item 2"
|
||||
# - "test a item 1"
|
||||
# - "test b item 1"
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
_raw:
|
||||
description: In case the search matches list items, a list will be returned. In case the search matches dicts, a
|
||||
dict will be returned.
|
||||
type: raw
|
||||
elements: raw
|
||||
"""
|
||||
|
||||
import re
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.lookup import LookupBase
|
||||
from ansible.utils.display import Display
|
||||
|
||||
display = Display()
|
||||
|
||||
|
||||
def _verify_and_get_type(variable):
|
||||
if isinstance(variable, list):
|
||||
return "list"
|
||||
elif isinstance(variable, dict):
|
||||
return "dict"
|
||||
else:
|
||||
raise AnsibleError("Not supported type detected, variable must be a list or a dict")
|
||||
|
||||
|
||||
class LookupModule(LookupBase):
|
||||
|
||||
def run(self, terms, variables=None, **kwargs):
|
||||
self.set_options(direct=kwargs)
|
||||
initial_value = self.get_option("initial_value", None)
|
||||
self._override = self.get_option('override', 'error')
|
||||
self._pattern_type = self.get_option('pattern_type', 'regex')
|
||||
|
||||
ret = []
|
||||
for term in terms:
|
||||
if not isinstance(term, str):
|
||||
raise AnsibleError("Non-string type '{0}' passed, only 'str' types are allowed!".format(type(term)))
|
||||
|
||||
ret.append(self._merge_vars(term, initial_value, variables))
|
||||
|
||||
return ret
|
||||
|
||||
def _var_matches(self, key, search_pattern):
|
||||
if self._pattern_type == "prefix":
|
||||
return key.startswith(search_pattern)
|
||||
elif self._pattern_type == "suffix":
|
||||
return key.endswith(search_pattern)
|
||||
elif self._pattern_type == "regex":
|
||||
matcher = re.compile(search_pattern)
|
||||
return matcher.search(key)
|
||||
|
||||
return False
|
||||
|
||||
def _merge_vars(self, search_pattern, initial_value, variables):
|
||||
display.vvv("Merge variables with {0}: {1}".format(self._pattern_type, search_pattern))
|
||||
var_merge_names = sorted([key for key in variables.keys() if self._var_matches(key, search_pattern)])
|
||||
display.vvv("The following variables will be merged: {0}".format(var_merge_names))
|
||||
|
||||
prev_var_type = None
|
||||
result = None
|
||||
|
||||
if initial_value is not None:
|
||||
prev_var_type = _verify_and_get_type(initial_value)
|
||||
result = initial_value
|
||||
|
||||
for var_name in var_merge_names:
|
||||
var_value = self._templar.template(variables[var_name]) # Render jinja2 templates
|
||||
var_type = _verify_and_get_type(var_value)
|
||||
|
||||
if prev_var_type is None:
|
||||
prev_var_type = var_type
|
||||
elif prev_var_type != var_type:
|
||||
raise AnsibleError("Unable to merge, not all variables are of the same type")
|
||||
|
||||
if result is None:
|
||||
result = var_value
|
||||
continue
|
||||
|
||||
if var_type == "dict":
|
||||
result = self._merge_dict(var_value, result, [var_name])
|
||||
else: # var_type == "list"
|
||||
result += var_value
|
||||
|
||||
return result
|
||||
|
||||
def _merge_dict(self, src, dest, path):
|
||||
for key, value in src.items():
|
||||
if isinstance(value, dict):
|
||||
node = dest.setdefault(key, {})
|
||||
self._merge_dict(value, node, path + [key])
|
||||
elif isinstance(value, list) and key in dest:
|
||||
dest[key] += value
|
||||
else:
|
||||
if (key in dest) and dest[key] != value:
|
||||
msg = "The key '{0}' with value '{1}' will be overwritten with value '{2}' from '{3}.{0}'".format(
|
||||
key, dest[key], value, ".".join(path))
|
||||
|
||||
if self._override == "error":
|
||||
raise AnsibleError(msg)
|
||||
if self._override == "warn":
|
||||
display.warning(msg)
|
||||
|
||||
dest[key] = value
|
||||
|
||||
return dest
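A tiny plain-Python illustration of the merge semantics implemented by ``_merge_dict()`` above, without the override/conflict handling (example data only):

def merge_dict(src, dest):
    # nested dicts are merged recursively, lists stored under the same key are concatenated
    for key, value in src.items():
        if isinstance(value, dict):
            merge_dict(value, dest.setdefault(key, {}))
        elif isinstance(value, list) and key in dest:
            dest[key] += value
        else:
            dest[key] = value
    return dest

print(merge_dict({'ports': [3]}, {'ports': [1]}))   # {'ports': [1, 3]}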
|
||||
@@ -488,7 +488,7 @@ class OnePassCLIv2(OnePassCLIBase):
|
||||
account = "{subdomain}.{domain}".format(subdomain=self.subdomain, domain=self.domain)
|
||||
args.extend(["--account", account])
|
||||
|
||||
rc, out, err = self._run(args)
|
||||
rc, out, err = self._run(args, ignore_errors=True)
|
||||
|
||||
return not bool(rc)
|
||||
|
||||
|
||||
@@ -209,7 +209,6 @@ import time
|
||||
import yaml
|
||||
|
||||
from ansible.errors import AnsibleError, AnsibleAssertionError
|
||||
from ansible.module_utils.common.file import FileLock
|
||||
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
|
||||
from ansible.module_utils.parsing.convert_bool import boolean
|
||||
from ansible.utils.display import Display
|
||||
@@ -217,6 +216,8 @@ from ansible.utils.encrypt import random_password
|
||||
from ansible.plugins.lookup import LookupBase
|
||||
from ansible import constants as C
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils._filelock import FileLock
|
||||
|
||||
display = Display()
|
||||
|
||||
|
||||
|
||||
@@ -73,8 +73,6 @@ _raw:
|
||||
elements: str
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
HAVE_REDIS = False
|
||||
try:
|
||||
import redis
|
||||
|
||||
plugins/module_utils/_filelock.py (new file, 109 lines)
@@ -0,0 +1,109 @@
|
||||
# Copyright (c) 2018, Ansible Project
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
# SPDX-License-Identifier: BSD-2-Clause
|
||||
|
||||
# NOTE:
|
||||
# This has been vendored from ansible.module_utils.common.file. This code has been removed from there for ansible-core 2.16.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
import stat
|
||||
import time
|
||||
import fcntl
|
||||
import sys
|
||||
|
||||
from contextlib import contextmanager
|
||||
|
||||
|
||||
class LockTimeout(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class FileLock:
|
||||
'''
|
||||
Currently FileLock is implemented via fcntl.flock on a lock file, however this
|
||||
behaviour may change in the future. Avoid mixing lock types fcntl.flock,
|
||||
fcntl.lockf and module_utils.common.file.FileLock as it will certainly cause
|
||||
unwanted and/or unexpected behaviour
|
||||
'''
|
||||
def __init__(self):
|
||||
self.lockfd = None
|
||||
|
||||
@contextmanager
|
||||
def lock_file(self, path, tmpdir, lock_timeout=None):
|
||||
'''
|
||||
Context for lock acquisition
|
||||
'''
|
||||
try:
|
||||
self.set_lock(path, tmpdir, lock_timeout)
|
||||
yield
|
||||
finally:
|
||||
self.unlock()
|
||||
|
||||
def set_lock(self, path, tmpdir, lock_timeout=None):
|
||||
'''
|
||||
Create a lock file based on path with flock to prevent other processes
|
||||
using given path.
|
||||
Please note that currently file locking only works when it's executed by
|
||||
the same user, I.E single user scenarios
|
||||
|
||||
:kw path: Path (file) to lock
|
||||
:kw tmpdir: Path where to place the temporary .lock file
|
||||
:kw lock_timeout:
|
||||
Wait n seconds for lock acquisition, fail if timeout is reached.
|
||||
0 = Do not wait, fail if lock cannot be acquired immediately,
|
||||
Default is None, wait indefinitely until lock is released.
|
||||
:returns: True
|
||||
'''
|
||||
lock_path = os.path.join(tmpdir, 'ansible-{0}.lock'.format(os.path.basename(path)))
|
||||
l_wait = 0.1
|
||||
r_exception = IOError
|
||||
if sys.version_info[0] == 3:
|
||||
r_exception = BlockingIOError
|
||||
|
||||
self.lockfd = open(lock_path, 'w')
|
||||
|
||||
if lock_timeout <= 0:
|
||||
fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
|
||||
os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD)
|
||||
return True
|
||||
|
||||
if lock_timeout:
|
||||
e_secs = 0
|
||||
while e_secs < lock_timeout:
|
||||
try:
|
||||
fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
|
||||
os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD)
|
||||
return True
|
||||
except r_exception:
|
||||
time.sleep(l_wait)
|
||||
e_secs += l_wait
|
||||
continue
|
||||
|
||||
self.lockfd.close()
|
||||
raise LockTimeout('{0} sec'.format(lock_timeout))
|
||||
|
||||
fcntl.flock(self.lockfd, fcntl.LOCK_EX)
|
||||
os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD)
|
||||
|
||||
return True
|
||||
|
||||
def unlock(self):
|
||||
'''
|
||||
Make sure lock file is available for everyone and Unlock the file descriptor
|
||||
locked by set_lock
|
||||
|
||||
:returns: True
|
||||
'''
|
||||
if not self.lockfd:
|
||||
return True
|
||||
|
||||
try:
|
||||
fcntl.flock(self.lockfd, fcntl.LOCK_UN)
|
||||
self.lockfd.close()
|
||||
except ValueError: # file wasn't opened, let context manager fail gracefully
|
||||
pass
|
||||
|
||||
return True
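A short, hedged usage sketch for the vendored ``FileLock`` above (paths and timeout are example values):

# assuming the collection is installed, the vendored class can be imported from the new module above
from ansible_collections.community.general.plugins.module_utils._filelock import FileLock

lock = FileLock()

# Serialise access to /tmp/demo-file between processes of the same user; per set_lock()
# above, the actual flock is taken on /tmp/ansible-demo-file.lock.
with lock.lock_file('/tmp/demo-file', tmpdir='/tmp', lock_timeout=5):
    with open('/tmp/demo-file', 'a') as f:
        f.write('exclusive write\n')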
|
||||
plugins/module_utils/_stormssh.py (new file, 258 lines)
@@ -0,0 +1,258 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is based on
|
||||
# the config parser from here: https://github.com/emre/storm/blob/master/storm/parsers/ssh_config_parser.py
|
||||
# Copyright (C) <2013> <Emre Yilmaz>
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
import os
|
||||
import re
|
||||
import traceback
|
||||
from operator import itemgetter
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
try:
|
||||
from paramiko.config import SSHConfig
|
||||
except ImportError:
|
||||
SSHConfig = object
|
||||
HAS_PARAMIKO = False
|
||||
PARAMIKO_IMPORT_ERROR = traceback.format_exc()
|
||||
else:
|
||||
HAS_PARAMIKO = True
|
||||
PARAMIKO_IMPORT_ERROR = None
|
||||
|
||||
|
||||
class StormConfig(SSHConfig):
|
||||
def parse(self, file_obj):
|
||||
"""
|
||||
Read an OpenSSH config from the given file object.
|
||||
@param file_obj: a file-like object to read the config file from
|
||||
@type file_obj: file
|
||||
"""
|
||||
order = 1
|
||||
host = {"host": ['*'], "config": {}, }
|
||||
for line in file_obj:
|
||||
line = line.rstrip('\n').lstrip()
|
||||
if line == '':
|
||||
self._config.append({
|
||||
'type': 'empty_line',
|
||||
'value': line,
|
||||
'host': '',
|
||||
'order': order,
|
||||
})
|
||||
order += 1
|
||||
continue
|
||||
|
||||
if line.startswith('#'):
|
||||
self._config.append({
|
||||
'type': 'comment',
|
||||
'value': line,
|
||||
'host': '',
|
||||
'order': order,
|
||||
})
|
||||
order += 1
|
||||
continue
|
||||
|
||||
if '=' in line:
|
||||
# Ensure ProxyCommand gets properly split
|
||||
if line.lower().strip().startswith('proxycommand'):
|
||||
proxy_re = re.compile(r"^(proxycommand)\s*=*\s*(.*)", re.I)
|
||||
match = proxy_re.match(line)
|
||||
key, value = match.group(1).lower(), match.group(2)
|
||||
else:
|
||||
key, value = line.split('=', 1)
|
||||
key = key.strip().lower()
|
||||
else:
|
||||
# find first whitespace, and split there
|
||||
i = 0
|
||||
while (i < len(line)) and not line[i].isspace():
|
||||
i += 1
|
||||
if i == len(line):
|
||||
raise Exception('Unparsable line: %r' % line)
|
||||
key = line[:i].lower()
|
||||
value = line[i:].lstrip()
|
||||
if key == 'host':
|
||||
self._config.append(host)
|
||||
value = value.split()
|
||||
host = {
|
||||
key: value,
|
||||
'config': {},
|
||||
'type': 'entry',
|
||||
'order': order
|
||||
}
|
||||
order += 1
|
||||
elif key in ['identityfile', 'localforward', 'remoteforward']:
|
||||
if key in host['config']:
|
||||
host['config'][key].append(value)
|
||||
else:
|
||||
host['config'][key] = [value]
|
||||
elif key not in host['config']:
|
||||
host['config'].update({key: value})
|
||||
self._config.append(host)
|
||||
|
||||
|
||||
class ConfigParser(object):
|
||||
"""
|
||||
Config parser for ~/.ssh/config files.
|
||||
"""
|
||||
|
||||
def __init__(self, ssh_config_file=None):
|
||||
if not ssh_config_file:
|
||||
ssh_config_file = self.get_default_ssh_config_file()
|
||||
|
||||
self.defaults = {}
|
||||
|
||||
self.ssh_config_file = ssh_config_file
|
||||
|
||||
if not os.path.exists(self.ssh_config_file):
|
||||
if not os.path.exists(os.path.dirname(self.ssh_config_file)):
|
||||
os.makedirs(os.path.dirname(self.ssh_config_file))
|
||||
open(self.ssh_config_file, 'w+').close()
|
||||
os.chmod(self.ssh_config_file, 0o600)
|
||||
|
||||
self.config_data = []
|
||||
|
||||
def get_default_ssh_config_file(self):
|
||||
return os.path.expanduser("~/.ssh/config")
|
||||
|
||||
def load(self):
|
||||
config = StormConfig()
|
||||
|
||||
with open(self.ssh_config_file) as fd:
|
||||
config.parse(fd)
|
||||
|
||||
for entry in config.__dict__.get("_config"):
|
||||
if entry.get("host") == ["*"]:
|
||||
self.defaults.update(entry.get("config"))
|
||||
|
||||
if entry.get("type") in ["comment", "empty_line"]:
|
||||
self.config_data.append(entry)
|
||||
continue
|
||||
|
||||
host_item = {
|
||||
'host': entry["host"][0],
|
||||
'options': entry.get("config"),
|
||||
'type': 'entry',
|
||||
'order': entry.get("order", 0),
|
||||
}
|
||||
|
||||
if len(entry["host"]) > 1:
|
||||
host_item.update({
|
||||
'host': " ".join(entry["host"]),
|
||||
})
|
||||
# minor bug in paramiko.SSHConfig that duplicates
|
||||
# "Host *" entries.
|
||||
if entry.get("config") and len(entry.get("config")) > 0:
|
||||
self.config_data.append(host_item)
|
||||
|
||||
return self.config_data
|
||||
|
||||
def add_host(self, host, options):
|
||||
self.config_data.append({
|
||||
'host': host,
|
||||
'options': options,
|
||||
'order': self.get_last_index(),
|
||||
})
|
||||
|
||||
return self
|
||||
|
||||
def update_host(self, host, options, use_regex=False):
|
||||
for index, host_entry in enumerate(self.config_data):
|
||||
if host_entry.get("host") == host or \
|
||||
(use_regex and re.match(host, host_entry.get("host"))):
|
||||
|
||||
if 'deleted_fields' in options:
|
||||
deleted_fields = options.pop("deleted_fields")
|
||||
for deleted_field in deleted_fields:
|
||||
del self.config_data[index]["options"][deleted_field]
|
||||
|
||||
self.config_data[index]["options"].update(options)
|
||||
|
||||
return self
|
||||
|
||||
def search_host(self, search_string):
|
||||
results = []
|
||||
for host_entry in self.config_data:
|
||||
if host_entry.get("type") != 'entry':
|
||||
continue
|
||||
if host_entry.get("host") == "*":
|
||||
continue
|
||||
|
||||
searchable_information = host_entry.get("host")
|
||||
for key, value in host_entry.get("options").items():
|
||||
if isinstance(value, list):
|
||||
value = " ".join(value)
|
||||
if isinstance(value, int):
|
||||
value = str(value)
|
||||
|
||||
searchable_information += " " + value
|
||||
|
||||
if search_string in searchable_information:
|
||||
results.append(host_entry)
|
||||
|
||||
return results
|
||||
|
||||
def delete_host(self, host):
|
||||
found = 0
|
||||
for index, host_entry in enumerate(self.config_data):
|
||||
if host_entry.get("host") == host:
|
||||
del self.config_data[index]
|
||||
found += 1
|
||||
|
||||
if found == 0:
|
||||
raise ValueError('No host found')
|
||||
return self
|
||||
|
||||
def delete_all_hosts(self):
|
||||
self.config_data = []
|
||||
self.write_to_ssh_config()
|
||||
|
||||
return self
|
||||
|
||||
def dump(self):
|
||||
if len(self.config_data) < 1:
|
||||
return
|
||||
|
||||
file_content = ""
|
||||
self.config_data = sorted(self.config_data, key=itemgetter("order"))
|
||||
|
||||
for host_item in self.config_data:
|
||||
if host_item.get("type") in ['comment', 'empty_line']:
|
||||
file_content += host_item.get("value") + "\n"
|
||||
continue
|
||||
host_item_content = "Host {0}\n".format(host_item.get("host"))
|
||||
for key, value in host_item.get("options").items():
|
||||
if isinstance(value, list):
|
||||
sub_content = ""
|
||||
for value_ in value:
|
||||
sub_content += " {0} {1}\n".format(
|
||||
key, value_
|
||||
)
|
||||
host_item_content += sub_content
|
||||
else:
|
||||
host_item_content += " {0} {1}\n".format(
|
||||
key, value
|
||||
)
|
||||
file_content += host_item_content
|
||||
|
||||
return file_content
|
||||
|
||||
def write_to_ssh_config(self):
|
||||
with open(self.ssh_config_file, 'w+') as f:
|
||||
data = self.dump()
|
||||
if data:
|
||||
f.write(data)
|
||||
return self
|
||||
|
||||
def get_last_index(self):
|
||||
last_index = 0
|
||||
indexes = []
|
||||
for item in self.config_data:
|
||||
if item.get("order"):
|
||||
indexes.append(item.get("order"))
|
||||
if len(indexes) > 0:
|
||||
last_index = max(indexes)
|
||||
|
||||
return last_index
|
||||
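A hedged usage sketch of the ConfigParser defined above (load() needs paramiko; the path, host name and option values are hypothetical, and a throwaway file is used instead of ~/.ssh/config):

parser = ConfigParser(ssh_config_file='/tmp/example_ssh_config')
parser.load()                                              # parse existing entries, if any
parser.add_host('build01', {'hostname': '203.0.113.10', 'user': 'deploy', 'port': '2222'})
parser.update_host('build01', {'identityfile': '~/.ssh/build01'})
parser.write_to_ssh_config()                               # serialise the entries via dump()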
plugins/module_utils/btrfs.py (new file, 464 lines)
@@ -0,0 +1,464 @@
|
||||
# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_bytes
|
||||
import re
|
||||
import os
|
||||
|
||||
|
||||
def normalize_subvolume_path(path):
|
||||
"""
|
||||
Normalizes btrfs subvolume paths to ensure exactly one leading slash, no trailing slashes and no consecutive slashes.
|
||||
In addition, if the path is prefixed with a leading <FS_TREE>, this value is removed.
|
||||
"""
|
||||
fstree_stripped = re.sub(r'^<FS_TREE>', '', path)
|
||||
result = re.sub(r'/+$', '', re.sub(r'/+', '/', '/' + fstree_stripped))
|
||||
return result if len(result) > 0 else '/'
|
||||
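Illustrative inputs and outputs of normalize_subvolume_path (the subvolume names are hypothetical):

# normalize_subvolume_path('<FS_TREE>/@home//snapshots/')  ->  '/@home/snapshots'
# normalize_subvolume_path('@')                            ->  '/@'
# normalize_subvolume_path('/')                            ->  '/'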
|
||||
|
||||
class BtrfsModuleException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class BtrfsCommands(object):
|
||||
|
||||
"""
|
||||
Provides access to a subset of the Btrfs command line
|
||||
"""
|
||||
|
||||
def __init__(self, module):
|
||||
self.__module = module
|
||||
self.__btrfs = self.__module.get_bin_path("btrfs", required=True)
|
||||
|
||||
def filesystem_show(self):
|
||||
command = "%s filesystem show -d" % (self.__btrfs)
|
||||
result = self.__module.run_command(command, check_rc=True)
|
||||
stdout = [x.strip() for x in result[1].splitlines()]
|
||||
filesystems = []
|
||||
current = None
|
||||
for line in stdout:
|
||||
if line.startswith('Label'):
|
||||
current = self.__parse_filesystem(line)
|
||||
filesystems.append(current)
|
||||
elif line.startswith('devid'):
|
||||
current['devices'].append(self.__parse_filesystem_device(line))
|
||||
return filesystems
|
||||
|
||||
def __parse_filesystem(self, line):
|
||||
label = re.sub(r'\s*uuid:.*$', '', re.sub(r'^Label:\s*', '', line))
|
||||
id = re.sub(r'^.*uuid:\s*', '', line)
|
||||
|
||||
filesystem = {}
|
||||
filesystem['label'] = label.strip("'") if label != 'none' else None
|
||||
filesystem['uuid'] = id
|
||||
filesystem['devices'] = []
|
||||
filesystem['mountpoints'] = []
|
||||
filesystem['subvolumes'] = []
|
||||
filesystem['default_subvolid'] = None
|
||||
return filesystem
|
||||
|
||||
def __parse_filesystem_device(self, line):
|
||||
return re.sub(r'^.*path\s', '', line)
|
||||
|
||||
def subvolumes_list(self, filesystem_path):
|
||||
command = "%s subvolume list -tap %s" % (self.__btrfs, filesystem_path)
|
||||
result = self.__module.run_command(command, check_rc=True)
|
||||
stdout = [x.split('\t') for x in result[1].splitlines()]
|
||||
subvolumes = [{'id': 5, 'parent': None, 'path': '/'}]
|
||||
if len(stdout) > 2:
|
||||
subvolumes.extend([self.__parse_subvolume_list_record(x) for x in stdout[2:]])
|
||||
return subvolumes
|
||||
|
||||
def __parse_subvolume_list_record(self, item):
|
||||
return {
|
||||
'id': int(item[0]),
|
||||
'parent': int(item[2]),
|
||||
'path': normalize_subvolume_path(item[5]),
|
||||
}
|
||||
|
||||
def subvolume_get_default(self, filesystem_path):
|
||||
command = [self.__btrfs, "subvolume", "get-default", to_bytes(filesystem_path)]
|
||||
result = self.__module.run_command(command, check_rc=True)
|
||||
# ID [n] ...
|
||||
return int(result[1].strip().split()[1])
|
||||
|
||||
def subvolume_set_default(self, filesystem_path, subvolume_id):
|
||||
command = [self.__btrfs, "subvolume", "set-default", str(subvolume_id), to_bytes(filesystem_path)]
|
||||
result = self.__module.run_command(command, check_rc=True)
|
||||
|
||||
def subvolume_create(self, subvolume_path):
|
||||
command = [self.__btrfs, "subvolume", "create", to_bytes(subvolume_path)]
|
||||
result = self.__module.run_command(command, check_rc=True)
|
||||
|
||||
def subvolume_snapshot(self, snapshot_source, snapshot_destination):
|
||||
command = [self.__btrfs, "subvolume", "snapshot", to_bytes(snapshot_source), to_bytes(snapshot_destination)]
|
||||
result = self.__module.run_command(command, check_rc=True)
|
||||
|
||||
def subvolume_delete(self, subvolume_path):
|
||||
command = [self.__btrfs, "subvolume", "delete", to_bytes(subvolume_path)]
|
||||
result = self.__module.run_command(command, check_rc=True)
|
||||
|
||||
|
||||
class BtrfsInfoProvider(object):
|
||||
|
||||
"""
|
||||
Utility providing details of the currently available btrfs filesystems
|
||||
"""
|
||||
|
||||
def __init__(self, module):
|
||||
self.__module = module
|
||||
self.__btrfs_api = BtrfsCommands(module)
|
||||
self.__findmnt_path = self.__module.get_bin_path("findmnt", required=True)
|
||||
|
||||
def get_filesystems(self):
|
||||
filesystems = self.__btrfs_api.filesystem_show()
|
||||
mountpoints = self.__find_mountpoints()
|
||||
for filesystem in filesystems:
|
||||
device_mountpoints = self.__filter_mountpoints_for_devices(mountpoints, filesystem['devices'])
|
||||
filesystem['mountpoints'] = device_mountpoints
|
||||
|
||||
if len(device_mountpoints) > 0:
|
||||
|
||||
# any path within the filesystem can be used to query metadata
|
||||
mountpoint = device_mountpoints[0]['mountpoint']
|
||||
filesystem['subvolumes'] = self.get_subvolumes(mountpoint)
|
||||
filesystem['default_subvolid'] = self.get_default_subvolume_id(mountpoint)
|
||||
|
||||
return filesystems
|
||||
|
||||
def get_mountpoints(self, filesystem_devices):
|
||||
mountpoints = self.__find_mountpoints()
|
||||
return self.__filter_mountpoints_for_devices(mountpoints, filesystem_devices)
|
||||
|
||||
def get_subvolumes(self, filesystem_path):
|
||||
return self.__btrfs_api.subvolumes_list(filesystem_path)
|
||||
|
||||
def get_default_subvolume_id(self, filesystem_path):
|
||||
return self.__btrfs_api.subvolume_get_default(filesystem_path)
|
||||
|
||||
def __filter_mountpoints_for_devices(self, mountpoints, devices):
|
||||
return [m for m in mountpoints if (m['device'] in devices)]
|
||||
|
||||
def __find_mountpoints(self):
|
||||
command = "%s -t btrfs -nvP" % self.__findmnt_path
|
||||
result = self.__module.run_command(command)
|
||||
mountpoints = []
|
||||
if result[0] == 0:
|
||||
lines = result[1].splitlines()
|
||||
for line in lines:
|
||||
mountpoint = self.__parse_mountpoint_pairs(line)
|
||||
mountpoints.append(mountpoint)
|
||||
return mountpoints
|
||||
|
||||
def __parse_mountpoint_pairs(self, line):
|
||||
pattern = re.compile(r'^TARGET="(?P<target>.*)"\s+SOURCE="(?P<source>.*)"\s+FSTYPE="(?P<fstype>.*)"\s+OPTIONS="(?P<options>.*)"\s*$')
|
||||
match = pattern.search(line)
|
||||
if match is not None:
|
||||
groups = match.groupdict()
|
||||
|
||||
return {
|
||||
'mountpoint': groups['target'],
|
||||
'device': groups['source'],
|
||||
'subvolid': self.__extract_mount_subvolid(groups['options']),
|
||||
}
|
||||
else:
|
||||
raise BtrfsModuleException("Failed to parse findmnt result for line: '%s'" % line)
|
||||
|
||||
def __extract_mount_subvolid(self, mount_options):
|
||||
for option in mount_options.split(','):
|
||||
if option.startswith('subvolid='):
|
||||
return int(option[len('subvolid='):])
|
||||
raise BtrfsModuleException("Failed to find subvolid for mountpoint in options '%s'" % mount_options)
|
||||
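The regex above expects the TARGET/SOURCE/FSTYPE/OPTIONS pairs printed by findmnt -nvP. A small standalone check with a hypothetical sample line:

import re

line = 'TARGET="/home" SOURCE="/dev/sda2" FSTYPE="btrfs" OPTIONS="rw,relatime,subvolid=256,subvol=/@home"'
pattern = re.compile(r'^TARGET="(?P<target>.*)"\s+SOURCE="(?P<source>.*)"\s+FSTYPE="(?P<fstype>.*)"\s+OPTIONS="(?P<options>.*)"\s*$')
print(pattern.search(line).groupdict())
# {'target': '/home', 'source': '/dev/sda2', 'fstype': 'btrfs', 'options': 'rw,relatime,subvolid=256,subvol=/@home'}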
|
||||
|
||||
class BtrfsSubvolume(object):
|
||||
|
||||
"""
|
||||
Wrapper class providing convenience methods for inspection of a btrfs subvolume
|
||||
"""
|
||||
|
||||
def __init__(self, filesystem, subvolume_id):
|
||||
self.__filesystem = filesystem
|
||||
self.__subvolume_id = subvolume_id
|
||||
|
||||
def get_filesystem(self):
|
||||
return self.__filesystem
|
||||
|
||||
def is_mounted(self):
|
||||
mountpoints = self.get_mountpoints()
|
||||
return mountpoints is not None and len(mountpoints) > 0
|
||||
|
||||
def is_filesystem_root(self):
|
||||
return 5 == self.__subvolume_id
|
||||
|
||||
def is_filesystem_default(self):
|
||||
return self.__filesystem.default_subvolid == self.__subvolume_id
|
||||
|
||||
def get_mounted_path(self):
|
||||
mountpoints = self.get_mountpoints()
|
||||
if mountpoints is not None and len(mountpoints) > 0:
|
||||
return mountpoints[0]
|
||||
elif self.parent is not None:
|
||||
parent = self.__filesystem.get_subvolume_by_id(self.parent)
|
||||
parent_path = parent.get_mounted_path()
|
||||
if parent_path is not None:
|
||||
return parent_path + os.path.sep + self.name
|
||||
else:
|
||||
return None
|
||||
|
||||
def get_mountpoints(self):
|
||||
return self.__filesystem.get_mountpoints_by_subvolume_id(self.__subvolume_id)
|
||||
|
||||
def get_child_relative_path(self, absolute_child_path):
|
||||
"""
|
||||
Get the relative path from this subvolume to the named child subvolume.
|
||||
The provided parameter is expected to be normalized as by normalize_subvolume_path.
|
||||
"""
|
||||
path = self.path
|
||||
if absolute_child_path.startswith(path):
|
||||
relative = absolute_child_path[len(path):]
|
||||
return re.sub(r'^/*', '', relative)
|
||||
else:
|
||||
raise BtrfsModuleException("Path '%s' doesn't start with '%s'" % (absolute_child_path, path))
|
||||
|
||||
def get_parent_subvolume(self):
|
||||
parent_id = self.parent
|
||||
return self.__filesystem.get_subvolume_by_id(parent_id) if parent_id is not None else None
|
||||
|
||||
def get_child_subvolumes(self):
|
||||
return self.__filesystem.get_subvolume_children(self.__subvolume_id)
|
||||
|
||||
@property
|
||||
def __info(self):
|
||||
return self.__filesystem.get_subvolume_info_for_id(self.__subvolume_id)
|
||||
|
||||
@property
|
||||
def id(self):
|
||||
return self.__subvolume_id
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return self.path.split('/').pop()
|
||||
|
||||
@property
|
||||
def path(self):
|
||||
return self.__info['path']
|
||||
|
||||
@property
|
||||
def parent(self):
|
||||
return self.__info['parent']
|
||||
|
||||
|
||||
class BtrfsFilesystem(object):
|
||||
|
||||
"""
|
||||
Wrapper class providing convenience methods for inspection of a btrfs filesystem
|
||||
"""
|
||||
|
||||
def __init__(self, info, provider, module):
|
||||
self.__provider = provider
|
||||
|
||||
# constant for module execution
|
||||
self.__uuid = info['uuid']
|
||||
self.__label = info['label']
|
||||
self.__devices = info['devices']
|
||||
|
||||
# refreshable
|
||||
self.__default_subvolid = info['default_subvolid'] if 'default_subvolid' in info else None
|
||||
self.__update_mountpoints(info['mountpoints'] if 'mountpoints' in info else [])
|
||||
self.__update_subvolumes(info['subvolumes'] if 'subvolumes' in info else [])
|
||||
|
||||
@property
|
||||
def uuid(self):
|
||||
return self.__uuid
|
||||
|
||||
@property
|
||||
def label(self):
|
||||
return self.__label
|
||||
|
||||
@property
|
||||
def default_subvolid(self):
|
||||
return self.__default_subvolid
|
||||
|
||||
@property
|
||||
def devices(self):
|
||||
return list(self.__devices)
|
||||
|
||||
def refresh(self):
|
||||
self.refresh_mountpoints()
|
||||
self.refresh_subvolumes()
|
||||
self.refresh_default_subvolume()
|
||||
|
||||
def refresh_mountpoints(self):
|
||||
mountpoints = self.__provider.get_mountpoints(list(self.__devices))
|
||||
self.__update_mountpoints(mountpoints)
|
||||
|
||||
def __update_mountpoints(self, mountpoints):
|
||||
self.__mountpoints = dict()
|
||||
for i in mountpoints:
|
||||
subvolid = i['subvolid']
|
||||
mountpoint = i['mountpoint']
|
||||
if subvolid not in self.__mountpoints:
|
||||
self.__mountpoints[subvolid] = []
|
||||
self.__mountpoints[subvolid].append(mountpoint)
|
||||
|
||||
def refresh_subvolumes(self):
|
||||
filesystem_path = self.get_any_mountpoint()
|
||||
if filesystem_path is not None:
|
||||
subvolumes = self.__provider.get_subvolumes(filesystem_path)
|
||||
self.__update_subvolumes(subvolumes)
|
||||
|
||||
def __update_subvolumes(self, subvolumes):
|
||||
# TODO strategy for retaining information on deleted subvolumes?
|
||||
self.__subvolumes = dict()
|
||||
for subvolume in subvolumes:
|
||||
self.__subvolumes[subvolume['id']] = subvolume
|
||||
|
||||
def refresh_default_subvolume(self):
|
||||
filesystem_path = self.get_any_mountpoint()
|
||||
if filesystem_path is not None:
|
||||
self.__default_subvolid = self.__provider.get_default_subvolume_id(filesystem_path)
|
||||
|
||||
def contains_device(self, device):
|
||||
return device in self.__devices
|
||||
|
||||
def contains_subvolume(self, subvolume):
|
||||
return self.get_subvolume_by_name(subvolume) is not None
|
||||
|
||||
def get_subvolume_by_id(self, subvolume_id):
|
||||
return BtrfsSubvolume(self, subvolume_id) if subvolume_id in self.__subvolumes else None
|
||||
|
||||
def get_subvolume_info_for_id(self, subvolume_id):
|
||||
return self.__subvolumes[subvolume_id] if subvolume_id in self.__subvolumes else None
|
||||
|
||||
def get_subvolume_by_name(self, subvolume):
|
||||
for subvolume_info in self.__subvolumes.values():
|
||||
if subvolume_info['path'] == subvolume:
|
||||
return BtrfsSubvolume(self, subvolume_info['id'])
|
||||
return None
|
||||
|
||||
def get_any_mountpoint(self):
|
||||
for subvol_mountpoints in self.__mountpoints.values():
|
||||
if len(subvol_mountpoints) > 0:
|
||||
return subvol_mountpoints[0]
|
||||
# maybe error?
|
||||
return None
|
||||
|
||||
def get_any_mounted_subvolume(self):
|
||||
for subvolid, subvol_mountpoints in self.__mountpoints.items():
|
||||
if len(subvol_mountpoints) > 0:
|
||||
return self.get_subvolume_by_id(subvolid)
|
||||
return None
|
||||
|
||||
def get_mountpoints_by_subvolume_id(self, subvolume_id):
|
||||
return self.__mountpoints[subvolume_id] if subvolume_id in self.__mountpoints else []
|
||||
|
||||
def get_nearest_subvolume(self, subvolume):
|
||||
"""Return the identified subvolume if existing, else the closest matching parent"""
|
||||
subvolumes_by_path = self.__get_subvolumes_by_path()
|
||||
while len(subvolume) > 1:
|
||||
if subvolume in subvolumes_by_path:
|
||||
return BtrfsSubvolume(self, subvolumes_by_path[subvolume]['id'])
|
||||
else:
|
||||
subvolume = re.sub(r'/[^/]+$', '', subvolume)
|
||||
|
||||
return BtrfsSubvolume(self, 5)
|
||||
|
||||
def get_mountpath_as_child(self, subvolume_name):
|
||||
"""Find a path to the target subvolume through a mounted ancestor"""
|
||||
nearest = self.get_nearest_subvolume(subvolume_name)
|
||||
if nearest.path == subvolume_name:
|
||||
nearest = nearest.get_parent_subvolume()
|
||||
if nearest is None or nearest.get_mounted_path() is None:
|
||||
raise BtrfsModuleException("Failed to find a path '%s' through a mounted parent subvolume" % subvolume_name)
|
||||
else:
|
||||
return nearest.get_mounted_path() + os.path.sep + nearest.get_child_relative_path(subvolume_name)
|
||||
|
||||
def get_subvolume_children(self, subvolume_id):
|
||||
return [BtrfsSubvolume(self, x['id']) for x in self.__subvolumes.values() if x['parent'] == subvolume_id]
|
||||
|
||||
def __get_subvolumes_by_path(self):
|
||||
result = {}
|
||||
for s in self.__subvolumes.values():
|
||||
path = s['path']
|
||||
result[path] = s
|
||||
return result
|
||||
|
||||
def is_mounted(self):
|
||||
return self.__mountpoints is not None and len(self.__mountpoints) > 0
|
||||
|
||||
def get_summary(self):
|
||||
subvolumes = []
|
||||
sources = self.__subvolumes.values() if self.__subvolumes is not None else []
|
||||
for subvolume in sources:
|
||||
id = subvolume['id']
|
||||
subvolumes.append({
|
||||
'id': id,
|
||||
'path': subvolume['path'],
|
||||
'parent': subvolume['parent'],
|
||||
'mountpoints': self.get_mountpoints_by_subvolume_id(id),
|
||||
})
|
||||
|
||||
return {
|
||||
'default_subvolume': self.__default_subvolid,
|
||||
'devices': self.__devices,
|
||||
'label': self.__label,
|
||||
'uuid': self.__uuid,
|
||||
'subvolumes': subvolumes,
|
||||
}
|
||||
|
||||
|
||||
class BtrfsFilesystemsProvider(object):
|
||||
|
||||
"""
|
||||
Provides methods to query available btrfs filesystems
|
||||
"""
|
||||
|
||||
def __init__(self, module):
|
||||
self.__module = module
|
||||
self.__provider = BtrfsInfoProvider(module)
|
||||
self.__filesystems = None
|
||||
|
||||
def get_matching_filesystem(self, criteria):
|
||||
if criteria['device'] is not None:
|
||||
criteria['device'] = os.path.realpath(criteria['device'])
|
||||
|
||||
self.__check_init()
|
||||
matching = [f for f in self.__filesystems.values() if self.__filesystem_matches_criteria(f, criteria)]
|
||||
if len(matching) == 1:
|
||||
return matching[0]
|
||||
else:
|
||||
raise BtrfsModuleException("Found %d filesystems matching criteria uuid=%s label=%s device=%s" % (
|
||||
len(matching),
|
||||
criteria['uuid'],
|
||||
criteria['label'],
|
||||
criteria['device']
|
||||
))
|
||||
|
||||
def __filesystem_matches_criteria(self, filesystem, criteria):
|
||||
return ((criteria['uuid'] is None or filesystem.uuid == criteria['uuid']) and
|
||||
(criteria['label'] is None or filesystem.label == criteria['label']) and
|
||||
(criteria['device'] is None or filesystem.contains_device(criteria['device'])))
|
||||
|
||||
def get_filesystem_for_device(self, device):
|
||||
real_device = os.path.realpath(device)
|
||||
self.__check_init()
|
||||
for fs in self.__filesystems.values():
|
||||
if fs.contains_device(real_device):
|
||||
return fs
|
||||
return None
|
||||
|
||||
def get_filesystems(self):
|
||||
self.__check_init()
|
||||
return list(self.__filesystems.values())
|
||||
|
||||
def __check_init(self):
|
||||
if self.__filesystems is None:
|
||||
self.__filesystems = dict()
|
||||
for f in self.__provider.get_filesystems():
|
||||
uuid = f['uuid']
|
||||
self.__filesystems[uuid] = BtrfsFilesystem(f, self.__provider, self.__module)
|
||||
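A hedged usage sketch of the classes above; module is assumed to be an AnsibleModule instance and the device path is hypothetical:

provider = BtrfsFilesystemsProvider(module)
fs = provider.get_matching_filesystem({'uuid': None, 'label': None, 'device': '/dev/sda1'})
fs.refresh()                                            # re-read mountpoints, subvolumes and default subvolume
default = fs.get_subvolume_by_id(fs.default_subvolid)
module.exit_json(changed=False, filesystem=fs.get_summary())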
@@ -50,7 +50,7 @@ class _Dependency(object):
|
||||
def failed(self):
|
||||
return self.state == 1
|
||||
|
||||
def verify(self, module):
|
||||
def validate(self, module):
|
||||
if self.failed:
|
||||
module.fail_json(msg=self.message, exception=self.trace)
|
||||
|
||||
@@ -71,20 +71,28 @@ def declare(name, *args, **kwargs):
|
||||
_deps[name] = dep
|
||||
|
||||
|
||||
def validate(module, spec=None):
|
||||
def _select_names(spec):
|
||||
dep_names = sorted(_deps)
|
||||
|
||||
if spec is not None:
|
||||
if spec:
|
||||
if spec.startswith("-"):
|
||||
spec_split = spec[1:].split(":")
|
||||
for d in spec_split:
|
||||
dep_names.remove(d)
|
||||
else:
|
||||
spec_split = spec[1:].split(":")
|
||||
spec_split = spec.split(":")
|
||||
dep_names = []
|
||||
for d in spec_split:
|
||||
_deps[d] # ensure it exists
|
||||
dep_names.append(d)
|
||||
|
||||
for dep in dep_names:
|
||||
_deps[dep].verify(module)
|
||||
return dep_names
|
||||
|
||||
|
||||
def validate(module, spec=None):
|
||||
for dep in _select_names(spec):
|
||||
_deps[dep].validate(module)
|
||||
|
||||
|
||||
def failed(spec=None):
|
||||
return any(_deps[d].failed for d in _select_names(spec))
|
||||
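Illustrative behaviour of the spec string handled by _select_names above, assuming dependencies "a", "b" and "c" were declared:

#   validate(module)          ->  checks a, b and c
#   validate(module, "a:c")   ->  checks only a and c
#   validate(module, "-b")    ->  checks everything except b
#   failed("-b")              ->  True if a or c failed to import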
|
||||
@@ -19,15 +19,16 @@ import os
|
||||
import re
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
# (TODO: remove AnsibleModule from next line!)
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib # noqa: F401, pylint: disable=unused-import
|
||||
from ansible.module_utils.six.moves import configparser
|
||||
from os.path import expanduser
|
||||
from uuid import UUID
|
||||
|
||||
LIBCLOUD_IMP_ERR = None
|
||||
try:
|
||||
from libcloud.common.dimensiondata import API_ENDPOINTS, DimensionDataAPIException, DimensionDataStatus
|
||||
from libcloud.compute.base import Node, NodeLocation
|
||||
from libcloud.common.dimensiondata import API_ENDPOINTS, DimensionDataAPIException, DimensionDataStatus # noqa: F401, pylint: disable=unused-import
|
||||
from libcloud.compute.base import Node, NodeLocation # noqa: F401, pylint: disable=unused-import
|
||||
from libcloud.compute.providers import get_driver
|
||||
from libcloud.compute.types import Provider
|
||||
|
||||
|
||||
@@ -42,12 +42,23 @@ URL_CLIENTTEMPLATE = "{url}/admin/realms/{realm}/client-templates/{id}"
|
||||
URL_CLIENTTEMPLATES = "{url}/admin/realms/{realm}/client-templates"
|
||||
URL_GROUPS = "{url}/admin/realms/{realm}/groups"
|
||||
URL_GROUP = "{url}/admin/realms/{realm}/groups/{groupid}"
|
||||
URL_GROUP_CHILDREN = "{url}/admin/realms/{realm}/groups/{groupid}/children"
|
||||
|
||||
URL_CLIENTSCOPES = "{url}/admin/realms/{realm}/client-scopes"
|
||||
URL_CLIENTSCOPE = "{url}/admin/realms/{realm}/client-scopes/{id}"
|
||||
URL_CLIENTSCOPE_PROTOCOLMAPPERS = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models"
|
||||
URL_CLIENTSCOPE_PROTOCOLMAPPER = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models/{mapper_id}"
|
||||
|
||||
URL_DEFAULT_CLIENTSCOPES = "{url}/admin/realms/{realm}/default-default-client-scopes"
|
||||
URL_DEFAULT_CLIENTSCOPE = "{url}/admin/realms/{realm}/default-default-client-scopes/{id}"
|
||||
URL_OPTIONAL_CLIENTSCOPES = "{url}/admin/realms/{realm}/default-optional-client-scopes"
|
||||
URL_OPTIONAL_CLIENTSCOPE = "{url}/admin/realms/{realm}/default-optional-client-scopes/{id}"
|
||||
|
||||
URL_CLIENT_DEFAULT_CLIENTSCOPES = "{url}/admin/realms/{realm}/clients/{cid}/default-client-scopes"
|
||||
URL_CLIENT_DEFAULT_CLIENTSCOPE = "{url}/admin/realms/{realm}/clients/{cid}/default-client-scopes/{id}"
|
||||
URL_CLIENT_OPTIONAL_CLIENTSCOPES = "{url}/admin/realms/{realm}/clients/{cid}/optional-client-scopes"
|
||||
URL_CLIENT_OPTIONAL_CLIENTSCOPE = "{url}/admin/realms/{realm}/clients/{cid}/optional-client-scopes/{id}"
|
||||
|
||||
URL_CLIENT_GROUP_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}"
|
||||
URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/available"
|
||||
URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/composite"
|
||||
@@ -79,6 +90,9 @@ URL_IDENTITY_PROVIDER_MAPPER = "{url}/admin/realms/{realm}/identity-provider/ins
|
||||
URL_COMPONENTS = "{url}/admin/realms/{realm}/components"
|
||||
URL_COMPONENT = "{url}/admin/realms/{realm}/components/{id}"
|
||||
|
||||
URL_AUTHZ_AUTHORIZATION_SCOPE = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/scope/{id}"
|
||||
URL_AUTHZ_AUTHORIZATION_SCOPES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/scope"
|
||||
|
||||
|
||||
def keycloak_argument_spec():
|
||||
"""
|
||||
@@ -1162,6 +1176,131 @@ class KeycloakAPI(object):
|
||||
self.module.fail_json(msg='Could not update protocolmappers for clientscope %s in realm %s: %s'
|
||||
% (mapper_rep, realm, str(e)))
|
||||
|
||||
def get_default_clientscopes(self, realm, client_id=None):
|
||||
"""Fetch the name and ID of all clientscopes on the Keycloak server.
|
||||
|
||||
To fetch the full data of the client scope, make a subsequent call to
|
||||
get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return.
|
||||
|
||||
:param realm: Realm in which the clientscope resides.
|
||||
:param client_id: The client in which the clientscope resides.
|
||||
:return: The default clientscopes of this realm or client
|
||||
"""
|
||||
url = URL_DEFAULT_CLIENTSCOPES if client_id is None else URL_CLIENT_DEFAULT_CLIENTSCOPES
|
||||
return self._get_clientscopes_of_type(realm, url, 'default', client_id)
|
||||
|
||||
def get_optional_clientscopes(self, realm, client_id=None):
|
||||
"""Fetch the name and ID of all clientscopes on the Keycloak server.
|
||||
|
||||
To fetch the full data of the client scope, make a subsequent call to
|
||||
get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return.
|
||||
|
||||
:param realm: Realm in which the clientscope resides.
|
||||
:param client_id: The client in which the clientscope resides.
|
||||
:return: The optional clientscopes of this realm or client
|
||||
"""
|
||||
url = URL_OPTIONAL_CLIENTSCOPES if client_id is None else URL_CLIENT_OPTIONAL_CLIENTSCOPES
|
||||
return self._get_clientscopes_of_type(realm, url, 'optional', client_id)
|
||||
|
||||
def _get_clientscopes_of_type(self, realm, url_template, scope_type, client_id=None):
|
||||
"""Fetch the name and ID of all clientscopes on the Keycloak server.
|
||||
|
||||
To fetch the full data of the client scope, make a subsequent call to
|
||||
get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return.
|
||||
|
||||
:param realm: Realm in which the clientscope resides.
|
||||
:param url_template: The URL template for the requested scope type
|
||||
:param scope_type: Either 'optional' or 'default'
|
||||
:param client_id: The client in which the clientscope resides.
|
||||
:return: The clientscopes of the specified type for this realm or client
|
||||
"""
|
||||
if client_id is None:
|
||||
clientscopes_url = url_template.format(url=self.baseurl, realm=realm)
|
||||
try:
|
||||
return json.loads(to_native(open_url(clientscopes_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout, validate_certs=self.validate_certs).read()))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not fetch list of %s clientscopes in realm %s: %s" % (scope_type, realm, str(e)))
|
||||
else:
|
||||
cid = self.get_client_id(client_id=client_id, realm=realm)
|
||||
clientscopes_url = url_template.format(url=self.baseurl, realm=realm, cid=cid)
|
||||
try:
|
||||
return json.loads(to_native(open_url(clientscopes_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout, validate_certs=self.validate_certs).read()))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not fetch list of %s clientscopes in client %s: %s" % (scope_type, client_id, str(e)))
|
||||
|
||||
def _decide_url_type_clientscope(self, client_id=None, scope_type="default"):
|
||||
"""Decides which url to use.
|
||||
:param scope_type: Either 'optional' or 'default'
|
||||
:param client_id: The client in which the clientscope resides.
|
||||
"""
|
||||
if client_id is None:
|
||||
if scope_type == "default":
|
||||
return URL_DEFAULT_CLIENTSCOPE
|
||||
if scope_type == "optional":
|
||||
return URL_OPTIONAL_CLIENTSCOPE
|
||||
else:
|
||||
if scope_type == "default":
|
||||
return URL_CLIENT_DEFAULT_CLIENTSCOPE
|
||||
if scope_type == "optional":
|
||||
return URL_CLIENT_OPTIONAL_CLIENTSCOPE
|
||||
|
||||
def add_default_clientscope(self, id, realm="master", client_id=None):
|
||||
"""Add a client scope as default either on realm or client level.
|
||||
|
||||
:param id: Client scope Id.
|
||||
:param realm: Realm in which the clientscope resides.
|
||||
:param client_id: The client in which the clientscope resides.
|
||||
"""
|
||||
self._action_type_clientscope(id, client_id, "default", realm, 'add')
|
||||
|
||||
def add_optional_clientscope(self, id, realm="master", client_id=None):
|
||||
"""Add a client scope as optional either on realm or client level.
|
||||
|
||||
:param id: Client scope Id.
|
||||
:param realm: Realm in which the clientscope resides.
|
||||
:param client_id: The client in which the clientscope resides.
|
||||
"""
|
||||
self._action_type_clientscope(id, client_id, "optional", realm, 'add')
|
||||
|
||||
def delete_default_clientscope(self, id, realm="master", client_id=None):
|
||||
"""Remove a client scope as default either on realm or client level.
|
||||
|
||||
:param id: Client scope Id.
|
||||
:param realm: Realm in which the clientscope resides.
|
||||
:param client_id: The client in which the clientscope resides.
|
||||
"""
|
||||
self._action_type_clientscope(id, client_id, "default", realm, 'delete')
|
||||
|
||||
def delete_optional_clientscope(self, id, realm="master", client_id=None):
|
||||
"""Remove a client scope as optional either on realm or client level.
|
||||
|
||||
:param id: Client scope Id.
|
||||
:param realm: Realm in which the clientscope resides.
|
||||
:param client_id: The client in which the clientscope resides.
|
||||
"""
|
||||
self._action_type_clientscope(id, client_id, "optional", realm, 'delete')
|
||||
|
||||
def _action_type_clientscope(self, id=None, client_id=None, scope_type="default", realm="master", action='add'):
|
||||
""" Delete or add a clientscope of type.
|
||||
:param id: The ID of the clientscope.
|
||||
:param client_id: The client in which the clientscope resides, or None for realm level.
|
||||
:param scope_type: 'default' or 'optional'
|
||||
:param realm: The realm in which the clientscope resides, default "master".
|
||||
"""
|
||||
cid = None if client_id is None else self.get_client_id(client_id=client_id, realm=realm)
|
||||
# should have a good cid by here.
|
||||
clientscope_type_url = self._decide_url_type_clientscope(client_id, scope_type).format(realm=realm, id=id, cid=cid, url=self.baseurl)
|
||||
try:
|
||||
method = 'PUT' if action == "add" else 'DELETE'
|
||||
return open_url(clientscope_type_url, method=method, http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs)
|
||||
|
||||
except Exception as e:
|
||||
place = 'realm' if client_id is None else 'client ' + client_id
|
||||
self.module.fail_json(msg="Unable to %s %s clientscope %s @ %s : %s" % (action, scope_type, id, place, str(e)))
|
||||
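A hedged usage sketch of the client scope helpers above; kc is assumed to be a KeycloakAPI instance, and the scope ID and client ID are hypothetical:

realm_defaults = kc.get_default_clientscopes(realm='master')
kc.add_default_clientscope('3d134ad2-...', realm='master')                          # realm level
kc.add_optional_clientscope('3d134ad2-...', realm='master', client_id='my-client')  # client level
kc.delete_optional_clientscope('3d134ad2-...', realm='master', client_id='my-client')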
|
||||
def create_clientsecret(self, id, realm="master"):
|
||||
""" Generate a new client secret by id
|
||||
|
||||
@@ -1249,7 +1388,7 @@ class KeycloakAPI(object):
|
||||
self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
|
||||
% (gid, realm, str(e)))
|
||||
|
||||
def get_group_by_name(self, name, realm="master"):
|
||||
def get_group_by_name(self, name, realm="master", parents=None):
|
||||
""" Fetch a keycloak group within a realm based on its name.
|
||||
|
||||
The Keycloak API does not allow filtering of the Groups resource by name.
|
||||
@@ -1259,10 +1398,19 @@ class KeycloakAPI(object):
|
||||
If the group does not exist, None is returned.
|
||||
:param name: Name of the group to fetch.
|
||||
:param realm: Realm in which the group resides; default 'master'
|
||||
:param parents: Optional list of parents when group to look for is a subgroup
|
||||
"""
|
||||
groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
|
||||
try:
|
||||
all_groups = self.get_groups(realm=realm)
|
||||
if parents:
|
||||
parent = self.get_subgroup_direct_parent(parents, realm)
|
||||
|
||||
if not parent:
|
||||
return None
|
||||
|
||||
all_groups = parent['subGroups']
|
||||
else:
|
||||
all_groups = self.get_groups(realm=realm)
|
||||
|
||||
for group in all_groups:
|
||||
if group['name'] == name:
|
||||
@@ -1274,6 +1422,102 @@ class KeycloakAPI(object):
|
||||
self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
|
||||
% (name, realm, str(e)))
|
||||
|
||||
def _get_normed_group_parent(self, parent):
|
||||
""" Converts parent dict information into a more easy to use form.
|
||||
|
||||
:param parent: parent describing dict
|
||||
"""
|
||||
if parent['id']:
|
||||
return (parent['id'], True)
|
||||
|
||||
return (parent['name'], False)
|
||||
|
||||
def get_subgroup_by_chain(self, name_chain, realm="master"):
|
||||
""" Access a subgroup API object by walking down a given name/id chain.
|
||||
|
||||
Groups can be given either by name or by ID; the first element
|
||||
must either be a top-level group or be given as an ID, and all parents must exist.
|
||||
|
||||
If the group cannot be found, None is returned.
|
||||
:param name_chain: Top-down ordered list of subgroup parents (IDs or names) plus the group's own name at the end
|
||||
:param realm: Realm in which the group resides; default 'master'
|
||||
"""
|
||||
cp = name_chain[0]
|
||||
|
||||
# for 1st parent in chain we must query the server
|
||||
cp, is_id = self._get_normed_group_parent(cp)
|
||||
|
||||
if is_id:
|
||||
tmp = self.get_group_by_groupid(cp, realm=realm)
|
||||
else:
|
||||
# given as name, assume top-level group
|
||||
tmp = self.get_group_by_name(cp, realm=realm)
|
||||
|
||||
if not tmp:
|
||||
return None
|
||||
|
||||
for p in name_chain[1:]:
|
||||
for sg in tmp['subGroups']:
|
||||
pv, is_id = self._get_normed_group_parent(p)
|
||||
|
||||
if is_id:
|
||||
cmpkey = "id"
|
||||
else:
|
||||
cmpkey = "name"
|
||||
|
||||
if pv == sg[cmpkey]:
|
||||
tmp = sg
|
||||
break
|
||||
|
||||
if not tmp:
|
||||
return None
|
||||
|
||||
return tmp
|
||||
|
||||
def get_subgroup_direct_parent(self, parents, realm="master", children_to_resolve=None):
|
||||
""" Get keycloak direct parent group API object for a given chain of parents.
|
||||
|
||||
To successfully work the API for subgroups we do not need
|
||||
to "walk the whole tree" for nested groups but only need to know
|
||||
the ID of the direct parent of the current subgroup. This
|
||||
method guarantees that information while doing
|
||||
as little work as possible.
|
||||
|
||||
Note that the given parent list may be incomplete at the
|
||||
upper levels as long as it starts with an ID instead of a name
|
||||
|
||||
If the group does not exist, None is returned.
|
||||
:param parents: Topdown ordered list of subgroup parents
|
||||
:param realm: Realm in which the group resides; default 'master'
|
||||
"""
|
||||
if children_to_resolve is None:
|
||||
# start recursion by reversing parents (in optimal cases
|
||||
# we don't need to walk the whole tree upwards)
|
||||
parents = list(reversed(parents))
|
||||
children_to_resolve = []
|
||||
|
||||
if not parents:
|
||||
# walk complete parents list to the top, all names, no id's,
|
||||
# try to resolve it assuming list is complete and 1st
|
||||
# element is a top-level group
|
||||
return self.get_subgroup_by_chain(list(reversed(children_to_resolve)), realm=realm)
|
||||
|
||||
cp = parents[0]
|
||||
unused, is_id = self._get_normed_group_parent(cp)
|
||||
|
||||
if is_id:
|
||||
# current parent is given as ID, we can stop walking
|
||||
# upwards searching for an entry point
|
||||
return self.get_subgroup_by_chain([cp] + list(reversed(children_to_resolve)), realm=realm)
|
||||
else:
|
||||
# current parent is given as name, it must be resolved
|
||||
# later, try next parent (recurse)
|
||||
children_to_resolve.append(cp)
|
||||
return self.get_subgroup_direct_parent(
|
||||
parents[1:],
|
||||
realm=realm, children_to_resolve=children_to_resolve
|
||||
)
|
||||
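Illustrative parent chains for the subgroup helpers above; kc is assumed to be a KeycloakAPI instance and all names and IDs are hypothetical. Each element carries an 'id' and a 'name' key and the list is ordered top-down:

parents = [{'id': None, 'name': 'engineering'}, {'id': None, 'name': 'backend'}]
kc.get_group_by_name('ci', realm='master', parents=parents)     # 'ci' nested below engineering/backend
# Starting the chain with an ID avoids walking the tree from the top level:
parents = [{'id': 'f2c97a3b-...', 'name': None}, {'id': None, 'name': 'backend'}]
kc.get_subgroup_direct_parent(parents, realm='master')          # resolves 'backend' under the given ID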
|
||||
def create_group(self, grouprep, realm="master"):
|
||||
""" Create a Keycloak group.
|
||||
|
||||
@@ -1288,6 +1532,34 @@ class KeycloakAPI(object):
|
||||
self.module.fail_json(msg="Could not create group %s in realm %s: %s"
|
||||
% (grouprep['name'], realm, str(e)))
|
||||
|
||||
def create_subgroup(self, parents, grouprep, realm="master"):
|
||||
""" Create a Keycloak subgroup.
|
||||
|
||||
:param parents: list of one or more parent groups
|
||||
:param grouprep: a GroupRepresentation of the group to be created. Must contain at minimum the field name.
|
||||
:return: HTTPResponse object on success
|
||||
"""
|
||||
parent_id = "---UNDETERMINED---"
|
||||
try:
|
||||
parent_id = self.get_subgroup_direct_parent(parents, realm)
|
||||
|
||||
if not parent_id:
|
||||
raise Exception(
|
||||
"Could not determine subgroup parent ID for given"
|
||||
" parent chain {0}. Assure that all parents exist"
|
||||
" already and the list is complete and properly"
|
||||
" ordered, starts with an ID or starts at the"
|
||||
" top level".format(parents)
|
||||
)
|
||||
|
||||
parent_id = parent_id["id"]
|
||||
url = URL_GROUP_CHILDREN.format(url=self.baseurl, realm=realm, groupid=parent_id)
|
||||
return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
|
||||
data=json.dumps(grouprep), validate_certs=self.validate_certs)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not create subgroup %s for parent group %s in realm %s: %s"
|
||||
% (grouprep['name'], parent_id, realm, str(e)))
|
||||
|
||||
def update_group(self, grouprep, realm="master"):
|
||||
""" Update an existing group.
|
||||
|
||||
@@ -1661,6 +1933,9 @@ class KeycloakAPI(object):
|
||||
data=json.dumps(updatedExec),
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs)
|
||||
except HTTPError as e:
|
||||
self.module.fail_json(msg="Unable to update execution '%s': %s: %s %s" %
|
||||
(flowAlias, repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(updatedExec)))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Unable to update executions %s: %s" % (updatedExec, str(e)))
|
||||
|
||||
@@ -1685,7 +1960,7 @@ class KeycloakAPI(object):
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Unable to add authenticationConfig %s: %s" % (executionId, str(e)))
|
||||
|
||||
def create_subflow(self, subflowName, flowAlias, realm='master'):
|
||||
def create_subflow(self, subflowName, flowAlias, realm='master', flowType='basic-flow'):
|
||||
""" Create new sublow on the flow
|
||||
|
||||
:param subflowName: name of the subflow to create
|
||||
@@ -1696,7 +1971,7 @@ class KeycloakAPI(object):
|
||||
newSubFlow = {}
|
||||
newSubFlow["alias"] = subflowName
|
||||
newSubFlow["provider"] = "registration-page-form"
|
||||
newSubFlow["type"] = "basic-flow"
|
||||
newSubFlow["type"] = flowType
|
||||
open_url(
|
||||
URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW.format(
|
||||
url=self.baseurl,
|
||||
@@ -1731,8 +2006,11 @@ class KeycloakAPI(object):
|
||||
data=json.dumps(newExec),
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs)
|
||||
except HTTPError as e:
|
||||
self.module.fail_json(msg="Unable to create new execution '%s' %s: %s: %s %s" %
|
||||
(flowAlias, execution["providerId"], repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(newExec)))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Unable to create new execution %s: %s" % (execution["provider"], str(e)))
|
||||
self.module.fail_json(msg="Unable to create new execution '%s' %s: %s" % (flowAlias, execution["providerId"], repr(e)))
|
||||
|
||||
def change_execution_priority(self, executionId, diff, realm='master'):
|
||||
""" Raise or lower execution priority of diff time
|
||||
@@ -2056,3 +2334,44 @@ class KeycloakAPI(object):
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Unable to delete component %s in realm %s: %s'
|
||||
% (cid, realm, str(e)))
|
||||
|
||||
def get_authz_authorization_scope_by_name(self, name, client_id, realm):
|
||||
url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm)
|
||||
search_url = "%s/search?name=%s" % (url, quote(name))
|
||||
|
||||
try:
|
||||
return json.loads(to_native(open_url(search_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
def create_authz_authorization_scope(self, payload, client_id, realm):
|
||||
"""Create an authorization scope for a Keycloak client"""
|
||||
url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm)
|
||||
|
||||
try:
|
||||
return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
|
||||
data=json.dumps(payload), validate_certs=self.validate_certs)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Could not create authorization scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
|
||||
|
||||
def update_authz_authorization_scope(self, payload, id, client_id, realm):
|
||||
"""Update an authorization scope for a Keycloak client"""
|
||||
url = URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm)
|
||||
|
||||
try:
|
||||
return open_url(url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
|
||||
data=json.dumps(payload), validate_certs=self.validate_certs)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Could not update authorization scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
|
||||
|
||||
def remove_authz_authorization_scope(self, id, client_id, realm):
|
||||
"""Remove an authorization scope from a Keycloak client"""
|
||||
url = URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm)
|
||||
|
||||
try:
|
||||
return open_url(url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Could not delete scope %s for client %s in realm %s: %s' % (id, client_id, realm, str(e)))
|
||||
|
||||
@@ -8,6 +8,7 @@ from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
|
||||
import time
|
||||
|
||||
|
||||
class iLORedfishUtils(RedfishUtils):
|
||||
@@ -228,3 +229,79 @@ class iLORedfishUtils(RedfishUtils):
|
||||
if not response['ret']:
|
||||
return response
|
||||
return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgrattr['mgr_attr_name']}
|
||||
|
||||
def get_server_poststate(self):
|
||||
# Get server details
|
||||
response = self.get_request(self.root_uri + self.systems_uri)
|
||||
if not response["ret"]:
|
||||
return response
|
||||
server_data = response["data"]
|
||||
|
||||
if "Hpe" in server_data["Oem"]:
|
||||
return {
|
||||
"ret": True,
|
||||
"server_poststate": server_data["Oem"]["Hpe"]["PostState"]
|
||||
}
|
||||
else:
|
||||
return {
|
||||
"ret": True,
|
||||
"server_poststate": server_data["Oem"]["Hp"]["PostState"]
|
||||
}
|
||||
|
||||
def wait_for_ilo_reboot_completion(self, polling_interval=60, max_polling_time=1800):
|
||||
# This method checks if OOB controller reboot is completed
|
||||
time.sleep(10)
|
||||
|
||||
# Check server poststate
|
||||
state = self.get_server_poststate()
|
||||
if not state["ret"]:
|
||||
return state
|
||||
|
||||
count = int(max_polling_time / polling_interval)
|
||||
times = 0
|
||||
|
||||
# When server is powered OFF
|
||||
pcount = 0
|
||||
while state["server_poststate"] in ["PowerOff", "Off"] and pcount < 5:
|
||||
time.sleep(10)
|
||||
state = self.get_server_poststate()
|
||||
if not state["ret"]:
|
||||
return state
|
||||
|
||||
if state["server_poststate"] not in ["PowerOff", "Off"]:
|
||||
break
|
||||
pcount = pcount + 1
|
||||
if state["server_poststate"] in ["PowerOff", "Off"]:
|
||||
return {
|
||||
"ret": False,
|
||||
"changed": False,
|
||||
"msg": "Server is powered OFF"
|
||||
}
|
||||
|
||||
# When server is not rebooting
|
||||
if state["server_poststate"] in ["InPostDiscoveryComplete", "FinishedPost"]:
|
||||
return {
|
||||
"ret": True,
|
||||
"changed": False,
|
||||
"msg": "Server is not rebooting"
|
||||
}
|
||||
|
||||
while state["server_poststate"] not in ["InPostDiscoveryComplete", "FinishedPost"] and count > times:
|
||||
state = self.get_server_poststate()
|
||||
if not state["ret"]:
|
||||
return state
|
||||
|
||||
if state["server_poststate"] in ["InPostDiscoveryComplete", "FinishedPost"]:
|
||||
return {
|
||||
"ret": True,
|
||||
"changed": True,
|
||||
"msg": "Server reboot is completed"
|
||||
}
|
||||
time.sleep(polling_interval)
|
||||
times = times + 1
|
||||
|
||||
return {
|
||||
"ret": False,
|
||||
"changed": False,
|
||||
"msg": "Server Reboot has failed, server state: {state} ".format(state=state)
|
||||
}
|
||||
|
||||
@@ -15,7 +15,7 @@ from ansible_collections.community.general.plugins.module_utils.version import L
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
import requests.exceptions
|
||||
import requests.exceptions # noqa: F401, pylint: disable=unused-import
|
||||
HAS_REQUESTS = True
|
||||
except ImportError:
|
||||
REQUESTS_IMP_ERR = traceback.format_exc()
|
||||
@@ -25,7 +25,7 @@ INFLUXDB_IMP_ERR = None
|
||||
try:
|
||||
from influxdb import InfluxDBClient
|
||||
from influxdb import __version__ as influxdb_version
|
||||
from influxdb import exceptions
|
||||
from influxdb import exceptions # noqa: F401, pylint: disable=unused-import
|
||||
HAS_INFLUXDB = True
|
||||
except ImportError:
|
||||
INFLUXDB_IMP_ERR = traceback.format_exc()
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import re
|
||||
import traceback
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
@@ -33,12 +34,14 @@ def gen_specs(**specs):
|
||||
specs.update({
|
||||
'bind_dn': dict(),
|
||||
'bind_pw': dict(default='', no_log=True),
|
||||
'ca_path': dict(type='path'),
|
||||
'dn': dict(required=True),
|
||||
'referrals_chasing': dict(type='str', default='anonymous', choices=['disabled', 'anonymous']),
|
||||
'server_uri': dict(default='ldapi:///'),
|
||||
'start_tls': dict(default=False, type='bool'),
|
||||
'validate_certs': dict(default=True, type='bool'),
|
||||
'sasl_class': dict(choices=['external', 'gssapi'], default='external', type='str'),
|
||||
'xorder_discovery': dict(choices=['enable', 'auto', 'disable'], default='auto', type='str'),
|
||||
})
|
||||
|
||||
return specs
|
||||
@@ -50,17 +53,22 @@ class LdapGeneric(object):
|
||||
self.module = module
|
||||
self.bind_dn = self.module.params['bind_dn']
|
||||
self.bind_pw = self.module.params['bind_pw']
|
||||
self.ca_path = self.module.params['ca_path']
|
||||
self.referrals_chasing = self.module.params['referrals_chasing']
|
||||
self.server_uri = self.module.params['server_uri']
|
||||
self.start_tls = self.module.params['start_tls']
|
||||
self.verify_cert = self.module.params['validate_certs']
|
||||
self.sasl_class = self.module.params['sasl_class']
|
||||
self.xorder_discovery = self.module.params['xorder_discovery']
|
||||
|
||||
# Establish connection
|
||||
self.connection = self._connect_to_ldap()
|
||||
|
||||
# Try to find the X_ORDERed version of the DN
|
||||
self.dn = self._find_dn()
|
||||
if self.xorder_discovery == "enable" or (self.xorder_discovery == "auto" and not self._xorder_dn()):
|
||||
# Try to find the X_ORDERed version of the DN
|
||||
self.dn = self._find_dn()
|
||||
else:
|
||||
self.dn = self.module.params['dn']
|
||||
|
||||
def fail(self, msg, exn):
|
||||
self.module.fail_json(
|
||||
@@ -91,6 +99,9 @@ class LdapGeneric(object):
|
||||
if not self.verify_cert:
|
||||
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
|
||||
|
||||
if self.ca_path:
|
||||
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, self.ca_path)
|
||||
|
||||
connection = ldap.initialize(self.server_uri)
|
||||
|
||||
if self.referrals_chasing == 'disabled':
|
||||
@@ -113,3 +124,8 @@ class LdapGeneric(object):
|
||||
self.fail("Cannot bind to the server.", e)
|
||||
|
||||
return connection
|
||||
|
||||
def _xorder_dn(self):
|
||||
# match X_ORDERed DNs
|
||||
regex = r"\w+=\{\d+\}.+"
|
||||
return re.match(regex, self.module.params['dn']) is not None
|
||||
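A small standalone check of the X_ORDERed DN regex used above (the DNs are hypothetical):

import re

regex = r"\w+=\{\d+\}.+"
print(bool(re.match(regex, "olcDatabase={1}mdb,cn=config")))  # True  -> dn is used as given when xorder_discovery=auto
print(bool(re.match(regex, "olcDatabase=mdb,cn=config")))     # False -> _find_dn() discovers the {n} prefix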
|
||||
@@ -60,7 +60,7 @@ class LXDClient(object):
|
||||
self.cert_file = cert_file
|
||||
self.key_file = key_file
|
||||
parts = generic_urlparse(urlparse(self.url))
|
||||
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
|
||||
ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
|
||||
ctx.load_cert_chain(cert_file, keyfile=key_file)
|
||||
self.connection = HTTPSConnection(parts.get('netloc'), context=ctx)
|
||||
elif url.startswith('unix:'):
|
||||
|
||||
@@ -26,6 +26,7 @@ class Response(object):
|
||||
def __init__(self):
|
||||
self.content = None
|
||||
self.status_code = None
|
||||
self.stderr = None
|
||||
|
||||
def json(self):
|
||||
return json.loads(self.content)
|
||||
@@ -75,6 +76,10 @@ def memset_api_call(api_key, api_method, payload=None):
|
||||
msg = "Memset API returned a {0} response ({1}, {2})." . format(response.status_code, response.json()['error_type'], response.json()['error'])
|
||||
else:
|
||||
msg = "Memset API returned an error ({0}, {1})." . format(response.json()['error_type'], response.json()['error'])
|
||||
except urllib_error.URLError as e:
|
||||
has_failed = True
|
||||
msg = "A URLError occurred ({0})." . format(type(e))
|
||||
response.stderr = "{0}" . format(e)
|
||||
|
||||
if msg is None:
|
||||
msg = response.json()
|
||||
|
||||
@@ -9,7 +9,8 @@ __metaclass__ = type
|
||||
|
||||
from ansible.module_utils.common.dict_transformations import dict_merge
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase, AnsibleModule
|
||||
# (TODO: remove AnsibleModule!) pylint: disable-next=unused-import
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase, AnsibleModule # noqa: F401
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyMixin
|
||||
|
||||
@@ -8,12 +8,13 @@ from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.module_helper import (
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.module_helper import ( # noqa: F401, pylint: disable=unused-import
|
||||
ModuleHelper, StateModuleHelper, CmdModuleHelper, CmdStateModuleHelper, AnsibleModule
|
||||
)
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin, ArgFormat
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.deco import cause_changes, module_fails_on_exception
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarMeta, VarDict
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin, ArgFormat # noqa: F401, pylint: disable=unused-import
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin # noqa: F401, pylint: disable=unused-import
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr # noqa: F401, pylint: disable=unused-import
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException # noqa: F401, pylint: disable=unused-import
|
||||
# pylint: disable-next=unused-import
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.deco import cause_changes, module_fails_on_exception # noqa: F401
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarMeta, VarDict # noqa: F401, pylint: disable=unused-import
|
||||
|
||||
@@ -16,7 +16,8 @@ __metaclass__ = type
|
||||
import abc
|
||||
import collections
|
||||
import json
|
||||
import os
|
||||
# (TODO: remove next line!)
|
||||
import os # noqa: F401, pylint: disable=unused-import
|
||||
import traceback
|
||||
|
||||
HPE_ONEVIEW_IMP_ERR = None
|
||||
|
||||
@@ -45,6 +45,8 @@ def render(to_render):
|
||||
"""Converts dictionary to OpenNebula template."""
|
||||
def recurse(to_render):
|
||||
for key, value in sorted(to_render.items()):
|
||||
if value is None:
|
||||
continue
|
||||
if isinstance(value, dict):
|
||||
yield '{0:}=[{1:}]'.format(key, ','.join(recurse(value)))
|
||||
continue
|
||||
@@ -52,6 +54,9 @@ def render(to_render):
|
||||
for item in value:
|
||||
yield '{0:}=[{1:}]'.format(key, ','.join(recurse(item)))
|
||||
continue
|
||||
if isinstance(value, str):
|
||||
yield '{0:}="{1:}"'.format(key, value.replace('\\', '\\\\').replace('"', '\\"'))
|
||||
continue
|
||||
yield '{0:}="{1:}"'.format(key, value)
|
||||
return '\n'.join(recurse(to_render))
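
A minimal sketch of what this render() helper produces for a hypothetical input dictionary (the values below are made up for illustration):

# Illustrative sketch, not part of the diff above; assumes render() as shown in this hunk.
sample = {
    'NAME': 'web-1',
    'MEMORY': 2048,
    'DISK': {'IMAGE': 'debian "stable"', 'SIZE': 10240},
}
print(render(sample))
# Keys are emitted sorted, nested dicts become KEY=[...], and strings are quoted with
# backslash/quote escaping:
# DISK=[IMAGE="debian \"stable\"",SIZE="10240"]
# MEMORY="2048"
# NAME="web-1"
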
|
||||
|
||||
|
||||
@@ -10,13 +10,14 @@ import logging
|
||||
import logging.config
|
||||
import os
|
||||
import tempfile
|
||||
from datetime import datetime
|
||||
# (TODO: remove next line!)
|
||||
from datetime import datetime # noqa: F401, pylint: disable=unused-import
|
||||
from operator import eq
|
||||
|
||||
import time
|
||||
|
||||
try:
|
||||
import yaml
|
||||
import yaml # noqa: F401, pylint: disable=unused-import
|
||||
|
||||
import oci
|
||||
from oci.constants import HEADER_NEXT_PAGE
|
||||
|
||||
@@ -32,12 +32,14 @@ def pipx_runner(module, command, **kwargs):
|
||||
state=fmt.as_map(_state_map),
|
||||
name=fmt.as_list(),
|
||||
name_source=fmt.as_func(fmt.unpack_args(lambda n, s: [s] if s else [n])),
|
||||
install_apps=fmt.as_bool("--include-apps"),
|
||||
install_deps=fmt.as_bool("--include-deps"),
|
||||
inject_packages=fmt.as_list(),
|
||||
force=fmt.as_bool("--force"),
|
||||
include_injected=fmt.as_bool("--include-injected"),
|
||||
index_url=fmt.as_opt_val('--index-url'),
|
||||
python=fmt.as_opt_val('--python'),
|
||||
system_site_packages=fmt.as_bool("--system-site-packages"),
|
||||
_list=fmt.as_fixed(['list', '--include-injected', '--json']),
|
||||
editable=fmt.as_bool("--editable"),
|
||||
pip_args=fmt.as_opt_val('--pip-args'),
|
||||
|
||||
@@ -7,9 +7,12 @@
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import atexit
|
||||
import time
|
||||
import re
|
||||
# (TODO: remove next line!)
|
||||
import atexit # noqa: F401, pylint: disable=unused-import
|
||||
# (TODO: remove next line!)
|
||||
import time # noqa: F401, pylint: disable=unused-import
|
||||
# (TODO: remove next line!)
|
||||
import re # noqa: F401, pylint: disable=unused-import
|
||||
import traceback
|
||||
|
||||
PROXMOXER_IMP_ERR = None
|
||||
@@ -22,7 +25,8 @@ except ImportError:
|
||||
|
||||
|
||||
from ansible.module_utils.basic import env_fallback, missing_required_lib
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
# (TODO: remove next line!)
|
||||
from ansible.module_utils.common.text.converters import to_native # noqa: F401, pylint: disable=unused-import
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
|
||||
|
||||
@@ -63,11 +63,7 @@ def puppet_runner(module):
|
||||
return cmd
|
||||
|
||||
def noop_func(v):
|
||||
_noop = cmd_runner_fmt.as_map({
|
||||
True: "--noop",
|
||||
False: "--no-noop",
|
||||
})
|
||||
return _noop(module.check_mode or v)
|
||||
return ["--noop"] if module.check_mode or v else ["--no-noop"]
|
||||
|
||||
_logdest_map = {
|
||||
"syslog": ["--logdest", "syslog"],
|
||||
@@ -96,6 +92,7 @@ def puppet_runner(module):
|
||||
confdir=cmd_runner_fmt.as_opt_val("--confdir"),
|
||||
environment=cmd_runner_fmt.as_opt_val("--environment"),
|
||||
tags=cmd_runner_fmt.as_func(lambda v: ["--tags", ",".join(v)]),
|
||||
skip_tags=cmd_runner_fmt.as_func(lambda v: ["--skip_tags", ",".join(v)]),
|
||||
certname=cmd_runner_fmt.as_opt_eq_val("--certname"),
|
||||
noop=cmd_runner_fmt.as_func(noop_func),
|
||||
use_srv_records=cmd_runner_fmt.as_map({
|
||||
|
||||
@@ -21,13 +21,15 @@ except ImportError:
|
||||
|
||||
HAS_PURITY_FB = True
|
||||
try:
|
||||
from purity_fb import PurityFb, FileSystem, FileSystemSnapshot, SnapshotSuffix, rest
|
||||
from purity_fb import PurityFb, FileSystem, FileSystemSnapshot, SnapshotSuffix, rest # noqa: F401, pylint: disable=unused-import
|
||||
except ImportError:
|
||||
HAS_PURITY_FB = False
|
||||
|
||||
from functools import wraps
|
||||
# (TODO: remove next line!)
|
||||
from functools import wraps # noqa: F401, pylint: disable=unused-import
|
||||
from os import environ
|
||||
from os import path
|
||||
# (TODO: remove next line!)
|
||||
from os import path # noqa: F401, pylint: disable=unused-import
|
||||
import platform
|
||||
|
||||
VERSION = 1.2
|
||||
|
||||
@@ -3163,3 +3163,89 @@ class RedfishUtils(object):
|
||||
if resp['ret'] and resp['changed']:
|
||||
resp['msg'] = 'Modified session service'
|
||||
return resp
|
||||
|
||||
def verify_bios_attributes(self, bios_attributes):
|
||||
# This method verifies BIOS attributes against the provided input
|
||||
server_bios = self.get_multi_bios_attributes()
|
||||
if server_bios["ret"] is False:
|
||||
return server_bios
|
||||
|
||||
bios_dict = {}
|
||||
wrong_param = {}
|
||||
|
||||
# Verify bios_attributes with BIOS settings available in the server
|
||||
for key, value in bios_attributes.items():
|
||||
if key in server_bios["entries"][0][1]:
|
||||
if server_bios["entries"][0][1][key] != value:
|
||||
bios_dict.update({key: value})
|
||||
else:
|
||||
wrong_param.update({key: value})
|
||||
|
||||
if wrong_param:
|
||||
return {
|
||||
"ret": False,
|
||||
"msg": "Wrong parameters are provided: %s" % wrong_param
|
||||
}
|
||||
|
||||
if bios_dict:
|
||||
return {
|
||||
"ret": False,
|
||||
"msg": "BIOS parameters are not matching: %s" % bios_dict
|
||||
}
|
||||
|
||||
return {
|
||||
"ret": True,
|
||||
"changed": False,
|
||||
"msg": "BIOS verification completed"
|
||||
}
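
A minimal sketch of the comparison this method performs, on hypothetical BIOS attribute dictionaries:

# Illustrative sketch, not part of the diff: the same split between mismatched and
# unknown keys that verify_bios_attributes() builds, using hypothetical data.
server_attrs = {"BootMode": "Uefi", "NumLock": "On"}         # attributes reported by the server
requested = {"BootMode": "Legacy", "TurboBoost": "Enabled"}  # attributes requested by the caller
mismatched = {k: v for k, v in requested.items() if k in server_attrs and server_attrs[k] != v}
unknown = {k: v for k, v in requested.items() if k not in server_attrs}
# mismatched -> {'BootMode': 'Legacy'}    (reported as "BIOS parameters are not matching")
# unknown    -> {'TurboBoost': 'Enabled'} (reported as "Wrong parameters are provided")
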
|
||||
|
||||
def enable_secure_boot(self):
|
||||
# This function enables Secure Boot on an OOB controller
|
||||
|
||||
response = self.get_request(self.root_uri + self.systems_uri)
|
||||
if response["ret"] is False:
|
||||
return response
|
||||
|
||||
server_details = response["data"]
|
||||
secure_boot_url = server_details["SecureBoot"]["@odata.id"]
|
||||
|
||||
response = self.get_request(self.root_uri + secure_boot_url)
|
||||
if response["ret"] is False:
|
||||
return response
|
||||
|
||||
body = {}
|
||||
body["SecureBootEnable"] = True
|
||||
|
||||
return self.patch_request(self.root_uri + secure_boot_url, body, check_pyld=True)
|
||||
|
||||
def get_hpe_thermal_config(self):
result = {}
key = "Thermal"
# Go through list
for chassis_uri in self.chassis_uri_list:
response = self.get_request(self.root_uri + chassis_uri)
if response['ret'] is False:
return response
result['ret'] = True
data = response['data']
oem = data.get('Oem')
hpe = oem.get('Hpe')
thermal_config = hpe.get('ThermalConfiguration')
result["current_thermal_config"] = thermal_config
return result

def get_hpe_fan_percent_min(self):
result = {}
key = "Thermal"
# Go through list
for chassis_uri in self.chassis_uri_list:
response = self.get_request(self.root_uri + chassis_uri)
if response['ret'] is False:
return response
result['ret'] = True
data = response['data']
oem = data.get('Oem')
hpe = oem.get('Hpe')
fan_percent_min_config = hpe.get('FanPercentMinimum')
result["fan_percent_min"] = fan_percent_min_config
return result
@@ -81,12 +81,18 @@ def api_request(module, endpoint, data=None, method="GET"):
|
||||
|
||||
try:
|
||||
content = response.read()
|
||||
json_response = json.loads(content)
|
||||
return json_response, info
|
||||
|
||||
if not content:
|
||||
return None, info
|
||||
else:
|
||||
json_response = json.loads(content)
|
||||
return json_response, info
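
The guard above matters because, presumably, some Rundeck endpoints return an empty body; a minimal sketch of the behaviour it avoids:

# Illustrative sketch, not part of the diff: json.loads() on an empty body raises
# ValueError, so empty content is returned as None instead of being parsed.
import json

def parse_body(content):
    if not content:             # e.g. an HTTP 204 or otherwise empty body
        return None
    return json.loads(content)

assert parse_body(b'') is None
assert parse_body('{"ok": true}') == {"ok": True}
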
|
||||
except AttributeError as error:
|
||||
module.fail_json(msg="Rundeck API request error",
|
||||
exception=to_native(error),
|
||||
execution_info=info)
|
||||
module.fail_json(
|
||||
msg="Rundeck API request error",
|
||||
exception=to_native(error),
|
||||
execution_info=info
|
||||
)
|
||||
except ValueError as error:
|
||||
module.fail_json(
|
||||
msg="No valid JSON response",
|
||||
|
||||
@@ -13,10 +13,10 @@ __metaclass__ = type
|
||||
from ansible.module_utils.six import raise_from
|
||||
|
||||
try:
|
||||
from ansible.module_utils.compat.version import LooseVersion
|
||||
from ansible.module_utils.compat.version import LooseVersion # noqa: F401, pylint: disable=unused-import
|
||||
except ImportError:
|
||||
try:
|
||||
from distutils.version import LooseVersion
|
||||
from distutils.version import LooseVersion # noqa: F401, pylint: disable=unused-import
|
||||
except ImportError as exc:
|
||||
msg = 'To use this plugin or module with ansible-core 2.11, you need to use Python < 3.12 with distutils.version present'
|
||||
raise_from(ImportError(msg), exc)
|
||||
|
||||
@@ -19,6 +19,13 @@ description:
|
||||
- If waiting for migrations is not desired, simply just poll until
|
||||
port 3000 if available or asinfo -v status returns ok
|
||||
author: "Albert Autin (@Alb0t)"
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
host:
|
||||
description:
|
||||
|
||||
@@ -18,6 +18,13 @@ author:
|
||||
short_description: Notify airbrake about app deployments
|
||||
description:
|
||||
- Notify airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)).
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
project_id:
|
||||
description:
|
||||
|
||||
@@ -16,6 +16,13 @@ module: aix_devices
|
||||
short_description: Manages AIX devices
|
||||
description:
|
||||
- This module discovers, defines, removes and modifies attributes of AIX devices.
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
attributes:
|
||||
description:
|
||||
@@ -101,7 +108,7 @@ EXAMPLES = r'''
|
||||
device: en1
|
||||
attributes:
|
||||
mtu: 900
|
||||
arp: off
|
||||
arp: 'off'
|
||||
state: available
|
||||
|
||||
- name: Configure IP, netmask and set en1 up.
|
||||
|
||||
@@ -19,6 +19,13 @@ description:
|
||||
- This module creates, removes, mount and unmount LVM and NFS file system for
|
||||
AIX using C(/etc/filesystems).
|
||||
- For LVM file systems is possible to resize a file system.
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
account_subsystem:
|
||||
description:
|
||||
|
||||
@@ -11,11 +11,18 @@ __metaclass__ = type
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
author:
|
||||
- Joris Weijters (@molekuul)
|
||||
- Joris Weijters (@molekuul)
|
||||
module: aix_inittab
|
||||
short_description: Manages the inittab on AIX
|
||||
description:
|
||||
- Manages the inittab on AIX.
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
|
||||
@@ -11,11 +11,18 @@ __metaclass__ = type
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
author:
|
||||
- Kairo Araujo (@kairoaraujo)
|
||||
- Kairo Araujo (@kairoaraujo)
|
||||
module: aix_lvg
|
||||
short_description: Manage LVM volume groups on AIX
|
||||
description:
|
||||
- This module creates, removes or resize volume groups on AIX LVM.
|
||||
- This module creates, removes or resize volume groups on AIX LVM.
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
force:
|
||||
description:
|
||||
|
||||
@@ -17,6 +17,13 @@ module: aix_lvol
|
||||
short_description: Configure AIX LVM logical volumes
|
||||
description:
|
||||
- This module creates, removes or resizes AIX logical volumes. Inspired by lvol module.
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
vg:
|
||||
description:
|
||||
|
||||
@@ -20,6 +20,13 @@ seealso:
|
||||
- name: API documentation
|
||||
description: Documentation for Alerta API
|
||||
link: https://docs.alerta.io/api/reference.html#customers
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
customer:
|
||||
description:
|
||||
|
||||
@@ -31,6 +31,11 @@ short_description: Create, Start, Stop, Restart or Terminate an Instance in ECS;
|
||||
description:
|
||||
- Create, start, stop, restart, modify or terminate ecs instances.
|
||||
- Add or remove ecs instances to/from security group.
|
||||
attributes:
|
||||
check_mode:
|
||||
support: none
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
@@ -252,6 +257,7 @@ requirements:
|
||||
- "footmark >= 1.19.0"
|
||||
extends_documentation_fragment:
|
||||
- community.general.alicloud
|
||||
- community.general.attributes
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
||||
@@ -33,6 +33,11 @@ description:
|
||||
The module must be called from within the ECS instance itself.
|
||||
- This module was called C(ali_instance_facts) before Ansible 2.9. The usage did not change.
|
||||
|
||||
attributes:
|
||||
check_mode:
|
||||
version_added: 3.3.0
|
||||
# This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
|
||||
|
||||
options:
|
||||
name_prefix:
|
||||
description:
|
||||
|
||||
@@ -22,6 +22,13 @@ author:
|
||||
- Marius Rieder (@jiuka)
|
||||
- David Wittman (@DavidWittman)
|
||||
- Gabe Mulley (@mulby)
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: full
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
|
||||
@@ -26,6 +26,13 @@ notes:
|
||||
If that one also fails, the module will fail.
|
||||
requirements:
|
||||
- Ansible 2.9, ansible-base 2.10, or ansible-core 2.11 or newer
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: none
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
type:
|
||||
description:
|
||||
|
||||
@@ -20,6 +20,13 @@ description:
|
||||
status page has to be enabled and accessible, as this module relies on parsing
|
||||
this page. This module supports ansible check_mode, and requires BeautifulSoup
|
||||
python module.
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
balancer_url_suffix:
|
||||
type: str
|
||||
|
||||
@@ -19,6 +19,13 @@ author:
|
||||
short_description: Enables/disables a module of the Apache2 webserver
|
||||
description:
|
||||
- Enables or disables a specified module of the Apache2 webserver.
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
name:
|
||||
type: str
|
||||
|
||||
@@ -19,6 +19,13 @@ short_description: Manages apk packages
|
||||
description:
|
||||
- Manages I(apk) packages for Alpine Linux.
|
||||
author: "Kevin Brebanov (@kbrebanov)"
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
available:
|
||||
description:
|
||||
|
||||
@@ -19,6 +19,13 @@ description:
|
||||
notes:
|
||||
- This module works on ALT based distros.
|
||||
- Does NOT support checkmode, due to a limitation in apt-repo tool.
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: none
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
repo:
|
||||
description:
|
||||
|
||||
@@ -17,11 +17,17 @@ module: apt_rpm
|
||||
short_description: APT-RPM package manager
|
||||
description:
|
||||
- Manages packages with I(apt-rpm). Both low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries required.
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: none
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
package:
|
||||
description:
|
||||
- list of packages to install, upgrade or remove.
|
||||
required: true
|
||||
- List of packages to install, upgrade, or remove.
|
||||
aliases: [ name, pkg ]
|
||||
type: list
|
||||
elements: str
|
||||
@@ -33,9 +39,30 @@ options:
|
||||
type: str
|
||||
update_cache:
|
||||
description:
|
||||
- update the package database first C(apt-get update).
|
||||
- Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step.
|
||||
- Default is not to update the cache.
|
||||
type: bool
|
||||
default: false
|
||||
clean:
|
||||
description:
|
||||
- Run the equivalent of C(apt-get clean) to clear out the local repository of retrieved package files. It removes everything but
|
||||
the lock file from C(/var/cache/apt/archives/) and C(/var/cache/apt/archives/partial/).
|
||||
- Can be run as part of the package installation (clean runs before install) or as a separate step.
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 6.5.0
|
||||
dist_upgrade:
|
||||
description:
|
||||
- If true performs an C(apt-get dist-upgrade) to upgrade system.
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 6.5.0
|
||||
update_kernel:
|
||||
description:
|
||||
- If true performs an C(update-kernel) to upgrade kernel packages.
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 6.5.0
|
||||
author:
|
||||
- Evgenii Terechkov (@evgkrsk)
|
||||
'''
|
||||
@@ -69,6 +96,16 @@ EXAMPLES = '''
|
||||
name: bar
|
||||
state: present
|
||||
update_cache: true
|
||||
|
||||
- name: Run the equivalent of "apt-get clean" as a separate step
|
||||
community.general.apt_rpm:
|
||||
clean: true
|
||||
|
||||
- name: Perform cache update and complete system upgrade (includes kernel)
|
||||
community.general.apt_rpm:
|
||||
update_cache: true
|
||||
dist_upgrade: true
|
||||
update_kernel: true
|
||||
'''
|
||||
|
||||
import os
|
||||
@@ -77,6 +114,8 @@ from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
APT_PATH = "/usr/bin/apt-get"
|
||||
RPM_PATH = "/usr/bin/rpm"
|
||||
APT_GET_ZERO = "\n0 upgraded, 0 newly installed"
|
||||
UPDATE_KERNEL_ZERO = "\nTry to install new kernel "
|
||||
|
||||
|
||||
def query_package(module, name):
|
||||
@@ -97,14 +136,39 @@ def query_package_provides(module, name):
|
||||
|
||||
|
||||
def update_package_db(module):
|
||||
rc, out, err = module.run_command("%s update" % APT_PATH)
|
||||
rc, update_out, err = module.run_command([APT_PATH, "update"], check_rc=True, environ_update={"LANG": "C"})
|
||||
return (False, update_out)
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(msg="could not update package db: %s" % err)
|
||||
|
||||
def dir_size(module, path):
|
||||
total_size = 0
|
||||
for path, dirs, files in os.walk(path):
|
||||
for f in files:
|
||||
total_size += os.path.getsize(os.path.join(path, f))
|
||||
return total_size
|
||||
|
||||
|
||||
def clean(module):
|
||||
t = dir_size(module, "/var/cache/apt/archives")
|
||||
rc, out, err = module.run_command([APT_PATH, "clean"], check_rc=True)
|
||||
return (t != dir_size(module, "/var/cache/apt/archives"), out)
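
A minimal sketch of the change-detection idea used by clean() above, comparing the cache directory's total size before and after; the path here is hypothetical:

# Illustrative sketch, not part of the diff: report "changed" only if the directory
# actually shrank or grew.
import os

def tree_size(path):
    total = 0
    for root, dirs, files in os.walk(path):
        for name in files:
            total += os.path.getsize(os.path.join(root, name))
    return total

before = tree_size("/tmp/example-cache")
# ... the cleanup command would run here ...
changed = tree_size("/tmp/example-cache") != before
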
|
||||
|
||||
|
||||
def dist_upgrade(module):
|
||||
rc, out, err = module.run_command([APT_PATH, "-y", "dist-upgrade"], check_rc=True, environ_update={"LANG": "C"})
|
||||
return (APT_GET_ZERO not in out, out)
|
||||
|
||||
|
||||
def update_kernel(module):
|
||||
rc, out, err = module.run_command(["/usr/sbin/update-kernel", "-y"], check_rc=True, environ_update={"LANG": "C"})
|
||||
return (UPDATE_KERNEL_ZERO not in out, out)
|
||||
|
||||
|
||||
def remove_packages(module, packages):
|
||||
|
||||
if packages is None:
|
||||
return (False, "Empty package list")
|
||||
|
||||
remove_c = 0
|
||||
# Using a for loop in case of error, we can report the package that failed
|
||||
for package in packages:
|
||||
@@ -112,7 +176,7 @@ def remove_packages(module, packages):
|
||||
if not query_package(module, package):
|
||||
continue
|
||||
|
||||
rc, out, err = module.run_command("%s -y remove %s" % (APT_PATH, package))
|
||||
rc, out, err = module.run_command("%s -y remove %s" % (APT_PATH, package), environ_update={"LANG": "C"})
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(msg="failed to remove %s: %s" % (package, err))
|
||||
@@ -120,13 +184,16 @@ def remove_packages(module, packages):
|
||||
remove_c += 1
|
||||
|
||||
if remove_c > 0:
|
||||
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
|
||||
return (True, "removed %s package(s)" % remove_c)
|
||||
|
||||
module.exit_json(changed=False, msg="package(s) already absent")
|
||||
return (False, "package(s) already absent")
|
||||
|
||||
|
||||
def install_packages(module, pkgspec):
|
||||
|
||||
if pkgspec is None:
|
||||
return (False, "Empty package list")
|
||||
|
||||
packages = ""
|
||||
for package in pkgspec:
|
||||
if not query_package_provides(module, package):
|
||||
@@ -134,7 +201,7 @@ def install_packages(module, pkgspec):
|
||||
|
||||
if len(packages) != 0:
|
||||
|
||||
rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages))
|
||||
rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages), environ_update={"LANG": "C"})
|
||||
|
||||
installed = True
|
||||
for packages in pkgspec:
|
||||
@@ -145,9 +212,9 @@ def install_packages(module, pkgspec):
|
||||
if rc or not installed:
|
||||
module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err))
|
||||
else:
|
||||
module.exit_json(changed=True, msg="%s present(s)" % packages)
|
||||
return (True, "%s present(s)" % packages)
|
||||
else:
|
||||
module.exit_json(changed=False)
|
||||
return (False, "Nothing to install")
|
||||
|
||||
|
||||
def main():
|
||||
@@ -155,7 +222,10 @@ def main():
|
||||
argument_spec=dict(
|
||||
state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed']),
|
||||
update_cache=dict(type='bool', default=False),
|
||||
package=dict(type='list', elements='str', required=True, aliases=['name', 'pkg']),
|
||||
clean=dict(type='bool', default=False),
|
||||
dist_upgrade=dict(type='bool', default=False),
|
||||
update_kernel=dict(type='bool', default=False),
|
||||
package=dict(type='list', elements='str', aliases=['name', 'pkg']),
|
||||
),
|
||||
)
|
||||
|
||||
@@ -163,17 +233,39 @@ def main():
|
||||
module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")
|
||||
|
||||
p = module.params
|
||||
modified = False
|
||||
output = ""
|
||||
|
||||
if p['update_cache']:
|
||||
update_package_db(module)
|
||||
|
||||
if p['clean']:
|
||||
(m, out) = clean(module)
|
||||
modified = modified or m
|
||||
|
||||
if p['dist_upgrade']:
|
||||
(m, out) = dist_upgrade(module)
|
||||
modified = modified or m
|
||||
output += out
|
||||
|
||||
if p['update_kernel']:
|
||||
(m, out) = update_kernel(module)
|
||||
modified = modified or m
|
||||
output += out
|
||||
|
||||
packages = p['package']
|
||||
|
||||
if p['state'] in ['installed', 'present']:
|
||||
install_packages(module, packages)
|
||||
(m, out) = install_packages(module, packages)
|
||||
modified = modified or m
|
||||
output += out
|
||||
|
||||
elif p['state'] in ['absent', 'removed']:
|
||||
remove_packages(module, packages)
|
||||
if p['state'] in ['absent', 'removed']:
|
||||
(m, out) = remove_packages(module, packages)
|
||||
modified = modified or m
|
||||
output += out
|
||||
|
||||
# Return total modification status and output of all commands
|
||||
module.exit_json(changed=modified, msg=output)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
@@ -14,11 +14,18 @@ DOCUMENTATION = r'''
|
||||
---
|
||||
module: archive
|
||||
short_description: Creates a compressed archive of one or more files or trees
|
||||
extends_documentation_fragment: files
|
||||
extends_documentation_fragment:
|
||||
- files
|
||||
- community.general.attributes
|
||||
description:
|
||||
- Creates or extends an archive.
|
||||
- The source and archive are on the remote host, and the archive I(is not) copied to the local host.
|
||||
- Source files can be deleted after archival by specifying I(remove=True).
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
path:
|
||||
description:
|
||||
@@ -191,6 +198,10 @@ from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible.module_utils.common.text.converters import to_bytes, to_native
|
||||
from ansible.module_utils import six
|
||||
|
||||
try: # python 3.2+
|
||||
from zipfile import BadZipFile # type: ignore[attr-defined]
|
||||
except ImportError: # older python
|
||||
from zipfile import BadZipfile as BadZipFile
|
||||
|
||||
LZMA_IMP_ERR = None
|
||||
if six.PY3:
|
||||
@@ -527,7 +538,7 @@ class ZipArchive(Archive):
|
||||
archive = zipfile.ZipFile(_to_native_ascii(path), 'r')
|
||||
checksums = set((info.filename, info.CRC) for info in archive.infolist())
|
||||
archive.close()
|
||||
except zipfile.BadZipfile:
|
||||
except BadZipFile:
|
||||
checksums = set()
|
||||
return checksums
|
||||
|
||||
@@ -597,7 +608,13 @@ class TarArchive(Archive):
|
||||
# The python implementations of gzip, bz2, and lzma do not support restoring compressed files
|
||||
# to their original names so only file checksum is returned
|
||||
f = self._open_compressed_file(_to_native_ascii(path), 'r')
|
||||
checksums = set([(b'', crc32(f.read()))])
|
||||
checksum = 0
|
||||
while True:
|
||||
chunk = f.read(16 * 1024 * 1024)
|
||||
if not chunk:
|
||||
break
|
||||
checksum = crc32(chunk, checksum)
|
||||
checksums = set([(b'', checksum)])
|
||||
f.close()
|
||||
except Exception:
|
||||
checksums = set()
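
The replacement above computes the CRC incrementally so large archives are not read into memory at once; a minimal sketch showing that chunked crc32 matches a one-shot call:

# Illustrative sketch, not part of the diff: zlib.crc32 accepts a running value,
# so feeding the data in chunks yields the same checksum as a single call.
from zlib import crc32

payload = b"example data " * 500000
one_shot = crc32(payload)

running = 0
chunk_size = 1024 * 1024
for i in range(0, len(payload), chunk_size):
    running = crc32(payload[i:i + chunk_size], running)

assert running == one_shot
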
|
||||
|
||||
@@ -22,6 +22,13 @@ notes:
|
||||
requirements:
|
||||
- atomic
|
||||
- "python >= 2.6"
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: none
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
backend:
|
||||
description:
|
||||
|
||||
@@ -22,6 +22,13 @@ notes:
|
||||
requirements:
|
||||
- atomic
|
||||
- python >= 2.6
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: none
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
revision:
|
||||
description:
|
||||
|
||||
@@ -22,6 +22,13 @@ notes:
|
||||
requirements:
|
||||
- atomic
|
||||
- python >= 2.6
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: none
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
backend:
|
||||
description:
|
||||
|
||||
@@ -15,9 +15,16 @@ module: awall
|
||||
short_description: Manage awall policies
|
||||
author: Ted Trask (@tdtrask) <ttrask01@yahoo.com>
|
||||
description:
|
||||
- This modules allows for enable/disable/activate of I(awall) policies.
|
||||
- This module allows for enable/disable/activate of C(awall) policies.
|
||||
- Alpine Wall (I(awall)) generates a firewall configuration from the enabled policy files
|
||||
and activates the configuration on the system.
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
@@ -34,8 +41,11 @@ options:
|
||||
description:
|
||||
- Activate the new firewall rules.
|
||||
- Can be run with other steps or on its own.
|
||||
- Idempotency is affected if I(activate=true), as the module will always report a changed state.
|
||||
type: bool
|
||||
default: false
|
||||
notes:
|
||||
- At least one of I(name) and I(activate) is required.
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
|
||||
@@ -17,6 +17,13 @@ description:
|
||||
- Create, delete or activate ZFS boot environments.
|
||||
- Mount and unmount ZFS boot environments.
|
||||
author: Adam Števko (@xen0l)
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
|
||||
@@ -14,6 +14,13 @@ description:
|
||||
- The M(community.general.bearychat) module sends notifications to U(https://bearychat.com)
|
||||
via the Incoming Robot integration.
|
||||
author: "Jiangge Zhang (@tonyseek)"
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: none
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
url:
|
||||
type: str
|
||||
|
||||
@@ -15,6 +15,13 @@ author: "Hagai Kariti (@hkariti)"
|
||||
short_description: Notify BigPanda about deployments
|
||||
description:
|
||||
- Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls.
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
component:
|
||||
type: str
|
||||
|
||||
@@ -18,6 +18,12 @@ author:
|
||||
- Evgeniy Krysanov (@catcombo)
|
||||
extends_documentation_fragment:
|
||||
- community.general.bitbucket
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
repository:
|
||||
description:
|
||||
|
||||
@@ -18,6 +18,12 @@ author:
|
||||
- Evgeniy Krysanov (@catcombo)
|
||||
extends_documentation_fragment:
|
||||
- community.general.bitbucket
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
repository:
|
||||
description:
|
||||
|
||||
@@ -19,8 +19,14 @@ author:
|
||||
- Evgeniy Krysanov (@catcombo)
|
||||
extends_documentation_fragment:
|
||||
- community.general.bitbucket
|
||||
- community.general.attributes
|
||||
requirements:
|
||||
- paramiko
|
||||
- paramiko
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
repository:
|
||||
description:
|
||||
|
||||
@@ -18,6 +18,12 @@ author:
|
||||
- Evgeniy Krysanov (@catcombo)
|
||||
extends_documentation_fragment:
|
||||
- community.general.bitbucket
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
repository:
|
||||
description:
|
||||
|
||||
@@ -16,6 +16,13 @@ short_description: Manage bower packages with bower
|
||||
description:
|
||||
- Manage bower packages with bower
|
||||
author: "Michael Warkentin (@mwarkentin)"
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: none
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
name:
|
||||
type: str
|
||||
|
||||
plugins/modules/btrfs_info.py (new file, 109 lines)
@@ -0,0 +1,109 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: btrfs_info
|
||||
short_description: Query btrfs filesystem info
|
||||
version_added: "6.6.0"
|
||||
description: Query status of available btrfs filesystems, including uuid, label, subvolumes and mountpoints.
|
||||
|
||||
author:
|
||||
- Gregory Furlong (@gnfzdz)
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
- community.general.attributes.info_module
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
|
||||
- name: Query information about mounted btrfs filesystems
|
||||
community.general.btrfs_info:
|
||||
register: my_btrfs_info
|
||||
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
|
||||
filesystems:
|
||||
description: Summaries of the current state for all btrfs filesystems found on the target host.
|
||||
type: list
|
||||
elements: dict
|
||||
returned: success
|
||||
contains:
|
||||
uuid:
|
||||
description: A unique identifier assigned to the filesystem.
|
||||
type: str
|
||||
sample: 96c9c605-1454-49b8-a63a-15e2584c208e
|
||||
label:
|
||||
description: An optional label assigned to the filesystem.
|
||||
type: str
|
||||
sample: Tank
|
||||
devices:
|
||||
description: A list of devices assigned to the filesystem.
|
||||
type: list
|
||||
sample:
|
||||
- /dev/sda1
|
||||
- /dev/sdb1
|
||||
default_subvolume:
|
||||
description: The id of the filesystem's default subvolume.
|
||||
type: int
|
||||
sample: 5
|
||||
subvolumes:
|
||||
description: A list of dicts containing metadata for all of the filesystem's subvolumes.
|
||||
type: list
|
||||
elements: dict
|
||||
contains:
|
||||
id:
|
||||
description: An identifier assigned to the subvolume, unique within the containing filesystem.
|
||||
type: int
|
||||
sample: 256
|
||||
mountpoints:
|
||||
description: Paths where the subvolume is mounted on the targeted host.
|
||||
type: list
|
||||
sample: ['/home']
|
||||
parent:
|
||||
description: The identifier of this subvolume's parent.
|
||||
type: int
|
||||
sample: 5
|
||||
path:
|
||||
description: The full path of the subvolume relative to the btrfs filesystem's root.
|
||||
type: str
|
||||
sample: /@home
|
||||
|
||||
'''
|
||||
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
def run_module():
|
||||
module_args = dict()
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=module_args,
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
provider = BtrfsFilesystemsProvider(module)
|
||||
filesystems = [x.get_summary() for x in provider.get_filesystems()]
|
||||
result = {
|
||||
"filesystems": filesystems,
|
||||
}
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
def main():
|
||||
run_module()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
plugins/modules/btrfs_subvolume.py (new file, 682 lines)
@@ -0,0 +1,682 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: btrfs_subvolume
|
||||
short_description: Manage btrfs subvolumes
|
||||
version_added: "6.6.0"
|
||||
|
||||
description: Creates, updates and deletes btrfs subvolumes and snapshots.
|
||||
|
||||
options:
|
||||
automount:
|
||||
description:
|
||||
- Allow the module to temporarily mount the targeted btrfs filesystem in order to validate the current state and make any required changes.
|
||||
type: bool
|
||||
default: false
|
||||
default:
|
||||
description:
|
||||
- Make the subvolume specified by I(name) the filesystem's default subvolume.
|
||||
type: bool
|
||||
default: false
|
||||
filesystem_device:
|
||||
description:
|
||||
- A block device contained within the btrfs filesystem to be targeted.
|
||||
- Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
|
||||
type: path
|
||||
filesystem_label:
|
||||
description:
|
||||
- A descriptive label assigned to the btrfs filesystem to be targeted.
|
||||
- Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
|
||||
type: str
|
||||
filesystem_uuid:
|
||||
description:
|
||||
- A unique identifier assigned to the btrfs filesystem to be targeted.
|
||||
- Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- Name of the subvolume/snapshot to be targeted.
|
||||
required: true
|
||||
type: str
|
||||
recursive:
|
||||
description:
|
||||
- When true, indicates that parent/child subvolumes should be created/removed as necessary
|
||||
to complete the operation (for I(state=present) and I(state=absent) respectively).
|
||||
type: bool
|
||||
default: false
|
||||
snapshot_source:
|
||||
description:
|
||||
- Identifies the source subvolume for the created snapshot.
|
||||
- Infers that the created subvolume is a snapshot.
|
||||
type: str
|
||||
snapshot_conflict:
|
||||
description:
|
||||
- Policy defining behavior when a subvolume already exists at the path of the requested snapshot.
|
||||
- C(skip) - Create a snapshot only if a subvolume does not yet exist at the target location, otherwise indicate that no change is required.
|
||||
Warning, this option does not yet verify that the target subvolume was generated from a snapshot of the requested source.
|
||||
- C(clobber) - If a subvolume already exists at the requested location, delete it first.
|
||||
This option is not idempotent and will result in a new snapshot being generated on every execution.
|
||||
- C(error) - If a subvolume already exists at the requested location, return an error.
|
||||
This option is not idempotent and will result in an error on replay of the module.
|
||||
type: str
|
||||
choices: [ skip, clobber, error ]
|
||||
default: skip
|
||||
state:
|
||||
description:
|
||||
- Indicates the current state of the targeted subvolume.
|
||||
type: str
|
||||
choices: [ absent, present ]
|
||||
default: present
|
||||
|
||||
notes:
|
||||
- If any or all of the options I(filesystem_device), I(filesystem_label) or I(filesystem_uuid) parameters are provided, there is expected
|
||||
to be a matching btrfs filesystem. If none are provided and only a single btrfs filesystem exists or only a single
|
||||
btrfs filesystem is mounted, that filesystem will be used; otherwise, the module will take no action and return an error.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
|
||||
attributes:
|
||||
check_mode:
|
||||
support: partial
|
||||
details:
|
||||
- In some scenarios it may erroneously report intermediate subvolumes being created.
|
||||
After mounting, if a directory like file is found where the subvolume would have been created, the operation is skipped.
|
||||
diff_mode:
|
||||
support: none
|
||||
|
||||
author:
|
||||
- Gregory Furlong (@gnfzdz)
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
|
||||
- name: Create a @home subvolume under the root subvolume
|
||||
community.general.btrfs_subvolume:
|
||||
name: /@home
|
||||
device: /dev/vda2
|
||||
|
||||
- name: Remove the @home subvolume if it exists
|
||||
community.general.btrfs_subvolume:
|
||||
name: /@home
|
||||
state: absent
|
||||
device: /dev/vda2
|
||||
|
||||
- name: Create a snapshot of the root subvolume named @
|
||||
community.general.btrfs_subvolume:
|
||||
name: /@
|
||||
snapshot_source: /
|
||||
device: /dev/vda2
|
||||
|
||||
- name: Create a snapshot of the root subvolume and make it the new default subvolume
|
||||
community.general.btrfs_subvolume:
|
||||
name: /@
|
||||
snapshot_source: /
|
||||
default: Yes
|
||||
device: /dev/vda2
|
||||
|
||||
- name: Create a snapshot of the /@ subvolume and recursively creating intermediate subvolumes as required
|
||||
community.general.btrfs_subvolume:
|
||||
name: /@snapshots/@2022_06_09
|
||||
snapshot_source: /@
|
||||
recursive: True
|
||||
device: /dev/vda2
|
||||
|
||||
- name: Remove the /@ subvolume and recursively delete child subvolumes as required
|
||||
community.general.btrfs_subvolume:
|
||||
name: /@snapshots/@2022_06_09
|
||||
snapshot_source: /@
|
||||
recursive: True
|
||||
device: /dev/vda2
|
||||
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
|
||||
filesystem:
|
||||
description:
|
||||
- A summary of the final state of the targeted btrfs filesystem.
|
||||
type: dict
|
||||
returned: success
|
||||
contains:
|
||||
uuid:
|
||||
description: A unique identifier assigned to the filesystem.
|
||||
returned: success
|
||||
type: str
|
||||
sample: 96c9c605-1454-49b8-a63a-15e2584c208e
|
||||
label:
|
||||
description: An optional label assigned to the filesystem.
|
||||
returned: success
|
||||
type: str
|
||||
sample: Tank
|
||||
devices:
|
||||
description: A list of devices assigned to the filesystem.
|
||||
returned: success
|
||||
type: list
|
||||
sample:
|
||||
- /dev/sda1
|
||||
- /dev/sdb1
|
||||
default_subvolume:
|
||||
description: The ID of the filesystem's default subvolume.
|
||||
returned: success and if filesystem is mounted
|
||||
type: int
|
||||
sample: 5
|
||||
subvolumes:
|
||||
description: A list of dicts containing metadata for all of the filesystem's subvolumes.
|
||||
returned: success and if filesystem is mounted
|
||||
type: list
|
||||
elements: dict
|
||||
contains:
|
||||
id:
|
||||
description: An identifier assigned to the subvolume, unique within the containing filesystem.
|
||||
type: int
|
||||
sample: 256
|
||||
mountpoints:
|
||||
description: Paths where the subvolume is mounted on the targeted host.
|
||||
type: list
|
||||
sample: ['/home']
|
||||
parent:
|
||||
description: The identifier of this subvolume's parent.
|
||||
type: int
|
||||
sample: 5
|
||||
path:
|
||||
description: The full path of the subvolume relative to the btrfs filesystem's root.
|
||||
type: str
|
||||
sample: /@home
|
||||
|
||||
modifications:
|
||||
description:
|
||||
- A list where each element describes a change made to the target btrfs filesystem.
|
||||
type: list
|
||||
returned: Success
|
||||
elements: str
|
||||
|
||||
target_subvolume_id:
|
||||
description:
|
||||
- The ID of the subvolume specified with the I(name) parameter, either pre-existing or created as part of module execution.
|
||||
type: int
|
||||
sample: 257
|
||||
returned: Success and subvolume exists after module execution
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider, BtrfsCommands, BtrfsModuleException
|
||||
from ansible_collections.community.general.plugins.module_utils.btrfs import normalize_subvolume_path
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
|
||||
class BtrfsSubvolumeModule(object):
|
||||
|
||||
__BTRFS_ROOT_SUBVOLUME = '/'
|
||||
__BTRFS_ROOT_SUBVOLUME_ID = 5
|
||||
__BTRFS_SUBVOLUME_INODE_NUMBER = 256
|
||||
|
||||
__CREATE_SUBVOLUME_OPERATION = 'create'
|
||||
__CREATE_SNAPSHOT_OPERATION = 'snapshot'
|
||||
__DELETE_SUBVOLUME_OPERATION = 'delete'
|
||||
__SET_DEFAULT_SUBVOLUME_OPERATION = 'set-default'
|
||||
|
||||
__UNKNOWN_SUBVOLUME_ID = '?'
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
self.__btrfs_api = BtrfsCommands(module)
|
||||
self.__provider = BtrfsFilesystemsProvider(module)
|
||||
|
||||
# module parameters
|
||||
name = self.module.params['name']
|
||||
self.__name = normalize_subvolume_path(name) if name is not None else None
|
||||
self.__state = self.module.params['state']
|
||||
|
||||
self.__automount = self.module.params['automount']
|
||||
self.__default = self.module.params['default']
|
||||
self.__filesystem_device = self.module.params['filesystem_device']
|
||||
self.__filesystem_label = self.module.params['filesystem_label']
|
||||
self.__filesystem_uuid = self.module.params['filesystem_uuid']
|
||||
self.__recursive = self.module.params['recursive']
|
||||
self.__snapshot_conflict = self.module.params['snapshot_conflict']
|
||||
snapshot_source = self.module.params['snapshot_source']
|
||||
self.__snapshot_source = normalize_subvolume_path(snapshot_source) if snapshot_source is not None else None
|
||||
|
||||
# execution state
|
||||
self.__filesystem = None
|
||||
self.__required_mounts = []
|
||||
self.__unit_of_work = []
|
||||
self.__completed_work = []
|
||||
self.__temporary_mounts = dict()
|
||||
|
||||
def run(self):
|
||||
error = None
|
||||
try:
|
||||
self.__load_filesystem()
|
||||
self.__prepare_unit_of_work()
|
||||
|
||||
if not self.module.check_mode:
|
||||
# check required mounts & mount
|
||||
if len(self.__unit_of_work) > 0:
|
||||
self.__execute_unit_of_work()
|
||||
self.__filesystem.refresh()
|
||||
else:
|
||||
# check required mounts
|
||||
self.__completed_work.extend(self.__unit_of_work)
|
||||
except Exception as e:
|
||||
error = e
|
||||
finally:
|
||||
self.__cleanup_mounts()
|
||||
if self.__filesystem is not None:
|
||||
self.__filesystem.refresh_mountpoints()
|
||||
|
||||
return (error, self.get_results())
|
||||
|
||||
# Identify the targeted filesystem and obtain the current state
|
||||
def __load_filesystem(self):
|
||||
if self.__has_filesystem_criteria():
|
||||
filesystem = self.__find_matching_filesytem()
|
||||
else:
|
||||
filesystem = self.__find_default_filesystem()
|
||||
|
||||
# The filesystem must be mounted to obtain the current state (subvolumes, default, etc)
|
||||
if not filesystem.is_mounted():
|
||||
if not self.__automount:
|
||||
raise BtrfsModuleException(
|
||||
"Target filesystem uuid=%s is not currently mounted and automount=False."
|
||||
"Mount explicitly before module execution or pass automount=True" % filesystem.uuid)
|
||||
elif self.module.check_mode:
|
||||
# TODO is failing the module an appropriate outcome in this scenario?
|
||||
raise BtrfsModuleException(
|
||||
"Target filesystem uuid=%s is not currently mounted. Unable to validate the current"
|
||||
"state while running with check_mode=True" % filesystem.uuid)
|
||||
else:
|
||||
self.__mount_subvolume_id_to_tempdir(filesystem, self.__BTRFS_ROOT_SUBVOLUME_ID)
|
||||
filesystem.refresh()
|
||||
self.__filesystem = filesystem
|
||||
|
||||
def __has_filesystem_criteria(self):
|
||||
return self.__filesystem_uuid is not None or self.__filesystem_label is not None or self.__filesystem_device is not None
|
||||
|
||||
def __find_matching_filesytem(self):
|
||||
criteria = {
|
||||
'uuid': self.__filesystem_uuid,
|
||||
'label': self.__filesystem_label,
|
||||
'device': self.__filesystem_device,
|
||||
}
|
||||
return self.__provider.get_matching_filesystem(criteria)
|
||||
|
||||
def __find_default_filesystem(self):
|
||||
filesystems = self.__provider.get_filesystems()
|
||||
filesystem = None
|
||||
|
||||
if len(filesystems) == 1:
|
||||
filesystem = filesystems[0]
|
||||
else:
|
||||
mounted_filesystems = [x for x in filesystems if x.is_mounted()]
|
||||
if len(mounted_filesystems) == 1:
|
||||
filesystem = mounted_filesystems[0]
|
||||
|
||||
if filesystem is not None:
|
||||
return filesystem
|
||||
else:
|
||||
raise BtrfsModuleException(
|
||||
"Failed to automatically identify targeted filesystem. "
|
||||
"No explicit device indicated and found %d available filesystems." % len(filesystems)
|
||||
)
|
||||
|
||||
# Prepare unit of work
|
||||
def __prepare_unit_of_work(self):
|
||||
if self.__state == "present":
|
||||
if self.__snapshot_source is None:
|
||||
self.__prepare_subvolume_present()
|
||||
else:
|
||||
self.__prepare_snapshot_present()
|
||||
|
||||
if self.__default:
|
||||
self.__prepare_set_default()
|
||||
elif self.__state == "absent":
|
||||
self.__prepare_subvolume_absent()
|
||||
|
||||
def __prepare_subvolume_present(self):
|
||||
subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
|
||||
if subvolume is None:
|
||||
self.__prepare_before_create_subvolume(self.__name)
|
||||
self.__stage_create_subvolume(self.__name)
|
||||
|
||||
def __prepare_before_create_subvolume(self, subvolume_name):
|
||||
closest_parent = self.__filesystem.get_nearest_subvolume(subvolume_name)
|
||||
self.__stage_required_mount(closest_parent)
|
||||
if self.__recursive:
|
||||
self.__prepare_create_intermediates(closest_parent, subvolume_name)
|
||||
|
||||
def __prepare_create_intermediates(self, closest_subvolume, subvolume_name):
|
||||
relative_path = closest_subvolume.get_child_relative_path(self.__name)
|
||||
missing_subvolumes = [x for x in relative_path.split(os.path.sep) if len(x) > 0]
|
||||
if len(missing_subvolumes) > 1:
|
||||
current = closest_subvolume.path
|
||||
for s in missing_subvolumes[:-1]:
|
||||
separator = os.path.sep if current[-1] != os.path.sep else ""
|
||||
current = current + separator + s
|
||||
self.__stage_create_subvolume(current, True)
|
||||
|
||||
def __prepare_snapshot_present(self):
|
||||
source_subvolume = self.__filesystem.get_subvolume_by_name(self.__snapshot_source)
|
||||
subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
|
||||
subvolume_exists = subvolume is not None
|
||||
|
||||
if subvolume_exists:
|
||||
if self.__snapshot_conflict == "skip":
|
||||
# No change required
|
||||
return
|
||||
elif self.__snapshot_conflict == "error":
|
||||
raise BtrfsModuleException("Target subvolume=%s already exists and snapshot_conflict='error'" % self.__name)
|
||||
|
||||
if source_subvolume is None:
|
||||
raise BtrfsModuleException("Source subvolume %s does not exist" % self.__snapshot_source)
|
||||
elif subvolume is not None and source_subvolume.id == subvolume.id:
|
||||
raise BtrfsModuleException("Snapshot source and target are the same.")
|
||||
else:
|
||||
self.__stage_required_mount(source_subvolume)
|
||||
|
||||
if subvolume_exists and self.__snapshot_conflict == "clobber":
|
||||
self.__prepare_delete_subvolume_tree(subvolume)
|
||||
elif not subvolume_exists:
|
||||
self.__prepare_before_create_subvolume(self.__name)
|
||||
|
||||
self.__stage_create_snapshot(source_subvolume, self.__name)
|
||||
|
||||
def __prepare_subvolume_absent(self):
|
||||
subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
|
||||
if subvolume is not None:
|
||||
self.__prepare_delete_subvolume_tree(subvolume)
|
||||
|
||||
def __prepare_delete_subvolume_tree(self, subvolume):
|
||||
if subvolume.is_filesystem_root():
|
||||
raise BtrfsModuleException("Can not delete the filesystem's root subvolume")
|
||||
if not self.__recursive and len(subvolume.get_child_subvolumes()) > 0:
|
||||
raise BtrfsModuleException("Subvolume targeted for deletion %s has children and recursive=False."
|
||||
"Either explicitly delete the child subvolumes first or pass "
|
||||
"parameter recursive=True." % subvolume.path)
|
||||
|
||||
self.__stage_required_mount(subvolume.get_parent_subvolume())
|
||||
queue = self.__prepare_recursive_delete_order(subvolume) if self.__recursive else [subvolume]
|
||||
# prepare unit of work
|
||||
for s in queue:
|
||||
if s.is_mounted():
|
||||
# TODO potentially unmount the subvolume if automount=True ?
|
||||
raise BtrfsModuleException("Can not delete mounted subvolume=%s" % s.path)
|
||||
if s.is_filesystem_default():
|
||||
self.__stage_set_default_subvolume(self.__BTRFS_ROOT_SUBVOLUME, self.__BTRFS_ROOT_SUBVOLUME_ID)
|
||||
self.__stage_delete_subvolume(s)
|
||||
|
||||
def __prepare_recursive_delete_order(self, subvolume):
|
||||
"""Return the subvolume and all descendents as a list, ordered so that descendents always occur before their ancestors"""
|
||||
pending = [subvolume]
|
||||
ordered = []
|
||||
while len(pending) > 0:
|
||||
next = pending.pop()
|
||||
ordered.append(next)
|
||||
pending.extend(next.get_child_subvolumes())
|
||||
ordered.reverse() # reverse to ensure children are deleted before their parent
|
||||
return ordered
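
A minimal sketch of the ordering this helper produces, using a hypothetical parent-to-children map instead of subvolume objects:

# Illustrative sketch, not part of the diff: collecting each node before its children
# and then reversing the list guarantees every child appears before its parent.
tree = {
    '/@': ['/@/var', '/@/home'],
    '/@/var': ['/@/var/log'],
    '/@/home': [],
    '/@/var/log': [],
}
pending = ['/@']
ordered = []
while pending:
    nxt = pending.pop()
    ordered.append(nxt)
    pending.extend(tree[nxt])
ordered.reverse()
# ordered -> ['/@/var/log', '/@/var', '/@/home', '/@']
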
|
||||
|
||||
def __prepare_set_default(self):
|
||||
subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
|
||||
subvolume_id = subvolume.id if subvolume is not None else None
|
||||
|
||||
if self.__filesystem.default_subvolid != subvolume_id:
|
||||
self.__stage_set_default_subvolume(self.__name, subvolume_id)
|
||||
|
||||
# Stage operations to the unit of work
|
||||
def __stage_required_mount(self, subvolume):
|
||||
if subvolume.get_mounted_path() is None:
|
||||
if self.__automount:
|
||||
self.__required_mounts.append(subvolume)
|
||||
else:
|
||||
raise BtrfsModuleException("The requested changes will require the subvolume '%s' to be mounted, but automount=False" % subvolume.path)
|
||||
|
||||
def __stage_create_subvolume(self, subvolume_path, intermediate=False):
|
||||
"""
|
||||
Add required creation of an intermediate subvolume to the unit of work
|
||||
If intermediate is true, the action will be skipped if a directory like file is found at target
|
||||
after mounting a parent subvolume
|
||||
"""
|
||||
self.__unit_of_work.append({
|
||||
'action': self.__CREATE_SUBVOLUME_OPERATION,
|
||||
'target': subvolume_path,
|
||||
'intermediate': intermediate,
|
||||
})
|
||||
|
||||
def __stage_create_snapshot(self, source_subvolume, target_subvolume_path):
|
||||
"""Add creation of a snapshot from source to target to the unit of work"""
|
||||
self.__unit_of_work.append({
|
||||
'action': self.__CREATE_SNAPSHOT_OPERATION,
|
||||
'source': source_subvolume.path,
|
||||
'source_id': source_subvolume.id,
|
||||
'target': target_subvolume_path,
|
||||
})
|
||||
|
||||
def __stage_delete_subvolume(self, subvolume):
|
||||
"""Add deletion of the target subvolume to the unit of work"""
|
||||
self.__unit_of_work.append({
|
||||
'action': self.__DELETE_SUBVOLUME_OPERATION,
|
||||
'target': subvolume.path,
|
||||
'target_id': subvolume.id,
|
||||
})
|
||||
|
||||
def __stage_set_default_subvolume(self, subvolume_path, subvolume_id=None):
|
||||
"""Add update of the filesystem's default subvolume to the unit of work"""
|
||||
self.__unit_of_work.append({
|
||||
'action': self.__SET_DEFAULT_SUBVOLUME_OPERATION,
|
||||
'target': subvolume_path,
|
||||
'target_id': subvolume_id,
|
||||
})
|
||||
|
||||
    # Execute the unit of work
    def __execute_unit_of_work(self):
        self.__check_required_mounts()
        for op in self.__unit_of_work:
            if op['action'] == self.__CREATE_SUBVOLUME_OPERATION:
                self.__execute_create_subvolume(op)
            elif op['action'] == self.__CREATE_SNAPSHOT_OPERATION:
                self.__execute_create_snapshot(op)
            elif op['action'] == self.__DELETE_SUBVOLUME_OPERATION:
                self.__execute_delete_subvolume(op)
            elif op['action'] == self.__SET_DEFAULT_SUBVOLUME_OPERATION:
                self.__execute_set_default_subvolume(op)
            else:
                raise ValueError("Unknown operation type '%s'" % op['action'])

    def __execute_create_subvolume(self, operation):
        target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target'])
        if not self.__is_existing_directory_like(target_mounted_path):
            self.__btrfs_api.subvolume_create(target_mounted_path)
            self.__completed_work.append(operation)

    def __execute_create_snapshot(self, operation):
        source_subvolume = self.__filesystem.get_subvolume_by_name(operation['source'])
        source_mounted_path = source_subvolume.get_mounted_path()
        target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target'])

        self.__btrfs_api.subvolume_snapshot(source_mounted_path, target_mounted_path)
        self.__completed_work.append(operation)

    def __execute_delete_subvolume(self, operation):
        target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target'])
        self.__btrfs_api.subvolume_delete(target_mounted_path)
        self.__completed_work.append(operation)

    def __execute_set_default_subvolume(self, operation):
        target = operation['target']
        target_id = operation['target_id']

        if target_id is None:
            target_subvolume = self.__filesystem.get_subvolume_by_name(target)

            if target_subvolume is None:
                self.__filesystem.refresh()  # the target may have been created earlier in module execution
                target_subvolume = self.__filesystem.get_subvolume_by_name(target)

            if target_subvolume is None:
                raise BtrfsModuleException("Failed to find existing subvolume '%s'" % target)
            else:
                target_id = target_subvolume.id

        self.__btrfs_api.subvolume_set_default(self.__filesystem.get_any_mountpoint(), target_id)
        self.__completed_work.append(operation)
    def __is_existing_directory_like(self, path):
        return os.path.exists(path) and (
            os.path.isdir(path) or
            os.stat(path).st_ino == self.__BTRFS_SUBVOLUME_INODE_NUMBER
        )

    def __check_required_mounts(self):
        filtered = self.__filter_child_subvolumes(self.__required_mounts)
        if len(filtered) > 0:
            for subvolume in filtered:
                self.__mount_subvolume_id_to_tempdir(self.__filesystem, subvolume.id)
            self.__filesystem.refresh_mountpoints()

    def __filter_child_subvolumes(self, subvolumes):
        """Filter the provided list of subvolumes to remove any that are a child of another item in the list"""
        filtered = []
        last = None
        ordered = sorted(subvolumes, key=lambda x: x.path)
        for next in ordered:
            if last is None or not next.path[0:len(last)] == last:
                filtered.append(next)
                last = next.path
        return filtered
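    # Worked example (added for illustration; paths are hypothetical): with the
    # sorted input ['/@data', '/@data/nested', '/@snapshots'], the prefix check
    # above keeps '/@data' and '/@snapshots' and drops '/@data/nested', so only
    # the outermost subvolumes are mounted; once a parent is mounted, its child
    # subvolumes are reachable as directories beneath it.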
    # Create/cleanup temporary mountpoints
    def __mount_subvolume_id_to_tempdir(self, filesystem, subvolid):
        # this check should be redundant
        if self.module.check_mode or not self.__automount:
            raise BtrfsModuleException("Unable to temporarily mount required subvolumes "
                                       "with automount=%s and check_mode=%s" % (self.__automount, self.module.check_mode))

        cache_key = "%s:%d" % (filesystem.uuid, subvolid)
        # The subvolume was already mounted, so return the existing temporary mountpoint
        if cache_key in self.__temporary_mounts:
            return self.__temporary_mounts[cache_key]

        device = filesystem.devices[0]
        mountpoint = tempfile.mkdtemp(dir="/tmp")
        self.__temporary_mounts[cache_key] = mountpoint

        mount = self.module.get_bin_path("mount", required=True)
        command = "%s -o noatime,subvolid=%d %s %s" % (mount,
                                                       subvolid,
                                                       device,
                                                       mountpoint)
        result = self.module.run_command(command, check_rc=True)

        return mountpoint

    def __cleanup_mounts(self):
        for key in self.__temporary_mounts.keys():
            self.__cleanup_mount(self.__temporary_mounts[key])

    def __cleanup_mount(self, mountpoint):
        umount = self.module.get_bin_path("umount", required=True)
        result = self.module.run_command("%s %s" % (umount, mountpoint))
        if result[0] == 0:
            rmdir = self.module.get_bin_path("rmdir", required=True)
            self.module.run_command("%s %s" % (rmdir, mountpoint))
    # Format and return results
    def get_results(self):
        target = self.__filesystem.get_subvolume_by_name(self.__name)
        return dict(
            changed=len(self.__completed_work) > 0,
            filesystem=self.__filesystem.get_summary(),
            modifications=self.__get_formatted_modifications(),
            target_subvolume_id=(target.id if target is not None else None)
        )

    def __get_formatted_modifications(self):
        return [self.__format_operation_result(op) for op in self.__completed_work]

    def __format_operation_result(self, operation):
        action_type = operation['action']
        if action_type == self.__CREATE_SUBVOLUME_OPERATION:
            return self.__format_create_subvolume_result(operation)
        elif action_type == self.__CREATE_SNAPSHOT_OPERATION:
            return self.__format_create_snapshot_result(operation)
        elif action_type == self.__DELETE_SUBVOLUME_OPERATION:
            return self.__format_delete_subvolume_result(operation)
        elif action_type == self.__SET_DEFAULT_SUBVOLUME_OPERATION:
            return self.__format_set_default_subvolume_result(operation)
        else:
            raise ValueError("Unknown operation type '%s'" % operation['action'])

    def __format_create_subvolume_result(self, operation):
        target = operation['target']
        target_subvolume = self.__filesystem.get_subvolume_by_name(target)
        target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID
        return "Created subvolume '%s' (%s)" % (target, target_id)

    def __format_create_snapshot_result(self, operation):
        source = operation['source']
        source_id = operation['source_id']

        target = operation['target']
        target_subvolume = self.__filesystem.get_subvolume_by_name(target)
        target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID
        return "Created snapshot '%s' (%s) from '%s' (%s)" % (target, target_id, source, source_id)

    def __format_delete_subvolume_result(self, operation):
        target = operation['target']
        target_id = operation['target_id']
        return "Deleted subvolume '%s' (%s)" % (target, target_id)

    def __format_set_default_subvolume_result(self, operation):
        target = operation['target']
        if 'target_id' in operation:
            target_id = operation['target_id']
        else:
            target_subvolume = self.__filesystem.get_subvolume_by_name(target)
            target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID
        return "Updated default subvolume to '%s' (%s)" % (target, target_id)
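    # Example shape of the value returned by get_results() (values are
    # hypothetical, added for illustration):
    #   {
    #       'changed': True,
    #       'filesystem': {...},  # whatever get_summary() reports
    #       'modifications': ["Created subvolume '/@example' (258)"],
    #       'target_subvolume_id': 258,
    #   }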

def run_module():
    module_args = dict(
        automount=dict(type='bool', required=False, default=False),
        default=dict(type='bool', required=False, default=False),
        filesystem_device=dict(type='path', required=False),
        filesystem_label=dict(type='str', required=False),
        filesystem_uuid=dict(type='str', required=False),
        name=dict(type='str', required=True),
        recursive=dict(type='bool', default=False),
        state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
        snapshot_source=dict(type='str', required=False),
        snapshot_conflict=dict(type='str', required=False, default='skip', choices=['skip', 'clobber', 'error'])
    )

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )

    subvolume = BtrfsSubvolumeModule(module)
    error, result = subvolume.run()
    if error is not None:
        module.fail_json(str(error), **result)
    else:
        module.exit_json(**result)


def main():
    run_module()


if __name__ == '__main__':
    main()
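For orientation, the block below is a minimal, self-contained sketch of the stage-then-execute ("unit of work") pattern the module code above follows: operations are queued as plain dicts keyed by an 'action' tag and then dispatched in order. The action names, handlers, and paths are illustrative assumptions, not the module's actual API.

# Minimal sketch of the stage/execute pattern (illustrative only).
CREATE = 'create'
DELETE = 'delete'

unit_of_work = []
completed_work = []


def stage(action, target):
    # Queue an operation; nothing touches the system at this point.
    unit_of_work.append({'action': action, 'target': target})


def execute():
    # Dispatch queued operations in order, recording each one that ran.
    for op in unit_of_work:
        if op['action'] == CREATE:
            print("would create subvolume %s" % op['target'])
        elif op['action'] == DELETE:
            print("would delete subvolume %s" % op['target'])
        else:
            raise ValueError("Unknown operation type '%s'" % op['action'])
        completed_work.append(op)


stage(CREATE, '/@example')
stage(DELETE, '/@old')
execute()
print("changed=%s" % (len(completed_work) > 0))

Keeping staging separate from execution also makes it easy to report the queued operations without performing them, which is the usual way to support a check-mode style dry run.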
@@ -15,6 +15,13 @@ module: bundler
 short_description: Manage Ruby Gem dependencies with Bundler
 description:
   - Manage installation and Gem version dependencies for Ruby using the Bundler gem
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
 options:
   executable:
     type: str

@@ -17,6 +17,13 @@ author:
 short_description: Deploy software (or files) from bzr branches
 description:
   - Manage I(bzr) branches to deploy files or software.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
 options:
   name:
     description:

@@ -16,6 +16,13 @@ short_description: Send a message to Campfire
 description:
   - Send a message to Campfire.
   - Messages with newlines will result in a "Paste" message being sent.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
 options:
   subscription:
     type: str

@@ -14,6 +14,13 @@ module: capabilities
 short_description: Manage Linux capabilities
 description:
   - This module manipulates files privileges using the Linux capabilities(7) system.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
 options:
   path:
     description:

@@ -17,6 +17,13 @@ version_added: 4.3.0
 description:
   - Manage Rust packages with cargo.
 author: "Radek Sprta (@radek-sprta)"
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
 options:
   name:
     description: