Mirror of https://github.com/ansible-collections/community.general.git, synced 2026-05-01 02:43:16 +00:00.

Compare commits: 203 commits.
| Author | SHA1 | Date |
|---|---|---|
|  | 1357e47f92 |  |
|  | cc6d4209d4 |  |
|  | 01134d4625 |  |
|  | 16089ab2de |  |
|  | 0da4607c7f |  |
|  | 46cbf60c2d |  |
|  | e7d86f5add |  |
|  | ee4c76fa43 |  |
|  | 095d09ec12 |  |
|  | d9ed00fb12 |  |
|  | 94bb204e29 |  |
|  | 9b1f450102 |  |
|  | dd3bc067f5 |  |
|  | 15916cd61f |  |
|  | 02a257569a |  |
|  | c486e42faa |  |
|  | 7f7e622262 |  |
|  | e1551b3d34 |  |
|  | a3bc0535a5 |  |
|  | 9e59665bee |  |
|  | 30b29d24ab |  |
|  | d5efc3f13a |  |
|  | d237faa447 |  |
|  | 65ce979d4a |  |
|  | 938367b67a |  |
|  | 578df1b054 |  |
|  | 0b494a5d2d |  |
|  | f92043c7f5 |  |
|  | 839336c21d |  |
|  | 2a5e4b8a46 |  |
|  | fae1dbc198 |  |
|  | dc7aba3bca |  |
|  | 03c66d0db4 |  |
|  | 5707fc1b33 |  |
|  | 830e3988aa |  |
|  | f4ccef462e |  |
|  | cd26dd6b0a |  |
|  | 009bfd786d |  |
|  | 9f08cc11a8 |  |
|  | cae4bc80af |  |
|  | 058462973e |  |
|  | 23dc90812f |  |
|  | 97e17fcee2 |  |
|  | d75d5f67f7 |  |
|  | c597a75372 |  |
|  | 031a3b56bf |  |
|  | 5fd87a1811 |  |
|  | e463f94ce9 |  |
|  | 4205a94f0c |  |
|  | bcfcdd1052 |  |
|  | 2638413475 |  |
|  | 9ea55a81b3 |  |
|  | 32c1e39df1 |  |
|  | 7bb23e9649 |  |
|  | eeedd97eec |  |
|  | 4e1ba6af46 |  |
|  | aa1ae85b55 |  |
|  | 4d2873afd2 |  |
|  | 43c1d9f66c |  |
|  | 201f49acd9 |  |
|  | 6f0d46d0a2 |  |
|  | 92529d5605 |  |
|  | f635ed6c2e |  |
|  | 0e0fce54ab |  |
|  | caae74abd8 |  |
|  | c01a16c1bc |  |
|  | 15c832b8d5 |  |
|  | b104c580e1 |  |
|  | a4e1beefe3 |  |
|  | b0898e5c51 |  |
|  | eca1d0191f |  |
|  | 08036e7f65 |  |
|  | 3c6c9ba425 |  |
|  | 3eeb0009cb |  |
|  | c92215d339 |  |
|  | 1a4ae5e9aa |  |
|  | 9ecfa370ff |  |
|  | f3828ebdd7 |  |
|  | 7b71ffa1c4 |  |
|  | d7494bb9e4 |  |
|  | 443a1c3ed7 |  |
|  | 177e327c24 |  |
|  | 6ff4b479ab |  |
|  | feb8d421dc |  |
|  | d633f6c4c8 |  |
|  | 3405b24fa6 |  |
|  | 540c92fa62 |  |
|  | 8cf2358fb1 |  |
|  | 2d92ab91eb |  |
|  | d03abae7a1 |  |
|  | d7efa7eb13 |  |
|  | a9a9e723e5 |  |
|  | 3d87fbf6f9 |  |
|  | 9e6960639f |  |
|  | 7074a52721 |  |
|  | d18afe6746 |  |
|  | ee03af599c |  |
|  | 42c643ddda |  |
|  | a7d89fc1ee |  |
|  | d4477615a9 |  |
|  | c84738324e |  |
|  | 0c3ae48f85 |  |
|  | a3c711491a |  |
|  | d6a57882d2 |  |
|  | edbef2266d |  |
|  | 2b88ee01d3 |  |
|  | afd24ccd35 |  |
|  | 26ada26df1 |  |
|  | 59999a89f1 |  |
|  | e1f4be1e01 |  |
|  | e7e2f095ee |  |
|  | 45200fc233 |  |
|  | 154d1b7024 |  |
|  | 541fcec900 |  |
|  | dc6ccbea63 |  |
|  | 6cb044ac13 |  |
|  | af78b2068a |  |
|  | 73569b1c36 |  |
|  | 5914c1df8e |  |
|  | f847531a35 |  |
|  | df01cde23d |  |
|  | cdd9ced441 |  |
|  | dfb61b283d |  |
|  | fe7b151a26 |  |
|  | 9b983fba86 |  |
|  | 2d27dbd9ea |  |
|  | 55cbccf0fc |  |
|  | 675860c392 |  |
|  | 5e8914e00c |  |
|  | c0230342b4 |  |
|  | ebf15447f0 |  |
|  | 86d10f53fd |  |
|  | d679f51018 |  |
|  | e9e494e1ff |  |
|  | bf90b4e88a |  |
|  | 874d7f7050 |  |
|  | 3a19fbc89c |  |
|  | 2d8e1339d1 |  |
|  | 4892f954fa |  |
|  | 6fd58ba388 |  |
|  | 49967547df |  |
|  | 492170b414 |  |
|  | 25b9391fb4 |  |
|  | 1e30afa92f |  |
|  | 7d4e69cdf7 |  |
|  | ff23e41c21 |  |
|  | 3ce7d2fc7e |  |
|  | 20a9d120aa |  |
|  | f4a40592a1 |  |
|  | 3d5145e924 |  |
|  | 1ae5a2cd05 |  |
|  | 75aa353281 |  |
|  | 5fe10915a8 |  |
|  | eca6494503 |  |
|  | b12d422fcc |  |
|  | 33809395ab |  |
|  | 60fe6ebc3c |  |
|  | 957f3e6eca |  |
|  | dbd918865f |  |
|  | 46a83df85e |  |
|  | 57f262504d |  |
|  | 29671cb54c |  |
|  | fd91e94279 |  |
|  | b001e36fb3 |  |
|  | 5f63476404 |  |
|  | eff452e4a5 |  |
|  | d393b16064 |  |
|  | 40b5967fc3 |  |
|  | ccabc342b9 |  |
|  | 570f6a8791 |  |
|  | b10cf1e357 |  |
|  | acfe464a31 |  |
|  | f32a8dc740 |  |
|  | ea8f109056 |  |
|  | 7e7b84348b |  |
|  | baf552337d |  |
|  | 3ff9161ab0 |  |
|  | aa8728b22c |  |
|  | 590ff351b4 |  |
|  | a5824a2a9d |  |
|  | 71975be3c1 |  |
|  | 564f87c775 |  |
|  | 6750a866c6 |  |
|  | c6316c1153 |  |
|  | 2c825f04e7 |  |
|  | 8a33e070be |  |
|  | d2a5fdfc71 |  |
|  | b6b2419206 |  |
|  | 4c399f1c01 |  |
|  | 923d335646 |  |
|  | be96c33257 |  |
|  | e68b1017e3 |  |
|  | 44e4b1d202 |  |
|  | 24378fd944 |  |
|  | c3e1715233 |  |
|  | ec86ebed98 |  |
|  | 5aa2779a48 |  |
|  | f6d15ec818 |  |
|  | f0320b5ac9 |  |
|  | 79578e5db3 |  |
|  | 43beaf4b00 |  |
|  | 6b0cc3c1de |  |
|  | 81a95a347d |  |
The first file compared is the Azure Pipelines configuration. The devel stages are renamed to 2.16, new 2.15 stages are added, and the 2.12/2.13 stages are removed:

```diff
@@ -29,14 +29,14 @@ schedules:
     always: true
     branches:
       include:
-        - stable-6
-        - stable-5
+        - stable-8
+        - stable-7
   - cron: 0 11 * * 0
     displayName: Weekly (old stable branches)
     always: true
     branches:
       include:
-        - stable-4
+        - stable-6

 variables:
   - name: checkoutPath
@@ -53,26 +53,39 @@ variables:
 resources:
   containers:
     - container: default
-      image: quay.io/ansible/azure-pipelines-test-container:3.0.0
+      image: quay.io/ansible/azure-pipelines-test-container:4.0.1

 pool: Standard

 stages:
 ### Sanity
-  - stage: Sanity_devel
-    displayName: Sanity devel
+  - stage: Sanity_2_16
+    displayName: Sanity 2.16
     dependsOn: []
     jobs:
       - template: templates/matrix.yml
         parameters:
           nameFormat: Test {0}
-          testFormat: devel/sanity/{0}
+          testFormat: 2.16/sanity/{0}
           targets:
             - test: 1
             - test: 2
             - test: 3
             - test: 4
             - test: extra
+  - stage: Sanity_2_15
+    displayName: Sanity 2.15
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          nameFormat: Test {0}
+          testFormat: 2.15/sanity/{0}
+          targets:
+            - test: 1
+            - test: 2
+            - test: 3
+            - test: 4
   - stage: Sanity_2_14
     displayName: Sanity 2.14
     dependsOn: []
@@ -86,50 +99,34 @@ stages:
             - test: 2
             - test: 3
             - test: 4
-  - stage: Sanity_2_13
-    displayName: Sanity 2.13
-    dependsOn: []
-    jobs:
-      - template: templates/matrix.yml
-        parameters:
-          nameFormat: Test {0}
-          testFormat: 2.13/sanity/{0}
-          targets:
-            - test: 1
-            - test: 2
-            - test: 3
-            - test: 4
-  - stage: Sanity_2_12
-    displayName: Sanity 2.12
-    dependsOn: []
-    jobs:
-      - template: templates/matrix.yml
-        parameters:
-          nameFormat: Test {0}
-          testFormat: 2.12/sanity/{0}
-          targets:
-            - test: 1
-            - test: 2
-            - test: 3
-            - test: 4
 ### Units
-  - stage: Units_devel
-    displayName: Units devel
+  - stage: Units_2_16
+    displayName: Units 2.16
     dependsOn: []
     jobs:
       - template: templates/matrix.yml
         parameters:
           nameFormat: Python {0}
-          testFormat: devel/units/{0}/1
+          testFormat: 2.16/units/{0}/1
           targets:
             - test: 2.7
             - test: 3.5
             - test: 3.6
             - test: 3.7
             - test: 3.8
             - test: 3.9
             - test: '3.10'
             - test: '3.11'
+  - stage: Units_2_15
+    displayName: Units 2.15
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          nameFormat: Python {0}
+          testFormat: 2.15/units/{0}/1
+          targets:
+            - test: 3.5
+            - test: "3.10"
   - stage: Units_2_14
     displayName: Units 2.14
     dependsOn: []
@@ -139,64 +136,59 @@ stages:
           nameFormat: Python {0}
           testFormat: 2.14/units/{0}/1
           targets:
             - test: 2.7
             - test: 3.9
-  - stage: Units_2_13
-    displayName: Units 2.13
-    dependsOn: []
-    jobs:
-      - template: templates/matrix.yml
-        parameters:
-          nameFormat: Python {0}
-          testFormat: 2.13/units/{0}/1
-          targets:
-            - test: 2.7
-            - test: 3.8
-  - stage: Units_2_12
-    displayName: Units 2.12
-    dependsOn: []
-    jobs:
-      - template: templates/matrix.yml
-        parameters:
-          nameFormat: Python {0}
-          testFormat: 2.12/units/{0}/1
-          targets:
-            - test: 2.6
-            - test: 3.8

 ## Remote
-  - stage: Remote_devel_extra_vms
-    displayName: Remote devel extra VMs
+  - stage: Remote_2_16_extra_vms
+    displayName: Remote 2.16 extra VMs
     dependsOn: []
     jobs:
       - template: templates/matrix.yml
         parameters:
-          testFormat: devel/{0}
+          testFormat: 2.16/{0}
           targets:
-            - name: Alpine 3.17
-              test: alpine/3.17
-            # - name: Fedora 37
-            #   test: fedora/37
-            # - name: Ubuntu 20.04
-            #   test: ubuntu/20.04
+            - name: Alpine 3.18
+              test: alpine/3.18
+            # - name: Fedora 38
+            #   test: fedora/38
             - name: Ubuntu 22.04
              test: ubuntu/22.04
           groups:
             - vm
-  - stage: Remote_devel
-    displayName: Remote devel
+  - stage: Remote_2_16
+    displayName: Remote 2.16
     dependsOn: []
     jobs:
       - template: templates/matrix.yml
         parameters:
-          testFormat: devel/{0}
+          testFormat: 2.16/{0}
           targets:
             - name: macOS 13.2
               test: macos/13.2
             - name: RHEL 7.9
               test: rhel/7.9
             - name: RHEL 9.2
               test: rhel/9.2
             - name: RHEL 8.8
               test: rhel/8.8
             - name: FreeBSD 13.2
               test: freebsd/13.2
           groups:
             - 1
             - 2
             - 3
+  - stage: Remote_2_15
+    displayName: Remote 2.15
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          testFormat: 2.15/{0}
+          targets:
+            - name: RHEL 9.1
+              test: rhel/9.1
+            - name: RHEL 8.7
+              test: rhel/8.7
+            - name: RHEL 7.9
+              test: rhel/7.9
+            - name: FreeBSD 13.1
+              test: freebsd/13.1
+            - name: FreeBSD 12.4
@@ -215,60 +207,24 @@ stages:
           targets:
             - name: RHEL 9.0
               test: rhel/9.0
             - name: FreeBSD 12.3
               test: freebsd/12.3
           groups:
             - 1
             - 2
             - 3
-  - stage: Remote_2_13
-    displayName: Remote 2.13
-    dependsOn: []
-    jobs:
-      - template: templates/matrix.yml
-        parameters:
-          testFormat: 2.13/{0}
-          targets:
-            - name: macOS 12.0
-              test: macos/12.0
-            - name: RHEL 8.5
-              test: rhel/8.5
-          groups:
-            - 1
-            - 2
-            - 3
-  - stage: Remote_2_12
-    displayName: Remote 2.12
-    dependsOn: []
-    jobs:
-      - template: templates/matrix.yml
-        parameters:
-          testFormat: 2.12/{0}
-          targets:
-            - name: macOS 11.1
-              test: macos/11.1
-            - name: RHEL 8.4
-              test: rhel/8.4
-            - name: FreeBSD 13.0
-              test: freebsd/13.0
-            #- name: FreeBSD 12.4
-            #  test: freebsd/12.4
-          groups:
-            - 1
-            - 2
-            - 3

 ### Docker
-  - stage: Docker_devel
-    displayName: Docker devel
+  - stage: Docker_2_16
+    displayName: Docker 2.16
     dependsOn: []
     jobs:
       - template: templates/matrix.yml
         parameters:
-          testFormat: devel/linux/{0}
+          testFormat: 2.16/linux/{0}
           targets:
             - name: CentOS 7
               test: centos7
-            - name: Fedora 37
-              test: fedora37
+            - name: Fedora 38
+              test: fedora38
             - name: openSUSE 15
               test: opensuse15
             - name: Ubuntu 20.04
@@ -281,6 +237,22 @@ stages:
             - 1
             - 2
             - 3
+  - stage: Docker_2_15
+    displayName: Docker 2.15
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          testFormat: 2.15/linux/{0}
+          targets:
+            - name: Fedora 37
+              test: fedora37
+            - name: CentOS 7
+              test: centos7
+          groups:
+            - 1
+            - 2
+            - 3
   - stage: Docker_2_14
     displayName: Docker 2.14
     dependsOn: []
@@ -295,56 +267,20 @@ stages:
             - 1
             - 2
             - 3
-  - stage: Docker_2_13
-    displayName: Docker 2.13
-    dependsOn: []
-    jobs:
-      - template: templates/matrix.yml
-        parameters:
-          testFormat: 2.13/linux/{0}
-          targets:
-            - name: Fedora 35
-              test: fedora35
-            - name: openSUSE 15 py2
-              test: opensuse15py2
-            - name: Alpine 3
-              test: alpine3
-          groups:
-            - 1
-            - 2
-            - 3
-  - stage: Docker_2_12
-    displayName: Docker 2.12
-    dependsOn: []
-    jobs:
-      - template: templates/matrix.yml
-        parameters:
-          testFormat: 2.12/linux/{0}
-          targets:
-            - name: CentOS 6
-              test: centos6
-            - name: Fedora 34
-              test: fedora34
-            - name: Ubuntu 18.04
-              test: ubuntu1804
-          groups:
-            - 1
-            - 2
-            - 3

 ### Community Docker
-  - stage: Docker_community_devel
-    displayName: Docker (community images) devel
+  - stage: Docker_community_2_16
+    displayName: Docker (community images) 2.16
     dependsOn: []
     jobs:
       - template: templates/matrix.yml
         parameters:
-          testFormat: devel/linux-community/{0}
+          testFormat: 2.16/linux-community/{0}
           targets:
             - name: Debian Bullseye
               test: debian-bullseye/3.9
             - name: ArchLinux
-              test: archlinux/3.10
+              test: archlinux/3.11
             - name: CentOS Stream 8
               test: centos-stream8/3.9
           groups:
@@ -353,17 +289,27 @@ stages:
             - 3

 ### Generic
-  - stage: Generic_devel
-    displayName: Generic devel
+  - stage: Generic_2_16
+    displayName: Generic 2.16
     dependsOn: []
     jobs:
       - template: templates/matrix.yml
         parameters:
           nameFormat: Python {0}
-          testFormat: devel/generic/{0}/1
+          testFormat: 2.16/generic/{0}/1
           targets:
             - test: 2.7
             - test: '3.11'
+  - stage: Generic_2_15
+    displayName: Generic 2.15
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          nameFormat: Python {0}
+          testFormat: 2.15/generic/{0}/1
+          targets:
+            - test: 3.9
   - stage: Generic_2_14
     displayName: Generic 2.14
     dependsOn: []
@@ -374,52 +320,27 @@ stages:
           testFormat: 2.14/generic/{0}/1
           targets:
             - test: '3.10'
-  - stage: Generic_2_13
-    displayName: Generic 2.13
-    dependsOn: []
-    jobs:
-      - template: templates/matrix.yml
-        parameters:
-          nameFormat: Python {0}
-          testFormat: 2.13/generic/{0}/1
-          targets:
-            - test: 3.9
-  - stage: Generic_2_12
-    displayName: Generic 2.12
-    dependsOn: []
-    jobs:
-      - template: templates/matrix.yml
-        parameters:
-          nameFormat: Python {0}
-          testFormat: 2.12/generic/{0}/1
-          targets:
-            - test: 3.8

 - stage: Summary
   condition: succeededOrFailed()
   dependsOn:
-    - Sanity_devel
-    - Sanity_2_12
-    - Sanity_2_13
     - Sanity_2_14
-    - Units_devel
-    - Units_2_12
-    - Units_2_13
+    - Sanity_2_15
+    - Sanity_2_16
     - Units_2_14
-    - Remote_devel_extra_vms
-    - Remote_devel
-    - Remote_2_12
-    - Remote_2_13
+    - Units_2_15
+    - Units_2_16
     - Remote_2_14
-    - Docker_devel
-    - Docker_2_12
-    - Docker_2_13
+    - Remote_2_15
+    - Remote_2_16
+    - Remote_2_16_extra_vms
     - Docker_2_14
-    - Docker_community_devel
+    - Docker_2_15
+    - Docker_2_16
+    - Docker_community_2_16
   # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
-  # - Generic_devel
-  # - Generic_2_12
-  # - Generic_2_13
   # - Generic_2_14
+  # - Generic_2_15
+  # - Generic_2_16
   jobs:
     - template: templates/coverage.yml
```
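For orientation: every stage above delegates its job fan-out to `templates/matrix.yml`, with each `targets` entry substituted into the `{0}` placeholder of `testFormat`. A minimal sketch of one such stage as it would appear in the file; the parameter names come from the diff, but what the template does with them internally is an assumption:

```yaml
# Sketch of a single stage consuming the shared matrix template.
# nameFormat, testFormat, and targets are taken from the diff above;
# the expansion behavior of templates/matrix.yml is assumed.
- stage: Sanity_2_16
  displayName: Sanity 2.16
  dependsOn: []
  jobs:
    - template: templates/matrix.yml
      parameters:
        nameFormat: Test {0}          # display name per generated job
        testFormat: 2.16/sanity/{0}   # becomes e.g. "2.16/sanity/1"
        targets:
          - test: 1
          - test: 2
```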
.github/BOTMETA.yml (vendored): 37 changed lines
```diff
@@ -247,9 +247,11 @@ files:
     labels: onepassword
     maintainers: samdoran
   $lookups/onepassword.py:
-    maintainers: azenk scottsb
+    ignore: scottsb
+    maintainers: azenk
   $lookups/onepassword_raw.py:
-    maintainers: azenk scottsb
+    ignore: scottsb
+    maintainers: azenk
   $lookups/passwordstore.py: {}
   $lookups/random_pet.py:
     maintainers: Akasurde
@@ -267,6 +269,8 @@ files:
     maintainers: delineaKrehl tylerezimmerman
   $module_utils/:
     labels: module_utils
+  $module_utils/btrfs.py:
+    maintainers: gnfzdz
   $module_utils/deps.py:
     maintainers: russoz
   $module_utils/gconftool2.py:
@@ -395,6 +399,8 @@ files:
     maintainers: catcombo
   $modules/bower.py:
     maintainers: mwarkentin
+  $modules/btrfs_:
+    maintainers: gnfzdz
   $modules/bundler.py:
     maintainers: thoiberg
   $modules/bzr.py:
@@ -523,6 +529,7 @@ files:
     keywords: gitlab source_control
     maintainers: $team_gitlab
     notify: jlozadad
+    ignore: dj-wasabi
   $modules/gitlab_branch.py:
     maintainers: paytroff
   $modules/gitlab_project_variable.py:
@@ -589,7 +596,7 @@ files:
     ignore: jose-delarosa
     maintainers: $team_redfish
   $modules/ilo_:
-    ignore: jose-delarosa
+    ignore: jose-delarosa varini-hp
     maintainers: $team_redfish
   $modules/imc_rest.py:
     labels: cisco
@@ -665,9 +672,9 @@ files:
   $modules/jenkins_script.py:
     maintainers: hogarthj
   $modules/jira.py:
-    ignore: DWSR
+    ignore: DWSR tarka
     labels: jira
-    maintainers: Slezhuk tarka pertoft
+    maintainers: Slezhuk pertoft
   $modules/kdeconfig.py:
     maintainers: smeso
   $modules/kernel_blacklist.py:
@@ -676,10 +683,14 @@ files:
     maintainers: $team_keycloak
   $modules/keycloak_authentication.py:
     maintainers: elfelip Gaetan2907
+  $modules/keycloak_authz_authorization_scope.py:
+    maintainers: mattock
   $modules/keycloak_client_rolemapping.py:
     maintainers: Gaetan2907
   $modules/keycloak_clientscope.py:
     maintainers: Gaetan2907
+  $modules/keycloak_clientscope_type.py:
+    maintainers: simonpahl
   $modules/keycloak_clientsecret_info.py:
     maintainers: fynncfchen johncant
   $modules/keycloak_clientsecret_regenerate.py:
@@ -921,7 +932,7 @@ files:
   $modules/pamd.py:
     maintainers: kevensen
   $modules/parted.py:
-    maintainers: ColOfAbRiX rosowiecki jake2184
+    maintainers: ColOfAbRiX jake2184
   $modules/pear.py:
     ignore: jle64
     labels: pear
@@ -990,7 +1001,7 @@ files:
     maintainers: sysadmind
   $modules/puppet.py:
     labels: puppet
-    maintainers: nibalizer emonty
+    maintainers: emonty
   $modules/pushbullet.py:
     maintainers: willybarro
   $modules/pushover.py:
@@ -1045,7 +1056,8 @@ files:
     maintainers: $team_redfish TSKushal
   $modules/redhat_subscription.py:
     labels: redhat_subscription
-    maintainers: barnabycourt alikins kahowell
+    maintainers: $team_rhsm
+    ignore: barnabycourt alikins kahowell
   $modules/redis.py:
     maintainers: slok
   $modules/redis_data.py:
@@ -1068,9 +1080,9 @@ files:
     labels: rhn_register
     maintainers: jlaska $team_rhn
   $modules/rhsm_release.py:
-    maintainers: seandst
+    maintainers: seandst $team_rhsm
   $modules/rhsm_repository.py:
-    maintainers: giovannisciortino
+    maintainers: giovannisciortino $team_rhsm
   $modules/riak.py:
     maintainers: drewkerrigan jsmartin
   $modules/rocketchat.py:
@@ -1384,12 +1396,12 @@ macros:
   team_cyberark_conjur: jvanderhoof ryanprior
   team_e_spirit: MatrixCrawler getjack
   team_flatpak: JayKayy oolongbrothers
-  team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman metanovii sh0shin nejch lgatellier suukit
+  team_gitlab: Lunik Shaps marwatk waheedi zanssa scodeman metanovii sh0shin nejch lgatellier suukit
   team_hpux: bcoca davx8342
   team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2
   team_ipa: Akasurde Nosmoht fxfitz justchris1
   team_jboss: Wolfant jairojunior wbrefvem
-  team_keycloak: eikef ndclt
+  team_keycloak: eikef ndclt mattock
   team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber
   team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr
   team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder
@@ -1399,6 +1411,7 @@ macros:
   team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
   team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06 jyundt
   team_rhn: FlossWare alikins barnabycourt vritant
+  team_rhsm: cnsnyder ptoscano
   team_scaleway: remyleone abarbare
   team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
   team_suse: commel evrardjp lrupp toabctl AnderEnder alxgu andytom sealor
```
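Most of these BOTMETA edits follow two recurring shapes: a departing maintainer moves from `maintainers` to `ignore` (so the bot stops pinging them without losing history), and a literal name list is swapped for a `$team_*` macro expanded from the `macros` section. A condensed sketch built from entries visible in the diff; the surrounding file layout is assumed:

```yaml
files:
  $modules/redhat_subscription.py:
    labels: redhat_subscription
    maintainers: $team_rhsm               # macro reference instead of literal names
    ignore: barnabycourt alikins kahowell # former maintainers, no longer notified
macros:
  team_rhsm: cnsnyder ptoscano            # expanded wherever $team_rhsm appears
```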
.github/workflows/ansible-test.yml (vendored): 102 changed lines
```diff
@@ -30,6 +30,8 @@ jobs:
       matrix:
         ansible:
+          - '2.11'
+          - '2.12'
           - '2.13'
           # Ansible-test on various stable branches does not yet work well with cgroups v2.
           # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
           # image for these stable branches. The list of branches where this is necessary will
@@ -43,7 +45,7 @@ jobs:
       - name: Perform sanity testing
         uses: felixfontein/ansible-test-gh-action@main
         with:
-          ansible-core-github-repository-slug: felixfontein/ansible
+          ansible-core-github-repository-slug: ${{ contains(fromJson('["2.10", "2.11"]'), matrix.ansible) && 'felixfontein/ansible' || 'ansible/ansible' }}
           ansible-core-version: stable-${{ matrix.ansible }}
           coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
           pull-request-change-detection: 'true'
@@ -75,6 +77,14 @@ jobs:
             python: '2.7'
+          - ansible: '2.11'
+            python: '3.5'
+          - ansible: '2.12'
+            python: '2.6'
+          - ansible: '2.12'
+            python: '3.8'
+          - ansible: '2.13'
+            python: '2.7'
           - ansible: '2.13'
             python: '3.8'

     steps:
       - name: >-
@@ -82,7 +92,7 @@ jobs:
           Ansible version ${{ matrix.ansible }}
         uses: felixfontein/ansible-test-gh-action@main
         with:
-          ansible-core-github-repository-slug: felixfontein/ansible
+          ansible-core-github-repository-slug: ${{ contains(fromJson('["2.10", "2.11"]'), matrix.ansible) && 'felixfontein/ansible' || 'ansible/ansible' }}
           ansible-core-version: stable-${{ matrix.ansible }}
           coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
           pre-test-cmd: >-
@@ -163,7 +173,91 @@ jobs:
           # - ansible: '2.11'
           #   docker: default
           #   python: '3.5'
-          #   target: azp/generic/2/
+          #   target: azp/generic/1/
+          # 2.12
+          - ansible: '2.12'
+            docker: centos6
+            python: ''
+            target: azp/posix/1/
+          - ansible: '2.12'
+            docker: centos6
+            python: ''
+            target: azp/posix/2/
+          - ansible: '2.12'
+            docker: centos6
+            python: ''
+            target: azp/posix/3/
+          - ansible: '2.12'
+            docker: fedora34
+            python: ''
+            target: azp/posix/1/
+          - ansible: '2.12'
+            docker: fedora34
+            python: ''
+            target: azp/posix/2/
+          - ansible: '2.12'
+            docker: fedora34
+            python: ''
+            target: azp/posix/3/
+          - ansible: '2.12'
+            docker: ubuntu1804
+            python: ''
+            target: azp/posix/1/
+          - ansible: '2.12'
+            docker: ubuntu1804
+            python: ''
+            target: azp/posix/2/
+          - ansible: '2.12'
+            docker: ubuntu1804
+            python: ''
+            target: azp/posix/3/
+          # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
+          # - ansible: '2.12'
+          #   docker: default
+          #   python: '3.8'
+          #   target: azp/generic/1/
+          # 2.13
+          - ansible: '2.13'
+            docker: fedora35
+            python: ''
+            target: azp/posix/1/
+          - ansible: '2.13'
+            docker: fedora35
+            python: ''
+            target: azp/posix/2/
+          - ansible: '2.13'
+            docker: fedora35
+            python: ''
+            target: azp/posix/3/
+          - ansible: '2.13'
+            docker: opensuse15py2
+            python: ''
+            target: azp/posix/1/
+          - ansible: '2.13'
+            docker: opensuse15py2
+            python: ''
+            target: azp/posix/2/
+          - ansible: '2.13'
+            docker: opensuse15py2
+            python: ''
+            target: azp/posix/3/
+          - ansible: '2.13'
+            docker: alpine3
+            python: ''
+            target: azp/posix/1/
+          - ansible: '2.13'
+            docker: alpine3
+            python: ''
+            target: azp/posix/2/
+          - ansible: '2.13'
+            docker: alpine3
+            python: ''
+            target: azp/posix/3/
+          # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
+          # - ansible: '2.13'
+          #   docker: default
+          #   python: '3.9'
+          #   target: azp/generic/1/

     steps:
       - name: >-
@@ -172,7 +266,7 @@ jobs:
           under Python ${{ matrix.python }}
         uses: felixfontein/ansible-test-gh-action@main
         with:
-          ansible-core-github-repository-slug: felixfontein/ansible
+          ansible-core-github-repository-slug: ${{ contains(fromJson('["2.10", "2.11"]'), matrix.ansible) && 'felixfontein/ansible' || 'ansible/ansible' }}
          ansible-core-version: stable-${{ matrix.ansible }}
           coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
           docker-image: ${{ matrix.docker }}
```
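The expression repeated across this workflow picks a patched fork of ansible-core for the end-of-life 2.10/2.11 branches and the upstream repository for everything else; presumably the fork carries CI fixes those branches never received upstream. A trimmed sketch of how one integration matrix entry feeds the action; job and step layout are assumptions, while the action inputs and expressions are the ones visible in the diff:

```yaml
jobs:
  integration:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        include:
          - ansible: '2.13'
            docker: alpine3
            python: ''
            target: azp/posix/1/
    steps:
      - name: Run integration tests for Ansible ${{ matrix.ansible }}
        uses: felixfontein/ansible-test-gh-action@main
        with:
          # EOL core branches come from a patched fork, everything else from upstream
          ansible-core-github-repository-slug: ${{ contains(fromJson('["2.10", "2.11"]'), matrix.ansible) && 'felixfontein/ansible' || 'ansible/ansible' }}
          ansible-core-version: stable-${{ matrix.ansible }}
          coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
          docker-image: ${{ matrix.docker }}
```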
One file, a pre-commit configuration, is deleted outright:

```diff
@@ -1,23 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-repos:
-  - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.0.1
-    hooks:
-      - id: trailing-whitespace
-      - id: end-of-file-fixer
-      - id: mixed-line-ending
-        args: [--fix=lf]
-      - id: fix-encoding-pragma
-      - id: check-ast
-      - id: check-merge-conflict
-      - id: check-symlinks
-  - repo: https://github.com/pre-commit/pygrep-hooks
-    rev: v1.9.0
-    hooks:
-      - id: rst-backticks
-        types: [file]
-        files: changelogs/fragments/.*\.(yml|yaml)$
```
CHANGELOG.rst: 231 changed lines
```diff
@@ -6,6 +6,237 @@ Community General Release Notes

 This changelog describes changes after version 5.0.0.

+v6.6.7
+======
+
+Release Summary
+---------------
+
+Bugfix release.
+
+From now on, community.general 6.x.y will only receive major bugfixes and security fixes anymore.
+
+Bugfixes
+--------
+
+- composer - fix impossible to run ``working_dir`` dependent commands. The module was throwing an error when trying to run a ``working_dir`` dependent command, because it tried to get the command help without passing the ``working_dir`` (https://github.com/ansible-collections/community.general/issues/3787).
+- github_deploy_key - fix pagination behaviour causing a crash when only a single page of deploy keys exist (https://github.com/ansible-collections/community.general/pull/7375).
+- gitlab_group_members - fix gitlab constants call in ``gitlab_group_members`` module (https://github.com/ansible-collections/community.general/issues/7467).
+- gitlab_project_members - fix gitlab constants call in ``gitlab_project_members`` module (https://github.com/ansible-collections/community.general/issues/7467).
+- gitlab_protected_branches - fix gitlab constants call in ``gitlab_protected_branches`` module (https://github.com/ansible-collections/community.general/issues/7467).
+- gitlab_user - fix gitlab constants call in ``gitlab_user`` module (https://github.com/ansible-collections/community.general/issues/7467).
+- lxc connection plugin - properly evaluate options (https://github.com/ansible-collections/community.general/pull/7369).
+- memset module utils - make compatible with ansible-core 2.17 (https://github.com/ansible-collections/community.general/pull/7379).
+- redhat_subscription - use the right D-Bus options for the consumer type when
+  registering a RHEL system older than 9 or a RHEL 9 system older than 9.2
+  and using ``consumer_type``
+  (https://github.com/ansible-collections/community.general/pull/7378).
+- selective callback plugin - fix length of task name lines in output always being 3 characters longer than desired (https://github.com/ansible-collections/community.general/pull/7374).
+
+v6.6.6
+======
+
+Release Summary
+---------------
+
+Regular bugfix release.
+
+Bugfixes
+--------
+
+- mail - skip headers containing equals characters due to missing ``maxsplit`` on header key/value parsing (https://github.com/ansible-collections/community.general/pull/7303).
+- onepassword - fix KeyError exception when trying to access value of a field that is not filled out in OnePassword item (https://github.com/ansible-collections/community.general/pull/7241).
+- terraform - prevents ``-backend-config`` option double encapsulating with ``shlex_quote`` function. (https://github.com/ansible-collections/community.general/pull/7301).
+
+v6.6.5
+======
+
+Release Summary
+---------------
+
+Regular bugfix release.
+
+Minor Changes
+-------------
+
+- make - allows ``params`` to be used without value (https://github.com/ansible-collections/community.general/pull/7180).
+- pritunl module utils - ensure ``validate_certs`` parameter is honoured in all methods (https://github.com/ansible-collections/community.general/pull/7156).
+
+Bugfixes
+--------
+
+- CmdRunner module utils - does not attempt to resolve path if executable is a relative or absolute path (https://github.com/ansible-collections/community.general/pull/7200).
+- lxc connection plugin - now handles ``remote_addr`` defaulting to ``inventory_hostname`` correctly (https://github.com/ansible-collections/community.general/pull/7104).
+- nsupdate - fix a possible ``list index out of range`` exception (https://github.com/ansible-collections/community.general/issues/836).
+- oci_utils module util - fix inappropriate logical comparison expressions and makes them simpler. The previous checks had logical short circuits (https://github.com/ansible-collections/community.general/pull/7125).
+- pritunl module utils - fix incorrect URL parameter for orgnization add method (https://github.com/ansible-collections/community.general/pull/7161).
+
+v6.6.4
+======
+
+Release Summary
+---------------
+
+Regular bugfix release.
+
+Minor Changes
+-------------
+
+- redfish_utils - use ``Controllers`` key in redfish data to obtain Storage controllers properties (https://github.com/ansible-collections/community.general/pull/7081).
+
+Bugfixes
+--------
+
+- bitwarden lookup plugin - the plugin made assumptions about the structure of a Bitwarden JSON object which may have been broken by an update in the Bitwarden API. Remove assumptions, and allow queries for general fields such as ``notes`` (https://github.com/ansible-collections/community.general/pull/7061).
+- cmd_runner module utils - when a parameter in ``argument_spec`` has no type, meaning it is implicitly a ``str``, ``CmdRunner`` would fail trying to find the ``type`` key in that dictionary (https://github.com/ansible-collections/community.general/pull/6968).
+- ejabberd_user - module was failing to detect whether user was already created and/or password was changed (https://github.com/ansible-collections/community.general/pull/7033).
+- ejabberd_user - provide meaningful error message when the ``ejabberdctl`` command is not found (https://github.com/ansible-collections/community.general/pull/7028, https://github.com/ansible-collections/community.general/issues/6949).
+- oci_utils module utils - avoid direct type comparisons (https://github.com/ansible-collections/community.general/pull/7085).
+- proxmox module utils - fix proxmoxer library version check (https://github.com/ansible-collections/community.general/issues/6974, https://github.com/ansible-collections/community.general/issues/6975, https://github.com/ansible-collections/community.general/pull/6980).
+- proxmox_kvm - when ``name`` option is provided without ``vmid`` and VM with that name already exists then no new VM will be created (https://github.com/ansible-collections/community.general/issues/6911, https://github.com/ansible-collections/community.general/pull/6981).
+- proxmox_user_info - avoid direct type comparisons (https://github.com/ansible-collections/community.general/pull/7085).
+- rundeck - fix ``TypeError`` on 404 API response (https://github.com/ansible-collections/community.general/pull/6983).
+
+v6.6.3
+======
+
+Release Summary
+---------------
+
+Regular bugfix release.
+
+Minor Changes
+-------------
+
+- cobbler inventory plugin - convert Ansible unicode strings to native Python unicode strings before passing user/password to XMLRPC client (https://github.com/ansible-collections/community.general/pull/6923).
+- redfish_info - fix for ``GetVolumeInventory``, Controller name was getting populated incorrectly and duplicates were seen in the volumes retrieved (https://github.com/ansible-collections/community.general/pull/6719).
+
+Bugfixes
+--------
+
+- datadog_downtime - presence of ``rrule`` param lead to the Datadog API returning Bad Request due to a missing recurrence type (https://github.com/ansible-collections/community.general/pull/6811).
+- icinga2_host - fix a key error when updating an existing host (https://github.com/ansible-collections/community.general/pull/6748).
+- ipa_dnszone - fix 'idnsallowsyncptr' key error for reverse zone (https://github.com/ansible-collections/community.general/pull/6906, https://github.com/ansible-collections/community.general/issues/6905).
+- locale_gen - now works for locales without the underscore character such as ``C.UTF-8`` (https://github.com/ansible-collections/community.general/pull/6774, https://github.com/ansible-collections/community.general/issues/5142, https://github.com/ansible-collections/community.general/issues/4305).
+- machinectl become plugin - mark plugin as ``require_tty`` to automatically disable pipelining, with which this plugin is not compatible (https://github.com/ansible-collections/community.general/issues/6932, https://github.com/ansible-collections/community.general/pull/6935).
+- nmcli - fix support for empty list (in compare and scrape) (https://github.com/ansible-collections/community.general/pull/6769).
+- openbsd_pkg - the pkg_info(1) behavior has changed in OpenBSD >7.3. The error message ``Can't find`` should not lead to an error case (https://github.com/ansible-collections/community.general/pull/6785).
+- pacman - module recognizes the output of ``yay`` running as ``root`` (https://github.com/ansible-collections/community.general/pull/6713).
+- proxmox - fix error when a configuration had no ``template`` field (https://github.com/ansible-collections/community.general/pull/6838, https://github.com/ansible-collections/community.general/issues/5372).
+- proxmox module utils - add logic to detect whether an old Promoxer complains about the ``token_name`` and ``token_value`` parameters and provide a better error message when that happens (https://github.com/ansible-collections/community.general/pull/6839, https://github.com/ansible-collections/community.general/issues/5371).
+- proxmox_disk - fix unable to create ``cdrom`` media due to ``size`` always being appended (https://github.com/ansible-collections/community.general/pull/6770).
+- proxmox_kvm - ``absent`` state with ``force`` specified failed to stop the VM due to the ``timeout`` value not being passed to ``stop_vm`` (https://github.com/ansible-collections/community.general/pull/6827).
+- redfish_info - fix ``ListUsers`` to not show empty account slots (https://github.com/ansible-collections/community.general/issues/6771, https://github.com/ansible-collections/community.general/pull/6772).
+- refish_utils module utils - changing variable names to avoid issues occuring when fetching Volumes data (https://github.com/ansible-collections/community.general/pull/6883).
+- rhsm_repository - when using the ``purge`` option, the ``repositories``
+  dictionary element in the returned JSON is now properly updated according
+  to the pruning operation
+  (https://github.com/ansible-collections/community.general/pull/6676).
+
+v6.6.2
+======
+
+Release Summary
+---------------
+
+Regular bugfix release.
+
+Bugfixes
+--------
+
+- csv module utils - detects and remove unicode BOM markers from incoming CSV content (https://github.com/ansible-collections/community.general/pull/6662).
+- gitlab_group - the module passed parameters to the API call even when not set. The module is now filtering out ``None`` values to remediate this (https://github.com/ansible-collections/community.general/pull/6712).
+- ini_file - fix a bug where the inactive options were not used when possible (https://github.com/ansible-collections/community.general/pull/6575).
+- keycloak module utils - fix ``is_struct_included`` handling of lists of lists/dictionaries (https://github.com/ansible-collections/community.general/pull/6688).
+- keycloak module utils - the function ``get_user_by_username`` now return the user representation or ``None`` as stated in the documentation (https://github.com/ansible-collections/community.general/pull/6758).
+
+v6.6.1
+======
+
+Release Summary
+---------------
+
+Regular bugfix release.
+
+Minor Changes
+-------------
+
+- dconf - if ``gi.repository.GLib`` is missing, try to respawn in a Python interpreter that has it (https://github.com/ansible-collections/community.general/pull/6491).
+
+Bugfixes
+--------
+
+- deps module utils - do not fail when dependency cannot be found (https://github.com/ansible-collections/community.general/pull/6479).
+- nmcli - fix bond option ``xmit_hash_policy`` (https://github.com/ansible-collections/community.general/pull/6527).
+- passwordstore lookup plugin - make compatible with ansible-core 2.16 (https://github.com/ansible-collections/community.general/pull/6447).
+- portage - fix ``changed_use`` and ``newuse`` not triggering rebuilds (https://github.com/ansible-collections/community.general/issues/6008, https://github.com/ansible-collections/community.general/pull/6548).
+- portage - update the logic for generating the emerge command arguments to ensure that ``withbdeps: false`` results in a passing an ``n`` argument with the ``--with-bdeps`` emerge flag (https://github.com/ansible-collections/community.general/issues/6451, https://github.com/ansible-collections/community.general/pull/6456).
+- proxmox_tasks_info - remove ``api_user`` + ``api_password`` constraint from ``required_together`` as it causes to require ``api_password`` even when API token param is used (https://github.com/ansible-collections/community.general/issues/6201).
+- puppet - handling ``noop`` parameter was not working at all, now it is has been fixed (https://github.com/ansible-collections/community.general/issues/6452, https://github.com/ansible-collections/community.general/issues/6458).
+- terraform - fix broken ``warn()`` call (https://github.com/ansible-collections/community.general/pull/6497).
+- xfs_quota - in case of a project quota, the call to ``xfs_quota`` did not initialize/reset the project (https://github.com/ansible-collections/community.general/issues/5143).
+- zypper - added handling of zypper exitcode 102. Changed state is set correctly now and rc 102 is still preserved to be evaluated by the playbook (https://github.com/ansible-collections/community.general/pull/6534).
+
+v6.6.0
+======
+
+Release Summary
+---------------
+
+Bugfix and feature release.
+
+Minor Changes
+-------------
+
+- cpanm - minor change, use feature from ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/6385).
+- dconf - be forgiving about boolean values: convert them to GVariant booleans automatically (https://github.com/ansible-collections/community.general/pull/6206).
+- dconf - minor refactoring improving parameters and dependencies validation (https://github.com/ansible-collections/community.general/pull/6336).
+- deps module utils - add function ``failed()`` providing the ability to check the dependency check result without triggering an exception (https://github.com/ansible-collections/community.general/pull/6383).
+- dig lookup plugin - Support multiple domains to be queried as indicated in docs (https://github.com/ansible-collections/community.general/pull/6334).
+- gitlab_project - add new option ``topics`` for adding topics to GitLab projects (https://github.com/ansible-collections/community.general/pull/6278).
+- homebrew_cask - allows passing ``--greedy`` option to ``upgrade_all`` (https://github.com/ansible-collections/community.general/pull/6267).
+- idrac_redfish_command - add ``job_id`` to ``CreateBiosConfigJob`` response (https://github.com/ansible-collections/community.general/issues/5603).
+- ipa_hostgroup - add ``append`` parameter for adding a new hosts to existing hostgroups without changing existing hostgroup members (https://github.com/ansible-collections/community.general/pull/6203).
+- keycloak_authentication - add flow type option to sub flows to allow the creation of 'form-flow' sub flows like in Keycloak's built-in registration flow (https://github.com/ansible-collections/community.general/pull/6318).
+- mksysb - improved the output of the module in case of errors (https://github.com/ansible-collections/community.general/issues/6263).
+- nmap inventory plugin - added environment variables for configure ``address`` and ``exclude`` (https://github.com/ansible-collections/community.general/issues/6351).
+- nmcli - add ``macvlan`` connection type (https://github.com/ansible-collections/community.general/pull/6312).
+- pipx - add ``system_site_packages`` parameter to give application access to system-wide packages (https://github.com/ansible-collections/community.general/pull/6308).
+- pipx - ensure ``include_injected`` parameter works with ``state=upgrade`` and ``state=latest`` (https://github.com/ansible-collections/community.general/pull/6212).
+- puppet - add new options ``skip_tags`` to exclude certain tagged resources during a puppet agent or apply (https://github.com/ansible-collections/community.general/pull/6293).
+- terraform - remove state file check condition and error block, because in the native implementation of terraform will not cause errors due to the non-existent file (https://github.com/ansible-collections/community.general/pull/6296).
+- udm_dns_record - minor refactor to the code (https://github.com/ansible-collections/community.general/pull/6382).
+
+Bugfixes
+--------
+
+- archive - reduce RAM usage by generating CRC32 checksum over chunks (https://github.com/ansible-collections/community.general/pull/6274).
+- flatpak - fixes idempotency detection issues. In some cases the module could fail to properly detect already existing Flatpaks because of a parameter witch only checks the installed apps (https://github.com/ansible-collections/community.general/pull/6289).
+- icinga2_host - fix the data structure sent to Icinga to make use of host templates and template vars (https://github.com/ansible-collections/community.general/pull/6286).
+- idrac_redfish_command - allow user to specify ``resource_id`` for ``CreateBiosConfigJob`` to specify an exact manager (https://github.com/ansible-collections/community.general/issues/2090).
+- ini_file - make ``section`` parameter not required so it is possible to pass ``null`` as a value. This only was possible in the past due to a bug in ansible-core that now has been fixed (https://github.com/ansible-collections/community.general/pull/6404).
+- keycloak - improve error messages (https://github.com/ansible-collections/community.general/pull/6318).
+- one_vm - fix syntax error when creating VMs with a more complex template (https://github.com/ansible-collections/community.general/issues/6225).
+- pipx - fixed handling of ``install_deps=true`` with ``state=latest`` and ``state=upgrade`` (https://github.com/ansible-collections/community.general/pull/6303).
+- redhat_subscription - do not use D-Bus for registering when ``environment`` is specified, so it possible to specify again the environment names for registering, as the D-Bus APIs work only with IDs (https://github.com/ansible-collections/community.general/pull/6319).
+- redhat_subscription - try to unregister only when already registered when ``force_register`` is specified (https://github.com/ansible-collections/community.general/issues/6258, https://github.com/ansible-collections/community.general/pull/6259).
+- redhat_subscription - use the right D-Bus options for environments when registering a CentOS Stream 8 system and using ``environment`` (https://github.com/ansible-collections/community.general/pull/6275).
+- rhsm_release - make ``release`` parameter not required so it is possible to pass ``null`` as a value. This only was possible in the past due to a bug in ansible-core that now has been fixed (https://github.com/ansible-collections/community.general/pull/6401).
+- rundeck module utils - fix errors caused by the API empty responses (https://github.com/ansible-collections/community.general/pull/6300)
+- rundeck_acl_policy - fix ``TypeError - byte indices must be integers or slices, not str`` error caused by empty API response. Update the module to use ``module_utils.rundeck`` functions (https://github.com/ansible-collections/community.general/pull/5887, https://github.com/ansible-collections/community.general/pull/6300).
+- rundeck_project - update the module to use ``module_utils.rundeck`` functions (https://github.com/ansible-collections/community.general/issues/5742) (https://github.com/ansible-collections/community.general/pull/6300)
+- snap_alias - module would only recognize snap names containing letter, numbers or the underscore character, failing to identify valid snap names such as ``lxd.lxc`` (https://github.com/ansible-collections/community.general/pull/6361).
+
+New Modules
+-----------
+
+- btrfs_info - Query btrfs filesystem info
+- btrfs_subvolume - Manage btrfs subvolumes
+- ilo_redfish_command - Manages Out-Of-Band controllers using Redfish APIs
+- keycloak_authz_authorization_scope - Allows administration of Keycloak client authorization scopes via Keycloak API
+- keycloak_clientscope_type - Set the type of aclientscope in realm or client via Keycloak API
+
 v6.5.0
 ======
```
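One of the v6.6.0 fixes above, ini_file accepting ``null`` for ``section``, is easiest to see in a task. A hypothetical example; the path and option names are placeholders, not taken from the release notes:

```yaml
# Relies on the v6.6.0 ini_file fix: section may be null for
# top-level (sectionless) key=value files.
- name: Set a sectionless option
  community.general.ini_file:
    path: /etc/conf.d/example.conf  # placeholder path
    section: null
    option: max_connections
    value: "100"
```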
The contributor documentation drops its pre-commit section to match the deleted configuration file:

```diff
@@ -121,19 +121,3 @@ Creating new modules and plugins requires a bit more work than other Pull Reques
 listed as `maintainers` will be pinged for new issues and PRs that modify the module/plugin or its tests.
 
 When you add a new plugin/module, we expect that you perform maintainer duty for at least some time after contributing it.
-
-## pre-commit
-
-To help ensure high-quality contributions this repository includes a [pre-commit](https://pre-commit.com) configuration which
-corrects and tests against common issues that would otherwise cause CI to fail. To begin using these pre-commit hooks see
-the [Installation](#installation) section below.
-
-This is optional and not required to contribute to this repository.
-
-### Installation
-
-Follow the [instructions](https://pre-commit.com/#install) provided with pre-commit and run `pre-commit install` under the repository base. If for any reason you would like to disable the pre-commit hooks run `pre-commit uninstall`.
-
-This is optional to run it locally.
-
-You can trigger it locally with `pre-commit run --all-files` or even to run only for a given file `pre-commit run --files YOUR_FILE`.
```
The collection README updates its tested ansible-core versions and the Galaxy links:

````diff
@@ -24,7 +24,7 @@ If you encounter abusive behavior violating the [Ansible Code of Conduct](https:
 
 ## Tested with Ansible
 
-Tested with the current ansible-core 2.11, ansible-core 2.12, ansible-core 2.13, ansible-core 2.14 releases and the current development version of ansible-core. Ansible-core versions before 2.11.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases.
+Tested with the current ansible-core 2.11, ansible-core 2.12, ansible-core 2.13, ansible-core 2.14, ansible-core 2.15, and ansible-core 2.16 releases. Ansible-core versions before 2.11.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases.
 
 Parts of this collection will not work with ansible-core 2.11 on Python 3.12+.
 
@@ -34,13 +34,13 @@ Some modules and plugins require external libraries. Please check the requiremen
 
 ## Included content
 
-Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/community/general) or the [documentation on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
+Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/ui/repo/published/community/general/) or the [documentation on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
 
 ## Using this collection
 
 This collection is shipped with the Ansible package. So if you have it installed, no more action is required.
 
-If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/community/general) manually with the `ansible-galaxy` command-line tool:
+If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/ui/repo/published/community/general/) manually with the `ansible-galaxy` command-line tool:
 
     ansible-galaxy collection install community.general
 
@@ -57,7 +57,7 @@ Note that if you install the collection manually, it will not be upgraded automa
     ansible-galaxy collection install community.general --upgrade
 ```
 
-You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/community/general):
+You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/ui/repo/published/community/general/):
 
 ```bash
 ansible-galaxy collection install community.general:==X.Y.Z
````
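Alongside the ad-hoc `ansible-galaxy` invocations shown in the README, the collection can also be pinned through a `requirements.yml`. A minimal sketch; the version range is a placeholder, not taken from the README:

```yaml
# requirements.yml; install with: ansible-galaxy collection install -r requirements.yml
collections:
  - name: community.general
    version: ">=6.6.0,<7.0.0"  # placeholder pin; adjust to what you have tested
```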
@@ -1237,3 +1237,385 @@ releases:
|
||||
name: merge_variables
|
||||
namespace: null
|
||||
release_date: '2023-03-27'
|
||||
6.6.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- archive - reduce RAM usage by generating CRC32 checksum over chunks (https://github.com/ansible-collections/community.general/pull/6274).
|
||||
- flatpak - fixes idempotency detection issues. In some cases the module could
|
||||
fail to properly detect already existing Flatpaks because of a parameter witch
|
||||
only checks the installed apps (https://github.com/ansible-collections/community.general/pull/6289).
|
||||
- icinga2_host - fix the data structure sent to Icinga to make use of host templates
|
||||
and template vars (https://github.com/ansible-collections/community.general/pull/6286).
|
||||
- idrac_redfish_command - allow user to specify ``resource_id`` for ``CreateBiosConfigJob``
|
||||
to specify an exact manager (https://github.com/ansible-collections/community.general/issues/2090).
|
||||
- ini_file - make ``section`` parameter not required so it is possible to pass
|
||||
``null`` as a value. This only was possible in the past due to a bug in ansible-core
|
||||
that now has been fixed (https://github.com/ansible-collections/community.general/pull/6404).
|
||||
- keycloak - improve error messages (https://github.com/ansible-collections/community.general/pull/6318).
|
||||
- one_vm - fix syntax error when creating VMs with a more complex template (https://github.com/ansible-collections/community.general/issues/6225).
|
||||
- pipx - fixed handling of ``install_deps=true`` with ``state=latest`` and ``state=upgrade``
|
||||
(https://github.com/ansible-collections/community.general/pull/6303).
|
||||
- redhat_subscription - do not use D-Bus for registering when ``environment``
|
||||
is specified, so it possible to specify again the environment names for registering,
|
||||
as the D-Bus APIs work only with IDs (https://github.com/ansible-collections/community.general/pull/6319).
|
||||
- redhat_subscription - try to unregister only when already registered when
|
||||
``force_register`` is specified (https://github.com/ansible-collections/community.general/issues/6258,
|
||||
https://github.com/ansible-collections/community.general/pull/6259).
|
||||
- redhat_subscription - use the right D-Bus options for environments when registering
|
||||
a CentOS Stream 8 system and using ``environment`` (https://github.com/ansible-collections/community.general/pull/6275).
|
||||
- rhsm_release - make ``release`` parameter not required so it is possible to
|
||||
pass ``null`` as a value. This only was possible in the past due to a bug
|
||||
in ansible-core that now has been fixed (https://github.com/ansible-collections/community.general/pull/6401).
|
||||
- rundeck module utils - fix errors caused by the API empty responses (https://github.com/ansible-collections/community.general/pull/6300)
|
||||
- rundeck_acl_policy - fix ``TypeError - byte indices must be integers or slices,
|
||||
not str`` error caused by empty API response. Update the module to use ``module_utils.rundeck``
|
||||
functions (https://github.com/ansible-collections/community.general/pull/5887,
|
||||
https://github.com/ansible-collections/community.general/pull/6300).
|
||||
- rundeck_project - update the module to use ``module_utils.rundeck`` functions
|
||||
(https://github.com/ansible-collections/community.general/issues/5742) (https://github.com/ansible-collections/community.general/pull/6300)
|
||||
- snap_alias - module would only recognize snap names containing letter, numbers
|
||||
or the underscore character, failing to identify valid snap names such as
|
||||
``lxd.lxc`` (https://github.com/ansible-collections/community.general/pull/6361).
|
||||
    minor_changes:
    - cpanm - minor change, use feature from ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/6385).
    - 'dconf - be forgiving about boolean values: convert them to GVariant booleans automatically (https://github.com/ansible-collections/community.general/pull/6206).'
    - dconf - minor refactoring improving parameters and dependencies validation (https://github.com/ansible-collections/community.general/pull/6336).
    - deps module utils - add function ``failed()`` providing the ability to check the dependency check result without triggering an exception (https://github.com/ansible-collections/community.general/pull/6383).
    - dig lookup plugin - support multiple domains to be queried as indicated in the docs (https://github.com/ansible-collections/community.general/pull/6334).
    - gitlab_project - add new option ``topics`` for adding topics to GitLab projects (https://github.com/ansible-collections/community.general/pull/6278).
    - homebrew_cask - allow passing the ``--greedy`` option to ``upgrade_all`` (https://github.com/ansible-collections/community.general/pull/6267).
    - idrac_redfish_command - add ``job_id`` to ``CreateBiosConfigJob`` response (https://github.com/ansible-collections/community.general/issues/5603).
    - ipa_hostgroup - add ``append`` parameter for adding new hosts to existing hostgroups without changing existing hostgroup members (https://github.com/ansible-collections/community.general/pull/6203).
    - keycloak_authentication - add flow type option to sub flows to allow the creation of 'form-flow' sub flows like in Keycloak's built-in registration flow (https://github.com/ansible-collections/community.general/pull/6318).
    - mksysb - improved the output of the module in case of errors (https://github.com/ansible-collections/community.general/issues/6263).
    - nmap inventory plugin - added environment variables for configuring ``address`` and ``exclude`` (https://github.com/ansible-collections/community.general/issues/6351).
    - nmcli - add ``macvlan`` connection type (https://github.com/ansible-collections/community.general/pull/6312).
    - pipx - add ``system_site_packages`` parameter to give application access to system-wide packages (https://github.com/ansible-collections/community.general/pull/6308).
    - pipx - ensure ``include_injected`` parameter works with ``state=upgrade`` and ``state=latest`` (https://github.com/ansible-collections/community.general/pull/6212).
    - puppet - add new option ``skip_tags`` to exclude certain tagged resources during a puppet agent or apply run (https://github.com/ansible-collections/community.general/pull/6293).
    - terraform - remove the state file check condition and error block, because the native implementation of Terraform does not raise errors for a non-existent state file (https://github.com/ansible-collections/community.general/pull/6296).
    - udm_dns_record - minor refactor to the code (https://github.com/ansible-collections/community.general/pull/6382).
    release_summary: Bugfix and feature release.
  fragments:
  - 2090-idrac-redfish-resource-id-fix.yml
  - 5603-redfish-idrac-job-id-in-response.yml
  - 6.6.0.yml
  - 6199-archive-generate-checksum-in-chunks.yml
  - 6203-add-append-option-to-ipa-hostgroup.yml
  - 6206-dconf-booleans.yml
  - 6212-pipx-include-injected.yml
  - 6259-redhat_subscription-fix-force.yaml
  - 6267-homebrew-cask-upgrade-all-greedy.yml
  - 6269-mksysb-output.yml
  - 6275-redhat_subscription-fix-environments-centos.yaml
  - 6277-add-topics-gitlab-project.yml
  - 6286-icinga2_host-template-and-template-vars.yml
  - 6289-bugfix-flatpak-check-if-already-installed.yml
  - 6293-add-puppet-skip-tags-option.yaml
  - 6294-fix-one_vm-instantiation.yml
  - 6296-LanceNero-Terraform_statefile_check.yml
  - 6300-rundeck-modules-fixes-and-improvements.yml
  - 6303-pipx-fix-state-latest-and-add-system-site-packages.yml
  - 6308-pipx-add-system-site-packages.yml
  - 6312-nmcli-add-macvlan-connection-type.yml
  - 6318-add-form-flow.yml
  - 6319-redhat_subscription-fix-environment-parameter.yaml
  - 6334-dig-support-multiple-domains.yml
  - 6336-dconf-refactor.yml
  - 6351-support-env-variables-to-nmap-dynamic-inventoiry.yaml
  - 6361-snap-alias-regex-bugfix.yml
  - 6382-udm-dns-record-refactor.yml
  - 6383-deps-failed.yml
  - 6385-cpan-mh-feat.yml
  - 6401-rhsm_release-required.yml
  - 6404-ini_file-section.yml
  modules:
  - description: Query btrfs filesystem info
    name: btrfs_info
    namespace: ''
  - description: Manage btrfs subvolumes
    name: btrfs_subvolume
    namespace: ''
  - description: Manages Out-Of-Band controllers using Redfish APIs
    name: ilo_redfish_command
    namespace: ''
  - description: Allows administration of Keycloak client authorization scopes via Keycloak API
    name: keycloak_authz_authorization_scope
    namespace: ''
  - description: Set the type of a client scope in realm or client via Keycloak API
    name: keycloak_clientscope_type
    namespace: ''
  release_date: '2023-04-24'
6.6.1:
  changes:
    bugfixes:
    - deps module utils - do not fail when dependency cannot be found (https://github.com/ansible-collections/community.general/pull/6479).
    - nmcli - fix bond option ``xmit_hash_policy`` (https://github.com/ansible-collections/community.general/pull/6527).
    - passwordstore lookup plugin - make compatible with ansible-core 2.16 (https://github.com/ansible-collections/community.general/pull/6447).
    - portage - fix ``changed_use`` and ``newuse`` not triggering rebuilds (https://github.com/ansible-collections/community.general/issues/6008, https://github.com/ansible-collections/community.general/pull/6548).
    - 'portage - update the logic for generating the emerge command arguments to ensure that ``withbdeps: false`` results in passing an ``n`` argument with the ``--with-bdeps`` emerge flag (https://github.com/ansible-collections/community.general/issues/6451, https://github.com/ansible-collections/community.general/pull/6456).'
    - proxmox_tasks_info - remove ``api_user`` + ``api_password`` constraint from ``required_together``, as it caused ``api_password`` to be required even when an API token parameter is used (https://github.com/ansible-collections/community.general/issues/6201).
    - puppet - handling of the ``noop`` parameter was not working at all; this has now been fixed (https://github.com/ansible-collections/community.general/issues/6452, https://github.com/ansible-collections/community.general/issues/6458).
    - terraform - fix broken ``warn()`` call (https://github.com/ansible-collections/community.general/pull/6497).
    - xfs_quota - in case of a project quota, the call to ``xfs_quota`` did not initialize/reset the project (https://github.com/ansible-collections/community.general/issues/5143).
    - zypper - added handling of zypper exit code 102. The changed state is now set correctly, and rc 102 is still preserved to be evaluated by the playbook (https://github.com/ansible-collections/community.general/pull/6534).
    minor_changes:
    - dconf - if ``gi.repository.GLib`` is missing, try to respawn in a Python interpreter that has it (https://github.com/ansible-collections/community.general/pull/6491).
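For context on the respawn behaviour, a hedged sketch of the general interpreter-respawn pattern provided by ansible-core's ``ansible.module_utils.common.respawn``; the interpreter list is illustrative and the details differ from the actual dconf implementation:

from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module

try:
    from gi.repository import GLib  # noqa: F401
    HAS_GLIB = True
except ImportError:
    HAS_GLIB = False

def ensure_glib(module):
    # Re-execute the module under an interpreter that can import 'gi',
    # unless we are already running in a respawned process.
    if HAS_GLIB:
        return
    if not has_respawned():
        interpreter = probe_interpreters_for_module(['/usr/bin/python3'], 'gi')
        if interpreter:
            respawn_module(interpreter)  # does not return
    module.fail_json(msg="Could not find a Python interpreter providing gi.repository.GLib")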
    release_summary: Regular bugfix release.
  fragments:
  - 5143-fix-xfs-quota-project-init.yml
  - 6.6.1.yml
  - 6456-fix-portage-withbdeps-false.yml
  - 6458-puppet-noop.yml
  - 6491-dconf-respawn.yml
  - 6497-terraform-fix.yml
  - 6527-nmcli-bond-fix-xmit_hash_policy.yml
  - 6534-zypper-exitcode-102-handled.yaml
  - 6548-portage-changed_use-newuse.yml
  - 6554-proxmox-tasks-info-fix-required-password.yaml
  - deps.yml
  - passwordstore-lock.yml
  release_date: '2023-05-22'
6.6.2:
  changes:
    bugfixes:
    - csv module utils - detect and remove Unicode BOM markers from incoming CSV content (https://github.com/ansible-collections/community.general/pull/6662).
    - gitlab_group - the module passed parameters to the API call even when not set. The module now filters out ``None`` values to remediate this (https://github.com/ansible-collections/community.general/pull/6712).
    - ini_file - fix a bug where the inactive options were not used when possible (https://github.com/ansible-collections/community.general/pull/6575).
    - keycloak module utils - fix ``is_struct_included`` handling of lists of lists/dictionaries (https://github.com/ansible-collections/community.general/pull/6688).
    - keycloak module utils - the function ``get_user_by_username`` now returns the user representation or ``None`` as stated in the documentation (https://github.com/ansible-collections/community.general/pull/6758).
    release_summary: Regular bugfix release.
  fragments:
  - 6.6.2.yml
  - 6568-fix-get-user-by-username-in-keycloak-module-utils.yml
  - 6662-csv-bom.yml
  - 6688-is-struct-included-bug-in-keycloak-py.yml
  - 6712-gitlab_group-filtered-for-none-values.yml
  - ini_file-use-inactive-options-when-possible.yml
  release_date: '2023-06-19'
6.6.3:
  changes:
    bugfixes:
    - datadog_downtime - presence of the ``rrule`` parameter led to the Datadog API returning Bad Request due to a missing recurrence type (https://github.com/ansible-collections/community.general/pull/6811).
    - icinga2_host - fix a key error when updating an existing host (https://github.com/ansible-collections/community.general/pull/6748).
    - ipa_dnszone - fix 'idnsallowsyncptr' key error for reverse zone (https://github.com/ansible-collections/community.general/pull/6906, https://github.com/ansible-collections/community.general/issues/6905).
    - locale_gen - now works for locales without the underscore character such as ``C.UTF-8`` (https://github.com/ansible-collections/community.general/pull/6774, https://github.com/ansible-collections/community.general/issues/5142, https://github.com/ansible-collections/community.general/issues/4305).
    - machinectl become plugin - mark plugin as ``require_tty`` to automatically disable pipelining, with which this plugin is not compatible (https://github.com/ansible-collections/community.general/issues/6932, https://github.com/ansible-collections/community.general/pull/6935).
    - nmcli - fix support for empty list (in compare and scrape) (https://github.com/ansible-collections/community.general/pull/6769).
    - openbsd_pkg - the pkg_info(1) behavior has changed in OpenBSD >7.3. The error message ``Can't find`` should not lead to an error case (https://github.com/ansible-collections/community.general/pull/6785).
    - pacman - module recognizes the output of ``yay`` running as ``root`` (https://github.com/ansible-collections/community.general/pull/6713).
    - proxmox - fix error when a configuration had no ``template`` field (https://github.com/ansible-collections/community.general/pull/6838, https://github.com/ansible-collections/community.general/issues/5372).
    - proxmox module utils - add logic to detect whether an old proxmoxer complains about the ``token_name`` and ``token_value`` parameters and provide a better error message when that happens (https://github.com/ansible-collections/community.general/pull/6839, https://github.com/ansible-collections/community.general/issues/5371).
    - proxmox_disk - fix inability to create ``cdrom`` media due to ``size`` always being appended (https://github.com/ansible-collections/community.general/pull/6770).
    - proxmox_kvm - ``absent`` state with ``force`` specified failed to stop the VM due to the ``timeout`` value not being passed to ``stop_vm`` (https://github.com/ansible-collections/community.general/pull/6827).
    - redfish_info - fix ``ListUsers`` to not show empty account slots (https://github.com/ansible-collections/community.general/issues/6771, https://github.com/ansible-collections/community.general/pull/6772).
    - redfish_utils module utils - change variable names to avoid issues occurring when fetching Volumes data (https://github.com/ansible-collections/community.general/pull/6883).
    - 'rhsm_repository - when using the ``purge`` option, the ``repositories`` dictionary element in the returned JSON is now properly updated according to the pruning operation (https://github.com/ansible-collections/community.general/pull/6676).'
    minor_changes:
    - cobbler inventory plugin - convert Ansible unicode strings to native Python unicode strings before passing user/password to the XMLRPC client (https://github.com/ansible-collections/community.general/pull/6923).
    - redfish_info - fix for ``GetVolumeInventory``; the controller name was populated incorrectly and duplicates were seen in the retrieved volumes (https://github.com/ansible-collections/community.general/pull/6719).
    release_summary: Regular bugfix release.
  fragments:
  - 6.6.3.yml
  - 6676-rhsm_repository-fix-returned-repositories-with-purge.yml
  - 6713-yay-become.yml
  - 6719-redfish-utils-fix-for-get-volume-inventory.yml
  - 6748-icinga2_host-datafix.yml
  - 6769-nmcli-fix-empty-list.yml
  - 6770-proxmox_disk_create_cdrom.yml
  - 6771-redfish-filter-empty-account-slots.yml
  - 6774-locale-gen-fix.yml
  - 6785-openbsd_pkg_pkg_info_handling.yml
  - 6811-datadog-downtime-rrule-type.yaml
  - 6827-proxmox_kvm-force-delete-bug-fix.yaml
  - 6838-proxmox-dict-template.yml
  - 6839-promoxer-tokens.yml
  - 6883-redfish-utils-changing-variable-names-in-get-volume-inventory.yml
  - 6905-ipa_dnszone-key-error-fix.yml
  - 6923-cobbler-inventory_unicode.yml
  - 6935-machinectl-become.yml
  release_date: '2023-07-16'
6.6.4:
  changes:
    bugfixes:
    - bitwarden lookup plugin - the plugin made assumptions about the structure of a Bitwarden JSON object which may have been broken by an update in the Bitwarden API. Remove assumptions, and allow queries for general fields such as ``notes`` (https://github.com/ansible-collections/community.general/pull/7061).
    - cmd_runner module utils - when a parameter in ``argument_spec`` has no type, meaning it is implicitly a ``str``, ``CmdRunner`` would fail trying to find the ``type`` key in that dictionary (see the sketch after this list) (https://github.com/ansible-collections/community.general/pull/6968).
    - ejabberd_user - module was failing to detect whether the user was already created and/or the password was changed (https://github.com/ansible-collections/community.general/pull/7033).
    - ejabberd_user - provide a meaningful error message when the ``ejabberdctl`` command is not found (https://github.com/ansible-collections/community.general/pull/7028, https://github.com/ansible-collections/community.general/issues/6949).
    - oci_utils module utils - avoid direct type comparisons (https://github.com/ansible-collections/community.general/pull/7085).
    - proxmox module utils - fix proxmoxer library version check (https://github.com/ansible-collections/community.general/issues/6974, https://github.com/ansible-collections/community.general/issues/6975, https://github.com/ansible-collections/community.general/pull/6980).
    - proxmox_kvm - when the ``name`` option is provided without ``vmid`` and a VM with that name already exists, no new VM will be created (https://github.com/ansible-collections/community.general/issues/6911, https://github.com/ansible-collections/community.general/pull/6981).
    - proxmox_user_info - avoid direct type comparisons (https://github.com/ansible-collections/community.general/pull/7085).
    - rundeck - fix ``TypeError`` on 404 API response (https://github.com/ansible-collections/community.general/pull/6983).
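A minimal sketch of the cmd_runner behaviour described above, assuming a plain dictionary stands in for the ``argument_spec`` entry:

spec = {"name": {}}  # no explicit 'type' key means the parameter is implicitly a str

try:
    param_type = spec["name"]["type"]  # old behaviour: raises KeyError
except KeyError:
    print("old lookup crashed")

param_type = spec["name"].get("type", "str")  # fixed behaviour: falls back to 'str'
print(param_type)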
    minor_changes:
    - redfish_utils - use ``Controllers`` key in redfish data to obtain Storage controllers properties (https://github.com/ansible-collections/community.general/pull/7081).
    release_summary: Regular bugfix release.
  fragments:
  - 6.6.4.yml
  - 6949-ejabberdctl-error.yml
  - 6968-cmdrunner-implicit-type.yml
  - 6980-proxmox-fix-token-auth.yml
  - 6981-proxmox-fix-vm-creation-when-only-name-provided.yml
  - 6983-rundeck-fix-typerrror-on-404-api-response.yml
  - 7033-ejabberd-user-bugs.yml
  - 7061-fix-bitwarden-get_field.yml
  - 7081-redfish-utils-fix-for-storagecontrollers-deprecated-key.yaml
  - 7085-sanity.yml
  release_date: '2023-08-13'
6.6.5:
  changes:
    bugfixes:
    - CmdRunner module utils - do not attempt to resolve the path if the executable is given as a relative or absolute path (https://github.com/ansible-collections/community.general/pull/7200).
    - lxc connection plugin - now handles ``remote_addr`` defaulting to ``inventory_hostname`` correctly (https://github.com/ansible-collections/community.general/pull/7104).
    - nsupdate - fix a possible ``list index out of range`` exception (https://github.com/ansible-collections/community.general/issues/836).
    - oci_utils module utils - fix inappropriate logical comparison expressions and make them simpler. The previous checks had logical short circuits (https://github.com/ansible-collections/community.general/pull/7125).
    - pritunl module utils - fix incorrect URL parameter for the organization add method (https://github.com/ansible-collections/community.general/pull/7161).
    minor_changes:
    - make - allow ``params`` to be used without a value (https://github.com/ansible-collections/community.general/pull/7180).
    - pritunl module utils - ensure the ``validate_certs`` parameter is honoured in all methods (https://github.com/ansible-collections/community.general/pull/7156).
    release_summary: Regular bugfix release.
  fragments:
  - 6.6.5.yml
  - 7104_fix_lxc_remoteaddr_default.yml
  - 7125-fix-inappropriate-comparison.yml
  - 7156-ensure-validate-certs-parameter-is-honoured.yml
  - 7161-fix-incorrect-post-parameter.yml
  - 7180-make_params_without_value.yml
  - 7200-cmd-runner-abs-path.yml
  - 7219-fix-nsupdate-cname.yaml
  release_date: '2023-09-11'
6.6.6:
  changes:
    bugfixes:
    - mail - skip headers containing equals characters due to missing ``maxsplit`` on header key/value parsing (see the sketch after this list) (https://github.com/ansible-collections/community.general/pull/7303).
    - onepassword - fix ``KeyError`` exception when trying to access the value of a field that is not filled out in a 1Password item (https://github.com/ansible-collections/community.general/pull/7241).
    - terraform - prevent the ``-backend-config`` option from being double-wrapped by the ``shlex_quote`` function (https://github.com/ansible-collections/community.general/pull/7301).
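A short illustration of the ``maxsplit`` issue on key/value header parsing, assuming headers are supplied as ``KEY=VALUE`` strings:

header = "X-Token=abc=def"

# With maxsplit=1 the value keeps its embedded '=' characters:
key, value = header.split("=", 1)
print(key, value)  # X-Token abc=def

# Without maxsplit the split yields three items, so two-name unpacking fails:
try:
    key, value = header.split("=")
except ValueError as exc:
    print("broken parse:", exc)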
    release_summary: Regular bugfix release.
  fragments:
  - 6.6.6.yml
  - 7241-prevent-key-error-when-value-does-not-exist.yml
  - 7301-fix-backend-config-string-encapsulation.yml
  - 7303-mail-incorrect-header-parsing.yml
  release_date: '2023-10-08'
6.6.7:
  changes:
    bugfixes:
    - composer - fix inability to run ``working_dir``-dependent commands. The module threw an error when trying to run a ``working_dir``-dependent command, because it tried to get the command help without passing the ``working_dir`` (https://github.com/ansible-collections/community.general/issues/3787).
    - github_deploy_key - fix pagination behaviour causing a crash when only a single page of deploy keys exists (https://github.com/ansible-collections/community.general/pull/7375).
    - gitlab_group_members - fix gitlab constants call in ``gitlab_group_members`` module (https://github.com/ansible-collections/community.general/issues/7467).
    - gitlab_project_members - fix gitlab constants call in ``gitlab_project_members`` module (https://github.com/ansible-collections/community.general/issues/7467).
    - gitlab_protected_branches - fix gitlab constants call in ``gitlab_protected_branches`` module (https://github.com/ansible-collections/community.general/issues/7467).
    - gitlab_user - fix gitlab constants call in ``gitlab_user`` module (https://github.com/ansible-collections/community.general/issues/7467).
    - lxc connection plugin - properly evaluate options (https://github.com/ansible-collections/community.general/pull/7369).
    - memset module utils - make compatible with ansible-core 2.17 (https://github.com/ansible-collections/community.general/pull/7379).
    - 'redhat_subscription - use the right D-Bus options for the consumer type when registering a RHEL system older than 9 or a RHEL 9 system older than 9.2 and using ``consumer_type`` (https://github.com/ansible-collections/community.general/pull/7378).'
    - selective callback plugin - fix length of task name lines in output always being 3 characters longer than desired (https://github.com/ansible-collections/community.general/pull/7374).
    release_summary: 'Bugfix release.

      From now on, community.general 6.x.y will only receive major bugfixes and security fixes.'
  fragments:
  - 3787-pass-composer-working-dir.yml
  - 6.6.7.yml
  - 7369-fix-lxc-options.yml
  - 7374-fix-selective-callback-taskname-length.yml
  - 7375-fix-github-deploy-key-pagination.yml
  - 7378-redhat_subscription-dbus-consumer-type.yaml
  - 7379-url.yml
  - 7467-fix-gitlab-constants-calls.yml
  release_date: '2023-11-04'
@@ -5,7 +5,7 @@
namespace: community
name: general
version: 6.5.0
version: 6.6.7
readme: README.md
authors:
- Ansible (https://github.com/ansible)
@@ -102,6 +102,7 @@ class BecomeModule(BecomeBase):
    prompt = 'Password: '
    fail = ('==== AUTHENTICATION FAILED ====',)
    success = ('==== AUTHENTICATION COMPLETE ====',)
    require_tty = True  # see https://github.com/ansible-collections/community.general/issues/6932

    @staticmethod
    def remove_ansi_codes(line):

@@ -115,8 +115,8 @@ class CallbackModule(CallbackBase):
        line_length = 120
        if self.last_skipped:
            print()
        msg = colorize("# {0} {1}".format(task_name,
                       '*' * (line_length - len(task_name))), 'bold')
        line = "# {0} ".format(task_name)
        msg = colorize("{0}{1}".format(line, '*' * (line_length - len(line))), 'bold')
        print(msg)

    def _indent_text(self, text, indent_level):
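To see why the old padding in the selective callback overshot by exactly three characters, a runnable miniature of the two formatting approaches:

line_length = 40
task_name = "demo task"

# Old: pads only for the task name, ignoring the "# " prefix and the space.
old = "# {0} {1}".format(task_name, '*' * (line_length - len(task_name)))
# New: pads for the full printed prefix, so the total is exactly line_length.
line = "# {0} ".format(task_name)
new = "{0}{1}".format(line, '*' * (line_length - len(line)))

print(len(old), len(new))  # 43 40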
@@ -48,6 +48,27 @@ DOCUMENTATION = '''
        default: chroot
'''

EXAMPLES = r"""
# Plugin requires root privileges for chroot, -E preserves your env (and location of ~/.ansible):
# sudo -E ansible-playbook ...
#
# Static inventory file
# [chroots]
# /path/to/debootstrap
# /path/to/febootstrap
# /path/to/lxc-image
# /path/to/chroot

# playbook
---
- hosts: chroots
  connection: community.general.chroot
  tasks:
    - debug:
        msg: "This is coming from chroot environment"

"""

import os
import os.path
import subprocess
@@ -19,6 +19,7 @@ DOCUMENTATION = '''
            - Container identifier
        default: inventory_hostname
        vars:
            - name: inventory_hostname
            - name: ansible_host
            - name: ansible_lxc_host
    executable:

@@ -59,7 +60,7 @@ class Connection(ConnectionBase):
    def __init__(self, play_context, new_stdin, *args, **kwargs):
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)

        self.container_name = self._play_context.remote_addr
        self.container_name = None
        self.container = None

    def _connect(self):

@@ -67,12 +68,14 @@ class Connection(ConnectionBase):
        super(Connection, self)._connect()

        if not HAS_LIBLXC:
            msg = "lxc bindings for python2 are not installed"
            msg = "lxc python bindings are not installed"
            raise errors.AnsibleError(msg)

        if self.container:
            return

        self.container_name = self.get_option('remote_addr')

        self._display.vvv("THIS IS A LOCAL LXC DIR", host=self.container_name)
        self.container = _lxc.Container(self.container_name)
        if self.container.state == "STOPPED":

@@ -117,7 +120,7 @@ class Connection(ConnectionBase):
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        # python2-lxc needs bytes. python3-lxc needs text.
        executable = to_native(self._play_context.executable, errors='surrogate_or_strict')
        executable = to_native(self.get_option('executable'), errors='surrogate_or_strict')
        local_cmd = [executable, '-c', to_native(cmd, errors='surrogate_or_strict')]

        read_stdout, write_stdout = None, None
@@ -29,11 +29,13 @@ options:
    api_token_id:
        description:
            - Specify the token ID.
            - Requires C(proxmoxer>=1.1.0) to work.
        type: str
        version_added: 1.3.0
    api_token_secret:
        description:
            - Specify the token secret.
            - Requires C(proxmoxer>=1.1.0) to work.
        type: str
        version_added: 1.3.0
    validate_certs:
@@ -56,7 +56,7 @@ EXAMPLES = '''
- name: Parse a CSV file's contents
  ansible.builtin.debug:
    msg: >-
      {{ csv_data | community.genera.from_csv(dialect='unix') }}
      {{ csv_data | community.general.from_csv(dialect='unix') }}
  vars:
    csv_data: |
      Column 1,Value
@@ -87,6 +87,7 @@ from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.six import iteritems
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name
from ansible.module_utils.six import text_type

# xmlrpc
try:

@@ -128,7 +129,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
            self.connection = xmlrpc_client.Server(self.cobbler_url, allow_none=True)
            self.token = None
            if self.get_option('user') is not None:
                self.token = self.connection.login(self.get_option('user'), self.get_option('password'))
                self.token = self.connection.login(text_type(self.get_option('user')), text_type(self.get_option('password')))
            return self.connection

    def _init_cache(self):
@@ -30,10 +30,18 @@ DOCUMENTATION = '''
    address:
        description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation.
        required: true
        env:
            - name: ANSIBLE_NMAP_ADDRESS
              version_added: 6.6.0
    exclude:
        description: list of addresses to exclude
        description:
            - List of addresses to exclude.
            - For example C(10.2.2.15-25) or C(10.2.2.15,10.2.2.16).
        type: list
        elements: string
        env:
            - name: ANSIBLE_NMAP_EXCLUDE
              version_added: 6.6.0
    port:
        description:
            - Only scan specific port or port range (C(-p)).
@@ -12,6 +12,8 @@ DOCUMENTATION = """
    requirements:
        - bw (command line utility)
        - be logged into bitwarden
        - bitwarden vault unlocked
        - C(BW_SESSION) environment variable set
    short_description: Retrieve secrets from Bitwarden
    version_added: 5.4.0
    description:

@@ -130,20 +132,29 @@ class Bitwarden(object):
        If field is None, return the whole record for each match.
        """
        matches = self._get_matches(search_value, search_field, collection_id)

        if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']:
            return [match['login'][field] for match in matches]
        elif not field:
        if not field:
            return matches
        else:
            custom_field_matches = []
            for match in matches:
        field_matches = []
        for match in matches:
            # if there are no custom fields, then `match` has no key 'fields'
            if 'fields' in match:
                custom_field_found = False
                for custom_field in match['fields']:
                    if custom_field['name'] == field:
                        custom_field_matches.append(custom_field['value'])
            if matches and not custom_field_matches:
                raise AnsibleError("Custom field {field} does not exist in {search_value}".format(field=field, search_value=search_value))
            return custom_field_matches
                    if field == custom_field['name']:
                        field_matches.append(custom_field['value'])
                        custom_field_found = True
                        break
                if custom_field_found:
                    continue
            if 'login' in match and field in match['login']:
                field_matches.append(match['login'][field])
                continue
            if field in match:
                field_matches.append(match[field])
                continue
        if matches and not field_matches:
            raise AnsibleError("field {field} does not exist in {search_value}".format(field=field, search_value=search_value))
        return field_matches


class LookupModule(LookupBase):
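A hedged sketch of the new field resolution order the rewrite implements: custom fields first, then the nested 'login' object, then top-level keys such as 'notes'. The item layout below imitates the Bitwarden CLI JSON and is illustrative only:

def resolve_field(item, field):
    # 1) custom fields, 2) login sub-object, 3) top-level keys
    for custom in item.get('fields', []):
        if custom.get('name') == field:
            return custom.get('value')
    if field in item.get('login', {}):
        return item['login'][field]
    return item.get(field)

item = {
    'login': {'username': 'user@example.com', 'password': 's3cret'},
    'notes': 'general note',
    'fields': [{'name': 'mfa-seed', 'value': 'ABC123'}],
}
print(resolve_field(item, 'notes'))     # general note
print(resolve_field(item, 'username'))  # user@example.com
print(resolve_field(item, 'mfa-seed'))  # ABC123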
@@ -61,6 +61,7 @@ DOCUMENTATION = '''
        description:
            - Return empty result without empty strings, and return empty list instead of C(NXDOMAIN).
            - The default for this option will likely change to C(true) in the future.
            - This option will be forced to C(true) if multiple domains to be queried are specified.
        default: false
        type: bool
        version_added: 6.0.0

@@ -95,6 +96,21 @@ EXAMPLES = """
    msg: "MX record for gmail.com {{ item }}"
  with_items: "{{ lookup('community.general.dig', 'gmail.com./MX', wantlist=true) }}"

- name: Lookup multiple names at once
  ansible.builtin.debug:
    msg: "A record found {{ item }}"
  loop: "{{ query('community.general.dig', 'example.org.', 'example.com.', 'gmail.com.') }}"

- name: Lookup multiple names at once (from list variable)
  ansible.builtin.debug:
    msg: "A record found {{ item }}"
  loop: "{{ query('community.general.dig', *hosts) }}"
  vars:
    hosts:
      - example.org.
      - example.com.
      - gmail.com.

- ansible.builtin.debug:
    msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '192.0.2.5/PTR') }}"
- ansible.builtin.debug:
@@ -308,7 +324,7 @@ class LookupModule(LookupBase):
            edns_size = 4096
            myres.use_edns(0, ednsflags=dns.flags.DO, payload=edns_size)

        domain = None
        domains = []
        qtype = self.get_option('qtype')
        flat = self.get_option('flat')
        fail_on_error = self.get_option('fail_on_error')

@@ -365,63 +381,71 @@ class LookupModule(LookupBase):
                if '/' in t:
                    try:
                        domain, qtype = t.split('/')
                        domains.append(domain)
                    except Exception:
                        domain = t
                        domains.append(t)
                else:
                    domain = t
                    domains.append(t)

        # print "--- domain = {0} qtype={1} rdclass={2}".format(domain, qtype, rdclass)

        if qtype.upper() == 'PTR':
            reversed_domains = []
            for domain in domains:
                try:
                    n = dns.reversename.from_address(domain)
                    reversed_domains.append(n.to_text())
                except dns.exception.SyntaxError:
                    pass
                except Exception as e:
                    raise AnsibleError("dns.reversename unhandled exception %s" % to_native(e))
            domains = reversed_domains

        if len(domains) > 1:
            real_empty = True

        ret = []

        if qtype.upper() == 'PTR':
        for domain in domains:
            try:
                n = dns.reversename.from_address(domain)
                domain = n.to_text()
            except dns.exception.SyntaxError:
                pass
            except Exception as e:
                raise AnsibleError("dns.reversename unhandled exception %s" % to_native(e))
            answers = myres.query(domain, qtype, rdclass=rdclass)
            for rdata in answers:
                s = rdata.to_text()
                if qtype.upper() == 'TXT':
                    s = s[1:-1]  # Strip outside quotes on TXT rdata

            try:
                answers = myres.query(domain, qtype, rdclass=rdclass)
                for rdata in answers:
                    s = rdata.to_text()
                    if qtype.upper() == 'TXT':
                        s = s[1:-1]  # Strip outside quotes on TXT rdata
                    if flat:
                        ret.append(s)
                    else:
                        try:
                            rd = make_rdata_dict(rdata)
                            rd['owner'] = answers.canonical_name.to_text()
                            rd['type'] = dns.rdatatype.to_text(rdata.rdtype)
                            rd['ttl'] = answers.rrset.ttl
                            rd['class'] = dns.rdataclass.to_text(rdata.rdclass)

                if flat:
                    ret.append(s)
                else:
                    try:
                        rd = make_rdata_dict(rdata)
                        rd['owner'] = answers.canonical_name.to_text()
                        rd['type'] = dns.rdatatype.to_text(rdata.rdtype)
                        rd['ttl'] = answers.rrset.ttl
                        rd['class'] = dns.rdataclass.to_text(rdata.rdclass)
                            ret.append(rd)
                        except Exception as err:
                            if fail_on_error:
                                raise AnsibleError("Lookup failed: %s" % str(err))
                            ret.append(str(err))

                        ret.append(rd)
                    except Exception as err:
                        if fail_on_error:
                            raise AnsibleError("Lookup failed: %s" % str(err))
                        ret.append(str(err))

            except dns.resolver.NXDOMAIN as err:
                if fail_on_error:
                    raise AnsibleError("Lookup failed: %s" % str(err))
                if not real_empty:
                    ret.append('NXDOMAIN')
            except dns.resolver.NoAnswer as err:
                if fail_on_error:
                    raise AnsibleError("Lookup failed: %s" % str(err))
                if not real_empty:
                    ret.append("")
            except dns.resolver.Timeout as err:
                if fail_on_error:
                    raise AnsibleError("Lookup failed: %s" % str(err))
                if not real_empty:
                    ret.append("")
            except dns.exception.DNSException as err:
                raise AnsibleError("dns.resolver unhandled exception %s" % to_native(err))
        except dns.resolver.NXDOMAIN as err:
            if fail_on_error:
                raise AnsibleError("Lookup failed: %s" % str(err))
            if not real_empty:
                ret.append('NXDOMAIN')
        except dns.resolver.NoAnswer as err:
            if fail_on_error:
                raise AnsibleError("Lookup failed: %s" % str(err))
            if not real_empty:
                ret.append("")
        except dns.resolver.Timeout as err:
            if fail_on_error:
                raise AnsibleError("Lookup failed: %s" % str(err))
            if not real_empty:
                ret.append("")
        except dns.exception.DNSException as err:
            raise AnsibleError("dns.resolver unhandled exception %s" % to_native(err))

        return ret
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Scott Buchanan <sbuchanan@ri.pn>
# Copyright (c) 2018, Scott Buchanan <scott@buchanan.works>
# Copyright (c) 2016, Andrew Zenk <azenk@umn.edu> (lastpass.py used as starting point)
# Copyright (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)

@@ -74,18 +74,18 @@ EXAMPLES = """

- name: Retrieve password for HAL when not signed in to 1Password
  ansible.builtin.debug:
    var: lookup('community.general.onepassword'
                'HAL 9000'
                subdomain='Discovery'
    var: lookup('community.general.onepassword',
                'HAL 9000',
                subdomain='Discovery',
                master_password=vault_master_password)

- name: Retrieve password for HAL when never signed in to 1Password
  ansible.builtin.debug:
    var: lookup('community.general.onepassword'
                'HAL 9000'
                subdomain='Discovery'
                master_password=vault_master_password
                username='tweety@acme.com'
    var: lookup('community.general.onepassword',
                'HAL 9000',
                subdomain='Discovery',
                master_password=vault_master_password,
                username='tweety@acme.com',
                secret_key=vault_secret_key)
"""
@@ -451,10 +451,10 @@ class OnePassCLIv2(OnePassCLIBase):
                    # If the field name doesn't exist in the section, match on the value of "label"
                    # then "id" and return "value"
                    if field.get("label") == field_name:
                        return field["value"]
                        return field.get("value", "")

                    if field.get("id") == field_name:
                        return field["value"]
                        return field.get("value", "")

            # Look at the section data and get an identifier. The value of 'id' is either a unique ID
            # or a human-readable string. If a 'label' field exists, prefer that since

@@ -464,10 +464,10 @@ class OnePassCLIv2(OnePassCLIBase):
                if section_title == current_section_title:
                    # In the correct section. Check "label" then "id" for the desired field_name
                    if field.get("label") == field_name:
                        return field["value"]
                        return field.get("value", "")

                    if field.get("id") == field_name:
                        return field["value"]
                        return field.get("value", "")

        return ""
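A tiny illustration of the dictionary-access change above; the field layout imitates the 1Password CLI JSON and is illustrative only:

field = {"id": "username", "label": "username"}  # value left empty in 1Password

try:
    print(field["value"])  # old behaviour: raises KeyError
except KeyError:
    print("old lookup crashed")

print(repr(field.get("value", "")))  # fixed behaviour: ''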
@@ -209,7 +209,6 @@ import time
import yaml

from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils.common.file import FileLock
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.utils.display import Display

@@ -217,6 +216,8 @@ from ansible.utils.encrypt import random_password
from ansible.plugins.lookup import LookupBase
from ansible import constants as C

from ansible_collections.community.general.plugins.module_utils._filelock import FileLock

display = Display()
@@ -16,6 +16,8 @@ DOCUMENTATION = r"""
    version_added: '3.2.0'
    description:
        - Generates random string based upon the given constraints.
        - Uses L(random.SystemRandom,https://docs.python.org/3/library/random.html#random.SystemRandom),
          so should be strong enough for cryptographic purposes.
    options:
        length:
            description: The length of the string.
109 plugins/module_utils/_filelock.py Normal file
@@ -0,0 +1,109 @@
# Copyright (c) 2018, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
# SPDX-License-Identifier: BSD-2-Clause

# NOTE:
# This has been vendored from ansible.module_utils.common.file. This code has been removed from there for ansible-core 2.16.

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import stat
import time
import fcntl
import sys

from contextlib import contextmanager


class LockTimeout(Exception):
    pass


class FileLock:
    '''
    Currently FileLock is implemented via fcntl.flock on a lock file, however this
    behaviour may change in the future. Avoid mixing lock types fcntl.flock,
    fcntl.lockf and module_utils.common.file.FileLock as it will certainly cause
    unwanted and/or unexpected behaviour.
    '''
    def __init__(self):
        self.lockfd = None

    @contextmanager
    def lock_file(self, path, tmpdir, lock_timeout=None):
        '''
        Context for lock acquisition.
        '''
        try:
            self.set_lock(path, tmpdir, lock_timeout)
            yield
        finally:
            self.unlock()

    def set_lock(self, path, tmpdir, lock_timeout=None):
        '''
        Create a lock file based on path with flock to prevent other processes
        using the given path.
        Please note that currently file locking only works when it is executed by
        the same user, i.e. single-user scenarios.

        :kw path: Path (file) to lock
        :kw tmpdir: Path where to place the temporary .lock file
        :kw lock_timeout:
            Wait n seconds for lock acquisition, fail if timeout is reached.
            0 = Do not wait, fail if lock cannot be acquired immediately.
            Default is None, wait indefinitely until lock is released.
        :returns: True
        '''
        lock_path = os.path.join(tmpdir, 'ansible-{0}.lock'.format(os.path.basename(path)))
        l_wait = 0.1
        r_exception = IOError
        if sys.version_info[0] == 3:
            r_exception = BlockingIOError

        self.lockfd = open(lock_path, 'w')

        # Guard against lock_timeout=None: None means "wait indefinitely" and
        # must not take the non-blocking path (comparing None with 0 would
        # raise TypeError on Python 3).
        if lock_timeout is not None and lock_timeout <= 0:
            fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD)
            return True

        if lock_timeout:
            e_secs = 0
            while e_secs < lock_timeout:
                try:
                    fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                    os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD)
                    return True
                except r_exception:
                    time.sleep(l_wait)
                    e_secs += l_wait
                    continue

            self.lockfd.close()
            raise LockTimeout('{0} sec'.format(lock_timeout))

        fcntl.flock(self.lockfd, fcntl.LOCK_EX)
        os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD)

        return True

    def unlock(self):
        '''
        Make sure the lock file is available for everyone and unlock the file
        descriptor locked by set_lock.

        :returns: True
        '''
        if not self.lockfd:
            return True

        try:
            fcntl.flock(self.lockfd, fcntl.LOCK_UN)
            self.lockfd.close()
        except ValueError:  # file wasn't opened, let context manager fail gracefully
            pass

        return True
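A hedged usage sketch for the vendored FileLock; the file path is illustrative. The context manager serializes access across concurrent processes of the same user:

import tempfile

from ansible_collections.community.general.plugins.module_utils._filelock import FileLock

lock = FileLock()
with lock.lock_file('/tmp/demo-store.gpg', tempfile.gettempdir(), lock_timeout=10):
    # Critical section: only one process holding the flock gets here;
    # others wait up to 10 seconds, after which LockTimeout is raised.
    print("lock held")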
464 plugins/module_utils/btrfs.py Normal file
@@ -0,0 +1,464 @@
# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.module_utils.common.text.converters import to_bytes
import re
import os


def normalize_subvolume_path(path):
    """
    Normalizes btrfs subvolume paths to ensure exactly one leading slash, no trailing slashes and no consecutive slashes.
    In addition, if the path is prefixed with a leading <FS_TREE>, this value is removed.
    """
    fstree_stripped = re.sub(r'^<FS_TREE>', '', path)
    result = re.sub(r'/+$', '', re.sub(r'/+', '/', '/' + fstree_stripped))
    return result if len(result) > 0 else '/'
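A short illustration of normalize_subvolume_path on typical inputs, assuming the collection is installed so the vendored module is importable:

from ansible_collections.community.general.plugins.module_utils.btrfs import normalize_subvolume_path

print(normalize_subvolume_path('<FS_TREE>/@home//photos/'))  # /@home/photos
print(normalize_subvolume_path('@'))                         # /@
print(normalize_subvolume_path('<FS_TREE>'))                 # /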
class BtrfsModuleException(Exception):
    pass


class BtrfsCommands(object):

    """
    Provides access to a subset of the Btrfs command line
    """

    def __init__(self, module):
        self.__module = module
        self.__btrfs = self.__module.get_bin_path("btrfs", required=True)

    def filesystem_show(self):
        command = "%s filesystem show -d" % (self.__btrfs)
        result = self.__module.run_command(command, check_rc=True)
        stdout = [x.strip() for x in result[1].splitlines()]
        filesystems = []
        current = None
        for line in stdout:
            if line.startswith('Label'):
                current = self.__parse_filesystem(line)
                filesystems.append(current)
            elif line.startswith('devid'):
                current['devices'].append(self.__parse_filesystem_device(line))
        return filesystems

    def __parse_filesystem(self, line):
        label = re.sub(r'\s*uuid:.*$', '', re.sub(r'^Label:\s*', '', line))
        id = re.sub(r'^.*uuid:\s*', '', line)

        filesystem = {}
        filesystem['label'] = label.strip("'") if label != 'none' else None
        filesystem['uuid'] = id
        filesystem['devices'] = []
        filesystem['mountpoints'] = []
        filesystem['subvolumes'] = []
        filesystem['default_subvolid'] = None
        return filesystem

    def __parse_filesystem_device(self, line):
        return re.sub(r'^.*path\s', '', line)

    def subvolumes_list(self, filesystem_path):
        command = "%s subvolume list -tap %s" % (self.__btrfs, filesystem_path)
        result = self.__module.run_command(command, check_rc=True)
        stdout = [x.split('\t') for x in result[1].splitlines()]
        subvolumes = [{'id': 5, 'parent': None, 'path': '/'}]
        if len(stdout) > 2:
            subvolumes.extend([self.__parse_subvolume_list_record(x) for x in stdout[2:]])
        return subvolumes

    def __parse_subvolume_list_record(self, item):
        return {
            'id': int(item[0]),
            'parent': int(item[2]),
            'path': normalize_subvolume_path(item[5]),
        }

    def subvolume_get_default(self, filesystem_path):
        command = [self.__btrfs, "subvolume", "get-default", to_bytes(filesystem_path)]
        result = self.__module.run_command(command, check_rc=True)
        # ID [n] ...
        return int(result[1].strip().split()[1])

    def subvolume_set_default(self, filesystem_path, subvolume_id):
        command = [self.__btrfs, "subvolume", "set-default", str(subvolume_id), to_bytes(filesystem_path)]
        result = self.__module.run_command(command, check_rc=True)

    def subvolume_create(self, subvolume_path):
        command = [self.__btrfs, "subvolume", "create", to_bytes(subvolume_path)]
        result = self.__module.run_command(command, check_rc=True)

    def subvolume_snapshot(self, snapshot_source, snapshot_destination):
        command = [self.__btrfs, "subvolume", "snapshot", to_bytes(snapshot_source), to_bytes(snapshot_destination)]
        result = self.__module.run_command(command, check_rc=True)

    def subvolume_delete(self, subvolume_path):
        command = [self.__btrfs, "subvolume", "delete", to_bytes(subvolume_path)]
        result = self.__module.run_command(command, check_rc=True)


class BtrfsInfoProvider(object):

    """
    Utility providing details of the currently available btrfs filesystems
    """

    def __init__(self, module):
        self.__module = module
        self.__btrfs_api = BtrfsCommands(module)
        self.__findmnt_path = self.__module.get_bin_path("findmnt", required=True)

    def get_filesystems(self):
        filesystems = self.__btrfs_api.filesystem_show()
        mountpoints = self.__find_mountpoints()
        for filesystem in filesystems:
            device_mountpoints = self.__filter_mountpoints_for_devices(mountpoints, filesystem['devices'])
            filesystem['mountpoints'] = device_mountpoints

            if len(device_mountpoints) > 0:

                # any path within the filesystem can be used to query metadata
                mountpoint = device_mountpoints[0]['mountpoint']
                filesystem['subvolumes'] = self.get_subvolumes(mountpoint)
                filesystem['default_subvolid'] = self.get_default_subvolume_id(mountpoint)

        return filesystems

    def get_mountpoints(self, filesystem_devices):
        mountpoints = self.__find_mountpoints()
        return self.__filter_mountpoints_for_devices(mountpoints, filesystem_devices)

    def get_subvolumes(self, filesystem_path):
        return self.__btrfs_api.subvolumes_list(filesystem_path)

    def get_default_subvolume_id(self, filesystem_path):
        return self.__btrfs_api.subvolume_get_default(filesystem_path)

    def __filter_mountpoints_for_devices(self, mountpoints, devices):
        return [m for m in mountpoints if (m['device'] in devices)]

    def __find_mountpoints(self):
        command = "%s -t btrfs -nvP" % self.__findmnt_path
        result = self.__module.run_command(command)
        mountpoints = []
        if result[0] == 0:
            lines = result[1].splitlines()
            for line in lines:
                mountpoint = self.__parse_mountpoint_pairs(line)
                mountpoints.append(mountpoint)
        return mountpoints

    def __parse_mountpoint_pairs(self, line):
        pattern = re.compile(r'^TARGET="(?P<target>.*)"\s+SOURCE="(?P<source>.*)"\s+FSTYPE="(?P<fstype>.*)"\s+OPTIONS="(?P<options>.*)"\s*$')
        match = pattern.search(line)
        if match is not None:
            groups = match.groupdict()

            return {
                'mountpoint': groups['target'],
                'device': groups['source'],
                'subvolid': self.__extract_mount_subvolid(groups['options']),
            }
        else:
            raise BtrfsModuleException("Failed to parse findmnt result for line: '%s'" % line)

    def __extract_mount_subvolid(self, mount_options):
        for option in mount_options.split(','):
            if option.startswith('subvolid='):
                return int(option[len('subvolid='):])
        raise BtrfsModuleException("Failed to find subvolid for mountpoint in options '%s'" % mount_options)
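A hedged sketch of what the mountpoint parsing above consumes; the findmnt line and values are invented for illustration:

import re

line = 'TARGET="/home" SOURCE="/dev/sda2" FSTYPE="btrfs" OPTIONS="rw,relatime,subvolid=256,subvol=/@home"'
pattern = re.compile(r'^TARGET="(?P<target>.*)"\s+SOURCE="(?P<source>.*)"\s+FSTYPE="(?P<fstype>.*)"\s+OPTIONS="(?P<options>.*)"\s*$')
groups = pattern.search(line).groupdict()
# Extract the subvolume ID from the comma-separated mount options.
subvolid = next(int(o[len('subvolid='):]) for o in groups['options'].split(',') if o.startswith('subvolid='))
print(groups['target'], groups['source'], subvolid)  # /home /dev/sda2 256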
class BtrfsSubvolume(object):

    """
    Wrapper class providing convenience methods for inspection of a btrfs subvolume
    """

    def __init__(self, filesystem, subvolume_id):
        self.__filesystem = filesystem
        self.__subvolume_id = subvolume_id

    def get_filesystem(self):
        return self.__filesystem

    def is_mounted(self):
        mountpoints = self.get_mountpoints()
        return mountpoints is not None and len(mountpoints) > 0

    def is_filesystem_root(self):
        return 5 == self.__subvolume_id

    def is_filesystem_default(self):
        return self.__filesystem.default_subvolid == self.__subvolume_id

    def get_mounted_path(self):
        mountpoints = self.get_mountpoints()
        if mountpoints is not None and len(mountpoints) > 0:
            return mountpoints[0]
        elif self.parent is not None:
            parent = self.__filesystem.get_subvolume_by_id(self.parent)
            parent_path = parent.get_mounted_path()
            if parent_path is not None:
                return parent_path + os.path.sep + self.name
        else:
            return None

    def get_mountpoints(self):
        return self.__filesystem.get_mountpoints_by_subvolume_id(self.__subvolume_id)

    def get_child_relative_path(self, absolute_child_path):
        """
        Get the relative path from this subvolume to the named child subvolume.
        The provided parameter is expected to be normalized as by normalize_subvolume_path.
        """
        path = self.path
        if absolute_child_path.startswith(path):
            relative = absolute_child_path[len(path):]
            return re.sub(r'^/*', '', relative)
        else:
            raise BtrfsModuleException("Path '%s' doesn't start with '%s'" % (absolute_child_path, path))

    def get_parent_subvolume(self):
        parent_id = self.parent
        return self.__filesystem.get_subvolume_by_id(parent_id) if parent_id is not None else None

    def get_child_subvolumes(self):
        return self.__filesystem.get_subvolume_children(self.__subvolume_id)

    @property
    def __info(self):
        return self.__filesystem.get_subvolume_info_for_id(self.__subvolume_id)

    @property
    def id(self):
        return self.__subvolume_id

    @property
    def name(self):
        return self.path.split('/').pop()

    @property
    def path(self):
        return self.__info['path']

    @property
    def parent(self):
        return self.__info['parent']


class BtrfsFilesystem(object):

    """
    Wrapper class providing convenience methods for inspection of a btrfs filesystem
    """

    def __init__(self, info, provider, module):
        self.__provider = provider

        # constant for module execution
        self.__uuid = info['uuid']
        self.__label = info['label']
        self.__devices = info['devices']

        # refreshable
        self.__default_subvolid = info['default_subvolid'] if 'default_subvolid' in info else None
        self.__update_mountpoints(info['mountpoints'] if 'mountpoints' in info else [])
        self.__update_subvolumes(info['subvolumes'] if 'subvolumes' in info else [])

    @property
    def uuid(self):
        return self.__uuid

    @property
    def label(self):
        return self.__label

    @property
    def default_subvolid(self):
        return self.__default_subvolid

    @property
    def devices(self):
        return list(self.__devices)

    def refresh(self):
        self.refresh_mountpoints()
        self.refresh_subvolumes()
        self.refresh_default_subvolume()

    def refresh_mountpoints(self):
        mountpoints = self.__provider.get_mountpoints(list(self.__devices))
        self.__update_mountpoints(mountpoints)

    def __update_mountpoints(self, mountpoints):
        self.__mountpoints = dict()
        for i in mountpoints:
            subvolid = i['subvolid']
            mountpoint = i['mountpoint']
            if subvolid not in self.__mountpoints:
                self.__mountpoints[subvolid] = []
            self.__mountpoints[subvolid].append(mountpoint)

    def refresh_subvolumes(self):
        filesystem_path = self.get_any_mountpoint()
        if filesystem_path is not None:
            subvolumes = self.__provider.get_subvolumes(filesystem_path)
            self.__update_subvolumes(subvolumes)

    def __update_subvolumes(self, subvolumes):
        # TODO strategy for retaining information on deleted subvolumes?
        self.__subvolumes = dict()
        for subvolume in subvolumes:
            self.__subvolumes[subvolume['id']] = subvolume

    def refresh_default_subvolume(self):
        filesystem_path = self.get_any_mountpoint()
        if filesystem_path is not None:
            self.__default_subvolid = self.__provider.get_default_subvolume_id(filesystem_path)

    def contains_device(self, device):
        return device in self.__devices

    def contains_subvolume(self, subvolume):
        return self.get_subvolume_by_name(subvolume) is not None

    def get_subvolume_by_id(self, subvolume_id):
        return BtrfsSubvolume(self, subvolume_id) if subvolume_id in self.__subvolumes else None

    def get_subvolume_info_for_id(self, subvolume_id):
        return self.__subvolumes[subvolume_id] if subvolume_id in self.__subvolumes else None

    def get_subvolume_by_name(self, subvolume):
        for subvolume_info in self.__subvolumes.values():
            if subvolume_info['path'] == subvolume:
                return BtrfsSubvolume(self, subvolume_info['id'])
        return None

    def get_any_mountpoint(self):
        for subvol_mountpoints in self.__mountpoints.values():
            if len(subvol_mountpoints) > 0:
                return subvol_mountpoints[0]
        # maybe error?
        return None

    def get_any_mounted_subvolume(self):
        for subvolid, subvol_mountpoints in self.__mountpoints.items():
            if len(subvol_mountpoints) > 0:
                return self.get_subvolume_by_id(subvolid)
        return None

    def get_mountpoints_by_subvolume_id(self, subvolume_id):
        return self.__mountpoints[subvolume_id] if subvolume_id in self.__mountpoints else []

    def get_nearest_subvolume(self, subvolume):
        """Return the identified subvolume if existing, else the closest matching parent"""
        subvolumes_by_path = self.__get_subvolumes_by_path()
        while len(subvolume) > 1:
            if subvolume in subvolumes_by_path:
                return BtrfsSubvolume(self, subvolumes_by_path[subvolume]['id'])
            else:
                subvolume = re.sub(r'/[^/]+$', '', subvolume)

        return BtrfsSubvolume(self, 5)

    def get_mountpath_as_child(self, subvolume_name):
        """Find a path to the target subvolume through a mounted ancestor"""
        nearest = self.get_nearest_subvolume(subvolume_name)
        if nearest.path == subvolume_name:
            nearest = nearest.get_parent_subvolume()
        if nearest is None or nearest.get_mounted_path() is None:
            raise BtrfsModuleException("Failed to find a path '%s' through a mounted parent subvolume" % subvolume_name)
        else:
            return nearest.get_mounted_path() + os.path.sep + nearest.get_child_relative_path(subvolume_name)

    def get_subvolume_children(self, subvolume_id):
        return [BtrfsSubvolume(self, x['id']) for x in self.__subvolumes.values() if x['parent'] == subvolume_id]

    def __get_subvolumes_by_path(self):
        result = {}
        for s in self.__subvolumes.values():
            path = s['path']
            result[path] = s
        return result

    def is_mounted(self):
        return self.__mountpoints is not None and len(self.__mountpoints) > 0

    def get_summary(self):
        subvolumes = []
        sources = self.__subvolumes.values() if self.__subvolumes is not None else []
        for subvolume in sources:
            id = subvolume['id']
            subvolumes.append({
                'id': id,
                'path': subvolume['path'],
                'parent': subvolume['parent'],
                'mountpoints': self.get_mountpoints_by_subvolume_id(id),
            })

        return {
            'default_subvolume': self.__default_subvolid,
            'devices': self.__devices,
            'label': self.__label,
            'uuid': self.__uuid,
            'subvolumes': subvolumes,
        }


class BtrfsFilesystemsProvider(object):

    """
    Provides methods to query available btrfs filesystems
    """

    def __init__(self, module):
        self.__module = module
        self.__provider = BtrfsInfoProvider(module)
        self.__filesystems = None

    def get_matching_filesystem(self, criteria):
        if criteria['device'] is not None:
            criteria['device'] = os.path.realpath(criteria['device'])

        self.__check_init()
        matching = [f for f in self.__filesystems.values() if self.__filesystem_matches_criteria(f, criteria)]
        if len(matching) == 1:
            return matching[0]
        else:
            raise BtrfsModuleException("Found %d filesystems matching criteria uuid=%s label=%s device=%s" % (
                len(matching),
                criteria['uuid'],
                criteria['label'],
                criteria['device']
            ))

    def __filesystem_matches_criteria(self, filesystem, criteria):
        return ((criteria['uuid'] is None or filesystem.uuid == criteria['uuid']) and
                (criteria['label'] is None or filesystem.label == criteria['label']) and
                (criteria['device'] is None or filesystem.contains_device(criteria['device'])))

    def get_filesystem_for_device(self, device):
        real_device = os.path.realpath(device)
        self.__check_init()
        for fs in self.__filesystems.values():
            if fs.contains_device(real_device):
                return fs
        return None

    def get_filesystems(self):
        self.__check_init()
        return list(self.__filesystems.values())

    def __check_init(self):
        if self.__filesystems is None:
            self.__filesystems = dict()
            for f in self.__provider.get_filesystems():
                uuid = f['uuid']
                self.__filesystems[uuid] = BtrfsFilesystem(f, self.__provider, self.__module)
|
||||
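For orientation, a minimal sketch of how a module might consume the provider above (not part of the change set; it assumes an AnsibleModule instance named module and the device path is illustrative):

    provider = BtrfsFilesystemsProvider(module)
    # get_matching_filesystem expects all three criteria keys; None acts as a wildcard
    fs = provider.get_matching_filesystem({'uuid': None, 'label': None, 'device': '/dev/vda2'})
    module.exit_json(filesystem=fs.get_summary())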
@@ -6,6 +6,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type

import os
from functools import wraps

from ansible.module_utils.common.collections import is_sequence

@@ -199,11 +200,16 @@ class CmdRunner(object):
             environ_update = {}
         self.environ_update = environ_update

-        self.command[0] = module.get_bin_path(self.command[0], opt_dirs=path_prefix, required=True)
+        _cmd = self.command[0]
+        self.command[0] = _cmd if (os.path.isabs(_cmd) or '/' in _cmd) else module.get_bin_path(_cmd, opt_dirs=path_prefix, required=True)

         for mod_param_name, spec in iteritems(module.argument_spec):
             if mod_param_name not in self.arg_formats:
-                self.arg_formats[mod_param_name] = _Format.as_default_type(spec['type'], mod_param_name)
+                self.arg_formats[mod_param_name] = _Format.as_default_type(spec.get('type', 'str'), mod_param_name)

+    @property
+    def binary(self):
+        return self.command[0]
+
     def __call__(self, args_order=None, output_process=None, ignore_value_none=True, check_mode_skip=False, check_mode_return=None, **kwargs):
         if output_process is None:
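The rewritten lookup short-circuits path-like commands. The rule in isolation, as a hedged sketch (module.get_bin_path is the standard AnsibleModule helper; the function name is illustrative):

    import os

    def resolve_binary(module, cmd, path_prefix=None):
        # Anything containing a path separator is taken verbatim;
        # bare names are resolved on PATH (plus opt_dirs) as before.
        if os.path.isabs(cmd) or '/' in cmd:
            return cmd
        return module.get_bin_path(cmd, opt_dirs=path_prefix, required=True)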
@@ -55,8 +55,10 @@ def initialize_dialect(dialect, **kwargs):


def read_csv(data, dialect, fieldnames=None):

    BOM = to_native(u'\ufeff')
    data = to_native(data, errors='surrogate_or_strict')
    if data.startswith(BOM):
        data = data[len(BOM):]

    if PY3:
        fake_fh = StringIO(data)
@@ -50,7 +50,7 @@ class _Dependency(object):
     def failed(self):
         return self.state == 1

-    def verify(self, module):
+    def validate(self, module):
         if self.failed:
             module.fail_json(msg=self.message, exception=self.trace)

@@ -71,20 +71,28 @@ def declare(name, *args, **kwargs):
     _deps[name] = dep


-def validate(module, spec=None):
+def _select_names(spec):
     dep_names = sorted(_deps)

-    if spec is not None:
+    if spec:
         if spec.startswith("-"):
             spec_split = spec[1:].split(":")
             for d in spec_split:
                 dep_names.remove(d)
         else:
-            spec_split = spec[1:].split(":")
+            spec_split = spec.split(":")
             dep_names = []
             for d in spec_split:
                 _deps[d]  # ensure it exists
                 dep_names.append(d)

-    for dep in dep_names:
-        _deps[dep].verify(module)
+    return dep_names
+
+
+def validate(module, spec=None):
+    for dep in _select_names(spec):
+        _deps[dep].validate(module)
+
+
+def failed(spec=None):
+    return any(_deps[d].failed for d in _select_names(spec))
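Read together, the refactor splits name selection from validation so the new failed() can reuse it: a spec of "a:b" selects exactly those dependencies, while a leading "-" means "all except". A hedged usage sketch (the import path and dependency names are illustrative):

    from ansible_collections.community.general.plugins.module_utils import deps

    with deps.declare("requests"):
        import requests

    deps.validate(module)               # validate everything declared
    deps.validate(module, "-requests")  # everything except requests
    if deps.failed("requests"):
        pass  # inspect failure state without failing the module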
@@ -49,6 +49,16 @@ URL_CLIENTSCOPE = "{url}/admin/realms/{realm}/client-scopes/{id}"
URL_CLIENTSCOPE_PROTOCOLMAPPERS = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models"
URL_CLIENTSCOPE_PROTOCOLMAPPER = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models/{mapper_id}"

URL_DEFAULT_CLIENTSCOPES = "{url}/admin/realms/{realm}/default-default-client-scopes"
URL_DEFAULT_CLIENTSCOPE = "{url}/admin/realms/{realm}/default-default-client-scopes/{id}"
URL_OPTIONAL_CLIENTSCOPES = "{url}/admin/realms/{realm}/default-optional-client-scopes"
URL_OPTIONAL_CLIENTSCOPE = "{url}/admin/realms/{realm}/default-optional-client-scopes/{id}"

URL_CLIENT_DEFAULT_CLIENTSCOPES = "{url}/admin/realms/{realm}/clients/{cid}/default-client-scopes"
URL_CLIENT_DEFAULT_CLIENTSCOPE = "{url}/admin/realms/{realm}/clients/{cid}/default-client-scopes/{id}"
URL_CLIENT_OPTIONAL_CLIENTSCOPES = "{url}/admin/realms/{realm}/clients/{cid}/optional-client-scopes"
URL_CLIENT_OPTIONAL_CLIENTSCOPE = "{url}/admin/realms/{realm}/clients/{cid}/optional-client-scopes/{id}"

URL_CLIENT_GROUP_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}"
URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/available"
URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/composite"

@@ -80,6 +90,9 @@ URL_IDENTITY_PROVIDER_MAPPER = "{url}/admin/realms/{realm}/identity-provider/ins
URL_COMPONENTS = "{url}/admin/realms/{realm}/components"
URL_COMPONENT = "{url}/admin/realms/{realm}/components/{id}"

URL_AUTHZ_AUTHORIZATION_SCOPE = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/scope/{id}"
URL_AUTHZ_AUTHORIZATION_SCOPES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/scope"


def keycloak_argument_spec():
    """
@@ -194,24 +207,30 @@ def is_struct_included(struct1, struct2, exclude=None):
         Return True if all element of dict 1 are present in dict 2, return false otherwise.
     """
     if isinstance(struct1, list) and isinstance(struct2, list):
+        if not struct1 and not struct2:
+            return True
         for item1 in struct1:
             if isinstance(item1, (list, dict)):
                 for item2 in struct2:
-                    if not is_struct_included(item1, item2, exclude):
-                        return False
+                    if is_struct_included(item1, item2, exclude):
+                        break
+                else:
+                    return False
             else:
                 if item1 not in struct2:
                     return False
         return True
     elif isinstance(struct1, dict) and isinstance(struct2, dict):
+        if not struct1 and not struct2:
+            return True
         try:
             for key in struct1:
                 if not (exclude and key in exclude):
                     if not is_struct_included(struct1[key], struct2[key], exclude):
                         return False
             return True
         except KeyError:
             return False
-        return True
+    elif isinstance(struct1, bool) and isinstance(struct2, bool):
+        return struct1 == struct2
     else:
@@ -734,8 +753,15 @@ class KeycloakAPI(object):
         users_url = URL_USERS.format(url=self.baseurl, realm=realm)
         users_url += '?username=%s&exact=true' % username
         try:
-            return json.loads(to_native(open_url(users_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
+            userrep = None
+            users = json.loads(to_native(open_url(users_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
+                                                  validate_certs=self.validate_certs).read()))
+            for user in users:
+                if user['username'] == username:
+                    userrep = user
+                    break
+            return userrep
+
         except ValueError as e:
             self.module.fail_json(msg='API returned incorrect JSON when trying to obtain the user for realm %s and username %s: %s'
                                   % (realm, username, str(e)))
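The replacement body narrows the API response to a literal username match rather than trusting the server-side filter. The guard in isolation (a hedged illustration with made-up data):

    users = [{"username": "Admin"}, {"username": "admin"}]
    username = "admin"
    userrep = next((u for u in users if u["username"] == username), None)
    # -> {"username": "admin"}, even if the server matched both entries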
@@ -1163,6 +1189,131 @@ class KeycloakAPI(object):
            self.module.fail_json(msg='Could not update protocolmappers for clientscope %s in realm %s: %s'
                                  % (mapper_rep, realm, str(e)))

    def get_default_clientscopes(self, realm, client_id=None):
        """Fetch the name and ID of all clientscopes on the Keycloak server.

        To fetch the full data of the client scope, make a subsequent call to
        get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return.

        :param realm: Realm in which the clientscope resides.
        :param client_id: The client in which the clientscope resides.
        :return: The default clientscopes of this realm or client.
        """
        url = URL_DEFAULT_CLIENTSCOPES if client_id is None else URL_CLIENT_DEFAULT_CLIENTSCOPES
        return self._get_clientscopes_of_type(realm, url, 'default', client_id)

    def get_optional_clientscopes(self, realm, client_id=None):
        """Fetch the name and ID of all clientscopes on the Keycloak server.

        To fetch the full data of the client scope, make a subsequent call to
        get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return.

        :param realm: Realm in which the clientscope resides.
        :param client_id: The client in which the clientscope resides.
        :return: The optional clientscopes of this realm or client.
        """
        url = URL_OPTIONAL_CLIENTSCOPES if client_id is None else URL_CLIENT_OPTIONAL_CLIENTSCOPES
        return self._get_clientscopes_of_type(realm, url, 'optional', client_id)

    def _get_clientscopes_of_type(self, realm, url_template, scope_type, client_id=None):
        """Fetch the name and ID of all clientscopes on the Keycloak server.

        To fetch the full data of the client scope, make a subsequent call to
        get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return.

        :param realm: Realm in which the clientscope resides.
        :param url_template: The URL template for the requested scope type.
        :param scope_type: This can be either 'optional' or 'default'.
        :param client_id: The client in which the clientscope resides.
        :return: The clientscopes of the specified type of this realm.
        """
        if client_id is None:
            clientscopes_url = url_template.format(url=self.baseurl, realm=realm)
            try:
                return json.loads(to_native(open_url(clientscopes_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
                                                     timeout=self.connection_timeout, validate_certs=self.validate_certs).read()))
            except Exception as e:
                self.module.fail_json(msg="Could not fetch list of %s clientscopes in realm %s: %s" % (scope_type, realm, str(e)))
        else:
            cid = self.get_client_id(client_id=client_id, realm=realm)
            clientscopes_url = url_template.format(url=self.baseurl, realm=realm, cid=cid)
            try:
                return json.loads(to_native(open_url(clientscopes_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
                                                     timeout=self.connection_timeout, validate_certs=self.validate_certs).read()))
            except Exception as e:
                self.module.fail_json(msg="Could not fetch list of %s clientscopes in client %s: %s" % (scope_type, client_id, str(e)))

    def _decide_url_type_clientscope(self, client_id=None, scope_type="default"):
        """Decides which url to use.

        :param scope_type: This can be either 'optional' or 'default'.
        :param client_id: The client in which the clientscope resides.
        """
        if client_id is None:
            if scope_type == "default":
                return URL_DEFAULT_CLIENTSCOPE
            if scope_type == "optional":
                return URL_OPTIONAL_CLIENTSCOPE
        else:
            if scope_type == "default":
                return URL_CLIENT_DEFAULT_CLIENTSCOPE
            if scope_type == "optional":
                return URL_CLIENT_OPTIONAL_CLIENTSCOPE

    def add_default_clientscope(self, id, realm="master", client_id=None):
        """Add a client scope as default either on realm or client level.

        :param id: Client scope Id.
        :param realm: Realm in which the clientscope resides.
        :param client_id: The client in which the clientscope resides.
        """
        self._action_type_clientscope(id, client_id, "default", realm, 'add')

    def add_optional_clientscope(self, id, realm="master", client_id=None):
        """Add a client scope as optional either on realm or client level.

        :param id: Client scope Id.
        :param realm: Realm in which the clientscope resides.
        :param client_id: The client in which the clientscope resides.
        """
        self._action_type_clientscope(id, client_id, "optional", realm, 'add')

    def delete_default_clientscope(self, id, realm="master", client_id=None):
        """Remove a client scope as default either on realm or client level.

        :param id: Client scope Id.
        :param realm: Realm in which the clientscope resides.
        :param client_id: The client in which the clientscope resides.
        """
        self._action_type_clientscope(id, client_id, "default", realm, 'delete')

    def delete_optional_clientscope(self, id, realm="master", client_id=None):
        """Remove a client scope as optional either on realm or client level.

        :param id: Client scope Id.
        :param realm: Realm in which the clientscope resides.
        :param client_id: The client in which the clientscope resides.
        """
        self._action_type_clientscope(id, client_id, "optional", realm, 'delete')

    def _action_type_clientscope(self, id=None, client_id=None, scope_type="default", realm="master", action='add'):
        """Delete or add a clientscope of type.

        :param name: The name of the clientscope. A lookup will be performed to retrieve the clientscope ID.
        :param client_id: The ID of the clientscope (preferred to name).
        :param scope_type: 'default' or 'optional'.
        :param realm: The realm in which this group resides, default "master".
        """
        cid = None if client_id is None else self.get_client_id(client_id=client_id, realm=realm)
        # should have a good cid by here.
        clientscope_type_url = self._decide_url_type_clientscope(client_id, scope_type).format(realm=realm, id=id, cid=cid, url=self.baseurl)
        try:
            method = 'PUT' if action == "add" else 'DELETE'
            return open_url(clientscope_type_url, method=method, http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
                            validate_certs=self.validate_certs)

        except Exception as e:
            place = 'realm' if client_id is None else 'client ' + client_id
            self.module.fail_json(msg="Unable to %s %s clientscope %s @ %s : %s" % (action, scope_type, id, place, str(e)))

    def create_clientsecret(self, id, realm="master"):
        """ Generate a new client secret by id
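Taken together, the new helpers give symmetric realm-level and client-level operations over the same four URL templates. A hedged usage sketch (kc is an existing KeycloakAPI instance; the realm and client names are illustrative):

    scopes = kc.get_default_clientscopes("master")                      # realm level
    optional = kc.get_optional_clientscopes("master", client_id="web")  # client level
    kc.add_default_clientscope(scopes[0]["id"], realm="master")
    kc.delete_optional_clientscope(optional[0]["id"], realm="master", client_id="web")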
@@ -1795,6 +1946,9 @@ class KeycloakAPI(object):
                     data=json.dumps(updatedExec),
                     timeout=self.connection_timeout,
                     validate_certs=self.validate_certs)
+        except HTTPError as e:
+            self.module.fail_json(msg="Unable to update execution '%s': %s: %s %s" %
+                                  (flowAlias, repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(updatedExec)))
         except Exception as e:
             self.module.fail_json(msg="Unable to update executions %s: %s" % (updatedExec, str(e)))

@@ -1819,7 +1973,7 @@ class KeycloakAPI(object):
         except Exception as e:
             self.module.fail_json(msg="Unable to add authenticationConfig %s: %s" % (executionId, str(e)))

-    def create_subflow(self, subflowName, flowAlias, realm='master'):
+    def create_subflow(self, subflowName, flowAlias, realm='master', flowType='basic-flow'):
         """ Create new subflow on the flow

         :param subflowName: name of the subflow to create
@@ -1830,7 +1984,7 @@ class KeycloakAPI(object):
         newSubFlow = {}
         newSubFlow["alias"] = subflowName
         newSubFlow["provider"] = "registration-page-form"
-        newSubFlow["type"] = "basic-flow"
+        newSubFlow["type"] = flowType
         open_url(
             URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW.format(
                 url=self.baseurl,
@@ -1865,8 +2019,11 @@ class KeycloakAPI(object):
                     data=json.dumps(newExec),
                     timeout=self.connection_timeout,
                     validate_certs=self.validate_certs)
+        except HTTPError as e:
+            self.module.fail_json(msg="Unable to create new execution '%s' %s: %s: %s %s" %
+                                  (flowAlias, execution["providerId"], repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(newExec)))
         except Exception as e:
-            self.module.fail_json(msg="Unable to create new execution %s: %s" % (execution["provider"], str(e)))
+            self.module.fail_json(msg="Unable to create new execution '%s' %s: %s" % (flowAlias, execution["providerId"], repr(e)))

     def change_execution_priority(self, executionId, diff, realm='master'):
         """ Raise or lower execution priority of diff time

@@ -2190,3 +2347,44 @@ class KeycloakAPI(object):
        except Exception as e:
            self.module.fail_json(msg='Unable to delete component %s in realm %s: %s'
                                  % (cid, realm, str(e)))

    def get_authz_authorization_scope_by_name(self, name, client_id, realm):
        url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm)
        search_url = "%s/search?name=%s" % (url, quote(name))

        try:
            return json.loads(to_native(open_url(search_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
                                                 timeout=self.connection_timeout,
                                                 validate_certs=self.validate_certs).read()))
        except Exception:
            return False

    def create_authz_authorization_scope(self, payload, client_id, realm):
        """Create an authorization scope for a Keycloak client"""
        url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm)

        try:
            return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
                            data=json.dumps(payload), validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Could not create authorization scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))

    def update_authz_authorization_scope(self, payload, id, client_id, realm):
        """Update an authorization scope for a Keycloak client"""
        url = URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm)

        try:
            return open_url(url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
                            data=json.dumps(payload), validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Could not update scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))

    def remove_authz_authorization_scope(self, id, client_id, realm):
        """Remove an authorization scope from a Keycloak client"""
        url = URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm)

        try:
            return open_url(url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
                            validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Could not delete scope %s for client %s in realm %s: %s' % (id, client_id, realm, str(e)))
@@ -8,6 +8,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
import time


class iLORedfishUtils(RedfishUtils):

@@ -228,3 +229,79 @@ class iLORedfishUtils(RedfishUtils):
        if not response['ret']:
            return response
        return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgrattr['mgr_attr_name']}

    def get_server_poststate(self):
        # Get server details
        response = self.get_request(self.root_uri + self.systems_uri)
        if not response["ret"]:
            return response
        server_data = response["data"]

        if "Hpe" in server_data["Oem"]:
            return {
                "ret": True,
                "server_poststate": server_data["Oem"]["Hpe"]["PostState"]
            }
        else:
            return {
                "ret": True,
                "server_poststate": server_data["Oem"]["Hp"]["PostState"]
            }

    def wait_for_ilo_reboot_completion(self, polling_interval=60, max_polling_time=1800):
        # This method checks if OOB controller reboot is completed
        time.sleep(10)

        # Check server poststate
        state = self.get_server_poststate()
        if not state["ret"]:
            return state

        count = int(max_polling_time / polling_interval)
        times = 0

        # When server is powered OFF
        pcount = 0
        while state["server_poststate"] in ["PowerOff", "Off"] and pcount < 5:
            time.sleep(10)
            state = self.get_server_poststate()
            if not state["ret"]:
                return state

            if state["server_poststate"] not in ["PowerOff", "Off"]:
                break
            pcount = pcount + 1
        if state["server_poststate"] in ["PowerOff", "Off"]:
            return {
                "ret": False,
                "changed": False,
                "msg": "Server is powered OFF"
            }

        # When server is not rebooting
        if state["server_poststate"] in ["InPostDiscoveryComplete", "FinishedPost"]:
            return {
                "ret": True,
                "changed": False,
                "msg": "Server is not rebooting"
            }

        while state["server_poststate"] not in ["InPostDiscoveryComplete", "FinishedPost"] and count > times:
            state = self.get_server_poststate()
            if not state["ret"]:
                return state

            if state["server_poststate"] in ["InPostDiscoveryComplete", "FinishedPost"]:
                return {
                    "ret": True,
                    "changed": True,
                    "msg": "Server reboot is completed"
                }
            time.sleep(polling_interval)
            times = times + 1

        return {
            "ret": False,
            "changed": False,
            "msg": "Server Reboot has failed, server state: {state}".format(state=state)
        }
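A hedged sketch of driving the new wait helper from a module (utils is an existing iLORedfishUtils instance; the interval values are illustrative, not defaults taken from the change):

    result = utils.wait_for_ilo_reboot_completion(polling_interval=30, max_polling_time=600)
    if not result["ret"]:
        module.fail_json(msg=result["msg"])
    module.exit_json(changed=result["changed"], msg=result["msg"])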
@@ -14,8 +14,9 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

 from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.module_utils.urls import open_url, urllib_error
+from ansible.module_utils.urls import open_url
 from ansible.module_utils.basic import json
+import ansible.module_utils.six.moves.urllib.error as urllib_error


 class Response(object):
@@ -79,7 +79,7 @@ def _post_pritunl_organization(
         api_secret=api_secret,
         base_url=base_url,
         method="POST",
-        path="/organization/%s",
+        path="/organization",
         headers={"Content-Type": "application/json"},
         data=json.dumps(organization_data),
         validate_certs=validate_certs,
@@ -220,7 +220,7 @@ def post_pritunl_organization(
         api_secret=api_secret,
         base_url=base_url,
         organization_data={"name": organization_name},
-        validate_certs=True,
+        validate_certs=validate_certs,
     )

     if response.getcode() != 200:
@@ -248,7 +248,7 @@ def post_pritunl_user(
             base_url=base_url,
             organization_id=organization_id,
             user_data=user_data,
-            validate_certs=True,
+            validate_certs=validate_certs,
         )

         if response.getcode() != 200:
@@ -267,7 +267,7 @@ def post_pritunl_user(
             organization_id=organization_id,
             user_data=user_data,
             user_id=user_id,
-            validate_certs=True,
+            validate_certs=validate_certs,
         )

         if response.getcode() != 200:
@@ -287,7 +287,7 @@ def delete_pritunl_organization(
         api_secret=api_secret,
         base_url=base_url,
         organization_id=organization_id,
-        validate_certs=True,
+        validate_certs=validate_certs,
     )

     if response.getcode() != 200:
@@ -307,7 +307,7 @@ def delete_pritunl_user(
         base_url=base_url,
         organization_id=organization_id,
         user_id=user_id,
-        validate_certs=True,
+        validate_certs=validate_certs,
     )

     if response.getcode() != 200:
@@ -45,6 +45,8 @@ def render(to_render):
    """Converts dictionary to OpenNebula template."""
    def recurse(to_render):
        for key, value in sorted(to_render.items()):
            if value is None:
                continue
            if isinstance(value, dict):
                yield '{0:}=[{1:}]'.format(key, ','.join(recurse(value)))
                continue

@@ -52,6 +54,9 @@ def render(to_render):
            for item in value:
                yield '{0:}=[{1:}]'.format(key, ','.join(recurse(item)))
                continue
            if isinstance(value, str):
                yield '{0:}="{1:}"'.format(key, value.replace('\\', '\\\\').replace('"', '\\"'))
                continue
            yield '{0:}="{1:}"'.format(key, value)
    return '\n'.join(recurse(to_render))
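The added branches skip None values and escape backslashes and double quotes inside string values. A worked example of the resulting template text (illustrative input, keys emitted in sorted order):

    print(render({'NAME': 'my "vm"', 'CPU': 1, 'COMMENT': None}))
    # CPU="1"
    # NAME="my \"vm\""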
@@ -561,7 +561,7 @@ def are_lists_equal(s, t):
     if s is None and t is None:
         return True

-    if (s is None and len(t) >= 0) or (t is None and len(s) >= 0) or (len(s) != len(t)):
+    if s is None or t is None or (len(s) != len(t)):
         return False

     if len(s) == 0:
@@ -570,7 +570,7 @@ def are_lists_equal(s, t):
     s = to_dict(s)
     t = to_dict(t)

-    if type(s[0]) == dict:
+    if isinstance(s[0], dict):
         # Handle list of dicts. Dictionary returned by the API may have additional keys. For example, a get call on
         # service gateway has an attribute `services` which is a list of `ServiceIdResponseDetails`. This has a key
         # `service_name` which is not provided in the list of `services` by a user while making an update call; only
@@ -604,9 +604,9 @@ def get_attr_to_update(get_fn, kwargs_get, module, update_attributes):
         user_provided_attr_value = module.params.get(attr, None)

         unequal_list_attr = (
-            type(resources_attr_value) == list or type(user_provided_attr_value) == list
+            isinstance(resources_attr_value, list) or isinstance(user_provided_attr_value, list)
         ) and not are_lists_equal(user_provided_attr_value, resources_attr_value)
-        unequal_attr = type(resources_attr_value) != list and to_dict(
+        unequal_attr = not isinstance(resources_attr_value, list) and to_dict(
             resources_attr_value
         ) != to_dict(user_provided_attr_value)
         if unequal_list_attr or unequal_attr:
@@ -936,9 +936,9 @@ def tuplize(d):
     list_of_tuples = []
     key_list = sorted(list(d.keys()))
     for key in key_list:
-        if type(d[key]) == list:
+        if isinstance(d[key], list):
             # Convert a value which is itself a list of dict to a list of tuples.
-            if d[key] and type(d[key][0]) == dict:
+            if d[key] and isinstance(d[key][0], dict):
                 sub_tuples = []
                 for sub_dict in d[key]:
                     sub_tuples.append(tuplize(sub_dict))
@@ -948,7 +948,7 @@ def tuplize(d):
                 list_of_tuples.append((sub_tuples is None, key, sub_tuples))
             else:
                 list_of_tuples.append((d[key] is None, key, d[key]))
-        elif type(d[key]) == dict:
+        elif isinstance(d[key], dict):
             tupled_value = tuplize(d[key])
             list_of_tuples.append((tupled_value is None, key, tupled_value))
         else:
@@ -969,13 +969,13 @@ def sort_dictionary(d):
     """
     sorted_d = {}
     for key in d:
-        if type(d[key]) == list:
-            if d[key] and type(d[key][0]) == dict:
+        if isinstance(d[key], list):
+            if d[key] and isinstance(d[key][0], dict):
                 sorted_value = sort_list_of_dictionary(d[key])
                 sorted_d[key] = sorted_value
             else:
                 sorted_d[key] = sorted(d[key])
-        elif type(d[key]) == dict:
+        elif isinstance(d[key], dict):
             sorted_d[key] = sort_dictionary(d[key])
         else:
             sorted_d[key] = d[key]
@@ -1026,10 +1026,7 @@ def check_if_user_value_matches_resources_attr(
         return

     if (
-        resources_value_for_attr is None
-        and len(user_provided_value_for_attr) >= 0
-        or user_provided_value_for_attr is None
-        and len(resources_value_for_attr) >= 0
+        resources_value_for_attr is None or user_provided_value_for_attr is None
     ):
         res[0] = False
         return
@@ -1044,7 +1041,7 @@ def check_if_user_value_matches_resources_attr(

     if (
         user_provided_value_for_attr
-        and type(user_provided_value_for_attr[0]) == dict
+        and isinstance(user_provided_value_for_attr[0], dict)
     ):
         # Process a list of dict
         sorted_user_provided_value_for_attr = sort_list_of_dictionary(
@@ -1547,7 +1544,7 @@ def delete_and_wait(
     except ServiceError as ex:
         # DNS API throws a 400 InvalidParameter when a zone id is provided for zone_name_or_id and if the zone
         # resource is not available, instead of the expected 404. So working around this for now.
-        if type(client) == oci.dns.DnsClient:
+        if isinstance(client, oci.dns.DnsClient):
             if ex.status == 400 and ex.code == "InvalidParameter":
                 _debug(
                     "Resource {0} with {1} already deleted. So returning changed=False".format(
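The sweep from exact type() equality to isinstance() matters because a client library may hand back subclasses of dict or list. A self-contained illustration (the subclass name is hypothetical):

    class TaggedDict(dict):  # e.g. a wrapper type returned by an SDK
        pass

    d = TaggedDict(name="x")
    print(type(d) == dict)      # False: the old exact-type check rejects the subclass
    print(isinstance(d, dict))  # True: the refactored checks accept it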
@@ -39,6 +39,7 @@ def pipx_runner(module, command, **kwargs):
        include_injected=fmt.as_bool("--include-injected"),
        index_url=fmt.as_opt_val('--index-url'),
        python=fmt.as_opt_val('--python'),
        system_site_packages=fmt.as_bool("--system-site-packages"),
        _list=fmt.as_fixed(['list', '--include-injected', '--json']),
        editable=fmt.as_bool("--editable"),
        pip_args=fmt.as_opt_val('--pip-args'),
@@ -18,6 +18,7 @@ import traceback

PROXMOXER_IMP_ERR = None
try:
    from proxmoxer import ProxmoxAPI
    from proxmoxer import __version__ as proxmoxer_version
    HAS_PROXMOXER = True
except ImportError:
    HAS_PROXMOXER = False

@@ -79,6 +80,7 @@ class ProxmoxAnsible(object):
            module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)

        self.module = module
        self.proxmoxer_version = proxmoxer_version
        self.proxmox_api = self._connect()
        # Test token validity
        try:

@@ -98,6 +100,8 @@ class ProxmoxAnsible(object):
        if api_password:
            auth_args['password'] = api_password
        else:
            if self.proxmoxer_version < LooseVersion('1.1.0'):
                self.module.fail_json('Using "token_name" and "token_value" require proxmoxer>=1.1.0')
            auth_args['token_name'] = api_token_id
            auth_args['token_value'] = api_token_secret
@@ -63,11 +63,7 @@ def puppet_runner(module):
         return cmd

     def noop_func(v):
-        _noop = cmd_runner_fmt.as_map({
-            True: "--noop",
-            False: "--no-noop",
-        })
-        return _noop(module.check_mode or v)
+        return ["--noop"] if module.check_mode or v else ["--no-noop"]

     _logdest_map = {
         "syslog": ["--logdest", "syslog"],
@@ -96,6 +92,7 @@ def puppet_runner(module):
         confdir=cmd_runner_fmt.as_opt_val("--confdir"),
         environment=cmd_runner_fmt.as_opt_val("--environment"),
         tags=cmd_runner_fmt.as_func(lambda v: ["--tags", ",".join(v)]),
+        skip_tags=cmd_runner_fmt.as_func(lambda v: ["--skip_tags", ",".join(v)]),
         certname=cmd_runner_fmt.as_opt_eq_val("--certname"),
         noop=cmd_runner_fmt.as_func(noop_func),
         use_srv_records=cmd_runner_fmt.as_map({
@@ -652,7 +652,8 @@ class RedfishUtils(object):
         properties = ['CacheSummary', 'FirmwareVersion', 'Identifiers',
                       'Location', 'Manufacturer', 'Model', 'Name', 'Id',
                       'PartNumber', 'SerialNumber', 'SpeedGbps', 'Status']
-        key = "StorageControllers"
+        key = "Controllers"
+        deprecated_key = "StorageControllers"

         # Find Storage service
         response = self.get_request(self.root_uri + systems_uri)
@@ -680,7 +681,30 @@ class RedfishUtils(object):
         data = response['data']

         if key in data:
-            controller_list = data[key]
+            controllers_uri = data[key][u'@odata.id']
+
+            response = self.get_request(self.root_uri + controllers_uri)
+            if response['ret'] is False:
+                return response
+            result['ret'] = True
+            data = response['data']
+
+            if data[u'Members']:
+                for controller_member in data[u'Members']:
+                    controller_member_uri = controller_member[u'@odata.id']
+                    response = self.get_request(self.root_uri + controller_member_uri)
+                    if response['ret'] is False:
+                        return response
+                    result['ret'] = True
+                    data = response['data']
+
+                    controller_result = {}
+                    for property in properties:
+                        if property in data:
+                            controller_result[property] = data[property]
+                    controller_results.append(controller_result)
+        elif deprecated_key in data:
+            controller_list = data[deprecated_key]
             for controller in controller_list:
                 controller_result = {}
                 for property in properties:
@@ -735,7 +759,25 @@ class RedfishUtils(object):
             return response
         data = response['data']
         controller_name = 'Controller 1'
-        if 'StorageControllers' in data:
+        if 'Controllers' in data:
+            controllers_uri = data['Controllers'][u'@odata.id']
+
+            response = self.get_request(self.root_uri + controllers_uri)
+            if response['ret'] is False:
+                return response
+            result['ret'] = True
+            cdata = response['data']
+
+            if cdata[u'Members']:
+                controller_member_uri = cdata[u'Members'][0][u'@odata.id']
+
+                response = self.get_request(self.root_uri + controller_member_uri)
+                if response['ret'] is False:
+                    return response
+                result['ret'] = True
+                cdata = response['data']
+                controller_name = cdata['Name']
+        elif 'StorageControllers' in data:
             sc = data['StorageControllers']
             if sc:
                 if 'Name' in sc[0]:
@@ -832,14 +874,32 @@ class RedfishUtils(object):
         if data.get('Members'):
             for controller in data[u'Members']:
                 controller_list.append(controller[u'@odata.id'])
-        for c in controller_list:
+        for idx, c in enumerate(controller_list):
             uri = self.root_uri + c
             response = self.get_request(uri)
             if response['ret'] is False:
                 return response
             data = response['data']
-            controller_name = 'Controller 1'
-            if 'StorageControllers' in data:
+            controller_name = 'Controller %s' % str(idx)
+            if 'Controllers' in data:
+                response = self.get_request(self.root_uri + data['Controllers'][u'@odata.id'])
+                if response['ret'] is False:
+                    return response
+                c_data = response['data']
+
+                if c_data.get('Members') and c_data['Members']:
+                    response = self.get_request(self.root_uri + c_data['Members'][0][u'@odata.id'])
+                    if response['ret'] is False:
+                        return response
+                    member_data = response['data']
+
+                    if member_data:
+                        if 'Name' in member_data:
+                            controller_name = member_data['Name']
+                        else:
+                            controller_id = member_data.get('Id', '1')
+                            controller_name = 'Controller %s' % controller_id
+            elif 'StorageControllers' in data:
                 sc = data['StorageControllers']
                 if sc:
                     if 'Name' in sc[0]:
@@ -848,6 +908,7 @@ class RedfishUtils(object):
                     sc_id = sc[0].get('Id', '1')
                     controller_name = 'Controller %s' % sc_id
+            volume_results = []
             volume_list = []
             if 'Volumes' in data:
                 # Get a list of all volumes and build respective URIs
                 volumes_uri = data[u'Volumes'][u'@odata.id']
@@ -1079,6 +1140,12 @@ class RedfishUtils(object):
                 if property in data:
                     user[property] = data[property]

+            # Filter out empty account slots
+            # An empty account slot can be detected if the username is an empty
+            # string and if the account is disabled
+            if user.get('UserName', '') == '' and not user.get('Enabled', False):
+                continue
+
             users_results.append(user)
         result["entries"] = users_results
         return result
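All three storage hunks follow the same probe order, reduced here to a hedged skeleton (data is a parsed storage resource; the function name is illustrative): prefer the current Redfish "Controllers" collection, whose members each require a further request, and only fall back to the deprecated inline array.

    def storage_controller_source(data):
        if "Controllers" in data:
            # current schema: a collection whose members must be fetched individually
            return ("collection", data["Controllers"]["@odata.id"])
        if "StorageControllers" in data:
            # deprecated schema: controller objects inlined in the resource
            return ("inline", data["StorageControllers"])
        return (None, None)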
@@ -72,7 +72,9 @@ def api_request(module, endpoint, data=None, method="GET"):
     if info["status"] == 403:
         module.fail_json(msg="Token authorization failed",
                          execution_info=json.loads(info["body"]))
-    if info["status"] == 409:
+    elif info["status"] == 404:
+        return None, info
+    elif info["status"] == 409:
         module.fail_json(msg="Job executions limit reached",
                          execution_info=json.loads(info["body"]))
     elif info["status"] >= 500:
@@ -81,12 +83,18 @@ def api_request(module, endpoint, data=None, method="GET"):

     try:
         content = response.read()
-        json_response = json.loads(content)
-        return json_response, info
+
+        if not content:
+            return None, info
+        else:
+            json_response = json.loads(content)
+            return json_response, info
     except AttributeError as error:
-        module.fail_json(msg="Rundeck API request error",
-                         exception=to_native(error),
-                         execution_info=info)
+        module.fail_json(
+            msg="Rundeck API request error",
+            exception=to_native(error),
+            execution_info=info
+        )
     except ValueError as error:
         module.fail_json(
             msg="No valid JSON response",
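After this change a 404 or an empty response body surfaces as (None, info) instead of an exception path, so callers decide what "missing" means. A hedged calling sketch (the endpoint path and job_id are illustrative):

    data, info = api_request(module, "job/%s/executions" % job_id)
    if data is None and info["status"] == 404:
        module.fail_json(msg="Job %s not found" % job_id)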
@@ -154,7 +154,7 @@ def _get_ctl_binary(module):
     if ctl_binary is not None:
         return ctl_binary

-    module.fail_json(msg="Neither of apache2ctl nor apachctl found. At least one apache control binary is necessary.")
+    module.fail_json(msg="Neither of apache2ctl nor apachectl found. At least one apache control binary is necessary.")


 def _module_is_enabled(module):
@@ -608,7 +608,13 @@ class TarArchive(Archive):
             # The python implementations of gzip, bz2, and lzma do not support restoring compressed files
             # to their original names so only file checksum is returned
             f = self._open_compressed_file(_to_native_ascii(path), 'r')
-            checksums = set([(b'', crc32(f.read()))])
+            checksum = 0
+            while True:
+                chunk = f.read(16 * 1024 * 1024)
+                if not chunk:
+                    break
+                checksum = crc32(chunk, checksum)
+            checksums = set([(b'', checksum)])
             f.close()
         except Exception:
             checksums = set()
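The replacement computes the same CRC without loading the whole archive into memory. The incremental idiom in isolation, using the standard zlib API (function name illustrative):

    import zlib

    def crc32_of(path, chunk_size=16 * 1024 * 1024):
        checksum = 0
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                checksum = zlib.crc32(chunk, checksum)  # running CRC, constant memory
        return checksum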
@@ -15,7 +15,7 @@ module: awall
 short_description: Manage awall policies
 author: Ted Trask (@tdtrask) <ttrask01@yahoo.com>
 description:
-  - This modules allows for enable/disable/activate of I(awall) policies.
+  - This module allows for enable/disable/activate of C(awall) policies.
   - Alpine Wall (I(awall)) generates a firewall configuration from the enabled policy files
     and activates the configuration on the system.
 extends_documentation_fragment:
@@ -41,8 +41,11 @@ options:
     description:
       - Activate the new firewall rules.
      - Can be run with other steps or on its own.
+      - Idempotency is affected if I(activate=true), as the module will always report a changed state.
     type: bool
     default: false
+notes:
+  - At least one of I(name) and I(activate) is required.
 '''

 EXAMPLES = r'''
plugins/modules/btrfs_info.py (new file, 109 lines)
@@ -0,0 +1,109 @@
#!/usr/bin/python

# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = r'''
---
module: btrfs_info
short_description: Query btrfs filesystem info
version_added: "6.6.0"
description: Query status of available btrfs filesystems, including uuid, label, subvolumes and mountpoints.

author:
    - Gregory Furlong (@gnfzdz)

extends_documentation_fragment:
    - community.general.attributes
    - community.general.attributes.info_module
'''

EXAMPLES = r'''

- name: Query information about mounted btrfs filesystems
  community.general.btrfs_info:
  register: my_btrfs_info

'''

RETURN = r'''

filesystems:
    description: Summaries of the current state for all btrfs filesystems found on the target host.
    type: list
    elements: dict
    returned: success
    contains:
        uuid:
            description: A unique identifier assigned to the filesystem.
            type: str
            sample: 96c9c605-1454-49b8-a63a-15e2584c208e
        label:
            description: An optional label assigned to the filesystem.
            type: str
            sample: Tank
        devices:
            description: A list of devices assigned to the filesystem.
            type: list
            sample:
                - /dev/sda1
                - /dev/sdb1
        default_subvolume:
            description: The id of the filesystem's default subvolume.
            type: int
            sample: 5
        subvolumes:
            description: A list of dicts containing metadata for all of the filesystem's subvolumes.
            type: list
            elements: dict
            contains:
                id:
                    description: An identifier assigned to the subvolume, unique within the containing filesystem.
                    type: int
                    sample: 256
                mountpoints:
                    description: Paths where the subvolume is mounted on the targeted host.
                    type: list
                    sample: ['/home']
                parent:
                    description: The identifier of this subvolume's parent.
                    type: int
                    sample: 5
                path:
                    description: The full path of the subvolume relative to the btrfs filesystem's root.
                    type: str
                    sample: /@home

'''


from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider
from ansible.module_utils.basic import AnsibleModule


def run_module():
    module_args = dict()

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )

    provider = BtrfsFilesystemsProvider(module)
    filesystems = [x.get_summary() for x in provider.get_filesystems()]
    result = {
        "filesystems": filesystems,
    }
    module.exit_json(**result)


def main():
    run_module()


if __name__ == '__main__':
    main()
plugins/modules/btrfs_subvolume.py (new file, 682 lines)
@@ -0,0 +1,682 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: btrfs_subvolume
|
||||
short_description: Manage btrfs subvolumes
|
||||
version_added: "6.6.0"
|
||||
|
||||
description: Creates, updates and deletes btrfs subvolumes and snapshots.
|
||||
|
||||
options:
|
||||
automount:
|
||||
description:
|
||||
- Allow the module to temporarily mount the targeted btrfs filesystem in order to validate the current state and make any required changes.
|
||||
type: bool
|
||||
default: false
|
||||
default:
|
||||
description:
|
||||
- Make the subvolume specified by I(name) the filesystem's default subvolume.
|
||||
type: bool
|
||||
default: false
|
||||
filesystem_device:
|
||||
description:
|
||||
- A block device contained within the btrfs filesystem to be targeted.
|
||||
- Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
|
||||
type: path
|
||||
filesystem_label:
|
||||
description:
|
||||
- A descriptive label assigned to the btrfs filesystem to be targeted.
|
||||
- Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
|
||||
type: str
|
||||
filesystem_uuid:
|
||||
description:
|
||||
- A unique identifier assigned to the btrfs filesystem to be targeted.
|
||||
- Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- Name of the subvolume/snapshot to be targeted.
|
||||
required: true
|
||||
type: str
|
||||
recursive:
|
||||
description:
|
||||
- When true, indicates that parent/child subvolumes should be created/removedas necessary
|
||||
to complete the operation (for I(state=present) and I(state=absent) respectively).
|
||||
type: bool
|
||||
default: false
|
||||
snapshot_source:
|
||||
description:
|
||||
- Identifies the source subvolume for the created snapshot.
|
||||
- Infers that the created subvolume is a snapshot.
|
||||
type: str
|
||||
snapshot_conflict:
|
||||
description:
|
||||
- Policy defining behavior when a subvolume already exists at the path of the requested snapshot.
|
||||
- C(skip) - Create a snapshot only if a subvolume does not yet exist at the target location, otherwise indicate that no change is required.
|
||||
Warning, this option does not yet verify that the target subvolume was generated from a snapshot of the requested source.
|
||||
- C(clobber) - If a subvolume already exists at the requested location, delete it first.
|
||||
This option is not idempotent and will result in a new snapshot being generated on every execution.
|
||||
- C(error) - If a subvolume already exists at the requested location, return an error.
|
||||
This option is not idempotent and will result in an error on replay of the module.
|
||||
type: str
|
||||
choices: [ skip, clobber, error ]
|
||||
default: skip
|
||||
state:
|
||||
description:
|
||||
- Indicates the current state of the targeted subvolume.
|
||||
type: str
|
||||
choices: [ absent, present ]
|
||||
default: present
|
||||
|
||||
notes:
|
||||
- If any or all of the options I(filesystem_device), I(filesystem_label) or I(filesystem_uuid) parameters are provided, there is expected
|
||||
to be a matching btrfs filesystem. If none are provided and only a single btrfs filesystem exists or only a single
|
||||
btrfs filesystem is mounted, that filesystem will be used; otherwise, the module will take no action and return an error.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
|
||||
attributes:
|
||||
check_mode:
|
||||
support: partial
|
||||
details:
|
||||
- In some scenarios it may erroneously report intermediate subvolumes being created.
|
||||
After mounting, if a directory like file is found where the subvolume would have been created, the operation is skipped.
|
||||
diff_mode:
|
||||
support: none
|
||||
|
||||
author:
|
||||
- Gregory Furlong (@gnfzdz)
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
|
||||
- name: Create a @home subvolume under the root subvolume
|
||||
community.general.btrfs_subvolume:
|
||||
name: /@home
|
||||
device: /dev/vda2
|
||||
|
||||
- name: Remove the @home subvolume if it exists
|
||||
community.general.btrfs_subvolume:
|
||||
name: /@home
|
||||
state: absent
|
||||
device: /dev/vda2
|
||||
|
||||
- name: Create a snapshot of the root subvolume named @
|
||||
community.general.btrfs_subvolume:
|
||||
name: /@
|
||||
snapshot_source: /
|
||||
device: /dev/vda2
|
||||
|
||||
- name: Create a snapshot of the root subvolume and make it the new default subvolume
|
||||
community.general.btrfs_subvolume:
|
||||
name: /@
|
||||
snapshot_source: /
|
||||
default: Yes
|
||||
device: /dev/vda2
|
||||
|
||||
- name: Create a snapshot of the /@ subvolume and recursively creating intermediate subvolumes as required
|
||||
community.general.btrfs_subvolume:
|
||||
name: /@snapshots/@2022_06_09
|
||||
snapshot_source: /@
|
||||
recursive: True
|
||||
device: /dev/vda2
|
||||
|
||||
- name: Remove the /@ subvolume and recursively delete child subvolumes as required
|
||||
community.general.btrfs_subvolume:
|
||||
name: /@snapshots/@2022_06_09
|
||||
snapshot_source: /@
|
||||
recursive: True
|
||||
device: /dev/vda2
|
||||
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
|
||||
filesystem:
|
||||
description:
|
||||
- A summary of the final state of the targeted btrfs filesystem.
|
||||
type: dict
|
||||
returned: success
|
||||
contains:
|
||||
uuid:
|
||||
description: A unique identifier assigned to the filesystem.
|
||||
returned: success
|
||||
type: str
|
||||
sample: 96c9c605-1454-49b8-a63a-15e2584c208e
|
||||
label:
|
||||
description: An optional label assigned to the filesystem.
|
||||
returned: success
|
||||
type: str
|
||||
sample: Tank
|
||||
devices:
|
||||
description: A list of devices assigned to the filesystem.
|
||||
returned: success
|
||||
type: list
|
||||
sample:
|
||||
- /dev/sda1
|
||||
- /dev/sdb1
|
||||
default_subvolume:
|
||||
description: The ID of the filesystem's default subvolume.
|
||||
returned: success and if filesystem is mounted
|
||||
type: int
|
||||
sample: 5
|
||||
subvolumes:
|
||||
description: A list of dicts containing metadata for all of the filesystem's subvolumes.
|
||||
returned: success and if filesystem is mounted
|
||||
type: list
|
||||
elements: dict
|
||||
contains:
|
||||
id:
|
||||
description: An identifier assigned to the subvolume, unique within the containing filesystem.
|
||||
type: int
|
||||
sample: 256
|
||||
mountpoints:
|
||||
description: Paths where the subvolume is mounted on the targeted host.
|
||||
type: list
|
||||
sample: ['/home']
|
||||
parent:
|
||||
description: The identifier of this subvolume's parent.
|
||||
type: int
|
||||
sample: 5
|
||||
path:
|
||||
description: The full path of the subvolume relative to the btrfs fileystem's root.
|
||||
type: str
|
||||
sample: /@home
|
||||
|
||||
modifications:
|
||||
description:
|
||||
- A list where each element describes a change made to the target btrfs filesystem.
|
||||
type: list
|
||||
returned: Success
|
||||
elements: str
|
||||
|
||||
target_subvolume_id:
|
||||
description:
|
||||
- The ID of the subvolume specified with the I(name) parameter, either pre-existing or created as part of module execution.
|
||||
type: int
|
||||
sample: 257
|
||||
returned: Success and subvolume exists after module execution
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider, BtrfsCommands, BtrfsModuleException
|
||||
from ansible_collections.community.general.plugins.module_utils.btrfs import normalize_subvolume_path
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
|
||||
class BtrfsSubvolumeModule(object):
|
||||
|
||||
__BTRFS_ROOT_SUBVOLUME = '/'
|
||||
__BTRFS_ROOT_SUBVOLUME_ID = 5
|
||||
__BTRFS_SUBVOLUME_INODE_NUMBER = 256
|
||||
|
||||
__CREATE_SUBVOLUME_OPERATION = 'create'
|
||||
__CREATE_SNAPSHOT_OPERATION = 'snapshot'
|
||||
__DELETE_SUBVOLUME_OPERATION = 'delete'
|
||||
__SET_DEFAULT_SUBVOLUME_OPERATION = 'set-default'
|
||||
|
||||
__UNKNOWN_SUBVOLUME_ID = '?'
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
self.__btrfs_api = BtrfsCommands(module)
|
||||
self.__provider = BtrfsFilesystemsProvider(module)
|
||||
|
||||
# module parameters
|
||||
name = self.module.params['name']
|
||||
self.__name = normalize_subvolume_path(name) if name is not None else None
|
||||
self.__state = self.module.params['state']
|
||||
|
||||
self.__automount = self.module.params['automount']
|
||||
self.__default = self.module.params['default']
|
||||
self.__filesystem_device = self.module.params['filesystem_device']
|
||||
self.__filesystem_label = self.module.params['filesystem_label']
|
||||
self.__filesystem_uuid = self.module.params['filesystem_uuid']
|
||||
self.__recursive = self.module.params['recursive']
|
||||
self.__snapshot_conflict = self.module.params['snapshot_conflict']
|
||||
snapshot_source = self.module.params['snapshot_source']
|
||||
self.__snapshot_source = normalize_subvolume_path(snapshot_source) if snapshot_source is not None else None
|
||||
|
||||
# execution state
|
||||
self.__filesystem = None
|
||||
self.__required_mounts = []
|
||||
self.__unit_of_work = []
|
||||
self.__completed_work = []
|
||||
self.__temporary_mounts = dict()
|
||||
|
||||
def run(self):
|
||||
error = None
|
||||
try:
|
||||
self.__load_filesystem()
|
||||
self.__prepare_unit_of_work()
|
||||
|
||||
if not self.module.check_mode:
|
||||
# check required mounts & mount
|
||||
if len(self.__unit_of_work) > 0:
|
||||
self.__execute_unit_of_work()
|
||||
self.__filesystem.refresh()
|
||||
else:
|
||||
# check required mounts
|
||||
self.__completed_work.extend(self.__unit_of_work)
|
||||
except Exception as e:
|
||||
error = e
|
||||
finally:
|
||||
self.__cleanup_mounts()
|
||||
if self.__filesystem is not None:
|
||||
self.__filesystem.refresh_mountpoints()
|
||||
|
||||
return (error, self.get_results())
|
||||
|
||||
    # Identify the targeted filesystem and obtain the current state
    def __load_filesystem(self):
        if self.__has_filesystem_criteria():
            filesystem = self.__find_matching_filesystem()
        else:
            filesystem = self.__find_default_filesystem()

        # The filesystem must be mounted to obtain the current state (subvolumes, default, etc)
        if not filesystem.is_mounted():
            if not self.__automount:
                raise BtrfsModuleException(
                    "Target filesystem uuid=%s is not currently mounted and automount=False. "
                    "Mount explicitly before module execution or pass automount=True" % filesystem.uuid)
            elif self.module.check_mode:
                # TODO is failing the module an appropriate outcome in this scenario?
                raise BtrfsModuleException(
                    "Target filesystem uuid=%s is not currently mounted. Unable to validate the current "
                    "state while running with check_mode=True" % filesystem.uuid)
            else:
                self.__mount_subvolume_id_to_tempdir(filesystem, self.__BTRFS_ROOT_SUBVOLUME_ID)
                filesystem.refresh()
        self.__filesystem = filesystem

    def __has_filesystem_criteria(self):
        return self.__filesystem_uuid is not None or self.__filesystem_label is not None or self.__filesystem_device is not None

    def __find_matching_filesystem(self):
        criteria = {
            'uuid': self.__filesystem_uuid,
            'label': self.__filesystem_label,
            'device': self.__filesystem_device,
        }
        return self.__provider.get_matching_filesystem(criteria)

    def __find_default_filesystem(self):
        filesystems = self.__provider.get_filesystems()
        filesystem = None

        if len(filesystems) == 1:
            filesystem = filesystems[0]
        else:
            mounted_filesystems = [x for x in filesystems if x.is_mounted()]
            if len(mounted_filesystems) == 1:
                filesystem = mounted_filesystems[0]

        if filesystem is not None:
            return filesystem
        else:
            raise BtrfsModuleException(
                "Failed to automatically identify targeted filesystem. "
                "No explicit device indicated and found %d available filesystems." % len(filesystems)
            )

    # Prepare unit of work
    def __prepare_unit_of_work(self):
        if self.__state == "present":
            if self.__snapshot_source is None:
                self.__prepare_subvolume_present()
            else:
                self.__prepare_snapshot_present()

            if self.__default:
                self.__prepare_set_default()
        elif self.__state == "absent":
            self.__prepare_subvolume_absent()

    def __prepare_subvolume_present(self):
        subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
        if subvolume is None:
            self.__prepare_before_create_subvolume(self.__name)
            self.__stage_create_subvolume(self.__name)

    def __prepare_before_create_subvolume(self, subvolume_name):
        closest_parent = self.__filesystem.get_nearest_subvolume(subvolume_name)
        self.__stage_required_mount(closest_parent)
        if self.__recursive:
            self.__prepare_create_intermediates(closest_parent, subvolume_name)

    def __prepare_create_intermediates(self, closest_subvolume, subvolume_name):
        relative_path = closest_subvolume.get_child_relative_path(self.__name)
        missing_subvolumes = [x for x in relative_path.split(os.path.sep) if len(x) > 0]
        if len(missing_subvolumes) > 1:
            current = closest_subvolume.path
            for s in missing_subvolumes[:-1]:
                separator = os.path.sep if current[-1] != os.path.sep else ""
                current = current + separator + s
                self.__stage_create_subvolume(current, True)

    def __prepare_snapshot_present(self):
        source_subvolume = self.__filesystem.get_subvolume_by_name(self.__snapshot_source)
        subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
        subvolume_exists = subvolume is not None

        if subvolume_exists:
            if self.__snapshot_conflict == "skip":
                # No change required
                return
            elif self.__snapshot_conflict == "error":
                raise BtrfsModuleException("Target subvolume=%s already exists and snapshot_conflict='error'" % self.__name)

        if source_subvolume is None:
            raise BtrfsModuleException("Source subvolume %s does not exist" % self.__snapshot_source)
        elif subvolume is not None and source_subvolume.id == subvolume.id:
            raise BtrfsModuleException("Snapshot source and target are the same.")
        else:
            self.__stage_required_mount(source_subvolume)

        if subvolume_exists and self.__snapshot_conflict == "clobber":
            self.__prepare_delete_subvolume_tree(subvolume)
        elif not subvolume_exists:
            self.__prepare_before_create_subvolume(self.__name)

        self.__stage_create_snapshot(source_subvolume, self.__name)

    def __prepare_subvolume_absent(self):
        subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
        if subvolume is not None:
            self.__prepare_delete_subvolume_tree(subvolume)

    def __prepare_delete_subvolume_tree(self, subvolume):
        if subvolume.is_filesystem_root():
            raise BtrfsModuleException("Cannot delete the filesystem's root subvolume")
        if not self.__recursive and len(subvolume.get_child_subvolumes()) > 0:
            raise BtrfsModuleException("Subvolume targeted for deletion %s has children and recursive=False. "
                                       "Either explicitly delete the child subvolumes first or pass "
                                       "parameter recursive=True." % subvolume.path)

        self.__stage_required_mount(subvolume.get_parent_subvolume())
        queue = self.__prepare_recursive_delete_order(subvolume) if self.__recursive else [subvolume]
        # prepare unit of work
        for s in queue:
            if s.is_mounted():
                # TODO potentially unmount the subvolume if automount=True ?
                raise BtrfsModuleException("Cannot delete mounted subvolume=%s" % s.path)
            if s.is_filesystem_default():
                self.__stage_set_default_subvolume(self.__BTRFS_ROOT_SUBVOLUME, self.__BTRFS_ROOT_SUBVOLUME_ID)
            self.__stage_delete_subvolume(s)

    def __prepare_recursive_delete_order(self, subvolume):
        """Return the subvolume and all descendants as a list, ordered so that descendants always occur before their ancestors"""
        pending = [subvolume]
        ordered = []
        while len(pending) > 0:
            node = pending.pop()
            ordered.append(node)
            pending.extend(node.get_child_subvolumes())
        ordered.reverse()  # reverse to ensure children are deleted before their parent
        return ordered

    def __prepare_set_default(self):
        subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
        subvolume_id = subvolume.id if subvolume is not None else None

        if self.__filesystem.default_subvolid != subvolume_id:
            self.__stage_set_default_subvolume(self.__name, subvolume_id)

    # Stage operations to the unit of work
    def __stage_required_mount(self, subvolume):
        if subvolume.get_mounted_path() is None:
            if self.__automount:
                self.__required_mounts.append(subvolume)
            else:
                raise BtrfsModuleException("The requested changes will require the subvolume '%s' to be mounted, but automount=False" % subvolume.path)

    def __stage_create_subvolume(self, subvolume_path, intermediate=False):
        """
        Add creation of a subvolume to the unit of work.
        If intermediate is true, the action will be skipped if a directory-like file is found at the target
        after mounting a parent subvolume
        """
        self.__unit_of_work.append({
            'action': self.__CREATE_SUBVOLUME_OPERATION,
            'target': subvolume_path,
            'intermediate': intermediate,
        })

    def __stage_create_snapshot(self, source_subvolume, target_subvolume_path):
        """Add creation of a snapshot from source to target to the unit of work"""
        self.__unit_of_work.append({
            'action': self.__CREATE_SNAPSHOT_OPERATION,
            'source': source_subvolume.path,
            'source_id': source_subvolume.id,
            'target': target_subvolume_path,
        })

    def __stage_delete_subvolume(self, subvolume):
        """Add deletion of the target subvolume to the unit of work"""
        self.__unit_of_work.append({
            'action': self.__DELETE_SUBVOLUME_OPERATION,
            'target': subvolume.path,
            'target_id': subvolume.id,
        })

    def __stage_set_default_subvolume(self, subvolume_path, subvolume_id=None):
        """Add update of the filesystem's default subvolume to the unit of work"""
        self.__unit_of_work.append({
            'action': self.__SET_DEFAULT_SUBVOLUME_OPERATION,
            'target': subvolume_path,
            'target_id': subvolume_id,
        })

    # Execute the unit of work
    def __execute_unit_of_work(self):
        self.__check_required_mounts()
        for op in self.__unit_of_work:
            if op['action'] == self.__CREATE_SUBVOLUME_OPERATION:
                self.__execute_create_subvolume(op)
            elif op['action'] == self.__CREATE_SNAPSHOT_OPERATION:
                self.__execute_create_snapshot(op)
            elif op['action'] == self.__DELETE_SUBVOLUME_OPERATION:
                self.__execute_delete_subvolume(op)
            elif op['action'] == self.__SET_DEFAULT_SUBVOLUME_OPERATION:
                self.__execute_set_default_subvolume(op)
            else:
                raise ValueError("Unknown operation type '%s'" % op['action'])

    def __execute_create_subvolume(self, operation):
        target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target'])
        if not self.__is_existing_directory_like(target_mounted_path):
            self.__btrfs_api.subvolume_create(target_mounted_path)
            self.__completed_work.append(operation)

    def __execute_create_snapshot(self, operation):
        source_subvolume = self.__filesystem.get_subvolume_by_name(operation['source'])
        source_mounted_path = source_subvolume.get_mounted_path()
        target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target'])

        self.__btrfs_api.subvolume_snapshot(source_mounted_path, target_mounted_path)
        self.__completed_work.append(operation)

    def __execute_delete_subvolume(self, operation):
        target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target'])
        self.__btrfs_api.subvolume_delete(target_mounted_path)
        self.__completed_work.append(operation)

    def __execute_set_default_subvolume(self, operation):
        target = operation['target']
        target_id = operation['target_id']

        if target_id is None:
            target_subvolume = self.__filesystem.get_subvolume_by_name(target)

            if target_subvolume is None:
                self.__filesystem.refresh()  # the target may have been created earlier in module execution
                target_subvolume = self.__filesystem.get_subvolume_by_name(target)

            if target_subvolume is None:
                raise BtrfsModuleException("Failed to find existing subvolume '%s'" % target)
            else:
                target_id = target_subvolume.id

        self.__btrfs_api.subvolume_set_default(self.__filesystem.get_any_mountpoint(), target_id)
        self.__completed_work.append(operation)

    def __is_existing_directory_like(self, path):
        return os.path.exists(path) and (
            os.path.isdir(path) or
            os.stat(path).st_ino == self.__BTRFS_SUBVOLUME_INODE_NUMBER
        )

    def __check_required_mounts(self):
        filtered = self.__filter_child_subvolumes(self.__required_mounts)
        if len(filtered) > 0:
            for subvolume in filtered:
                self.__mount_subvolume_id_to_tempdir(self.__filesystem, subvolume.id)
            self.__filesystem.refresh_mountpoints()

    def __filter_child_subvolumes(self, subvolumes):
        """Filter the provided list of subvolumes to remove any that are a child of another item in the list"""
        filtered = []
        last = None
        ordered = sorted(subvolumes, key=lambda x: x.path)
        for candidate in ordered:
            if last is None or not candidate.path[0:len(last)] == last:
                filtered.append(candidate)
                last = candidate.path
        return filtered

    # Create/cleanup temporary mountpoints
    def __mount_subvolume_id_to_tempdir(self, filesystem, subvolid):
        # this check should be redundant
        if self.module.check_mode or not self.__automount:
            raise BtrfsModuleException("Unable to temporarily mount required subvolumes "
                                       "with automount=%s and check_mode=%s" % (self.__automount, self.module.check_mode))

        cache_key = "%s:%d" % (filesystem.uuid, subvolid)
        # The subvolume was already mounted, so return the current path
        if cache_key in self.__temporary_mounts:
            return self.__temporary_mounts[cache_key]

        device = filesystem.devices[0]
        mountpoint = tempfile.mkdtemp(dir="/tmp")
        self.__temporary_mounts[cache_key] = mountpoint

        mount = self.module.get_bin_path("mount", required=True)
        command = "%s -o noatime,subvolid=%d %s %s" % (mount,
                                                       subvolid,
                                                       device,
                                                       mountpoint)
        self.module.run_command(command, check_rc=True)

        return mountpoint

    def __cleanup_mounts(self):
        for key in self.__temporary_mounts.keys():
            self.__cleanup_mount(self.__temporary_mounts[key])

    def __cleanup_mount(self, mountpoint):
        umount = self.module.get_bin_path("umount", required=True)
        result = self.module.run_command("%s %s" % (umount, mountpoint))
        if result[0] == 0:
            rmdir = self.module.get_bin_path("rmdir", required=True)
            self.module.run_command("%s %s" % (rmdir, mountpoint))

    # Format and return results
    def get_results(self):
        target = self.__filesystem.get_subvolume_by_name(self.__name)
        return dict(
            changed=len(self.__completed_work) > 0,
            filesystem=self.__filesystem.get_summary(),
            modifications=self.__get_formatted_modifications(),
            target_subvolume_id=(target.id if target is not None else None)
        )

    def __get_formatted_modifications(self):
        return [self.__format_operation_result(op) for op in self.__completed_work]

    def __format_operation_result(self, operation):
        action_type = operation['action']
        if action_type == self.__CREATE_SUBVOLUME_OPERATION:
            return self.__format_create_subvolume_result(operation)
        elif action_type == self.__CREATE_SNAPSHOT_OPERATION:
            return self.__format_create_snapshot_result(operation)
        elif action_type == self.__DELETE_SUBVOLUME_OPERATION:
            return self.__format_delete_subvolume_result(operation)
        elif action_type == self.__SET_DEFAULT_SUBVOLUME_OPERATION:
            return self.__format_set_default_subvolume_result(operation)
        else:
            raise ValueError("Unknown operation type '%s'" % operation['action'])

    def __format_create_subvolume_result(self, operation):
        target = operation['target']
        target_subvolume = self.__filesystem.get_subvolume_by_name(target)
        target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID
        return "Created subvolume '%s' (%s)" % (target, target_id)

    def __format_create_snapshot_result(self, operation):
        source = operation['source']
        source_id = operation['source_id']

        target = operation['target']
        target_subvolume = self.__filesystem.get_subvolume_by_name(target)
        target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID
        return "Created snapshot '%s' (%s) from '%s' (%s)" % (target, target_id, source, source_id)

    def __format_delete_subvolume_result(self, operation):
        target = operation['target']
        target_id = operation['target_id']
        return "Deleted subvolume '%s' (%s)" % (target, target_id)

    def __format_set_default_subvolume_result(self, operation):
        target = operation['target']
        if 'target_id' in operation:
            target_id = operation['target_id']
        else:
            target_subvolume = self.__filesystem.get_subvolume_by_name(target)
            target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID
        return "Updated default subvolume to '%s' (%s)" % (target, target_id)


def run_module():
    module_args = dict(
        automount=dict(type='bool', required=False, default=False),
        default=dict(type='bool', required=False, default=False),
        filesystem_device=dict(type='path', required=False),
        filesystem_label=dict(type='str', required=False),
        filesystem_uuid=dict(type='str', required=False),
        name=dict(type='str', required=True),
        recursive=dict(type='bool', default=False),
        state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
        snapshot_source=dict(type='str', required=False),
        snapshot_conflict=dict(type='str', required=False, default='skip', choices=['skip', 'clobber', 'error'])
    )

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )

    subvolume = BtrfsSubvolumeModule(module)
    error, result = subvolume.run()
    if error is not None:
        module.fail_json(msg=str(error), **result)
    else:
        module.exit_json(**result)


def main():
    run_module()


if __name__ == '__main__':
    main()

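A minimal usage sketch for the module above, assuming it is published as community.general.btrfs_subvolume (the class and argument spec shown here do not name the module) and that a single btrfs filesystem can be auto-detected:

- name: Ensure a @snapshots subvolume exists, mounting the filesystem temporarily if needed
  community.general.btrfs_subvolume:
    name: /@snapshots
    state: present
    automount: true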
@@ -170,10 +170,15 @@ def get_available_options(module, command='install'):
     return command_help_json['definition']['options']


-def composer_command(module, command, arguments="", options=None, global_command=False):
+def composer_command(module, command, arguments="", options=None):
     if options is None:
         options = []

+    global_command = module.params['global_command']
+
+    if not global_command:
+        options.extend(['--working-dir', "'%s'" % module.params['working_dir']])
+
     if module.params['executable'] is None:
         php_path = module.get_bin_path("php", True, ["/usr/local/bin"])
     else:

@@ -217,7 +222,6 @@ def main():
         module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'")

     arguments = module.params['arguments']
-    global_command = module.params['global_command']
     available_options = get_available_options(module=module, command=command)

     options = []

@@ -234,9 +238,6 @@
             option = "--%s" % option
         options.append(option)

-    if not global_command:
-        options.extend(['--working-dir', "'%s'" % module.params['working_dir']])
-
     option_params = {
         'prefer_source': 'prefer-source',
         'prefer_dist': 'prefer-dist',

@@ -260,7 +261,7 @@
     else:
         module.exit_json(skipped=True, msg="command '%s' does not support check mode, skipping" % command)

-    rc, out, err = composer_command(module, command, arguments, options, global_command)
+    rc, out, err = composer_command(module, command, arguments, options)

     if rc != 0:
         output = parse_out(err)

@@ -183,7 +183,7 @@ class CPANMinus(ModuleHelper):
         if v.name and v.from_path:
             self.do_raise("Parameters 'name' and 'from_path' are mutually exclusive when 'mode=new'")

-        self.command = self.module.get_bin_path(v.executable if v.executable else self.command)
+        self.command = self.get_bin_path(v.executable if v.executable else self.command)
         self.vars.set("binary", self.command)

     def _is_package_installed(self, name, locallib, version):

@@ -16,7 +16,7 @@ short_description: Manages Datadog downtimes
 version_added: 2.0.0
 description:
   - Manages downtimes within Datadog.
-  - Options as described on U(https://docs.datadoghq.com/api/v1/downtimes/s).
+  - Options as described on U(https://docs.datadoghq.com/api/v1/downtimes/).
 author:
   - Datadog (@Datadog)
 requirements:

@@ -248,7 +248,8 @@ def build_downtime(module):
         downtime.timezone = module.params["timezone"]
     if module.params["rrule"]:
         downtime.recurrence = DowntimeRecurrence(
-            rrule=module.params["rrule"]
+            rrule=module.params["rrule"],
+            type="rrule",
         )
     return downtime

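The added type="rrule" ensures the recurrence object sent to Datadog is typed correctly. A hedged usage sketch of the fix — the API/app keys and scope are placeholders, and those options come from the module's full option list, which is not shown here:

- name: Schedule a daily recurring downtime using an RRULE
  community.general.datadog_downtime:
    api_key: "{{ datadog_api_key }}"
    app_key: "{{ datadog_app_key }}"
    scope: "host:db01"
    rrule: "FREQ=DAILY;INTERVAL=1"
    state: present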
@@ -49,8 +49,8 @@ notes:
    I(value="'myvalue'") - with single quotes as part of the Ansible parameter
    value.
  - When using loops in combination with a value like
-   :code:`"[('xkb', 'us'), ('xkb', 'se')]"`, you need to be aware of possible
-   type conversions. Applying a filter :code:`"{{ item.value | string }}"`
+   "[('xkb', 'us'), ('xkb', 'se')]", you need to be aware of possible
+   type conversions. Applying a filter C({{ item.value | string }})
    to the parameter variable can avoid potential conversion problems.
  - The easiest way to figure out exact syntax/value you need to provide for a
    key is by making the configuration change in application affected by the

@@ -70,13 +70,18 @@ options:
     description:
       - A dconf key to modify or read from the dconf database.
   value:
-    type: str
+    type: raw
     required: false
     description:
       - Value to set for the specified dconf key. Value should be specified in
        GVariant format. Due to complexity of this format, it is best to have a
        look at existing values in the dconf database.
      - Required for I(state=present).
+     - Although the type is specified as "raw", it should typically be
+       specified as a string. However, boolean values in particular are
+       handled properly even when specified as booleans rather than strings
+       (in fact, handling booleans properly is why the type of this parameter
+       is "raw").
   state:
     type: str
     required: false

@@ -138,7 +143,18 @@ EXAMPLES = r"""


 import os
 import traceback
+import sys
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.respawn import (
+    has_respawned,
+    probe_interpreters_for_module,
+    respawn_module,
+)
+from ansible.module_utils.common.text.converters import to_native
+from ansible_collections.community.general.plugins.module_utils import deps
+
+glib_module_name = 'gi.repository.GLib'

 try:
     from gi.repository.GLib import Variant, GError

@@ -146,15 +162,8 @@ except ImportError:
     Variant = None
     GError = AttributeError

-PSUTIL_IMP_ERR = None
-try:
+with deps.declare("psutil"):
     import psutil
-    HAS_PSUTIL = True
-except ImportError:
-    PSUTIL_IMP_ERR = traceback.format_exc()
-    HAS_PSUTIL = False
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib


 class DBusWrapper(object):

@@ -288,6 +297,10 @@ class DconfPreference(object):

         Returns True if the two values are equal.
         """
+        if canonical_value is None:
+            # It's unset in dconf database, so anything the user is trying to
+            # set is a change.
+            return False
         try:
             variant1 = Variant.parse(None, canonical_value)
             variant2 = Variant.parse(variant1.get_type(), user_value)

@@ -349,7 +362,7 @@ class DconfPreference(object):
         rc, out, err = dbus_wrapper.run_command(command)

         if rc != 0:
-            self.module.fail_json(msg='dconf failed while write the value with error: %s' % err,
+            self.module.fail_json(msg='dconf failed while writing key %s, value %s with error: %s' % (key, value, err),
                                   out=out,
                                   err=err)

@@ -401,23 +414,62 @@ def main():
         argument_spec=dict(
             state=dict(default='present', choices=['present', 'absent', 'read']),
             key=dict(required=True, type='str', no_log=False),
-            value=dict(required=False, default=None, type='str'),
+            # Converted to str below after special handling of bool.
+            value=dict(required=False, default=None, type='raw'),
         ),
-        supports_check_mode=True
+        supports_check_mode=True,
+        required_if=[
+            ('state', 'present', ['value']),
+        ],
     )

+    if Variant is None:
+        # This interpreter can't see the GLib module. To try to fix that, we'll
+        # look in common locations for system-owned interpreters that can see
+        # it; if we find one, we'll respawn under it. Otherwise we'll proceed
+        # with degraded performance, without the ability to parse GVariants.
+        # Later (in a different PR) we'll actually deprecate this degraded
+        # performance level and fail with an error if the library can't be
+        # found.
+
+        if has_respawned():
+            # This shouldn't be possible; short-circuit early if it happens.
+            module.fail_json(
+                msg="%s must be installed and visible from %s." %
+                (glib_module_name, sys.executable))
+
+        interpreters = ['/usr/bin/python3', '/usr/bin/python2',
+                        '/usr/bin/python']
+
+        interpreter = probe_interpreters_for_module(
+            interpreters, glib_module_name)
+
+        if interpreter:
+            # Found the Python bindings; respawn this module under the
+            # interpreter where we found them.
+            respawn_module(interpreter)
+            # This is the end of the line for this process, it will exit here
+            # once the respawned module has completed.
+
+    # Try to be forgiving about the user specifying a boolean as the value, or
+    # more accurately about the fact that YAML and Ansible are quite insistent
+    # about converting strings that look like booleans into booleans. Convert
+    # the boolean into a string of the type dconf will understand. Any type for
+    # the value other than boolean is just converted into a string directly.
+    if module.params['value'] is not None:
+        if isinstance(module.params['value'], bool):
+            module.params['value'] = 'true' if module.params['value'] else 'false'
+        else:
+            module.params['value'] = to_native(
+                module.params['value'], errors='surrogate_or_strict')
+
+    if Variant is None:
+        module.warn(
+            'WARNING: The gi.repository Python library is not available; '
+            'using string comparison to check value equality. This fallback '
+            'will be deprecated in a future version of community.general.')
+
-    if not HAS_PSUTIL:
-        module.fail_json(msg=missing_required_lib("psutil"), exception=PSUTIL_IMP_ERR)
-
-    # If present state was specified, value must be provided.
-    if module.params['state'] == 'present' and module.params['value'] is None:
-        module.fail_json(msg='State "present" requires "value" to be set.')
+    deps.validate(module)

     # Create wrapper instance.
     dconf = DconfPreference(module, module.check_mode)

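With value now accepting raw types, booleans survive YAML's type coercion and are converted to the strings dconf expects. A minimal sketch (the key path is illustrative):

- name: Enable a boolean GNOME setting without quoting tricks
  community.general.dconf:
    key: /org/gnome/desktop/interface/clock-show-date
    value: true
    state: present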
@@ -102,7 +102,7 @@ class EjabberdUser(object):
         changed. It will return True if the user does not match the supplied
         credentials and False if it does not
         """
-        return self.run_command('check_password', [self.user, self.host, self.pwd])
+        return bool(self.run_command('check_password', [self.user, self.host, self.pwd])[0])

     @property
     def exists(self):

@@ -110,7 +110,7 @@ class EjabberdUser(object):
         host specified. If the user exists True is returned, otherwise False
         is returned
         """
-        return self.run_command('check_account', [self.user, self.host])
+        return not bool(self.run_command('check_account', [self.user, self.host])[0])

     def log(self, entry):
         """ This method will log information to the local syslog facility """

@@ -122,7 +122,7 @@ class EjabberdUser(object):
         """ This method will run the any command specified and return the
         returns using the Ansible common module
         """
-        cmd = [self.module.get_bin_path('ejabberdctl'), cmd] + options
+        cmd = [self.module.get_bin_path('ejabberdctl', required=True), cmd] + options
         self.log('command: %s' % " ".join(cmd))
         return self.module.run_command(cmd)

@@ -215,7 +215,7 @@ def uninstall_flat(module, binary, names, method):

 def flatpak_exists(module, binary, names, method):
     """Check if the flatpaks are installed."""
-    command = [binary, "list", "--{0}".format(method), "--app"]
+    command = [binary, "list", "--{0}".format(method)]
     output = _flatpak_command(module, False, command)
     installed = []
     not_installed = []

@@ -227,7 +227,7 @@ class GithubDeployKey(object):
             yield self.module.from_json(resp.read())

             links = {}
-            for x, y in findall(r'<([^>]+)>;\s*rel="(\w+)"', info["link"]):
+            for x, y in findall(r'<([^>]+)>;\s*rel="(\w+)"', info.get("link", '')):
                 links[y] = x

             url = links.get('next')

@@ -82,8 +82,14 @@ EXAMPLES = '''
     name: Access Key for Some Machine
     token: '{{ github_access_token }}'
     pubkey: '{{ ssh_pub_key.stdout }}'
-'''

+# Alternatively, a single task can be used reading a key from a file on the controller
+- name: Authorize key with GitHub
+  community.general.github_key:
+    name: Access Key for Some Machine
+    token: '{{ github_access_token }}'
+    pubkey: "{{ lookup('ansible.builtin.file', '/home/foo/.ssh/id_rsa.pub') }}"
+'''

 import json
 import re

@@ -255,7 +255,10 @@ class GitLabGroup(object):
             return True

         try:
-            group = self._gitlab.groups.create(arguments)
+            # Filter out None values
+            filtered = dict((arg_key, arg_value) for arg_key, arg_value in arguments.items() if arg_value is not None)
+
+            group = self._gitlab.groups.create(filtered)
         except (gitlab.exceptions.GitlabCreateError) as e:
             self._module.fail_json(msg="Failed to create group: %s " % to_native(e))

@@ -276,11 +276,11 @@ def main():
     ensure_gitlab_package(module)

     access_level_int = {
-        'guest': gitlab.GUEST_ACCESS,
-        'reporter': gitlab.REPORTER_ACCESS,
-        'developer': gitlab.DEVELOPER_ACCESS,
-        'maintainer': gitlab.MAINTAINER_ACCESS,
-        'owner': gitlab.OWNER_ACCESS,
+        'guest': gitlab.const.GUEST_ACCESS,
+        'reporter': gitlab.const.REPORTER_ACCESS,
+        'developer': gitlab.const.DEVELOPER_ACCESS,
+        'maintainer': gitlab.const.MAINTAINER_ACCESS,
+        'owner': gitlab.const.OWNER_ACCESS,
     }

     gitlab_group = module.params['gitlab_group']

@@ -251,6 +251,13 @@ options:
     type: str
     choices: ["private", "disabled", "enabled"]
     version_added: "6.4.0"
+  topics:
+    description:
+      - A topic or list of topics to be assigned to a project.
+      - It is compatible with old GitLab server releases (versions before 14, correspond to C(tag_list)).
+    type: list
+    elements: str
+    version_added: "6.6.0"
 '''

 EXAMPLES = r'''

@@ -334,6 +341,8 @@ from ansible_collections.community.general.plugins.module_utils.gitlab import (
     auth_argument_spec, find_group, find_project, gitlab_authentication, gitlab, ensure_gitlab_package
 )

+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+

 class GitLabProject(object):
     def __init__(self, module, gitlab_instance):

@@ -376,6 +385,14 @@ class GitLabProject(object):
             'monitor_access_level': options['monitor_access_level'],
             'security_and_compliance_access_level': options['security_and_compliance_access_level'],
         }

+        # topics was introduced on gitlab >=14 and replace tag_list. We get current gitlab version
+        # and check if less than 14. If yes we use tag_list instead topics
+        if LooseVersion(self._gitlab.version()[0]) < LooseVersion("14"):
+            project_options['tag_list'] = options['topics']
+        else:
+            project_options['topics'] = options['topics']
+
         # Because we have already call userExists in main()
         if self.project_object is None:
             project_options.update({

@@ -514,6 +531,7 @@ def main():
         infrastructure_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']),
         monitor_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']),
         security_and_compliance_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']),
+        topics=dict(type='list', elements='str'),
     ))

     module = AnsibleModule(

@@ -570,6 +588,7 @@ def main():
     infrastructure_access_level = module.params['infrastructure_access_level']
     monitor_access_level = module.params['monitor_access_level']
     security_and_compliance_access_level = module.params['security_and_compliance_access_level']
+    topics = module.params['topics']

     if default_branch and not initialize_with_readme:
         module.fail_json(msg="Param default_branch need param initialize_with_readme set to true")

@@ -648,6 +667,7 @@ def main():
         "infrastructure_access_level": infrastructure_access_level,
         "monitor_access_level": monitor_access_level,
         "security_and_compliance_access_level": security_and_compliance_access_level,
+        "topics": topics,
     }):

         module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.project_object._attrs)

@@ -282,10 +282,10 @@ def main():
     ensure_gitlab_package(module)

     access_level_int = {
-        'guest': gitlab.GUEST_ACCESS,
-        'reporter': gitlab.REPORTER_ACCESS,
-        'developer': gitlab.DEVELOPER_ACCESS,
-        'maintainer': gitlab.MAINTAINER_ACCESS,
+        'guest': gitlab.const.GUEST_ACCESS,
+        'reporter': gitlab.const.REPORTER_ACCESS,
+        'developer': gitlab.const.DEVELOPER_ACCESS,
+        'maintainer': gitlab.const.MAINTAINER_ACCESS,
     }

     gitlab_project = module.params['project']

@@ -94,9 +94,9 @@ class GitlabProtectedBranch(object):
         self._module = module
         self.project = self.get_project(project)
         self.ACCESS_LEVEL = {
-            'nobody': gitlab.NO_ACCESS,
-            'developer': gitlab.DEVELOPER_ACCESS,
-            'maintainer': gitlab.MAINTAINER_ACCESS
+            'nobody': gitlab.const.NO_ACCESS,
+            'developer': gitlab.const.DEVELOPER_ACCESS,
+            'maintainer': gitlab.const.MAINTAINER_ACCESS
         }

     def get_project(self, project_name):

@@ -244,12 +244,12 @@ class GitLabUser(object):
         self._gitlab = gitlab_instance
         self.user_object = None
         self.ACCESS_LEVEL = {
-            'guest': gitlab.GUEST_ACCESS,
-            'reporter': gitlab.REPORTER_ACCESS,
-            'developer': gitlab.DEVELOPER_ACCESS,
-            'master': gitlab.MAINTAINER_ACCESS,
-            'maintainer': gitlab.MAINTAINER_ACCESS,
-            'owner': gitlab.OWNER_ACCESS,
+            'guest': gitlab.const.GUEST_ACCESS,
+            'reporter': gitlab.const.REPORTER_ACCESS,
+            'developer': gitlab.const.DEVELOPER_ACCESS,
+            'master': gitlab.const.MAINTAINER_ACCESS,
+            'maintainer': gitlab.const.MAINTAINER_ACCESS,
+            'owner': gitlab.const.OWNER_ACCESS,
         }

@@ -78,8 +78,9 @@ options:
   greedy:
     description:
       - Upgrade casks that auto update.
-      - Passes --greedy to brew cask outdated when checking
-        if an installed cask has a newer version available.
+      - Passes C(--greedy) to C(brew outdated --cask) when checking
+        if an installed cask has a newer version available,
+        or to C(brew upgrade --cask) when upgrading all casks.
     type: bool
     default: false
 '''

@@ -128,6 +129,11 @@ EXAMPLES = '''
   community.general.homebrew_cask:
     upgrade_all: true

+- name: Upgrade all casks with greedy option
+  community.general.homebrew_cask:
+    upgrade_all: true
+    greedy: true
+
 - name: Upgrade given cask with force option
   community.general.homebrew_cask:
     name: alfred

@@ -581,6 +587,9 @@ class HomebrewCask(object):
         else:
             cmd = [self.brew_path, 'cask', 'upgrade']

+        if self.greedy:
+            cmd = cmd + ['--greedy']
+
         rc, out, err = '', '', ''

         if self.sudo_password:

@@ -256,9 +256,9 @@ def main():
     state = module.params["state"]
     name = module.params["name"]
     zone = module.params["zone"]
-    template = [name]
+    template = []
     if module.params["template"]:
-        template.append(module.params["template"])
+        template = [module.params["template"]]
     check_command = module.params["check_command"]
     ip = module.params["ip"]
     display_name = module.params["display_name"]

@@ -273,20 +273,18 @@ def main():
         module.fail_json(msg="unable to connect to Icinga. Exception message: %s" % (e))

     data = {
+        'templates': template,
         'attrs': {
             'address': ip,
             'display_name': display_name,
             'check_command': check_command,
             'zone': zone,
-            'vars': {
-                'made_by': "ansible",
-            },
-            'templates': template,
+            'vars.made_by': "ansible"
         }
     }

     if variables:
-        data['attrs']['vars'].update(variables)
+        for key, value in variables.items():
+            data['attrs']['vars.' + key] = value

     changed = False
     if icinga.exists(name):

@@ -308,7 +306,7 @@
             module.exit_json(changed=False, name=name, data=data)

         # Template attribute is not allowed in modification
-        del data['attrs']['templates']
+        del data['templates']

         ret = icinga.modify(name, data)

|
||||
returned: always
|
||||
type: str
|
||||
sample: "Action was successful"
|
||||
return_values:
|
||||
description: Dictionary containing command-specific response data from the action.
|
||||
returned: on success
|
||||
type: dict
|
||||
version_added: 6.6.0
|
||||
sample: {
|
||||
"job_id": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_471269252011"
|
||||
}
|
||||
'''
|
||||
|
||||
import re
|
||||
@@ -128,10 +136,9 @@ class IdracRedfishUtils(RedfishUtils):
|
||||
return response
|
||||
|
||||
response_output = response['resp'].__dict__
|
||||
job_id = response_output["headers"]["Location"]
|
||||
job_id = re.search("JID_.+", job_id).group()
|
||||
# Currently not passing job_id back to user but patch is coming
|
||||
return {'ret': True, 'msg': "Config job %s created" % job_id}
|
||||
job_id_full = response_output["headers"]["Location"]
|
||||
job_id = re.search("JID_.+", job_id_full).group()
|
||||
return {'ret': True, 'msg': "Config job %s created" % job_id, 'job_id': job_id_full}
|
||||
|
||||
|
||||
CATEGORY_COMMANDS_ALL = {
|
||||
@@ -143,6 +150,7 @@ CATEGORY_COMMANDS_ALL = {
|
||||
|
||||
def main():
|
||||
result = {}
|
||||
return_values = {}
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
category=dict(required=True),
|
||||
@@ -199,7 +207,20 @@ def main():
|
||||
|
||||
if category == "Systems":
|
||||
# execute only if we find a System resource
|
||||
# NOTE: Currently overriding the usage of 'data_modification' due to
|
||||
# how 'resource_id' is processed. In the case of CreateBiosConfigJob,
|
||||
# we interact with BOTH systems and managers, so you currently cannot
|
||||
# specify a single 'resource_id' to make both '_find_systems_resource'
|
||||
# and '_find_managers_resource' return success. Since
|
||||
# CreateBiosConfigJob doesn't use the matched 'resource_id' for a
|
||||
# system regardless of what's specified, disabling the 'resource_id'
|
||||
# inspection for the next call allows a specific manager to be
|
||||
# specified with 'resource_id'. If we ever need to expand the input
|
||||
# to inspect a specific system and manager in parallel, this will need
|
||||
# updates.
|
||||
rf_utils.data_modification = False
|
||||
result = rf_utils._find_systems_resource()
|
||||
rf_utils.data_modification = True
|
||||
if result['ret'] is False:
|
||||
module.fail_json(msg=to_native(result['msg']))
|
||||
|
||||
@@ -210,11 +231,13 @@ def main():
|
||||
if result['ret'] is False:
|
||||
module.fail_json(msg=to_native(result['msg']))
|
||||
result = rf_utils.create_bios_config_job()
|
||||
if 'job_id' in result:
|
||||
return_values['job_id'] = result['job_id']
|
||||
|
||||
# Return data back or fail with proper message
|
||||
if result['ret'] is True:
|
||||
del result['ret']
|
||||
module.exit_json(changed=True, msg='Action was successful')
|
||||
module.exit_json(changed=True, msg='Action was successful', return_values=return_values)
|
||||
else:
|
||||
module.fail_json(msg=to_native(result['msg']))
|
||||
|
||||
|
||||
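Since the new return_values dictionary carries the created job's URI, a playbook can register and reuse it. A sketch assuming the standard Redfish connection options (baseuri, username, password) documented for this module:

- name: Create a BIOS configuration job and capture its ID
  community.general.idrac_redfish_command:
    category: Systems
    command: CreateBiosConfigJob
    baseuri: "{{ baseuri }}"
    username: "{{ username }}"
    password: "{{ password }}"
  register: bios_job

- name: Show the job URI returned since 6.6.0
  ansible.builtin.debug:
    msg: "{{ bios_job.return_values.job_id }}"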
plugins/modules/ilo_redfish_command.py (new file, 175 lines)
@@ -0,0 +1,175 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: ilo_redfish_command
short_description: Manages Out-Of-Band controllers using Redfish APIs
version_added: 6.6.0
description:
  - Builds Redfish URIs locally and sends them to remote OOB controllers to
    perform an action.
attributes:
  check_mode:
    support: none
  diff_mode:
    support: none
extends_documentation_fragment:
  - community.general.attributes
options:
  category:
    required: true
    description:
      - Category to execute on OOB controller.
    type: str
    choices: ['Systems']
  command:
    required: true
    description:
      - List of commands to execute on OOB controller.
    type: list
    elements: str
  baseuri:
    required: true
    description:
      - Base URI of OOB controller.
    type: str
  username:
    required: false
    description:
      - Username for authenticating to iLO.
    type: str
  password:
    required: false
    description:
      - Password for authenticating to iLO.
    type: str
  auth_token:
    required: false
    description:
      - Security token for authenticating to iLO.
    type: str
  timeout:
    required: false
    description:
      - Timeout in seconds for HTTP requests to iLO.
    default: 60
    type: int
author:
  - Varni H P (@varini-hp)
'''

EXAMPLES = '''
- name: Wait for iLO Reboot Completion
  community.general.ilo_redfish_command:
    category: Systems
    command: WaitforiLORebootCompletion
    baseuri: "{{ baseuri }}"
    username: "{{ username }}"
    password: "{{ password }}"
'''

RETURN = '''
ilo_redfish_command:
    description: Returns the status of the operation performed on the iLO.
    type: dict
    contains:
        WaitforiLORebootCompletion:
            description: Returns the output msg and whether the function executed successfully.
            type: dict
            contains:
                ret:
                    description: Return True/False based on whether the operation was performed successfully.
                    type: bool
                msg:
                    description: Status of the operation performed on the iLO.
                    type: str
    returned: always
'''

# More will be added as module features are expanded
CATEGORY_COMMANDS_ALL = {
    "Systems": ["WaitforiLORebootCompletion"]
}

from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native


def main():
    result = {}
    module = AnsibleModule(
        argument_spec=dict(
            category=dict(required=True, choices=list(CATEGORY_COMMANDS_ALL.keys())),
            command=dict(required=True, type='list', elements='str'),
            baseuri=dict(required=True),
            timeout=dict(type="int", default=60),
            username=dict(),
            password=dict(no_log=True),
            auth_token=dict(no_log=True)
        ),
        required_together=[
            ('username', 'password'),
        ],
        required_one_of=[
            ('username', 'auth_token'),
        ],
        mutually_exclusive=[
            ('username', 'auth_token'),
        ],
        supports_check_mode=False
    )

    category = module.params['category']
    command_list = module.params['command']

    # admin credentials used for authentication
    creds = {'user': module.params['username'],
             'pswd': module.params['password'],
             'token': module.params['auth_token']}

    timeout = module.params['timeout']

    # Build root URI
    root_uri = "https://" + module.params['baseuri']
    rf_utils = iLORedfishUtils(creds, root_uri, timeout, module)

    # Check that Category is valid
    if category not in CATEGORY_COMMANDS_ALL:
        module.fail_json(msg=to_native(
            "Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys()))))

    # Check that all commands are valid
    for cmd in command_list:
        # Fail if even one command given is invalid
        if cmd not in CATEGORY_COMMANDS_ALL[category]:
            module.fail_json(
                msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))

    if category == "Systems":
        # execute only if we find a System resource
        result = rf_utils._find_systems_resource()
        if result['ret'] is False:
            module.fail_json(msg=to_native(result['msg']))

        for command in command_list:
            if command == "WaitforiLORebootCompletion":
                result[command] = rf_utils.wait_for_ilo_reboot_completion()

            # Return data back or fail with proper message
            if not result[command]['ret']:
                module.fail_json(msg=result)

    changed = result[command].get('changed', False)
    module.exit_json(ilo_redfish_command=result, changed=changed)


if __name__ == '__main__':
    main()

@@ -42,10 +42,9 @@ options:
     description:
       - Section name in INI file. This is added if I(state=present) automatically when
        a single value is being set.
-     - If left empty or set to C(null), the I(option) will be placed before the first I(section).
+     - If left empty, being omitted, or being set to C(null), the I(option) will be placed before the first I(section).
      - Using C(null) is also required if the config format does not support sections.
     type: str
-    required: true
   option:
     description:
       - If set (required for changing a I(value)), this is the name of the option.

@@ -317,14 +316,14 @@ def do_ini(module, filename, section=None, option=None, values=None,
         # override option with no value to option with value if not allow_no_value
         if len(values) > 0:
             for index, line in enumerate(section_lines):
-                if not changed_lines[index] and match_active_opt(option, line):
+                if not changed_lines[index] and match_opt(option, line):
                     newline = assignment_format % (option, values.pop(0))
                     (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg)
                     if len(values) == 0:
                         break
         # remove all remaining option occurrences from the rest of the section
         for index in range(len(section_lines) - 1, 0, -1):
-            if not changed_lines[index] and match_active_opt(option, section_lines[index]):
+            if not changed_lines[index] and match_opt(option, section_lines[index]):
                 del section_lines[index]
                 del changed_lines[index]
                 changed = True

@@ -430,7 +429,7 @@ def main():
     module = AnsibleModule(
         argument_spec=dict(
             path=dict(type='path', required=True, aliases=['dest']),
-            section=dict(type='str', required=True),
+            section=dict(type='str'),
             option=dict(type='str'),
             value=dict(type='str'),
             values=dict(type='list', elements='str'),

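With section no longer required, sectionless formats can be edited directly. A minimal sketch (the target file is illustrative):

- name: Set an option in a sectionless config file
  community.general.ini_file:
    path: /etc/default/grub
    section: null
    option: GRUB_TIMEOUT
    value: '5'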
@@ -59,7 +59,7 @@ options:
   record_values:
     description:
      - Manage DNS record name with this value.
-     - Mutually exclusive with I(record_values), and exactly one of I(record_value) and I(record_values) has to be specified.
+     - Mutually exclusive with I(record_value), and exactly one of I(record_value) and I(record_values) has to be specified.
      - In the case of 'A' or 'AAAA' record types, this will be the IP address.
      - In the case of 'A6' record type, this will be the A6 Record data.
      - In the case of 'CNAME' record type, this will be the hostname.

@@ -152,7 +152,8 @@ def ensure(module, client):
         changed = True
         if not module.check_mode:
             client.dnszone_add(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate, 'idnsallowsyncptr': allowsyncptr})
-    elif ipa_dnszone['idnsallowdynupdate'][0] != str(dynamicupdate).upper() or ipa_dnszone['idnsallowsyncptr'][0] != str(allowsyncptr).upper():
+    elif ipa_dnszone['idnsallowdynupdate'][0] != str(dynamicupdate).upper() or \
+            ipa_dnszone.get('idnsallowsyncptr') and ipa_dnszone['idnsallowsyncptr'][0] != str(allowsyncptr).upper():
         changed = True
         if not module.check_mode:
             client.dnszone_mod(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate, 'idnsallowsyncptr': allowsyncptr})

@@ -20,6 +20,13 @@ attributes:
   diff_mode:
     support: none
 options:
+  append:
+    description:
+      - If C(true), add the listed I(host) to the I(hostgroup).
+      - If C(false), only the listed I(host) will be in I(hostgroup), removing any other hosts.
+    default: false
+    type: bool
+    version_added: 6.6.0
   cn:
     description:
       - Name of host-group.

@@ -147,6 +154,7 @@ def ensure(module, client):
     state = module.params['state']
     host = module.params['host']
     hostgroup = module.params['hostgroup']
+    append = module.params['append']

     ipa_hostgroup = client.hostgroup_find(name=name)
     module_hostgroup = get_hostgroup_dict(description=module.params['description'])

@@ -168,14 +176,18 @@ def ensure(module, client):
             client.hostgroup_mod(name=name, item=data)

         if host is not None:
-            changed = client.modify_if_diff(name, ipa_hostgroup.get('member_host', []), [item.lower() for item in host],
-                                            client.hostgroup_add_host, client.hostgroup_remove_host) or changed
+            changed = client.modify_if_diff(name, ipa_hostgroup.get('member_host', []),
+                                            [item.lower() for item in host],
+                                            client.hostgroup_add_host,
+                                            client.hostgroup_remove_host,
+                                            append=append) or changed

         if hostgroup is not None:
             changed = client.modify_if_diff(name, ipa_hostgroup.get('member_hostgroup', []),
                                             [item.lower() for item in hostgroup],
                                             client.hostgroup_add_hostgroup,
-                                            client.hostgroup_remove_hostgroup) or changed
+                                            client.hostgroup_remove_hostgroup,
+                                            append=append) or changed

     else:
         if ipa_hostgroup:

@@ -192,7 +204,8 @@ def main():
                          description=dict(type='str'),
                          host=dict(type='list', elements='str'),
                          hostgroup=dict(type='list', elements='str'),
-                         state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']))
+                         state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+                         append=dict(type='bool', default=False))

     module = AnsibleModule(argument_spec=argument_spec,
                            supports_check_mode=True)

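The new append flag makes membership management additive. A sketch, assuming the usual ipa_* connection options from the shared documentation fragment (not shown here):

- name: Add a host to the databases hostgroup without removing existing members
  community.general.ipa_hostgroup:
    cn: databases
    host:
      - db07.example.com
    append: true
    state: present
    ipa_host: ipa.example.com
    ipa_user: admin
    ipa_pass: "{{ ipa_password }}"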
@@ -30,7 +30,9 @@ options:
     default: 'always'
     choices: [ always, on_create ]
   givenname:
-    description: First name.
+    description:
+      - First name.
+      - If user does not exist and I(state=present), the usage of I(givenname) is required.
     type: str
   krbpasswordexpiration:
     description:

@@ -54,7 +56,9 @@ options:
       - Will not be set for an existing user unless I(update_password=always), which is the default.
     type: str
   sn:
-    description: Surname.
+    description:
+      - Surname.
+      - If user does not exist and I(state=present), the usage of I(sn) is required.
     type: str
   sshpubkey:
     description:

@@ -27,7 +27,7 @@ options:
   group:
     type: str
     description:
-      - Name of the Jenkins group on the OS.
+      - GID or name of the Jenkins group on the OS.
     default: jenkins
   jenkins_home:
    type: path

@@ -47,7 +47,7 @@ options:
   owner:
     type: str
     description:
-      - Name of the Jenkins user on the OS.
+      - UID or name of the Jenkins user on the OS.
     default: jenkins
   state:
    type: str

@@ -195,6 +195,29 @@ EXAMPLES = '''
     url_password: p4ssw0rd
     url: http://localhost:8888

+#
+# Example of how to authenticate with serverless deployment
+#
+- name: Update plugins on ECS Fargate Jenkins instance
+  community.general.jenkins_plugin:
+    # plugin name and version
+    name: ws-cleanup
+    version: '0.45'
+    # Jenkins home path mounted on ec2-helper VM (example)
+    jenkins_home: "/mnt/{{ jenkins_instance }}"
+    # matching the UID/GID to one in official Jenkins image
+    owner: 1000
+    group: 1000
+    # Jenkins instance URL and admin credentials
+    url: "https://{{ jenkins_instance }}.com/"
+    url_username: admin
+    url_password: p4ssw0rd
+  # make module work from EC2 which has local access
+  # to EFS mount as well as Jenkins URL
+  delegate_to: ec2-helper
+  vars:
+    jenkins_instance: foobar
+
 #
 # Example of a Play which handles Jenkins restarts during the state changes
 #

@@ -79,6 +79,14 @@ options:
    description:
      - Priority order of the execution.
    type: int
+  subFlowType:
+    description:
+      - For new subflows, optionally specify the type.
+      - Is only used at creation.
+    choices: ["basic-flow", "form-flow"]
+    default: "basic-flow"
+    type: str
+    version_added: 6.6.0
  state:
    description:
      - Control if the authentication flow must exists or not.

@@ -264,7 +272,7 @@ def create_or_update_executions(kc, config, realm='master'):
        exec_index = find_exec_in_executions(new_exec, existing_executions)
        if exec_index != -1:
            # Remove key that doesn't need to be compared with existing_exec
-            exclude_key = ["flowAlias"]
+            exclude_key = ["flowAlias", "subFlowType"]
            for index_key, key in enumerate(new_exec, start=0):
                if new_exec[key] is None:
                    exclude_key.append(key)

@@ -282,7 +290,7 @@ def create_or_update_executions(kc, config, realm='master'):
            id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"]
            after += str(new_exec) + '\n'
        elif new_exec["displayName"] is not None:
-            kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm)
+            kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm, flowType=new_exec["subFlowType"])
            exec_found = True
            exec_index = new_exec_index
            id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"]

@@ -299,7 +307,7 @@ def create_or_update_executions(kc, config, realm='master'):
                kc.add_authenticationConfig_to_execution(updated_exec["id"], new_exec["authenticationConfig"], realm=realm)
            for key in new_exec:
                # remove unwanted key for the next API call
-                if key != "flowAlias" and key != "authenticationConfig":
+                if key not in ("flowAlias", "authenticationConfig", "subFlowType"):
                    updated_exec[key] = new_exec[key]
            if new_exec["requirement"] is not None:
                kc.update_authentication_executions(flow_alias_parent, updated_exec, realm=realm)

@@ -334,6 +342,7 @@ def main():
            flowAlias=dict(type='str'),
            authenticationConfig=dict(type='dict'),
            index=dict(type='int'),
+            subFlowType=dict(choices=["basic-flow", "form-flow"], default='basic-flow', type='str'),
        )),
        state=dict(choices=["absent", "present"], default='present'),
        force=dict(type='bool', default=False),
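To show the new option in context, a minimal task sketch (realm, flow alias, and display name are invented for illustration and not taken from this diff):

- name: Create an authentication flow containing a form-flow subflow (illustrative sketch)
  community.general.keycloak_authentication:
    auth_keycloak_url: https://auth.example.com/auth
    auth_realm: master
    auth_username: admin
    auth_password: password
    realm: myrealm
    alias: my-browser-flow
    authenticationExecutions:
      - displayName: My registration form
        requirement: REQUIRED
        subFlowType: form-flow
    state: present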
plugins/modules/keycloak_authz_authorization_scope.py (new file, 280 lines)
@@ -0,0 +1,280 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2017, Eike Frost <ei@kefro.st>
# Copyright (c) 2021, Christophe Gilles <christophe.gilles54@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or
# https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: keycloak_authz_authorization_scope

short_description: Allows administration of Keycloak client authorization scopes via Keycloak API

version_added: 6.6.0

description:
    - This module allows the administration of Keycloak client Authorization Scopes via the Keycloak REST
      API. Authorization Scopes are only available if a client has Authorization enabled.

    - This module requires access to the REST API via OpenID Connect; the user connecting and the realm
      being used must have the requisite access rights. In a default Keycloak installation, admin-cli
      and an admin user would work, as would a separate realm definition with the scope tailored
      to your needs and a user having the expected roles.

    - The names of module options are snake_cased versions of the camelCase options used by Keycloak.
      The Authorization Services paths and payloads have not officially been documented by the Keycloak project.
      U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/)

attributes:
    check_mode:
        support: full
    diff_mode:
        support: full

options:
    state:
        description:
            - State of the authorization scope.
            - On C(present), the authorization scope will be created (or updated if it exists already).
            - On C(absent), the authorization scope will be removed if it exists.
        choices: ['present', 'absent']
        default: 'present'
        type: str
    name:
        description:
            - Name of the authorization scope to create.
        type: str
        required: true
    display_name:
        description:
            - The display name of the authorization scope.
        type: str
        required: false
    icon_uri:
        description:
            - The icon URI for the authorization scope.
        type: str
        required: false
    client_id:
        description:
            - The C(clientId) of the Keycloak client that should have the authorization scope.
            - This is usually a human-readable name of the Keycloak client.
        type: str
        required: true
    realm:
        description:
            - The name of the Keycloak realm the Keycloak client is in.
        type: str
        required: true

extends_documentation_fragment:
    - community.general.keycloak
    - community.general.attributes

author:
    - Samuli Seppänen (@mattock)
'''

EXAMPLES = '''
- name: Manage Keycloak file:delete authorization scope
  keycloak_authz_authorization_scope:
    name: file:delete
    state: present
    display_name: File delete
    client_id: myclient
    realm: myrealm
    auth_keycloak_url: http://localhost:8080/auth
    auth_username: keycloak
    auth_password: keycloak
    auth_realm: master
'''

RETURN = '''
msg:
    description: Message as to what action was taken.
    returned: always
    type: str

end_state:
    description: Representation of the authorization scope after module execution.
    returned: on success
    type: complex
    contains:
        id:
            description: ID of the authorization scope.
            type: str
            returned: when I(state=present)
            sample: a6ab1cf2-1001-40ec-9f39-48f23b6a0a41
        name:
            description: Name of the authorization scope.
            type: str
            returned: when I(state=present)
            sample: file:delete
        display_name:
            description: Display name of the authorization scope.
            type: str
            returned: when I(state=present)
            sample: File delete
        icon_uri:
            description: Icon URI for the authorization scope.
            type: str
            returned: when I(state=present)
            sample: http://localhost/icon.png
'''

from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
    keycloak_argument_spec, get_token, KeycloakError
from ansible.module_utils.basic import AnsibleModule


def main():
    """
    Module execution

    :return:
    """
    argument_spec = keycloak_argument_spec()

    meta_args = dict(
        state=dict(type='str', default='present',
                   choices=['present', 'absent']),
        name=dict(type='str', required=True),
        display_name=dict(type='str', required=False),
        icon_uri=dict(type='str', required=False),
        client_id=dict(type='str', required=True),
        realm=dict(type='str', required=True)
    )

    argument_spec.update(meta_args)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           required_one_of=(
                               [['token', 'auth_realm', 'auth_username', 'auth_password']]),
                           required_together=([['auth_realm', 'auth_username', 'auth_password']]))

    result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={}))

    # Obtain access token, initialize API
    try:
        connection_header = get_token(module.params)
    except KeycloakError as e:
        module.fail_json(msg=str(e))

    kc = KeycloakAPI(module, connection_header)

    # Convenience variables
    state = module.params.get('state')
    name = module.params.get('name')
    display_name = module.params.get('display_name')
    icon_uri = module.params.get('icon_uri')
    client_id = module.params.get('client_id')
    realm = module.params.get('realm')

    # Get the "id" of the client based on the usually more human-readable
    # "clientId"
    cid = kc.get_client_id(client_id, realm=realm)
    if not cid:
        module.fail_json(msg='Invalid client %s for realm %s' %
                         (client_id, realm))

    # Get current state of the Authorization Scope using its name as the search
    # filter. This returns False if it is not found.
    before_authz_scope = kc.get_authz_authorization_scope_by_name(
        name=name, client_id=cid, realm=realm)

    # Generate a JSON payload for Keycloak Admin API. This is needed for
    # "create" and "update" operations.
    desired_authz_scope = {}
    desired_authz_scope['name'] = name
    desired_authz_scope['displayName'] = display_name
    desired_authz_scope['iconUri'] = icon_uri

    # Add "id" to payload for modify operations
    if before_authz_scope:
        desired_authz_scope['id'] = before_authz_scope['id']

    # Ensure that undefined (null) optional parameters are presented as empty
    # strings in the desired state. This makes comparisons with current state
    # much easier.
    for k, v in desired_authz_scope.items():
        if not v:
            desired_authz_scope[k] = ''

    # Do the above for the current state
    if before_authz_scope:
        for k in ['displayName', 'iconUri']:
            if k not in before_authz_scope:
                before_authz_scope[k] = ''

    if before_authz_scope and state == 'present':
        changes = False
        for k, v in desired_authz_scope.items():
            if before_authz_scope[k] != v:
                changes = True
                # At this point we know we have to update the object anyways,
                # so there's no need to do more work.
                break

        if changes:
            if module._diff:
                result['diff'] = dict(before=before_authz_scope, after=desired_authz_scope)

            if module.check_mode:
                result['changed'] = True
                result['msg'] = 'Authorization scope would be updated'
                module.exit_json(**result)
            else:
                kc.update_authz_authorization_scope(
                    payload=desired_authz_scope, id=before_authz_scope['id'], client_id=cid, realm=realm)
                result['changed'] = True
                result['msg'] = 'Authorization scope updated'
        else:
            result['changed'] = False
            result['msg'] = 'Authorization scope not updated'

        result['end_state'] = desired_authz_scope
    elif not before_authz_scope and state == 'present':
        if module._diff:
            result['diff'] = dict(before={}, after=desired_authz_scope)

        if module.check_mode:
            result['changed'] = True
            result['msg'] = 'Authorization scope would be created'
            module.exit_json(**result)
        else:
            kc.create_authz_authorization_scope(
                payload=desired_authz_scope, client_id=cid, realm=realm)
            result['changed'] = True
            result['msg'] = 'Authorization scope created'
            result['end_state'] = desired_authz_scope
    elif before_authz_scope and state == 'absent':
        if module._diff:
            result['diff'] = dict(before=before_authz_scope, after={})

        if module.check_mode:
            result['changed'] = True
            result['msg'] = 'Authorization scope would be removed'
            module.exit_json(**result)
        else:
            kc.remove_authz_authorization_scope(
                id=before_authz_scope['id'], client_id=cid, realm=realm)
            result['changed'] = True
            result['msg'] = 'Authorization scope removed'
    elif not before_authz_scope and state == 'absent':
        result['changed'] = False
    else:
        module.fail_json(msg='Unable to determine what to do with authorization scope %s of client %s in realm %s' % (
            name, client_id, realm))

    module.exit_json(**result)


if __name__ == '__main__':
    main()
plugins/modules/keycloak_clientscope_type.py (new file, 285 lines)
@@ -0,0 +1,285 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) Ansible project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function

__metaclass__ = type

DOCUMENTATION = '''
---
module: keycloak_clientscope_type

short_description: Set the type of a clientscope in realm or client via Keycloak API

version_added: 6.6.0

description:
    - This module allows you to set the type (optional, default) of clientscopes
      via the Keycloak REST API. It requires access to the REST API via OpenID
      Connect; the user connecting and the client being used must have the
      requisite access rights. In a default Keycloak installation, admin-cli and
      an admin user would work, as would a separate client definition with the
      scope tailored to your needs and a user having the expected roles.

attributes:
    check_mode:
        support: full
    diff_mode:
        support: full

options:
    realm:
        type: str
        description:
            - The Keycloak realm.
        default: 'master'

    client_id:
        description:
            - The I(client_id) of the client. If not set, the clientscope types are set as a default for the realm.
        aliases:
            - clientId
        type: str

    default_clientscopes:
        description:
            - Client scopes that should be of type default.
        type: list
        elements: str

    optional_clientscopes:
        description:
            - Client scopes that should be of type optional.
        type: list
        elements: str

extends_documentation_fragment:
    - community.general.keycloak
    - community.general.attributes

author:
    - Simon Pahl (@simonpahl)
'''

EXAMPLES = '''
- name: Set default client scopes on realm level
  community.general.keycloak_clientscope_type:
    auth_client_id: admin-cli
    auth_keycloak_url: https://auth.example.com/auth
    auth_realm: master
    auth_username: USERNAME
    auth_password: PASSWORD
    realm: "MyCustomRealm"
    default_clientscopes: ['profile', 'roles']
  delegate_to: localhost


- name: Set default and optional client scopes on client level with token auth
  community.general.keycloak_clientscope_type:
    auth_client_id: admin-cli
    auth_keycloak_url: https://auth.example.com/auth
    token: TOKEN
    realm: "MyCustomRealm"
    client_id: "MyCustomClient"
    default_clientscopes: ['profile', 'roles']
    optional_clientscopes: ['phone']
  delegate_to: localhost
'''

RETURN = '''
msg:
    description: Message as to what action was taken.
    returned: always
    type: str
    sample: ""

proposed:
    description: Representation of proposed client-scope types mapping.
    returned: always
    type: dict
    sample: {
        default_clientscopes: ["profile", "role"],
        optional_clientscopes: []
    }

existing:
    description:
        - Representation of client scopes before module execution.
    returned: always
    type: dict
    sample: {
        default_clientscopes: ["profile", "role"],
        optional_clientscopes: ["phone"]
    }

end_state:
    description:
        - Representation of client scopes after module execution.
        - The sample is truncated.
    returned: on success
    type: dict
    sample: {
        default_clientscopes: ["profile", "role"],
        optional_clientscopes: []
    }
'''

from ansible.module_utils.basic import AnsibleModule

from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
    KeycloakAPI, KeycloakError, get_token)

from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import \
    keycloak_argument_spec


def keycloak_clientscope_type_module():
    """
    Returns an AnsibleModule definition.

    :return: AnsibleModule
    """
    argument_spec = keycloak_argument_spec()

    meta_args = dict(
        realm=dict(default='master'),
        client_id=dict(type='str', aliases=['clientId']),
        default_clientscopes=dict(type='list', elements='str'),
        optional_clientscopes=dict(type='list', elements='str'),
    )

    argument_spec.update(meta_args)

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=([
            ['token', 'auth_realm', 'auth_username', 'auth_password'],
            ['default_clientscopes', 'optional_clientscopes']
        ]),
        required_together=([['auth_realm', 'auth_username', 'auth_password']]),
        mutually_exclusive=[
            ['token', 'auth_realm'],
            ['token', 'auth_username'],
            ['token', 'auth_password']
        ])

    return module


def clientscopes_to_add(existing, proposed):
    to_add = []
    existing_clientscope_ids = extract_field(existing, 'id')
    for clientscope in proposed:
        if not clientscope['id'] in existing_clientscope_ids:
            to_add.append(clientscope)
    return to_add


def clientscopes_to_delete(existing, proposed):
    to_delete = []
    proposed_clientscope_ids = extract_field(proposed, 'id')
    for clientscope in existing:
        if not clientscope['id'] in proposed_clientscope_ids:
            to_delete.append(clientscope)
    return to_delete


def extract_field(dictionary, field='name'):
    return [cs[field] for cs in dictionary]


def main():
    """
    Module keycloak_clientscope_type

    :return:
    """

    module = keycloak_clientscope_type_module()

    # Obtain access token, initialize API
    try:
        connection_header = get_token(module.params)
    except KeycloakError as e:
        module.fail_json(msg=str(e))

    kc = KeycloakAPI(module, connection_header)

    realm = module.params.get('realm')
    client_id = module.params.get('client_id')
    default_clientscopes = module.params.get('default_clientscopes')
    optional_clientscopes = module.params.get('optional_clientscopes')

    result = dict(changed=False, msg='', proposed={}, existing={}, end_state={})

    all_clientscopes = kc.get_clientscopes(realm)
    default_clientscopes_real = []
    optional_clientscopes_real = []

    for client_scope in all_clientscopes:
        if default_clientscopes is not None and client_scope["name"] in default_clientscopes:
            default_clientscopes_real.append(client_scope)
        if optional_clientscopes is not None and client_scope["name"] in optional_clientscopes:
            optional_clientscopes_real.append(client_scope)

    if default_clientscopes is not None and len(default_clientscopes_real) != len(default_clientscopes):
        module.fail_json(msg='At least one of the default_clientscopes does not exist!')

    if optional_clientscopes is not None and len(optional_clientscopes_real) != len(optional_clientscopes):
        module.fail_json(msg='At least one of the optional_clientscopes does not exist!')

    result['proposed'].update({
        'default_clientscopes': 'no-change' if default_clientscopes is None else default_clientscopes,
        'optional_clientscopes': 'no-change' if optional_clientscopes is None else optional_clientscopes
    })

    default_clientscopes_existing = kc.get_default_clientscopes(realm, client_id)
    optional_clientscopes_existing = kc.get_optional_clientscopes(realm, client_id)

    result['existing'].update({
        'default_clientscopes': extract_field(default_clientscopes_existing),
        'optional_clientscopes': extract_field(optional_clientscopes_existing)
    })

    if module._diff:
        result['diff'] = dict(before=result['existing'], after=result['proposed'])

    if module.check_mode:
        module.exit_json(**result)

    default_clientscopes_add = clientscopes_to_add(default_clientscopes_existing, default_clientscopes_real)
    optional_clientscopes_add = clientscopes_to_add(optional_clientscopes_existing, optional_clientscopes_real)

    default_clientscopes_delete = clientscopes_to_delete(default_clientscopes_existing, default_clientscopes_real)
    optional_clientscopes_delete = clientscopes_to_delete(optional_clientscopes_existing, optional_clientscopes_real)

    # first delete so clientscopes can change type
    for clientscope in default_clientscopes_delete:
        kc.delete_default_clientscope(clientscope['id'], realm, client_id)
    for clientscope in optional_clientscopes_delete:
        kc.delete_optional_clientscope(clientscope['id'], realm, client_id)

    for clientscope in default_clientscopes_add:
        kc.add_default_clientscope(clientscope['id'], realm, client_id)
    for clientscope in optional_clientscopes_add:
        kc.add_optional_clientscope(clientscope['id'], realm, client_id)

    result["changed"] = (
        len(default_clientscopes_add) > 0
        or len(optional_clientscopes_add) > 0
        or len(default_clientscopes_delete) > 0
        or len(optional_clientscopes_delete) > 0
    )

    result['end_state'].update({
        'default_clientscopes': extract_field(kc.get_default_clientscopes(realm, client_id)),
        'optional_clientscopes': extract_field(kc.get_optional_clientscopes(realm, client_id))
    })

    module.exit_json(**result)


if __name__ == '__main__':
    main()
@@ -35,6 +35,8 @@ options:
      - Whether the locale shall be present.
    choices: [ absent, present ]
    default: present
+notes:
+  - This module does not support RHEL-based systems.
'''

EXAMPLES = '''

@@ -74,11 +76,10 @@ def is_available(name, ubuntuMode):
    checking either :
    * if the locale is present in /etc/locales.gen
    * or if the locale is present in /usr/share/i18n/SUPPORTED"""
-    __regexp = r'^#?\s*(?P<locale>\S+[\._\S]*) (?P<charset>\S+)\s*$'
    if ubuntuMode:
        __regexp = r'^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
        __locales_available = '/usr/share/i18n/SUPPORTED'
    else:
+        __regexp = r'^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
        __locales_available = '/etc/locale.gen'

    re_compiled = re.compile(__regexp)

@@ -88,7 +89,8 @@ def is_available(name, ubuntuMode):
        if result and result.group('locale') == name:
            return True
    fd.close()
-    return False
+    # locale may be installed but not listed in the file, for example C.UTF-8 in some systems
+    return is_present(name)


def is_present(name):

@@ -106,20 +108,6 @@ def fix_case(name):
    return name


-def replace_line(existing_line, new_line):
-    """Replaces lines in /etc/locale.gen"""
-    try:
-        f = open("/etc/locale.gen", "r")
-        lines = [line.replace(existing_line, new_line) for line in f]
-    finally:
-        f.close()
-    try:
-        f = open("/etc/locale.gen", "w")
-        f.write("".join(lines))
-    finally:
-        f.close()


def set_locale(name, enabled=True):
    """ Sets the state of the locale. Defaults to enabled. """
    search_string = r'#{0,1}\s*%s (?P<charset>.+)' % name

@@ -209,7 +197,7 @@ def main():
            # We found the common way to manage locales.
            ubuntuMode = False
        else:
-            module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?")
+            module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package 'locales' installed?")
    else:
        # Ubuntu created its own system to manage locales.
        ubuntuMode = True
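The new fallback to is_present() matters for locales such as C.UTF-8 that can be built into the system without being listed in /etc/locale.gen; a minimal task sketch of the case the code comment mentions:

- name: Ensure the C.UTF-8 locale exists
  community.general.locale_gen:
    name: C.UTF-8
    state: present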
@@ -354,7 +354,7 @@ def main():
        # NOTE: Backward compatible with old syntax using '|' as delimiter
        for hdr in [x.strip() for x in header.split('|')]:
            try:
-                h_key, h_val = hdr.split('=')
+                h_key, h_val = hdr.split('=', 1)
                h_val = to_native(Header(h_val, charset))
                msg.add_header(h_key, h_val)
            except Exception:
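Splitting on the first '=' only means header values may themselves contain '='; a hedged task sketch (addresses and the header value are invented for illustration):

- name: Send a report mail with a custom header whose value contains '='
  community.general.mail:
    to: ops@example.com
    subject: Nightly build report
    body: Build finished.
    headers:
      - X-Build-Info=branch=main;rev=1a2b3c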
@@ -49,6 +49,7 @@ options:
  params:
    description:
      - Any extra parameters to pass to make.
+      - If the value is empty, only the key will be used. For example, C(FOO:) will produce C(FOO), not C(FOO=).
    type: dict
  target:
    description:

@@ -81,6 +82,18 @@ EXAMPLES = r'''
    chdir: /home/ubuntu/cool-project
    target: all
    file: /some-project/Makefile

+- name: build arm64 kernel on FreeBSD, with 16 parallel jobs
+  community.general.make:
+    chdir: /usr/src
+    jobs: 16
+    target: buildkernel
+    params:
+      # This adds -DWITH_FDT to the command line:
+      -DWITH_FDT:
+      # The following adds TARGET=arm64 TARGET_ARCH=aarch64 to the command line:
+      TARGET: arm64
+      TARGET_ARCH: aarch64
'''

RETURN = r'''

@@ -174,7 +187,7 @@ def main():
    make_path = module.get_bin_path('make', required=True)
    make_target = module.params['target']
    if module.params['params'] is not None:
-        make_parameters = [k + '=' + str(v) for k, v in iteritems(module.params['params'])]
+        make_parameters = [k + (('=' + str(v)) if v is not None else '') for k, v in iteritems(module.params['params'])]
    else:
        make_parameters = []
@@ -146,8 +146,7 @@ class MkSysB(ModuleHelper):
    def __run__(self):
        def process(rc, out, err):
            if rc != 0:
-                self.do_raise("mksysb failed.")
-            self.vars.msg = out
+                self.do_raise("mksysb failed: {0}".format(out))

        runner = CmdRunner(
            self.module,

@@ -158,6 +157,8 @@ class MkSysB(ModuleHelper):
                'extended_attrs', 'backup_crypt_files', 'backup_dmapi_fs', 'new_image_data', 'combined_path'],
                output_process=process, check_mode_skip=True) as ctx:
            ctx.run(combined_path=[self.vars.storage_path, self.vars.name])
+            if self.verbosity >= 4:
+                self.vars.run_info = ctx.run_info

        self.changed = True
@@ -52,7 +52,7 @@ options:
      - If C(present), adds module name to C(/etc/modules-load.d/) and params to C(/etc/modprobe.d/) so the module will be loaded on next reboot.
      - If C(absent), will comment out module name from C(/etc/modules-load.d/) and comment out params from C(/etc/modprobe.d/) so the module will not be
        loaded on next reboot.
-      - If C(disabled), will not toch anything and leave C(/etc/modules-load.d/) and C(/etc/modprobe.d/) as it is.
+      - If C(disabled), will not touch anything and leave C(/etc/modules-load.d/) and C(/etc/modprobe.d/) as it is.
      - Note that it is usually a better idea to rely on the automatic module loading by PCI IDs, USB IDs, DMI IDs or similar triggers encoded in the
        kernel modules themselves instead of configuration like this.
      - In fact, most modern kernel modules are prepared for automatic loading already.
@@ -63,11 +63,12 @@ options:
      - Type C(generic) is added in Ansible 2.5.
      - Type C(infiniband) is added in community.general 2.0.0.
      - Type C(gsm) is added in community.general 3.7.0.
+      - Type C(macvlan) is added in community.general 6.6.0.
      - Type C(wireguard) is added in community.general 4.3.0.
      - Type C(vpn) is added in community.general 5.1.0.
    type: str
-    choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi, gsm,
-        wireguard, vpn ]
+    choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, macvlan, sit, team, team-slave, vlan, vxlan,
+        wifi, gsm, wireguard, vpn ]
  mode:
    description:
      - This is the type of device or network connection that you wish to create for a bond or bridge.

@@ -879,6 +880,38 @@ options:
      - The username used to authenticate with the network, if required.
      - Many providers do not require a username, or accept any username.
      - But if a username is required, it is specified here.
+  macvlan:
+    description:
+      - The configuration of the MAC VLAN connection.
+      - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host.
+      - 'An up-to-date list of supported attributes can be found here:
+        U(https://networkmanager.dev/docs/api/latest/settings-macvlan.html).'
+    type: dict
+    version_added: 6.6.0
+    suboptions:
+      mode:
+        description:
+          - The macvlan mode, which specifies the communication mechanism between multiple macvlans on the same lower device.
+          - 'Following choices are allowed: C(1) B(vepa), C(2) B(bridge), C(3) B(private), C(4) B(passthru)
+            and C(5) B(source)'
+        type: int
+        choices: [ 1, 2, 3, 4, 5 ]
+        required: true
+      parent:
+        description:
+          - If given, specifies the parent interface name or parent connection UUID from which this MAC-VLAN interface should
+            be created. If this property is not specified, the connection must contain an "802-3-ethernet" setting with a
+            "mac-address" property.
+        type: str
+        required: true
+      promiscuous:
+        description:
+          - Whether the interface should be put in promiscuous mode.
+        type: bool
+      tap:
+        description:
+          - Whether the interface should be a MACVTAP.
+        type: bool
  wireguard:
    description:
      - The configuration of the Wireguard connection.

@@ -1357,6 +1390,17 @@ EXAMPLES = r'''
    autoconnect: true
    state: present

+- name: Create a macvlan connection
+  community.general.nmcli:
+    type: macvlan
+    conn_name: my-macvlan-connection
+    ifname: mymacvlan0
+    macvlan:
+      mode: 2
+      parent: eth1
+    autoconnect: true
+    state: present

- name: Create a wireguard connection
  community.general.nmcli:
    type: wireguard

@@ -1502,13 +1546,14 @@ class Nmcli(object):
        self.wifi = module.params['wifi']
        self.wifi_sec = module.params['wifi_sec']
        self.gsm = module.params['gsm']
+        self.macvlan = module.params['macvlan']
        self.wireguard = module.params['wireguard']
        self.vpn = module.params['vpn']
        self.transport_mode = module.params['transport_mode']

        if self.method4:
            self.ipv4_method = self.method4
-        elif self.type in ('dummy', 'wireguard') and not self.ip4:
+        elif self.type in ('dummy', 'macvlan', 'wireguard') and not self.ip4:
            self.ipv4_method = 'disabled'
        elif self.ip4:
            self.ipv4_method = 'manual'

@@ -1517,7 +1562,7 @@ class Nmcli(object):

        if self.method6:
            self.ipv6_method = self.method6
-        elif self.type in ('dummy', 'wireguard') and not self.ip6:
+        elif self.type in ('dummy', 'macvlan', 'wireguard') and not self.ip6:
            self.ipv6_method = 'disabled'
        elif self.ip6:
            self.ipv6_method = 'manual'

@@ -1700,6 +1745,14 @@ class Nmcli(object):
                options.update({
                    'gsm.%s' % name: value,
                })
+        elif self.type == 'macvlan':
+            if self.macvlan:
+                for name, value in self.macvlan.items():
+                    options.update({
+                        'macvlan.%s' % name: value,
+                    })
+            elif self.state == 'present':
+                raise NmcliModuleError('type is macvlan but all of the following are missing: macvlan')
        elif self.type == 'wireguard':
            if self.wireguard:
                for name, value in self.wireguard.items():

@@ -1777,6 +1830,7 @@ class Nmcli(object):
            'wifi',
            '802-11-wireless',
            'gsm',
+            'macvlan',
            'wireguard',
            'vpn',
        )

@@ -1992,6 +2046,9 @@ class Nmcli(object):
                if key in self.SECRET_OPTIONS:
                    self.edit_commands += ['set %s %s' % (key, value)]
                    continue
+                if key == 'xmit_hash_policy':
+                    cmd.extend(['+bond.options', 'xmit_hash_policy=%s' % value])
+                    continue
                cmd.extend([key, value])

        return self.execute_command(cmd)

@@ -2047,7 +2104,10 @@ class Nmcli(object):
            if key and len(pair) > 1:
                raw_value = pair[1].lstrip()
                if raw_value == '--':
-                    conn_info[key] = None
+                    if key_type == list:
+                        conn_info[key] = []
+                    else:
+                        conn_info[key] = None
                elif key == 'bond.options':
                    # Aliases such as 'miimon', 'downdelay' are equivalent to the +bond.options 'option=value' syntax.
                    opts = raw_value.split(',')

@@ -2134,7 +2194,7 @@ class Nmcli(object):
            # We can't just do `if not value` because then if there's a value
            # of 0 specified as an integer it'll be interpreted as empty when
            # it actually isn't.
-            if value != 0 and not value:
+            if value not in (0, []) and not value:
                continue

            if key in conn_info:

@@ -2239,6 +2299,7 @@ def main():
            'vxlan',
            'wifi',
            'gsm',
+            'macvlan',
            'wireguard',
            'vpn',
        ]),

@@ -2342,6 +2403,11 @@ def main():
        wifi=dict(type='dict'),
        wifi_sec=dict(type='dict', no_log=True),
        gsm=dict(type='dict'),
+        macvlan=dict(type='dict', options=dict(
+            mode=dict(type='int', choices=[1, 2, 3, 4, 5], required=True),
+            parent=dict(type='str', required=True),
+            promiscuous=dict(type='bool'),
+            tap=dict(type='bool'))),
        wireguard=dict(type='dict'),
        vpn=dict(type='dict'),
        transport_mode=dict(type='str', choices=['datagram', 'connected']),
@@ -467,10 +467,8 @@ class RecordManager(object):
        if lookup.rcode() != dns.rcode.NOERROR:
            self.module.fail_json(msg='Failed to lookup TTL of existing matching record.')

-        if self.module.params['type'] == 'NS':
-            current_ttl = lookup.answer[0].ttl if lookup.answer else lookup.authority[0].ttl
-        else:
-            current_ttl = lookup.answer[0].ttl
+        current_ttl = lookup.answer[0].ttl if lookup.answer else lookup.authority[0].ttl

        return current_ttl != self.module.params['ttl']
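With the NS-only branch removed, the TTL comparison now falls back to the authority section for any record type whose lookup returns an empty answer section; a hedged task sketch of the kind of record this affects (server, zone, and values are invented):

- name: Ensure a delegation NS record with a specific TTL
  community.general.nsupdate:
    server: 192.0.2.1
    zone: example.com
    record: sub.example.com.
    type: NS
    ttl: 7200
    value: ns1.example.com.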
@@ -13,7 +13,8 @@ module: office_365_connector_card
short_description: Use webhooks to create Connector Card messages within an Office 365 group
description:
  - Creates Connector Card messages through
-  - Office 365 Connectors U(https://dev.outlook.com/Connectors)
+    Office 365 Connectors
+    U(https://learn.microsoft.com/en-us/microsoftteams/platform/task-modules-and-cards/cards/cards-reference#connector-card-for-microsoft-365-groups).
author: "Marc Sensenich (@marc-sensenich)"
notes:
  - This module is not idempotent, therefore if the same task is run twice

@@ -62,7 +63,7 @@ options:
    elements: dict
    description:
      - Contains a list of sections to display in the card.
-      - For more information see https://dev.outlook.com/Connectors/reference.
+      - For more information see U(https://learn.microsoft.com/en-us/outlook/actionable-messages/message-card-reference#section-fields).
'''

EXAMPLES = """
@@ -169,7 +169,11 @@ def get_package_state(names, pkg_spec, module):
    rc, stdout, stderr = execute_command(command, module)

    if stderr:
-        module.fail_json(msg="failed in get_package_state(): " + stderr)
+        match = re.search(r"^Can't find inst:%s$" % re.escape(name), stderr)
+        if match:
+            pkg_spec[name]['installed_state'] = False
+        else:
+            module.fail_json(msg="failed in get_package_state(): " + stderr)

    if stdout:
        # If the requested package name is just a stem, like "python", we may
@@ -133,6 +133,9 @@ notes:
    it is much more efficient to pass the list directly to the I(name) option.
  - To use an AUR helper (I(executable) option), a few extra setup steps might be required beforehand.
    For example, a dedicated build user with permissions to install packages could be necessary.
+  - >
+    In the tests, while using C(yay) as the I(executable) option, the module failed to install AUR packages
+    with the error: C(error: target not found: <pkg>).
"""

RETURN = """

@@ -263,6 +266,7 @@ EXAMPLES = """
    reason_for: all
"""

+import re
import shlex
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict, namedtuple

@@ -418,7 +422,7 @@ class Pacman(object):
        for p in name_ver:
            # With Pacman v6.0.1 - libalpm v13.0.1, --upgrade outputs "loading packages..." on stdout. strip that.
            # When installing from URLs, pacman can also output a 'nothing to do' message. strip that too.
-            if "loading packages" in p or "there is nothing to do" in p:
+            if "loading packages" in p or "there is nothing to do" in p or 'Avoid running' in p:
                continue
            name, version = p.split()
            if name in self.inventory["installed_pkgs"]:

@@ -706,11 +710,12 @@ class Pacman(object):
        installed_pkgs = {}
        dummy, stdout, dummy = self.m.run_command([self.pacman_path, "--query"], check_rc=True)
        # Format of a line: "pacman 6.0.1-2"
+        query_re = re.compile(r'^\s*(?P<pkg>\S+)\s+(?P<ver>\S+)\s*$')
        for l in stdout.splitlines():
            l = l.strip()
-            if not l:
+            query_match = query_re.match(l)
+            if not query_match:
                continue
-            pkg, ver = l.split()
+            pkg, ver = query_match.groups()
            installed_pkgs[pkg] = ver

        installed_groups = defaultdict(set)

@@ -721,11 +726,12 @@ class Pacman(object):
        # base-devel file
        # base-devel findutils
        # ...
+        query_groups_re = re.compile(r'^\s*(?P<group>\S+)\s+(?P<pkg>\S+)\s*$')
        for l in stdout.splitlines():
            l = l.strip()
-            if not l:
+            query_groups_match = query_groups_re.match(l)
+            if not query_groups_match:
                continue
-            group, pkgname = l.split()
+            group, pkgname = query_groups_match.groups()
            installed_groups[group].add(pkgname)

        available_pkgs = {}

@@ -747,11 +753,12 @@ class Pacman(object):
        # vim-plugins vim-airline-themes
        # vim-plugins vim-ale
        # ...
+        sync_groups_re = re.compile(r'^\s*(?P<group>\S+)\s+(?P<pkg>\S+)\s*$')
        for l in stdout.splitlines():
            l = l.strip()
-            if not l:
+            sync_groups_match = sync_groups_re.match(l)
+            if not sync_groups_match:
                continue
-            group, pkg = l.split()
+            group, pkg = sync_groups_match.groups()
            available_groups[group].add(pkg)

        upgradable_pkgs = {}

@@ -759,9 +766,14 @@ class Pacman(object):
            [self.pacman_path, "--query", "--upgrades"], check_rc=False
        )

+        stdout = stdout.splitlines()
+        if stdout and "Avoid running" in stdout[0]:
+            stdout = stdout[1:]
+        stdout = "\n".join(stdout)

        # non-zero exit with nothing in stdout -> nothing to upgrade, all good
        # stderr can have warnings, so not checked here
-        if rc == 1 and stdout == "":
+        if rc == 1 and not stdout:
            pass  # nothing to upgrade
        elif rc == 0:
            # Format of lines:

@@ -771,7 +783,7 @@ class Pacman(object):
            l = l.strip()
            if not l:
                continue
-            if "[ignored]" in l:
+            if "[ignored]" in l or "Avoid running" in l:
                continue
            s = l.split()
            if len(s) != 4:
@@ -20,10 +20,11 @@ description:
    command line tool. For a full description of the fields and the options
    check the GNU parted manual.
requirements:
-  - This module requires parted version 1.8.3 and above
-  - align option (except 'undefined') requires parted 2.1 and above
-  - If the version of parted is below 3.1, it requires a Linux version running
-    the sysfs file system C(/sys/).
+  - This module requires C(parted) version 1.8.3 and above.
+  - Option I(align) (except C(undefined)) requires C(parted) 2.1 or above.
+  - If the version of C(parted) is below 3.1, it requires a Linux version running
+    the C(sysfs) file system C(/sys/).
+  - Requires the C(resizepart) command when using the I(resize) parameter.
extends_documentation_fragment:
  - community.general.attributes
attributes:

@@ -33,66 +34,70 @@ attributes:
    support: none
options:
  device:
-    description: The block device (disk) where to operate.
+    description:
+      - The block device (disk) where to operate.
+      - Regular files can also be partitioned, but it is recommended to create a
+        loopback device using C(losetup) to easily access its partitions.
    type: str
    required: true
  align:
-    description: Set alignment for newly created partitions. Use 'undefined' for parted default alignment.
+    description:
+      - Set alignment for newly created partitions. Use C(undefined) for parted default alignment.
    type: str
    choices: [ cylinder, minimal, none, optimal, undefined ]
    default: optimal
  number:
    description:
-      - The number of the partition to work with or the number of the partition
-        that will be created.
-      - Required when performing any action on the disk, except fetching information.
+      - The partition number being affected.
+      - Required when performing any action on the disk, except fetching information.
    type: int
  unit:
    description:
-      - Selects the current default unit that Parted will use to display
-        locations and capacities on the disk and to interpret those given by the
-        user if they are not suffixed by a unit.
-      - When fetching information about a disk, it is always recommended to specify a unit.
+      - Selects the current default unit that Parted will use to display
+        locations and capacities on the disk and to interpret those given by the
+        user if they are not suffixed by a unit.
+      - When fetching information about a disk, it is recommended to always specify a unit.
    type: str
    choices: [ s, B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, '%', cyl, chs, compact ]
    default: KiB
  label:
    description:
-      - Disk label type to use.
-      - If C(device) already contains different label, it will be changed to C(label) and any previous partitions will be lost.
+      - Disk label type or partition table to use.
+      - If I(device) already contains a different label, it will be changed to I(label)
+        and any previous partitions will be lost.
+      - A I(name) must be specified for a C(gpt) partition table.
    type: str
    choices: [ aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun ]
    default: msdos
  part_type:
    description:
-      - May be specified only with 'msdos' or 'dvh' partition tables.
-      - A C(name) must be specified for a 'gpt' partition table.
-      - Neither C(part_type) nor C(name) may be used with a 'sun' partition table.
+      - May be specified only with I(label=msdos) or I(label=dvh).
+      - Neither I(part_type) nor I(name) may be used with I(label=sun).
    type: str
    choices: [ extended, logical, primary ]
    default: primary
  part_start:
    description:
-      - Where the partition will start as offset from the beginning of the disk,
-        that is, the "distance" from the start of the disk. Negative numbers
-        specify distance from the end of the disk.
-      - The distance can be specified with all the units supported by parted
-        (except compat) and it is case sensitive, e.g. C(10GiB), C(15%).
-      - Using negative values may require setting of C(fs_type) (see notes).
+      - Where the partition will start as offset from the beginning of the disk,
+        that is, the "distance" from the start of the disk. Negative numbers
+        specify distance from the end of the disk.
+      - The distance can be specified with all the units supported by parted
+        (except compat) and it is case sensitive, e.g. C(10GiB), C(15%).
+      - Using negative values may require setting of I(fs_type) (see notes).
    type: str
    default: 0%
  part_end:
    description:
-      - Where the partition will end as offset from the beginning of the disk,
-        that is, the "distance" from the start of the disk. Negative numbers
-        specify distance from the end of the disk.
-      - The distance can be specified with all the units supported by parted
-        (except compat) and it is case sensitive, e.g. C(10GiB), C(15%).
+      - Where the partition will end as offset from the beginning of the disk,
+        that is, the "distance" from the start of the disk. Negative numbers
+        specify distance from the end of the disk.
+      - The distance can be specified with all the units supported by parted
+        (except compat) and it is case sensitive, e.g. C(10GiB), C(15%).
    type: str
    default: 100%
  name:
    description:
-      - Sets the name for the partition number (GPT, Mac, MIPS and PC98 only).
+      - Sets the name for the partition number (GPT, Mac, MIPS and PC98 only).
    type: str
  flags:
    description: A list of the flags that has to be set on the partition.

@@ -100,15 +105,15 @@ options:
    elements: str
  state:
    description:
-      - Whether to create or delete a partition.
-      - If set to C(info) the module will only return the device information.
+      - Whether to create or delete a partition.
+      - If set to C(info) the module will only return the device information.
    type: str
    choices: [ absent, present, info ]
    default: info
  fs_type:
    description:
-      - If specified and the partition does not exist, will set filesystem type to given partition.
-      - Parameter optional, but see notes below about negative C(part_start) values.
+      - If specified and the partition does not exist, will set filesystem type to given partition.
+      - Parameter optional, but see notes below about negative I(part_start) values.
    type: str
    version_added: '0.2.0'
  resize:

@@ -123,9 +128,9 @@ notes:
    installed on the system is before version 3.1, the module queries the kernel
    through C(/sys/) to obtain disk information. In this case the units CHS and
    CYL are not supported.
-  - Negative C(part_start) start values were rejected if C(fs_type) was not given.
-    This bug was fixed in parted 3.2.153. If you want to use negative C(part_start),
-    specify C(fs_type) as well or make sure your system contains newer parted.
+  - Negative I(part_start) start values were rejected if I(fs_type) was not given.
+    This bug was fixed in parted 3.2.153. If you want to use negative I(part_start),
+    specify I(fs_type) as well or make sure your system contains newer parted.
'''
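To make the reworked option descriptions concrete, a minimal task sketch (device name and filesystem type are illustrative) that creates one primary partition using the documented I(part_start)/I(part_end) defaults:

- name: Create a primary partition spanning the whole disk
  community.general.parted:
    device: /dev/sdb
    label: msdos
    number: 1
    state: present
    part_start: 0%
    part_end: 100%
    fs_type: ext4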
RETURN = r'''
@@ -57,7 +57,7 @@ options:
  install_deps:
    description:
      - Include applications of dependent packages.
-      - Only used when I(state=install), I(state=upgrade), or I(state=inject).
+      - Only used when I(state=install), I(state=latest), or I(state=inject).
    type: bool
    default: false
  inject_packages:

@@ -69,25 +69,33 @@ options:
  force:
    description:
      - Force modification of the application's virtual environment. See C(pipx) for details.
-      - Only used when I(state=install), I(state=upgrade), I(state=upgrade_all), or I(state=inject).
+      - Only used when I(state=install), I(state=upgrade), I(state=upgrade_all), I(state=latest), or I(state=inject).
    type: bool
    default: false
  include_injected:
    description:
      - Upgrade the injected packages along with the application.
-      - Only used when I(state=upgrade) or I(state=upgrade_all).
+      - Only used when I(state=upgrade), I(state=upgrade_all), or I(state=latest).
+      - This is used with I(state=upgrade) and I(state=latest) since community.general 6.6.0.
    type: bool
    default: false
  index_url:
    description:
      - Base URL of Python Package Index.
-      - Only used when I(state=install), I(state=upgrade), or I(state=inject).
+      - Only used when I(state=install), I(state=upgrade), I(state=latest), or I(state=inject).
    type: str
  python:
    description:
      - Python version to be used when creating the application virtual environment. Must be 3.6+.
-      - Only used when I(state=install), I(state=reinstall), or I(state=reinstall_all).
+      - Only used when I(state=install), I(state=latest), I(state=reinstall), or I(state=reinstall_all).
    type: str
+  system_site_packages:
+    description:
+      - Give application virtual environment access to the system site-packages directory.
+      - Only used when I(state=install) or I(state=latest).
+    type: bool
+    default: false
+    version_added: 6.6.0
  executable:
    description:
      - Path to the C(pipx) installed in the system.

@@ -112,6 +120,7 @@ notes:
  - >
    This module will honor C(pipx) environment variables such as but not limited to C(PIPX_HOME) and C(PIPX_BIN_DIR)
    passed using the R(environment Ansible keyword, playbooks_environment).
  - This module requires C(pipx) version 0.16.2.1 or above.
  - Please note that C(pipx) requires Python 3.6 or above.
  - >
    This first implementation does not verify whether a specified version constraint has been installed or not.

@@ -175,6 +184,7 @@ class PipX(StateModuleHelper):
        include_injected=dict(type='bool', default=False),
        index_url=dict(type='str'),
        python=dict(type='str'),
+        system_site_packages=dict(type='bool', default=False),
        executable=dict(type='path'),
        editable=dict(type='bool', default=False),
        pip_args=dict(type='str'),

@@ -242,7 +252,7 @@ class PipX(StateModuleHelper):
    def state_install(self):
        if not self.vars.application or self.vars.force:
            self.changed = True
-            with self.runner('state index_url install_deps force python editable pip_args name_source', check_mode_skip=True) as ctx:
+            with self.runner('state index_url install_deps force python system_site_packages editable pip_args name_source', check_mode_skip=True) as ctx:
                ctx.run(name_source=[self.vars.name, self.vars.source])
                self._capture_results(ctx)

@@ -254,7 +264,7 @@ class PipX(StateModuleHelper):
        if self.vars.force:
            self.changed = True

-        with self.runner('state index_url install_deps force editable pip_args name', check_mode_skip=True) as ctx:
+        with self.runner('state include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx:
            ctx.run()
            self._capture_results(ctx)

@@ -303,11 +313,11 @@ class PipX(StateModuleHelper):
    def state_latest(self):
        if not self.vars.application or self.vars.force:
            self.changed = True
-            with self.runner('state index_url install_deps force python editable pip_args name_source', check_mode_skip=True) as ctx:
+            with self.runner('state index_url install_deps force python system_site_packages editable pip_args name_source', check_mode_skip=True) as ctx:
                ctx.run(state='install', name_source=[self.vars.name, self.vars.source])
                self._capture_results(ctx)

-        with self.runner('state index_url install_deps force editable pip_args name', check_mode_skip=True) as ctx:
+        with self.runner('state include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx:
            ctx.run(state='upgrade')
            self._capture_results(ctx)
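A minimal task sketch of the new I(system_site_packages) option with I(state=install) (the package name is illustrative):

- name: Install tox, giving its virtual environment access to system site-packages
  community.general.pipx:
    name: tox
    state: install
    system_site_packages: true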
@@ -53,6 +53,7 @@ notes:
  - >
    This module will honor C(pipx) environment variables such as but not limited to C(PIPX_HOME) and C(PIPX_BIN_DIR)
    passed using the R(environment Ansible keyword, playbooks_environment).
  - This module requires C(pipx) version 0.16.2.1 or above.
  - Please note that C(pipx) requires Python 3.6 or above.
  - See also the C(pipx) documentation at U(https://pypa.github.io/pipx/).
author:
@@ -333,9 +333,9 @@ def emerge_packages(module, packages):
    """Run emerge command against given list of atoms."""
    p = module.params

-    if p['noreplace'] and not (p['update'] or p['state'] == 'latest'):
+    if p['noreplace'] and not p['changed_use'] and not p['newuse'] and not (p['update'] or p['state'] == 'latest'):
        for package in packages:
-            if p['noreplace'] and not query_package(module, package, 'emerge'):
+            if p['noreplace'] and not p['changed_use'] and not p['newuse'] and not query_package(module, package, 'emerge'):
                break
        else:
            module.exit_json(changed=False, msg='Packages already present.')

@@ -383,14 +383,12 @@ def emerge_packages(module, packages):
            """Fallback to default: don't use this argument at all."""
            continue

-        if not flag_val:
+        """Add the --flag=value pair."""
+        if isinstance(flag_val, bool):
+            args.extend((arg, to_native('y' if flag_val else 'n')))
+        elif not flag_val:
            """If the value is 0 or 0.0: add the flag, but not the value."""
            args.append(arg)
            continue
-
-        """Add the --flag=value pair."""
-        if isinstance(p[flag], bool):
-            args.extend((arg, to_native('y' if flag_val else 'n')))
        else:
            args.extend((arg, to_native(flag_val)))
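A hedged task sketch of what the extended condition enables: with I(changed_use), an already-installed atom is re-emerged instead of being reported as 'Packages already present.' (the package atom is illustrative):

- name: Re-emerge nginx when its USE flags have changed
  community.general.portage:
    package: www-servers/nginx
    changed_use: true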
@@ -427,7 +427,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
        """Check if the specified container is a template."""
        proxmox_node = self.proxmox_api.nodes(node)
        config = getattr(proxmox_node, VZ_TYPE)(vmid).config.get()
-        return config['template']
+        return config.get('template', False)

    def create_instance(self, vmid, node, disk, storage, cpus, memory, swap, timeout, clone, **kwargs):
@@ -505,7 +505,9 @@ class ProxmoxDiskAnsible(ProxmoxAnsible):
            timeout_str = "Reached timeout while importing VM disk. Last line in task before timeout: %s"
            ok_str = "Disk %s imported into VM %s"
        else:
-            config_str = "%s:%s" % (self.module.params["storage"], self.module.params["size"])
+            config_str = self.module.params["storage"]
+            if self.module.params.get("media") != "cdrom":
+                config_str += ":%s" % (self.module.params["size"])
            ok_str = "Disk %s created in VM %s"
            timeout_str = "Reached timeout while creating VM disk. Last line in task before timeout: %s"
@@ -281,8 +281,9 @@ options:
|
||||
type: int
|
||||
name:
|
||||
description:
|
||||
- Specifies the VM name. Only used on the configuration web interface.
|
||||
- Required only for C(state=present).
|
||||
- Specifies the VM name. Name could be non-unique across the cluster.
|
||||
- Required only for I(state=present).
|
||||
- With I(state=present) if I(vmid) not provided and VM with name exists in the cluster then no changes will be made.
|
||||
type: str
|
||||
nameservers:
|
||||
description:
|
||||
@@ -407,6 +408,14 @@ options:
|
||||
smbios:
|
||||
description:
|
||||
- Specifies SMBIOS type 1 fields.
|
||||
- "Comma separated, Base64 encoded (optional) SMBIOS properties:"
|
||||
- C([base64=<1|0>] [,family=<Base64 encoded string>])
|
||||
- C([,manufacturer=<Base64 encoded string>])
|
||||
- C([,product=<Base64 encoded string>])
|
||||
- C([,serial=<Base64 encoded string>])
|
||||
- C([,sku=<Base64 encoded string>])
|
||||
- C([,uuid=<UUID>])
|
||||
- C([,version=<Base64 encoded string>])
|
||||
type: str
|
||||
snapname:
|
||||
description:
|
||||
@@ -1210,10 +1219,14 @@ def main():
    # the cloned vm name or retrieve the next free VM id from ProxmoxAPI.
    if not vmid:
        if state == 'present' and not update and not clone and not delete and not revert:
-            try:
-                vmid = proxmox.get_nextvmid()
-            except Exception:
-                module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
+            existing_vmid = proxmox.get_vmid(name, ignore_missing=True)
+            if existing_vmid:
+                vmid = existing_vmid
+            else:
+                try:
+                    vmid = proxmox.get_nextvmid()
+                except Exception:
+                    module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
        else:
            clone_target = clone or name
            vmid = proxmox.get_vmid(clone_target, ignore_missing=True)
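The rewritten branch above looks up an existing VM by name before falling back to allocating the next free vmid, which is what makes repeated runs with state=present idempotent instead of creating a duplicate. The pattern, reduced to a sketch (get_vmid and get_nextvmid stand in for the module's ProxmoxAnsible helpers):

    def resolve_vmid(proxmox, name):
        # Reuse the id of a VM that already carries this name, if any.
        existing_vmid = proxmox.get_vmid(name, ignore_missing=True)
        if existing_vmid:
            return existing_vmid
        # Only a genuinely new VM gets a freshly allocated id.
        return proxmox.get_nextvmid()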
@@ -1413,7 +1426,7 @@ def main():
            status['status'] = vm['status']
            if vm['status'] == 'running':
                if module.params['force']:
-                    proxmox.stop_vm(vm, True)
+                    proxmox.stop_vm(vm, True, timeout=module.params['timeout'])
                else:
                    module.exit_json(changed=False, vmid=vmid, msg="VM %s is running. Stop it before deletion or use force=true." % vmid)
            taskid = proxmox_node.qemu.delete(vmid)
@@ -160,8 +160,7 @@ def main():

    module = AnsibleModule(
        argument_spec=module_args,
-        required_together=[('api_token_id', 'api_token_secret'),
-                           ('api_user', 'api_password')],
+        required_together=[('api_token_id', 'api_token_secret')],
+        required_one_of=[('api_password', 'api_token_id')],
        supports_check_mode=True)
    result = dict(changed=False)
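The corrected constraints express the two supported authentication schemes: required_together means that if api_token_id is supplied, api_token_secret must come with it, while required_one_of demands at least one of api_password or api_token_id. A reduced sketch of such a spec (runnable only under Ansible; option names follow the diff above):

    from ansible.module_utils.basic import AnsibleModule

    module = AnsibleModule(
        argument_spec=dict(
            api_user=dict(type='str'),
            api_password=dict(type='str', no_log=True),
            api_token_id=dict(type='str'),
            api_token_secret=dict(type='str', no_log=True),
        ),
        # token id and secret are only valid as a pair
        required_together=[('api_token_id', 'api_token_secret')],
        # but either password auth or token auth must be chosen
        required_one_of=[('api_password', 'api_token_id')],
        supports_check_mode=True,
    )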
@@ -193,14 +193,14 @@ class ProxmoxUser:
                self.user[k] = v
            elif k in ['groups', 'tokens'] and (v == '' or v is None):
                self.user[k] = []
-            elif k == 'groups' and type(v) == str:
+            elif k == 'groups' and isinstance(v, str):
                self.user['groups'] = v.split(',')
-            elif k == 'tokens' and type(v) == list:
+            elif k == 'tokens' and isinstance(v, list):
                for token in v:
                    if 'privsep' in token:
                        token['privsep'] = proxmox_to_ansible_bool(token['privsep'])
                self.user['tokens'] = v
-            elif k == 'tokens' and type(v) == dict:
+            elif k == 'tokens' and isinstance(v, dict):
                self.user['tokens'] = list()
                for tokenid, tokenvalues in v.items():
                    t = tokenvalues
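Replacing type(v) == str with isinstance(v, str) is more than style: isinstance also accepts subclasses, which matters when values arrive wrapped in str or dict subclasses, and it is what PEP 8 recommends. A quick demonstration:

    class TaggedStr(str):
        pass

    v = TaggedStr("admins,users")
    print(type(v) == str)      # False: exact-type comparison misses subclasses
    print(isinstance(v, str))  # True: isinstance respects inheritance
    print(v.split(','))        # ['admins', 'users'] either way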
@@ -81,6 +81,12 @@ options:
      - A list of puppet tags to be used.
    type: list
    elements: str
+  skip_tags:
+    description:
+      - A list of puppet tags to be excluded.
+    type: list
+    elements: str
+    version_added: 6.6.0
  execute:
    description:
      - Execute a specific piece of Puppet code.
@@ -143,6 +149,8 @@ EXAMPLES = r'''
    tags:
      - update
      - nginx
+    skip_tags:
+      - service

- name: Run puppet agent in noop mode
  community.general.puppet:
@@ -198,6 +206,7 @@ def main():
            environment=dict(type='str'),
            certname=dict(type='str'),
            tags=dict(type='list', elements='str'),
+            skip_tags=dict(type='list', elements='str'),
            execute=dict(type='str'),
            summarize=dict(type='bool', default=False),
            debug=dict(type='bool', default=False),
@@ -232,11 +241,11 @@ def main():
    runner = puppet_utils.puppet_runner(module)

    if not p['manifest'] and not p['execute']:
-        args_order = "_agent_fixed puppetmaster show_diff confdir environment tags certname noop use_srv_records"
+        args_order = "_agent_fixed puppetmaster show_diff confdir environment tags skip_tags certname noop use_srv_records"
        with runner(args_order) as ctx:
            rc, stdout, stderr = ctx.run()
    else:
-        args_order = "_apply_fixed logdest modulepath environment certname tags noop _execute summarize debug verbose"
+        args_order = "_apply_fixed logdest modulepath environment certname tags skip_tags noop _execute summarize debug verbose"
        with runner(args_order) as ctx:
            rc, stdout, stderr = ctx.run(_execute=[p['execute'], p['manifest']])
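Adding skip_tags to the argument spec is not enough on its own: the runner only emits CLI arguments for the names listed in args_order, so a parameter missing from that string is silently never passed to puppet. A generic sketch of that ordered-formatter idea (illustrative only, not the collection's actual cmd_runner API):

    def build_command(binary, args_order, formatters, params):
        # Emit arguments in the fixed order given by args_order, skipping
        # parameters the user did not set.
        cmd = [binary]
        for name in args_order.split():
            value = params.get(name)
            if value is not None:
                cmd.extend(formatters[name](value))
        return cmd

    formatters = {
        "tags": lambda v: ["--tags", ",".join(v)],
        "skip_tags": lambda v: ["--skip_tags", ",".join(v)],
        "noop": lambda v: ["--noop"] if v else [],
    }
    params = {"tags": ["update", "nginx"], "skip_tags": ["service"], "noop": True}
    print(build_command("puppet", "tags skip_tags noop", formatters, params))
    # ['puppet', '--tags', 'update,nginx', '--skip_tags', 'service', '--noop']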
@@ -19,6 +19,16 @@ description:
    registering using D-Bus if possible.
author: "Barnaby Court (@barnabycourt)"
notes:
+  - |
+    The module tries to use the D-Bus C(rhsm) service (part of C(subscription-manager))
+    to register, starting from community.general 6.5.0: this is done so credentials
+    (username, password, activation keys) can be passed to C(rhsm) in a secure way.
+    C(subscription-manager) itself gets credentials only as arguments of command line
+    parameters, which is I(not) secure, as they can be easily stolen by checking the
+    process listing on the system. Due to limitations of the D-Bus interface of C(rhsm),
+    the module will I(not) use D-Bus for registration when trying either to register
+    using I(token), or when specifying I(environment), or when the system is old
+    (typically RHEL 6 and older).
  - In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID.
  - Since 2.5 values for I(server_hostname), I(server_insecure), I(rhsm_baseurl),
    I(server_proxy_hostname), I(server_proxy_port), I(server_proxy_user) and
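The security rationale in the note above is easy to verify: on Linux the argument vector of every process is world-readable, so any secret passed as a command line parameter can be read by any local user. For example:

    import os

    def read_cmdline(pid):
        # /proc/<pid>/cmdline is NUL-separated and readable by all users
        with open('/proc/%d/cmdline' % pid, 'rb') as f:
            return f.read().split(b'\0')

    print(read_cmdline(os.getpid()))

This is why the module prefers handing credentials to the rhsm D-Bus service over invoking subscription-manager with them as CLI arguments.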
@@ -450,7 +460,7 @@ class Rhsm(RegistrationBase):
            self.module.debug('Verified system D-Bus bus as usable')
        return True

-    def register(self, username, password, token, auto_attach, activationkey, org_id,
+    def register(self, was_registered, username, password, token, auto_attach, activationkey, org_id,
                 consumer_type, consumer_name, consumer_id, force_register, environment,
                 release):
        '''
@@ -461,9 +471,11 @@ class Rhsm(RegistrationBase):
        * Exception - if any error occurs during the registration
        '''
        # There is no support for token-based registration in the D-Bus API
-        # of rhsm, so always use the CLI in that case.
-        if not token and self._can_connect_to_dbus():
-            self._register_using_dbus(username, password, auto_attach,
+        # of rhsm, so always use the CLI in that case;
+        # also, since the specified environments are names, and the D-Bus APIs
+        # require IDs for the environments, use the CLI also in that case
+        if not token and not environment and self._can_connect_to_dbus():
+            self._register_using_dbus(was_registered, username, password, auto_attach,
                                      activationkey, org_id, consumer_type,
                                      consumer_name, consumer_id,
                                      force_register, environment, release)
@@ -521,7 +533,7 @@ class Rhsm(RegistrationBase):

        rc, stderr, stdout = self.module.run_command(args, check_rc=True, expand_user_and_vars=False)

-    def _register_using_dbus(self, username, password, auto_attach,
+    def _register_using_dbus(self, was_registered, username, password, auto_attach,
                             activationkey, org_id, consumer_type, consumer_name,
                             consumer_id, force_register, environment, release):
        '''
@@ -544,7 +556,8 @@ class Rhsm(RegistrationBase):
                return default

        distro_id = distro.id()
-        distro_version = tuple(str2int(p) for p in distro.version_parts())
+        distro_version_parts = distro.version_parts()
+        distro_version = tuple(str2int(p) for p in distro_version_parts)

        # Stop the rhsm service when using systemd (which means Fedora or
        # RHEL 7+): this is because the service may not use new configuration bits
@@ -570,12 +583,39 @@ class Rhsm(RegistrationBase):
                 distro_version[0] > 9)):
            dbus_force_option_works = True

-        if force_register and not dbus_force_option_works:
+        if force_register and not dbus_force_option_works and was_registered:
            self.unregister()

        register_opts = {}
        if consumer_type:
-            register_opts['consumer_type'] = consumer_type
+            # The option for the consumer type used to be 'type' in versions
+            # of RHEL before 9 & in RHEL 9 before 9.2, and then it changed to
+            # 'consumer_type'; since the Register*() D-Bus functions reject
+            # unknown options, we have to pass the right option depending on
+            # the version -- funky.
+            def supports_option_consumer_type():
+                # subscription-manager in any supported Fedora version
+                # has the new option.
+                if distro_id == 'fedora':
+                    return True
+                # Check for RHEL 9 >= 9.2, or RHEL >= 10.
+                if distro_id == 'rhel' and \
+                        ((distro_version[0] == 9 and distro_version[1] >= 2) or
+                         distro_version[0] >= 10):
+                    return True
+                # CentOS: since the change was only done in EL 9, then there is
+                # only CentOS Stream for 9, and thus we can assume it has the
+                # latest version of subscription-manager.
+                if distro_id == 'centos' and distro_version[0] >= 9:
+                    return True
+                # Unknown or old distro: assume it does not support
+                # the new option.
+                return False
+
+            consumer_type_key = 'type'
+            if supports_option_consumer_type():
+                consumer_type_key = 'consumer_type'
+            register_opts[consumer_type_key] = consumer_type
        if consumer_name:
            register_opts['name'] = consumer_name
        if consumer_id:
@@ -585,14 +625,34 @@ class Rhsm(RegistrationBase):
            # of RHEL before 8.6, and then it changed to 'environments'; since
            # the Register*() D-Bus functions reject unknown options, we have
            # to pass the right option depending on the version -- funky.
+            def supports_option_environments():
+                # subscription-manager in any supported Fedora version
+                # has the new option.
+                if distro_id == 'fedora':
+                    return True
+                # Check for RHEL 8 >= 8.6, or RHEL >= 9.
+                if distro_id == 'rhel' and \
+                        ((distro_version[0] == 8 and distro_version[1] >= 6) or
+                         distro_version[0] >= 9):
+                    return True
+                # CentOS: similar checks as for RHEL, with one extra bit:
+                # if the 2nd part of the version is empty, it means it is
+                # CentOS Stream, and thus we can assume it has the latest
+                # version of subscription-manager.
+                if distro_id == 'centos' and \
+                        ((distro_version[0] == 8 and
+                          (distro_version[1] >= 6 or distro_version_parts[1] == '')) or
+                         distro_version[0] >= 9):
+                    return True
+                # Unknown or old distro: assume it does not support
+                # the new option.
+                return False
+
            environment_key = 'environment'
-            if distro_id == 'fedora' or \
-                (distro_id == 'rhel' and
-                 ((distro_version[0] == 8 and distro_version[1] >= 6) or
-                  distro_version[0] >= 9)):
+            if supports_option_environments():
                environment_key = 'environments'
            register_opts[environment_key] = environment
-        if force_register and dbus_force_option_works:
+        if force_register and dbus_force_option_works and was_registered:
            register_opts['force'] = True
        # Wrap it as proper D-Bus dict
        register_opts = dbus.Dictionary(register_opts, signature='sv', variant_level=1)
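Both helpers above lean on the fact that distro.version_parts() returns strings, with the minor part possibly empty (CentOS Stream reports only a major version), which is why the raw parts are kept alongside the int tuple. A sketch with a str2int like the module's:

    def str2int(s, default=0):
        # Tolerate empty or non-numeric version parts.
        try:
            return int(s)
        except ValueError:
            return default

    distro_version_parts = ('8', '', '')  # hypothetical CentOS Stream reading
    distro_version = tuple(str2int(p) for p in distro_version_parts)
    print(distro_version)                 # (8, 0, 0): safe to compare numerically
    print(distro_version_parts[1] == '')  # True: the Stream detection used above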
@@ -1137,8 +1197,11 @@ def main():
    # Ensure system is registered
    if state == 'present':

+        # Cache the status of the system before the changes
+        was_registered = rhsm.is_registered
+
        # Register system
-        if rhsm.is_registered and not force_register:
+        if was_registered and not force_register:
            if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True:
                try:
                    rhsm.sync_syspurpose()
@@ -1165,7 +1228,7 @@ def main():
            try:
                rhsm.enable()
                rhsm.configure(**module.params)
-                rhsm.register(username, password, token, auto_attach, activationkey, org_id,
+                rhsm.register(was_registered, username, password, token, auto_attach, activationkey, org_id,
                              consumer_type, consumer_name, consumer_id, force_register,
                              environment, release)
                if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True:
@@ -32,8 +32,8 @@ attributes:
options:
  release:
    description:
-      - RHSM release version to use (use null to unset)
-    required: true
+      - RHSM release version to use.
+      - To unset either pass C(null) for this option, or omit this option.
    type: str
author:
  - Sean Myers (@seandst)
@@ -43,17 +43,17 @@ EXAMPLES = '''
# Set release version to 7.1
- name: Set RHSM release version
  community.general.rhsm_release:
-      release: "7.1"
+    release: "7.1"

# Set release version to 6Server
- name: Set RHSM release version
  community.general.rhsm_release:
-      release: "6Server"
+    release: "6Server"

# Unset release version
- name: Unset RHSM release release
  community.general.rhsm_release:
-      release: null
+    release: null
'''

RETURN = '''
@@ -107,7 +107,7 @@ def set_release(module, release):
def main():
    module = AnsibleModule(
        argument_spec=dict(
-            release=dict(type='str', required=True),
+            release=dict(type='str'),
        ),
        supports_check_mode=True
    )
@@ -222,6 +222,9 @@ def repository_modify(module, state, name, purge=False):
            diff_after.join("Repository '{repoid}' is disabled for this system\n".format(repoid=repoid))
            results.append("Repository '{repoid}' is disabled for this system".format(repoid=repoid))
            rhsm_arguments.extend(['--disable', repoid])
+            for updated_repo in updated_repo_list:
+                if updated_repo['id'] in difference:
+                    updated_repo['enabled'] = False

    diff = {'before': diff_before,
            'after': diff_after,
@@ -36,22 +36,10 @@ options:
    description:
      - Sets the project name.
    required: true
-  url:
-    type: str
-    description:
-      - Sets the rundeck instance URL.
-    required: true
-  api_version:
-    type: int
-    description:
-      - Sets the API version used by module.
-      - API version must be at least 14.
-    default: 14
-  token:
-    type: str
+  api_token:
    description:
      - Sets the token to authenticate against Rundeck API.
    required: true
+    aliases: ["token"]
  project:
    type: str
    description:
@@ -82,8 +70,9 @@ options:
  validate_certs:
    version_added: '0.2.0'
extends_documentation_fragment:
-    - ansible.builtin.url
-    - community.general.attributes
+  - ansible.builtin.url
+  - community.general.attributes
+  - community.general.rundeck
'''

EXAMPLES = '''
@@ -107,7 +96,7 @@ EXAMPLES = '''

- name: Remove a rundeck system policy
  community.general.rundeck_acl_policy:
-    name: "Project_02"
+    name: "Project_01"
    url: "https://rundeck.example.org"
    token: "mytoken"
    state: absent
@@ -129,49 +118,25 @@ after:
'''

# import module snippets
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.urls import fetch_url, url_argument_spec
-from ansible.module_utils.common.text.converters import to_text
-import json
import re

+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rundeck import (
+    api_argument_spec,
+    api_request,
+)


class RundeckACLManager:
    def __init__(self, module):
        self.module = module

-    def handle_http_code_if_needed(self, infos):
-        if infos["status"] == 403:
-            self.module.fail_json(msg="Token not allowed. Please ensure token is allowed or has the correct "
-                                      "permissions.", rundeck_response=infos["body"])
-        elif infos["status"] >= 500:
-            self.module.fail_json(msg="Fatal Rundeck API error.", rundeck_response=infos["body"])
-
-    def request_rundeck_api(self, query, data=None, method="GET"):
-        resp, info = fetch_url(self.module,
-                               "%s/api/%d/%s" % (self.module.params["url"], self.module.params["api_version"], query),
-                               data=json.dumps(data),
-                               method=method,
-                               headers={
-                                   "Content-Type": "application/json",
-                                   "Accept": "application/json",
-                                   "X-Rundeck-Auth-Token": self.module.params["token"]
-                               })
-
-        self.handle_http_code_if_needed(info)
-        if resp is not None:
-            resp = resp.read()
-            if resp != b"":
-                try:
-                    json_resp = json.loads(to_text(resp, errors='surrogate_or_strict'))
-                    return json_resp, info
-                except ValueError as e:
-                    self.module.fail_json(msg="Rundeck response was not a valid JSON. Exception was: %s. "
-                                              "Object was: %s" % (str(e), resp))
-        return resp, info
-
    def get_acl(self):
-        resp, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"])
+        resp, info = api_request(
+            module=self.module,
+            endpoint="system/acl/%s.aclpolicy" % self.module.params["name"],
+        )

        return resp

    def create_or_update_acl(self):
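The refactor replaces each module's hand-rolled fetch_url plumbing with the shared api_argument_spec()/api_request() helpers from the collection's rundeck module_utils, so URL construction, headers, and JSON decoding live in one place. A minimal sketch of what such a wrapper does (illustrative; the real implementation lives in ansible_collections.community.general.plugins.module_utils.rundeck):

    import json

    from ansible.module_utils.urls import fetch_url

    def api_request(module, endpoint, data=None, method="GET"):
        url = "%s/api/%d/%s" % (module.params["url"],
                                module.params["api_version"], endpoint)
        response, info = fetch_url(
            module, url,
            data=json.dumps(data) if data is not None else None,
            method=method,
            headers={
                "Content-Type": "application/json",
                "Accept": "application/json",
                "X-Rundeck-Auth-Token": module.params["api_token"],
            },
        )
        if response is None:
            return None, info
        body = response.read()
        return (json.loads(body) if body else None), info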
@@ -181,9 +146,12 @@ class RundeckACLManager:
        if self.module.check_mode:
            self.module.exit_json(changed=True, before={}, after=self.module.params["policy"])

-        dummy, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"],
-                                               method="POST",
-                                               data={"contents": self.module.params["policy"]})
+        resp, info = api_request(
+            module=self.module,
+            endpoint="system/acl/%s.aclpolicy" % self.module.params["name"],
+            method="POST",
+            data={"contents": self.module.params["policy"]},
+        )

        if info["status"] == 201:
            self.module.exit_json(changed=True, before={}, after=self.get_acl())
@@ -202,9 +170,12 @@ class RundeckACLManager:
        if self.module.check_mode:
            self.module.exit_json(changed=True, before=facts, after=facts)

-        dummy, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"],
-                                               method="PUT",
-                                               data={"contents": self.module.params["policy"]})
+        resp, info = api_request(
+            module=self.module,
+            endpoint="system/acl/%s.aclpolicy" % self.module.params["name"],
+            method="PUT",
+            data={"contents": self.module.params["policy"]},
+        )

        if info["status"] == 200:
            self.module.exit_json(changed=True, before=facts, after=self.get_acl())
@@ -216,34 +187,39 @@ class RundeckACLManager:

    def remove_acl(self):
        facts = self.get_acl()

        if facts is None:
            self.module.exit_json(changed=False, before={}, after={})
        else:
            # If not in check mode, remove the project
            if not self.module.check_mode:
-                self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"], method="DELETE")
-            self.module.exit_json(changed=True, before=facts, after={})
+                api_request(
+                    module=self.module,
+                    endpoint="system/acl/%s.aclpolicy" % self.module.params["name"],
+                    method="DELETE",
+                )
+
+            self.module.exit_json(changed=True, before=facts, after={})


def main():
-    # Also allow the user to set values for fetch_url
-    argument_spec = url_argument_spec()
+    argument_spec = api_argument_spec()
    argument_spec.update(dict(
        state=dict(type='str', choices=['present', 'absent'], default='present'),
        name=dict(required=True, type='str'),
-        url=dict(required=True, type='str'),
-        api_version=dict(type='int', default=14),
-        token=dict(required=True, type='str', no_log=True),
        policy=dict(type='str'),
        project=dict(type='str'),
    ))

+    argument_spec['api_token']['aliases'] = ['token']
+
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_if=[
            ['state', 'present', ['policy']],
        ],
-        supports_check_mode=True
+        supports_check_mode=True,
    )

    if not bool(re.match("[a-zA-Z0-9,.+_-]+", module.params["name"])):
@@ -38,22 +38,10 @@ options:
    description:
      - Sets the project name.
    required: true
-  url:
-    type: str
-    description:
-      - Sets the rundeck instance URL.
-    required: true
-  api_version:
-    type: int
-    description:
-      - Sets the API version used by module.
-      - API version must be at least 14.
-    default: 14
-  token:
-    type: str
+  api_token:
    description:
      - Sets the token to authenticate against Rundeck API.
    required: true
+    aliases: ["token"]
  client_cert:
    version_added: '0.2.0'
  client_key:
@@ -73,24 +61,27 @@ options:
  validate_certs:
    version_added: '0.2.0'
extends_documentation_fragment:
-    - ansible.builtin.url
-    - community.general.attributes
+  - ansible.builtin.url
+  - community.general.attributes
+  - community.general.rundeck
'''

EXAMPLES = '''
- name: Create a rundeck project
  community.general.rundeck_project:
    name: "Project_01"
-    api_version: 18
    label: "Project 01"
    description: "My Project 01"
    url: "https://rundeck.example.org"
-    token: "mytoken"
+    api_version: 39
+    api_token: "mytoken"
    state: present

- name: Remove a rundeck project
  community.general.rundeck_project:
-    name: "Project_02"
+    name: "Project_01"
    url: "https://rundeck.example.org"
-    token: "mytoken"
+    api_token: "mytoken"
    state: absent
'''
@@ -111,60 +102,47 @@ after:

# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
-from ansible.module_utils.urls import fetch_url, url_argument_spec
-import json
+from ansible_collections.community.general.plugins.module_utils.rundeck import (
+    api_argument_spec,
+    api_request,
+)


class RundeckProjectManager(object):
    def __init__(self, module):
        self.module = module

-    def handle_http_code_if_needed(self, infos):
-        if infos["status"] == 403:
-            self.module.fail_json(msg="Token not allowed. Please ensure token is allowed or has the correct "
-                                      "permissions.", rundeck_response=infos["body"])
-        elif infos["status"] >= 500:
-            self.module.fail_json(msg="Fatal Rundeck API error.", rundeck_response=infos["body"])
-
-    def request_rundeck_api(self, query, data=None, method="GET"):
-        resp, info = fetch_url(self.module,
-                               "%s/api/%d/%s" % (self.module.params["url"], self.module.params["api_version"], query),
-                               data=json.dumps(data),
-                               method=method,
-                               headers={
-                                   "Content-Type": "application/json",
-                                   "Accept": "application/json",
-                                   "X-Rundeck-Auth-Token": self.module.params["token"]
-                               })
-
-        self.handle_http_code_if_needed(info)
-        if resp is not None:
-            resp = resp.read()
-            if resp != "":
-                try:
-                    json_resp = json.loads(resp)
-                    return json_resp, info
-                except ValueError as e:
-                    self.module.fail_json(msg="Rundeck response was not a valid JSON. Exception was: %s. "
-                                              "Object was: %s" % (to_native(e), resp))
-        return resp, info
-
    def get_project_facts(self):
-        resp, info = self.request_rundeck_api("project/%s" % self.module.params["name"])
+        resp, info = api_request(
+            module=self.module,
+            endpoint="project/%s" % self.module.params["name"],
+        )

        return resp

    def create_or_update_project(self):
        facts = self.get_project_facts()

        if facts is None:
            # If in check mode don't create project, simulate a fake project creation
            if self.module.check_mode:
-                self.module.exit_json(changed=True, before={}, after={"name": self.module.params["name"]})
+                self.module.exit_json(
+                    changed=True,
+                    before={},
+                    after={
+                        "name": self.module.params["name"]
+                    },
+                )

-            resp, info = self.request_rundeck_api("projects", method="POST", data={
-                "name": self.module.params["name"],
-                "config": {}
-            })
+            resp, info = api_request(
+                module=self.module,
+                endpoint="projects",
+                method="POST",
+                data={
+                    "name": self.module.params["name"],
+                    "config": {},
+                }
+            )

            if info["status"] == 201:
                self.module.exit_json(changed=True, before={}, after=self.get_project_facts())
@@ -181,21 +159,25 @@ class RundeckProjectManager(object):
        else:
            # If not in check mode, remove the project
            if not self.module.check_mode:
-                self.request_rundeck_api("project/%s" % self.module.params["name"], method="DELETE")
+                api_request(
+                    module=self.module,
+                    endpoint="project/%s" % self.module.params["name"],
+                    method="DELETE",
+                )

            self.module.exit_json(changed=True, before=facts, after={})


def main():
-    # Also allow the user to set values for fetch_url
-    argument_spec = url_argument_spec()
+    argument_spec = api_argument_spec()
    argument_spec.update(dict(
        state=dict(type='str', choices=['present', 'absent'], default='present'),
        name=dict(required=True, type='str'),
-        url=dict(required=True, type='str'),
-        api_version=dict(type='int', default=14),
-        token=dict(required=True, type='str', no_log=True),
    ))

+    argument_spec['api_token']['aliases'] = ['token']
+
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
@@ -98,7 +98,7 @@ _state_map = dict(


class SnapAlias(StateModuleHelper):
-    _RE_ALIAS_LIST = re.compile(r"^(?P<snap>[\w-]+)\s+(?P<alias>[\w-]+)\s+.*$")
+    _RE_ALIAS_LIST = re.compile(r"^(?P<snap>\S+)\s+(?P<alias>[\w-]+)\s+.*$")

    module = dict(
        argument_spec={

@@ -142,7 +142,10 @@ class SnapAlias(StateModuleHelper):
            return results

        with self.runner("state name", check_rc=True, output_process=process) as ctx:
-            return ctx.run(state="info")
+            aliases = ctx.run(state="info")
+            if self.verbosity >= 4:
+                self.vars.get_aliases_run_info = ctx.run_info
+            return aliases

    def _get_aliases_for(self, name):
        return self._get_aliases().get(name, [])
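The relaxed pattern matters because the first column of `snap aliases` output can contain a snap name qualified with an application, such as lxd.lxc, and the dot is not matched by [\w-]+. A quick check:

    import re

    line = "lxd.lxc    lxc    -"  # hypothetical `snap aliases` output row
    old = re.compile(r"^(?P<snap>[\w-]+)\s+(?P<alias>[\w-]+)\s+.*$")
    new = re.compile(r"^(?P<snap>\S+)\s+(?P<alias>[\w-]+)\s+.*$")

    print(old.match(line))                # None: the '.' breaks the old pattern
    print(new.match(line).group("snap"))  # 'lxd.lxc'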
@@ -218,7 +218,7 @@ EXAMPLES = """
  community.general.terraform:
    project_path: '{{ project_dir }}'
    state: present
-    camplex_vars: true
+    complex_vars: true
    variables:
      vm_name: "{{ inventory_hostname }}"
      vm_vcpus: 2
@@ -312,11 +312,11 @@ def preflight_validation(bin_path, project_path, version, variables_args=None, p


def _state_args(state_file):
-    if state_file and os.path.exists(state_file):
-        return ['-state', state_file]
-    if state_file and not os.path.exists(state_file):
-        module.fail_json(msg='Could not find state_file "{0}", check the path and try again.'.format(state_file))
-    return []
+    if not state_file:
+        return []
+    if not os.path.exists(state_file):
+        module.warn('Could not find state_file "{0}", the process will not destroy any resources, please check your state file path.'.format(state_file))
+    return ['-state', state_file]


def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths, workspace):
@@ -325,7 +325,7 @@ def init_plugins(bin_path, project_path, backend_config, backend_config_files, i
        for key, val in backend_config.items():
            command.extend([
                '-backend-config',
-                shlex_quote('{0}={1}'.format(key, val))
+                '{0}={1}'.format(key, val)
            ])
        if backend_config_files:
            for f in backend_config_files:
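Dropping shlex_quote is correct here because the command is executed as an argument list, not through a shell: with exec-style invocation there is nothing to unquote, so the added quotes would become a literal part of the backend setting. For example:

    import subprocess
    from shlex import quote as shlex_quote

    val = 'region=us east'  # hypothetical backend value containing a space
    print(shlex_quote('key=%s' % val))  # 'key=region=us east' -- quotes included
    # With a list argv the value is passed through verbatim, no quoting needed:
    subprocess.run(['printf', '%s\n', 'key=%s' % val])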
@@ -97,19 +97,9 @@ EXAMPLES = '''

RETURN = '''#'''

-HAVE_UNIVENTION = False
-HAVE_IPADDRESS = False
-try:
-    from univention.admin.handlers.dns import (
-        forward_zone,
-        reverse_zone,
-    )
-    HAVE_UNIVENTION = True
-except ImportError:
-    pass
-
from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.basic import missing_required_lib
+from ansible_collections.community.general.plugins.module_utils import deps
from ansible_collections.community.general.plugins.module_utils.univention_umc import (
    umc_module_for_add,
    umc_module_for_edit,
@@ -118,27 +108,26 @@ from ansible_collections.community.general.plugins.module_utils.univention_umc i
    config,
    uldap,
)
-try:
+
+with deps.declare("univention", msg="This module requires univention python bindings"):
+    from univention.admin.handlers.dns import (
+        forward_zone,
+        reverse_zone,
+    )
+
+with deps.declare("ipaddress"):
    import ipaddress
-    HAVE_IPADDRESS = True
-except ImportError:
-    pass


def main():
    module = AnsibleModule(
        argument_spec=dict(
-            type=dict(required=True,
-                      type='str'),
-            zone=dict(required=True,
-                      type='str'),
-            name=dict(required=True,
-                      type='str'),
-            data=dict(default={},
-                      type='dict'),
-            state=dict(default='present',
-                       choices=['present', 'absent'],
-                       type='str')
+            type=dict(required=True, type='str'),
+            zone=dict(required=True, type='str'),
+            name=dict(required=True, type='str'),
+            data=dict(default={}, type='dict'),
+            state=dict(default='present', choices=['present', 'absent'], type='str')
        ),
        supports_check_mode=True,
        required_if=([
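The deps helper used above replaces the HAVE_* flag pattern: deps.declare() wraps an import block and records an ImportError instead of raising it, and deps.validate() later fails the module with a proper message if the import did not succeed. Schematically (mirroring the diff; the AnsibleModule construction is elided):

    from ansible_collections.community.general.plugins.module_utils import deps

    with deps.declare("ipaddress"):
        import ipaddress  # an ImportError here is recorded, not raised

    def main():
        module = ...  # AnsibleModule(...)
        # Fails the module with a helpful message if the import failed:
        deps.validate(module, "ipaddress")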
@@ -146,8 +135,7 @@ def main():
        ])
    )

-    if not HAVE_UNIVENTION:
-        module.fail_json(msg="This module requires univention python bindings")
+    deps.validate(module, "univention")

    type = module.params['type']
    zone = module.params['zone']
@@ -159,8 +147,8 @@ def main():

    workname = name
    if type == 'ptr_record':
-        if not HAVE_IPADDRESS:
-            module.fail_json(msg=missing_required_lib('ipaddress'))
+        deps.validate(module, "ipaddress")
+
        try:
            if 'arpa' not in zone:
                raise Exception("Zone must be reversed zone for ptr_record. (e.g. 1.1.192.in-addr.arpa)")
@@ -196,7 +184,7 @@ def main():
            '(zoneName={0})'.format(zone),
            scope='domain',
        )
-        if len(so) == 0:
+        if not so == 0:
            raise Exception("Did not find zone '{0}' in Univention".format(zone))
        obj = umc_module_for_add('dns/{0}'.format(type), container, superordinate=so[0])
    else:
@@ -300,31 +300,35 @@ def main():
                prj_set = False
                break

-    if not prj_set and not module.check_mode:
-        cmd = "project -s"
-        rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint)
-        if rc != 0:
-            result["cmd"] = cmd
-            result["rc"] = rc
-            result["stdout"] = stdout
-            result["stderr"] = stderr
-            module.fail_json(
-                msg="Could not get quota realtime block report.", **result
-            )
+    if state == "present" and not prj_set:
+        if not module.check_mode:
+            cmd = "project -s %s" % name
+            rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint)
+            if rc != 0:
+                result["cmd"] = cmd
+                result["rc"] = rc
+                result["stdout"] = stdout
+                result["stderr"] = stderr
+                module.fail_json(
+                    msg="Could not get quota realtime block report.", **result
+                )

        result["changed"] = True

-    elif not prj_set and module.check_mode:
-        result["changed"] = True
+    elif state == "absent" and prj_set and name != quota_default:
+        if not module.check_mode:
+            cmd = "project -C %s" % name
+            rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint)
+            if rc != 0:
+                result["cmd"] = cmd
+                result["rc"] = rc
+                result["stdout"] = stdout
+                result["stderr"] = stderr
+                module.fail_json(
+                    msg="Failed to clear managed tree from project quota control.", **result
+                )

-    # Set limits
-    if state == "absent":
-        bhard = 0
-        bsoft = 0
-        ihard = 0
-        isoft = 0
-        rtbhard = 0
-        rtbsoft = 0
        result["changed"] = True

    current_bsoft, current_bhard = quota_report(
        module, xfs_quota_bin, mountpoint, name, quota_type, "b"
@@ -336,6 +340,23 @@ def main():
        module, xfs_quota_bin, mountpoint, name, quota_type, "rtb"
    )

+    # Set limits
+    if state == "absent":
+        bhard = 0
+        bsoft = 0
+        ihard = 0
+        isoft = 0
+        rtbhard = 0
+        rtbsoft = 0
+
+        # Ensure that a non-existing quota does not trigger a change
+        current_bsoft = current_bsoft if current_bsoft is not None else 0
+        current_bhard = current_bhard if current_bhard is not None else 0
+        current_isoft = current_isoft if current_isoft is not None else 0
+        current_ihard = current_ihard if current_ihard is not None else 0
+        current_rtbsoft = current_rtbsoft if current_rtbsoft is not None else 0
+        current_rtbhard = current_rtbhard if current_rtbhard is not None else 0
+
    result["xfs_quota"] = dict(
        bsoft=current_bsoft,
        bhard=current_bhard,
@@ -370,25 +391,23 @@ def main():
            limit.append("rtbhard=%s" % rtbhard)
            result["rtbhard"] = int(rtbhard)

-    if len(limit) > 0 and not module.check_mode:
-        if name == quota_default:
-            cmd = "limit %s -d %s" % (type_arg, " ".join(limit))
-        else:
-            cmd = "limit %s %s %s" % (type_arg, " ".join(limit), name)
+    if len(limit) > 0:
+        if not module.check_mode:
+            if name == quota_default:
+                cmd = "limit %s -d %s" % (type_arg, " ".join(limit))
+            else:
+                cmd = "limit %s %s %s" % (type_arg, " ".join(limit), name)

-        rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint)
-        if rc != 0:
-            result["cmd"] = cmd
-            result["rc"] = rc
-            result["stdout"] = stdout
-            result["stderr"] = stderr
-            module.fail_json(msg="Could not set limits.", **result)
+            rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint)
+            if rc != 0:
+                result["cmd"] = cmd
+                result["rc"] = rc
+                result["stdout"] = stdout
+                result["stderr"] = stderr
+                module.fail_json(msg="Could not set limits.", **result)

        result["changed"] = True

-    elif len(limit) > 0 and module.check_mode:
-        result["changed"] = True
-
    module.exit_json(**result)
@@ -324,10 +324,11 @@ def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None):
            m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
        else:
            return {}, rc, stdout, stderr
-    elif rc in [0, 106, 103]:
+    elif rc in [0, 102, 103, 106]:
        # zypper exit codes
        # 0: success
        # 106: signature verification failed
+        # 102: ZYPPER_EXIT_INF_REBOOT_NEEDED - Returned after a successful installation of a patch which requires reboot of computer.
        # 103: zypper was upgraded, run same command again
        if packages is None:
            firstrun = True
@@ -587,12 +588,12 @@ def main():
    elif state in ['installed', 'present', 'latest']:
        packages_changed, retvals = package_present(module, name, state == 'latest')

-        retvals['changed'] = retvals['rc'] == 0 and bool(packages_changed)
+        retvals['changed'] = retvals['rc'] in [0, 102] and bool(packages_changed)

        if module._diff:
            set_diff(module, retvals, packages_changed)

-        if retvals['rc'] != 0:
+        if retvals['rc'] not in [0, 102]:
            module.fail_json(msg="Zypper run failed.", **retvals)

        if not retvals['changed']:
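With exit code 102 (ZYPPER_EXIT_INF_REBOOT_NEEDED) now counted as success, the changed computation and the failure check above must share the same success set, otherwise a patch that requests a reboot would be reported as a failure. The policy in isolation:

    # 0 = success, 102 = success but reboot needed; anything else is a failure.
    SUCCESS_RCS = frozenset([0, 102])

    def interpret(rc, packages_changed):
        if rc not in SUCCESS_RCS:
            return {"failed": True, "changed": False}
        return {"failed": False, "changed": bool(packages_changed)}

    assert interpret(102, ["kernel-default"]) == {"failed": False, "changed": True}
    assert interpret(1, []) == {"failed": True, "changed": False}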
Some files were not shown because too many files have changed in this diff.