mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-04-28 09:26:44 +00:00
Compare commits
118 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e8f9f21be1 | ||
|
|
f1fee975ba | ||
|
|
d4e831f31d | ||
|
|
07d0de5640 | ||
|
|
c1309ceb8b | ||
|
|
00efbe6ea2 | ||
|
|
d18092a128 | ||
|
|
b783d025df | ||
|
|
113764215d | ||
|
|
ef8fb888cd | ||
|
|
8385d2eb39 | ||
|
|
de38d23bdc | ||
|
|
3cb9b0fa91 | ||
|
|
551e5e4bd5 | ||
|
|
c75711167f | ||
|
|
b279694779 | ||
|
|
625d22391f | ||
|
|
1b488b53f5 | ||
|
|
51648d5328 | ||
|
|
87aedc7bd6 | ||
|
|
1a0c9eb5e6 | ||
|
|
b862c0db49 | ||
|
|
adba23c223 | ||
|
|
7fa84e8ec7 | ||
|
|
14a86ed0ad | ||
|
|
dcfd0f47e6 | ||
|
|
481570d0e3 | ||
|
|
9254110b8b | ||
|
|
17c8e274dc | ||
|
|
30289c7a03 | ||
|
|
e8861cafa6 | ||
|
|
c47888a5f9 | ||
|
|
58ba101990 | ||
|
|
bf54291500 | ||
|
|
8f27ef76f5 | ||
|
|
61e82c50e4 | ||
|
|
dfbde55aeb | ||
|
|
24b6441580 | ||
|
|
4381ac1bf3 | ||
|
|
e83bb285b2 | ||
|
|
edd4637b9f | ||
|
|
eefdf5b58e | ||
|
|
39c39e3de1 | ||
|
|
50284d1292 | ||
|
|
1590892a56 | ||
|
|
f6722c142d | ||
|
|
417db583e7 | ||
|
|
aa3b53fb87 | ||
|
|
ffca7eaf52 | ||
|
|
5b9b98340b | ||
|
|
4be9bb1118 | ||
|
|
d50476cdab | ||
|
|
363e8662b0 | ||
|
|
5365dcef3c | ||
|
|
89accbfa2b | ||
|
|
63210f4fc4 | ||
|
|
01864514c2 | ||
|
|
418589e346 | ||
|
|
88fab247ca | ||
|
|
56edbfc539 | ||
|
|
c94fa6132d | ||
|
|
2fa17c32a3 | ||
|
|
926f627128 | ||
|
|
7c6f286df2 | ||
|
|
b6ed6787b5 | ||
|
|
94a350e72b | ||
|
|
46d454eae0 | ||
|
|
adfd73d7ed | ||
|
|
aa2a5d9578 | ||
|
|
0f300bddb9 | ||
|
|
3785b656d6 | ||
|
|
16499072ff | ||
|
|
cad6b30036 | ||
|
|
2df1126d27 | ||
|
|
0d5ec37249 | ||
|
|
7c04aaa48f | ||
|
|
80113063ac | ||
|
|
1b09e8168a | ||
|
|
aadd48461c | ||
|
|
d565a20013 | ||
|
|
c69fb82ee0 | ||
|
|
cffc3dad11 | ||
|
|
a27025946b | ||
|
|
1825feb652 | ||
|
|
0c2d1eda44 | ||
|
|
d617f6919f | ||
|
|
b17cc09b07 | ||
|
|
ee7f44b09b | ||
|
|
a357944fb0 | ||
|
|
5d7d973f6d | ||
|
|
f3a516b79d | ||
|
|
d4eaef2d83 | ||
|
|
235e55fa9f | ||
|
|
c3baaa8cfa | ||
|
|
d68f6fcfff | ||
|
|
70e4ae440c | ||
|
|
8b66bb9a02 | ||
|
|
76fbb50270 | ||
|
|
93971b292a | ||
|
|
724bba79d5 | ||
|
|
e44f43b4d2 | ||
|
|
f82422502b | ||
|
|
5588ce3741 | ||
|
|
719ecc9e85 | ||
|
|
1a801323a8 | ||
|
|
7ebb301930 | ||
|
|
fb5047b605 | ||
|
|
b7977b8fa9 | ||
|
|
bae1440425 | ||
|
|
04f3dd2b56 | ||
|
|
99e3965ece | ||
|
|
14625a214a | ||
|
|
3c067aa2c3 | ||
|
|
01004bd27b | ||
|
|
f8265ecc4e | ||
|
|
2e355bef9f | ||
|
|
e6f65634fe | ||
|
|
61314898ca |
@@ -73,6 +73,19 @@ stages:
|
||||
- test: 3
|
||||
- test: 4
|
||||
- test: extra
|
||||
- stage: Sanity_2_16
|
||||
displayName: Sanity 2.16
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Test {0}
|
||||
testFormat: 2.16/sanity/{0}
|
||||
targets:
|
||||
- test: 1
|
||||
- test: 2
|
||||
- test: 3
|
||||
- test: 4
|
||||
- stage: Sanity_2_15
|
||||
displayName: Sanity 2.15
|
||||
dependsOn: []
|
||||
@@ -99,19 +112,6 @@ stages:
|
||||
- test: 2
|
||||
- test: 3
|
||||
- test: 4
|
||||
- stage: Sanity_2_13
|
||||
displayName: Sanity 2.13
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Test {0}
|
||||
testFormat: 2.13/sanity/{0}
|
||||
targets:
|
||||
- test: 1
|
||||
- test: 2
|
||||
- test: 3
|
||||
- test: 4
|
||||
### Units
|
||||
- stage: Units_devel
|
||||
displayName: Units devel
|
||||
@@ -122,13 +122,23 @@ stages:
|
||||
nameFormat: Python {0}
|
||||
testFormat: devel/units/{0}/1
|
||||
targets:
|
||||
- test: 2.7
|
||||
- test: 3.6
|
||||
- test: 3.7
|
||||
- test: 3.8
|
||||
- test: 3.9
|
||||
- test: '3.10'
|
||||
- test: '3.11'
|
||||
- test: '3.12'
|
||||
- stage: Units_2_16
|
||||
displayName: Units 2.16
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.16/units/{0}/1
|
||||
targets:
|
||||
- test: 3.6
|
||||
- test: "3.11"
|
||||
- stage: Units_2_15
|
||||
displayName: Units 2.15
|
||||
dependsOn: []
|
||||
@@ -150,17 +160,6 @@ stages:
|
||||
testFormat: 2.14/units/{0}/1
|
||||
targets:
|
||||
- test: 3.9
|
||||
- stage: Units_2_13
|
||||
displayName: Units 2.13
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.13/units/{0}/1
|
||||
targets:
|
||||
- test: 2.7
|
||||
- test: 3.8
|
||||
|
||||
## Remote
|
||||
- stage: Remote_devel_extra_vms
|
||||
@@ -171,8 +170,8 @@ stages:
|
||||
parameters:
|
||||
testFormat: devel/{0}
|
||||
targets:
|
||||
- name: Alpine 3.17
|
||||
test: alpine/3.17
|
||||
- name: Alpine 3.18
|
||||
test: alpine/3.18
|
||||
# - name: Fedora 38
|
||||
# test: fedora/38
|
||||
- name: Ubuntu 22.04
|
||||
@@ -191,14 +190,30 @@ stages:
|
||||
test: macos/13.2
|
||||
- name: RHEL 9.2
|
||||
test: rhel/9.2
|
||||
- name: RHEL 8.8
|
||||
test: rhel/8.8
|
||||
- name: FreeBSD 13.2
|
||||
test: freebsd/13.2
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Remote_2_16
|
||||
displayName: Remote 2.16
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: 2.16/{0}
|
||||
targets:
|
||||
#- name: macOS 13.2
|
||||
# test: macos/13.2
|
||||
- name: RHEL 8.8
|
||||
test: rhel/8.8
|
||||
#- name: FreeBSD 13.2
|
||||
# test: freebsd/13.2
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Remote_2_15
|
||||
displayName: Remote 2.15
|
||||
dependsOn: []
|
||||
@@ -231,26 +246,10 @@ stages:
|
||||
targets:
|
||||
- name: RHEL 9.0
|
||||
test: rhel/9.0
|
||||
- name: FreeBSD 12.3
|
||||
test: freebsd/12.3
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Remote_2_13
|
||||
displayName: Remote 2.13
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: 2.13/{0}
|
||||
targets:
|
||||
- name: macOS 12.0
|
||||
test: macos/12.0
|
||||
- name: RHEL 8.5
|
||||
test: rhel/8.5
|
||||
- name: FreeBSD 13.0
|
||||
test: freebsd/13.0
|
||||
#- name: FreeBSD 12.4
|
||||
# test: freebsd/12.4
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
@@ -267,8 +266,6 @@ stages:
|
||||
targets:
|
||||
- name: Fedora 38
|
||||
test: fedora38
|
||||
- name: openSUSE 15
|
||||
test: opensuse15
|
||||
- name: Ubuntu 20.04
|
||||
test: ubuntu2004
|
||||
- name: Ubuntu 22.04
|
||||
@@ -279,6 +276,20 @@ stages:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Docker_2_16
|
||||
displayName: Docker 2.16
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: 2.16/linux/{0}
|
||||
targets:
|
||||
- name: openSUSE 15
|
||||
test: opensuse15
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Docker_2_15
|
||||
displayName: Docker 2.15
|
||||
dependsOn: []
|
||||
@@ -309,24 +320,6 @@ stages:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Docker_2_13
|
||||
displayName: Docker 2.13
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: 2.13/linux/{0}
|
||||
targets:
|
||||
- name: Fedora 35
|
||||
test: fedora35
|
||||
- name: openSUSE 15 py2
|
||||
test: opensuse15py2
|
||||
- name: Alpine 3
|
||||
test: alpine3
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
|
||||
### Community Docker
|
||||
- stage: Docker_community_devel
|
||||
@@ -343,8 +336,6 @@ stages:
|
||||
test: debian-bookworm/3.11
|
||||
- name: ArchLinux
|
||||
test: archlinux/3.11
|
||||
- name: CentOS Stream 8
|
||||
test: centos-stream8/3.9
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
@@ -360,7 +351,19 @@ stages:
|
||||
nameFormat: Python {0}
|
||||
testFormat: devel/generic/{0}/1
|
||||
targets:
|
||||
- test: 2.7
|
||||
- test: '3.7'
|
||||
- test: '3.12'
|
||||
- stage: Generic_2_16
|
||||
displayName: Generic 2.16
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.16/generic/{0}/1
|
||||
targets:
|
||||
- test: '2.7'
|
||||
- test: '3.6'
|
||||
- test: '3.11'
|
||||
- stage: Generic_2_15
|
||||
displayName: Generic 2.15
|
||||
@@ -371,7 +374,7 @@ stages:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.15/generic/{0}/1
|
||||
targets:
|
||||
- test: 3.9
|
||||
- test: '3.9'
|
||||
- stage: Generic_2_14
|
||||
displayName: Generic 2.14
|
||||
dependsOn: []
|
||||
@@ -382,42 +385,32 @@ stages:
|
||||
testFormat: 2.14/generic/{0}/1
|
||||
targets:
|
||||
- test: '3.10'
|
||||
- stage: Generic_2_13
|
||||
displayName: Generic 2.13
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.13/generic/{0}/1
|
||||
targets:
|
||||
- test: 3.9
|
||||
|
||||
- stage: Summary
|
||||
condition: succeededOrFailed()
|
||||
dependsOn:
|
||||
- Sanity_devel
|
||||
- Sanity_2_13
|
||||
- Sanity_2_14
|
||||
- Sanity_2_16
|
||||
- Sanity_2_15
|
||||
- Sanity_2_14
|
||||
- Units_devel
|
||||
- Units_2_13
|
||||
- Units_2_14
|
||||
- Units_2_16
|
||||
- Units_2_15
|
||||
- Units_2_14
|
||||
- Remote_devel_extra_vms
|
||||
- Remote_devel
|
||||
- Remote_2_13
|
||||
- Remote_2_14
|
||||
- Remote_2_16
|
||||
- Remote_2_15
|
||||
- Remote_2_14
|
||||
- Docker_devel
|
||||
- Docker_2_13
|
||||
- Docker_2_14
|
||||
- Docker_2_16
|
||||
- Docker_2_15
|
||||
- Docker_2_14
|
||||
- Docker_community_devel
|
||||
# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
|
||||
# - Generic_devel
|
||||
# - Generic_2_13
|
||||
# - Generic_2_14
|
||||
# - Generic_2_16
|
||||
# - Generic_2_15
|
||||
# - Generic_2_14
|
||||
jobs:
|
||||
- template: templates/coverage.yml
|
||||
|
||||
30
.github/BOTMETA.yml
vendored
30
.github/BOTMETA.yml
vendored
@@ -249,9 +249,11 @@ files:
|
||||
labels: onepassword
|
||||
maintainers: samdoran
|
||||
$lookups/onepassword.py:
|
||||
maintainers: azenk scottsb
|
||||
ignore: scottsb
|
||||
maintainers: azenk
|
||||
$lookups/onepassword_raw.py:
|
||||
maintainers: azenk scottsb
|
||||
ignore: scottsb
|
||||
maintainers: azenk
|
||||
$lookups/passwordstore.py: {}
|
||||
$lookups/random_pet.py:
|
||||
maintainers: Akasurde
|
||||
@@ -276,6 +278,8 @@ files:
|
||||
$module_utils/gconftool2.py:
|
||||
labels: gconftool2
|
||||
maintainers: russoz
|
||||
$module_utils/gio_mime.py:
|
||||
maintainers: russoz
|
||||
$module_utils/gitlab.py:
|
||||
keywords: gitlab source_control
|
||||
labels: gitlab
|
||||
@@ -328,6 +332,9 @@ files:
|
||||
$module_utils/scaleway.py:
|
||||
labels: cloud scaleway
|
||||
maintainers: $team_scaleway
|
||||
$module_utils/snap.py:
|
||||
labels: snap
|
||||
maintainers: russoz
|
||||
$module_utils/ssh.py:
|
||||
maintainers: russoz
|
||||
$module_utils/storage/hpe3par/hpe3par.py:
|
||||
@@ -512,6 +519,8 @@ files:
|
||||
$modules/gem.py:
|
||||
labels: gem
|
||||
maintainers: $team_ansible_core johanwiren
|
||||
$modules/gio_mime.py:
|
||||
maintainers: russoz
|
||||
$modules/git_config.py:
|
||||
maintainers: djmattyg007 mgedmin
|
||||
$modules/github_:
|
||||
@@ -669,7 +678,9 @@ files:
|
||||
labels: jboss
|
||||
maintainers: $team_jboss jhoekx
|
||||
$modules/jenkins_build.py:
|
||||
maintainers: brettmilford unnecessary-username
|
||||
maintainers: brettmilford unnecessary-username juanmcasanova
|
||||
$modules/jenkins_build_info.py:
|
||||
maintainers: juanmcasanova
|
||||
$modules/jenkins_job.py:
|
||||
maintainers: sermilrod
|
||||
$modules/jenkins_job_info.py:
|
||||
@@ -696,6 +707,8 @@ files:
|
||||
maintainers: mattock
|
||||
$modules/keycloak_authz_permission.py:
|
||||
maintainers: mattock
|
||||
$modules/keycloak_authz_custom_policy.py:
|
||||
maintainers: mattock
|
||||
$modules/keycloak_authz_permission_info.py:
|
||||
maintainers: mattock
|
||||
$modules/keycloak_client_rolemapping.py:
|
||||
@@ -716,6 +729,8 @@ files:
|
||||
maintainers: kris2kris
|
||||
$modules/keycloak_realm_info.py:
|
||||
maintainers: fynncfchen
|
||||
$modules/keycloak_realm_key.py:
|
||||
maintainers: mattock
|
||||
$modules/keycloak_role.py:
|
||||
maintainers: laurpaum
|
||||
$modules/keycloak_user.py:
|
||||
@@ -937,7 +952,7 @@ files:
|
||||
labels: pagerduty
|
||||
maintainers: suprememoocow thaumos
|
||||
$modules/pagerduty_alert.py:
|
||||
maintainers: ApsOps
|
||||
maintainers: ApsOps xshen1
|
||||
$modules/pagerduty_change.py:
|
||||
maintainers: adamvaughan
|
||||
$modules/pagerduty_user.py:
|
||||
@@ -980,6 +995,9 @@ files:
|
||||
maintainers: $team_solaris dermute
|
||||
$modules/pmem.py:
|
||||
maintainers: mizumm
|
||||
$modules/pnpm.py:
|
||||
ignore: chrishoffman
|
||||
maintainers: aretrosen
|
||||
$modules/portage.py:
|
||||
ignore: sayap
|
||||
labels: portage
|
||||
@@ -1195,6 +1213,8 @@ files:
|
||||
ignore: ryansb
|
||||
$modules/shutdown.py:
|
||||
maintainers: nitzmahone samdoran aminvakil
|
||||
$modules/simpleinit_msb.py:
|
||||
maintainers: vaygr
|
||||
$modules/sl_vm.py:
|
||||
maintainers: mcltn
|
||||
$modules/slack.py:
|
||||
@@ -1207,7 +1227,7 @@ files:
|
||||
maintainers: $team_solaris
|
||||
$modules/snap.py:
|
||||
labels: snap
|
||||
maintainers: angristan vcarceler
|
||||
maintainers: angristan vcarceler russoz
|
||||
$modules/snap_alias.py:
|
||||
labels: snap
|
||||
maintainers: russoz
|
||||
|
||||
47
.github/workflows/ansible-test.yml
vendored
47
.github/workflows/ansible-test.yml
vendored
@@ -31,6 +31,7 @@ jobs:
|
||||
ansible:
|
||||
- '2.11'
|
||||
- '2.12'
|
||||
- '2.13'
|
||||
# Ansible-test on various stable branches does not yet work well with cgroups v2.
|
||||
# Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
|
||||
# image for these stable branches. The list of branches where this is necessary will
|
||||
@@ -80,6 +81,10 @@ jobs:
|
||||
python: '2.6'
|
||||
- ansible: '2.12'
|
||||
python: '3.8'
|
||||
- ansible: '2.13'
|
||||
python: '2.7'
|
||||
- ansible: '2.13'
|
||||
python: '3.8'
|
||||
|
||||
steps:
|
||||
- name: >-
|
||||
@@ -211,6 +216,48 @@ jobs:
|
||||
# docker: default
|
||||
# python: '3.8'
|
||||
# target: azp/generic/1/
|
||||
# 2.13
|
||||
- ansible: '2.13'
|
||||
docker: fedora35
|
||||
python: ''
|
||||
target: azp/posix/1/
|
||||
- ansible: '2.13'
|
||||
docker: fedora35
|
||||
python: ''
|
||||
target: azp/posix/2/
|
||||
- ansible: '2.13'
|
||||
docker: fedora35
|
||||
python: ''
|
||||
target: azp/posix/3/
|
||||
- ansible: '2.13'
|
||||
docker: opensuse15py2
|
||||
python: ''
|
||||
target: azp/posix/1/
|
||||
- ansible: '2.13'
|
||||
docker: opensuse15py2
|
||||
python: ''
|
||||
target: azp/posix/2/
|
||||
- ansible: '2.13'
|
||||
docker: opensuse15py2
|
||||
python: ''
|
||||
target: azp/posix/3/
|
||||
- ansible: '2.13'
|
||||
docker: alpine3
|
||||
python: ''
|
||||
target: azp/posix/1/
|
||||
- ansible: '2.13'
|
||||
docker: alpine3
|
||||
python: ''
|
||||
target: azp/posix/2/
|
||||
- ansible: '2.13'
|
||||
docker: alpine3
|
||||
python: ''
|
||||
target: azp/posix/3/
|
||||
# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
|
||||
# - ansible: '2.13'
|
||||
# docker: default
|
||||
# python: '3.9'
|
||||
# target: azp/generic/1/
|
||||
|
||||
steps:
|
||||
- name: >-
|
||||
|
||||
2
.github/workflows/codeql-analysis.yml
vendored
2
.github/workflows/codeql-analysis.yml
vendored
@@ -24,7 +24,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
|
||||
# Initializes the CodeQL tools for scanning.
|
||||
- name: Initialize CodeQL
|
||||
|
||||
2
.github/workflows/reuse.yml
vendored
2
.github/workflows/reuse.yml
vendored
@@ -22,7 +22,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha || '' }}
|
||||
|
||||
|
||||
153
CHANGELOG.rst
153
CHANGELOG.rst
@@ -6,6 +6,157 @@ Community General Release Notes
|
||||
|
||||
This changelog describes changes after version 6.0.0.
|
||||
|
||||
v7.5.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular bugfix and feature release.
|
||||
|
||||
Please note that this is the last minor 7.x.0 release. Further releases
|
||||
with major version 7 will be bugfix releases 7.5.y.
|
||||
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- cargo - add option ``executable``, which allows user to specify path to the cargo binary (https://github.com/ansible-collections/community.general/pull/7352).
|
||||
- cargo - add option ``locked`` which allows user to specify install the locked version of dependency instead of latest compatible version (https://github.com/ansible-collections/community.general/pull/6134).
|
||||
- dig lookup plugin - add TCP option to enable the use of TCP connection during DNS lookup (https://github.com/ansible-collections/community.general/pull/7343).
|
||||
- gitlab_group - add option ``force_delete`` (default: false) which allows delete group even if projects exists in it (https://github.com/ansible-collections/community.general/pull/7364).
|
||||
- ini_file - add ``ignore_spaces`` option (https://github.com/ansible-collections/community.general/pull/7273).
|
||||
- newrelic_deployment - add option ``app_name_exact_match``, which filters results for the exact app_name provided (https://github.com/ansible-collections/community.general/pull/7355).
|
||||
- onepassword lookup plugin - introduce ``account_id`` option which allows specifying which account to use (https://github.com/ansible-collections/community.general/pull/7308).
|
||||
- onepassword_raw lookup plugin - introduce ``account_id`` option which allows specifying which account to use (https://github.com/ansible-collections/community.general/pull/7308).
|
||||
- parted - on resize, use ``--fix`` option if available (https://github.com/ansible-collections/community.general/pull/7304).
|
||||
- pnpm - set correct version when state is latest or version is not mentioned. Resolves previous idempotency problem (https://github.com/ansible-collections/community.general/pull/7339).
|
||||
- proxmox - add ``vmid`` (and ``taskid`` when possible) to return values (https://github.com/ansible-collections/community.general/pull/7263).
|
||||
- random_string - added new ``ignore_similar_chars`` and ``similar_chars`` option to ignore certain chars (https://github.com/ansible-collections/community.general/pull/7242).
|
||||
- redfish_command - add new option ``update_oem_params`` for the ``MultipartHTTPPushUpdate`` command (https://github.com/ansible-collections/community.general/issues/7331).
|
||||
- redfish_config - add ``CreateVolume`` command to allow creation of volumes on servers (https://github.com/ansible-collections/community.general/pull/6813).
|
||||
- redfish_config - adding ``SetSecureBoot`` command (https://github.com/ansible-collections/community.general/pull/7129).
|
||||
- redfish_info - add support for ``GetBiosRegistries`` command (https://github.com/ansible-collections/community.general/pull/7144).
|
||||
- redfish_info - adds ``LinkStatus`` to NIC inventory (https://github.com/ansible-collections/community.general/pull/7318).
|
||||
- redis_info - refactor the redis_info module to use the redis module_utils enabling to pass TLS parameters to the Redis client (https://github.com/ansible-collections/community.general/pull/7267).
|
||||
- supervisorctl - allow to stop matching running processes before removing them with ``stop_before_removing=true`` (https://github.com/ansible-collections/community.general/pull/7284).
|
||||
|
||||
Deprecated Features
|
||||
-------------------
|
||||
|
||||
- The next major release, community.general 8.0.0, will drop support for ansible-core 2.11 and 2.12, which have been End of Life for some time now. This means that this collection no longer supports Python 2.6 on the target. Individual content might still work with unsupported ansible-core versions, but that can change at any time. Also please note that from now on, for every new major community.general release, we will drop support for all ansible-core versions that have been End of Life for more than a few weeks on the date of the major release (https://github.com/ansible-community/community-topics/discussions/271, https://github.com/ansible-collections/community.general/pull/7259).
|
||||
- redfish_info, redfish_config, redfish_command - the default value ``10`` for the ``timeout`` option is deprecated and will change to ``60`` in community.general 9.0.0 (https://github.com/ansible-collections/community.general/pull/7295).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- gitlab_group_variable - deleted all variables when used with ``purge=true`` due to missing ``raw`` property in KNOWN attributes (https://github.com/ansible-collections/community.general/issues/7250).
|
||||
- gitlab_project_variable - deleted all variables when used with ``purge=true`` due to missing ``raw`` property in KNOWN attributes (https://github.com/ansible-collections/community.general/issues/7250).
|
||||
- ldap_search - fix string normalization and the ``base64_attributes`` option on Python 3 (https://github.com/ansible-collections/community.general/issues/5704, https://github.com/ansible-collections/community.general/pull/7264).
|
||||
- lxc connection plugin - properly evaluate options (https://github.com/ansible-collections/community.general/pull/7369).
|
||||
- mail - skip headers containing equals characters due to missing ``maxsplit`` on header key/value parsing (https://github.com/ansible-collections/community.general/pull/7303).
|
||||
- nmap inventory plugin - fix ``get_option`` calls (https://github.com/ansible-collections/community.general/pull/7323).
|
||||
- onepassword - fix KeyError exception when trying to access value of a field that is not filled out in OnePassword item (https://github.com/ansible-collections/community.general/pull/7241).
|
||||
- snap - change the change detection mechanism from "parsing installation" to "comparing end state with initial state" (https://github.com/ansible-collections/community.general/pull/7340, https://github.com/ansible-collections/community.general/issues/7265).
|
||||
- terraform - prevents ``-backend-config`` option double encapsulating with ``shlex_quote`` function. (https://github.com/ansible-collections/community.general/pull/7301).
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
- consul_role - Manipulate Consul roles
|
||||
- gio_mime - Set default handler for MIME type, for applications using Gnome GIO
|
||||
- keycloak_authz_custom_policy - Allows administration of Keycloak client custom Javascript policies via Keycloak API
|
||||
- keycloak_realm_key - Allows administration of Keycloak realm keys via Keycloak API
|
||||
- simpleinit_msb - Manage services on Source Mage GNU/Linux
|
||||
|
||||
v7.4.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Bugfix and feature release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- cobbler inventory plugin - add ``exclude_mgmt_classes`` and ``include_mgmt_classes`` options to exclude or include hosts based on management classes (https://github.com/ansible-collections/community.general/pull/7184).
|
||||
- cpanm - minor refactor when creating the ``CmdRunner`` object (https://github.com/ansible-collections/community.general/pull/7231).
|
||||
- gitlab_group_variable - add support for ``raw`` variables suboption (https://github.com/ansible-collections/community.general/pull/7132).
|
||||
- gitlab_project_variable - add support for ``raw`` variables suboption (https://github.com/ansible-collections/community.general/pull/7132).
|
||||
- jenkins_build - add new ``detach`` option, which allows the module to exit successfully as long as the build is created (default functionality is still waiting for the build to end before exiting) (https://github.com/ansible-collections/community.general/pull/7204).
|
||||
- jenkins_build - add new ``time_between_checks`` option, which allows to configure the wait time between requests to the Jenkins server (https://github.com/ansible-collections/community.general/pull/7204).
|
||||
- make - allows ``params`` to be used without value (https://github.com/ansible-collections/community.general/pull/7180).
|
||||
- nmap inventory plugin - now has a ``use_arp_ping`` option to allow the user to disable the default ARP ping query for a more reliable form (https://github.com/ansible-collections/community.general/pull/7119).
|
||||
- pagerduty - adds in option to use v2 API for creating pagerduty incidents (https://github.com/ansible-collections/community.general/issues/6151)
|
||||
- pritunl module utils - ensure ``validate_certs`` parameter is honoured in all methods (https://github.com/ansible-collections/community.general/pull/7156).
|
||||
- redfish_info - report ``Id`` in the output of ``GetManagerInventory`` (https://github.com/ansible-collections/community.general/pull/7140).
|
||||
- redfish_utils module utils - support ``Volumes`` in response for ``GetDiskInventory`` (https://github.com/ansible-collections/community.general/pull/6819).
|
||||
- unixy callback plugin - add support for ``check_mode_markers`` option (https://github.com/ansible-collections/community.general/pull/7179).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- CmdRunner module utils - does not attempt to resolve path if executable is a relative or absolute path (https://github.com/ansible-collections/community.general/pull/7200).
|
||||
- nmap inventory plugin - now uses ``get_option`` in all cases to get its configuration information (https://github.com/ansible-collections/community.general/pull/7119).
|
||||
- nsupdate - fix a possible ``list index out of range`` exception (https://github.com/ansible-collections/community.general/issues/836).
|
||||
- oci_utils module util - fix inappropriate logical comparison expressions and makes them simpler. The previous checks had logical short circuits (https://github.com/ansible-collections/community.general/pull/7125).
|
||||
- pritunl module utils - fix incorrect URL parameter for orgnization add method (https://github.com/ansible-collections/community.general/pull/7161).
|
||||
- snap - an exception was being raised when snap list was empty (https://github.com/ansible-collections/community.general/pull/7124, https://github.com/ansible-collections/community.general/issues/7120).
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
- jenkins_build_info - Get information about Jenkins builds
|
||||
- pnpm - Manage node.js packages with pnpm
|
||||
|
||||
v7.3.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Feature and bugfix release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- chroot connection plugin - add ``disable_root_check`` option (https://github.com/ansible-collections/community.general/pull/7099).
|
||||
- ejabberd_user - module now using ``CmdRunner`` to execute external command (https://github.com/ansible-collections/community.general/pull/7075).
|
||||
- ipa_config - add module parameters to manage FreeIPA user and group objectclasses (https://github.com/ansible-collections/community.general/pull/7019).
|
||||
- ipa_config - adds ``idp`` choice to ``ipauserauthtype`` parameter's choices (https://github.com/ansible-collections/community.general/pull/7051).
|
||||
- npm - module now using ``CmdRunner`` to execute external commands (https://github.com/ansible-collections/community.general/pull/6989).
|
||||
- proxmox_kvm - enabled force restart of VM, bringing the ``force`` parameter functionality in line with what is described in the docs (https://github.com/ansible-collections/community.general/pull/6914).
|
||||
- proxmox_vm_info - ``node`` parameter is no longer required. Information can be obtained for the whole cluster (https://github.com/ansible-collections/community.general/pull/6976).
|
||||
- proxmox_vm_info - non-existing provided by name/vmid VM would return empty results instead of failing (https://github.com/ansible-collections/community.general/pull/7049).
|
||||
- redfish_config - add ``DeleteAllVolumes`` command to allow deletion of all volumes on servers (https://github.com/ansible-collections/community.general/pull/6814).
|
||||
- redfish_utils - use ``Controllers`` key in redfish data to obtain Storage controllers properties (https://github.com/ansible-collections/community.general/pull/7081).
|
||||
- redfish_utils module utils - add support for ``PowerCycle`` reset type for ``redfish_command`` responses feature (https://github.com/ansible-collections/community.general/issues/7083).
|
||||
- redfish_utils module utils - add support for following ``@odata.nextLink`` pagination in ``software_inventory`` responses feature (https://github.com/ansible-collections/community.general/pull/7020).
|
||||
- shutdown - use ``shutdown -p ...`` with FreeBSD to halt and power off machine (https://github.com/ansible-collections/community.general/pull/7102).
|
||||
- sorcery - add grimoire (repository) management support (https://github.com/ansible-collections/community.general/pull/7012).
|
||||
|
||||
Deprecated Features
|
||||
-------------------
|
||||
|
||||
- ejabberd_user - deprecate the parameter ``logging`` in favour of producing more detailed information in the module output (https://github.com/ansible-collections/community.general/pull/7043).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- bitwarden lookup plugin - the plugin made assumptions about the structure of a Bitwarden JSON object which may have been broken by an update in the Bitwarden API. Remove assumptions, and allow queries for general fields such as ``notes`` (https://github.com/ansible-collections/community.general/pull/7061).
|
||||
- ejabberd_user - module was failing to detect whether user was already created and/or password was changed (https://github.com/ansible-collections/community.general/pull/7033).
|
||||
- keycloak module util - fix missing ``http_agent``, ``timeout``, and ``validate_certs`` ``open_url()`` parameters (https://github.com/ansible-collections/community.general/pull/7067).
|
||||
- keycloak_client inventory plugin - fix missing client secret (https://github.com/ansible-collections/community.general/pull/6931).
|
||||
- lvol - add support for percentage of origin size specification when creating snapshot volumes (https://github.com/ansible-collections/community.general/issues/1630, https://github.com/ansible-collections/community.general/pull/7053).
|
||||
- lxc connection plugin - now handles ``remote_addr`` defaulting to ``inventory_hostname`` correctly (https://github.com/ansible-collections/community.general/pull/7104).
|
||||
- oci_utils module utils - avoid direct type comparisons (https://github.com/ansible-collections/community.general/pull/7085).
|
||||
- proxmox_user_info - avoid direct type comparisons (https://github.com/ansible-collections/community.general/pull/7085).
|
||||
- snap - fix crash when multiple snaps are specified and one has ``---`` in its description (https://github.com/ansible-collections/community.general/pull/7046).
|
||||
- sorcery - fix interruption of the multi-stage process (https://github.com/ansible-collections/community.general/pull/7012).
|
||||
- sorcery - fix queue generation before the whole system rebuild (https://github.com/ansible-collections/community.general/pull/7012).
|
||||
- sorcery - latest state no longer triggers update_cache (https://github.com/ansible-collections/community.general/pull/7012).
|
||||
|
||||
v7.2.1
|
||||
======
|
||||
|
||||
@@ -422,7 +573,7 @@ Deprecated Features
|
||||
we have not heard about anyone using them in those setups. Hence, these
|
||||
modules are deprecated, and will be removed in community.general 10.0.0
|
||||
in case there are no reports about being still useful, and potentially
|
||||
noone that steps up to maintain them
|
||||
no one that steps up to maintain them
|
||||
(https://github.com/ansible-collections/community.general/pull/6493).
|
||||
|
||||
Removed Features (previously deprecated)
|
||||
|
||||
@@ -24,7 +24,7 @@ If you encounter abusive behavior violating the [Ansible Code of Conduct](https:
|
||||
|
||||
## Tested with Ansible
|
||||
|
||||
Tested with the current ansible-core 2.11, ansible-core 2.12, ansible-core 2.13, ansible-core 2.14, ansible-core 2.15 releases and the current development version of ansible-core. Ansible-core versions before 2.11.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases.
|
||||
Tested with the current ansible-core 2.11, ansible-core 2.12, ansible-core 2.13, ansible-core 2.14, ansible-core 2.15, ansible-core 2.16 releases and the current development version of ansible-core. Ansible-core versions before 2.11.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases.
|
||||
|
||||
Parts of this collection will not work with ansible-core 2.11 on Python 3.12+.
|
||||
|
||||
@@ -34,13 +34,13 @@ Some modules and plugins require external libraries. Please check the requiremen
|
||||
|
||||
## Included content
|
||||
|
||||
Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/community/general) or the [documentation on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
|
||||
Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/ui/repo/published/community/general/) or the [documentation on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
|
||||
|
||||
## Using this collection
|
||||
|
||||
This collection is shipped with the Ansible package. So if you have it installed, no more action is required.
|
||||
|
||||
If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/community/general) manually with the `ansible-galaxy` command-line tool:
|
||||
If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/ui/repo/published/community/general/) manually with the `ansible-galaxy` command-line tool:
|
||||
|
||||
ansible-galaxy collection install community.general
|
||||
|
||||
@@ -57,7 +57,7 @@ Note that if you install the collection manually, it will not be upgraded automa
|
||||
ansible-galaxy collection install community.general --upgrade
|
||||
```
|
||||
|
||||
You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/community/general):
|
||||
You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/ui/repo/published/community/general/):
|
||||
|
||||
```bash
|
||||
ansible-galaxy collection install community.general:==X.Y.Z
|
||||
|
||||
@@ -331,7 +331,7 @@ releases:
|
||||
|
||||
in case there are no reports about being still useful, and potentially
|
||||
|
||||
noone that steps up to maintain them
|
||||
no one that steps up to maintain them
|
||||
|
||||
(https://github.com/ansible-collections/community.general/pull/6493).
|
||||
|
||||
@@ -1295,3 +1295,269 @@ releases:
|
||||
- 6983-rundeck-fix-typerrror-on-404-api-response.yml
|
||||
- 7.2.1.yml
|
||||
release_date: '2023-07-31'
|
||||
7.3.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- bitwarden lookup plugin - the plugin made assumptions about the structure
|
||||
of a Bitwarden JSON object which may have been broken by an update in the
|
||||
Bitwarden API. Remove assumptions, and allow queries for general fields such
|
||||
as ``notes`` (https://github.com/ansible-collections/community.general/pull/7061).
|
||||
- ejabberd_user - module was failing to detect whether user was already created
|
||||
and/or password was changed (https://github.com/ansible-collections/community.general/pull/7033).
|
||||
- keycloak module util - fix missing ``http_agent``, ``timeout``, and ``validate_certs``
|
||||
``open_url()`` parameters (https://github.com/ansible-collections/community.general/pull/7067).
|
||||
- keycloak_client inventory plugin - fix missing client secret (https://github.com/ansible-collections/community.general/pull/6931).
|
||||
- lvol - add support for percentage of origin size specification when creating
|
||||
snapshot volumes (https://github.com/ansible-collections/community.general/issues/1630,
|
||||
https://github.com/ansible-collections/community.general/pull/7053).
|
||||
- lxc connection plugin - now handles ``remote_addr`` defaulting to ``inventory_hostname``
|
||||
correctly (https://github.com/ansible-collections/community.general/pull/7104).
|
||||
- oci_utils module utils - avoid direct type comparisons (https://github.com/ansible-collections/community.general/pull/7085).
|
||||
- proxmox_user_info - avoid direct type comparisons (https://github.com/ansible-collections/community.general/pull/7085).
|
||||
- snap - fix crash when multiple snaps are specified and one has ``---`` in
|
||||
its description (https://github.com/ansible-collections/community.general/pull/7046).
|
||||
- sorcery - fix interruption of the multi-stage process (https://github.com/ansible-collections/community.general/pull/7012).
|
||||
- sorcery - fix queue generation before the whole system rebuild (https://github.com/ansible-collections/community.general/pull/7012).
|
||||
- sorcery - latest state no longer triggers update_cache (https://github.com/ansible-collections/community.general/pull/7012).
|
||||
deprecated_features:
|
||||
- ejabberd_user - deprecate the parameter ``logging`` in favour of producing
|
||||
more detailed information in the module output (https://github.com/ansible-collections/community.general/pull/7043).
|
||||
minor_changes:
|
||||
- chroot connection plugin - add ``disable_root_check`` option (https://github.com/ansible-collections/community.general/pull/7099).
|
||||
- ejabberd_user - module now using ``CmdRunner`` to execute external command
|
||||
(https://github.com/ansible-collections/community.general/pull/7075).
|
||||
- ipa_config - add module parameters to manage FreeIPA user and group objectclasses
|
||||
(https://github.com/ansible-collections/community.general/pull/7019).
|
||||
- ipa_config - adds ``idp`` choice to ``ipauserauthtype`` parameter's choices
|
||||
(https://github.com/ansible-collections/community.general/pull/7051).
|
||||
- npm - module now using ``CmdRunner`` to execute external commands (https://github.com/ansible-collections/community.general/pull/6989).
|
||||
- proxmox_kvm - enabled force restart of VM, bringing the ``force`` parameter
|
||||
functionality in line with what is described in the docs (https://github.com/ansible-collections/community.general/pull/6914).
|
||||
- proxmox_vm_info - ``node`` parameter is no longer required. Information can
|
||||
be obtained for the whole cluster (https://github.com/ansible-collections/community.general/pull/6976).
|
||||
- proxmox_vm_info - a non-existing VM provided by name/vmid would return empty
|
||||
results instead of failing (https://github.com/ansible-collections/community.general/pull/7049).
|
||||
- redfish_config - add ``DeleteAllVolumes`` command to allow deletion of all
|
||||
volumes on servers (https://github.com/ansible-collections/community.general/pull/6814).
|
||||
- redfish_utils - use ``Controllers`` key in redfish data to obtain Storage
|
||||
controllers properties (https://github.com/ansible-collections/community.general/pull/7081).
|
||||
- redfish_utils module utils - add support for ``PowerCycle`` reset type for
|
||||
``redfish_command`` responses feature (https://github.com/ansible-collections/community.general/issues/7083).
|
||||
- redfish_utils module utils - add support for following ``@odata.nextLink``
|
||||
pagination in ``software_inventory`` responses feature (https://github.com/ansible-collections/community.general/pull/7020).
|
||||
- shutdown - use ``shutdown -p ...`` with FreeBSD to halt and power off machine
|
||||
(https://github.com/ansible-collections/community.general/pull/7102).
|
||||
- sorcery - add grimoire (repository) management support (https://github.com/ansible-collections/community.general/pull/7012).
|
||||
release_summary: Feature and bugfix release.
|
||||
fragments:
|
||||
- 6814-redfish-config-add-delete-all-volumes.yml
|
||||
- 6914-proxmox_kvm-enable-force-restart.yml
|
||||
- 6931-keycloak_client-inventory-bugfix.yml
|
||||
- 6976-proxmox-vm-info-not-require-node.yml
|
||||
- 6989-npm-cmdrunner.yml
|
||||
- 7.3.0.yml
|
||||
- 7012-sorcery-grimoire-mgmt.yml
|
||||
- 7019-ipa_config-user-and-group-objectclasses.yml
|
||||
- 7020-redfish-utils-pagination.yml
|
||||
- 7033-ejabberd-user-bugs.yml
|
||||
- 7043-ejabberd-user-deprecate-logging.yml
|
||||
- 7046-snap-newline-before-separator.yml
|
||||
- 7049-proxmox-vm-info-empty-results.yml
|
||||
- 7051-ipa-config-new-choice-idp-to-ipauserauthtype.yml
|
||||
- 7061-fix-bitwarden-get_field.yml
|
||||
- 7067-keycloak-api-paramerter-fix.yml
|
||||
- 7075-ejabberd-user-cmdrunner.yml
|
||||
- 7081-redfish-utils-fix-for-storagecontrollers-deprecated-key.yaml
|
||||
- 7085-sanity.yml
|
||||
- 7099-chroot-disable-root-check-option.yml
|
||||
- 7102-freebsd-shutdown-p.yml
|
||||
- 7104_fix_lxc_remoteaddr_default.yml
|
||||
- 7113-redfish-utils-power-cycle.yml
|
||||
- lvol-pct-of-origin.yml
|
||||
release_date: '2023-08-15'
|
||||
7.4.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- CmdRunner module utils - does not attempt to resolve path if executable is
|
||||
a relative or absolute path (https://github.com/ansible-collections/community.general/pull/7200).
|
||||
- nmap inventory plugin - now uses ``get_option`` in all cases to get its configuration
|
||||
information (https://github.com/ansible-collections/community.general/pull/7119).
|
||||
- nsupdate - fix a possible ``list index out of range`` exception (https://github.com/ansible-collections/community.general/issues/836).
|
||||
- oci_utils module util - fix inappropriate logical comparison expressions and
|
||||
makes them simpler. The previous checks had logical short circuits (https://github.com/ansible-collections/community.general/pull/7125).
|
||||
- pritunl module utils - fix incorrect URL parameter for organization add method
|
||||
(https://github.com/ansible-collections/community.general/pull/7161).
|
||||
- snap - an exception was being raised when snap list was empty (https://github.com/ansible-collections/community.general/pull/7124,
|
||||
https://github.com/ansible-collections/community.general/issues/7120).
|
||||
minor_changes:
|
||||
- cobbler inventory plugin - add ``exclude_mgmt_classes`` and ``include_mgmt_classes``
|
||||
options to exclude or include hosts based on management classes (https://github.com/ansible-collections/community.general/pull/7184).
|
||||
- cpanm - minor refactor when creating the ``CmdRunner`` object (https://github.com/ansible-collections/community.general/pull/7231).
|
||||
- gitlab_group_variable - add support for ``raw`` variables suboption (https://github.com/ansible-collections/community.general/pull/7132).
|
||||
- gitlab_project_variable - add support for ``raw`` variables suboption (https://github.com/ansible-collections/community.general/pull/7132).
|
||||
- jenkins_build - add new ``detach`` option, which allows the module to exit
|
||||
successfully as long as the build is created (default functionality is still
|
||||
waiting for the build to end before exiting) (https://github.com/ansible-collections/community.general/pull/7204).
|
||||
- jenkins_build - add new ``time_between_checks`` option, which allows to configure
|
||||
the wait time between requests to the Jenkins server (https://github.com/ansible-collections/community.general/pull/7204).
|
||||
- make - allows ``params`` to be used without value (https://github.com/ansible-collections/community.general/pull/7180).
|
||||
- nmap inventory plugin - now has a ``use_arp_ping`` option to allow the user
|
||||
to disable the default ARP ping query for a more reliable form (https://github.com/ansible-collections/community.general/pull/7119).
|
||||
- pagerduty - adds an option to use the v2 API for creating pagerduty incidents
|
||||
(https://github.com/ansible-collections/community.general/issues/6151)
|
||||
- pritunl module utils - ensure ``validate_certs`` parameter is honoured in
|
||||
all methods (https://github.com/ansible-collections/community.general/pull/7156).
|
||||
- redfish_info - report ``Id`` in the output of ``GetManagerInventory`` (https://github.com/ansible-collections/community.general/pull/7140).
|
||||
- redfish_utils module utils - support ``Volumes`` in response for ``GetDiskInventory``
|
||||
(https://github.com/ansible-collections/community.general/pull/6819).
|
||||
- unixy callback plugin - add support for ``check_mode_markers`` option (https://github.com/ansible-collections/community.general/pull/7179).
|
||||
release_summary: Bugfix and feature release.
|
||||
fragments:
|
||||
- 6819-redfish-utils-add-links-parameter-for-get_disk_inventory.yml
|
||||
- 7.4.0.yml
|
||||
- 7118-nmap_inv_plugin_no_arp_option.yml
|
||||
- 7124-snap-empty-list.yml
|
||||
- 7125-fix-inappropriate-comparison.yml
|
||||
- 7132-gitlab-raw-variables.yml
|
||||
- 7140-id-getmanagerinv-output.yml
|
||||
- 7156-ensure-validate-certs-parameter-is-honoured.yml
|
||||
- 7161-fix-incorrect-post-parameter.yml
|
||||
- 7179-unixy-support-checkmode-markers.yml
|
||||
- 7180-make_params_without_value.yml
|
||||
- 7184-cobbler-mgmt-classes.yml
|
||||
- 7200-cmd-runner-abs-path.yml
|
||||
- 7219-fix-nsupdate-cname.yaml
|
||||
- 7231-cpanm-adjustments.yml
|
||||
- improvements-to-jenkins-build-module.yml
|
||||
- update-v2-pagerduty-alert.yml
|
||||
modules:
|
||||
- description: Get information about Jenkins builds
|
||||
name: jenkins_build_info
|
||||
namespace: ''
|
||||
- description: Manage node.js packages with pnpm
|
||||
name: pnpm
|
||||
namespace: ''
|
||||
release_date: '2023-09-11'
|
||||
7.5.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- gitlab_group_variable - deleted all variables when used with ``purge=true``
|
||||
due to missing ``raw`` property in KNOWN attributes (https://github.com/ansible-collections/community.general/issues/7250).
|
||||
- gitlab_project_variable - deleted all variables when used with ``purge=true``
|
||||
due to missing ``raw`` property in KNOWN attributes (https://github.com/ansible-collections/community.general/issues/7250).
|
||||
- ldap_search - fix string normalization and the ``base64_attributes`` option
|
||||
on Python 3 (https://github.com/ansible-collections/community.general/issues/5704,
|
||||
https://github.com/ansible-collections/community.general/pull/7264).
|
||||
- lxc connection plugin - properly evaluate options (https://github.com/ansible-collections/community.general/pull/7369).
|
||||
- mail - skip headers containing equals characters due to missing ``maxsplit``
|
||||
on header key/value parsing (https://github.com/ansible-collections/community.general/pull/7303).
|
||||
- nmap inventory plugin - fix ``get_option`` calls (https://github.com/ansible-collections/community.general/pull/7323).
|
||||
- onepassword - fix KeyError exception when trying to access value of a field
|
||||
that is not filled out in OnePassword item (https://github.com/ansible-collections/community.general/pull/7241).
|
||||
- snap - change the change detection mechanism from "parsing installation" to
|
||||
"comparing end state with initial state" (https://github.com/ansible-collections/community.general/pull/7340,
|
||||
https://github.com/ansible-collections/community.general/issues/7265).
|
||||
- terraform - prevents ``-backend-config`` option double encapsulating with
|
||||
``shlex_quote`` function. (https://github.com/ansible-collections/community.general/pull/7301).
|
||||
deprecated_features:
|
||||
- The next major release, community.general 8.0.0, will drop support for ansible-core
|
||||
2.11 and 2.12, which have been End of Life for some time now. This means that
|
||||
this collection no longer supports Python 2.6 on the target. Individual content
|
||||
might still work with unsupported ansible-core versions, but that can change
|
||||
at any time. Also please note that from now on, for every new major community.general
|
||||
release, we will drop support for all ansible-core versions that have been
|
||||
End of Life for more than a few weeks on the date of the major release (https://github.com/ansible-community/community-topics/discussions/271,
|
||||
https://github.com/ansible-collections/community.general/pull/7259).
|
||||
- redfish_info, redfish_config, redfish_command - the default value ``10`` for
|
||||
the ``timeout`` option is deprecated and will change to ``60`` in community.general
|
||||
9.0.0 (https://github.com/ansible-collections/community.general/pull/7295).
|
||||
minor_changes:
|
||||
- cargo - add option ``executable``, which allows user to specify path to the
|
||||
cargo binary (https://github.com/ansible-collections/community.general/pull/7352).
|
||||
- cargo - add option ``locked`` which allows user to specify install the locked
|
||||
version of dependency instead of latest compatible version (https://github.com/ansible-collections/community.general/pull/6134).
|
||||
- dig lookup plugin - add TCP option to enable the use of TCP connection during
|
||||
DNS lookup (https://github.com/ansible-collections/community.general/pull/7343).
|
||||
- 'gitlab_group - add option ``force_delete`` (default: false) which allows
|
||||
deleting the group even if projects exist in it (https://github.com/ansible-collections/community.general/pull/7364).'
|
||||
- ini_file - add ``ignore_spaces`` option (https://github.com/ansible-collections/community.general/pull/7273).
|
||||
- newrelic_deployment - add option ``app_name_exact_match``, which filters results
|
||||
for the exact app_name provided (https://github.com/ansible-collections/community.general/pull/7355).
|
||||
- onepassword lookup plugin - introduce ``account_id`` option which allows specifying
|
||||
which account to use (https://github.com/ansible-collections/community.general/pull/7308).
|
||||
- onepassword_raw lookup plugin - introduce ``account_id`` option which allows
|
||||
specifying which account to use (https://github.com/ansible-collections/community.general/pull/7308).
|
||||
- parted - on resize, use ``--fix`` option if available (https://github.com/ansible-collections/community.general/pull/7304).
|
||||
- pnpm - set correct version when state is latest or version is not mentioned.
|
||||
Resolves previous idempotency problem (https://github.com/ansible-collections/community.general/pull/7339).
|
||||
- proxmox - add ``vmid`` (and ``taskid`` when possible) to return values (https://github.com/ansible-collections/community.general/pull/7263).
|
||||
- random_string - added new ``ignore_similar_chars`` and ``similar_chars`` options
|
||||
to ignore certain chars (https://github.com/ansible-collections/community.general/pull/7242).
|
||||
- redfish_command - add new option ``update_oem_params`` for the ``MultipartHTTPPushUpdate``
|
||||
command (https://github.com/ansible-collections/community.general/issues/7331).
|
||||
- redfish_config - add ``CreateVolume`` command to allow creation of volumes
|
||||
on servers (https://github.com/ansible-collections/community.general/pull/6813).
|
||||
- redfish_config - adding ``SetSecureBoot`` command (https://github.com/ansible-collections/community.general/pull/7129).
|
||||
- redfish_info - add support for ``GetBiosRegistries`` command (https://github.com/ansible-collections/community.general/pull/7144).
|
||||
- redfish_info - adds ``LinkStatus`` to NIC inventory (https://github.com/ansible-collections/community.general/pull/7318).
|
||||
- redis_info - refactor the redis_info module to use the redis module_utils
|
||||
enabling to pass TLS parameters to the Redis client (https://github.com/ansible-collections/community.general/pull/7267).
|
||||
- supervisorctl - allow to stop matching running processes before removing them
|
||||
with ``stop_before_removing=true`` (https://github.com/ansible-collections/community.general/pull/7284).
|
||||
release_summary: 'Regular bugfix and feature release.
|
||||
|
||||
|
||||
Please note that this is the last minor 7.x.0 release. Further releases
|
||||
|
||||
with major version 7 will be bugfix releases 7.5.y.
|
||||
|
||||
'
|
||||
fragments:
|
||||
- 6134-add-locked-option-for-cargo.yml
|
||||
- 6813-redfish-config-add-create-volume.yml
|
||||
- 7.5.0.yml
|
||||
- 7129-adding_set_secure_boot_command_to_redfish_config.yml
|
||||
- 7144-add-getbiosregistry-command-to-redfish-info.yml
|
||||
- 7241-prevent-key-error-when-value-does-not-exist.yml
|
||||
- 7242_ignore_similar_chars.yml
|
||||
- 7251-gitlab-variables-deleteing-all-variables.yml
|
||||
- 7263-proxmox-return-vmid-and-taskid.yaml
|
||||
- 7264-ldap_search-strings.yml
|
||||
- 7267-redis_info.yml
|
||||
- 7273-ini_file_ignore_spaces.yml
|
||||
- 7284-supervisorctl-stop-before-remove.yaml
|
||||
- 7295-adding_deprecation_for_timeout_in_redfish_info_config_command.yml
|
||||
- 7301-fix-backend-config-string-encapsulation.yml
|
||||
- 7303-mail-incorrect-header-parsing.yml
|
||||
- 7304-prevent-parted-warnings.yml
|
||||
- 7308-onepassword-multi-acc.yml
|
||||
- 7318-add-linkstatus-attribute-to-nic-inventory.yml
|
||||
- 7323-nmap.yml
|
||||
- 7330-redfish-utils-oem-params.yml
|
||||
- 7339-pnpm-correct-version-when-state-latest.yml
|
||||
- 7340-snap-fix.yml
|
||||
- 7343-dig-tcp-option.yml
|
||||
- 7352-add-executable-option-for-cargo.yml
|
||||
- 7355-newrelic-deployment-add-exact-name.yml
|
||||
- 7364-add-option-force-gitlab-group.yml
|
||||
- 7369-fix-lxc-options.yml
|
||||
- deprecate-ansible-core-2.11-2.12.yml
|
||||
modules:
|
||||
- description: Manipulate Consul roles
|
||||
name: consul_role
|
||||
namespace: ''
|
||||
- description: Set default handler for MIME type, for applications using Gnome
|
||||
GIO
|
||||
name: gio_mime
|
||||
namespace: ''
|
||||
- description: Allows administration of Keycloak client custom Javascript policies
|
||||
via Keycloak API
|
||||
name: keycloak_authz_custom_policy
|
||||
namespace: ''
|
||||
- description: Allows administration of Keycloak realm keys via Keycloak API
|
||||
name: keycloak_realm_key
|
||||
namespace: ''
|
||||
- description: Manage services on Source Mage GNU/Linux
|
||||
name: simpleinit_msb
|
||||
namespace: ''
|
||||
release_date: '2023-10-09'
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
|
||||
namespace: community
|
||||
name: general
|
||||
version: 7.2.1
|
||||
version: 7.5.0
|
||||
readme: README.md
|
||||
authors:
|
||||
- Ansible (https://github.com/ansible)
|
||||
|
||||
@@ -45,7 +45,7 @@ class ActionModule(ActionBase):
|
||||
SHUTDOWN_COMMAND_ARGS = {
|
||||
'alpine': '',
|
||||
'void': '-h +{delay_min} "{message}"',
|
||||
'freebsd': '-h +{delay_sec}s "{message}"',
|
||||
'freebsd': '-p +{delay_sec}s "{message}"',
|
||||
'linux': DEFAULT_SHUTDOWN_COMMAND_ARGS,
|
||||
'macosx': '-h +{delay_min} "{message}"',
|
||||
'openbsd': '-h +{delay_min} "{message}"',
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2017, Allyson Bowles <@akatch>
|
||||
# Copyright (c) 2023, Al Bowles <@akatch>
|
||||
# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
name: unixy
|
||||
type: stdout
|
||||
author: Allyson Bowles (@akatch)
|
||||
author: Al Bowles (@akatch)
|
||||
short_description: condensed Ansible output
|
||||
description:
|
||||
- Consolidated Ansible output in the style of LINUX/UNIX startup logs.
|
||||
@@ -40,7 +40,6 @@ class CallbackModule(CallbackModule_default):
|
||||
- Only display task names if the task runs on at least one host
|
||||
- Add option to display all hostnames on a single line in the appropriate result color (failures may have a separate line)
|
||||
- Consolidate stats display
|
||||
- Display whether run is in --check mode
|
||||
- Don't show play name if no hosts found
|
||||
'''
|
||||
|
||||
@@ -92,19 +91,31 @@ class CallbackModule(CallbackModule_default):
|
||||
def v2_playbook_on_task_start(self, task, is_conditional):
|
||||
self._get_task_display_name(task)
|
||||
if self.task_display_name is not None:
|
||||
self._display.display("%s..." % self.task_display_name)
|
||||
if task.check_mode and self.get_option('check_mode_markers'):
|
||||
self._display.display("%s (check mode)..." % self.task_display_name)
|
||||
else:
|
||||
self._display.display("%s..." % self.task_display_name)
|
||||
|
||||
def v2_playbook_on_handler_task_start(self, task):
|
||||
self._get_task_display_name(task)
|
||||
if self.task_display_name is not None:
|
||||
self._display.display("%s (via handler)... " % self.task_display_name)
|
||||
if task.check_mode and self.get_option('check_mode_markers'):
|
||||
self._display.display("%s (via handler in check mode)... " % self.task_display_name)
|
||||
else:
|
||||
self._display.display("%s (via handler)... " % self.task_display_name)
|
||||
|
||||
def v2_playbook_on_play_start(self, play):
|
||||
name = play.get_name().strip()
|
||||
if name and play.hosts:
|
||||
msg = u"\n- %s on hosts: %s -" % (name, ",".join(play.hosts))
|
||||
if play.check_mode and self.get_option('check_mode_markers'):
|
||||
if name and play.hosts:
|
||||
msg = u"\n- %s (in check mode) on hosts: %s -" % (name, ",".join(play.hosts))
|
||||
else:
|
||||
msg = u"- check mode -"
|
||||
else:
|
||||
msg = u"---"
|
||||
if name and play.hosts:
|
||||
msg = u"\n- %s on hosts: %s -" % (name, ",".join(play.hosts))
|
||||
else:
|
||||
msg = u"---"
|
||||
|
||||
self._display.display(msg)
|
||||
|
||||
@@ -227,8 +238,10 @@ class CallbackModule(CallbackModule_default):
|
||||
self._display.display(" Ran out of hosts!", color=C.COLOR_ERROR)
|
||||
|
||||
def v2_playbook_on_start(self, playbook):
|
||||
# TODO display whether this run is happening in check mode
|
||||
self._display.display("Executing playbook %s" % basename(playbook._file_name))
|
||||
if context.CLIARGS['check'] and self.get_option('check_mode_markers'):
|
||||
self._display.display("Executing playbook %s in check mode" % basename(playbook._file_name))
|
||||
else:
|
||||
self._display.display("Executing playbook %s" % basename(playbook._file_name))
|
||||
|
||||
# show CLI arguments
|
||||
if self._display.verbosity > 3:
|
||||
|
||||
@@ -46,11 +46,26 @@ DOCUMENTATION = '''
|
||||
vars:
|
||||
- name: ansible_chroot_exe
|
||||
default: chroot
|
||||
disable_root_check:
|
||||
description:
|
||||
- Do not check that the user is not root.
|
||||
ini:
|
||||
- section: chroot_connection
|
||||
key: disable_root_check
|
||||
env:
|
||||
- name: ANSIBLE_CHROOT_DISABLE_ROOT_CHECK
|
||||
vars:
|
||||
- name: ansible_chroot_disable_root_check
|
||||
default: false
|
||||
type: bool
|
||||
version_added: 7.3.0
|
||||
'''
|
||||
|
||||
EXAMPLES = r"""
|
||||
# Static inventory file
|
||||
# Plugin requires root privileges for chroot, -E preserves your env (and location of ~/.ansible):
|
||||
# sudo -E ansible-playbook ...
|
||||
#
|
||||
# Static inventory file
|
||||
# [chroots]
|
||||
# /path/to/debootstrap
|
||||
# /path/to/febootstrap
|
||||
@@ -100,11 +115,7 @@ class Connection(ConnectionBase):
|
||||
|
||||
self.chroot = self._play_context.remote_addr
|
||||
|
||||
if os.geteuid() != 0:
|
||||
raise AnsibleError("chroot connection requires running as root")
|
||||
|
||||
# we're running as root on the local system so do some
|
||||
# trivial checks for ensuring 'host' is actually a chroot'able dir
|
||||
# do some trivial checks for ensuring 'host' is actually a chroot'able dir
|
||||
if not os.path.isdir(self.chroot):
|
||||
raise AnsibleError("%s is not a directory" % self.chroot)
|
||||
|
||||
@@ -118,6 +129,11 @@ class Connection(ConnectionBase):
|
||||
|
||||
def _connect(self):
|
||||
""" connect to the chroot """
|
||||
if not self.get_option('disable_root_check') and os.geteuid() != 0:
|
||||
raise AnsibleError(
|
||||
"chroot connection requires running as root. "
|
||||
"You can override this check with the `disable_root_check` option.")
|
||||
|
||||
if os.path.isabs(self.get_option('chroot_exe')):
|
||||
self.chroot_cmd = self.get_option('chroot_exe')
|
||||
else:
|
||||
|
||||
@@ -19,6 +19,7 @@ DOCUMENTATION = '''
|
||||
- Container identifier
|
||||
default: inventory_hostname
|
||||
vars:
|
||||
- name: inventory_hostname
|
||||
- name: ansible_host
|
||||
- name: ansible_lxc_host
|
||||
executable:
|
||||
@@ -59,7 +60,7 @@ class Connection(ConnectionBase):
|
||||
def __init__(self, play_context, new_stdin, *args, **kwargs):
|
||||
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
|
||||
|
||||
self.container_name = self._play_context.remote_addr
|
||||
self.container_name = None
|
||||
self.container = None
|
||||
|
||||
def _connect(self):
|
||||
@@ -67,12 +68,14 @@ class Connection(ConnectionBase):
|
||||
super(Connection, self)._connect()
|
||||
|
||||
if not HAS_LIBLXC:
|
||||
msg = "lxc bindings for python2 are not installed"
|
||||
msg = "lxc python bindings are not installed"
|
||||
raise errors.AnsibleError(msg)
|
||||
|
||||
if self.container:
|
||||
return
|
||||
|
||||
self.container_name = self.get_option('remote_addr')
|
||||
|
||||
self._display.vvv("THIS IS A LOCAL LXC DIR", host=self.container_name)
|
||||
self.container = _lxc.Container(self.container_name)
|
||||
if self.container.state == "STOPPED":
|
||||
@@ -117,7 +120,7 @@ class Connection(ConnectionBase):
|
||||
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
|
||||
|
||||
# python2-lxc needs bytes. python3-lxc needs text.
|
||||
executable = to_native(self._play_context.executable, errors='surrogate_or_strict')
|
||||
executable = to_native(self.get_option('executable'), errors='surrogate_or_strict')
|
||||
local_cmd = [executable, '-c', to_native(cmd, errors='surrogate_or_strict')]
|
||||
|
||||
read_stdout, write_stdout = None, None
|
||||
|
||||
@@ -56,7 +56,7 @@ EXAMPLES = '''
|
||||
- name: Parse a CSV file's contents
|
||||
ansible.builtin.debug:
|
||||
msg: >-
|
||||
{{ csv_data | community.genera.from_csv(dialect='unix') }}
|
||||
{{ csv_data | community.general.from_csv(dialect='unix') }}
|
||||
vars:
|
||||
csv_data: |
|
||||
Column 1,Value
|
||||
|
||||
@@ -42,6 +42,12 @@ DOCUMENTATION = '''
|
||||
description: Fallback to cached results if connection to cobbler fails.
|
||||
type: boolean
|
||||
default: false
|
||||
exclude_mgmt_classes:
|
||||
description: Management classes to exclude from inventory.
|
||||
type: list
|
||||
default: []
|
||||
elements: str
|
||||
version_added: 7.4.0
|
||||
exclude_profiles:
|
||||
description:
|
||||
- Profiles to exclude from inventory.
|
||||
@@ -49,6 +55,12 @@ DOCUMENTATION = '''
|
||||
type: list
|
||||
default: []
|
||||
elements: str
|
||||
include_mgmt_classes:
|
||||
description: Management classes to include from inventory.
|
||||
type: list
|
||||
default: []
|
||||
elements: str
|
||||
version_added: 7.4.0
|
||||
include_profiles:
|
||||
description:
|
||||
- Profiles to include from inventory.
|
||||
@@ -216,6 +228,8 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
|
||||
self.cache_key = self.get_cache_key(path)
|
||||
self.use_cache = cache and self.get_option('cache')
|
||||
|
||||
self.exclude_mgmt_classes = self.get_option('exclude_mgmt_classes')
|
||||
self.include_mgmt_classes = self.get_option('include_mgmt_classes')
|
||||
self.exclude_profiles = self.get_option('exclude_profiles')
|
||||
self.include_profiles = self.get_option('include_profiles')
|
||||
self.group_by = self.get_option('group_by')
|
||||
@@ -265,9 +279,16 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
|
||||
hostname = host['hostname'] # None
|
||||
interfaces = host['interfaces']
|
||||
|
||||
if self._exclude_profile(host['profile']):
|
||||
self.display.vvvv('Excluding host %s in profile %s\n' % (host['name'], host['profile']))
|
||||
continue
|
||||
if set(host['mgmt_classes']) & set(self.include_mgmt_classes):
|
||||
self.display.vvvv('Including host %s in mgmt_classes %s\n' % (host['name'], host['mgmt_classes']))
|
||||
else:
|
||||
if self._exclude_profile(host['profile']):
|
||||
self.display.vvvv('Excluding host %s in profile %s\n' % (host['name'], host['profile']))
|
||||
continue
|
||||
|
||||
if set(host['mgmt_classes']) & set(self.exclude_mgmt_classes):
|
||||
self.display.vvvv('Excluding host %s in mgmt_classes %s\n' % (host['name'], host['mgmt_classes']))
|
||||
continue
|
||||
|
||||
# hostname is often empty for non-static IP hosts
|
||||
if hostname == '':
|
||||
|
||||
@@ -47,7 +47,7 @@ DOCUMENTATION = r'''
|
||||
- You need to set this password on the lxd server before
|
||||
running this module using the following command
|
||||
C(lxc config set core.trust_password <some random password>)
|
||||
See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).
|
||||
See U(https://documentation.ubuntu.com/lxd/en/latest/authentication/#adding-client-certificates-using-a-trust-password).
|
||||
- If O(trust_password) is set, this module send a request for authentication before sending any requests.
|
||||
type: str
|
||||
state:
|
||||
@@ -359,7 +359,7 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
Kwargs:
|
||||
None
|
||||
Source:
|
||||
https://github.com/lxc/lxd/blob/master/doc/rest-api.md
|
||||
https://documentation.ubuntu.com/lxd/en/latest/rest-api/
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
|
||||
@@ -85,6 +85,11 @@ DOCUMENTATION = '''
|
||||
type: boolean
|
||||
default: false
|
||||
version_added: 6.1.0
|
||||
use_arp_ping:
|
||||
description: Whether to always (V(true)) use the quick ARP ping or (V(false)) a slower but more reliable method.
|
||||
type: boolean
|
||||
default: true
|
||||
version_added: 7.4.0
|
||||
notes:
|
||||
- At least one of ipv4 or ipv6 is required to be True, both can be True, but they cannot both be False.
|
||||
- 'TODO: add OS fingerprinting'
|
||||
@@ -196,40 +201,43 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
# setup command
|
||||
cmd = [self._nmap]
|
||||
|
||||
if self._options['sudo']:
|
||||
if self.get_option('sudo'):
|
||||
cmd.insert(0, 'sudo')
|
||||
|
||||
if self._options['port']:
|
||||
if self.get_option('port'):
|
||||
cmd.append('-p')
|
||||
cmd.append(self._options['port'])
|
||||
cmd.append(self.get_option('port'))
|
||||
|
||||
if not self._options['ports']:
|
||||
if not self.get_option('ports'):
|
||||
cmd.append('-sP')
|
||||
|
||||
if self._options['ipv4'] and not self._options['ipv6']:
|
||||
if self.get_option('ipv4') and not self.get_option('ipv6'):
|
||||
cmd.append('-4')
|
||||
elif self._options['ipv6'] and not self._options['ipv4']:
|
||||
elif self.get_option('ipv6') and not self.get_option('ipv4'):
|
||||
cmd.append('-6')
|
||||
elif not self._options['ipv6'] and not self._options['ipv4']:
|
||||
elif not self.get_option('ipv6') and not self.get_option('ipv4'):
|
||||
raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin')
|
||||
|
||||
if self._options['exclude']:
|
||||
if self.get_option('exclude'):
|
||||
cmd.append('--exclude')
|
||||
cmd.append(','.join(self._options['exclude']))
|
||||
cmd.append(','.join(self.get_option('exclude')))
|
||||
|
||||
if self._options['dns_resolve']:
|
||||
if self.get_option('dns_resolve'):
|
||||
cmd.append('-n')
|
||||
|
||||
if self._options['udp_scan']:
|
||||
if self.get_option('udp_scan'):
|
||||
cmd.append('-sU')
|
||||
|
||||
if self._options['icmp_timestamp']:
|
||||
if self.get_option('icmp_timestamp'):
|
||||
cmd.append('-PP')
|
||||
|
||||
if self._options['open']:
|
||||
if self.get_option('open'):
|
||||
cmd.append('--open')
|
||||
|
||||
cmd.append(self._options['address'])
|
||||
if not self.get_option('use_arp_ping'):
|
||||
cmd.append('--disable-arp-ping')
|
||||
|
||||
cmd.append(self.get_option('address'))
|
||||
try:
|
||||
# execute
|
||||
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
|
||||
|
||||
@@ -132,20 +132,29 @@ class Bitwarden(object):
|
||||
If field is None, return the whole record for each match.
|
||||
"""
|
||||
matches = self._get_matches(search_value, search_field, collection_id)
|
||||
|
||||
if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']:
|
||||
return [match['login'][field] for match in matches]
|
||||
elif not field:
|
||||
if not field:
|
||||
return matches
|
||||
else:
|
||||
custom_field_matches = []
|
||||
for match in matches:
|
||||
field_matches = []
|
||||
for match in matches:
|
||||
# if there are no custom fields, then `match` has no key 'fields'
|
||||
if 'fields' in match:
|
||||
custom_field_found = False
|
||||
for custom_field in match['fields']:
|
||||
if custom_field['name'] == field:
|
||||
custom_field_matches.append(custom_field['value'])
|
||||
if matches and not custom_field_matches:
|
||||
raise AnsibleError("Custom field {field} does not exist in {search_value}".format(field=field, search_value=search_value))
|
||||
return custom_field_matches
|
||||
if field == custom_field['name']:
|
||||
field_matches.append(custom_field['value'])
|
||||
custom_field_found = True
|
||||
break
|
||||
if custom_field_found:
|
||||
continue
|
||||
if 'login' in match and field in match['login']:
|
||||
field_matches.append(match['login'][field])
|
||||
continue
|
||||
if field in match:
|
||||
field_matches.append(match[field])
|
||||
continue
|
||||
if matches and not field_matches:
|
||||
raise AnsibleError("field {field} does not exist in {search_value}".format(field=field, search_value=search_value))
|
||||
return field_matches
|
||||
|
||||
|
||||
class LookupModule(LookupBase):
|
||||
|
||||
@@ -70,6 +70,11 @@ DOCUMENTATION = '''
|
||||
- "Class."
|
||||
type: str
|
||||
default: 'IN'
|
||||
tcp:
|
||||
description: Use TCP to lookup DNS records.
|
||||
default: false
|
||||
type: bool
|
||||
version_added: 7.5.0
|
||||
notes:
|
||||
- ALL is not a record per-se, merely the listed fields are available for any record results you retrieve in the form of a dictionary.
|
||||
- While the 'dig' lookup plugin supports anything which dnspython supports out of the box, only a subset can be converted into a dictionary.
|
||||
@@ -329,6 +334,7 @@ class LookupModule(LookupBase):
|
||||
flat = self.get_option('flat')
|
||||
fail_on_error = self.get_option('fail_on_error')
|
||||
real_empty = self.get_option('real_empty')
|
||||
tcp = self.get_option('tcp')
|
||||
try:
|
||||
rdclass = dns.rdataclass.from_text(self.get_option('class'))
|
||||
except Exception as e:
|
||||
@@ -375,6 +381,8 @@ class LookupModule(LookupBase):
|
||||
fail_on_error = boolean(arg)
|
||||
elif opt == 'real_empty':
|
||||
real_empty = boolean(arg)
|
||||
elif opt == 'tcp':
|
||||
tcp = boolean(arg)
|
||||
|
||||
continue
|
||||
|
||||
@@ -408,7 +416,7 @@ class LookupModule(LookupBase):
|
||||
|
||||
for domain in domains:
|
||||
try:
|
||||
answers = myres.query(domain, qtype, rdclass=rdclass)
|
||||
answers = myres.query(domain, qtype, rdclass=rdclass, tcp=tcp)
|
||||
for rdata in answers:
|
||||
s = rdata.to_text()
|
||||
if qtype.upper() == 'TXT':
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2018, Scott Buchanan <sbuchanan@ri.pn>
|
||||
# Copyright (c) 2018, Scott Buchanan <scott@buchanan.works>
|
||||
# Copyright (c) 2016, Andrew Zenk <azenk@umn.edu> (lastpass.py used as starting point)
|
||||
# Copyright (c) 2018, Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
@@ -38,6 +38,10 @@ DOCUMENTATION = '''
|
||||
type: str
|
||||
subdomain:
|
||||
description: The 1Password subdomain to authenticate against.
|
||||
account_id:
|
||||
description: The account ID to target.
|
||||
type: str
|
||||
version_added: 7.5.0
|
||||
username:
|
||||
description: The username used to sign in.
|
||||
secret_key:
|
||||
@@ -55,6 +59,7 @@ DOCUMENTATION = '''
|
||||
performed an initial sign in (meaning C(~/.op/config), C(~/.config/op/config) or C(~/.config/.op/config) exists), then only the
|
||||
C(master_password) is required. You may optionally specify O(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op).
|
||||
- This lookup can perform an initial login by providing O(subdomain), O(username), O(secret_key), and O(master_password).
|
||||
- Can target a specific account by providing the O(account_id).
|
||||
- Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials
|
||||
needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength
|
||||
to the 1Password master password.
|
||||
@@ -93,6 +98,12 @@ EXAMPLES = """
|
||||
master_password=vault_master_password,
|
||||
username='tweety@acme.com',
|
||||
secret_key=vault_secret_key)
|
||||
|
||||
- name: Retrieve password from specific account
|
||||
ansible.builtin.debug:
|
||||
var: lookup('community.general.onepassword',
|
||||
'HAL 9000',
|
||||
account_id='abc123')
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
@@ -119,13 +130,23 @@ from ansible_collections.community.general.plugins.module_utils.onepassword impo
|
||||
class OnePassCLIBase(with_metaclass(abc.ABCMeta, object)):
|
||||
bin = "op"
|
||||
|
||||
def __init__(self, subdomain=None, domain="1password.com", username=None, secret_key=None, master_password=None, service_account_token=None):
|
||||
def __init__(
|
||||
self,
|
||||
subdomain=None,
|
||||
domain="1password.com",
|
||||
username=None,
|
||||
secret_key=None,
|
||||
master_password=None,
|
||||
service_account_token=None,
|
||||
account_id=None,
|
||||
):
|
||||
self.subdomain = subdomain
|
||||
self.domain = domain
|
||||
self.username = username
|
||||
self.master_password = master_password
|
||||
self.secret_key = secret_key
|
||||
self.service_account_token = service_account_token
|
||||
self.account_id = account_id
|
||||
|
||||
self._path = None
|
||||
self._version = None
|
||||
@@ -293,7 +314,9 @@ class OnePassCLIv1(OnePassCLIBase):
|
||||
|
||||
def assert_logged_in(self):
|
||||
args = ["get", "account"]
|
||||
if self.subdomain:
|
||||
if self.account_id:
|
||||
args.extend(["--account", self.account_id])
|
||||
elif self.subdomain:
|
||||
account = "{subdomain}.{domain}".format(subdomain=self.subdomain, domain=self.domain)
|
||||
args.extend(["--account", account])
|
||||
|
||||
@@ -326,6 +349,10 @@ class OnePassCLIv1(OnePassCLIBase):
|
||||
|
||||
def get_raw(self, item_id, vault=None, token=None):
|
||||
args = ["get", "item", item_id]
|
||||
|
||||
if self.account_id:
|
||||
args.extend(["--account", self.account_id])
|
||||
|
||||
if vault is not None:
|
||||
args += ["--vault={0}".format(vault)]
|
||||
|
||||
@@ -462,10 +489,10 @@ class OnePassCLIv2(OnePassCLIBase):
|
||||
# If the field name doesn't exist in the section, match on the value of "label"
|
||||
# then "id" and return "value"
|
||||
if field.get("label") == field_name:
|
||||
return field["value"]
|
||||
return field.get("value", "")
|
||||
|
||||
if field.get("id") == field_name:
|
||||
return field["value"]
|
||||
return field.get("value", "")
|
||||
|
||||
# Look at the section data and get an indentifier. The value of 'id' is either a unique ID
|
||||
# or a human-readable string. If a 'label' field exists, prefer that since
|
||||
@@ -475,10 +502,10 @@ class OnePassCLIv2(OnePassCLIBase):
|
||||
if section_title == current_section_title:
|
||||
# In the correct section. Check "label" then "id" for the desired field_name
|
||||
if field.get("label") == field_name:
|
||||
return field["value"]
|
||||
return field.get("value", "")
|
||||
|
||||
if field.get("id") == field_name:
|
||||
return field["value"]
|
||||
return field.get("value", "")
|
||||
|
||||
return ""
|
||||
|
||||
@@ -502,7 +529,9 @@ class OnePassCLIv2(OnePassCLIBase):
|
||||
# an interactive prompt. Only run 'op account get' after first listing accounts to see
|
||||
# if there are any previously configured accounts.
|
||||
args = ["account", "get"]
|
||||
if self.subdomain:
|
||||
if self.account_id:
|
||||
args.extend(["--account", self.account_id])
|
||||
elif self.subdomain:
|
||||
account = "{subdomain}.{domain}".format(subdomain=self.subdomain, domain=self.domain)
|
||||
args.extend(["--account", account])
|
||||
|
||||
@@ -533,6 +562,10 @@ class OnePassCLIv2(OnePassCLIBase):
|
||||
|
||||
def get_raw(self, item_id, vault=None, token=None):
|
||||
args = ["item", "get", item_id, "--format", "json"]
|
||||
|
||||
if self.account_id:
|
||||
args.extend(["--account", self.account_id])
|
||||
|
||||
if vault is not None:
|
||||
args += ["--vault={0}".format(vault)]
|
||||
|
||||
@@ -559,13 +592,14 @@ class OnePassCLIv2(OnePassCLIBase):
|
||||
|
||||
class OnePass(object):
|
||||
def __init__(self, subdomain=None, domain="1password.com", username=None, secret_key=None, master_password=None,
|
||||
service_account_token=None):
|
||||
service_account_token=None, account_id=None):
|
||||
self.subdomain = subdomain
|
||||
self.domain = domain
|
||||
self.username = username
|
||||
self.secret_key = secret_key
|
||||
self.master_password = master_password
|
||||
self.service_account_token = service_account_token
|
||||
self.account_id = account_id
|
||||
|
||||
self.logged_in = False
|
||||
self.token = None
|
||||
@@ -578,7 +612,7 @@ class OnePass(object):
|
||||
for cls in OnePassCLIBase.__subclasses__():
|
||||
if cls.supports_version == version.split(".")[0]:
|
||||
try:
|
||||
return cls(self.subdomain, self.domain, self.username, self.secret_key, self.master_password, self.service_account_token)
|
||||
return cls(self.subdomain, self.domain, self.username, self.secret_key, self.master_password, self.service_account_token, self.account_id)
|
||||
except TypeError as e:
|
||||
raise AnsibleLookupError(e)
|
||||
|
||||
@@ -642,8 +676,9 @@ class LookupModule(LookupBase):
|
||||
secret_key = self.get_option("secret_key")
|
||||
master_password = self.get_option("master_password")
|
||||
service_account_token = self.get_option("service_account_token")
|
||||
account_id = self.get_option("account_id")
|
||||
|
||||
op = OnePass(subdomain, domain, username, secret_key, master_password, service_account_token)
|
||||
op = OnePass(subdomain, domain, username, secret_key, master_password, service_account_token, account_id)
|
||||
op.assert_logged_in()
|
||||
|
||||
values = []
|
||||
|
||||
@@ -35,6 +35,10 @@ DOCUMENTATION = '''
|
||||
version_added: 6.0.0
|
||||
default: '1password.com'
|
||||
type: str
|
||||
account_id:
|
||||
description: The account ID to target.
|
||||
type: str
|
||||
version_added: 7.5.0
|
||||
username:
|
||||
description: The username used to sign in.
|
||||
secret_key:
|
||||
@@ -52,6 +56,7 @@ DOCUMENTATION = '''
|
||||
performed an initial sign in (meaning C(~/.op/config exists)), then only the O(master_password) is required.
|
||||
You may optionally specify O(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op).
|
||||
- This lookup can perform an initial login by providing O(subdomain), O(username), O(secret_key), and O(master_password).
|
||||
- Can target a specific account by providing the O(account_id).
|
||||
- Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials
|
||||
needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength
|
||||
to the 1Password master password.
|
||||
@@ -96,8 +101,9 @@ class LookupModule(LookupBase):
|
||||
secret_key = self.get_option("secret_key")
|
||||
master_password = self.get_option("master_password")
|
||||
service_account_token = self.get_option("service_account_token")
|
||||
account_id = self.get_option("account_id")
|
||||
|
||||
op = OnePass(subdomain, domain, username, secret_key, master_password, service_account_token)
|
||||
op = OnePass(subdomain, domain, username, secret_key, master_password, service_account_token, account_id)
|
||||
op.assert_logged_in()
|
||||
|
||||
values = []
|
||||
|
||||
@@ -80,6 +80,19 @@ DOCUMENTATION = r"""
|
||||
- Override all values of O(numbers), O(upper), O(lower), and O(special) with
|
||||
the given list of characters.
|
||||
type: str
|
||||
ignore_similar_chars:
|
||||
description:
|
||||
- Ignore similar characters, such as V(l) and V(1), or V(O) and V(0).
|
||||
- These characters can be configured in O(similar_chars).
|
||||
default: false
|
||||
type: bool
|
||||
version_added: 7.5.0
|
||||
similar_chars:
|
||||
description:
|
||||
- Overide a list of characters not to be use in the string.
|
||||
default: "il1LoO0"
|
||||
type: str
|
||||
version_added: 7.5.0
|
||||
base64:
|
||||
description:
|
||||
- Returns base64 encoded string.
|
||||
@@ -173,9 +186,17 @@ class LookupModule(LookupBase):
|
||||
length = self.get_option("length")
|
||||
base64_flag = self.get_option("base64")
|
||||
override_all = self.get_option("override_all")
|
||||
ignore_similar_chars = self.get_option("ignore_similar_chars")
|
||||
similar_chars = self.get_option("similar_chars")
|
||||
values = ""
|
||||
available_chars_set = ""
|
||||
|
||||
if ignore_similar_chars:
|
||||
number_chars = "".join([sc for sc in number_chars if sc not in similar_chars])
|
||||
lower_chars = "".join([sc for sc in lower_chars if sc not in similar_chars])
|
||||
upper_chars = "".join([sc for sc in upper_chars if sc not in similar_chars])
|
||||
special_chars = "".join([sc for sc in special_chars if sc not in similar_chars])
|
||||
|
||||
if override_all:
|
||||
# Override all the values
|
||||
available_chars_set = override_all
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
from functools import wraps
|
||||
|
||||
from ansible.module_utils.common.collections import is_sequence
|
||||
@@ -204,12 +205,17 @@ class CmdRunner(object):
|
||||
environ_update = {}
|
||||
self.environ_update = environ_update
|
||||
|
||||
self.command[0] = module.get_bin_path(self.command[0], opt_dirs=path_prefix, required=True)
|
||||
_cmd = self.command[0]
|
||||
self.command[0] = _cmd if (os.path.isabs(_cmd) or '/' in _cmd) else module.get_bin_path(_cmd, opt_dirs=path_prefix, required=True)
|
||||
|
||||
for mod_param_name, spec in iteritems(module.argument_spec):
|
||||
if mod_param_name not in self.arg_formats:
|
||||
self.arg_formats[mod_param_name] = _Format.as_default_type(spec.get('type', 'str'), mod_param_name)
|
||||
|
||||
@property
|
||||
def binary(self):
|
||||
return self.command[0]
|
||||
|
||||
def __call__(self, args_order=None, output_process=None, ignore_value_none=True, check_mode_skip=False, check_mode_return=None, **kwargs):
|
||||
if output_process is None:
|
||||
output_process = _process_as_is
|
||||
|
||||
29
plugins/module_utils/consul.py
Normal file
29
plugins/module_utils/consul.py
Normal file
@@ -0,0 +1,29 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2022, Håkon Lerring
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
def get_consul_url(configuration):
|
||||
return '%s://%s:%s/v1' % (configuration.scheme,
|
||||
configuration.host, configuration.port)
|
||||
|
||||
|
||||
def get_auth_headers(configuration):
|
||||
if configuration.token is None:
|
||||
return {}
|
||||
else:
|
||||
return {'X-Consul-Token': configuration.token}
|
||||
|
||||
|
||||
class RequestError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def handle_consul_response_error(response):
|
||||
if 400 <= response.status_code < 600:
|
||||
raise RequestError('%d %s' % (response.status_code, response.content))
|
||||
32
plugins/module_utils/gio_mime.py
Normal file
32
plugins/module_utils/gio_mime.py
Normal file
@@ -0,0 +1,32 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
|
||||
|
||||
|
||||
def gio_mime_runner(module, **kwargs):
|
||||
return CmdRunner(
|
||||
module,
|
||||
command=['gio', 'mime'],
|
||||
arg_formats=dict(
|
||||
mime_type=cmd_runner_fmt.as_list(),
|
||||
handler=cmd_runner_fmt.as_list(),
|
||||
),
|
||||
**kwargs
|
||||
)
|
||||
|
||||
|
||||
def gio_mime_get(runner, mime_type):
|
||||
def process(rc, out, err):
|
||||
if err.startswith("No default applications for"):
|
||||
return None
|
||||
out = out.splitlines()[0]
|
||||
return out.split()[-1]
|
||||
|
||||
with runner("mime_type", output_process=process) as ctx:
|
||||
return ctx.run(mime_type=mime_type)
|
||||
@@ -116,7 +116,7 @@ def gitlab_authentication(module):
|
||||
def filter_returned_variables(gitlab_variables):
|
||||
# pop properties we don't know
|
||||
existing_variables = [dict(x.attributes) for x in gitlab_variables]
|
||||
KNOWN = ['key', 'value', 'masked', 'protected', 'variable_type', 'environment_scope']
|
||||
KNOWN = ['key', 'value', 'masked', 'protected', 'variable_type', 'environment_scope', 'raw']
|
||||
for item in existing_variables:
|
||||
for key in list(item.keys()):
|
||||
if key not in KNOWN:
|
||||
@@ -135,6 +135,7 @@ def vars_to_variables(vars, module):
|
||||
"value": str(value),
|
||||
"masked": False,
|
||||
"protected": False,
|
||||
"raw": False,
|
||||
"variable_type": "env_var",
|
||||
}
|
||||
)
|
||||
@@ -145,6 +146,7 @@ def vars_to_variables(vars, module):
|
||||
"value": value.get('value'),
|
||||
"masked": value.get('masked'),
|
||||
"protected": value.get('protected'),
|
||||
"raw": value.get('raw'),
|
||||
"variable_type": value.get('variable_type'),
|
||||
}
|
||||
|
||||
|
||||
@@ -116,6 +116,9 @@ URL_AUTHZ_PERMISSIONS = "{url}/admin/realms/{realm}/clients/{client_id}/authz/re
|
||||
|
||||
URL_AUTHZ_RESOURCES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/resource"
|
||||
|
||||
URL_AUTHZ_CUSTOM_POLICY = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/policy/{policy_type}"
|
||||
URL_AUTHZ_CUSTOM_POLICIES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/policy"
|
||||
|
||||
|
||||
def keycloak_argument_spec():
|
||||
"""
|
||||
@@ -777,7 +780,8 @@ class KeycloakAPI(object):
|
||||
users_url += '?username=%s&exact=true' % username
|
||||
try:
|
||||
userrep = None
|
||||
users = json.loads(to_native(open_url(users_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
|
||||
users = json.loads(to_native(open_url(users_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
for user in users:
|
||||
if user['username'] == username:
|
||||
@@ -803,7 +807,8 @@ class KeycloakAPI(object):
|
||||
|
||||
service_account_user_url = URL_CLIENT_SERVICE_ACCOUNT_USER.format(url=self.baseurl, realm=realm, id=cid)
|
||||
try:
|
||||
return json.loads(to_native(open_url(service_account_user_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
|
||||
return json.loads(to_native(open_url(service_account_user_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except ValueError as e:
|
||||
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain the service-account-user for realm %s and client_id %s: %s'
|
||||
@@ -1347,7 +1352,8 @@ class KeycloakAPI(object):
|
||||
clientsecret_url = URL_CLIENTSECRET.format(url=self.baseurl, realm=realm, id=id)
|
||||
|
||||
try:
|
||||
return json.loads(to_native(open_url(clientsecret_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout,
|
||||
return json.loads(to_native(open_url(clientsecret_url, method='POST', http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
|
||||
except HTTPError as e:
|
||||
@@ -1370,7 +1376,8 @@ class KeycloakAPI(object):
|
||||
clientsecret_url = URL_CLIENTSECRET.format(url=self.baseurl, realm=realm, id=id)
|
||||
|
||||
try:
|
||||
return json.loads(to_native(open_url(clientsecret_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
|
||||
return json.loads(to_native(open_url(clientsecret_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
|
||||
except HTTPError as e:
|
||||
@@ -2678,7 +2685,9 @@ class KeycloakAPI(object):
|
||||
open_url(
|
||||
user_url,
|
||||
method='GET',
|
||||
headers=self.restheaders))
|
||||
http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs))
|
||||
return userrep
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Could not get user %s in realm %s: %s'
|
||||
@@ -2700,8 +2709,10 @@ class KeycloakAPI(object):
|
||||
realm=realm)
|
||||
open_url(users_url,
|
||||
method='POST',
|
||||
headers=self.restheaders,
|
||||
data=json.dumps(userrep))
|
||||
http_agent=self.http_agent, headers=self.restheaders,
|
||||
data=json.dumps(userrep),
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs)
|
||||
created_user = self.get_user_by_username(
|
||||
username=userrep['username'],
|
||||
realm=realm)
|
||||
@@ -2744,8 +2755,10 @@ class KeycloakAPI(object):
|
||||
open_url(
|
||||
user_url,
|
||||
method='PUT',
|
||||
headers=self.restheaders,
|
||||
data=json.dumps(userrep))
|
||||
http_agent=self.http_agent, headers=self.restheaders,
|
||||
data=json.dumps(userrep),
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs)
|
||||
updated_user = self.get_user_by_id(
|
||||
user_id=userrep['id'],
|
||||
realm=realm)
|
||||
@@ -2769,7 +2782,9 @@ class KeycloakAPI(object):
|
||||
return open_url(
|
||||
user_url,
|
||||
method='DELETE',
|
||||
headers=self.restheaders)
|
||||
http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Could not delete user %s in realm %s: %s'
|
||||
% (user_id, realm, str(e)))
|
||||
@@ -2791,7 +2806,9 @@ class KeycloakAPI(object):
|
||||
open_url(
|
||||
user_groups_url,
|
||||
method='GET',
|
||||
headers=self.restheaders))
|
||||
http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs))
|
||||
for user_group in user_groups:
|
||||
groups.append(user_group["name"])
|
||||
return groups
|
||||
@@ -2816,7 +2833,9 @@ class KeycloakAPI(object):
|
||||
return open_url(
|
||||
user_group_url,
|
||||
method='PUT',
|
||||
headers=self.restheaders)
|
||||
http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Could not add user %s in group %s in realm %s: %s'
|
||||
% (user_id, group_id, realm, str(e)))
|
||||
@@ -2838,7 +2857,9 @@ class KeycloakAPI(object):
|
||||
return open_url(
|
||||
user_group_url,
|
||||
method='DELETE',
|
||||
headers=self.restheaders)
|
||||
http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Could not remove user %s from group %s in realm %s: %s'
|
||||
% (user_id, group_id, realm, str(e)))
|
||||
@@ -2904,6 +2925,27 @@ class KeycloakAPI(object):
|
||||
list_of_groups.append(group_dict)
|
||||
return list_of_groups
|
||||
|
||||
def create_authz_custom_policy(self, policy_type, payload, client_id, realm):
|
||||
"""Create a custom policy for a Keycloak client"""
|
||||
url = URL_AUTHZ_CUSTOM_POLICY.format(url=self.baseurl, policy_type=policy_type, client_id=client_id, realm=realm)
|
||||
|
||||
try:
|
||||
return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
|
||||
data=json.dumps(payload), validate_certs=self.validate_certs)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Could not create permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
|
||||
|
||||
def remove_authz_custom_policy(self, policy_id, client_id, realm):
|
||||
"""Remove a custom policy from a Keycloak client"""
|
||||
url = URL_AUTHZ_CUSTOM_POLICIES.format(url=self.baseurl, client_id=client_id, realm=realm)
|
||||
delete_url = "%s/%s" % (url, policy_id)
|
||||
|
||||
try:
|
||||
return open_url(delete_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Could not delete custom policy %s for client %s in realm %s: %s' % (id, client_id, realm, str(e)))
|
||||
|
||||
def get_authz_permission_by_name(self, name, client_id, realm):
|
||||
"""Get authorization permission by name"""
|
||||
url = URL_AUTHZ_POLICIES.format(url=self.baseurl, client_id=client_id, realm=realm)
|
||||
|
||||
@@ -79,7 +79,7 @@ def _post_pritunl_organization(
|
||||
api_secret=api_secret,
|
||||
base_url=base_url,
|
||||
method="POST",
|
||||
path="/organization/%s",
|
||||
path="/organization",
|
||||
headers={"Content-Type": "application/json"},
|
||||
data=json.dumps(organization_data),
|
||||
validate_certs=validate_certs,
|
||||
@@ -220,7 +220,7 @@ def post_pritunl_organization(
|
||||
api_secret=api_secret,
|
||||
base_url=base_url,
|
||||
organization_data={"name": organization_name},
|
||||
validate_certs=True,
|
||||
validate_certs=validate_certs,
|
||||
)
|
||||
|
||||
if response.getcode() != 200:
|
||||
@@ -248,7 +248,7 @@ def post_pritunl_user(
|
||||
base_url=base_url,
|
||||
organization_id=organization_id,
|
||||
user_data=user_data,
|
||||
validate_certs=True,
|
||||
validate_certs=validate_certs,
|
||||
)
|
||||
|
||||
if response.getcode() != 200:
|
||||
@@ -267,7 +267,7 @@ def post_pritunl_user(
|
||||
organization_id=organization_id,
|
||||
user_data=user_data,
|
||||
user_id=user_id,
|
||||
validate_certs=True,
|
||||
validate_certs=validate_certs,
|
||||
)
|
||||
|
||||
if response.getcode() != 200:
|
||||
@@ -287,7 +287,7 @@ def delete_pritunl_organization(
|
||||
api_secret=api_secret,
|
||||
base_url=base_url,
|
||||
organization_id=organization_id,
|
||||
validate_certs=True,
|
||||
validate_certs=validate_certs,
|
||||
)
|
||||
|
||||
if response.getcode() != 200:
|
||||
@@ -307,7 +307,7 @@ def delete_pritunl_user(
|
||||
base_url=base_url,
|
||||
organization_id=organization_id,
|
||||
user_id=user_id,
|
||||
validate_certs=True,
|
||||
validate_certs=validate_certs,
|
||||
)
|
||||
|
||||
if response.getcode() != 200:
|
||||
|
||||
@@ -561,7 +561,7 @@ def are_lists_equal(s, t):
|
||||
if s is None and t is None:
|
||||
return True
|
||||
|
||||
if (s is None and len(t) >= 0) or (t is None and len(s) >= 0) or (len(s) != len(t)):
|
||||
if s is None or t is None or (len(s) != len(t)):
|
||||
return False
|
||||
|
||||
if len(s) == 0:
|
||||
@@ -570,7 +570,7 @@ def are_lists_equal(s, t):
|
||||
s = to_dict(s)
|
||||
t = to_dict(t)
|
||||
|
||||
if type(s[0]) == dict:
|
||||
if isinstance(s[0], dict):
|
||||
# Handle list of dicts. Dictionary returned by the API may have additional keys. For example, a get call on
|
||||
# service gateway has an attribute `services` which is a list of `ServiceIdResponseDetails`. This has a key
|
||||
# `service_name` which is not provided in the list of `services` by a user while making an update call; only
|
||||
@@ -604,9 +604,9 @@ def get_attr_to_update(get_fn, kwargs_get, module, update_attributes):
|
||||
user_provided_attr_value = module.params.get(attr, None)
|
||||
|
||||
unequal_list_attr = (
|
||||
type(resources_attr_value) == list or type(user_provided_attr_value) == list
|
||||
isinstance(resources_attr_value, list) or isinstance(user_provided_attr_value, list)
|
||||
) and not are_lists_equal(user_provided_attr_value, resources_attr_value)
|
||||
unequal_attr = type(resources_attr_value) != list and to_dict(
|
||||
unequal_attr = not isinstance(resources_attr_value, list) and to_dict(
|
||||
resources_attr_value
|
||||
) != to_dict(user_provided_attr_value)
|
||||
if unequal_list_attr or unequal_attr:
|
||||
@@ -936,9 +936,9 @@ def tuplize(d):
|
||||
list_of_tuples = []
|
||||
key_list = sorted(list(d.keys()))
|
||||
for key in key_list:
|
||||
if type(d[key]) == list:
|
||||
if isinstance(d[key], list):
|
||||
# Convert a value which is itself a list of dict to a list of tuples.
|
||||
if d[key] and type(d[key][0]) == dict:
|
||||
if d[key] and isinstance(d[key][0], dict):
|
||||
sub_tuples = []
|
||||
for sub_dict in d[key]:
|
||||
sub_tuples.append(tuplize(sub_dict))
|
||||
@@ -948,7 +948,7 @@ def tuplize(d):
|
||||
list_of_tuples.append((sub_tuples is None, key, sub_tuples))
|
||||
else:
|
||||
list_of_tuples.append((d[key] is None, key, d[key]))
|
||||
elif type(d[key]) == dict:
|
||||
elif isinstance(d[key], dict):
|
||||
tupled_value = tuplize(d[key])
|
||||
list_of_tuples.append((tupled_value is None, key, tupled_value))
|
||||
else:
|
||||
@@ -969,13 +969,13 @@ def sort_dictionary(d):
|
||||
"""
|
||||
sorted_d = {}
|
||||
for key in d:
|
||||
if type(d[key]) == list:
|
||||
if d[key] and type(d[key][0]) == dict:
|
||||
if isinstance(d[key], list):
|
||||
if d[key] and isinstance(d[key][0], dict):
|
||||
sorted_value = sort_list_of_dictionary(d[key])
|
||||
sorted_d[key] = sorted_value
|
||||
else:
|
||||
sorted_d[key] = sorted(d[key])
|
||||
elif type(d[key]) == dict:
|
||||
elif isinstance(d[key], dict):
|
||||
sorted_d[key] = sort_dictionary(d[key])
|
||||
else:
|
||||
sorted_d[key] = d[key]
|
||||
@@ -1026,10 +1026,7 @@ def check_if_user_value_matches_resources_attr(
|
||||
return
|
||||
|
||||
if (
|
||||
resources_value_for_attr is None
|
||||
and len(user_provided_value_for_attr) >= 0
|
||||
or user_provided_value_for_attr is None
|
||||
and len(resources_value_for_attr) >= 0
|
||||
resources_value_for_attr is None or user_provided_value_for_attr is None
|
||||
):
|
||||
res[0] = False
|
||||
return
|
||||
@@ -1044,7 +1041,7 @@ def check_if_user_value_matches_resources_attr(
|
||||
|
||||
if (
|
||||
user_provided_value_for_attr
|
||||
and type(user_provided_value_for_attr[0]) == dict
|
||||
and isinstance(user_provided_value_for_attr[0], dict)
|
||||
):
|
||||
# Process a list of dict
|
||||
sorted_user_provided_value_for_attr = sort_list_of_dictionary(
|
||||
@@ -1547,7 +1544,7 @@ def delete_and_wait(
|
||||
except ServiceError as ex:
|
||||
# DNS API throws a 400 InvalidParameter when a zone id is provided for zone_name_or_id and if the zone
|
||||
# resource is not available, instead of the expected 404. So working around this for now.
|
||||
if type(client) == oci.dns.DnsClient:
|
||||
if isinstance(client, oci.dns.DnsClient):
|
||||
if ex.status == 400 and ex.code == "InvalidParameter":
|
||||
_debug(
|
||||
"Resource {0} with {1} already deleted. So returning changed=False".format(
|
||||
|
||||
@@ -10,6 +10,8 @@ import json
|
||||
import os
|
||||
import random
|
||||
import string
|
||||
import gzip
|
||||
from io import BytesIO
|
||||
from ansible.module_utils.urls import open_url
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
@@ -128,8 +130,10 @@ class RedfishUtils(object):
|
||||
return resp
|
||||
|
||||
# The following functions are to send GET/POST/PATCH/DELETE requests
|
||||
def get_request(self, uri):
|
||||
def get_request(self, uri, override_headers=None):
|
||||
req_headers = dict(GET_HEADERS)
|
||||
if override_headers:
|
||||
req_headers.update(override_headers)
|
||||
username, password, basic_auth = self._auth_params(req_headers)
|
||||
try:
|
||||
# Service root is an unauthenticated resource; remove credentials
|
||||
@@ -141,8 +145,13 @@ class RedfishUtils(object):
|
||||
force_basic_auth=basic_auth, validate_certs=False,
|
||||
follow_redirects='all',
|
||||
use_proxy=True, timeout=self.timeout)
|
||||
data = json.loads(to_native(resp.read()))
|
||||
headers = dict((k.lower(), v) for (k, v) in resp.info().items())
|
||||
if override_headers:
|
||||
resp = gzip.open(BytesIO(resp.read()), 'rt', encoding='utf-8')
|
||||
data = json.loads(to_native(resp.read()))
|
||||
headers = req_headers
|
||||
else:
|
||||
data = json.loads(to_native(resp.read()))
|
||||
headers = dict((k.lower(), v) for (k, v) in resp.info().items())
|
||||
except HTTPError as e:
|
||||
msg = self._get_extended_message(e)
|
||||
return {'ret': False,
|
||||
@@ -717,7 +726,8 @@ class RedfishUtils(object):
|
||||
properties = ['CacheSummary', 'FirmwareVersion', 'Identifiers',
|
||||
'Location', 'Manufacturer', 'Model', 'Name', 'Id',
|
||||
'PartNumber', 'SerialNumber', 'SpeedGbps', 'Status']
|
||||
key = "StorageControllers"
|
||||
key = "Controllers"
|
||||
deprecated_key = "StorageControllers"
|
||||
|
||||
# Find Storage service
|
||||
response = self.get_request(self.root_uri + systems_uri)
|
||||
@@ -745,7 +755,30 @@ class RedfishUtils(object):
|
||||
data = response['data']
|
||||
|
||||
if key in data:
|
||||
controller_list = data[key]
|
||||
controllers_uri = data[key][u'@odata.id']
|
||||
|
||||
response = self.get_request(self.root_uri + controllers_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
result['ret'] = True
|
||||
data = response['data']
|
||||
|
||||
if data[u'Members']:
|
||||
for controller_member in data[u'Members']:
|
||||
controller_member_uri = controller_member[u'@odata.id']
|
||||
response = self.get_request(self.root_uri + controller_member_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
result['ret'] = True
|
||||
data = response['data']
|
||||
|
||||
controller_result = {}
|
||||
for property in properties:
|
||||
if property in data:
|
||||
controller_result[property] = data[property]
|
||||
controller_results.append(controller_result)
|
||||
elif deprecated_key in data:
|
||||
controller_list = data[deprecated_key]
|
||||
for controller in controller_list:
|
||||
controller_result = {}
|
||||
for property in properties:
|
||||
@@ -767,7 +800,7 @@ class RedfishUtils(object):
|
||||
properties = ['BlockSizeBytes', 'CapableSpeedGbs', 'CapacityBytes',
|
||||
'EncryptionAbility', 'EncryptionStatus',
|
||||
'FailurePredicted', 'HotspareType', 'Id', 'Identifiers',
|
||||
'Manufacturer', 'MediaType', 'Model', 'Name',
|
||||
'Links', 'Manufacturer', 'MediaType', 'Model', 'Name',
|
||||
'PartNumber', 'PhysicalLocation', 'Protocol', 'Revision',
|
||||
'RotationSpeedRPM', 'SerialNumber', 'Status']
|
||||
|
||||
@@ -800,7 +833,25 @@ class RedfishUtils(object):
|
||||
return response
|
||||
data = response['data']
|
||||
controller_name = 'Controller 1'
|
||||
if 'StorageControllers' in data:
|
||||
if 'Controllers' in data:
|
||||
controllers_uri = data['Controllers'][u'@odata.id']
|
||||
|
||||
response = self.get_request(self.root_uri + controllers_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
result['ret'] = True
|
||||
cdata = response['data']
|
||||
|
||||
if cdata[u'Members']:
|
||||
controller_member_uri = cdata[u'Members'][0][u'@odata.id']
|
||||
|
||||
response = self.get_request(self.root_uri + controller_member_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
result['ret'] = True
|
||||
cdata = response['data']
|
||||
controller_name = cdata['Name']
|
||||
elif 'StorageControllers' in data:
|
||||
sc = data['StorageControllers']
|
||||
if sc:
|
||||
if 'Name' in sc[0]:
|
||||
@@ -819,7 +870,12 @@ class RedfishUtils(object):
|
||||
for property in properties:
|
||||
if property in data:
|
||||
if data[property] is not None:
|
||||
drive_result[property] = data[property]
|
||||
if property == "Links":
|
||||
if "Volumes" in data["Links"].keys():
|
||||
volumes = [v["@odata.id"] for v in data["Links"]["Volumes"]]
|
||||
drive_result["Volumes"] = volumes
|
||||
else:
|
||||
drive_result[property] = data[property]
|
||||
drive_results.append(drive_result)
|
||||
drives = {'Controller': controller_name,
|
||||
'Drives': drive_results}
|
||||
@@ -904,15 +960,7 @@ class RedfishUtils(object):
|
||||
return response
|
||||
data = response['data']
|
||||
controller_name = 'Controller %s' % str(idx)
|
||||
if 'StorageControllers' in data:
|
||||
sc = data['StorageControllers']
|
||||
if sc:
|
||||
if 'Name' in sc[0]:
|
||||
controller_name = sc[0]['Name']
|
||||
else:
|
||||
sc_id = sc[0].get('Id', '1')
|
||||
controller_name = 'Controller %s' % sc_id
|
||||
elif 'Controllers' in data:
|
||||
if 'Controllers' in data:
|
||||
response = self.get_request(self.root_uri + data['Controllers'][u'@odata.id'])
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
@@ -930,6 +978,14 @@ class RedfishUtils(object):
|
||||
else:
|
||||
controller_id = member_data.get('Id', '1')
|
||||
controller_name = 'Controller %s' % controller_id
|
||||
elif 'StorageControllers' in data:
|
||||
sc = data['StorageControllers']
|
||||
if sc:
|
||||
if 'Name' in sc[0]:
|
||||
controller_name = sc[0]['Name']
|
||||
else:
|
||||
sc_id = sc[0].get('Id', '1')
|
||||
controller_name = 'Controller %s' % sc_id
|
||||
volume_results = []
|
||||
volume_list = []
|
||||
if 'Volumes' in data:
|
||||
@@ -1032,7 +1088,12 @@ class RedfishUtils(object):
|
||||
# command should be PowerOn, PowerForceOff, etc.
|
||||
if not command.startswith('Power'):
|
||||
return {'ret': False, 'msg': 'Invalid Command (%s)' % command}
|
||||
reset_type = command[5:]
|
||||
|
||||
# Commands (except PowerCycle) will be stripped of the 'Power' prefix
|
||||
if command == 'PowerCycle':
|
||||
reset_type = command
|
||||
else:
|
||||
reset_type = command[5:]
|
||||
|
||||
# map Reboot to a ResetType that does a reboot
|
||||
if reset_type == 'Reboot':
|
||||
@@ -1499,29 +1560,37 @@ class RedfishUtils(object):
|
||||
|
||||
def _software_inventory(self, uri):
|
||||
result = {}
|
||||
response = self.get_request(self.root_uri + uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
result['ret'] = True
|
||||
data = response['data']
|
||||
|
||||
result['entries'] = []
|
||||
for member in data[u'Members']:
|
||||
uri = self.root_uri + member[u'@odata.id']
|
||||
# Get details for each software or firmware member
|
||||
response = self.get_request(uri)
|
||||
|
||||
while uri:
|
||||
response = self.get_request(self.root_uri + uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
result['ret'] = True
|
||||
|
||||
data = response['data']
|
||||
software = {}
|
||||
# Get these standard properties if present
|
||||
for key in ['Name', 'Id', 'Status', 'Version', 'Updateable',
|
||||
'SoftwareId', 'LowestSupportedVersion', 'Manufacturer',
|
||||
'ReleaseDate']:
|
||||
if key in data:
|
||||
software[key] = data.get(key)
|
||||
result['entries'].append(software)
|
||||
if data.get('Members@odata.nextLink'):
|
||||
uri = data.get('Members@odata.nextLink')
|
||||
else:
|
||||
uri = None
|
||||
|
||||
for member in data[u'Members']:
|
||||
fw_uri = self.root_uri + member[u'@odata.id']
|
||||
# Get details for each software or firmware member
|
||||
response = self.get_request(fw_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
result['ret'] = True
|
||||
data = response['data']
|
||||
software = {}
|
||||
# Get these standard properties if present
|
||||
for key in ['Name', 'Id', 'Status', 'Version', 'Updateable',
|
||||
'SoftwareId', 'LowestSupportedVersion', 'Manufacturer',
|
||||
'ReleaseDate']:
|
||||
if key in data:
|
||||
software[key] = data.get(key)
|
||||
result['entries'].append(software)
|
||||
|
||||
return result
|
||||
|
||||
def get_firmware_inventory(self):
|
||||
@@ -1685,6 +1754,7 @@ class RedfishUtils(object):
|
||||
image_file = update_opts.get('update_image_file')
|
||||
targets = update_opts.get('update_targets')
|
||||
apply_time = update_opts.get('update_apply_time')
|
||||
oem_params = update_opts.get('update_oem_params')
|
||||
|
||||
# Ensure the image file is provided
|
||||
if not image_file:
|
||||
@@ -1715,6 +1785,8 @@ class RedfishUtils(object):
|
||||
payload["Targets"] = targets
|
||||
if apply_time:
|
||||
payload["@Redfish.OperationApplyTime"] = apply_time
|
||||
if oem_params:
|
||||
payload["Oem"] = oem_params
|
||||
multipart_payload = {
|
||||
'UpdateParameters': {'content': json.dumps(payload), 'mime_type': 'application/json'},
|
||||
'UpdateFile': {'filename': image_file, 'content': image_payload, 'mime_type': 'application/octet-stream'}
|
||||
@@ -2400,7 +2472,7 @@ class RedfishUtils(object):
|
||||
result = {}
|
||||
properties = ['Name', 'Id', 'Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses',
|
||||
'NameServers', 'MACAddress', 'PermanentMACAddress',
|
||||
'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status']
|
||||
'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status', 'LinkStatus']
|
||||
response = self.get_request(self.root_uri + resource_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
@@ -3289,7 +3361,7 @@ class RedfishUtils(object):
|
||||
result = {}
|
||||
inventory = {}
|
||||
# Get these entries, but does not fail if not found
|
||||
properties = ['FirmwareVersion', 'ManagerType', 'Manufacturer', 'Model',
|
||||
properties = ['Id', 'FirmwareVersion', 'ManagerType', 'Manufacturer', 'Model',
|
||||
'PartNumber', 'PowerState', 'SerialNumber', 'Status', 'UUID']
|
||||
|
||||
response = self.get_request(self.root_uri + manager_uri)
|
||||
@@ -3372,6 +3444,25 @@ class RedfishUtils(object):
|
||||
|
||||
return self.patch_request(self.root_uri + secure_boot_url, body, check_pyld=True)
|
||||
|
||||
def set_secure_boot(self, secure_boot_enable):
|
||||
# This function enable Secure Boot on an OOB controller
|
||||
|
||||
response = self.get_request(self.root_uri + self.systems_uri)
|
||||
if response["ret"] is False:
|
||||
return response
|
||||
|
||||
server_details = response["data"]
|
||||
secure_boot_url = server_details["SecureBoot"]["@odata.id"]
|
||||
|
||||
response = self.get_request(self.root_uri + secure_boot_url)
|
||||
if response["ret"] is False:
|
||||
return response
|
||||
|
||||
body = {}
|
||||
body["SecureBootEnable"] = secure_boot_enable
|
||||
|
||||
return self.patch_request(self.root_uri + secure_boot_url, body, check_pyld=True)
|
||||
|
||||
def get_hpe_thermal_config(self):
|
||||
result = {}
|
||||
key = "Thermal"
|
||||
@@ -3403,3 +3494,238 @@ class RedfishUtils(object):
|
||||
fan_percent_min_config = hpe.get('FanPercentMinimum')
|
||||
result["fan_percent_min"] = fan_percent_min_config
|
||||
return result
|
||||
|
||||
def delete_volumes(self, storage_subsystem_id, volume_ids):
|
||||
# Find the Storage resource from the requested ComputerSystem resource
|
||||
response = self.get_request(self.root_uri + self.systems_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
storage_uri = data.get('Storage', {}).get('@odata.id')
|
||||
if storage_uri is None:
|
||||
return {'ret': False, 'msg': 'Storage resource not found'}
|
||||
|
||||
# Get Storage Collection
|
||||
response = self.get_request(self.root_uri + storage_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
|
||||
# Collect Storage Subsystems
|
||||
self.storage_subsystems_uris = [i['@odata.id'] for i in response['data'].get('Members', [])]
|
||||
if not self.storage_subsystems_uris:
|
||||
return {
|
||||
'ret': False,
|
||||
'msg': "StorageCollection's Members array is either empty or missing"}
|
||||
|
||||
# Matching Storage Subsystem ID with user input
|
||||
self.storage_subsystem_uri = ""
|
||||
for storage_subsystem_uri in self.storage_subsystems_uris:
|
||||
if storage_subsystem_uri.split("/")[-2] == storage_subsystem_id:
|
||||
self.storage_subsystem_uri = storage_subsystem_uri
|
||||
|
||||
if not self.storage_subsystem_uri:
|
||||
return {
|
||||
'ret': False,
|
||||
'msg': "Provided Storage Subsystem ID %s does not exist on the server" % storage_subsystem_id}
|
||||
|
||||
# Get Volume Collection
|
||||
response = self.get_request(self.root_uri + self.storage_subsystem_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
|
||||
response = self.get_request(self.root_uri + data['Volumes']['@odata.id'])
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
|
||||
# Collect Volumes
|
||||
self.volume_uris = [i['@odata.id'] for i in response['data'].get('Members', [])]
|
||||
if not self.volume_uris:
|
||||
return {
|
||||
'ret': True, 'changed': False,
|
||||
'msg': "VolumeCollection's Members array is either empty or missing"}
|
||||
|
||||
# Delete each volume
|
||||
for volume in self.volume_uris:
|
||||
if volume.split("/")[-1] in volume_ids:
|
||||
response = self.delete_request(self.root_uri + volume)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
|
||||
return {'ret': True, 'changed': True,
|
||||
'msg': "The following volumes were deleted: %s" % str(volume_ids)}
|
||||
|
||||
def create_volume(self, volume_details, storage_subsystem_id):
|
||||
# Find the Storage resource from the requested ComputerSystem resource
|
||||
response = self.get_request(self.root_uri + self.systems_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
storage_uri = data.get('Storage', {}).get('@odata.id')
|
||||
if storage_uri is None:
|
||||
return {'ret': False, 'msg': 'Storage resource not found'}
|
||||
|
||||
# Get Storage Collection
|
||||
response = self.get_request(self.root_uri + storage_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
|
||||
# Collect Storage Subsystems
|
||||
self.storage_subsystems_uris = [i['@odata.id'] for i in response['data'].get('Members', [])]
|
||||
if not self.storage_subsystems_uris:
|
||||
return {
|
||||
'ret': False,
|
||||
'msg': "StorageCollection's Members array is either empty or missing"}
|
||||
|
||||
# Matching Storage Subsystem ID with user input
|
||||
self.storage_subsystem_uri = ""
|
||||
for storage_subsystem_uri in self.storage_subsystems_uris:
|
||||
if storage_subsystem_uri.split("/")[-2] == storage_subsystem_id:
|
||||
self.storage_subsystem_uri = storage_subsystem_uri
|
||||
|
||||
if not self.storage_subsystem_uri:
|
||||
return {
|
||||
'ret': False,
|
||||
'msg': "Provided Storage Subsystem ID %s does not exist on the server" % storage_subsystem_id}
|
||||
|
||||
# Validate input parameters
|
||||
required_parameters = ['RAIDType', 'Drives', 'CapacityBytes']
|
||||
allowed_parameters = ['DisplayName', 'InitializeMethod', 'MediaSpanCount',
|
||||
'Name', 'ReadCachePolicy', 'StripSizeBytes', 'VolumeUsage', 'WriteCachePolicy']
|
||||
|
||||
for parameter in required_parameters:
|
||||
if not volume_details.get(parameter):
|
||||
return {
|
||||
'ret': False,
|
||||
'msg': "%s are required parameter to create a volume" % str(required_parameters)}
|
||||
|
||||
# Navigate to the volume uri of the correct storage subsystem
|
||||
response = self.get_request(self.root_uri + self.storage_subsystem_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
|
||||
# Deleting any volumes of RAIDType None present on the Storage Subsystem
|
||||
response = self.get_request(self.root_uri + data['Volumes']['@odata.id'])
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
volume_data = response['data']
|
||||
|
||||
if "Members" in volume_data:
|
||||
for member in volume_data["Members"]:
|
||||
response = self.get_request(self.root_uri + member['@odata.id'])
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
member_data = response['data']
|
||||
|
||||
if member_data["RAIDType"] == "None":
|
||||
response = self.delete_request(self.root_uri + member['@odata.id'])
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
|
||||
# Construct payload and issue POST command to create volume
|
||||
volume_details["Links"] = {}
|
||||
volume_details["Links"]["Drives"] = []
|
||||
for drive in volume_details["Drives"]:
|
||||
volume_details["Links"]["Drives"].append({"@odata.id": drive})
|
||||
del volume_details["Drives"]
|
||||
payload = volume_details
|
||||
response = self.post_request(self.root_uri + data['Volumes']['@odata.id'], payload)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
|
||||
return {'ret': True, 'changed': True,
|
||||
'msg': "Volume Created"}
|
||||
|
||||
def get_bios_registries(self):
|
||||
# Get /redfish/v1
|
||||
response = self.get_request(self.root_uri + self.systems_uri)
|
||||
if not response["ret"]:
|
||||
return response
|
||||
|
||||
server_details = response["data"]
|
||||
|
||||
# Get Registries URI
|
||||
if "Bios" not in server_details:
|
||||
msg = "Getting BIOS URI failed, Key 'Bios' not found in /redfish/v1/Systems/1/ response: %s"
|
||||
return {
|
||||
"ret": False,
|
||||
"msg": msg % str(server_details)
|
||||
}
|
||||
|
||||
bios_uri = server_details["Bios"]["@odata.id"]
|
||||
bios_resp = self.get_request(self.root_uri + bios_uri)
|
||||
if not bios_resp["ret"]:
|
||||
return bios_resp
|
||||
|
||||
bios_data = bios_resp["data"]
|
||||
attribute_registry = bios_data["AttributeRegistry"]
|
||||
|
||||
reg_uri = self.root_uri + self.service_root + "Registries/" + attribute_registry
|
||||
reg_resp = self.get_request(reg_uri)
|
||||
if not reg_resp["ret"]:
|
||||
return reg_resp
|
||||
|
||||
reg_data = reg_resp["data"]
|
||||
|
||||
# Get BIOS attribute registry URI
|
||||
lst = []
|
||||
|
||||
# Get the location URI
|
||||
response = self.check_location_uri(reg_data, reg_uri)
|
||||
if not response["ret"]:
|
||||
return response
|
||||
|
||||
rsp_data, rsp_uri = response["rsp_data"], response["rsp_uri"]
|
||||
|
||||
if "RegistryEntries" not in rsp_data:
|
||||
return {
|
||||
"msg": "'RegistryEntries' not present in %s response, %s" % (rsp_uri, str(rsp_data)),
|
||||
"ret": False
|
||||
}
|
||||
|
||||
return {
|
||||
"bios_registry": rsp_data,
|
||||
"bios_registry_uri": rsp_uri,
|
||||
"ret": True
|
||||
}
|
||||
|
||||
def check_location_uri(self, resp_data, resp_uri):
|
||||
# Get the location URI response
|
||||
# return {"msg": self.creds, "ret": False}
|
||||
vendor = self._get_vendor()['Vendor']
|
||||
rsp_uri = ""
|
||||
for loc in resp_data['Location']:
|
||||
if loc['Language'] == "en":
|
||||
rsp_uri = loc['Uri']
|
||||
if vendor == 'HPE':
|
||||
# WORKAROUND
|
||||
# HPE systems with iLO 4 will have BIOS Atrribute Registries location URI as a dictonary with key 'extref'
|
||||
# Hence adding condition to fetch the Uri
|
||||
if type(loc['Uri']) is dict and "extref" in loc['Uri'].keys():
|
||||
rsp_uri = loc['Uri']['extref']
|
||||
if not rsp_uri:
|
||||
msg = "Language 'en' not found in BIOS Atrribute Registries location, URI: %s, response: %s"
|
||||
return {
|
||||
"ret": False,
|
||||
"msg": msg % (resp_uri, str(resp_data))
|
||||
}
|
||||
|
||||
res = self.get_request(self.root_uri + rsp_uri)
|
||||
if res['ret'] is False:
|
||||
# WORKAROUND
|
||||
# HPE systems with iLO 4 or iLO5 compresses (gzip) for some URIs
|
||||
# Hence adding encoding to the header
|
||||
if vendor == 'HPE':
|
||||
override_headers = {"Accept-Encoding": "gzip"}
|
||||
res = self.get_request(self.root_uri + rsp_uri, override_headers=override_headers)
|
||||
if res['ret']:
|
||||
return {
|
||||
"ret": True,
|
||||
"rsp_data": res["data"],
|
||||
"rsp_uri": rsp_uri
|
||||
}
|
||||
return res
|
||||
|
||||
@@ -154,7 +154,7 @@ def _get_ctl_binary(module):
|
||||
if ctl_binary is not None:
|
||||
return ctl_binary
|
||||
|
||||
module.fail_json(msg="Neither of apache2ctl nor apachctl found. At least one apache control binary is necessary.")
|
||||
module.fail_json(msg="Neither of apache2ctl nor apachectl found. At least one apache control binary is necessary.")
|
||||
|
||||
|
||||
def _module_is_enabled(module):
|
||||
|
||||
@@ -25,6 +25,12 @@ attributes:
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
executable:
|
||||
description:
|
||||
- Path to the C(cargo) installed in the system.
|
||||
- If not specified, the module will look C(cargo) in E(PATH).
|
||||
type: path
|
||||
version_added: 7.5.0
|
||||
name:
|
||||
description:
|
||||
- The name of a Rust package to install.
|
||||
@@ -44,6 +50,14 @@ options:
|
||||
try to install all of them in this version.
|
||||
type: str
|
||||
required: false
|
||||
locked:
|
||||
description:
|
||||
- Install with locked dependencies.
|
||||
- This is only used when installing packages.
|
||||
required: false
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 7.5.0
|
||||
state:
|
||||
description:
|
||||
- The state of the Rust package.
|
||||
@@ -52,7 +66,7 @@ options:
|
||||
default: present
|
||||
choices: [ "present", "absent", "latest" ]
|
||||
requirements:
|
||||
- cargo installed in bin path (recommended /usr/local/bin)
|
||||
- cargo installed
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
@@ -60,6 +74,11 @@ EXAMPLES = r"""
|
||||
community.general.cargo:
|
||||
name: ludusavi
|
||||
|
||||
- name: Install "ludusavi" Rust package with locked dependencies
|
||||
community.general.cargo:
|
||||
name: ludusavi
|
||||
locked: true
|
||||
|
||||
- name: Install "ludusavi" Rust package in version 0.10.0
|
||||
community.general.cargo:
|
||||
name: ludusavi
|
||||
@@ -90,12 +109,12 @@ from ansible.module_utils.basic import AnsibleModule
|
||||
class Cargo(object):
|
||||
def __init__(self, module, **kwargs):
|
||||
self.module = module
|
||||
self.executable = [kwargs["executable"] or module.get_bin_path("cargo", True)]
|
||||
self.name = kwargs["name"]
|
||||
self.path = kwargs["path"]
|
||||
self.state = kwargs["state"]
|
||||
self.version = kwargs["version"]
|
||||
|
||||
self.executable = [module.get_bin_path("cargo", True)]
|
||||
self.locked = kwargs["locked"]
|
||||
|
||||
@property
|
||||
def path(self):
|
||||
@@ -132,6 +151,8 @@ class Cargo(object):
|
||||
def install(self, packages=None):
|
||||
cmd = ["install"]
|
||||
cmd.extend(packages or self.name)
|
||||
if self.locked:
|
||||
cmd.append("--locked")
|
||||
if self.path:
|
||||
cmd.append("--root")
|
||||
cmd.append(self.path)
|
||||
@@ -160,15 +181,16 @@ class Cargo(object):
|
||||
|
||||
def main():
|
||||
arg_spec = dict(
|
||||
executable=dict(default=None, type="path"),
|
||||
name=dict(required=True, type="list", elements="str"),
|
||||
path=dict(default=None, type="path"),
|
||||
state=dict(default="present", choices=["present", "absent", "latest"]),
|
||||
version=dict(default=None, type="str"),
|
||||
locked=dict(default=False, type="bool"),
|
||||
)
|
||||
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
|
||||
|
||||
name = module.params["name"]
|
||||
path = module.params["path"]
|
||||
state = module.params["state"]
|
||||
version = module.params["version"]
|
||||
|
||||
@@ -180,7 +202,7 @@ def main():
|
||||
LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C"
|
||||
)
|
||||
|
||||
cargo = Cargo(module, name=name, path=path, state=state, version=version)
|
||||
cargo = Cargo(module, **module.params)
|
||||
changed, out, err = False, None, None
|
||||
installed_packages = cargo.get_installed()
|
||||
if state == "present":
|
||||
|
||||
644
plugins/modules/consul_role.py
Normal file
644
plugins/modules/consul_role.py
Normal file
@@ -0,0 +1,644 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2022, Håkon Lerring
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: consul_role
|
||||
short_description: Manipulate Consul roles
|
||||
version_added: 7.5.0
|
||||
description:
|
||||
- Allows the addition, modification and deletion of roles in a consul
|
||||
cluster via the agent. For more details on using and configuring ACLs,
|
||||
see U(https://www.consul.io/docs/guides/acl.html).
|
||||
author:
|
||||
- Håkon Lerring (@Hakon)
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- A name used to identify the role.
|
||||
required: true
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- whether the role should be present or absent.
|
||||
required: false
|
||||
choices: ['present', 'absent']
|
||||
default: present
|
||||
type: str
|
||||
description:
|
||||
description:
|
||||
- Description of the role.
|
||||
- If not specified, the assigned description will not be changed.
|
||||
required: false
|
||||
type: str
|
||||
policies:
|
||||
type: list
|
||||
elements: dict
|
||||
description:
|
||||
- List of policies to attach to the role. Each policy is a dict.
|
||||
- If the parameter is left blank, any policies currently assigned will not be changed.
|
||||
- Any empty array (V([])) will clear any policies previously set.
|
||||
required: false
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- The name of the policy to attach to this role; see M(community.general.consul_policy) for more info.
|
||||
- Either this or O(policies[].id) must be specified.
|
||||
type: str
|
||||
id:
|
||||
description:
|
||||
- The ID of the policy to attach to this role; see M(community.general.consul_policy) for more info.
|
||||
- Either this or O(policies[].name) must be specified.
|
||||
type: str
|
||||
service_identities:
|
||||
type: list
|
||||
elements: dict
|
||||
description:
|
||||
- List of service identities to attach to the role.
|
||||
- If not specified, any service identities currently assigned will not be changed.
|
||||
- If the parameter is an empty array (V([])), any node identities assigned will be unassigned.
|
||||
required: false
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- The name of the node.
|
||||
- Must not be longer than 256 characters, must start and end with a lowercase alphanumeric character.
|
||||
- May only contain lowercase alphanumeric characters as well as - and _.
|
||||
type: str
|
||||
required: true
|
||||
datacenters:
|
||||
description:
|
||||
- The datacenters the policies will be effective.
|
||||
- This will result in effective policy only being valid in this datacenter.
|
||||
- If an empty array (V([])) is specified, the policies will be valid in all datacenters.
|
||||
- including those which do not yet exist but may in the future.
|
||||
type: list
|
||||
elements: str
|
||||
required: true
|
||||
node_identities:
|
||||
type: list
|
||||
elements: dict
|
||||
description:
|
||||
- List of node identities to attach to the role.
|
||||
- If not specified, any node identities currently assigned will not be changed.
|
||||
- If the parameter is an empty array (V([])), any node identities assigned will be unassigned.
|
||||
required: false
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- The name of the node.
|
||||
- Must not be longer than 256 characters, must start and end with a lowercase alphanumeric character.
|
||||
- May only contain lowercase alphanumeric characters as well as - and _.
|
||||
type: str
|
||||
required: true
|
||||
datacenter:
|
||||
description:
|
||||
- The nodes datacenter.
|
||||
- This will result in effective policy only being valid in this datacenter.
|
||||
type: str
|
||||
required: true
|
||||
host:
|
||||
description:
|
||||
- Host of the consul agent, defaults to V(localhost).
|
||||
required: false
|
||||
default: localhost
|
||||
type: str
|
||||
port:
|
||||
type: int
|
||||
description:
|
||||
- The port on which the consul agent is running.
|
||||
required: false
|
||||
default: 8500
|
||||
scheme:
|
||||
description:
|
||||
- The protocol scheme on which the consul agent is running.
|
||||
required: false
|
||||
default: http
|
||||
type: str
|
||||
token:
|
||||
description:
|
||||
- A management token is required to manipulate the roles.
|
||||
type: str
|
||||
validate_certs:
|
||||
type: bool
|
||||
description:
|
||||
- Whether to verify the TLS certificate of the consul agent.
|
||||
required: false
|
||||
default: true
|
||||
requirements:
|
||||
- requests
|
||||
'''
|
||||
|
||||
EXAMPLES = """
|
||||
- name: Create a role with 2 policies
|
||||
community.general.consul_role:
|
||||
host: consul1.example.com
|
||||
token: some_management_acl
|
||||
name: foo-role
|
||||
policies:
|
||||
- id: 783beef3-783f-f41f-7422-7087dc272765
|
||||
- name: "policy-1"
|
||||
|
||||
- name: Create a role with service identity
|
||||
community.general.consul_role:
|
||||
host: consul1.example.com
|
||||
token: some_management_acl
|
||||
name: foo-role-2
|
||||
service_identities:
|
||||
- name: web
|
||||
datacenters:
|
||||
- dc1
|
||||
|
||||
- name: Create a role with node identity
|
||||
community.general.consul_role:
|
||||
host: consul1.example.com
|
||||
token: some_management_acl
|
||||
name: foo-role-3
|
||||
node_identities:
|
||||
- name: node-1
|
||||
datacenter: dc2
|
||||
|
||||
- name: Remove a role
|
||||
community.general.consul_role:
|
||||
host: consul1.example.com
|
||||
token: some_management_acl
|
||||
name: foo-role-3
|
||||
state: absent
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
role:
|
||||
description: The role object.
|
||||
returned: success
|
||||
type: dict
|
||||
sample:
|
||||
{
|
||||
"CreateIndex": 39,
|
||||
"Description": "",
|
||||
"Hash": "Trt0QJtxVEfvTTIcdTUbIJRr6Dsi6E4EcwSFxx9tCYM=",
|
||||
"ID": "9a300b8d-48db-b720-8544-a37c0f5dafb5",
|
||||
"ModifyIndex": 39,
|
||||
"Name": "foo-role",
|
||||
"Policies": [
|
||||
{"ID": "b1a00172-d7a1-0e66-a12e-7a4045c4b774", "Name": "foo-access"}
|
||||
]
|
||||
}
|
||||
operation:
|
||||
description: The operation performed on the role.
|
||||
returned: changed
|
||||
type: str
|
||||
sample: update
|
||||
"""
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.basic import missing_required_lib
|
||||
from ansible_collections.community.general.plugins.module_utils.consul import (
|
||||
get_consul_url, get_auth_headers, handle_consul_response_error)
|
||||
import traceback
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
|
||||
try:
|
||||
from requests.exceptions import ConnectionError
|
||||
import requests
|
||||
HAS_REQUESTS = True
|
||||
except ImportError:
|
||||
HAS_REQUESTS = False
|
||||
REQUESTS_IMP_ERR = traceback.format_exc()
|
||||
|
||||
TOKEN_PARAMETER_NAME = "token"
|
||||
HOST_PARAMETER_NAME = "host"
|
||||
SCHEME_PARAMETER_NAME = "scheme"
|
||||
VALIDATE_CERTS_PARAMETER_NAME = "validate_certs"
|
||||
NAME_PARAMETER_NAME = "name"
|
||||
DESCRIPTION_PARAMETER_NAME = "description"
|
||||
PORT_PARAMETER_NAME = "port"
|
||||
POLICIES_PARAMETER_NAME = "policies"
|
||||
SERVICE_IDENTITIES_PARAMETER_NAME = "service_identities"
|
||||
NODE_IDENTITIES_PARAMETER_NAME = "node_identities"
|
||||
STATE_PARAMETER_NAME = "state"
|
||||
|
||||
PRESENT_STATE_VALUE = "present"
|
||||
ABSENT_STATE_VALUE = "absent"
|
||||
|
||||
REMOVE_OPERATION = "remove"
|
||||
UPDATE_OPERATION = "update"
|
||||
CREATE_OPERATION = "create"
|
||||
|
||||
POLICY_RULE_SPEC = dict(
|
||||
name=dict(type='str'),
|
||||
id=dict(type='str'),
|
||||
)
|
||||
|
||||
NODE_ID_RULE_SPEC = dict(
|
||||
name=dict(type='str', required=True),
|
||||
datacenter=dict(type='str', required=True),
|
||||
)
|
||||
|
||||
SERVICE_ID_RULE_SPEC = dict(
|
||||
name=dict(type='str', required=True),
|
||||
datacenters=dict(type='list', elements='str', required=True),
|
||||
)
|
||||
|
||||
_ARGUMENT_SPEC = {
|
||||
TOKEN_PARAMETER_NAME: dict(no_log=True),
|
||||
PORT_PARAMETER_NAME: dict(default=8500, type='int'),
|
||||
HOST_PARAMETER_NAME: dict(default='localhost'),
|
||||
SCHEME_PARAMETER_NAME: dict(default='http'),
|
||||
VALIDATE_CERTS_PARAMETER_NAME: dict(type='bool', default=True),
|
||||
NAME_PARAMETER_NAME: dict(required=True),
|
||||
DESCRIPTION_PARAMETER_NAME: dict(required=False, type='str', default=None),
|
||||
POLICIES_PARAMETER_NAME: dict(type='list', elements='dict', options=POLICY_RULE_SPEC,
|
||||
mutually_exclusive=[('name', 'id')], required_one_of=[('name', 'id')], default=None),
|
||||
SERVICE_IDENTITIES_PARAMETER_NAME: dict(type='list', elements='dict', options=SERVICE_ID_RULE_SPEC, default=None),
|
||||
NODE_IDENTITIES_PARAMETER_NAME: dict(type='list', elements='dict', options=NODE_ID_RULE_SPEC, default=None),
|
||||
STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]),
|
||||
}
|
||||
|
||||
|
||||
def compare_consul_api_role_policy_objects(first, second):
    """
    Compare two lists of policy dicts for equality, ignoring the 'ID' element.

    FIX: the original implementation popped 'ID' out of the caller's dicts,
    silently mutating the role data fetched from the Consul API (so the role
    returned in check mode lost its policy IDs). Compare stripped copies
    instead and leave both inputs untouched.

    :param first: list of policy dicts (may contain an 'ID' key)
    :param second: list of policy dicts (may contain an 'ID' key)
    :return: True if the lists are equal once 'ID' is ignored, else False
    """
    def _without_id(policies):
        # Build new dicts so the caller's data is never modified.
        return [dict((k, v) for k, v in p.items() if k != 'ID') for p in policies]

    return _without_id(first) == _without_id(second)
|
||||
|
||||
|
||||
def update_role(role, configuration):
    """
    Update an existing role via PUT /acl/role/<id>.

    :param role: the existing role dict as returned by the Consul API
    :param configuration: Configuration object holding the module parameters
    :return: Output with changed flag, operation name, and the resulting role

    In check mode the expected role is computed locally and compared against
    the existing one; otherwise the PUT is issued and the API response is
    compared against the previous state to decide 'changed'.
    """
    url = '%s/acl/role/%s' % (get_consul_url(configuration),
                              role['ID'])
    headers = get_auth_headers(configuration)

    update_role_data = {
        'Name': configuration.name,
        'Description': configuration.description,
    }

    # check if the user omitted the description, policies, service identities, or node identities
    # (a list of [None] is the Configuration sentinel for "parameter not supplied")

    description_specified = configuration.description is not None

    policy_specified = True
    if len(configuration.policies) == 1 and configuration.policies[0] is None:
        policy_specified = False

    service_id_specified = True
    if len(configuration.service_identities) == 1 and configuration.service_identities[0] is None:
        service_id_specified = False

    node_id_specified = True
    if len(configuration.node_identities) == 1 and configuration.node_identities[0] is None:
        node_id_specified = False

    if description_specified:
        update_role_data["Description"] = configuration.description

    if policy_specified:
        update_role_data["Policies"] = [x.to_dict() for x in configuration.policies]

    # Service identities need Consul >= 1.5.0, node identities >= 1.8.0;
    # older agents reject unknown payload fields.
    if configuration.version >= ConsulVersion("1.5.0") and service_id_specified:
        update_role_data["ServiceIdentities"] = [
            x.to_dict() for x in configuration.service_identities]

    if configuration.version >= ConsulVersion("1.8.0") and node_id_specified:
        update_role_data["NodeIdentities"] = [
            x.to_dict() for x in configuration.node_identities]

    if configuration.check_mode:
        # Compare each field locally; for omitted fields, carry over the
        # existing value so the reported role reflects what would remain.
        description_changed = False
        if description_specified:
            description_changed = role.get('Description') != update_role_data["Description"]
        else:
            update_role_data["Description"] = role.get("Description")

        policies_changed = False
        if policy_specified:
            # Policy comparison ignores the server-assigned 'ID' element.
            policies_changed = not (
                compare_consul_api_role_policy_objects(role.get('Policies', []), update_role_data.get('Policies', [])))
        else:
            if role.get('Policies') is not None:
                update_role_data["Policies"] = role.get('Policies')

        service_ids_changed = False
        if service_id_specified:
            service_ids_changed = role.get('ServiceIdentities') != update_role_data.get('ServiceIdentities')
        else:
            if role.get('ServiceIdentities') is not None:
                update_role_data["ServiceIdentities"] = role.get('ServiceIdentities')

        node_ids_changed = False
        if node_id_specified:
            node_ids_changed = role.get('NodeIdentities') != update_role_data.get('NodeIdentities')
        else:
            if role.get('NodeIdentities'):
                update_role_data["NodeIdentities"] = role.get('NodeIdentities')

        changed = (
            description_changed or
            policies_changed or
            service_ids_changed or
            node_ids_changed
        )
        return Output(changed=changed, operation=UPDATE_OPERATION, role=update_role_data)
    else:
        # if description, policies, service or node id are not specified; we need to get the existing value and apply it
        if not description_specified and role.get('Description') is not None:
            update_role_data["Description"] = role.get('Description')

        if not policy_specified and role.get('Policies') is not None:
            update_role_data["Policies"] = role.get('Policies')

        if not service_id_specified and role.get('ServiceIdentities') is not None:
            update_role_data["ServiceIdentities"] = role.get('ServiceIdentities')

        if not node_id_specified and role.get('NodeIdentities') is not None:
            update_role_data["NodeIdentities"] = role.get('NodeIdentities')

        response = requests.put(url, headers=headers, json=update_role_data, verify=configuration.validate_certs)
        handle_consul_response_error(response)

        resulting_role = response.json()
        # 'changed' is derived by diffing the previous role against the
        # role the API returned after the PUT.
        changed = (
            role['Description'] != resulting_role['Description'] or
            role.get('Policies', None) != resulting_role.get('Policies', None) or
            role.get('ServiceIdentities', None) != resulting_role.get('ServiceIdentities', None) or
            role.get('NodeIdentities', None) != resulting_role.get('NodeIdentities', None)
        )

        return Output(changed=changed, operation=UPDATE_OPERATION, role=resulting_role)
|
||||
|
||||
|
||||
def create_role(configuration):
    """
    Create a new role via PUT /acl/role.

    :param configuration: Configuration object holding the module parameters
    :return: Output with changed=True; includes the created role unless
             running in check mode (no request is made then)
    """
    url = '%s/acl/role' % get_consul_url(configuration)
    headers = get_auth_headers(configuration)

    # check if the user omitted policies, service identities, or node identities
    # (a list of [None] is the Configuration sentinel for "parameter not supplied")
    policy_specified = True
    if len(configuration.policies) == 1 and configuration.policies[0] is None:
        policy_specified = False

    service_id_specified = True
    if len(configuration.service_identities) == 1 and configuration.service_identities[0] is None:
        service_id_specified = False

    node_id_specified = True
    if len(configuration.node_identities) == 1 and configuration.node_identities[0] is None:
        node_id_specified = False

    # get rid of None item so we can set an empty list for policies, service identities and node identities
    if not policy_specified:
        configuration.policies.pop()

    if not service_id_specified:
        configuration.service_identities.pop()

    if not node_id_specified:
        configuration.node_identities.pop()

    create_role_data = {
        'Name': configuration.name,
        'Description': configuration.description,
        'Policies': [x.to_dict() for x in configuration.policies],
    }
    # Service identities need Consul >= 1.5.0, node identities >= 1.8.0;
    # older agents reject unknown payload fields.
    if configuration.version >= ConsulVersion("1.5.0"):
        create_role_data["ServiceIdentities"] = [x.to_dict() for x in configuration.service_identities]

    if configuration.version >= ConsulVersion("1.8.0"):
        create_role_data["NodeIdentities"] = [x.to_dict() for x in configuration.node_identities]

    if not configuration.check_mode:
        response = requests.put(url, headers=headers, json=create_role_data, verify=configuration.validate_certs)
        handle_consul_response_error(response)

        resulting_role = response.json()

        return Output(changed=True, operation=CREATE_OPERATION, role=resulting_role)
    else:
        # Check mode: creation would always be a change; no role to report.
        return Output(changed=True, operation=CREATE_OPERATION)
|
||||
|
||||
|
||||
def remove_role(configuration):
    """
    Delete the role named in the configuration, if it exists.

    :param configuration: Configuration object holding the module parameters
    :return: Output with changed=True when a role was (or would be) removed
    """
    existing = get_roles(configuration)
    changed = configuration.name in existing

    if changed:
        role_id = existing[configuration.name]['ID']
        # In check mode we only report the change without issuing the DELETE.
        if not configuration.check_mode:
            url = '%s/acl/role/%s' % (get_consul_url(configuration), role_id)
            response = requests.delete(
                url,
                headers=get_auth_headers(configuration),
                verify=configuration.validate_certs,
            )
            handle_consul_response_error(response)

    return Output(changed=changed, operation=REMOVE_OPERATION)
|
||||
|
||||
|
||||
def get_roles(configuration):
    """
    Fetch all ACL roles from the agent.

    :param configuration: Configuration object holding the module parameters
    :return: dict mapping role name to role dict (unnamed roles are skipped)
    """
    response = requests.get(
        '%s/acl/roles' % get_consul_url(configuration),
        headers=get_auth_headers(configuration),
        verify=configuration.validate_certs,
    )
    handle_consul_response_error(response)
    return dict(
        (entry['Name'], entry)
        for entry in response.json()
        if entry['Name'] is not None
    )
|
||||
|
||||
|
||||
def get_consul_version(configuration):
    """
    Ask the agent for its own version via GET /agent/self.

    :param configuration: Configuration object holding the module parameters
    :return: ConsulVersion parsed from the agent's reported Config.Version
    """
    response = requests.get(
        '%s/agent/self' % get_consul_url(configuration),
        headers=get_auth_headers(configuration),
        verify=configuration.validate_certs,
    )
    handle_consul_response_error(response)
    return ConsulVersion(response.json()["Config"]["Version"])
|
||||
|
||||
|
||||
def set_role(configuration):
    """
    Ensure the role exists: update it when present, create it otherwise.

    :param configuration: Configuration object holding the module parameters
    :return: Output from update_role or create_role
    """
    existing = get_roles(configuration)
    current = existing.get(configuration.name)
    if current is None:
        return create_role(configuration)
    return update_role(current, configuration)
|
||||
|
||||
|
||||
class ConsulVersion:
    """
    Comparable wrapper around a Consul semantic version string ("X.Y.Z").

    FIX: the original comparison concatenated the component STRINGS before
    converting to int, so "2.0.0" became 200 and "1.15.0" became 1150 —
    making 2.0.0 compare LESS than 1.15.0. Compare (major, minor, patch)
    integer tuples instead, which orders versions correctly.

    NOTE(review): like the original, this assumes purely numeric components;
    a suffix such as "1.8.0+ent" would raise ValueError — confirm against
    the versions the agent actually reports.
    """

    def __init__(self, version_string):
        # Parse "X.Y.Z" into integer components for tuple comparison.
        parts = version_string.split('.')
        self.major = int(parts[0])
        self.minor = int(parts[1])
        self.patch = int(parts[2])

    def _as_tuple(self):
        # Canonical comparable form.
        return (self.major, self.minor, self.patch)

    def __ge__(self, other):
        return self._as_tuple() >= other._as_tuple()

    def __le__(self, other):
        return self._as_tuple() <= other._as_tuple()
|
||||
|
||||
|
||||
class ServiceIdentity:
    """
    A service identity attached to a role.

    Wraps the module's lowercase input dict and renders it in the
    CamelCase form the Consul API expects.
    """

    def __init__(self, input):
        # Reject anything that is not a dict carrying at least 'name'.
        if not isinstance(input, dict) or 'name' not in input:
            raise ValueError(
                "Each element of service_identities must be a dict with the keys name and optionally datacenters")
        self.name = input["name"]
        # 'datacenters' is optional; absent means valid everywhere (None).
        self.datacenters = input.get("datacenters")

    def to_dict(self):
        """Render as a Consul API ServiceIdentity payload."""
        return {"ServiceName": self.name, "Datacenters": self.datacenters}
|
||||
|
||||
|
||||
class NodeIdentity:
    """
    A node identity attached to a role.

    Wraps the module's lowercase input dict and renders it in the
    CamelCase form the Consul API expects.
    """

    def __init__(self, input):
        # Reject anything that is not a dict carrying at least 'name'.
        if not isinstance(input, dict) or 'name' not in input:
            raise ValueError(
                "Each element of node_identities must be a dict with the keys name and optionally datacenter")
        self.name = input["name"]
        # 'datacenter' is optional; absent means unrestricted (None).
        self.datacenter = input.get("datacenter")

    def to_dict(self):
        """Render as a Consul API NodeIdentity payload."""
        return {"NodeName": self.name, "Datacenter": self.datacenter}
|
||||
|
||||
|
||||
class RoleLink:
    """
    Reference to a role by id and/or name, rendered in Consul API casing.
    Missing keys become None.
    """

    def __init__(self, dict):
        self.id = dict.get("id")
        self.name = dict.get("name")

    def to_dict(self):
        """Render as a Consul API role link payload."""
        return {"ID": self.id, "Name": self.name}
|
||||
|
||||
|
||||
class PolicyLink:
    """
    Reference to a policy by id and/or name, rendered in Consul API casing.
    Missing keys become None.
    """

    def __init__(self, dict):
        self.id = dict.get("id")
        self.name = dict.get("name")

    def to_dict(self):
        """Render as a Consul API policy link payload."""
        return {"ID": self.id, "Name": self.name}
|
||||
|
||||
|
||||
class Configuration:
    """
    Configuration for this module.

    Wraps the raw module parameters; list parameters are converted into
    their wrapper objects (PolicyLink / ServiceIdentity / NodeIdentity).
    A value of [None] marks a parameter the user did not supply at all,
    which downstream code treats as "leave the existing value alone".
    """

    def __init__(self, token=None, host=None, scheme=None, validate_certs=None, name=None, description=None, port=None,
                 policies=None, service_identities=None, node_identities=None, state=None, check_mode=None):
        self.token = token  # type: str
        self.host = host  # type: str
        self.port = port  # type: int
        self.scheme = scheme  # type: str
        self.validate_certs = validate_certs  # type: bool
        self.name = name  # type: str
        self.description = description  # type: str
        # [None] is the "parameter omitted" sentinel for each list below.
        if policies is None:
            self.policies = [None]
        else:
            self.policies = [PolicyLink(p) for p in policies]  # type: list(PolicyLink)
        if service_identities is None:
            self.service_identities = [None]
        else:
            self.service_identities = [ServiceIdentity(s) for s in service_identities]  # type: list(ServiceIdentity)
        if node_identities is None:
            self.node_identities = [None]
        else:
            self.node_identities = [NodeIdentity(n) for n in node_identities]  # type: list(NodeIdentity)
        self.state = state  # type: str
        self.check_mode = check_mode  # type: bool
|
||||
|
||||
|
||||
class Output:
    """
    Output of an action of this module.

    Plain value holder returned by create_role/update_role/remove_role,
    later unpacked into module.exit_json().
    """

    def __init__(self, changed=None, operation=None, role=None):
        self.changed = changed  # type: bool
        self.operation = operation  # type: str
        self.role = role  # type: dict
|
||||
|
||||
|
||||
def main():
    """
    Main method.

    Builds the Configuration from module parameters, determines the agent
    version, then creates/updates or removes the role depending on 'state'.
    """
    module = AnsibleModule(_ARGUMENT_SPEC, supports_check_mode=True)

    # 'requests' is an optional dependency; fail with the standard message.
    if not HAS_REQUESTS:
        module.fail_json(msg=missing_required_lib("requests"),
                         exception=REQUESTS_IMP_ERR)

    try:
        configuration = Configuration(
            token=module.params.get(TOKEN_PARAMETER_NAME),
            host=module.params.get(HOST_PARAMETER_NAME),
            port=module.params.get(PORT_PARAMETER_NAME),
            scheme=module.params.get(SCHEME_PARAMETER_NAME),
            validate_certs=module.params.get(VALIDATE_CERTS_PARAMETER_NAME),
            name=module.params.get(NAME_PARAMETER_NAME),
            description=module.params.get(DESCRIPTION_PARAMETER_NAME),
            policies=module.params.get(POLICIES_PARAMETER_NAME),
            service_identities=module.params.get(SERVICE_IDENTITIES_PARAMETER_NAME),
            node_identities=module.params.get(NODE_IDENTITIES_PARAMETER_NAME),
            state=module.params.get(STATE_PARAMETER_NAME),
            check_mode=module.check_mode

        )
    except ValueError as err:
        # Raised by the identity wrappers when an element is malformed.
        module.fail_json(msg='Configuration error: %s' % str(err))
        return

    try:

        # Version gates which payload fields the agent accepts.
        version = get_consul_version(configuration)
        configuration.version = version

        if configuration.state == PRESENT_STATE_VALUE:
            output = set_role(configuration)
        else:
            output = remove_role(configuration)
    except ConnectionError as e:
        module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
            configuration.host, configuration.port, str(e)))
        # NOTE(review): fail_json exits the process, so this raise is
        # normally unreachable; kept for safety if fail_json returns.
        raise

    return_values = dict(changed=output.changed, operation=output.operation, role=output.role)
    module.exit_json(**return_values)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -183,8 +183,9 @@ class CPANMinus(ModuleHelper):
|
||||
if v.name and v.from_path:
|
||||
self.do_raise("Parameters 'name' and 'from_path' are mutually exclusive when 'mode=new'")
|
||||
|
||||
self.command = self.get_bin_path(v.executable if v.executable else self.command)
|
||||
self.vars.set("binary", self.command)
|
||||
self.command = v.executable if v.executable else self.command
|
||||
self.runner = CmdRunner(self.module, self.command, self.command_args_formats, check_rc=True)
|
||||
self.vars.binary = self.runner.binary
|
||||
|
||||
def _is_package_installed(self, name, locallib, version):
|
||||
def process(rc, out, err):
|
||||
@@ -220,8 +221,6 @@ class CPANMinus(ModuleHelper):
|
||||
self.do_raise(msg=err, cmd=self.vars.cmd_args)
|
||||
return 'is up to date' not in err and 'is up to date' not in out
|
||||
|
||||
runner = CmdRunner(self.module, self.command, self.command_args_formats, check_rc=True)
|
||||
|
||||
v = self.vars
|
||||
pkg_param = 'from_path' if v.from_path else 'name'
|
||||
|
||||
@@ -235,7 +234,7 @@ class CPANMinus(ModuleHelper):
|
||||
return
|
||||
pkg_spec = self.sanitize_pkg_spec_version(v[pkg_param], v.version)
|
||||
|
||||
with runner(['notest', 'locallib', 'mirror', 'mirror_only', 'installdeps', 'pkg_spec'], output_process=process) as ctx:
|
||||
with self.runner(['notest', 'locallib', 'mirror', 'mirror_only', 'installdeps', 'pkg_spec'], output_process=process) as ctx:
|
||||
self.changed = ctx.run(pkg_spec=pkg_spec)
|
||||
|
||||
|
||||
|
||||
@@ -78,6 +78,7 @@ EXAMPLES = '''
|
||||
import syslog
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
|
||||
|
||||
|
||||
class EjabberdUser(object):
|
||||
@@ -95,6 +96,17 @@ class EjabberdUser(object):
|
||||
self.host = module.params.get('host')
|
||||
self.user = module.params.get('username')
|
||||
self.pwd = module.params.get('password')
|
||||
self.runner = CmdRunner(
|
||||
module,
|
||||
command="ejabberdctl",
|
||||
arg_formats=dict(
|
||||
cmd=cmd_runner_fmt.as_list(),
|
||||
host=cmd_runner_fmt.as_list(),
|
||||
user=cmd_runner_fmt.as_list(),
|
||||
pwd=cmd_runner_fmt.as_list(),
|
||||
),
|
||||
check_rc=False,
|
||||
)
|
||||
|
||||
@property
|
||||
def changed(self):
|
||||
@@ -102,7 +114,7 @@ class EjabberdUser(object):
|
||||
changed. It will return True if the user does not match the supplied
|
||||
credentials and False if it does not
|
||||
"""
|
||||
return self.run_command('check_password', [self.user, self.host, self.pwd])
|
||||
return self.run_command('check_password', 'user host pwd', (lambda rc, out, err: bool(rc)))
|
||||
|
||||
@property
|
||||
def exists(self):
|
||||
@@ -110,7 +122,7 @@ class EjabberdUser(object):
|
||||
host specified. If the user exists True is returned, otherwise False
|
||||
is returned
|
||||
"""
|
||||
return self.run_command('check_account', [self.user, self.host])
|
||||
return self.run_command('check_account', 'user host', (lambda rc, out, err: not bool(rc)))
|
||||
|
||||
def log(self, entry):
|
||||
""" This method will log information to the local syslog facility """
|
||||
@@ -118,29 +130,36 @@ class EjabberdUser(object):
|
||||
syslog.openlog('ansible-%s' % self.module._name)
|
||||
syslog.syslog(syslog.LOG_NOTICE, entry)
|
||||
|
||||
def run_command(self, cmd, options):
|
||||
def run_command(self, cmd, options, process=None):
|
||||
""" This method will run the any command specified and return the
|
||||
returns using the Ansible common module
|
||||
"""
|
||||
cmd = [self.module.get_bin_path('ejabberdctl', required=True), cmd] + options
|
||||
self.log('command: %s' % " ".join(cmd))
|
||||
return self.module.run_command(cmd)
|
||||
def _proc(*a):
|
||||
return a
|
||||
|
||||
if process is None:
|
||||
process = _proc
|
||||
|
||||
with self.runner("cmd " + options, output_process=process) as ctx:
|
||||
res = ctx.run(cmd=cmd, host=self.host, user=self.user, pwd=self.pwd)
|
||||
self.log('command: %s' % " ".join(ctx.run_info['cmd']))
|
||||
return res
|
||||
|
||||
def update(self):
|
||||
""" The update method will update the credentials for the user provided
|
||||
"""
|
||||
return self.run_command('change_password', [self.user, self.host, self.pwd])
|
||||
return self.run_command('change_password', 'user host pwd')
|
||||
|
||||
def create(self):
|
||||
""" The create method will create a new user on the host with the
|
||||
password provided
|
||||
"""
|
||||
return self.run_command('register', [self.user, self.host, self.pwd])
|
||||
return self.run_command('register', 'user host pwd')
|
||||
|
||||
def delete(self):
|
||||
""" The delete method will delete the user from the host
|
||||
"""
|
||||
return self.run_command('unregister', [self.user, self.host])
|
||||
return self.run_command('unregister', 'user host')
|
||||
|
||||
|
||||
def main():
|
||||
@@ -150,7 +169,7 @@ def main():
|
||||
username=dict(required=True, type='str'),
|
||||
password=dict(type='str', no_log=True),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
logging=dict(default=False, type='bool') # deprecate in favour of c.g.syslogger?
|
||||
logging=dict(default=False, type='bool', removed_in_version='10.0.0', removed_from_collection='community.general'),
|
||||
),
|
||||
required_if=[
|
||||
('state', 'present', ['password']),
|
||||
|
||||
108
plugins/modules/gio_mime.py
Normal file
108
plugins/modules/gio_mime.py
Normal file
@@ -0,0 +1,108 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: gio_mime
|
||||
author:
|
||||
- "Alexei Znamensky (@russoz)"
|
||||
short_description: Set default handler for MIME type, for applications using Gnome GIO
|
||||
version_added: 7.5.0
|
||||
description:
|
||||
  - This module allows configuring the default handler for a specific MIME type, to be used by applications built with the GNOME GIO API.
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: full
|
||||
options:
|
||||
mime_type:
|
||||
description:
|
||||
- MIME type for which a default handler will be set.
|
||||
type: str
|
||||
required: true
|
||||
handler:
|
||||
description:
|
||||
- Default handler will be set for the MIME type.
|
||||
type: str
|
||||
required: true
|
||||
notes:
|
||||
- This module is a thin wrapper around the C(gio mime) command (and subcommand).
|
||||
- See man gio(1) for more details.
|
||||
seealso:
|
||||
- name: GIO Documentation
|
||||
    description: Reference documentation for the GIO API.
|
||||
link: https://docs.gtk.org/gio/
|
||||
'''
|
||||
|
||||
EXAMPLES = """
|
||||
- name: Set chrome as the default handler for https
|
||||
community.general.gio_mime:
|
||||
mime_type: x-scheme-handler/https
|
||||
handler: google-chrome.desktop
|
||||
register: result
|
||||
"""
|
||||
|
||||
RETURN = '''
|
||||
handler:
|
||||
description:
|
||||
- The handler set as default.
|
||||
returned: success
|
||||
type: str
|
||||
sample: google-chrome.desktop
|
||||
stdout:
|
||||
description:
|
||||
- The output of the C(gio) command.
|
||||
returned: success
|
||||
type: str
|
||||
sample: Set google-chrome.desktop as the default for x-scheme-handler/https
|
||||
stderr:
|
||||
description:
|
||||
- The error output of the C(gio) command.
|
||||
returned: failure
|
||||
type: str
|
||||
sample: 'gio: Failed to load info for handler "never-existed.desktop"'
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
|
||||
from ansible_collections.community.general.plugins.module_utils.gio_mime import gio_mime_runner, gio_mime_get
|
||||
|
||||
|
||||
class GioMime(ModuleHelper):
    # Module parameters echoed back in the result output.
    output_params = ['handler']
    module = dict(
        argument_spec=dict(
            mime_type=dict(type='str', required=True),
            handler=dict(type='str', required=True),
        ),
        supports_check_mode=True,
    )

    def __init_module__(self):
        # Build the 'gio mime' command runner and seed the tracked 'handler'
        # variable with the current default for the requested MIME type, so
        # change detection and diff output work against the real state.
        self.runner = gio_mime_runner(self.module, check_rc=True)
        self.vars.set_meta("handler", initial_value=gio_mime_get(self.runner, self.vars.mime_type), diff=True, change=True)

    def __run__(self):
        # Only invoke 'gio mime <type> <handler>' when the desired handler
        # differs from the current one; check mode skips the command and
        # substitutes a canned (rc, out, err) result.
        check_mode_return = (0, 'Module executed in check mode', '')
        if self.vars.has_changed("handler"):
            with self.runner.context(args_order=["mime_type", "handler"], check_mode_skip=True, check_mode_return=check_mode_return) as ctx:
                rc, out, err = ctx.run()
                self.vars.stdout = out
                self.vars.stderr = err
                # At high verbosity, expose the full runner invocation details.
                if self.verbosity >= 4:
                    self.vars.run_info = ctx.run_info
|
||||
|
||||
|
||||
def main():
|
||||
GioMime.execute()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -94,6 +94,13 @@ options:
|
||||
- This option is only used on creation, not for updates.
|
||||
type: path
|
||||
version_added: 4.2.0
|
||||
force_delete:
|
||||
description:
|
||||
- Force delete group even if projects in it.
|
||||
- Used only when O(state=absent).
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 7.5.0
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
@@ -279,12 +286,18 @@ class GitLabGroup(object):
|
||||
|
||||
return (changed, group)
|
||||
|
||||
def delete_group(self):
|
||||
'''
|
||||
@param force To delete even if projects inside
|
||||
'''
|
||||
def delete_group(self, force=False):
|
||||
group = self.group_object
|
||||
|
||||
if len(group.projects.list(all=False)) >= 1:
|
||||
if not force and len(group.projects.list(all=False)) >= 1:
|
||||
self._module.fail_json(
|
||||
msg="There are still projects in this group. These needs to be moved or deleted before this group can be removed.")
|
||||
msg=("There are still projects in this group. "
|
||||
"These needs to be moved or deleted before this group can be removed. "
|
||||
"Use 'force_delete' to 'true' to force deletion of existing projects.")
|
||||
)
|
||||
else:
|
||||
if self._module.check_mode:
|
||||
return True
|
||||
@@ -295,7 +308,7 @@ class GitLabGroup(object):
|
||||
self._module.fail_json(msg="Failed to delete group: %s " % to_native(e))
|
||||
|
||||
'''
|
||||
@param name Name of the groupe
|
||||
@param name Name of the group
|
||||
@param full_path Complete path of the Group including parent group path. <parent_path>/<group_path>
|
||||
'''
|
||||
def exists_group(self, project_identifier):
|
||||
@@ -322,6 +335,7 @@ def main():
|
||||
subgroup_creation_level=dict(type='str', choices=['maintainer', 'owner']),
|
||||
require_two_factor_authentication=dict(type='bool'),
|
||||
avatar_path=dict(type='path'),
|
||||
force_delete=dict(type='bool', default=False),
|
||||
))
|
||||
|
||||
module = AnsibleModule(
|
||||
@@ -354,6 +368,7 @@ def main():
|
||||
subgroup_creation_level = module.params['subgroup_creation_level']
|
||||
require_two_factor_authentication = module.params['require_two_factor_authentication']
|
||||
avatar_path = module.params['avatar_path']
|
||||
force_delete = module.params['force_delete']
|
||||
|
||||
gitlab_instance = gitlab_authentication(module)
|
||||
|
||||
@@ -375,7 +390,7 @@ def main():
|
||||
|
||||
if state == 'absent':
|
||||
if group_exists:
|
||||
gitlab_group.delete_group()
|
||||
gitlab_group.delete_group(force=force_delete)
|
||||
module.exit_json(changed=True, msg="Successfully deleted group %s" % group_name)
|
||||
else:
|
||||
module.exit_json(changed=False, msg="Group deleted or does not exists")
|
||||
|
||||
@@ -53,13 +53,14 @@ options:
|
||||
type: bool
|
||||
vars:
|
||||
description:
|
||||
- When the list element is a simple key-value pair, set masked and protected to false.
|
||||
- When the list element is a dict with the keys C(value), C(masked) and C(protected), the user can
|
||||
have full control about whether a value should be masked, protected or both.
|
||||
- When the list element is a simple key-value pair, masked, raw and protected will be set to false.
|
||||
- When the list element is a dict with the keys C(value), C(masked), C(raw) and C(protected), the user can
|
||||
have full control about whether a value should be masked, raw, protected or both.
|
||||
- Support for group variables requires GitLab >= 9.5.
|
||||
- Support for environment_scope requires GitLab Premium >= 13.11.
|
||||
- Support for protected values requires GitLab >= 9.3.
|
||||
- Support for masked values requires GitLab >= 11.10.
|
||||
- Support for raw values requires GitLab >= 15.7.
|
||||
- A C(value) must be a string or a number.
|
||||
- Field C(variable_type) must be a string with either V(env_var), which is the default, or V(file).
|
||||
- When a value is masked, it must be in Base64 and have a length of at least 8 characters.
|
||||
@@ -95,6 +96,13 @@ options:
|
||||
- Wether variable value is protected or not.
|
||||
type: bool
|
||||
default: false
|
||||
raw:
|
||||
description:
|
||||
- Wether variable value is raw or not.
|
||||
- Support for raw values requires GitLab >= 15.7.
|
||||
type: bool
|
||||
default: false
|
||||
version_added: '7.4.0'
|
||||
variable_type:
|
||||
description:
|
||||
- Wether a variable is an environment variable (V(env_var)) or a file (V(file)).
|
||||
@@ -126,6 +134,38 @@ EXAMPLES = r'''
|
||||
variable_type: env_var
|
||||
environment_scope: production
|
||||
|
||||
- name: Set or update some CI/CD variables with raw value
|
||||
community.general.gitlab_group_variable:
|
||||
api_url: https://gitlab.com
|
||||
api_token: secret_access_token
|
||||
group: scodeman/testgroup/
|
||||
purge: false
|
||||
vars:
|
||||
ACCESS_KEY_ID: abc123
|
||||
SECRET_ACCESS_KEY:
|
||||
value: 3214cbad
|
||||
masked: true
|
||||
protected: true
|
||||
raw: true
|
||||
variable_type: env_var
|
||||
environment_scope: '*'
|
||||
|
||||
- name: Set or update some CI/CD variables with expandable value
|
||||
community.general.gitlab_group_variable:
|
||||
api_url: https://gitlab.com
|
||||
api_token: secret_access_token
|
||||
group: scodeman/testgroup/
|
||||
purge: false
|
||||
vars:
|
||||
ACCESS_KEY_ID: abc123
|
||||
SECRET_ACCESS_KEY:
|
||||
value: '$MY_OTHER_VARIABLE'
|
||||
masked: true
|
||||
protected: true
|
||||
raw: false
|
||||
variable_type: env_var
|
||||
environment_scope: '*'
|
||||
|
||||
- name: Delete one variable
|
||||
community.general.gitlab_group_variable:
|
||||
api_url: https://gitlab.com
|
||||
@@ -199,6 +239,7 @@ class GitlabGroupVariables(object):
|
||||
"value": var_obj.get('value'),
|
||||
"masked": var_obj.get('masked'),
|
||||
"protected": var_obj.get('protected'),
|
||||
"raw": var_obj.get('raw'),
|
||||
"variable_type": var_obj.get('variable_type'),
|
||||
}
|
||||
if var_obj.get('environment_scope') is not None:
|
||||
@@ -267,6 +308,8 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module):
|
||||
item['value'] = str(item.get('value'))
|
||||
if item.get('protected') is None:
|
||||
item['protected'] = False
|
||||
if item.get('raw') is None:
|
||||
item['raw'] = False
|
||||
if item.get('masked') is None:
|
||||
item['masked'] = False
|
||||
if item.get('environment_scope') is None:
|
||||
@@ -338,11 +381,14 @@ def main():
|
||||
group=dict(type='str', required=True),
|
||||
purge=dict(type='bool', required=False, default=False),
|
||||
vars=dict(type='dict', required=False, default=dict(), no_log=True),
|
||||
# please mind whenever changing the variables dict to also change module_utils/gitlab.py's
|
||||
# KNOWN dict in filter_returned_variables or bad evil will happen
|
||||
variables=dict(type='list', elements='dict', required=False, default=list(), options=dict(
|
||||
name=dict(type='str', required=True),
|
||||
value=dict(type='str', no_log=True),
|
||||
masked=dict(type='bool', default=False),
|
||||
protected=dict(type='bool', default=False),
|
||||
raw=dict(type='bool', default=False),
|
||||
environment_scope=dict(type='str', default='*'),
|
||||
variable_type=dict(type='str', default='env_var', choices=["env_var", "file"])
|
||||
)),
|
||||
|
||||
@@ -51,11 +51,12 @@ options:
|
||||
type: bool
|
||||
vars:
|
||||
description:
|
||||
- When the list element is a simple key-value pair, masked and protected will be set to false.
|
||||
- When the list element is a dict with the keys C(value), C(masked) and C(protected), the user can
|
||||
have full control about whether a value should be masked, protected or both.
|
||||
- When the list element is a simple key-value pair, masked, raw and protected will be set to false.
|
||||
- When the list element is a dict with the keys C(value), C(masked), C(raw) and C(protected), the user can
|
||||
have full control about whether a value should be masked, raw, protected or both.
|
||||
- Support for protected values requires GitLab >= 9.3.
|
||||
- Support for masked values requires GitLab >= 11.10.
|
||||
- Support for raw values requires GitLab >= 15.7.
|
||||
- Support for environment_scope requires GitLab Premium >= 13.11.
|
||||
- Support for variable_type requires GitLab >= 11.11.
|
||||
- A C(value) must be a string or a number.
|
||||
@@ -96,6 +97,13 @@ options:
|
||||
- Support for protected values requires GitLab >= 9.3.
|
||||
type: bool
|
||||
default: false
|
||||
raw:
|
||||
description:
|
||||
- Wether variable value is raw or not.
|
||||
- Support for raw values requires GitLab >= 15.7.
|
||||
type: bool
|
||||
default: false
|
||||
version_added: '7.4.0'
|
||||
variable_type:
|
||||
description:
|
||||
- Wether a variable is an environment variable (V(env_var)) or a file (V(file)).
|
||||
@@ -143,6 +151,38 @@ EXAMPLES = '''
|
||||
variable_type: env_var
|
||||
environment_scope: '*'
|
||||
|
||||
- name: Set or update some CI/CD variables with raw value
|
||||
community.general.gitlab_project_variable:
|
||||
api_url: https://gitlab.com
|
||||
api_token: secret_access_token
|
||||
project: markuman/dotfiles
|
||||
purge: false
|
||||
vars:
|
||||
ACCESS_KEY_ID: abc123
|
||||
SECRET_ACCESS_KEY:
|
||||
value: 3214cbad
|
||||
masked: true
|
||||
protected: true
|
||||
raw: true
|
||||
variable_type: env_var
|
||||
environment_scope: '*'
|
||||
|
||||
- name: Set or update some CI/CD variables with expandable value
|
||||
community.general.gitlab_project_variable:
|
||||
api_url: https://gitlab.com
|
||||
api_token: secret_access_token
|
||||
project: markuman/dotfiles
|
||||
purge: false
|
||||
vars:
|
||||
ACCESS_KEY_ID: abc123
|
||||
SECRET_ACCESS_KEY:
|
||||
value: '$MY_OTHER_VARIABLE'
|
||||
masked: true
|
||||
protected: true
|
||||
raw: false
|
||||
variable_type: env_var
|
||||
environment_scope: '*'
|
||||
|
||||
- name: Delete one variable
|
||||
community.general.gitlab_project_variable:
|
||||
api_url: https://gitlab.com
|
||||
@@ -220,6 +260,7 @@ class GitlabProjectVariables(object):
|
||||
"value": var_obj.get('value'),
|
||||
"masked": var_obj.get('masked'),
|
||||
"protected": var_obj.get('protected'),
|
||||
"raw": var_obj.get('raw'),
|
||||
"variable_type": var_obj.get('variable_type'),
|
||||
}
|
||||
|
||||
@@ -290,6 +331,8 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module):
|
||||
item['value'] = str(item.get('value'))
|
||||
if item.get('protected') is None:
|
||||
item['protected'] = False
|
||||
if item.get('raw') is None:
|
||||
item['raw'] = False
|
||||
if item.get('masked') is None:
|
||||
item['masked'] = False
|
||||
if item.get('environment_scope') is None:
|
||||
@@ -361,11 +404,14 @@ def main():
|
||||
project=dict(type='str', required=True),
|
||||
purge=dict(type='bool', required=False, default=False),
|
||||
vars=dict(type='dict', required=False, default=dict(), no_log=True),
|
||||
# please mind whenever changing the variables dict to also change module_utils/gitlab.py's
|
||||
# KNOWN dict in filter_returned_variables or bad evil will happen
|
||||
variables=dict(type='list', elements='dict', required=False, default=list(), options=dict(
|
||||
name=dict(type='str', required=True),
|
||||
value=dict(type='str', no_log=True),
|
||||
masked=dict(type='bool', default=False),
|
||||
protected=dict(type='bool', default=False),
|
||||
raw=dict(type='bool', default=False),
|
||||
environment_scope=dict(type='str', default='*'),
|
||||
variable_type=dict(type='str', default='env_var', choices=["env_var", "file"]),
|
||||
)),
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
# Copyright (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
|
||||
# Copyright (c) 2015, Ales Nosek <anosek.nosek () gmail.com>
|
||||
# Copyright (c) 2017, Ansible Project
|
||||
# Copyright (c) 2023, Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
@@ -98,6 +99,12 @@ options:
|
||||
- Do not insert spaces before and after '=' symbol.
|
||||
type: bool
|
||||
default: false
|
||||
ignore_spaces:
|
||||
description:
|
||||
- Do not change a line if doing so would only add or remove spaces before or after the V(=) symbol.
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 7.5.0
|
||||
create:
|
||||
description:
|
||||
- If set to V(false), the module will fail if the file does not already exist.
|
||||
@@ -178,7 +185,7 @@ from ansible.module_utils.common.text.converters import to_bytes, to_text
|
||||
|
||||
def match_opt(option, line):
|
||||
option = re.escape(option)
|
||||
return re.match('[#;]?( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)
|
||||
return re.match('([#;]?)( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)
|
||||
|
||||
|
||||
def match_active_opt(option, line):
|
||||
@@ -186,19 +193,27 @@ def match_active_opt(option, line):
|
||||
return re.match('( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)
|
||||
|
||||
|
||||
def update_section_line(changed, section_lines, index, changed_lines, newline, msg):
|
||||
option_changed = section_lines[index] != newline
|
||||
def update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg):
|
||||
option_changed = None
|
||||
if ignore_spaces:
|
||||
old_match = match_opt(option, section_lines[index])
|
||||
if not old_match.group(1):
|
||||
new_match = match_opt(option, newline)
|
||||
option_changed = old_match.group(7) != new_match.group(7)
|
||||
if option_changed is None:
|
||||
option_changed = section_lines[index] != newline
|
||||
if option_changed:
|
||||
section_lines[index] = newline
|
||||
changed = changed or option_changed
|
||||
if option_changed:
|
||||
msg = 'option changed'
|
||||
section_lines[index] = newline
|
||||
changed_lines[index] = 1
|
||||
return (changed, msg)
|
||||
|
||||
|
||||
def do_ini(module, filename, section=None, option=None, values=None,
|
||||
state='present', exclusive=True, backup=False, no_extra_spaces=False,
|
||||
create=True, allow_no_value=False, follow=False):
|
||||
ignore_spaces=False, create=True, allow_no_value=False, follow=False):
|
||||
|
||||
if section is not None:
|
||||
section = to_text(section)
|
||||
@@ -306,8 +321,8 @@ def do_ini(module, filename, section=None, option=None, values=None,
|
||||
for index, line in enumerate(section_lines):
|
||||
if match_opt(option, line):
|
||||
match = match_opt(option, line)
|
||||
if values and match.group(6) in values:
|
||||
matched_value = match.group(6)
|
||||
if values and match.group(7) in values:
|
||||
matched_value = match.group(7)
|
||||
if not matched_value and allow_no_value:
|
||||
# replace existing option with no value line(s)
|
||||
newline = u'%s\n' % option
|
||||
@@ -315,12 +330,12 @@ def do_ini(module, filename, section=None, option=None, values=None,
|
||||
else:
|
||||
# replace existing option=value line(s)
|
||||
newline = assignment_format % (option, matched_value)
|
||||
(changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg)
|
||||
(changed, msg) = update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg)
|
||||
values.remove(matched_value)
|
||||
elif not values and allow_no_value:
|
||||
# replace existing option with no value line(s)
|
||||
newline = u'%s\n' % option
|
||||
(changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg)
|
||||
(changed, msg) = update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg)
|
||||
option_no_value_present = True
|
||||
break
|
||||
|
||||
@@ -330,7 +345,7 @@ def do_ini(module, filename, section=None, option=None, values=None,
|
||||
for index, line in enumerate(section_lines):
|
||||
if not changed_lines[index] and match_opt(option, line):
|
||||
newline = assignment_format % (option, values.pop(0))
|
||||
(changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg)
|
||||
(changed, msg) = update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg)
|
||||
if len(values) == 0:
|
||||
break
|
||||
# remove all remaining option occurrences from the rest of the section
|
||||
@@ -449,6 +464,7 @@ def main():
|
||||
state=dict(type='str', default='present', choices=['absent', 'present']),
|
||||
exclusive=dict(type='bool', default=True),
|
||||
no_extra_spaces=dict(type='bool', default=False),
|
||||
ignore_spaces=dict(type='bool', default=False),
|
||||
allow_no_value=dict(type='bool', default=False),
|
||||
create=dict(type='bool', default=True),
|
||||
follow=dict(type='bool', default=False)
|
||||
@@ -469,6 +485,7 @@ def main():
|
||||
exclusive = module.params['exclusive']
|
||||
backup = module.params['backup']
|
||||
no_extra_spaces = module.params['no_extra_spaces']
|
||||
ignore_spaces = module.params['ignore_spaces']
|
||||
allow_no_value = module.params['allow_no_value']
|
||||
create = module.params['create']
|
||||
follow = module.params['follow']
|
||||
@@ -481,7 +498,9 @@ def main():
|
||||
elif values is None:
|
||||
values = []
|
||||
|
||||
(changed, backup_file, diff, msg) = do_ini(module, path, section, option, values, state, exclusive, backup, no_extra_spaces, create, allow_no_value, follow)
|
||||
(changed, backup_file, diff, msg) = do_ini(
|
||||
module, path, section, option, values, state, exclusive, backup,
|
||||
no_extra_spaces, ignore_spaces, create, allow_no_value, follow)
|
||||
|
||||
if not module.check_mode and os.path.exists(path):
|
||||
file_args = module.load_file_common_arguments(module.params)
|
||||
|
||||
@@ -40,6 +40,12 @@ options:
|
||||
aliases: ["primarygroup"]
|
||||
type: str
|
||||
version_added: '2.5.0'
|
||||
ipagroupobjectclasses:
|
||||
description: A list of group objectclasses.
|
||||
aliases: ["groupobjectclasses"]
|
||||
type: list
|
||||
elements: str
|
||||
version_added: '7.3.0'
|
||||
ipagroupsearchfields:
|
||||
description: A list of fields to search in when searching for groups.
|
||||
aliases: ["groupsearchfields"]
|
||||
@@ -85,12 +91,20 @@ options:
|
||||
elements: str
|
||||
version_added: '3.7.0'
|
||||
ipauserauthtype:
|
||||
description: The authentication type to use by default.
|
||||
description:
|
||||
- The authentication type to use by default.
|
||||
- The choice V(idp) has been added in community.general 7.3.0.
|
||||
aliases: ["userauthtype"]
|
||||
choices: ["password", "radius", "otp", "pkinit", "hardened", "disabled"]
|
||||
choices: ["password", "radius", "otp", "pkinit", "hardened", "idp", "disabled"]
|
||||
type: list
|
||||
elements: str
|
||||
version_added: '2.5.0'
|
||||
ipauserobjectclasses:
|
||||
description: A list of user objectclasses.
|
||||
aliases: ["userobjectclasses"]
|
||||
type: list
|
||||
elements: str
|
||||
version_added: '7.3.0'
|
||||
ipausersearchfields:
|
||||
description: A list of fields to search in when searching for users.
|
||||
aliases: ["usersearchfields"]
|
||||
@@ -235,11 +249,12 @@ class ConfigIPAClient(IPAClient):
|
||||
|
||||
def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None,
|
||||
ipadefaultemaildomain=None, ipadefaultprimarygroup=None,
|
||||
ipagroupsearchfields=None, ipahomesrootdir=None,
|
||||
ipakrbauthzdata=None, ipamaxusernamelength=None,
|
||||
ipapwdexpadvnotify=None, ipasearchrecordslimit=None,
|
||||
ipasearchtimelimit=None, ipaselinuxusermaporder=None,
|
||||
ipauserauthtype=None, ipausersearchfields=None):
|
||||
ipagroupsearchfields=None, ipagroupobjectclasses=None,
|
||||
ipahomesrootdir=None, ipakrbauthzdata=None,
|
||||
ipamaxusernamelength=None, ipapwdexpadvnotify=None,
|
||||
ipasearchrecordslimit=None, ipasearchtimelimit=None,
|
||||
ipaselinuxusermaporder=None, ipauserauthtype=None,
|
||||
ipausersearchfields=None, ipauserobjectclasses=None):
|
||||
config = {}
|
||||
if ipaconfigstring is not None:
|
||||
config['ipaconfigstring'] = ipaconfigstring
|
||||
@@ -249,6 +264,8 @@ def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None,
|
||||
config['ipadefaultemaildomain'] = ipadefaultemaildomain
|
||||
if ipadefaultprimarygroup is not None:
|
||||
config['ipadefaultprimarygroup'] = ipadefaultprimarygroup
|
||||
if ipagroupobjectclasses is not None:
|
||||
config['ipagroupobjectclasses'] = ipagroupobjectclasses
|
||||
if ipagroupsearchfields is not None:
|
||||
config['ipagroupsearchfields'] = ','.join(ipagroupsearchfields)
|
||||
if ipahomesrootdir is not None:
|
||||
@@ -267,6 +284,8 @@ def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None,
|
||||
config['ipaselinuxusermaporder'] = '$'.join(ipaselinuxusermaporder)
|
||||
if ipauserauthtype is not None:
|
||||
config['ipauserauthtype'] = ipauserauthtype
|
||||
if ipauserobjectclasses is not None:
|
||||
config['ipauserobjectclasses'] = ipauserobjectclasses
|
||||
if ipausersearchfields is not None:
|
||||
config['ipausersearchfields'] = ','.join(ipausersearchfields)
|
||||
|
||||
@@ -283,6 +302,7 @@ def ensure(module, client):
|
||||
ipadefaultloginshell=module.params.get('ipadefaultloginshell'),
|
||||
ipadefaultemaildomain=module.params.get('ipadefaultemaildomain'),
|
||||
ipadefaultprimarygroup=module.params.get('ipadefaultprimarygroup'),
|
||||
ipagroupobjectclasses=module.params.get('ipagroupobjectclasses'),
|
||||
ipagroupsearchfields=module.params.get('ipagroupsearchfields'),
|
||||
ipahomesrootdir=module.params.get('ipahomesrootdir'),
|
||||
ipakrbauthzdata=module.params.get('ipakrbauthzdata'),
|
||||
@@ -293,6 +313,7 @@ def ensure(module, client):
|
||||
ipaselinuxusermaporder=module.params.get('ipaselinuxusermaporder'),
|
||||
ipauserauthtype=module.params.get('ipauserauthtype'),
|
||||
ipausersearchfields=module.params.get('ipausersearchfields'),
|
||||
ipauserobjectclasses=module.params.get('ipauserobjectclasses'),
|
||||
)
|
||||
ipa_config = client.config_show()
|
||||
diff = get_config_diff(client, ipa_config, module_config)
|
||||
@@ -322,6 +343,8 @@ def main():
|
||||
ipadefaultloginshell=dict(type='str', aliases=['loginshell']),
|
||||
ipadefaultemaildomain=dict(type='str', aliases=['emaildomain']),
|
||||
ipadefaultprimarygroup=dict(type='str', aliases=['primarygroup']),
|
||||
ipagroupobjectclasses=dict(type='list', elements='str',
|
||||
aliases=['groupobjectclasses']),
|
||||
ipagroupsearchfields=dict(type='list', elements='str',
|
||||
aliases=['groupsearchfields']),
|
||||
ipahomesrootdir=dict(type='str', aliases=['homesrootdir']),
|
||||
@@ -337,9 +360,11 @@ def main():
|
||||
ipauserauthtype=dict(type='list', elements='str',
|
||||
aliases=['userauthtype'],
|
||||
choices=["password", "radius", "otp", "pkinit",
|
||||
"hardened", "disabled"]),
|
||||
"hardened", "idp", "disabled"]),
|
||||
ipausersearchfields=dict(type='list', elements='str',
|
||||
aliases=['usersearchfields']),
|
||||
ipauserobjectclasses=dict(type='list', elements='str',
|
||||
aliases=['userobjectclasses']),
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
|
||||
@@ -30,7 +30,9 @@ options:
|
||||
default: 'always'
|
||||
choices: [ always, on_create ]
|
||||
givenname:
|
||||
description: First name.
|
||||
description:
|
||||
- First name.
|
||||
- If user does not exist and O(state=present), the usage of O(givenname) is required.
|
||||
type: str
|
||||
krbpasswordexpiration:
|
||||
description:
|
||||
@@ -54,7 +56,9 @@ options:
|
||||
- Will not be set for an existing user unless O(update_password=always), which is the default.
|
||||
type: str
|
||||
sn:
|
||||
description: Surname.
|
||||
description:
|
||||
- Surname.
|
||||
- If user does not exist and O(state=present), the usage of O(sn) is required.
|
||||
type: str
|
||||
sshpubkey:
|
||||
description:
|
||||
|
||||
@@ -20,6 +20,7 @@ requirements:
|
||||
author:
|
||||
- Brett Milford (@brettmilford)
|
||||
- Tong He (@unnecessary-username)
|
||||
- Juan Casanova (@juanmcasanova)
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
@@ -65,6 +66,19 @@ options:
|
||||
description:
|
||||
- User to authenticate with the Jenkins server.
|
||||
type: str
|
||||
detach:
|
||||
description:
|
||||
- Enable detached mode to not wait for the build end.
|
||||
default: false
|
||||
type: bool
|
||||
version_added: 7.4.0
|
||||
time_between_checks:
|
||||
description:
|
||||
- Time in seconds to wait between requests to the Jenkins server.
|
||||
- This times must be higher than the configured quiet time for the job.
|
||||
default: 10
|
||||
type: int
|
||||
version_added: 7.4.0
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
@@ -152,6 +166,8 @@ class JenkinsBuild:
|
||||
self.user = module.params.get('user')
|
||||
self.jenkins_url = module.params.get('url')
|
||||
self.build_number = module.params.get('build_number')
|
||||
self.detach = module.params.get('detach')
|
||||
self.time_between_checks = module.params.get('time_between_checks')
|
||||
self.server = self.get_jenkins_connection()
|
||||
|
||||
self.result = {
|
||||
@@ -235,7 +251,14 @@ class JenkinsBuild:
|
||||
build_status = self.get_build_status()
|
||||
|
||||
if build_status['result'] is None:
|
||||
sleep(10)
|
||||
# If detached mode is active mark as success, we wouldn't be able to get here if it didn't exist
|
||||
if self.detach:
|
||||
result['changed'] = True
|
||||
result['build_info'] = build_status
|
||||
|
||||
return result
|
||||
|
||||
sleep(self.time_between_checks)
|
||||
self.get_result()
|
||||
else:
|
||||
if self.state == "stopped" and build_status['result'] == "ABORTED":
|
||||
@@ -273,6 +296,8 @@ def main():
|
||||
token=dict(no_log=True),
|
||||
url=dict(default="http://localhost:8080"),
|
||||
user=dict(),
|
||||
detach=dict(type='bool', default=False),
|
||||
time_between_checks=dict(type='int', default=10),
|
||||
),
|
||||
mutually_exclusive=[['password', 'token']],
|
||||
required_if=[['state', 'absent', ['build_number'], True], ['state', 'stopped', ['build_number'], True]],
|
||||
@@ -288,7 +313,7 @@ def main():
|
||||
else:
|
||||
jenkins_build.absent_build()
|
||||
|
||||
sleep(10)
|
||||
sleep(jenkins_build.time_between_checks)
|
||||
result = jenkins_build.get_result()
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
210
plugins/modules/jenkins_build_info.py
Normal file
210
plugins/modules/jenkins_build_info.py
Normal file
@@ -0,0 +1,210 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (c) Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: jenkins_build_info
|
||||
short_description: Get information about Jenkins builds
|
||||
version_added: 7.4.0
|
||||
description:
|
||||
- Get information about Jenkins builds with Jenkins REST API.
|
||||
requirements:
|
||||
- "python-jenkins >= 0.4.12"
|
||||
author:
|
||||
- Juan Casanova (@juanmcasanova)
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
- community.general.attributes.info_module
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the Jenkins job to which the build belongs.
|
||||
required: true
|
||||
type: str
|
||||
build_number:
|
||||
description:
|
||||
- An integer which specifies a build of a job.
|
||||
- If not specified the last build information will be returned.
|
||||
type: int
|
||||
password:
|
||||
description:
|
||||
- Password to authenticate with the Jenkins server.
|
||||
type: str
|
||||
token:
|
||||
description:
|
||||
- API token used to authenticate with the Jenkins server.
|
||||
type: str
|
||||
url:
|
||||
description:
|
||||
- URL of the Jenkins server.
|
||||
default: http://localhost:8080
|
||||
type: str
|
||||
user:
|
||||
description:
|
||||
- User to authenticate with the Jenkins server.
|
||||
type: str
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get information about a jenkins build using basic authentication
|
||||
community.general.jenkins_build_info:
|
||||
name: "test-check"
|
||||
build_number: 1
|
||||
user: admin
|
||||
password: asdfg
|
||||
url: http://localhost:8080
|
||||
|
||||
- name: Get information about a jenkins build anonymously
|
||||
community.general.jenkins_build_info:
|
||||
name: "stop-check"
|
||||
build_number: 3
|
||||
url: http://localhost:8080
|
||||
|
||||
- name: Get information about a jenkins build using token authentication
|
||||
community.general.jenkins_build_info:
|
||||
name: "delete-experiment"
|
||||
build_number: 30
|
||||
user: Jenkins
|
||||
token: abcdefghijklmnopqrstuvwxyz123456
|
||||
url: http://localhost:8080
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
---
|
||||
name:
|
||||
description: Name of the jenkins job.
|
||||
returned: success
|
||||
type: str
|
||||
sample: "test-job"
|
||||
state:
|
||||
description: State of the jenkins job.
|
||||
returned: success
|
||||
type: str
|
||||
sample: present
|
||||
user:
|
||||
description: User used for authentication.
|
||||
returned: success
|
||||
type: str
|
||||
sample: admin
|
||||
url:
|
||||
description: URL to connect to the Jenkins server.
|
||||
returned: success
|
||||
type: str
|
||||
sample: https://jenkins.mydomain.com
|
||||
build_info:
|
||||
description: Build info of the jenkins job.
|
||||
returned: success
|
||||
type: dict
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
JENKINS_IMP_ERR = None
|
||||
try:
|
||||
import jenkins
|
||||
python_jenkins_installed = True
|
||||
except ImportError:
|
||||
JENKINS_IMP_ERR = traceback.format_exc()
|
||||
python_jenkins_installed = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
|
||||
class JenkinsBuildInfo:
    """Fetch build information for a Jenkins job through python-jenkins.

    Credentials, the Jenkins URL, the job name and an optional build number
    are read from the Ansible module parameters; the connection is opened
    eagerly in the constructor.
    """

    def __init__(self, module):
        self.module = module

        self.name = module.params.get('name')
        self.password = module.params.get('password')
        self.token = module.params.get('token')
        self.user = module.params.get('user')
        self.jenkins_url = module.params.get('url')
        self.build_number = module.params.get('build_number')
        self.server = self.get_jenkins_connection()

        # Base result returned to Ansible; build_info/failed are added later.
        self.result = {
            'changed': False,
            'url': self.jenkins_url,
            'name': self.name,
            'user': self.user,
        }

    def get_jenkins_connection(self):
        """Return a jenkins.Jenkins connection built from the module params.

        Authentication precedence: user+password, then user+token, then user
        only, then anonymous. Fails the module if the connection cannot be
        created.
        """
        try:
            if (self.user and self.password):
                return jenkins.Jenkins(self.jenkins_url, self.user, self.password)
            elif (self.user and self.token):
                return jenkins.Jenkins(self.jenkins_url, self.user, self.token)
            elif (self.user and not (self.password or self.token)):
                return jenkins.Jenkins(self.jenkins_url, self.user)
            else:
                return jenkins.Jenkins(self.jenkins_url)
        except Exception as e:
            self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % to_native(e))

    def get_build_status(self):
        """Return the build-info dict for the configured (or latest) build.

        When no build number was given, the job's lastBuild number is looked
        up first (and cached on self.build_number). A jenkins.JenkinsException
        (job or build not found) yields {"result": "ABSENT"} instead of
        failing; any other error fails the module.
        """
        try:
            if self.build_number is None:
                job_info = self.server.get_job_info(self.name)
                self.build_number = job_info['lastBuild']['number']

            return self.server.get_build_info(self.name, self.build_number)
        except jenkins.JenkinsException:
            # Fixed: the exception object was bound to an unused variable.
            # Missing job/build is reported as ABSENT, not as a module failure.
            return {"result": "ABSENT"}
        except Exception as e:
            self.module.fail_json(msg='Unable to fetch build information, %s' % to_native(e),
                                  exception=traceback.format_exc())

    def get_result(self):
        """Assemble the final module result, flagging failure when the build is absent."""
        result = self.result
        build_status = self.get_build_status()

        if build_status['result'] == "ABSENT":
            result['failed'] = True
        result['build_info'] = build_status

        return result
|
||||
|
||||
|
||||
def test_dependencies(module):
    """Abort the module run when the python-jenkins library is not importable."""
    if python_jenkins_installed:
        return
    module.fail_json(
        msg=missing_required_lib("python-jenkins",
                                 url="https://python-jenkins.readthedocs.io/en/latest/install.html"),
        exception=JENKINS_IMP_ERR)
|
||||
|
||||
|
||||
def main():
    """Entry point: declare the module interface and emit the build info."""
    module = AnsibleModule(
        argument_spec=dict(
            build_number=dict(type='int'),
            name=dict(required=True),
            password=dict(no_log=True),
            token=dict(no_log=True),
            url=dict(default="http://localhost:8080"),
            user=dict(),
        ),
        mutually_exclusive=[['password', 'token']],
        supports_check_mode=True,
    )

    # Bail out early if python-jenkins is not installed.
    test_dependencies(module)

    module.exit_json(**JenkinsBuildInfo(module).get_result())


if __name__ == '__main__':
    main()
|
||||
211
plugins/modules/keycloak_authz_custom_policy.py
Normal file
211
plugins/modules/keycloak_authz_custom_policy.py
Normal file
@@ -0,0 +1,211 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2017, Eike Frost <ei@kefro.st>
|
||||
# Copyright (c) 2021, Christophe Gilles <christophe.gilles54@gmail.com>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or
|
||||
# https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: keycloak_authz_custom_policy
|
||||
|
||||
short_description: Allows administration of Keycloak client custom Javascript policies via Keycloak API
|
||||
|
||||
version_added: 7.5.0
|
||||
|
||||
description:
|
||||
- This module allows the administration of Keycloak client custom Javascript via the Keycloak REST
|
||||
API. Custom Javascript policies are only available if a client has Authorization enabled and if
|
||||
they have been deployed to the Keycloak server as JAR files.
|
||||
|
||||
- This module requires access to the REST API via OpenID Connect; the user connecting and the realm
|
||||
being used must have the requisite access rights. In a default Keycloak installation, admin-cli
|
||||
and an admin user would work, as would a separate realm definition with the scope tailored
|
||||
to your needs and a user having the expected roles.
|
||||
|
||||
- The names of module options are snake_cased versions of the camelCase options used by Keycloak.
|
||||
The Authorization Services paths and payloads have not officially been documented by the Keycloak project.
|
||||
U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/)
|
||||
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- State of the custom policy.
|
||||
- On V(present), the custom policy will be created (or updated if it exists already).
|
||||
- On V(absent), the custom policy will be removed if it exists.
|
||||
choices: ['present', 'absent']
|
||||
default: 'present'
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- Name of the custom policy to create.
|
||||
type: str
|
||||
required: true
|
||||
policy_type:
|
||||
description:
|
||||
- The type of the policy. This must match the name of the custom policy deployed to the server.
|
||||
- Multiple policies pointing to the same policy type can be created, but their names have to differ.
|
||||
type: str
|
||||
required: true
|
||||
client_id:
|
||||
description:
|
||||
- The V(clientId) of the Keycloak client that should have the custom policy attached to it.
|
||||
- This is usually a human-readable name of the Keycloak client.
|
||||
type: str
|
||||
required: true
|
||||
realm:
|
||||
description:
|
||||
- The name of the Keycloak realm the Keycloak client is in.
|
||||
type: str
|
||||
required: true
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.keycloak
|
||||
- community.general.attributes
|
||||
|
||||
author:
|
||||
- Samuli Seppänen (@mattock)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Manage Keycloak custom authorization policy
|
||||
community.general.keycloak_authz_custom_policy:
|
||||
name: OnlyOwner
|
||||
state: present
|
||||
policy_type: script-policy.js
|
||||
client_id: myclient
|
||||
realm: myrealm
|
||||
auth_keycloak_url: http://localhost:8080/auth
|
||||
auth_username: keycloak
|
||||
auth_password: keycloak
|
||||
auth_realm: master
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
msg:
|
||||
description: Message as to what action was taken.
|
||||
returned: always
|
||||
type: str
|
||||
|
||||
end_state:
|
||||
description: Representation of the custom policy after module execution.
|
||||
returned: on success
|
||||
type: dict
|
||||
contains:
|
||||
name:
|
||||
description: Name of the custom policy.
|
||||
type: str
|
||||
      returned: when O(state=present)
|
||||
sample: file:delete
|
||||
policy_type:
|
||||
description: Type of custom policy.
|
||||
type: str
|
||||
      returned: when O(state=present)
|
||||
sample: File delete
|
||||
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
|
||||
keycloak_argument_spec, get_token, KeycloakError
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
def main():
    """Entry point: converge a Keycloak client's custom policy to the desired state."""
    argument_spec = keycloak_argument_spec()

    meta_args = dict(
        state=dict(type='str', default='present',
                   choices=['present', 'absent']),
        name=dict(type='str', required=True),
        policy_type=dict(type='str', required=True),
        client_id=dict(type='str', required=True),
        realm=dict(type='str', required=True)
    )

    argument_spec.update(meta_args)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           required_one_of=(
                               [['token', 'auth_realm', 'auth_username', 'auth_password']]),
                           required_together=([['auth_realm', 'auth_username', 'auth_password']]))

    result = dict(changed=False, msg='', end_state={})

    # Authenticate against the Keycloak Admin API.
    try:
        connection_header = get_token(module.params)
    except KeycloakError as e:
        module.fail_json(msg=str(e))

    kc = KeycloakAPI(module, connection_header)

    # Shorthand for the module parameters used below.
    state = module.params.get('state')
    name = module.params.get('name')
    policy_type = module.params.get('policy_type')
    client_id = module.params.get('client_id')
    realm = module.params.get('realm')

    # Resolve the human-readable clientId to the internal client UUID.
    cid = kc.get_client_id(client_id, realm=realm)
    if not cid:
        module.fail_json(msg='Invalid client %s for realm %s' %
                         (client_id, realm))

    existing_policy = kc.get_authz_policy_by_name(
        name=name, client_id=cid, realm=realm)

    desired_policy = {'name': name, 'type': policy_type}

    # Modifying existing custom policies is not possible, so "present" only
    # ever creates; an existing policy is left untouched.
    if state == 'present':
        if existing_policy:
            result['msg'] = "Custom policy %s already exists" % (name)
            result['changed'] = False
            result['end_state'] = desired_policy
        else:
            if module.check_mode:
                result['msg'] = "Would create custom policy %s" % (name)
            else:
                kc.create_authz_custom_policy(
                    payload=desired_policy, policy_type=policy_type, client_id=cid, realm=realm)
                result['msg'] = "Custom policy %s created" % (name)

            result['changed'] = True
            result['end_state'] = desired_policy
    else:
        if existing_policy:
            if module.check_mode:
                result['msg'] = "Would remove custom policy %s" % (name)
            else:
                kc.remove_authz_custom_policy(
                    policy_id=existing_policy['id'], client_id=cid, realm=realm)
                result['msg'] = "Custom policy %s removed" % (name)

            result['changed'] = True
            result['end_state'] = {}
        else:
            result['msg'] = "Custom policy %s does not exist" % (name)
            result['changed'] = False
            result['end_state'] = {}

    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
||||
@@ -247,6 +247,7 @@ options:
|
||||
protocol:
|
||||
description:
|
||||
- Type of client.
|
||||
- At creation only, default value will be V(openid-connect) if O(protocol) is omitted.
|
||||
type: str
|
||||
choices: ['openid-connect', 'saml']
|
||||
|
||||
@@ -721,6 +722,10 @@ from ansible.module_utils.basic import AnsibleModule
|
||||
import copy
|
||||
|
||||
|
||||
PROTOCOL_OPENID_CONNECT = 'openid-connect'
|
||||
PROTOCOL_SAML = 'saml'
|
||||
|
||||
|
||||
def normalise_cr(clientrep, remove_ids=False):
|
||||
""" Re-sorts any properties where the order so that diff's is minimised, and adds default values where appropriate so that the
|
||||
the change detection is more effective.
|
||||
@@ -779,7 +784,7 @@ def main():
|
||||
consentText=dict(type='str'),
|
||||
id=dict(type='str'),
|
||||
name=dict(type='str'),
|
||||
protocol=dict(type='str', choices=['openid-connect', 'saml']),
|
||||
protocol=dict(type='str', choices=[PROTOCOL_OPENID_CONNECT, PROTOCOL_SAML]),
|
||||
protocolMapper=dict(type='str'),
|
||||
config=dict(type='dict'),
|
||||
)
|
||||
@@ -813,7 +818,7 @@ def main():
|
||||
authorization_services_enabled=dict(type='bool', aliases=['authorizationServicesEnabled']),
|
||||
public_client=dict(type='bool', aliases=['publicClient']),
|
||||
frontchannel_logout=dict(type='bool', aliases=['frontchannelLogout']),
|
||||
protocol=dict(type='str', choices=['openid-connect', 'saml']),
|
||||
protocol=dict(type='str', choices=[PROTOCOL_OPENID_CONNECT, PROTOCOL_SAML]),
|
||||
attributes=dict(type='dict'),
|
||||
full_scope_allowed=dict(type='bool', aliases=['fullScopeAllowed']),
|
||||
node_re_registration_timeout=dict(type='int', aliases=['nodeReRegistrationTimeout']),
|
||||
@@ -911,6 +916,8 @@ def main():
|
||||
|
||||
if 'clientId' not in desired_client:
|
||||
module.fail_json(msg='client_id needs to be specified when creating a new client')
|
||||
if 'protocol' not in desired_client:
|
||||
desired_client['protocol'] = PROTOCOL_OPENID_CONNECT
|
||||
|
||||
if module._diff:
|
||||
result['diff'] = dict(before='', after=sanitize_cr(desired_client))
|
||||
|
||||
@@ -67,7 +67,7 @@ author:
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Set default client scopes on realm level
|
||||
community.general.keycloak_clientsecret_info:
|
||||
community.general.keycloak_clientscope_type:
|
||||
auth_client_id: admin-cli
|
||||
auth_keycloak_url: https://auth.example.com/auth
|
||||
auth_realm: master
|
||||
@@ -79,7 +79,7 @@ EXAMPLES = '''
|
||||
|
||||
|
||||
- name: Set default and optional client scopes on client level with token auth
|
||||
community.general.keycloak_clientsecret_info:
|
||||
community.general.keycloak_clientscope_type:
|
||||
auth_client_id: admin-cli
|
||||
auth_keycloak_url: https://auth.example.com/auth
|
||||
token: TOKEN
|
||||
|
||||
457
plugins/modules/keycloak_realm_key.py
Normal file
457
plugins/modules/keycloak_realm_key.py
Normal file
@@ -0,0 +1,457 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2017, Eike Frost <ei@kefro.st>
|
||||
# Copyright (c) 2021, Christophe Gilles <christophe.gilles54@gmail.com>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or
|
||||
# https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: keycloak_realm_key
|
||||
|
||||
short_description: Allows administration of Keycloak realm keys via Keycloak API
|
||||
|
||||
version_added: 7.5.0
|
||||
|
||||
description:
|
||||
- This module allows the administration of Keycloak realm keys via the Keycloak REST API. It
|
||||
requires access to the REST API via OpenID Connect; the user connecting and the realm being
|
||||
used must have the requisite access rights. In a default Keycloak installation, admin-cli
|
||||
and an admin user would work, as would a separate realm definition with the scope tailored
|
||||
to your needs and a user having the expected roles.
|
||||
|
||||
- The names of module options are snake_cased versions of the camelCase ones found in the
|
||||
Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
|
||||
Aliases are provided so camelCased versions can be used as well.
|
||||
|
||||
- This module is unable to detect changes to the actual cryptographic key after importing it.
|
||||
However, if some other property is changed alongside the cryptographic key, then the key
|
||||
will also get changed as a side-effect, as the JSON payload needs to include the private key.
|
||||
This can be considered either a bug or a feature, as the alternative would be to always
|
||||
update the realm key whether it has changed or not.
|
||||
|
||||
- If certificate is not explicitly provided it will be dynamically created by Keycloak.
|
||||
Therefore comparing the current state of the certificate to the desired state (which may be
|
||||
empty) is not possible.
|
||||
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: partial
|
||||
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- State of the keycloak realm key.
|
||||
- On V(present), the realm key will be created (or updated if it exists already).
|
||||
- On V(absent), the realm key will be removed if it exists.
|
||||
choices: ['present', 'absent']
|
||||
default: 'present'
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- Name of the realm key to create.
|
||||
type: str
|
||||
required: true
|
||||
force:
|
||||
description:
|
||||
- Enforce the state of the private key and certificate. This is not automatically the
|
||||
case as this module is unable to determine the current state of the private key and
|
||||
thus cannot trigger an update based on an actual divergence. That said, a private key
|
||||
update may happen even if force is false as a side-effect of other changes.
|
||||
default: false
|
||||
type: bool
|
||||
parent_id:
|
||||
description:
|
||||
- The parent_id of the realm key. In practice the ID (name) of the realm.
|
||||
type: str
|
||||
required: true
|
||||
provider_id:
|
||||
description:
|
||||
- The name of the "provider ID" for the key.
|
||||
choices: ['rsa']
|
||||
default: 'rsa'
|
||||
type: str
|
||||
config:
|
||||
description:
|
||||
- Dict specifying the key and its properties.
|
||||
type: dict
|
||||
suboptions:
|
||||
active:
|
||||
description:
|
||||
        - Whether the key is active or inactive. Not to be confused with the state
|
||||
of the Ansible resource managed by the O(state) parameter.
|
||||
default: true
|
||||
type: bool
|
||||
enabled:
|
||||
description:
|
||||
- Whether the key is enabled or disabled. Not to be confused with the state
|
||||
of the Ansible resource managed by the O(state) parameter.
|
||||
default: true
|
||||
type: bool
|
||||
priority:
|
||||
description:
|
||||
- The priority of the key.
|
||||
type: int
|
||||
required: true
|
||||
algorithm:
|
||||
description:
|
||||
- Key algorithm.
|
||||
default: RS256
|
||||
choices: ['RS256']
|
||||
type: str
|
||||
private_key:
|
||||
description:
|
||||
- The private key as an ASCII string. Contents of the key must match O(config.algorithm)
|
||||
and O(provider_id).
|
||||
- Please note that the module cannot detect whether the private key specified differs from the
|
||||
current state's private key. Use O(force=true) to force the module to update the private key
|
||||
if you expect it to be updated.
|
||||
required: true
|
||||
type: str
|
||||
certificate:
|
||||
description:
|
||||
- A certificate signed with the private key as an ASCII string. Contents of the
|
||||
key must match O(config.algorithm) and O(provider_id).
|
||||
- If you want Keycloak to automatically generate a certificate using your private key
|
||||
then set this to an empty string.
|
||||
required: true
|
||||
type: str
|
||||
notes:
|
||||
- Current value of the private key cannot be fetched from Keycloak.
|
||||
Therefore comparing its desired state to the current state is not
|
||||
possible.
|
||||
- If certificate is not explicitly provided it will be dynamically created
|
||||
by Keycloak. Therefore comparing the current state of the certificate to
|
||||
the desired state (which may be empty) is not possible.
|
||||
- Due to the private key and certificate options the module is
|
||||
B(not fully idempotent). You can use O(force=true) to force the module
|
||||
to always update if you know that the private key might have changed.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.keycloak
|
||||
- community.general.attributes
|
||||
|
||||
author:
|
||||
- Samuli Seppänen (@mattock)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Manage Keycloak realm key (certificate autogenerated by Keycloak)
|
||||
community.general.keycloak_realm_key:
|
||||
name: custom
|
||||
state: present
|
||||
parent_id: master
|
||||
provider_id: rsa
|
||||
auth_keycloak_url: http://localhost:8080/auth
|
||||
auth_username: keycloak
|
||||
auth_password: keycloak
|
||||
auth_realm: master
|
||||
config:
|
||||
private_key: "{{ private_key }}"
|
||||
enabled: true
|
||||
active: true
|
||||
priority: 120
|
||||
algorithm: RS256
|
||||
- name: Manage Keycloak realm key and certificate
|
||||
community.general.keycloak_realm_key:
|
||||
name: custom
|
||||
state: present
|
||||
parent_id: master
|
||||
provider_id: rsa
|
||||
auth_keycloak_url: http://localhost:8080/auth
|
||||
auth_username: keycloak
|
||||
auth_password: keycloak
|
||||
auth_realm: master
|
||||
config:
|
||||
private_key: "{{ private_key }}"
|
||||
certificate: "{{ certificate }}"
|
||||
enabled: true
|
||||
active: true
|
||||
priority: 120
|
||||
algorithm: RS256
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
msg:
|
||||
description: Message as to what action was taken.
|
||||
returned: always
|
||||
type: str
|
||||
|
||||
end_state:
|
||||
description: Representation of the keycloak_realm_key after module execution.
|
||||
returned: on success
|
||||
type: dict
|
||||
contains:
|
||||
id:
|
||||
description: ID of the realm key.
|
||||
type: str
|
||||
returned: when O(state=present)
|
||||
sample: 5b7ec13f-99da-46ad-8326-ab4c73cf4ce4
|
||||
name:
|
||||
description: Name of the realm key.
|
||||
type: str
|
||||
returned: when O(state=present)
|
||||
sample: mykey
|
||||
parentId:
|
||||
description: ID of the realm this key belongs to.
|
||||
type: str
|
||||
returned: when O(state=present)
|
||||
sample: myrealm
|
||||
providerId:
|
||||
description: The ID of the key provider.
|
||||
type: str
|
||||
returned: when O(state=present)
|
||||
sample: rsa
|
||||
providerType:
|
||||
description: The type of provider.
|
||||
type: str
|
||||
returned: when O(state=present)
|
||||
config:
|
||||
description: Realm key configuration.
|
||||
type: dict
|
||||
returned: when O(state=present)
|
||||
sample: {
|
||||
"active": ["true"],
|
||||
"algorithm": ["RS256"],
|
||||
"enabled": ["true"],
|
||||
"priority": ["140"]
|
||||
}
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
|
||||
keycloak_argument_spec, get_token, KeycloakError
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlencode
|
||||
from copy import deepcopy
|
||||
|
||||
|
||||
def main():
    """Entry point: converge a Keycloak realm key (a KeyProvider component)
    to the desired state via the Keycloak Admin API components endpoint.
    """
    argument_spec = keycloak_argument_spec()

    meta_args = dict(
        state=dict(type='str', default='present', choices=['present', 'absent']),
        name=dict(type='str', required=True),
        force=dict(type='bool', default=False),
        parent_id=dict(type='str', required=True),
        provider_id=dict(type='str', default='rsa', choices=['rsa']),
        config=dict(
            type='dict',
            options=dict(
                active=dict(type='bool', default=True),
                enabled=dict(type='bool', default=True),
                priority=dict(type='int', required=True),
                algorithm=dict(type='str', default='RS256', choices=['RS256']),
                private_key=dict(type='str', required=True, no_log=True),
                certificate=dict(type='str', required=True, no_log=True)
            )
        )
    )

    argument_spec.update(meta_args)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
                           required_together=([['auth_realm', 'auth_username', 'auth_password']]))

    # Initialize the result object. Only "changed" seems to have special
    # meaning for Ansible.
    result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={}))

    # This will include the current state of the realm key if it is already
    # present. This is only used for diff-mode.
    before_realm_key = {}
    before_realm_key['config'] = {}

    # Obtain access token, initialize API
    try:
        connection_header = get_token(module.params)
    except KeycloakError as e:
        module.fail_json(msg=str(e))

    kc = KeycloakAPI(module, connection_header)

    params_to_ignore = list(keycloak_argument_spec().keys()) + ["state", "force"]

    # Filter and map the parameter names that apply to the realm key
    component_params = [x for x in module.params
                        if x not in params_to_ignore and
                        module.params.get(x) is not None]

    # We only support one component provider type in this module
    provider_type = 'org.keycloak.keys.KeyProvider'

    # Build a proposed changeset from parameters given to this module
    changeset = {}
    changeset['config'] = {}

    # Generate a JSON payload for Keycloak Admin API from the module
    # parameters. Parameters that do not belong to the JSON payload (e.g.
    # "state" or "auth_keycloak_url") have been filtered away earlier (see
    # above).
    #
    # This loop converts Ansible module parameters (snake-case) into
    # Keycloak-compatible format (camel-case). For example private_key
    # becomes privateKey.
    #
    # It also converts bool, str and int parameters into lists with a single
    # entry of 'str' type. Bool values are also lowercased. This is required
    # by Keycloak.
    #
    for component_param in component_params:
        if component_param == 'config':
            for config_param in module.params.get('config'):
                changeset['config'][camel(config_param)] = []
                raw_value = module.params.get('config')[config_param]
                if isinstance(raw_value, bool):
                    value = str(raw_value).lower()
                else:
                    value = str(raw_value)

                changeset['config'][camel(config_param)].append(value)
        else:
            # No need for camelcase in here as these are one word parameters
            new_param_value = module.params.get(component_param)
            changeset[camel(component_param)] = new_param_value

    # As provider_type is not a module parameter we have to add it to the
    # changeset explicitly.
    changeset['providerType'] = provider_type

    # Make a deep copy of the changeset. This is used when determining
    # changes to the current state.
    changeset_copy = deepcopy(changeset)

    # It is not possible to compare current keys to desired keys, because the
    # certificate parameter is a base64-encoded binary blob created on the fly
    # when a key is added. Moreover, the Keycloak Admin API does not seem to
    # return the value of the private key for comparison. So, in effect, we
    # just have to ignore changes to the keys. However, as the privateKey
    # parameter needs to be present in the JSON payload, any changes done to
    # any other parameters (e.g. config.priority) will trigger update of the
    # keys as a side-effect.
    del changeset_copy['config']['privateKey']
    del changeset_copy['config']['certificate']

    # Make it easier to refer to current module parameters.
    # (Dead locals removed: 'enabled' read a nonexistent top-level parameter
    # and 'provider_id' was never used; the value reaches the payload via the
    # camel-case loop above.)
    name = module.params.get('name')
    force = module.params.get('force')
    state = module.params.get('state')
    parent_id = module.params.get('parent_id')

    # Get a list of all Keycloak components that are of keyprovider type.
    realm_keys = kc.get_components(urlencode(dict(type=provider_type, parent=parent_id)), parent_id)

    # If this component is present get its key ID. Confusingly the key ID is
    # also known as the Provider ID.
    key_id = None

    # Track individual parameter changes
    changes = ""

    # This tells Ansible whether the key was changed (added, removed, modified)
    result['changed'] = False

    # Loop through the list of components. If we encounter a component whose
    # name matches the value of the name parameter then assume the key is
    # already present.
    for key in realm_keys:
        if key['name'] == name:
            key_id = key['id']
            changeset['id'] = key_id
            changeset_copy['id'] = key_id

            # Compare top-level parameters
            for param, value in changeset.items():
                before_realm_key[param] = key[param]

                if changeset_copy[param] != key[param] and param != 'config':
                    changes += "%s: %s -> %s, " % (param, key[param], changeset_copy[param])
                    result['changed'] = True

            # Compare parameters under the "config" key
            for p, v in changeset_copy['config'].items():
                before_realm_key['config'][p] = key['config'][p]
                if changeset_copy['config'][p] != key['config'][p]:
                    changes += "config.%s: %s -> %s, " % (p, key['config'][p], changeset_copy['config'][p])
                    result['changed'] = True

    # Sanitize linefeeds for the privateKey. Without this the JSON payload
    # will be invalid.
    changeset['config']['privateKey'][0] = changeset['config']['privateKey'][0].replace('\\n', '\n')
    changeset['config']['certificate'][0] = changeset['config']['certificate'][0].replace('\\n', '\n')

    # Check all the possible states of the resource and do what is needed to
    # converge current state with desired state (create, update or delete
    # the key).
    if key_id and state == 'present':
        if result['changed']:
            if module._diff:
                # Keys cannot be diffed (see above), so drop them before emitting the diff.
                del before_realm_key['config']['privateKey']
                del before_realm_key['config']['certificate']
                result['diff'] = dict(before=before_realm_key, after=changeset_copy)

            if module.check_mode:
                result['msg'] = "Realm key %s would be changed: %s" % (name, changes.strip(", "))
            else:
                kc.update_component(changeset, parent_id)
                result['msg'] = "Realm key %s changed: %s" % (name, changes.strip(", "))
        elif force:
            # No detectable change, but the user asked to push the key anyway
            # (the private key itself cannot be compared).
            kc.update_component(changeset, parent_id)
            result['changed'] = True
            result['msg'] = "Realm key %s was forcibly updated" % (name)
        else:
            result['msg'] = "Realm key %s was in sync" % (name)

        result['end_state'] = changeset_copy
    elif key_id and state == 'absent':
        if module._diff:
            del before_realm_key['config']['privateKey']
            del before_realm_key['config']['certificate']
            result['diff'] = dict(before=before_realm_key, after={})

        if module.check_mode:
            result['changed'] = True
            result['msg'] = "Realm key %s would be deleted" % (name)
        else:
            kc.delete_component(key_id, parent_id)
            result['changed'] = True
            result['msg'] = "Realm key %s deleted" % (name)

        result['end_state'] = {}
    elif not key_id and state == 'present':
        if module._diff:
            result['diff'] = dict(before={}, after=changeset_copy)

        if module.check_mode:
            result['changed'] = True
            result['msg'] = "Realm key %s would be created" % (name)
        else:
            kc.create_component(changeset, parent_id)
            result['changed'] = True
            result['msg'] = "Realm key %s created" % (name)

        result['end_state'] = changeset_copy
    elif not key_id and state == 'absent':
        result['changed'] = False
        result['msg'] = "Realm key %s not present" % (name)
        result['end_state'] = {}

    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
||||
@@ -120,7 +120,7 @@ import traceback
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
|
||||
from ansible.module_utils.six import string_types, text_type
|
||||
from ansible.module_utils.six import binary_type, string_types, text_type
|
||||
from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs, ldap_required_together
|
||||
|
||||
LDAP_IMP_ERR = None
|
||||
@@ -159,7 +159,7 @@ def main():
|
||||
|
||||
|
||||
def _normalize_string(val, convert_to_base64):
|
||||
if isinstance(val, string_types):
|
||||
if isinstance(val, (string_types, binary_type)):
|
||||
if isinstance(val, text_type):
|
||||
val = to_bytes(val, encoding='utf-8')
|
||||
if convert_to_base64:
|
||||
|
||||
@@ -41,13 +41,13 @@ options:
|
||||
description:
|
||||
- The size of the logical volume, according to lvcreate(8) --size, by
|
||||
default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
|
||||
according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE];
|
||||
according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE|ORIGIN];
|
||||
Float values must begin with a digit.
|
||||
- When resizing, apart from specifying an absolute size you may, according to
|
||||
lvextend(8)|lvreduce(8) C(--size), specify the amount to extend the logical volume with
|
||||
the prefix V(+) or the amount to reduce the logical volume by with prefix V(-).
|
||||
- Resizing using V(+) or V(-) was not supported prior to community.general 3.0.0.
|
||||
- Please note that when using V(+) or V(-), the module is B(not idempotent).
|
||||
- Please note that when using V(+), V(-), or percentage of FREE, the module is B(not idempotent).
|
||||
state:
|
||||
type: str
|
||||
description:
|
||||
@@ -73,7 +73,7 @@ options:
|
||||
snapshot:
|
||||
type: str
|
||||
description:
|
||||
- The name of the snapshot volume
|
||||
- The name of a snapshot volume to be configured. When creating a snapshot volume, the O(lv) parameter specifies the origin volume.
|
||||
pvs:
|
||||
type: str
|
||||
description:
|
||||
@@ -368,10 +368,10 @@ def main():
|
||||
if size_percent > 100:
|
||||
module.fail_json(msg="Size percentage cannot be larger than 100%")
|
||||
size_whole = size_parts[1]
|
||||
if size_whole == 'ORIGIN':
|
||||
module.fail_json(msg="Snapshot Volumes are not supported")
|
||||
elif size_whole not in ['VG', 'PVS', 'FREE']:
|
||||
module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
|
||||
if size_whole == 'ORIGIN' and snapshot is None:
|
||||
module.fail_json(msg="Percentage of ORIGIN supported only for snapshot volumes")
|
||||
elif size_whole not in ['VG', 'PVS', 'FREE', 'ORIGIN']:
|
||||
module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE|ORIGIN")
|
||||
size_opt = 'l'
|
||||
size_unit = ''
|
||||
|
||||
|
||||
@@ -34,23 +34,24 @@ options:
|
||||
project:
|
||||
description:
|
||||
- 'Project of an instance.
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/projects.md).'
|
||||
See U(https://documentation.ubuntu.com/lxd/en/latest/projects/).'
|
||||
required: false
|
||||
type: str
|
||||
version_added: 4.8.0
|
||||
architecture:
|
||||
description:
|
||||
- 'The architecture for the instance (for example V(x86_64) or V(i686)).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
|
||||
See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).'
|
||||
type: str
|
||||
required: false
|
||||
config:
|
||||
description:
|
||||
- 'The config for the instance (for example V({"limits.cpu": "2"})).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
|
||||
See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).'
|
||||
- If the instance already exists and its "config" values in metadata
|
||||
obtained from the LXD API U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#instances-containers-and-virtual-machines)
|
||||
are different, this module tries to apply the configurations.
|
||||
obtained from the LXD API U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get)
|
||||
are different, then this module tries to apply the configurations
|
||||
U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_put).
|
||||
- The keys starting with C(volatile.) are ignored for this comparison when O(ignore_volatile_options=true).
|
||||
type: dict
|
||||
required: false
|
||||
@@ -73,13 +74,13 @@ options:
|
||||
description:
|
||||
- 'The devices for the instance
|
||||
(for example V({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
|
||||
See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).'
|
||||
type: dict
|
||||
required: false
|
||||
ephemeral:
|
||||
description:
|
||||
- Whether or not the instance is ephemeral (for example V(true) or V(false)).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).
|
||||
See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).
|
||||
required: false
|
||||
type: bool
|
||||
source:
|
||||
@@ -87,7 +88,7 @@ options:
|
||||
- 'The source for the instance
|
||||
(for example V({ "type": "image", "mode": "pull", "server": "https://images.linuxcontainers.org",
|
||||
"protocol": "lxd", "alias": "ubuntu/xenial/amd64" })).'
|
||||
- 'See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1) for complete API documentation.'
|
||||
- 'See U(https://documentation.ubuntu.com/lxd/en/latest/api/) for complete API documentation.'
|
||||
- 'Note that C(protocol) accepts two choices: V(lxd) or V(simplestreams).'
|
||||
required: false
|
||||
type: dict
|
||||
|
||||
@@ -32,7 +32,7 @@ options:
|
||||
project:
|
||||
description:
|
||||
- 'Project of a profile.
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/projects.md).'
|
||||
See U(https://documentation.ubuntu.com/lxd/en/latest/projects/).'
|
||||
type: str
|
||||
required: false
|
||||
version_added: 4.8.0
|
||||
@@ -43,12 +43,13 @@ options:
|
||||
config:
|
||||
description:
|
||||
- 'The config for the instance (e.g. {"limits.memory": "4GB"}).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
|
||||
See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get).'
|
||||
- If the profile already exists and its "config" value in metadata
|
||||
obtained from
|
||||
GET /1.0/profiles/<name>
|
||||
U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#get-19)
|
||||
are different, they this module tries to apply the configurations.
|
||||
U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get)
|
||||
are different, then this module tries to apply the configurations
|
||||
U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_put).
|
||||
- Not all config values are supported to apply the existing profile.
|
||||
Maybe you need to delete and recreate a profile.
|
||||
required: false
|
||||
@@ -57,14 +58,14 @@ options:
|
||||
description:
|
||||
- 'The devices for the profile
|
||||
(e.g. {"rootfs": {"path": "/dev/kvm", "type": "unix-char"}).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
|
||||
See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get).'
|
||||
required: false
|
||||
type: dict
|
||||
new_name:
|
||||
description:
|
||||
- A new name of a profile.
|
||||
- If this parameter is specified a profile will be renamed to this name.
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-11)
|
||||
See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_post).
|
||||
required: false
|
||||
type: str
|
||||
merge_profile:
|
||||
|
||||
@@ -35,18 +35,19 @@ options:
|
||||
config:
|
||||
description:
|
||||
- 'The config for the project (for example V({"features.profiles": "true"})).
|
||||
See U(https://linuxcontainers.org/lxd/docs/master/projects/).'
|
||||
See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_get).'
|
||||
- If the project already exists and its "config" value in metadata
|
||||
obtained from
|
||||
C(GET /1.0/projects/<name>)
|
||||
U(https://linuxcontainers.org/lxd/docs/master/api/#/projects/project_get)
|
||||
are different, then this module tries to apply the configurations.
|
||||
U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_get)
|
||||
are different, then this module tries to apply the configurations
|
||||
U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_put).
|
||||
type: dict
|
||||
new_name:
|
||||
description:
|
||||
- A new name of a project.
|
||||
- If this parameter is specified a project will be renamed to this name.
|
||||
See U(https://linuxcontainers.org/lxd/docs/master/api/#/projects/project_post).
|
||||
See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_post).
|
||||
required: false
|
||||
type: str
|
||||
merge_project:
|
||||
|
||||
@@ -354,7 +354,7 @@ def main():
|
||||
# NOTE: Backward compatible with old syntax using '|' as delimiter
|
||||
for hdr in [x.strip() for x in header.split('|')]:
|
||||
try:
|
||||
h_key, h_val = hdr.split('=')
|
||||
h_key, h_val = hdr.split('=', 1)
|
||||
h_val = to_native(Header(h_val, charset))
|
||||
msg.add_header(h_key, h_val)
|
||||
except Exception:
|
||||
|
||||
@@ -49,6 +49,7 @@ options:
|
||||
params:
|
||||
description:
|
||||
- Any extra parameters to pass to make.
|
||||
- If the value is empty, only the key will be used. For example, V(FOO:) will produce V(FOO), not V(FOO=).
|
||||
type: dict
|
||||
target:
|
||||
description:
|
||||
@@ -90,6 +91,18 @@ EXAMPLES = r'''
|
||||
chdir: /home/ubuntu/cool-project
|
||||
target: all
|
||||
file: /some-project/Makefile
|
||||
|
||||
- name: build arm64 kernel on FreeBSD, with 16 parallel jobs
|
||||
community.general.make:
|
||||
chdir: /usr/src
|
||||
jobs: 16
|
||||
target: buildkernel
|
||||
params:
|
||||
# This adds -DWITH_FDT to the command line:
|
||||
-DWITH_FDT:
|
||||
# The following adds TARGET=arm64 TARGET_ARCH=aarch64 to the command line:
|
||||
TARGET: arm64
|
||||
TARGET_ARCH: aarch64
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
@@ -190,7 +203,7 @@ def main():
|
||||
# Fall back to system make
|
||||
make_path = module.get_bin_path('make', required=True)
|
||||
if module.params['params'] is not None:
|
||||
make_parameters = [k + '=' + str(v) for k, v in iteritems(module.params['params'])]
|
||||
make_parameters = [k + (('=' + str(v)) if v is not None else '') for k, v in iteritems(module.params['params'])]
|
||||
else:
|
||||
make_parameters = []
|
||||
|
||||
|
||||
@@ -68,6 +68,14 @@ options:
|
||||
required: false
|
||||
default: true
|
||||
type: bool
|
||||
app_name_exact_match:
|
||||
type: bool
|
||||
description:
|
||||
- If this flag is set to V(true) then the application ID lookup by name would only work for an exact match.
|
||||
If set to V(false) it returns the first result.
|
||||
required: false
|
||||
default: false
|
||||
version_added: 7.5.0
|
||||
requirements: []
|
||||
'''
|
||||
|
||||
@@ -102,8 +110,10 @@ def main():
|
||||
revision=dict(required=True),
|
||||
user=dict(required=False),
|
||||
validate_certs=dict(default=True, type='bool'),
|
||||
app_name_exact_match=dict(required=False, type='bool', default=False),
|
||||
),
|
||||
required_one_of=[['app_name', 'application_id']],
|
||||
required_if=[('app_name_exact_match', True, ['app_name'])],
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
@@ -111,7 +121,6 @@ def main():
|
||||
params = {}
|
||||
if module.params["app_name"] and module.params["application_id"]:
|
||||
module.fail_json(msg="only one of 'app_name' or 'application_id' can be set")
|
||||
|
||||
app_id = None
|
||||
if module.params["app_name"]:
|
||||
app_id = get_application_id(module)
|
||||
@@ -150,6 +159,7 @@ def main():
|
||||
def get_application_id(module):
|
||||
url = "https://api.newrelic.com/v2/applications.json"
|
||||
data = "filter[name]=%s" % module.params["app_name"]
|
||||
application_id = None
|
||||
headers = {
|
||||
'Api-Key': module.params["token"],
|
||||
}
|
||||
@@ -161,7 +171,17 @@ def get_application_id(module):
|
||||
if result is None or len(result.get("applications", "")) == 0:
|
||||
module.fail_json(msg='No application found with name "%s"' % module.params["app_name"])
|
||||
|
||||
return result["applications"][0]["id"]
|
||||
if module.params["app_name_exact_match"]:
|
||||
for item in result["applications"]:
|
||||
if item["name"] == module.params["app_name"]:
|
||||
application_id = item["id"]
|
||||
break
|
||||
if application_id is None:
|
||||
module.fail_json(msg='No application found with exact name "%s"' % module.params["app_name"])
|
||||
else:
|
||||
application_id = result["applications"][0]["id"]
|
||||
|
||||
return application_id
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
@@ -150,6 +150,7 @@ import re
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
|
||||
|
||||
|
||||
class Npm(object):
|
||||
@@ -172,33 +173,29 @@ class Npm(object):
|
||||
else:
|
||||
self.executable = [module.get_bin_path('npm', True)]
|
||||
|
||||
if kwargs['version'] and self.state != 'absent':
|
||||
self.name_version = self.name + '@' + str(self.version)
|
||||
if kwargs['version'] and kwargs['state'] != 'absent':
|
||||
self.name_version = self.name + '@' + str(kwargs['version'])
|
||||
else:
|
||||
self.name_version = self.name
|
||||
|
||||
self.runner = CmdRunner(
|
||||
module,
|
||||
command=self.executable,
|
||||
arg_formats=dict(
|
||||
exec_args=cmd_runner_fmt.as_list(),
|
||||
global_=cmd_runner_fmt.as_bool('--global'),
|
||||
production=cmd_runner_fmt.as_bool('--production'),
|
||||
ignore_scripts=cmd_runner_fmt.as_bool('--ignore-scripts'),
|
||||
unsafe_perm=cmd_runner_fmt.as_bool('--unsafe-perm'),
|
||||
name_version=cmd_runner_fmt.as_list(),
|
||||
registry=cmd_runner_fmt.as_opt_val('--registry'),
|
||||
no_optional=cmd_runner_fmt.as_bool('--no-optional'),
|
||||
no_bin_links=cmd_runner_fmt.as_bool('--no-bin-links'),
|
||||
)
|
||||
)
|
||||
|
||||
def _exec(self, args, run_in_check_mode=False, check_rc=True, add_package_name=True):
|
||||
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
|
||||
cmd = self.executable + args
|
||||
|
||||
if self.glbl:
|
||||
cmd.append('--global')
|
||||
if self.production and ('install' in cmd or 'update' in cmd or 'ci' in cmd):
|
||||
cmd.append('--production')
|
||||
if self.ignore_scripts:
|
||||
cmd.append('--ignore-scripts')
|
||||
if self.unsafe_perm:
|
||||
cmd.append('--unsafe-perm')
|
||||
if self.name_version and add_package_name:
|
||||
cmd.append(self.name_version)
|
||||
if self.registry:
|
||||
cmd.append('--registry')
|
||||
cmd.append(self.registry)
|
||||
if self.no_optional:
|
||||
cmd.append('--no-optional')
|
||||
if self.no_bin_links:
|
||||
cmd.append('--no-bin-links')
|
||||
|
||||
# If path is specified, cd into that path and run the command.
|
||||
cwd = None
|
||||
if self.path:
|
||||
@@ -208,8 +205,19 @@ class Npm(object):
|
||||
self.module.fail_json(msg="path %s is not a directory" % self.path)
|
||||
cwd = self.path
|
||||
|
||||
rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
|
||||
params = dict(self.module.params)
|
||||
params['exec_args'] = args
|
||||
params['global_'] = self.glbl
|
||||
params['production'] = self.production and ('install' in args or 'update' in args or 'ci' in args)
|
||||
params['name_version'] = self.name_version if add_package_name else None
|
||||
|
||||
with self.runner(
|
||||
"exec_args global_ production ignore_scripts unsafe_perm name_version registry no_optional no_bin_links",
|
||||
check_rc=check_rc, cwd=cwd
|
||||
) as ctx:
|
||||
rc, out, err = ctx.run(**params)
|
||||
return out
|
||||
|
||||
return ''
|
||||
|
||||
def list(self):
|
||||
@@ -269,12 +277,12 @@ class Npm(object):
|
||||
|
||||
def main():
|
||||
arg_spec = dict(
|
||||
name=dict(default=None, type='str'),
|
||||
path=dict(default=None, type='path'),
|
||||
version=dict(default=None, type='str'),
|
||||
name=dict(type='str'),
|
||||
path=dict(type='path'),
|
||||
version=dict(type='str'),
|
||||
production=dict(default=False, type='bool'),
|
||||
executable=dict(default=None, type='path'),
|
||||
registry=dict(default=None, type='str'),
|
||||
executable=dict(type='path'),
|
||||
registry=dict(type='str'),
|
||||
state=dict(default='present', choices=['present', 'absent', 'latest']),
|
||||
ignore_scripts=dict(default=False, type='bool'),
|
||||
unsafe_perm=dict(default=False, type='bool'),
|
||||
@@ -293,25 +301,27 @@ def main():
|
||||
path = module.params['path']
|
||||
version = module.params['version']
|
||||
glbl = module.params['global']
|
||||
production = module.params['production']
|
||||
executable = module.params['executable']
|
||||
registry = module.params['registry']
|
||||
state = module.params['state']
|
||||
ignore_scripts = module.params['ignore_scripts']
|
||||
unsafe_perm = module.params['unsafe_perm']
|
||||
ci = module.params['ci']
|
||||
no_optional = module.params['no_optional']
|
||||
no_bin_links = module.params['no_bin_links']
|
||||
|
||||
if not path and not glbl:
|
||||
module.fail_json(msg='path must be specified when not using global')
|
||||
|
||||
npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production,
|
||||
executable=executable, registry=registry, ignore_scripts=ignore_scripts,
|
||||
unsafe_perm=unsafe_perm, state=state, no_optional=no_optional, no_bin_links=no_bin_links)
|
||||
npm = Npm(module,
|
||||
name=name,
|
||||
path=path,
|
||||
version=version,
|
||||
glbl=glbl,
|
||||
production=module.params['production'],
|
||||
executable=module.params['executable'],
|
||||
registry=module.params['registry'],
|
||||
ignore_scripts=module.params['ignore_scripts'],
|
||||
unsafe_perm=module.params['unsafe_perm'],
|
||||
state=state,
|
||||
no_optional=module.params['no_optional'],
|
||||
no_bin_links=module.params['no_bin_links'])
|
||||
|
||||
changed = False
|
||||
if ci:
|
||||
if module.params['ci']:
|
||||
npm.ci_install()
|
||||
changed = True
|
||||
elif state == 'present':
|
||||
|
||||
@@ -467,10 +467,8 @@ class RecordManager(object):
|
||||
if lookup.rcode() != dns.rcode.NOERROR:
|
||||
self.module.fail_json(msg='Failed to lookup TTL of existing matching record.')
|
||||
|
||||
if self.module.params['type'] == 'NS':
|
||||
current_ttl = lookup.answer[0].ttl if lookup.answer else lookup.authority[0].ttl
|
||||
else:
|
||||
current_ttl = lookup.answer[0].ttl
|
||||
current_ttl = lookup.answer[0].ttl if lookup.answer else lookup.authority[0].ttl
|
||||
|
||||
return current_ttl != self.module.params['ttl']
|
||||
|
||||
|
||||
|
||||
@@ -16,6 +16,7 @@ description:
|
||||
- This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events
|
||||
author:
|
||||
- "Amanpreet Singh (@ApsOps)"
|
||||
- "Xiao Shen (@xshen1)"
|
||||
requirements:
|
||||
- PagerDuty API access
|
||||
extends_documentation_fragment:
|
||||
@@ -30,20 +31,25 @@ options:
|
||||
type: str
|
||||
description:
|
||||
- PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API.
|
||||
api_key:
|
||||
type: str
|
||||
description:
|
||||
- The pagerduty API key (readonly access), generated on the pagerduty site.
|
||||
- Required if O(api_version=v1).
|
||||
integration_key:
|
||||
type: str
|
||||
description:
|
||||
- The GUID of one of your 'Generic API' services.
|
||||
- This is the 'integration key' listed on a 'Integrations' tab of PagerDuty service.
|
||||
service_id:
|
||||
type: str
|
||||
description:
|
||||
- ID of PagerDuty service when incidents will be triggered, acknowledged or resolved.
|
||||
required: true
|
||||
- Required if O(api_version=v1).
|
||||
service_key:
|
||||
type: str
|
||||
description:
|
||||
- The GUID of one of your "Generic API" services. Obsolete. Please use O(integration_key).
|
||||
integration_key:
|
||||
type: str
|
||||
description:
|
||||
- The GUID of one of your "Generic API" services.
|
||||
- This is the "integration key" listed on a "Integrations" tab of PagerDuty service.
|
||||
- The GUID of one of your 'Generic API' services. Obsolete. Please use O(integration_key).
|
||||
state:
|
||||
type: str
|
||||
description:
|
||||
@@ -53,30 +59,17 @@ options:
|
||||
- 'triggered'
|
||||
- 'acknowledged'
|
||||
- 'resolved'
|
||||
api_key:
|
||||
api_version:
|
||||
type: str
|
||||
description:
|
||||
- The pagerduty API key (readonly access), generated on the pagerduty site.
|
||||
required: true
|
||||
desc:
|
||||
type: str
|
||||
description:
|
||||
- For O(state=triggered) - Required. Short description of the problem that led to this trigger. This field (or a truncated version)
|
||||
will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI.
|
||||
The maximum length is 1024 characters.
|
||||
- For O(state=acknowledged) or O(state=resolved) - Text that will appear in the incident's log associated with this event.
|
||||
required: false
|
||||
default: Created via Ansible
|
||||
incident_key:
|
||||
type: str
|
||||
description:
|
||||
- Identifies the incident to which this O(state) should be applied.
|
||||
- For O(state=triggered) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an
|
||||
open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup"
|
||||
problem reports.
|
||||
- For O(state=acknowledged) or O(state=resolved) - This should be the incident_key you received back when the incident was first opened by a
|
||||
trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded.
|
||||
required: false
|
||||
- The API version we want to use to run the module.
|
||||
- V1 is more limited with option we can provide to trigger incident.
|
||||
- V2 has more variables for example, O(severity), O(source), O(custom_details), etc.
|
||||
default: 'v1'
|
||||
choices:
|
||||
- 'v1'
|
||||
- 'v2'
|
||||
version_added: 7.4.0
|
||||
client:
|
||||
type: str
|
||||
description:
|
||||
@@ -87,6 +80,75 @@ options:
|
||||
description:
|
||||
- The URL of the monitoring client that is triggering this event.
|
||||
required: false
|
||||
component:
|
||||
type: str
|
||||
description:
|
||||
- Component of the source machine that is responsible for the event, for example C(mysql) or C(eth0).
|
||||
required: false
|
||||
version_added: 7.4.0
|
||||
custom_details:
|
||||
type: dict
|
||||
description:
|
||||
- Additional details about the event and affected system.
|
||||
- A dictionary with custom keys and values.
|
||||
required: false
|
||||
version_added: 7.4.0
|
||||
desc:
|
||||
type: str
|
||||
description:
|
||||
- For O(state=triggered) - Required. Short description of the problem that led to this trigger. This field (or a truncated version)
|
||||
will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI.
|
||||
The maximum length is 1024 characters.
|
||||
- For O(state=acknowledged) or O(state=resolved) - Text that will appear in the incident's log associated with this event.
|
||||
required: false
|
||||
default: Created via Ansible
|
||||
incident_class:
|
||||
type: str
|
||||
description:
|
||||
- The class/type of the event, for example C(ping failure) or C(cpu load).
|
||||
required: false
|
||||
version_added: 7.4.0
|
||||
incident_key:
|
||||
type: str
|
||||
description:
|
||||
- Identifies the incident to which this O(state) should be applied.
|
||||
- For O(state=triggered) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an
|
||||
open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to 'de-dup'
|
||||
problem reports. If no O(incident_key) is provided, then it will be generated by PagerDuty.
|
||||
- For O(state=acknowledged) or O(state=resolved) - This should be the incident_key you received back when the incident was first opened by a
|
||||
trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded.
|
||||
required: false
|
||||
link_url:
|
||||
type: str
|
||||
description:
|
||||
- Relevant link url to the alert. For example, the website or the job link.
|
||||
required: false
|
||||
version_added: 7.4.0
|
||||
link_text:
|
||||
type: str
|
||||
description:
|
||||
- A short decription of the link_url.
|
||||
required: false
|
||||
version_added: 7.4.0
|
||||
source:
|
||||
type: str
|
||||
description:
|
||||
- The unique location of the affected system, preferably a hostname or FQDN.
|
||||
- Required in case of O(state=trigger) and O(api_version=v2).
|
||||
required: false
|
||||
version_added: 7.4.0
|
||||
severity:
|
||||
type: str
|
||||
description:
|
||||
- The perceived severity of the status the event is describing with respect to the affected system.
|
||||
- Required in case of O(state=trigger) and O(api_version=v2).
|
||||
default: 'critical'
|
||||
choices:
|
||||
- 'critical'
|
||||
- 'warning'
|
||||
- 'error'
|
||||
- 'info'
|
||||
version_added: 7.4.0
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
@@ -127,12 +189,50 @@ EXAMPLES = '''
|
||||
state: resolved
|
||||
incident_key: somekey
|
||||
desc: "some text for incident's log"
|
||||
|
||||
- name: Trigger an v2 incident with just the basic options
|
||||
community.general.pagerduty_alert:
|
||||
integration_key: xxx
|
||||
api_version: v2
|
||||
source: My Ansible Script
|
||||
state: triggered
|
||||
desc: problem that led to this trigger
|
||||
|
||||
- name: Trigger an v2 incident with more options
|
||||
community.general.pagerduty_alert:
|
||||
integration_key: xxx
|
||||
api_version: v2
|
||||
source: My Ansible Script
|
||||
state: triggered
|
||||
desc: problem that led to this trigger
|
||||
incident_key: somekey
|
||||
client: Sample Monitoring Service
|
||||
client_url: http://service.example.com
|
||||
component: mysql
|
||||
incident_class: ping failure
|
||||
link_url: https://pagerduty.com
|
||||
link_text: PagerDuty
|
||||
|
||||
- name: Acknowledge an incident based on incident_key using v2
|
||||
community.general.pagerduty_alert:
|
||||
api_version: v2
|
||||
integration_key: xxx
|
||||
incident_key: somekey
|
||||
state: acknowledged
|
||||
|
||||
- name: Resolve an incident based on incident_key
|
||||
community.general.pagerduty_alert:
|
||||
api_version: v2
|
||||
integration_key: xxx
|
||||
incident_key: somekey
|
||||
state: resolved
|
||||
'''
|
||||
import json
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.urls import fetch_url
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlencode, urlunparse
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
def check(module, name, state, service_id, integration_key, api_key, incident_key=None, http_call=fetch_url):
|
||||
@@ -175,8 +275,8 @@ def check(module, name, state, service_id, integration_key, api_key, incident_ke
|
||||
return incidents[0], False
|
||||
|
||||
|
||||
def send_event(module, service_key, event_type, desc,
|
||||
incident_key=None, client=None, client_url=None):
|
||||
def send_event_v1(module, service_key, event_type, desc,
|
||||
incident_key=None, client=None, client_url=None):
|
||||
url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
|
||||
headers = {
|
||||
"Content-type": "application/json"
|
||||
@@ -200,61 +300,127 @@ def send_event(module, service_key, event_type, desc,
|
||||
return json_out
|
||||
|
||||
|
||||
def send_event_v2(module, service_key, event_type, payload, link,
|
||||
incident_key=None, client=None, client_url=None):
|
||||
url = "https://events.pagerduty.com/v2/enqueue"
|
||||
headers = {
|
||||
"Content-type": "application/json"
|
||||
}
|
||||
data = {
|
||||
"routing_key": service_key,
|
||||
"event_action": event_type,
|
||||
"payload": payload,
|
||||
"client": client,
|
||||
"client_url": client_url,
|
||||
}
|
||||
if link:
|
||||
data["links"] = [link]
|
||||
if incident_key:
|
||||
data["dedup_key"] = incident_key
|
||||
if event_type != "trigger":
|
||||
data.pop("payload")
|
||||
response, info = fetch_url(module, url, method="post",
|
||||
headers=headers, data=json.dumps(data))
|
||||
if info["status"] != 202:
|
||||
module.fail_json(msg="failed to %s. Reason: %s" %
|
||||
(event_type, info['msg']))
|
||||
json_out = json.loads(response.read())
|
||||
return json_out, True
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
name=dict(required=False),
|
||||
service_id=dict(required=True),
|
||||
service_key=dict(required=False, no_log=True),
|
||||
api_key=dict(required=False, no_log=True),
|
||||
integration_key=dict(required=False, no_log=True),
|
||||
api_key=dict(required=True, no_log=True),
|
||||
state=dict(required=True,
|
||||
choices=['triggered', 'acknowledged', 'resolved']),
|
||||
client=dict(required=False, default=None),
|
||||
client_url=dict(required=False, default=None),
|
||||
service_id=dict(required=False),
|
||||
service_key=dict(required=False, no_log=True),
|
||||
state=dict(
|
||||
required=True, choices=['triggered', 'acknowledged', 'resolved']
|
||||
),
|
||||
api_version=dict(type='str', default='v1', choices=['v1', 'v2']),
|
||||
client=dict(required=False),
|
||||
client_url=dict(required=False),
|
||||
component=dict(required=False),
|
||||
custom_details=dict(required=False, type='dict'),
|
||||
desc=dict(required=False, default='Created via Ansible'),
|
||||
incident_key=dict(required=False, default=None, no_log=False)
|
||||
incident_class=dict(required=False),
|
||||
incident_key=dict(required=False, no_log=False),
|
||||
link_url=dict(required=False),
|
||||
link_text=dict(required=False),
|
||||
source=dict(required=False),
|
||||
severity=dict(
|
||||
default='critical', choices=['critical', 'warning', 'error', 'info']
|
||||
),
|
||||
),
|
||||
supports_check_mode=True
|
||||
required_if=[
|
||||
('api_version', 'v1', ['service_id', 'api_key']),
|
||||
('state', 'acknowledged', ['incident_key']),
|
||||
('state', 'resolved', ['incident_key']),
|
||||
],
|
||||
required_one_of=[('service_key', 'integration_key')],
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
name = module.params['name']
|
||||
service_id = module.params['service_id']
|
||||
integration_key = module.params['integration_key']
|
||||
service_key = module.params['service_key']
|
||||
api_key = module.params['api_key']
|
||||
state = module.params['state']
|
||||
client = module.params['client']
|
||||
client_url = module.params['client_url']
|
||||
desc = module.params['desc']
|
||||
incident_key = module.params['incident_key']
|
||||
|
||||
service_id = module.params.get('service_id')
|
||||
integration_key = module.params.get('integration_key')
|
||||
service_key = module.params.get('service_key')
|
||||
api_key = module.params.get('api_key')
|
||||
state = module.params.get('state')
|
||||
client = module.params.get('client')
|
||||
client_url = module.params.get('client_url')
|
||||
desc = module.params.get('desc')
|
||||
incident_key = module.params.get('incident_key')
|
||||
payload = {
|
||||
'summary': desc,
|
||||
'source': module.params.get('source'),
|
||||
'timestamp': datetime.now().isoformat(),
|
||||
'severity': module.params.get('severity'),
|
||||
'component': module.params.get('component'),
|
||||
'class': module.params.get('incident_class'),
|
||||
'custom_details': module.params.get('custom_details'),
|
||||
}
|
||||
link = {}
|
||||
if module.params.get('link_url'):
|
||||
link['href'] = module.params.get('link_url')
|
||||
if module.params.get('link_text'):
|
||||
link['text'] = module.params.get('link_text')
|
||||
if integration_key is None:
|
||||
if service_key is not None:
|
||||
integration_key = service_key
|
||||
module.warn('"service_key" is obsolete parameter and will be removed.'
|
||||
' Please, use "integration_key" instead')
|
||||
else:
|
||||
module.fail_json(msg="'integration_key' is required parameter")
|
||||
integration_key = service_key
|
||||
module.warn(
|
||||
'"service_key" is obsolete parameter and will be removed.'
|
||||
' Please, use "integration_key" instead'
|
||||
)
|
||||
|
||||
state_event_dict = {
|
||||
'triggered': 'trigger',
|
||||
'acknowledged': 'acknowledge',
|
||||
'resolved': 'resolve'
|
||||
'resolved': 'resolve',
|
||||
}
|
||||
|
||||
event_type = state_event_dict[state]
|
||||
|
||||
if event_type != 'trigger' and incident_key is None:
|
||||
module.fail_json(msg="incident_key is required for "
|
||||
"acknowledge or resolve events")
|
||||
|
||||
out, changed = check(module, name, state, service_id,
|
||||
integration_key, api_key, incident_key)
|
||||
|
||||
if not module.check_mode and changed is True:
|
||||
out = send_event(module, integration_key, event_type, desc,
|
||||
incident_key, client, client_url)
|
||||
if module.params.get('api_version') == 'v1':
|
||||
out, changed = check(module, name, state, service_id,
|
||||
integration_key, api_key, incident_key)
|
||||
if not module.check_mode and changed is True:
|
||||
out = send_event_v1(module, integration_key, event_type, desc,
|
||||
incident_key, client, client_url)
|
||||
else:
|
||||
changed = True
|
||||
if event_type == 'trigger' and not payload['source']:
|
||||
module.fail_json(msg='"service" is a required variable for v2 api endpoint.')
|
||||
out, changed = send_event_v2(
|
||||
module,
|
||||
integration_key,
|
||||
event_type,
|
||||
payload,
|
||||
link,
|
||||
incident_key,
|
||||
client,
|
||||
client_url,
|
||||
)
|
||||
|
||||
module.exit_json(result=out, changed=changed)
|
||||
|
||||
|
||||
@@ -569,8 +569,18 @@ def parted(script, device, align):
|
||||
if align == 'undefined':
|
||||
align_option = ''
|
||||
|
||||
"""
|
||||
Use option --fix (-f) if available. Versions prior
|
||||
to 3.4.64 don't have it. For more information see:
|
||||
http://savannah.gnu.org/news/?id=10114
|
||||
"""
|
||||
if parted_version() >= (3, 4, 64):
|
||||
script_option = '-s -f'
|
||||
else:
|
||||
script_option = '-s'
|
||||
|
||||
if script and not module.check_mode:
|
||||
command = "%s -s -m %s %s -- %s" % (parted_exec, align_option, device, script)
|
||||
command = "%s %s -m %s %s -- %s" % (parted_exec, script_option, align_option, device, script)
|
||||
rc, out, err = module.run_command(command)
|
||||
|
||||
if rc != 0:
|
||||
|
||||
462
plugins/modules/pnpm.py
Normal file
462
plugins/modules/pnpm.py
Normal file
@@ -0,0 +1,462 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2023 Aritra Sen <aretrosen@proton.me>
|
||||
# Copyright (c) 2017 Chris Hoffman <christopher.hoffman@gmail.com>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: pnpm
|
||||
short_description: Manage node.js packages with pnpm
|
||||
version_added: 7.4.0
|
||||
description:
|
||||
- Manage node.js packages with the L(pnpm package manager, https://pnpm.io/).
|
||||
author:
|
||||
- "Aritra Sen (@aretrosen)"
|
||||
- "Chris Hoffman (@chrishoffman), creator of NPM Ansible module"
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name of a node.js library to install.
|
||||
- All packages in package.json are installed if not provided.
|
||||
type: str
|
||||
required: false
|
||||
alias:
|
||||
description:
|
||||
- Alias of the node.js library.
|
||||
type: str
|
||||
required: false
|
||||
path:
|
||||
description:
|
||||
- The base path to install the node.js libraries.
|
||||
type: path
|
||||
required: false
|
||||
version:
|
||||
description:
|
||||
- The version of the library to be installed, in semver format.
|
||||
type: str
|
||||
required: false
|
||||
global:
|
||||
description:
|
||||
- Install the node.js library globally.
|
||||
required: false
|
||||
default: false
|
||||
type: bool
|
||||
executable:
|
||||
description:
|
||||
- The executable location for pnpm.
|
||||
- The default location it searches for is E(PATH), fails if not set.
|
||||
type: path
|
||||
required: false
|
||||
ignore_scripts:
|
||||
description:
|
||||
- Use the C(--ignore-scripts) flag when installing.
|
||||
required: false
|
||||
type: bool
|
||||
default: false
|
||||
no_optional:
|
||||
description:
|
||||
- Do not install optional packages, equivalent to C(--no-optional).
|
||||
required: false
|
||||
type: bool
|
||||
default: false
|
||||
production:
|
||||
description:
|
||||
- Install dependencies in production mode.
|
||||
- Pnpm will ignore any dependencies under C(devDependencies) in package.json.
|
||||
required: false
|
||||
type: bool
|
||||
default: false
|
||||
dev:
|
||||
description:
|
||||
- Install dependencies in development mode.
|
||||
- Pnpm will ignore any regular dependencies in C(package.json).
|
||||
required: false
|
||||
default: false
|
||||
type: bool
|
||||
optional:
|
||||
description:
|
||||
- Install dependencies in optional mode.
|
||||
required: false
|
||||
default: false
|
||||
type: bool
|
||||
state:
|
||||
description:
|
||||
- Installation state of the named node.js library.
|
||||
- If V(absent) is selected, a name option must be provided.
|
||||
type: str
|
||||
required: false
|
||||
default: present
|
||||
choices: ["present", "absent", "latest"]
|
||||
requirements:
|
||||
- Pnpm executable present in E(PATH).
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
- name: Install "tailwindcss" node.js package.
|
||||
community.general.pnpm:
|
||||
name: tailwindcss
|
||||
path: /app/location
|
||||
|
||||
- name: Install "tailwindcss" node.js package on version 3.3.2
|
||||
community.general.pnpm:
|
||||
name: tailwindcss
|
||||
version: 3.3.2
|
||||
path: /app/location
|
||||
|
||||
- name: Install "tailwindcss" node.js package globally.
|
||||
community.general.pnpm:
|
||||
name: tailwindcss
|
||||
global: true
|
||||
|
||||
- name: Install "tailwindcss" node.js package as dev dependency.
|
||||
community.general.pnpm:
|
||||
name: tailwindcss
|
||||
path: /app/location
|
||||
dev: true
|
||||
|
||||
- name: Install "tailwindcss" node.js package as optional dependency.
|
||||
community.general.pnpm:
|
||||
name: tailwindcss
|
||||
path: /app/location
|
||||
optional: true
|
||||
|
||||
- name: Install "tailwindcss" node.js package version 0.1.3 as tailwind-1
|
||||
community.general.pnpm:
|
||||
name: tailwindcss
|
||||
alias: tailwind-1
|
||||
version: 0.1.3
|
||||
path: /app/location
|
||||
|
||||
- name: Remove the globally-installed package "tailwindcss".
|
||||
community.general.pnpm:
|
||||
name: tailwindcss
|
||||
global: true
|
||||
state: absent
|
||||
|
||||
- name: Install packages based on package.json.
|
||||
community.general.pnpm:
|
||||
path: /app/location
|
||||
|
||||
- name: Update all packages in package.json to their latest version.
|
||||
community.general.pnpm:
|
||||
path: /app/location
|
||||
state: latest
|
||||
"""
|
||||
import json
|
||||
import os
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
|
||||
class Pnpm(object):
|
||||
def __init__(self, module, **kwargs):
|
||||
self.module = module
|
||||
self.name = kwargs["name"]
|
||||
self.alias = kwargs["alias"]
|
||||
self.version = kwargs["version"]
|
||||
self.path = kwargs["path"]
|
||||
self.globally = kwargs["globally"]
|
||||
self.executable = kwargs["executable"]
|
||||
self.ignore_scripts = kwargs["ignore_scripts"]
|
||||
self.no_optional = kwargs["no_optional"]
|
||||
self.production = kwargs["production"]
|
||||
self.dev = kwargs["dev"]
|
||||
self.optional = kwargs["optional"]
|
||||
|
||||
self.alias_name_ver = None
|
||||
|
||||
if self.alias is not None:
|
||||
self.alias_name_ver = self.alias + "@npm:"
|
||||
|
||||
if self.name is not None:
|
||||
self.alias_name_ver = (self.alias_name_ver or "") + self.name
|
||||
if self.version is not None:
|
||||
self.alias_name_ver = self.alias_name_ver + "@" + str(self.version)
|
||||
else:
|
||||
self.alias_name_ver = self.alias_name_ver + "@latest"
|
||||
|
||||
def _exec(self, args, run_in_check_mode=False, check_rc=True):
|
||||
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
|
||||
cmd = self.executable + args
|
||||
|
||||
if self.globally:
|
||||
cmd.append("-g")
|
||||
|
||||
if self.ignore_scripts:
|
||||
cmd.append("--ignore-scripts")
|
||||
|
||||
if self.no_optional:
|
||||
cmd.append("--no-optional")
|
||||
|
||||
if self.production:
|
||||
cmd.append("-P")
|
||||
|
||||
if self.dev:
|
||||
cmd.append("-D")
|
||||
|
||||
if self.name and self.optional:
|
||||
cmd.append("-O")
|
||||
|
||||
# If path is specified, cd into that path and run the command.
|
||||
cwd = None
|
||||
if self.path:
|
||||
if not os.path.exists(self.path):
|
||||
os.makedirs(self.path)
|
||||
|
||||
if not os.path.isdir(self.path):
|
||||
self.module.fail_json(msg="Path %s is not a directory" % self.path)
|
||||
|
||||
if not self.alias_name_ver and not os.path.isfile(
|
||||
os.path.join(self.path, "package.json")
|
||||
):
|
||||
self.module.fail_json(
|
||||
msg="package.json does not exist in provided path"
|
||||
)
|
||||
|
||||
cwd = self.path
|
||||
|
||||
_rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
|
||||
return out, err
|
||||
|
||||
return None, None
|
||||
|
||||
def missing(self):
|
||||
if not os.path.isfile(os.path.join(self.path, "pnpm-lock.yaml")):
|
||||
return True
|
||||
|
||||
cmd = ["list", "--json"]
|
||||
|
||||
if self.name is not None:
|
||||
cmd.append(self.name)
|
||||
|
||||
try:
|
||||
out, err = self._exec(cmd, True, False)
|
||||
if err is not None and err != "":
|
||||
raise Exception(out)
|
||||
|
||||
data = json.loads(out)
|
||||
except Exception as e:
|
||||
self.module.fail_json(
|
||||
msg="Failed to parse pnpm output with error %s" % to_native(e)
|
||||
)
|
||||
|
||||
if "error" in data:
|
||||
return True
|
||||
|
||||
data = data[0]
|
||||
|
||||
for typedep in [
|
||||
"dependencies",
|
||||
"devDependencies",
|
||||
"optionalDependencies",
|
||||
"unsavedDependencies",
|
||||
]:
|
||||
if typedep not in data:
|
||||
continue
|
||||
|
||||
for dep, prop in data[typedep].items():
|
||||
if self.alias is not None and self.alias != dep:
|
||||
continue
|
||||
|
||||
name = prop["from"] if self.alias is not None else dep
|
||||
if self.name != name:
|
||||
continue
|
||||
|
||||
if self.version is None or self.version == prop["version"]:
|
||||
return False
|
||||
|
||||
break
|
||||
|
||||
return True
|
||||
|
||||
def install(self):
|
||||
if self.alias_name_ver is not None:
|
||||
return self._exec(["add", self.alias_name_ver])
|
||||
return self._exec(["install"])
|
||||
|
||||
def update(self):
|
||||
return self._exec(["update", "--latest"])
|
||||
|
||||
def uninstall(self):
|
||||
if self.alias is not None:
|
||||
return self._exec(["remove", self.alias])
|
||||
return self._exec(["remove", self.name])
|
||||
|
||||
def list_outdated(self):
|
||||
if not os.path.isfile(os.path.join(self.path, "pnpm-lock.yaml")):
|
||||
return list()
|
||||
|
||||
cmd = ["outdated", "--format", "json"]
|
||||
try:
|
||||
out, err = self._exec(cmd, True, False)
|
||||
|
||||
# BUG: It will not show correct error sometimes, like when it has
|
||||
# plain text output intermingled with a {}
|
||||
if err is not None and err != "":
|
||||
raise Exception(out)
|
||||
|
||||
# HACK: To fix the above bug, the following hack is implemented
|
||||
data_lines = out.splitlines(True)
|
||||
|
||||
out = None
|
||||
for line in data_lines:
|
||||
if len(line) > 0 and line[0] == "{":
|
||||
out = line
|
||||
continue
|
||||
|
||||
if len(line) > 0 and line[0] == "}":
|
||||
out += line
|
||||
break
|
||||
|
||||
if out is not None:
|
||||
out += line
|
||||
|
||||
data = json.loads(out)
|
||||
except Exception as e:
|
||||
self.module.fail_json(
|
||||
msg="Failed to parse pnpm output with error %s" % to_native(e)
|
||||
)
|
||||
|
||||
return data.keys()
|
||||
|
||||
|
||||
def main():
|
||||
arg_spec = dict(
|
||||
name=dict(default=None),
|
||||
alias=dict(default=None),
|
||||
path=dict(default=None, type="path"),
|
||||
version=dict(default=None),
|
||||
executable=dict(default=None, type="path"),
|
||||
ignore_scripts=dict(default=False, type="bool"),
|
||||
no_optional=dict(default=False, type="bool"),
|
||||
production=dict(default=False, type="bool"),
|
||||
dev=dict(default=False, type="bool"),
|
||||
optional=dict(default=False, type="bool"),
|
||||
state=dict(default="present", choices=["present", "absent", "latest"]),
|
||||
)
|
||||
arg_spec["global"] = dict(default=False, type="bool")
|
||||
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
|
||||
|
||||
name = module.params["name"]
|
||||
alias = module.params["alias"]
|
||||
path = module.params["path"]
|
||||
version = module.params["version"]
|
||||
globally = module.params["global"]
|
||||
ignore_scripts = module.params["ignore_scripts"]
|
||||
no_optional = module.params["no_optional"]
|
||||
production = module.params["production"]
|
||||
dev = module.params["dev"]
|
||||
optional = module.params["optional"]
|
||||
state = module.params["state"]
|
||||
|
||||
if module.params["executable"]:
|
||||
executable = module.params["executable"].split(" ")
|
||||
else:
|
||||
executable = [module.get_bin_path("pnpm", True)]
|
||||
|
||||
if name is None and version is not None:
|
||||
module.fail_json(msg="version is meaningless when name is not provided")
|
||||
|
||||
if name is None and alias is not None:
|
||||
module.fail_json(msg="alias is meaningless when name is not provided")
|
||||
|
||||
if path is None and not globally:
|
||||
module.fail_json(msg="path must be specified when not using global")
|
||||
elif path is not None and globally:
|
||||
module.fail_json(msg="Cannot specify path when doing global installation")
|
||||
|
||||
if globally and (production or dev or optional):
|
||||
module.fail_json(
|
||||
msg="Options production, dev, and optional is meaningless when installing packages globally"
|
||||
)
|
||||
|
||||
if name is not None and path is not None and globally:
|
||||
module.fail_json(msg="path should not be mentioned when installing globally")
|
||||
|
||||
if production and dev and optional:
|
||||
module.fail_json(
|
||||
msg="Options production and dev and optional don't go together"
|
||||
)
|
||||
|
||||
if production and dev:
|
||||
module.fail_json(msg="Options production and dev don't go together")
|
||||
|
||||
if production and optional:
|
||||
module.fail_json(msg="Options production and optional don't go together")
|
||||
|
||||
if dev and optional:
|
||||
module.fail_json(msg="Options dev and optional don't go together")
|
||||
|
||||
if name is not None and name[0:4] == "http" and version is not None:
|
||||
module.fail_json(msg="Semver not supported on remote url downloads")
|
||||
|
||||
if name is None and optional:
|
||||
module.fail_json(
|
||||
msg="Optional not available when package name not provided, use no_optional instead"
|
||||
)
|
||||
|
||||
if state == "absent" and name is None:
|
||||
module.fail_json(msg="Package name is required for uninstalling")
|
||||
|
||||
if globally:
|
||||
_rc, out, _err = module.run_command(executable + ["root", "-g"], check_rc=True)
|
||||
path, _tail = os.path.split(out.strip())
|
||||
|
||||
pnpm = Pnpm(
|
||||
module,
|
||||
name=name,
|
||||
alias=alias,
|
||||
path=path,
|
||||
version=version,
|
||||
globally=globally,
|
||||
executable=executable,
|
||||
ignore_scripts=ignore_scripts,
|
||||
no_optional=no_optional,
|
||||
production=production,
|
||||
dev=dev,
|
||||
optional=optional,
|
||||
)
|
||||
|
||||
changed = False
|
||||
out = ""
|
||||
err = ""
|
||||
if state == "present":
|
||||
if pnpm.missing():
|
||||
changed = True
|
||||
out, err = pnpm.install()
|
||||
elif state == "latest":
|
||||
outdated = pnpm.list_outdated()
|
||||
if name is not None:
|
||||
if pnpm.missing() or name in outdated:
|
||||
changed = True
|
||||
out, err = pnpm.install()
|
||||
elif len(outdated):
|
||||
changed = True
|
||||
out, err = pnpm.update()
|
||||
else: # absent
|
||||
if not pnpm.missing():
|
||||
changed = True
|
||||
out, err = pnpm.uninstall()
|
||||
|
||||
module.exit_json(changed=changed, out=out, err=err)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -546,7 +546,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
|
||||
return True
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
self.module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' %
|
||||
self.module.fail_json(vmid=vmid, node=node, msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' %
|
||||
proxmox_node.tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
@@ -559,7 +559,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
|
||||
return True
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
self.module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' %
|
||||
self.module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' %
|
||||
self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
@@ -575,7 +575,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
|
||||
return True
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
self.module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' %
|
||||
self.module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' %
|
||||
self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
@@ -588,7 +588,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
|
||||
return True
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
self.module.fail_json(msg='Reached timeout while waiting for unmounting VM. Last line in task before timeout: %s' %
|
||||
self.module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for unmounting VM. Last line in task before timeout: %s' %
|
||||
self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
@@ -695,20 +695,20 @@ def main():
|
||||
if state == 'present' and clone is None:
|
||||
try:
|
||||
if proxmox.get_vm(vmid, ignore_missing=True) and not module.params['force']:
|
||||
module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM with vmid = %s is already exists" % vmid)
|
||||
# If no vmid was passed, there cannot be another VM named 'hostname'
|
||||
if (not module.params['vmid'] and
|
||||
proxmox.get_vmid(hostname, ignore_missing=True) and
|
||||
not module.params['force']):
|
||||
vmid = proxmox.get_vmid(hostname)
|
||||
module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
|
||||
elif not proxmox.get_node(node):
|
||||
module.fail_json(msg="node '%s' not exists in cluster" % node)
|
||||
module.fail_json(vmid=vmid, msg="node '%s' not exists in cluster" % node)
|
||||
elif not proxmox.content_check(node, module.params['ostemplate'], template_store):
|
||||
module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s"
|
||||
module.fail_json(vmid=vmid, msg="ostemplate '%s' not exists on node %s and storage %s"
|
||||
% (module.params['ostemplate'], node, template_store))
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Pre-creation checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
|
||||
module.fail_json(vmid=vmid, msg="Pre-creation checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
|
||||
|
||||
try:
|
||||
proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone,
|
||||
@@ -733,43 +733,43 @@ def main():
|
||||
timezone=module.params['timezone'],
|
||||
tags=module.params['tags'])
|
||||
|
||||
module.exit_json(changed=True, msg="Deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
|
||||
module.exit_json(changed=True, vmid=vmid, msg="Deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
|
||||
module.fail_json(vmid=vmid, msg="Creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
|
||||
|
||||
# Clone a container
|
||||
elif state == 'present' and clone is not None:
|
||||
try:
|
||||
if proxmox.get_vm(vmid, ignore_missing=True) and not module.params['force']:
|
||||
module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM with vmid = %s is already exists" % vmid)
|
||||
# If no vmid was passed, there cannot be another VM named 'hostname'
|
||||
if (not module.params['vmid'] and
|
||||
proxmox.get_vmid(hostname, ignore_missing=True) and
|
||||
not module.params['force']):
|
||||
vmid = proxmox.get_vmid(hostname)
|
||||
module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
|
||||
if not proxmox.get_vm(clone, ignore_missing=True):
|
||||
module.exit_json(changed=False, msg="Container to be cloned does not exist")
|
||||
module.exit_json(changed=False, vmid=vmid, msg="Container to be cloned does not exist")
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Pre-clone checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
|
||||
module.fail_json(vmid=vmid, msg="Pre-clone checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
|
||||
|
||||
try:
|
||||
proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone)
|
||||
|
||||
module.exit_json(changed=True, msg="Cloned VM %s from %s" % (vmid, clone))
|
||||
module.exit_json(changed=True, vmid=vmid, msg="Cloned VM %s from %s" % (vmid, clone))
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Cloning %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
|
||||
module.fail_json(vmid=vmid, msg="Cloning %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
|
||||
|
||||
elif state == 'started':
|
||||
try:
|
||||
vm = proxmox.get_vm(vmid)
|
||||
if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
|
||||
module.exit_json(changed=False, msg="VM %s is already running" % vmid)
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM %s is already running" % vmid)
|
||||
|
||||
if proxmox.start_instance(vm, vmid, timeout):
|
||||
module.exit_json(changed=True, msg="VM %s started" % vmid)
|
||||
module.exit_json(changed=True, vmid=vmid, msg="VM %s started" % vmid)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))
|
||||
module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e))
|
||||
|
||||
elif state == 'stopped':
|
||||
try:
|
||||
@@ -778,18 +778,18 @@ def main():
|
||||
if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
|
||||
if module.params['force']:
|
||||
if proxmox.umount_instance(vm, vmid, timeout):
|
||||
module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
|
||||
module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid)
|
||||
else:
|
||||
module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. "
|
||||
"You can use force option to umount it.") % vmid)
|
||||
module.exit_json(changed=False, vmid=vmid,
|
||||
msg=("VM %s is already shutdown, but mounted. You can use force option to umount it.") % vmid)
|
||||
|
||||
if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
|
||||
module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid)
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM %s is already shutdown" % vmid)
|
||||
|
||||
if proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']):
|
||||
module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
|
||||
module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e))
|
||||
module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e))
|
||||
|
||||
elif state == 'restarted':
|
||||
try:
|
||||
@@ -797,28 +797,28 @@ def main():
|
||||
|
||||
vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status']
|
||||
if vm_status in ['stopped', 'mounted']:
|
||||
module.exit_json(changed=False, msg="VM %s is not running" % vmid)
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid)
|
||||
|
||||
if (proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']) and
|
||||
proxmox.start_instance(vm, vmid, timeout)):
|
||||
module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
|
||||
module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e))
|
||||
module.fail_json(vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e))
|
||||
|
||||
elif state == 'absent':
|
||||
if not vmid:
|
||||
module.exit_json(changed=False, msg='VM with hostname = %s is already absent' % hostname)
|
||||
module.exit_json(changed=False, vmid=vmid, msg='VM with hostname = %s is already absent' % hostname)
|
||||
try:
|
||||
vm = proxmox.get_vm(vmid, ignore_missing=True)
|
||||
if not vm:
|
||||
module.exit_json(changed=False, msg="VM %s does not exist" % vmid)
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM %s does not exist" % vmid)
|
||||
|
||||
vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status']
|
||||
if vm_status == 'running':
|
||||
module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM %s is running. Stop it before deletion." % vmid)
|
||||
|
||||
if vm_status == 'mounted':
|
||||
module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
|
||||
|
||||
delete_params = {}
|
||||
|
||||
@@ -829,15 +829,15 @@ def main():
|
||||
|
||||
while timeout:
|
||||
if proxmox.api_task_ok(vm['node'], taskid):
|
||||
module.exit_json(changed=True, msg="VM %s removed" % vmid)
|
||||
module.exit_json(changed=True, vmid=vmid, taskid=taskid, msg="VM %s removed" % vmid)
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
|
||||
module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
|
||||
% proxmox.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e)))
|
||||
module.fail_json(vmid=vmid, msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e)))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
@@ -64,6 +64,7 @@ options:
|
||||
boot:
|
||||
description:
|
||||
- Specify the boot order -> boot on floppy V(a), hard disk V(c), CD-ROM V(d), or network V(n).
|
||||
- For newer versions of Proxmox VE, use a boot order like V(order=scsi0;net0;hostpci0).
|
||||
- You can combine to set order.
|
||||
- This option has no default unless O(proxmox_default_behavior) is set to V(compatiblity); then the default is V(cnd).
|
||||
type: str
|
||||
@@ -372,7 +373,7 @@ options:
|
||||
scsi:
|
||||
description:
|
||||
- A hash/dictionary of volume used as SCSI hard disk or CD-ROM. O(scsi='{"key":"value", "key":"value"}').
|
||||
- Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 13.
|
||||
- Keys allowed are - C(scsi[n]) where 0 ≤ n ≤ 13.
|
||||
- Values allowed are - C("storage:size,format=value").
|
||||
- C(storage) is the storage identifier where to create the disk.
|
||||
- C(size) is the size of the disk in GB.
|
||||
@@ -414,6 +415,14 @@ options:
|
||||
smbios:
|
||||
description:
|
||||
- Specifies SMBIOS type 1 fields.
|
||||
- "Comma separated, Base64 encoded (optional) SMBIOS properties:"
|
||||
- V([base64=<1|0>] [,family=<Base64 encoded string>])
|
||||
- V([,manufacturer=<Base64 encoded string>])
|
||||
- V([,product=<Base64 encoded string>])
|
||||
- V([,serial=<Base64 encoded string>])
|
||||
- V([,sku=<Base64 encoded string>])
|
||||
- V([,uuid=<UUID>])
|
||||
- V([,version=<Base64 encoded string>])
|
||||
type: str
|
||||
snapname:
|
||||
description:
|
||||
@@ -523,7 +532,7 @@ options:
|
||||
virtio:
|
||||
description:
|
||||
- A hash/dictionary of volume used as VIRTIO hard disk. O(virtio='{"key":"value", "key":"value"}').
|
||||
- Keys allowed are - C(virto[n]) where 0 ≤ n ≤ 15.
|
||||
- Keys allowed are - C(virtio[n]) where 0 ≤ n ≤ 15.
|
||||
- Values allowed are - C("storage:size,format=value").
|
||||
- C(storage) is the storage identifier where to create the disk.
|
||||
- C(size) is the size of the disk in GB.
|
||||
@@ -1111,11 +1120,11 @@ class ProxmoxKvmAnsible(ProxmoxAnsible):
|
||||
return False
|
||||
return True
|
||||
|
||||
def restart_vm(self, vm, **status):
|
||||
def restart_vm(self, vm, force, **status):
|
||||
vmid = vm['vmid']
|
||||
try:
|
||||
proxmox_node = self.proxmox_api.nodes(vm['node'])
|
||||
taskid = proxmox_node.qemu(vmid).status.reboot.post()
|
||||
taskid = proxmox_node.qemu(vmid).status.reset.post() if force else proxmox_node.qemu(vmid).status.reboot.post()
|
||||
if not self.wait_for_task(vm['node'], taskid):
|
||||
self.module.fail_json(msg='Reached timeout while waiting for rebooting VM. Last line in task before timeout: %s' %
|
||||
proxmox_node.tasks(taskid).log.get()[:1])
|
||||
@@ -1493,7 +1502,7 @@ def main():
|
||||
if vm['status'] == 'stopped':
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid, **status)
|
||||
|
||||
if proxmox.restart_vm(vm):
|
||||
if proxmox.restart_vm(vm, force=module.params['force']):
|
||||
module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid, **status)
|
||||
|
||||
elif state == 'absent':
|
||||
|
||||
@@ -193,14 +193,14 @@ class ProxmoxUser:
|
||||
self.user[k] = v
|
||||
elif k in ['groups', 'tokens'] and (v == '' or v is None):
|
||||
self.user[k] = []
|
||||
elif k == 'groups' and type(v) == str:
|
||||
elif k == 'groups' and isinstance(v, str):
|
||||
self.user['groups'] = v.split(',')
|
||||
elif k == 'tokens' and type(v) == list:
|
||||
elif k == 'tokens' and isinstance(v, list):
|
||||
for token in v:
|
||||
if 'privsep' in token:
|
||||
token['privsep'] = proxmox_to_ansible_bool(token['privsep'])
|
||||
self.user['tokens'] = v
|
||||
elif k == 'tokens' and type(v) == dict:
|
||||
elif k == 'tokens' and isinstance(v, dict):
|
||||
self.user['tokens'] = list()
|
||||
for tokenid, tokenvalues in v.items():
|
||||
t = tokenvalues
|
||||
|
||||
@@ -20,8 +20,7 @@ author: 'Sergei Antipov (@UnderGreen) <greendayonfire at gmail dot com>'
|
||||
options:
|
||||
node:
|
||||
description:
|
||||
- Node where to get virtual machines info.
|
||||
required: true
|
||||
- Restrict results to a specific Proxmox VE node.
|
||||
type: str
|
||||
type:
|
||||
description:
|
||||
@@ -35,11 +34,12 @@ options:
|
||||
vmid:
|
||||
description:
|
||||
- Restrict results to a specific virtual machine by using its ID.
|
||||
- If VM with the specified vmid does not exist in a cluster then resulting list will be empty.
|
||||
type: int
|
||||
name:
|
||||
description:
|
||||
- Restrict results to a specific virtual machine by using its name.
|
||||
- If multiple virtual machines have the same name then vmid must be used instead.
|
||||
- Restrict results to a specific virtual machine(s) by using their name.
|
||||
- If VM(s) with the specified name do not exist in a cluster then the resulting list will be empty.
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- community.general.proxmox.documentation
|
||||
@@ -97,14 +97,18 @@ proxmox_vms:
|
||||
"disk": 0,
|
||||
"diskread": 0,
|
||||
"diskwrite": 0,
|
||||
"id": "qemu/100",
|
||||
"maxcpu": 1,
|
||||
"maxdisk": 34359738368,
|
||||
"maxmem": 4294967296,
|
||||
"mem": 35158379,
|
||||
"name": "pxe.home.arpa",
|
||||
"netin": 99715803,
|
||||
"netout": 14237835,
|
||||
"node": "pve",
|
||||
"pid": 1947197,
|
||||
"status": "running",
|
||||
"template": False,
|
||||
"type": "qemu",
|
||||
"uptime": 135530,
|
||||
"vmid": 100
|
||||
@@ -115,13 +119,17 @@ proxmox_vms:
|
||||
"disk": 0,
|
||||
"diskread": 0,
|
||||
"diskwrite": 0,
|
||||
"id": "qemu/101",
|
||||
"maxcpu": 1,
|
||||
"maxdisk": 0,
|
||||
"maxmem": 536870912,
|
||||
"mem": 0,
|
||||
"name": "test1",
|
||||
"netin": 0,
|
||||
"netout": 0,
|
||||
"node": "pve",
|
||||
"status": "stopped",
|
||||
"template": False,
|
||||
"type": "qemu",
|
||||
"uptime": 0,
|
||||
"vmid": 101
|
||||
@@ -133,30 +141,55 @@ from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.proxmox import (
|
||||
proxmox_auth_argument_spec,
|
||||
ProxmoxAnsible,
|
||||
proxmox_to_ansible_bool,
|
||||
)
|
||||
|
||||
|
||||
class ProxmoxVmInfoAnsible(ProxmoxAnsible):
|
||||
def get_qemu_vms(self, node, vmid=None):
|
||||
def get_vms_from_cluster_resources(self):
|
||||
try:
|
||||
vms = self.proxmox_api.nodes(node).qemu().get()
|
||||
for vm in vms:
|
||||
vm["vmid"] = int(vm["vmid"])
|
||||
vm["type"] = "qemu"
|
||||
if vmid is None:
|
||||
return vms
|
||||
return [vm for vm in vms if vm["vmid"] == vmid]
|
||||
return self.proxmox_api.cluster().resources().get(type="vm")
|
||||
except Exception as e:
|
||||
self.module.fail_json(
|
||||
msg="Failed to retrieve VMs information from cluster resources: %s" % e
|
||||
)
|
||||
|
||||
def get_vms_from_nodes(self, vms_unfiltered, type, vmid=None, name=None, node=None):
|
||||
vms = []
|
||||
for vm in vms_unfiltered:
|
||||
if (
|
||||
type != vm["type"]
|
||||
or (node and vm["node"] != node)
|
||||
or (vmid and int(vm["vmid"]) != vmid)
|
||||
or (name is not None and vm["name"] != name)
|
||||
):
|
||||
continue
|
||||
vms.append(vm)
|
||||
nodes = frozenset([vm["node"] for vm in vms])
|
||||
for node in nodes:
|
||||
if type == "qemu":
|
||||
vms_from_nodes = self.proxmox_api.nodes(node).qemu().get()
|
||||
else:
|
||||
vms_from_nodes = self.proxmox_api.nodes(node).lxc().get()
|
||||
for vmn in vms_from_nodes:
|
||||
for vm in vms:
|
||||
if int(vm["vmid"]) == int(vmn["vmid"]):
|
||||
vm.update(vmn)
|
||||
vm["vmid"] = int(vm["vmid"])
|
||||
vm["template"] = proxmox_to_ansible_bool(vm["template"])
|
||||
break
|
||||
|
||||
return vms
|
||||
|
||||
def get_qemu_vms(self, vms_unfiltered, vmid=None, name=None, node=None):
|
||||
try:
|
||||
return self.get_vms_from_nodes(vms_unfiltered, "qemu", vmid, name, node)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Failed to retrieve QEMU VMs information: %s" % e)
|
||||
|
||||
def get_lxc_vms(self, node, vmid=None):
|
||||
def get_lxc_vms(self, vms_unfiltered, vmid=None, name=None, node=None):
|
||||
try:
|
||||
vms = self.proxmox_api.nodes(node).lxc().get()
|
||||
for vm in vms:
|
||||
vm["vmid"] = int(vm["vmid"])
|
||||
if vmid is None:
|
||||
return vms
|
||||
return [vm for vm in vms if vm["vmid"] == vmid]
|
||||
return self.get_vms_from_nodes(vms_unfiltered, "lxc", vmid, name, node)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Failed to retrieve LXC VMs information: %s" % e)
|
||||
|
||||
@@ -164,7 +197,7 @@ class ProxmoxVmInfoAnsible(ProxmoxAnsible):
|
||||
def main():
|
||||
module_args = proxmox_auth_argument_spec()
|
||||
vm_info_args = dict(
|
||||
node=dict(type="str", required=True),
|
||||
node=dict(type="str", required=False),
|
||||
type=dict(
|
||||
type="str", choices=["lxc", "qemu", "all"], default="all", required=False
|
||||
),
|
||||
@@ -188,28 +221,26 @@ def main():
|
||||
|
||||
result = dict(changed=False)
|
||||
|
||||
if proxmox.get_node(node) is None:
|
||||
if node and proxmox.get_node(node) is None:
|
||||
module.fail_json(msg="Node %s doesn't exist in PVE cluster" % node)
|
||||
|
||||
if not vmid and name:
|
||||
vmid = int(proxmox.get_vmid(name, ignore_missing=False))
|
||||
vms_cluster_resources = proxmox.get_vms_from_cluster_resources()
|
||||
vms = []
|
||||
|
||||
vms = None
|
||||
if type == "lxc":
|
||||
vms = proxmox.get_lxc_vms(node, vmid=vmid)
|
||||
vms = proxmox.get_lxc_vms(vms_cluster_resources, vmid, name, node)
|
||||
elif type == "qemu":
|
||||
vms = proxmox.get_qemu_vms(node, vmid=vmid)
|
||||
vms = proxmox.get_qemu_vms(vms_cluster_resources, vmid, name, node)
|
||||
else:
|
||||
vms = proxmox.get_qemu_vms(node, vmid=vmid) + proxmox.get_lxc_vms(
|
||||
node, vmid=vmid
|
||||
)
|
||||
vms = proxmox.get_qemu_vms(
|
||||
vms_cluster_resources,
|
||||
vmid,
|
||||
name,
|
||||
node,
|
||||
) + proxmox.get_lxc_vms(vms_cluster_resources, vmid, name, node)
|
||||
|
||||
if vms or vmid is None:
|
||||
result["proxmox_vms"] = vms
|
||||
module.exit_json(**result)
|
||||
else:
|
||||
result["msg"] = "VM with vmid %s doesn't exist on node %s" % (vmid, node)
|
||||
module.fail_json(**result)
|
||||
result["proxmox_vms"] = vms
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -109,7 +109,8 @@ options:
|
||||
timeout:
|
||||
description:
|
||||
- Timeout in seconds for HTTP requests to OOB controller.
|
||||
default: 10
|
||||
- The default value for this param is C(10) but that is being deprecated
|
||||
and it will be replaced with C(60) in community.general 9.0.0.
|
||||
type: int
|
||||
boot_override_mode:
|
||||
description:
|
||||
@@ -202,6 +203,12 @@ options:
|
||||
- InMaintenanceWindowOnReset
|
||||
- OnStartUpdateRequest
|
||||
version_added: '6.1.0'
|
||||
update_oem_params:
|
||||
required: false
|
||||
description:
|
||||
- Properties for HTTP Multipart Push Updates.
|
||||
type: dict
|
||||
version_added: '7.5.0'
|
||||
update_handle:
|
||||
required: false
|
||||
description:
|
||||
@@ -600,6 +607,8 @@ EXAMPLES = '''
|
||||
update_image_file: ~/images/myupdate.img
|
||||
update_targets:
|
||||
- /redfish/v1/UpdateService/FirmwareInventory/BMC
|
||||
update_oem_params:
|
||||
PreserveConfiguration: false
|
||||
|
||||
- name: Perform requested operations to continue the update
|
||||
community.general.redfish_command:
|
||||
@@ -747,7 +756,7 @@ from ansible.module_utils.common.text.converters import to_native
|
||||
# More will be added as module features are expanded
|
||||
CATEGORY_COMMANDS_ALL = {
|
||||
"Systems": ["PowerOn", "PowerForceOff", "PowerForceRestart", "PowerGracefulRestart",
|
||||
"PowerGracefulShutdown", "PowerReboot", "SetOneTimeBoot", "EnableContinuousBootOverride", "DisableBootOverride",
|
||||
"PowerGracefulShutdown", "PowerReboot", "PowerCycle", "SetOneTimeBoot", "EnableContinuousBootOverride", "DisableBootOverride",
|
||||
"IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink", "VirtualMediaInsert", "VirtualMediaEject", "VerifyBiosAttributes"],
|
||||
"Chassis": ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"],
|
||||
"Accounts": ["AddUser", "EnableUser", "DeleteUser", "DisableUser",
|
||||
@@ -782,7 +791,7 @@ def main():
|
||||
update_username=dict(type='str', aliases=["account_updatename"]),
|
||||
account_properties=dict(type='dict', default={}),
|
||||
bootdevice=dict(),
|
||||
timeout=dict(type='int', default=10),
|
||||
timeout=dict(type='int'),
|
||||
uefi_target=dict(),
|
||||
boot_next=dict(),
|
||||
boot_override_mode=dict(choices=['Legacy', 'UEFI']),
|
||||
@@ -791,6 +800,7 @@ def main():
|
||||
update_image_file=dict(type='path'),
|
||||
update_protocol=dict(),
|
||||
update_targets=dict(type='list', elements='str', default=[]),
|
||||
update_oem_params=dict(type='dict'),
|
||||
update_creds=dict(
|
||||
type='dict',
|
||||
options=dict(
|
||||
@@ -829,6 +839,16 @@ def main():
|
||||
supports_check_mode=False
|
||||
)
|
||||
|
||||
if module.params['timeout'] is None:
|
||||
timeout = 10
|
||||
module.deprecate(
|
||||
'The default value {0} for parameter param1 is being deprecated and it will be replaced by {1}'.format(
|
||||
10, 60
|
||||
),
|
||||
version='9.0.0',
|
||||
collection_name='community.general'
|
||||
)
|
||||
|
||||
category = module.params['category']
|
||||
command_list = module.params['command']
|
||||
|
||||
@@ -863,6 +883,7 @@ def main():
|
||||
'update_targets': module.params['update_targets'],
|
||||
'update_creds': module.params['update_creds'],
|
||||
'update_apply_time': module.params['update_apply_time'],
|
||||
'update_oem_params': module.params['update_oem_params'],
|
||||
'update_handle': module.params['update_handle'],
|
||||
}
|
||||
|
||||
|
||||
@@ -64,7 +64,8 @@ options:
|
||||
timeout:
|
||||
description:
|
||||
- Timeout in seconds for HTTP requests to OOB controller.
|
||||
default: 10
|
||||
- The default value for this param is C(10) but that is being deprecated
|
||||
and it will be replaced with C(60) in community.general 9.0.0.
|
||||
type: int
|
||||
boot_order:
|
||||
required: false
|
||||
@@ -130,7 +131,35 @@ options:
|
||||
type: dict
|
||||
default: {}
|
||||
version_added: '5.7.0'
|
||||
|
||||
storage_subsystem_id:
|
||||
required: false
|
||||
description:
|
||||
- Id of the Storage Subsystem on which the volume is to be created.
|
||||
type: str
|
||||
default: ''
|
||||
version_added: '7.3.0'
|
||||
volume_ids:
|
||||
required: false
|
||||
description:
|
||||
- List of IDs of volumes to be deleted.
|
||||
type: list
|
||||
default: []
|
||||
elements: str
|
||||
version_added: '7.3.0'
|
||||
secure_boot_enable:
|
||||
required: false
|
||||
description:
|
||||
- Setting parameter to enable or disable SecureBoot.
|
||||
type: bool
|
||||
default: True
|
||||
version_added: '7.5.0'
|
||||
volume_details:
|
||||
required: false
|
||||
description:
|
||||
- Setting dict of volume to be created.
|
||||
type: dict
|
||||
default: {}
|
||||
version_added: '7.5.0'
|
||||
author:
|
||||
- "Jose Delarosa (@jose-delarosa)"
|
||||
- "T S Kushal (@TSKushal)"
|
||||
@@ -272,6 +301,39 @@ EXAMPLES = '''
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Set SecureBoot
|
||||
community.general.redfish_config:
|
||||
category: Systems
|
||||
command: SetSecureBoot
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
secure_boot_enable: True
|
||||
|
||||
- name: Delete All Volumes
|
||||
community.general.redfish_config:
|
||||
category: Systems
|
||||
command: DeleteVolumes
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
storage_subsystem_id: "DExxxxxx"
|
||||
volume_ids: ["volume1", "volume2"]
|
||||
|
||||
- name: Create Volume
|
||||
community.general.redfish_config:
|
||||
category: Systems
|
||||
command: CreateVolume
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
storage_subsystem_id: "DExxxxxx"
|
||||
volume_details:
|
||||
Name: "MR Volume"
|
||||
RAIDType: "RAID0"
|
||||
Drives:
|
||||
- "/redfish/v1/Systems/1/Storage/DE00B000/Drives/1"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
@@ -290,7 +352,7 @@ from ansible.module_utils.common.text.converters import to_native
|
||||
# More will be added as module features are expanded
|
||||
CATEGORY_COMMANDS_ALL = {
|
||||
"Systems": ["SetBiosDefaultSettings", "SetBiosAttributes", "SetBootOrder",
|
||||
"SetDefaultBootOrder", "EnableSecureBoot"],
|
||||
"SetDefaultBootOrder", "EnableSecureBoot", "SetSecureBoot", "DeleteVolumes", "CreateVolume"],
|
||||
"Manager": ["SetNetworkProtocols", "SetManagerNic", "SetHostInterface"],
|
||||
"Sessions": ["SetSessionService"],
|
||||
}
|
||||
@@ -307,7 +369,7 @@ def main():
|
||||
password=dict(no_log=True),
|
||||
auth_token=dict(no_log=True),
|
||||
bios_attributes=dict(type='dict', default={}),
|
||||
timeout=dict(type='int', default=10),
|
||||
timeout=dict(type='int'),
|
||||
boot_order=dict(type='list', elements='str', default=[]),
|
||||
network_protocols=dict(
|
||||
type='dict',
|
||||
@@ -323,6 +385,10 @@ def main():
|
||||
hostinterface_config=dict(type='dict', default={}),
|
||||
hostinterface_id=dict(),
|
||||
sessions_config=dict(type='dict', default={}),
|
||||
storage_subsystem_id=dict(type='str', default=''),
|
||||
volume_ids=dict(type='list', default=[], elements='str'),
|
||||
secure_boot_enable=dict(type='bool', default=True),
|
||||
volume_details=dict(type='dict', default={})
|
||||
),
|
||||
required_together=[
|
||||
('username', 'password'),
|
||||
@@ -336,6 +402,16 @@ def main():
|
||||
supports_check_mode=False
|
||||
)
|
||||
|
||||
if module.params['timeout'] is None:
|
||||
timeout = 10
|
||||
module.deprecate(
|
||||
'The default value {0} for parameter param1 is being deprecated and it will be replaced by {1}'.format(
|
||||
10, 60
|
||||
),
|
||||
version='9.0.0',
|
||||
collection_name='community.general'
|
||||
)
|
||||
|
||||
category = module.params['category']
|
||||
command_list = module.params['command']
|
||||
|
||||
@@ -372,6 +448,17 @@ def main():
|
||||
# Sessions config options
|
||||
sessions_config = module.params['sessions_config']
|
||||
|
||||
# Volume deletion options
|
||||
storage_subsystem_id = module.params['storage_subsystem_id']
|
||||
volume_ids = module.params['volume_ids']
|
||||
|
||||
# Set SecureBoot options
|
||||
secure_boot_enable = module.params['secure_boot_enable']
|
||||
|
||||
# Volume creation options
|
||||
volume_details = module.params['volume_details']
|
||||
storage_subsystem_id = module.params['storage_subsystem_id']
|
||||
|
||||
# Build root URI
|
||||
root_uri = "https://" + module.params['baseuri']
|
||||
rf_utils = RedfishUtils(creds, root_uri, timeout, module,
|
||||
@@ -405,6 +492,12 @@ def main():
|
||||
result = rf_utils.set_default_boot_order()
|
||||
elif command == "EnableSecureBoot":
|
||||
result = rf_utils.enable_secure_boot()
|
||||
elif command == "SetSecureBoot":
|
||||
result = rf_utils.set_secure_boot(secure_boot_enable)
|
||||
elif command == "DeleteVolumes":
|
||||
result = rf_utils.delete_volumes(storage_subsystem_id, volume_ids)
|
||||
elif command == "CreateVolume":
|
||||
result = rf_utils.create_volume(volume_details, storage_subsystem_id)
|
||||
|
||||
elif category == "Manager":
|
||||
# execute only if we find a Manager service resource
|
||||
|
||||
@@ -60,7 +60,8 @@ options:
|
||||
timeout:
|
||||
description:
|
||||
- Timeout in seconds for HTTP requests to OOB controller.
|
||||
default: 10
|
||||
- The default value for this param is C(10) but that is being deprecated
|
||||
and it will be replaced with C(60) in community.general 9.0.0.
|
||||
type: int
|
||||
update_handle:
|
||||
required: false
|
||||
@@ -337,6 +338,14 @@ EXAMPLES = '''
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Get BIOS registry
|
||||
community.general.redfish_info:
|
||||
category: Systems
|
||||
command: GetBiosRegistries
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
@@ -354,7 +363,7 @@ CATEGORY_COMMANDS_ALL = {
|
||||
"Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory",
|
||||
"GetMemoryInventory", "GetNicInventory", "GetHealthReport",
|
||||
"GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory",
|
||||
"GetBiosAttributes", "GetBootOrder", "GetBootOverride", "GetVirtualMedia"],
|
||||
"GetBiosAttributes", "GetBootOrder", "GetBootOverride", "GetVirtualMedia", "GetBiosRegistries"],
|
||||
"Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower",
|
||||
"GetChassisThermals", "GetChassisInventory", "GetHealthReport", "GetHPEThermalConfig", "GetHPEFanPercentMin"],
|
||||
"Accounts": ["ListUsers"],
|
||||
@@ -386,7 +395,7 @@ def main():
|
||||
username=dict(),
|
||||
password=dict(no_log=True),
|
||||
auth_token=dict(no_log=True),
|
||||
timeout=dict(type='int', default=10),
|
||||
timeout=dict(type='int'),
|
||||
update_handle=dict(),
|
||||
),
|
||||
required_together=[
|
||||
@@ -401,6 +410,16 @@ def main():
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
if module.params['timeout'] is None:
|
||||
timeout = 10
|
||||
module.deprecate(
|
||||
'The default value {0} for parameter param1 is being deprecated and it will be replaced by {1}'.format(
|
||||
10, 60
|
||||
),
|
||||
version='9.0.0',
|
||||
collection_name='community.general'
|
||||
)
|
||||
|
||||
# admin credentials used for authentication
|
||||
creds = {'user': module.params['username'],
|
||||
'pswd': module.params['password'],
|
||||
@@ -478,6 +497,8 @@ def main():
|
||||
result["health_report"] = rf_utils.get_multi_system_health_report()
|
||||
elif command == "GetVirtualMedia":
|
||||
result["virtual_media"] = rf_utils.get_multi_virtualmedia(category)
|
||||
elif command == "GetBiosRegistries":
|
||||
result["bios_registries"] = rf_utils.get_bios_registries()
|
||||
|
||||
elif category == "Chassis":
|
||||
# execute only if we find Chassis resource
|
||||
|
||||
@@ -17,30 +17,21 @@ version_added: '0.2.0'
|
||||
description:
|
||||
- Gathers information and statistics about Redis servers.
|
||||
extends_documentation_fragment:
|
||||
- community.general.redis
|
||||
- community.general.attributes
|
||||
- community.general.attributes.info_module
|
||||
options:
|
||||
login_host:
|
||||
description:
|
||||
- The host running the database.
|
||||
type: str
|
||||
default: localhost
|
||||
login_port:
|
||||
description:
|
||||
- The port to connect to.
|
||||
type: int
|
||||
default: 6379
|
||||
login_password:
|
||||
description:
|
||||
- The password used to authenticate with, when authentication is enabled for the Redis server.
|
||||
type: str
|
||||
notes:
|
||||
- Requires the redis-py Python package on the remote host. You can
|
||||
install it with pip (C(pip install redis)) or with a package manager.
|
||||
U(https://github.com/andymccurdy/redis-py)
|
||||
login_user:
|
||||
version_added: 7.5.0
|
||||
validate_certs:
|
||||
version_added: 7.5.0
|
||||
tls:
|
||||
default: false
|
||||
version_added: 7.5.0
|
||||
ca_certs:
|
||||
version_added: 7.5.0
|
||||
seealso:
|
||||
- module: community.general.redis
|
||||
requirements: [ redis ]
|
||||
author: "Pavlo Bashynskyi (@levonet)"
|
||||
'''
|
||||
|
||||
@@ -199,8 +190,10 @@ except ImportError:
|
||||
REDIS_IMP_ERR = traceback.format_exc()
|
||||
HAS_REDIS_PACKAGE = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
from ansible_collections.community.general.plugins.module_utils.redis import (
|
||||
fail_imports, redis_auth_argument_spec, redis_auth_params)
|
||||
|
||||
|
||||
def redis_client(**client_params):
|
||||
@@ -210,23 +203,16 @@ def redis_client(**client_params):
|
||||
# Module execution.
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
login_host=dict(type='str', default='localhost'),
|
||||
login_port=dict(type='int', default=6379),
|
||||
login_password=dict(type='str', no_log=True),
|
||||
),
|
||||
argument_spec=redis_auth_argument_spec(tls_default=False),
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
if not HAS_REDIS_PACKAGE:
|
||||
module.fail_json(msg=missing_required_lib('redis'), exception=REDIS_IMP_ERR)
|
||||
fail_imports(module, module.params['tls'])
|
||||
|
||||
login_host = module.params['login_host']
|
||||
login_port = module.params['login_port']
|
||||
login_password = module.params['login_password']
|
||||
redis_params = redis_auth_params(module)
|
||||
|
||||
# Connect and check
|
||||
client = redis_client(host=login_host, port=login_port, password=login_password)
|
||||
client = redis_client(**redis_params)
|
||||
try:
|
||||
client.ping()
|
||||
except Exception as e:
|
||||
|
||||
322
plugins/modules/simpleinit_msb.py
Normal file
322
plugins/modules/simpleinit_msb.py
Normal file
@@ -0,0 +1,322 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2016-2023, Vlad Glagolev <scm@vaygr.net>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: simpleinit_msb
|
||||
short_description: Manage services on Source Mage GNU/Linux
|
||||
version_added: 7.5.0
|
||||
description:
|
||||
- Controls services on remote hosts using C(simpleinit-msb).
|
||||
notes:
|
||||
- This module needs ansible-core 2.15.5 or newer. Older versions have a broken and insufficient daemonize functionality.
|
||||
author: "Vlad Glagolev (@vaygr)"
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
options:
|
||||
name:
|
||||
type: str
|
||||
description:
|
||||
- Name of the service.
|
||||
required: true
|
||||
aliases: ['service']
|
||||
state:
|
||||
type: str
|
||||
required: false
|
||||
choices: [ running, started, stopped, restarted, reloaded ]
|
||||
description:
|
||||
- V(started)/V(stopped) are idempotent actions that will not run
|
||||
commands unless necessary. V(restarted) will always bounce the
|
||||
service. V(reloaded) will always reload.
|
||||
- At least one of O(state) and O(enabled) are required.
|
||||
- Note that V(reloaded) will start the
|
||||
service if it is not already started, even if your chosen init
|
||||
system would not normally.
|
||||
enabled:
|
||||
type: bool
|
||||
required: false
|
||||
description:
|
||||
- Whether the service should start on boot.
|
||||
- At least one of O(state) and O(enabled) are required.
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Example action to start service httpd, if not running
|
||||
community.general.simpleinit_msb:
|
||||
name: httpd
|
||||
state: started
|
||||
|
||||
- name: Example action to stop service httpd, if running
|
||||
community.general.simpleinit_msb:
|
||||
name: httpd
|
||||
state: stopped
|
||||
|
||||
- name: Example action to restart service httpd, in all cases
|
||||
community.general.simpleinit_msb:
|
||||
name: httpd
|
||||
state: restarted
|
||||
|
||||
- name: Example action to reload service httpd, in all cases
|
||||
community.general.simpleinit_msb:
|
||||
name: httpd
|
||||
state: reloaded
|
||||
|
||||
- name: Example action to enable service httpd, and not touch the running state
|
||||
community.general.simpleinit_msb:
|
||||
name: httpd
|
||||
enabled: true
|
||||
'''
|
||||
|
||||
import os
|
||||
import re
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.service import daemonize
|
||||
|
||||
|
||||
class SimpleinitMSB(object):
|
||||
"""
|
||||
Main simpleinit-msb service manipulation class
|
||||
"""
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
self.name = module.params['name']
|
||||
self.state = module.params['state']
|
||||
self.enable = module.params['enabled']
|
||||
self.changed = False
|
||||
self.running = None
|
||||
self.action = None
|
||||
self.telinit_cmd = None
|
||||
self.svc_change = False
|
||||
|
||||
def execute_command(self, cmd, daemon=False):
|
||||
if not daemon:
|
||||
return self.module.run_command(cmd)
|
||||
else:
|
||||
return daemonize(self.module, cmd)
|
||||
|
||||
def check_service_changed(self):
|
||||
if self.state and self.running is None:
|
||||
self.module.fail_json(msg="failed determining service state, possible typo of service name?")
|
||||
# Find out if state has changed
|
||||
if not self.running and self.state in ["started", "running", "reloaded"]:
|
||||
self.svc_change = True
|
||||
elif self.running and self.state in ["stopped", "reloaded"]:
|
||||
self.svc_change = True
|
||||
elif self.state == "restarted":
|
||||
self.svc_change = True
|
||||
if self.module.check_mode and self.svc_change:
|
||||
self.module.exit_json(changed=True, msg='service state changed')
|
||||
|
||||
def modify_service_state(self):
|
||||
# Only do something if state will change
|
||||
if self.svc_change:
|
||||
# Control service
|
||||
if self.state in ['started', 'running']:
|
||||
self.action = "start"
|
||||
elif not self.running and self.state == 'reloaded':
|
||||
self.action = "start"
|
||||
elif self.state == 'stopped':
|
||||
self.action = "stop"
|
||||
elif self.state == 'reloaded':
|
||||
self.action = "reload"
|
||||
elif self.state == 'restarted':
|
||||
self.action = "restart"
|
||||
|
||||
if self.module.check_mode:
|
||||
self.module.exit_json(changed=True, msg='changing service state')
|
||||
|
||||
return self.service_control()
|
||||
else:
|
||||
# If nothing needs to change just say all is well
|
||||
rc = 0
|
||||
err = ''
|
||||
out = ''
|
||||
return rc, out, err
|
||||
|
||||
def get_service_tools(self):
|
||||
paths = ['/sbin', '/usr/sbin', '/bin', '/usr/bin']
|
||||
binaries = ['telinit']
|
||||
location = dict()
|
||||
|
||||
for binary in binaries:
|
||||
location[binary] = self.module.get_bin_path(binary, opt_dirs=paths)
|
||||
|
||||
if location.get('telinit', False) and os.path.exists("/etc/init.d/smgl_init"):
|
||||
self.telinit_cmd = location['telinit']
|
||||
|
||||
if self.telinit_cmd is None:
|
||||
self.module.fail_json(msg='cannot find telinit script for simpleinit-msb, aborting...')
|
||||
|
||||
def get_service_status(self):
|
||||
self.action = "status"
|
||||
rc, status_stdout, status_stderr = self.service_control()
|
||||
|
||||
if self.running is None and status_stdout.count('\n') <= 1:
|
||||
cleanout = status_stdout.lower().replace(self.name.lower(), '')
|
||||
|
||||
if "is not running" in cleanout:
|
||||
self.running = False
|
||||
elif "is running" in cleanout:
|
||||
self.running = True
|
||||
|
||||
return self.running
|
||||
|
||||
def service_enable(self):
|
||||
# Check if the service is already enabled/disabled
|
||||
if not self.enable ^ self.service_enabled():
|
||||
return
|
||||
|
||||
action = "boot" + ("enable" if self.enable else "disable")
|
||||
|
||||
(rc, out, err) = self.execute_command("%s %s %s" % (self.telinit_cmd, action, self.name))
|
||||
|
||||
self.changed = True
|
||||
|
||||
for line in err.splitlines():
|
||||
if self.enable and line.find('already enabled') != -1:
|
||||
self.changed = False
|
||||
break
|
||||
if not self.enable and line.find('already disabled') != -1:
|
||||
self.changed = False
|
||||
break
|
||||
|
||||
if not self.changed:
|
||||
return
|
||||
|
||||
return (rc, out, err)
|
||||
|
||||
def service_enabled(self):
|
||||
self.service_exists()
|
||||
|
||||
(rc, out, err) = self.execute_command("%s %sd" % (self.telinit_cmd, self.enable))
|
||||
|
||||
service_enabled = False if self.enable else True
|
||||
|
||||
rex = re.compile(r'^%s$' % self.name)
|
||||
|
||||
for line in out.splitlines():
|
||||
if rex.match(line):
|
||||
service_enabled = True if self.enable else False
|
||||
break
|
||||
|
||||
return service_enabled
|
||||
|
||||
def service_exists(self):
|
||||
(rc, out, err) = self.execute_command("%s list" % self.telinit_cmd)
|
||||
|
||||
service_exists = False
|
||||
|
||||
rex = re.compile(r'^\w+\s+%s$' % self.name)
|
||||
|
||||
for line in out.splitlines():
|
||||
if rex.match(line):
|
||||
service_exists = True
|
||||
break
|
||||
|
||||
if not service_exists:
|
||||
self.module.fail_json(msg='telinit could not find the requested service: %s' % self.name)
|
||||
|
||||
def service_control(self):
|
||||
self.service_exists()
|
||||
|
||||
svc_cmd = "%s run %s" % (self.telinit_cmd, self.name)
|
||||
|
||||
rc_state, stdout, stderr = self.execute_command("%s %s" % (svc_cmd, self.action), daemon=True)
|
||||
|
||||
return (rc_state, stdout, stderr)
|
||||
|
||||
|
||||
def build_module():
|
||||
return AnsibleModule(
|
||||
argument_spec=dict(
|
||||
name=dict(required=True, aliases=['service']),
|
||||
state=dict(choices=['running', 'started', 'stopped', 'restarted', 'reloaded']),
|
||||
enabled=dict(type='bool'),
|
||||
),
|
||||
supports_check_mode=True,
|
||||
required_one_of=[['state', 'enabled']],
|
||||
)
|
||||
|
||||
|
||||
def main():
|
||||
module = build_module()
|
||||
|
||||
service = SimpleinitMSB(module)
|
||||
|
||||
rc = 0
|
||||
out = ''
|
||||
err = ''
|
||||
result = {}
|
||||
result['name'] = service.name
|
||||
|
||||
# Find service management tools
|
||||
service.get_service_tools()
|
||||
|
||||
# Enable/disable service startup at boot if requested
|
||||
if service.module.params['enabled'] is not None:
|
||||
service.service_enable()
|
||||
result['enabled'] = service.enable
|
||||
|
||||
if module.params['state'] is None:
|
||||
# Not changing the running state, so bail out now.
|
||||
result['changed'] = service.changed
|
||||
module.exit_json(**result)
|
||||
|
||||
result['state'] = service.state
|
||||
|
||||
service.get_service_status()
|
||||
|
||||
# Calculate if request will change service state
|
||||
service.check_service_changed()
|
||||
|
||||
# Modify service state if necessary
|
||||
(rc, out, err) = service.modify_service_state()
|
||||
|
||||
if rc != 0:
|
||||
if err:
|
||||
module.fail_json(msg=err)
|
||||
else:
|
||||
module.fail_json(msg=out)
|
||||
|
||||
result['changed'] = service.changed | service.svc_change
|
||||
if service.module.params['enabled'] is not None:
|
||||
result['enabled'] = service.module.params['enabled']
|
||||
|
||||
if not service.module.params['state']:
|
||||
status = service.get_service_status()
|
||||
if status is None:
|
||||
result['state'] = 'absent'
|
||||
elif status is False:
|
||||
result['state'] = 'started'
|
||||
else:
|
||||
result['state'] = 'stopped'
|
||||
else:
|
||||
# as we may have just bounced the service the service command may not
|
||||
# report accurate state at this moment so just show what we ran
|
||||
if service.module.params['state'] in ['started', 'restarted', 'running', 'reloaded']:
|
||||
result['state'] = 'started'
|
||||
else:
|
||||
result['state'] = 'stopped'
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -183,7 +183,6 @@ class Snap(StateModuleHelper):
|
||||
__disable_re = re.compile(r'(?:\S+\s+){5}(?P<notes>\S+)')
|
||||
__set_param_re = re.compile(r'(?P<snap_prefix>\S+:)?(?P<key>\S+)\s*=\s*(?P<value>.+)')
|
||||
__list_re = re.compile(r'^(?P<name>\S+)\s+\S+\s+\S+\s+(?P<channel>\S+)')
|
||||
__install_re = re.compile(r'(?P<name>\S+)\s.+\s(installed|refreshed)')
|
||||
module = dict(
|
||||
argument_spec={
|
||||
'name': dict(type='list', elements='str', required=True),
|
||||
@@ -209,14 +208,24 @@ class Snap(StateModuleHelper):
|
||||
# if state=present there might be file names passed in 'name', in
|
||||
# which case they must be converted to their actual snap names, which
|
||||
# is done using the names_from_snaps() method calling 'snap info'.
|
||||
self.vars.set("snapinfo_run_info", [], output=(self.verbosity >= 4))
|
||||
self.vars.set("status_run_info", [], output=(self.verbosity >= 4))
|
||||
self.vars.set("status_out", None, output=(self.verbosity >= 4))
|
||||
self.vars.set("run_info", [], output=(self.verbosity >= 4))
|
||||
|
||||
if self.vars.state == "present":
|
||||
self.vars.set("snapinfo_run_info", [], output=(self.verbosity >= 4))
|
||||
self.vars.set("snap_names", self.names_from_snaps(self.vars.name))
|
||||
status_var = "snap_names"
|
||||
else:
|
||||
status_var = "name"
|
||||
self.vars.set("snap_status", self.snap_status(self.vars[status_var], self.vars.channel), output=False)
|
||||
self.vars.set("snap_status_map", dict(zip(self.vars.name, self.vars.snap_status)), output=False)
|
||||
self.vars.set("status_var", status_var, output=False)
|
||||
self.vars.set("snap_status", self.snap_status(self.vars[self.vars.status_var], self.vars.channel), output=False, change=True)
|
||||
self.vars.set("snap_status_map", dict(zip(self.vars.name, self.vars.snap_status)), output=False, change=True)
|
||||
|
||||
def __quit_module__(self):
|
||||
self.vars.snap_status = self.snap_status(self.vars[self.vars.status_var], self.vars.channel)
|
||||
if self.vars.channel is None:
|
||||
self.vars.channel = "stable"
|
||||
|
||||
def _run_multiple_commands(self, commands, actionable_names, bundle=True, refresh=False):
|
||||
results_cmd = []
|
||||
@@ -252,10 +261,6 @@ class Snap(StateModuleHelper):
|
||||
results_run_info,
|
||||
)
|
||||
|
||||
def __quit_module__(self):
|
||||
if self.vars.channel is None:
|
||||
self.vars.channel = "stable"
|
||||
|
||||
def convert_json_subtree_to_map(self, json_subtree, prefix=None):
|
||||
option_map = {}
|
||||
|
||||
@@ -303,7 +308,10 @@ class Snap(StateModuleHelper):
|
||||
return [name]
|
||||
|
||||
def process_many(rc, out, err):
|
||||
outputs = out.split("---")
|
||||
# This needs to be "\n---" instead of just "---" because otherwise
|
||||
# if a snap uses "---" in its description then that will incorrectly
|
||||
# be interpreted as a separator between snaps in the output.
|
||||
outputs = out.split("\n---")
|
||||
res = []
|
||||
for sout in outputs:
|
||||
res.extend(process_one(rc, sout, ""))
|
||||
@@ -323,11 +331,13 @@ class Snap(StateModuleHelper):
|
||||
if x.startswith("warning: no snap found")]))
|
||||
return process_(rc, out, err)
|
||||
|
||||
with self.runner("info name", output_process=process) as ctx:
|
||||
try:
|
||||
names = ctx.run(name=snaps)
|
||||
finally:
|
||||
self.vars.snapinfo_run_info.append(ctx.run_info)
|
||||
names = []
|
||||
if snaps:
|
||||
with self.runner("info name", output_process=process) as ctx:
|
||||
try:
|
||||
names = ctx.run(name=snaps)
|
||||
finally:
|
||||
self.vars.snapinfo_run_info.append(ctx.run_info)
|
||||
return names
|
||||
|
||||
def snap_status(self, snap_name, channel):
|
||||
@@ -345,9 +355,8 @@ class Snap(StateModuleHelper):
|
||||
list_out = out.split('\n')[1:]
|
||||
list_out = [self.__list_re.match(x) for x in list_out]
|
||||
list_out = [(m.group('name'), m.group('channel')) for m in list_out if m]
|
||||
if self.verbosity >= 4:
|
||||
self.vars.status_out = list_out
|
||||
self.vars.status_run_info = ctx.run_info
|
||||
self.vars.status_out = list_out
|
||||
self.vars.status_run_info = ctx.run_info
|
||||
|
||||
return [_status_check(n, channel, list_out) for n in snap_name]
|
||||
|
||||
@@ -378,14 +387,10 @@ class Snap(StateModuleHelper):
|
||||
self.vars.cmd, rc, out, err, run_info = self._run_multiple_commands(params, actionable_snaps, bundle=False, refresh=refresh)
|
||||
else:
|
||||
self.vars.cmd, rc, out, err, run_info = self._run_multiple_commands(params, actionable_snaps, refresh=refresh)
|
||||
if self.verbosity >= 4:
|
||||
self.vars.run_info = run_info
|
||||
self.vars.run_info = run_info
|
||||
|
||||
if rc == 0:
|
||||
match_install2 = [self.__install_re.match(line) for line in out.split('\n')]
|
||||
match_install = [m.group('name') in actionable_snaps for m in match_install2 if m]
|
||||
if len(match_install) == len(actionable_snaps):
|
||||
return
|
||||
return
|
||||
|
||||
classic_snap_pattern = re.compile(r'^error: This revision of snap "(?P<package_name>\w+)"'
|
||||
r' was published using classic confinement')
|
||||
@@ -475,8 +480,7 @@ class Snap(StateModuleHelper):
|
||||
if self.check_mode:
|
||||
return
|
||||
self.vars.cmd, rc, out, err, run_info = self._run_multiple_commands(params, actionable_snaps)
|
||||
if self.verbosity >= 4:
|
||||
self.vars.run_info = run_info
|
||||
self.vars.run_info = run_info
|
||||
if rc == 0:
|
||||
return
|
||||
msg = "Ooops! Snap operation failed while executing '{cmd}', please examine logs and " \
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2015-2016, Vlad Glagolev <scm@vaygr.net>
|
||||
# Copyright (c) 2015-2023, Vlad Glagolev <scm@vaygr.net>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
@@ -10,7 +10,7 @@ from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: sorcery
|
||||
short_description: Package manager for Source Mage GNU/Linux
|
||||
@@ -20,8 +20,7 @@ author: "Vlad Glagolev (@vaygr)"
|
||||
notes:
|
||||
- When all three components are selected, the update goes by the sequence --
|
||||
Sorcery -> Grimoire(s) -> Spell(s); you cannot override it.
|
||||
- grimoire handling (i.e. add/remove, including SCM/rsync versions) is not
|
||||
yet supported.
|
||||
- Grimoire handling is supported since community.general 7.3.0.
|
||||
requirements:
|
||||
- bash
|
||||
extends_documentation_fragment:
|
||||
@@ -34,21 +33,31 @@ attributes:
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the spell
|
||||
- multiple names can be given, separated by commas
|
||||
- special value '*' in conjunction with states V(latest) or
|
||||
- Name of the spell or grimoire.
|
||||
- Multiple names can be given, separated by commas.
|
||||
- Special value V(*) in conjunction with states V(latest) or
|
||||
V(rebuild) will update or rebuild the whole system respectively
|
||||
aliases: ["spell"]
|
||||
- The alias O(grimoire) was added in community.general 7.3.0.
|
||||
aliases: ["spell", "grimoire"]
|
||||
type: list
|
||||
elements: str
|
||||
|
||||
repository:
|
||||
description:
|
||||
- Repository location.
|
||||
- If specified, O(name) represents grimoire(s) instead of spell(s).
|
||||
- Special value V(*) will pull grimoire from the official location.
|
||||
- Only single item in O(name) in conjunction with V(*) can be used.
|
||||
- O(state=absent) must be used with a special value V(*).
|
||||
type: str
|
||||
version_added: 7.3.0
|
||||
|
||||
state:
|
||||
description:
|
||||
- Whether to cast, dispel or rebuild a package
|
||||
- state V(cast) is an equivalent of V(present), not V(latest)
|
||||
- state V(latest) always triggers O(update_cache=true)
|
||||
- state V(rebuild) implies cast of all specified spells, not only
|
||||
those existed before
|
||||
- Whether to cast, dispel or rebuild a package.
|
||||
- State V(cast) is an equivalent of V(present), not V(latest).
|
||||
- State V(rebuild) implies cast of all specified spells, not only
|
||||
those existed before.
|
||||
choices: ["present", "latest", "absent", "cast", "dispelled", "rebuild"]
|
||||
default: "present"
|
||||
type: str
|
||||
@@ -56,12 +65,12 @@ options:
|
||||
depends:
|
||||
description:
|
||||
- Comma-separated list of _optional_ dependencies to build a spell
|
||||
(or make sure it is built) with; use +/- in front of dependency
|
||||
to turn it on/off ('+' is optional though).
|
||||
- this option is ignored if O(name) parameter is equal to V(*) or
|
||||
(or make sure it is built) with; use V(+)/V(-) in front of dependency
|
||||
to turn it on/off (V(+) is optional though).
|
||||
- This option is ignored if O(name) parameter is equal to V(*) or
|
||||
contains more than one spell.
|
||||
- providers must be supplied in the form recognized by Sorcery, for example
|
||||
'openssl(SSL)'.
|
||||
- Providers must be supplied in the form recognized by Sorcery,
|
||||
for example 'V(openssl(SSL\))'.
|
||||
type: str
|
||||
|
||||
update:
|
||||
@@ -148,6 +157,30 @@ EXAMPLES = '''
|
||||
update_codex: true
|
||||
cache_valid_time: 86400
|
||||
|
||||
- name: Make sure stable grimoire is present
|
||||
community.general.sorcery:
|
||||
name: stable
|
||||
repository: '*'
|
||||
state: present
|
||||
|
||||
- name: Make sure binary and stable-rc grimoires are removed
|
||||
community.general.sorcery:
|
||||
grimoire: binary,stable-rc
|
||||
repository: '*'
|
||||
state: absent
|
||||
|
||||
- name: Make sure games grimoire is pulled from rsync
|
||||
community.general.sorcery:
|
||||
grimoire: games
|
||||
repository: "rsync://download.sourcemage.org::codex/games"
|
||||
state: present
|
||||
|
||||
- name: Make sure a specific branch of stable grimoire is pulled from git
|
||||
community.general.sorcery:
|
||||
grimoire: stable.git
|
||||
repository: "git://download.sourcemage.org/smgl/grimoire.git:stable.git:stable-0.62"
|
||||
state: present
|
||||
|
||||
- name: Update only Sorcery itself
|
||||
community.general.sorcery:
|
||||
update: true
|
||||
@@ -180,6 +213,8 @@ SORCERY = {
|
||||
SORCERY_LOG_DIR = "/var/log/sorcery"
|
||||
SORCERY_STATE_DIR = "/var/state/sorcery"
|
||||
|
||||
NA = "N/A"
|
||||
|
||||
|
||||
def get_sorcery_ver(module):
|
||||
""" Get Sorcery version. """
|
||||
@@ -220,9 +255,11 @@ def codex_fresh(codex, module):
|
||||
return True
|
||||
|
||||
|
||||
def codex_list(module):
|
||||
def codex_list(module, skip_new=False):
|
||||
""" List valid grimoire collection. """
|
||||
|
||||
params = module.params
|
||||
|
||||
codex = {}
|
||||
|
||||
cmd_scribe = "%s index" % SORCERY['scribe']
|
||||
@@ -241,6 +278,10 @@ def codex_list(module):
|
||||
if match:
|
||||
codex[match.group('grim')] = match.group('ver')
|
||||
|
||||
# return only specified grimoires unless requested to skip new
|
||||
if params['repository'] and not skip_new:
|
||||
codex = dict((x, codex.get(x, NA)) for x in params['name'])
|
||||
|
||||
if not codex:
|
||||
module.fail_json(msg="no grimoires to operate on; add at least one")
|
||||
|
||||
@@ -258,8 +299,7 @@ def update_sorcery(module):
|
||||
changed = False
|
||||
|
||||
if module.check_mode:
|
||||
if not module.params['name'] and not module.params['update_cache']:
|
||||
module.exit_json(changed=True, msg="would have updated Sorcery")
|
||||
return (True, "would have updated Sorcery")
|
||||
else:
|
||||
sorcery_ver = get_sorcery_ver(module)
|
||||
|
||||
@@ -273,9 +313,7 @@ def update_sorcery(module):
|
||||
if sorcery_ver != get_sorcery_ver(module):
|
||||
changed = True
|
||||
|
||||
if not module.params['name'] and not module.params['update_cache']:
|
||||
module.exit_json(changed=changed,
|
||||
msg="successfully updated Sorcery")
|
||||
return (changed, "successfully updated Sorcery")
|
||||
|
||||
|
||||
def update_codex(module):
|
||||
@@ -294,28 +332,29 @@ def update_codex(module):
|
||||
fresh = codex_fresh(codex, module)
|
||||
|
||||
if module.check_mode:
|
||||
if not params['name']:
|
||||
if not fresh:
|
||||
changed = True
|
||||
|
||||
module.exit_json(changed=changed, msg="would have updated Codex")
|
||||
elif not fresh or params['name'] and params['state'] == 'latest':
|
||||
# SILENT is required as a workaround for query() in libgpg
|
||||
module.run_command_environ_update.update(dict(SILENT='1'))
|
||||
|
||||
cmd_scribe = "%s update" % SORCERY['scribe']
|
||||
|
||||
rc, stdout, stderr = module.run_command(cmd_scribe)
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(msg="unable to update Codex: " + stdout)
|
||||
|
||||
if codex != codex_list(module):
|
||||
if not fresh:
|
||||
changed = True
|
||||
|
||||
if not params['name']:
|
||||
module.exit_json(changed=changed,
|
||||
msg="successfully updated Codex")
|
||||
return (changed, "would have updated Codex")
|
||||
else:
|
||||
if not fresh:
|
||||
# SILENT is required as a workaround for query() in libgpg
|
||||
module.run_command_environ_update.update(dict(SILENT='1'))
|
||||
|
||||
cmd_scribe = "%s update" % SORCERY['scribe']
|
||||
|
||||
if params['repository']:
|
||||
cmd_scribe += ' %s' % ' '.join(codex.keys())
|
||||
|
||||
rc, stdout, stderr = module.run_command(cmd_scribe)
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(msg="unable to update Codex: " + stdout)
|
||||
|
||||
if codex != codex_list(module):
|
||||
changed = True
|
||||
|
||||
return (changed, "successfully updated Codex")
|
||||
|
||||
|
||||
def match_depends(module):
|
||||
@@ -448,6 +487,65 @@ def match_depends(module):
|
||||
return depends_ok
|
||||
|
||||
|
||||
def manage_grimoires(module):
|
||||
""" Add or remove grimoires. """
|
||||
|
||||
params = module.params
|
||||
grimoires = params['name']
|
||||
url = params['repository']
|
||||
|
||||
codex = codex_list(module, True)
|
||||
|
||||
if url == '*':
|
||||
if params['state'] in ('present', 'latest', 'absent'):
|
||||
if params['state'] == 'absent':
|
||||
action = "remove"
|
||||
todo = set(grimoires) & set(codex)
|
||||
else:
|
||||
action = "add"
|
||||
todo = set(grimoires) - set(codex)
|
||||
|
||||
if not todo:
|
||||
return (False, "all grimoire(s) are already %sed" % action[:5])
|
||||
|
||||
if module.check_mode:
|
||||
return (True, "would have %sed grimoire(s)" % action[:5])
|
||||
|
||||
cmd_scribe = "%s %s %s" % (SORCERY['scribe'], action, ' '.join(todo))
|
||||
|
||||
rc, stdout, stderr = module.run_command(cmd_scribe)
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(msg="failed to %s one or more grimoire(s): %s" % (action, stdout))
|
||||
|
||||
return (True, "successfully %sed one or more grimoire(s)" % action[:5])
|
||||
else:
|
||||
module.fail_json(msg="unsupported operation on '*' repository value")
|
||||
else:
|
||||
if params['state'] in ('present', 'latest'):
|
||||
if len(grimoires) > 1:
|
||||
module.fail_json(msg="using multiple items with repository is invalid")
|
||||
|
||||
grimoire = grimoires[0]
|
||||
|
||||
if grimoire in codex:
|
||||
return (False, "grimoire %s already exists" % grimoire)
|
||||
|
||||
if module.check_mode:
|
||||
return (True, "would have added grimoire %s from %s" % (grimoire, url))
|
||||
|
||||
cmd_scribe = "%s add %s from %s" % (SORCERY['scribe'], grimoire, url)
|
||||
|
||||
rc, stdout, stderr = module.run_command(cmd_scribe)
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(msg="failed to add grimoire %s from %s: %s" % (grimoire, url, stdout))
|
||||
|
||||
return (True, "successfully added grimoire %s from %s" % (grimoire, url))
|
||||
else:
|
||||
module.fail_json(msg="unsupported operation on repository value")
|
||||
|
||||
|
||||
def manage_spells(module):
|
||||
""" Cast or dispel spells.
|
||||
|
||||
@@ -473,7 +571,7 @@ def manage_spells(module):
|
||||
# see update_codex()
|
||||
module.run_command_environ_update.update(dict(SILENT='1'))
|
||||
|
||||
cmd_sorcery = "%s queue"
|
||||
cmd_sorcery = "%s queue" % SORCERY['sorcery']
|
||||
|
||||
rc, stdout, stderr = module.run_command(cmd_sorcery)
|
||||
|
||||
@@ -492,7 +590,7 @@ def manage_spells(module):
|
||||
except IOError:
|
||||
module.fail_json(msg="failed to restore the update queue")
|
||||
|
||||
module.exit_json(changed=True, msg="would have updated the system")
|
||||
return (True, "would have updated the system")
|
||||
|
||||
cmd_cast = "%s --queue" % SORCERY['cast']
|
||||
|
||||
@@ -501,12 +599,12 @@ def manage_spells(module):
|
||||
if rc != 0:
|
||||
module.fail_json(msg="failed to update the system")
|
||||
|
||||
module.exit_json(changed=True, msg="successfully updated the system")
|
||||
return (True, "successfully updated the system")
|
||||
else:
|
||||
module.exit_json(changed=False, msg="the system is already up to date")
|
||||
return (False, "the system is already up to date")
|
||||
elif params['state'] == 'rebuild':
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=True, msg="would have rebuilt the system")
|
||||
return (True, "would have rebuilt the system")
|
||||
|
||||
cmd_sorcery = "%s rebuild" % SORCERY['sorcery']
|
||||
|
||||
@@ -515,7 +613,7 @@ def manage_spells(module):
|
||||
if rc != 0:
|
||||
module.fail_json(msg="failed to rebuild the system: " + stdout)
|
||||
|
||||
module.exit_json(changed=True, msg="successfully rebuilt the system")
|
||||
return (True, "successfully rebuilt the system")
|
||||
else:
|
||||
module.fail_json(msg="unsupported operation on '*' name value")
|
||||
else:
|
||||
@@ -577,39 +675,40 @@ def manage_spells(module):
|
||||
|
||||
if cast_queue:
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=True, msg="would have cast spell(s)")
|
||||
return (True, "would have cast spell(s)")
|
||||
|
||||
cmd_cast = "%s -c %s" % (SORCERY['cast'], ' '.join(cast_queue))
|
||||
|
||||
rc, stdout, stderr = module.run_command(cmd_cast)
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(msg="failed to cast spell(s): %s" + stdout)
|
||||
module.fail_json(msg="failed to cast spell(s): " + stdout)
|
||||
|
||||
module.exit_json(changed=True, msg="successfully cast spell(s)")
|
||||
return (True, "successfully cast spell(s)")
|
||||
elif params['state'] != 'absent':
|
||||
module.exit_json(changed=False, msg="spell(s) are already cast")
|
||||
return (False, "spell(s) are already cast")
|
||||
|
||||
if dispel_queue:
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=True, msg="would have dispelled spell(s)")
|
||||
return (True, "would have dispelled spell(s)")
|
||||
|
||||
cmd_dispel = "%s %s" % (SORCERY['dispel'], ' '.join(dispel_queue))
|
||||
|
||||
rc, stdout, stderr = module.run_command(cmd_dispel)
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(msg="failed to dispel spell(s): %s" + stdout)
|
||||
module.fail_json(msg="failed to dispel spell(s): " + stdout)
|
||||
|
||||
module.exit_json(changed=True, msg="successfully dispelled spell(s)")
|
||||
return (True, "successfully dispelled spell(s)")
|
||||
else:
|
||||
module.exit_json(changed=False, msg="spell(s) are already dispelled")
|
||||
return (False, "spell(s) are already dispelled")
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
name=dict(default=None, aliases=['spell'], type='list', elements='str'),
|
||||
name=dict(default=None, aliases=['spell', 'grimoire'], type='list', elements='str'),
|
||||
repository=dict(default=None, type='str'),
|
||||
state=dict(default='present', choices=['present', 'latest',
|
||||
'absent', 'cast', 'dispelled', 'rebuild']),
|
||||
depends=dict(default=None),
|
||||
@@ -638,14 +737,33 @@ def main():
|
||||
elif params['state'] in ('absent', 'dispelled'):
|
||||
params['state'] = 'absent'
|
||||
|
||||
changed = {
|
||||
'sorcery': (False, NA),
|
||||
'grimoires': (False, NA),
|
||||
'codex': (False, NA),
|
||||
'spells': (False, NA)
|
||||
}
|
||||
|
||||
if params['update']:
|
||||
update_sorcery(module)
|
||||
changed['sorcery'] = update_sorcery(module)
|
||||
|
||||
if params['update_cache'] or params['state'] == 'latest':
|
||||
update_codex(module)
|
||||
if params['name'] and params['repository']:
|
||||
changed['grimoires'] = manage_grimoires(module)
|
||||
|
||||
if params['name']:
|
||||
manage_spells(module)
|
||||
if params['update_cache']:
|
||||
changed['codex'] = update_codex(module)
|
||||
|
||||
if params['name'] and not params['repository']:
|
||||
changed['spells'] = manage_spells(module)
|
||||
|
||||
if any(x[0] for x in changed.values()):
|
||||
state_msg = "state changed"
|
||||
state_changed = True
|
||||
else:
|
||||
state_msg = "no change in state"
|
||||
state_changed = False
|
||||
|
||||
module.exit_json(changed=state_changed, msg=state_msg + ": " + '; '.join(x[1] for x in changed.values()))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
@@ -53,6 +53,13 @@ options:
|
||||
- The desired state of program/group.
|
||||
required: true
|
||||
choices: [ "present", "started", "stopped", "restarted", "absent", "signalled" ]
|
||||
stop_before_removing:
|
||||
type: bool
|
||||
description:
|
||||
- Use O(stop_before_removing=true) to stop the program/group before removing it
|
||||
required: false
|
||||
default: false
|
||||
version_added: 7.5.0
|
||||
signal:
|
||||
type: str
|
||||
description:
|
||||
@@ -65,6 +72,7 @@ notes:
|
||||
- When O(state=present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist.
|
||||
- When O(state=restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart).
|
||||
- When O(state=absent), the module will call C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group.
|
||||
If the program/group is still running, the action will fail. If you want to stop the program/group before removing, use O(stop_before_removing=true).
|
||||
requirements: [ "supervisorctl" ]
|
||||
author:
|
||||
- "Matt Wright (@mattupstate)"
|
||||
@@ -121,6 +129,7 @@ def main():
|
||||
password=dict(type='str', no_log=True),
|
||||
supervisorctl_path=dict(type='path'),
|
||||
state=dict(type='str', required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent', 'signalled']),
|
||||
stop_before_removing=dict(type='bool', default=False),
|
||||
signal=dict(type='str'),
|
||||
)
|
||||
|
||||
@@ -136,6 +145,7 @@ def main():
|
||||
is_group = True
|
||||
name = name.rstrip(':')
|
||||
state = module.params['state']
|
||||
stop_before_removing = module.params.get('stop_before_removing')
|
||||
config = module.params.get('config')
|
||||
server_url = module.params.get('server_url')
|
||||
username = module.params.get('username')
|
||||
@@ -199,22 +209,27 @@ def main():
|
||||
matched.append((process_name, status))
|
||||
return matched
|
||||
|
||||
def take_action_on_processes(processes, status_filter, action, expected_result):
|
||||
def take_action_on_processes(processes, status_filter, action, expected_result, exit_module=True):
|
||||
to_take_action_on = []
|
||||
for process_name, status in processes:
|
||||
if status_filter(status):
|
||||
to_take_action_on.append(process_name)
|
||||
|
||||
if len(to_take_action_on) == 0:
|
||||
if not exit_module:
|
||||
return
|
||||
module.exit_json(changed=False, name=name, state=state)
|
||||
if module.check_mode:
|
||||
if not exit_module:
|
||||
return
|
||||
module.exit_json(changed=True)
|
||||
for process_name in to_take_action_on:
|
||||
rc, out, err = run_supervisorctl(action, process_name, check_rc=True)
|
||||
if '%s: %s' % (process_name, expected_result) not in out:
|
||||
module.fail_json(msg=out)
|
||||
|
||||
module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on)
|
||||
if exit_module:
|
||||
module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on)
|
||||
|
||||
if state == 'restarted':
|
||||
rc, out, err = run_supervisorctl('update', check_rc=True)
|
||||
@@ -230,6 +245,9 @@ def main():
|
||||
if len(processes) == 0:
|
||||
module.exit_json(changed=False, name=name, state=state)
|
||||
|
||||
if stop_before_removing:
|
||||
take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped', exit_module=False)
|
||||
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=True)
|
||||
run_supervisorctl('reread', check_rc=True)
|
||||
|
||||
@@ -325,7 +325,7 @@ def init_plugins(bin_path, project_path, backend_config, backend_config_files, i
|
||||
for key, val in backend_config.items():
|
||||
command.extend([
|
||||
'-backend-config',
|
||||
shlex_quote('{0}={1}'.format(key, val))
|
||||
'{0}={1}'.format(key, val)
|
||||
])
|
||||
if backend_config_files:
|
||||
for f in backend_config_files:
|
||||
|
||||
@@ -18,3 +18,5 @@
|
||||
- import_tasks: test_version.yml
|
||||
environment: "{{ cargo_environment }}"
|
||||
when: has_cargo | default(false)
|
||||
- import_tasks: test_rustup_cargo.yml
|
||||
when: rustup_cargo_bin | default(false)
|
||||
|
||||
@@ -26,3 +26,17 @@
|
||||
has_cargo: true
|
||||
when:
|
||||
- ansible_system == 'FreeBSD' and ansible_distribution_version is version('13.0', '>')
|
||||
|
||||
- block:
|
||||
- name: Download rustup
|
||||
get_url:
|
||||
url: https://sh.rustup.rs
|
||||
dest: /tmp/sh.rustup.rs
|
||||
mode: "0750"
|
||||
force: true
|
||||
- name: Install rustup cargo
|
||||
command: /tmp/sh.rustup.rs -y
|
||||
- set_fact:
|
||||
rustup_cargo_bin: "{{ lookup('env', 'HOME') }}/.cargo/bin/cargo"
|
||||
when:
|
||||
- ansible_distribution != 'CentOS' or ansible_distribution_version is version('7.0', '>=')
|
||||
|
||||
23
tests/integration/targets/cargo/tasks/test_rustup_cargo.yml
Normal file
23
tests/integration/targets/cargo/tasks/test_rustup_cargo.yml
Normal file
@@ -0,0 +1,23 @@
|
||||
---
|
||||
# Copyright (c) Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
#
|
||||
- name: Install application helloworld
|
||||
community.general.cargo:
|
||||
executable: "{{ rustup_cargo_bin }}"
|
||||
name: helloworld
|
||||
register: rustup_install_absent_helloworld
|
||||
|
||||
- name: Uninstall application helloworld
|
||||
community.general.cargo:
|
||||
executable: "{{ rustup_cargo_bin }}"
|
||||
state: absent
|
||||
name: helloworld
|
||||
register: rustup_uninstall_present_helloworld
|
||||
|
||||
- name: Check assertions helloworld
|
||||
assert:
|
||||
that:
|
||||
- rustup_install_absent_helloworld is changed
|
||||
- rustup_uninstall_present_helloworld is changed
|
||||
@@ -21,6 +21,8 @@ from ansible_collections.community.general.plugins.module_utils.cmd_runner impor
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
cmd=dict(type="str", default="echo"),
|
||||
path_prefix=dict(type="str"),
|
||||
arg_formats=dict(type="dict", default={}),
|
||||
arg_order=dict(type="raw", required=True),
|
||||
arg_values=dict(type="dict", default={}),
|
||||
@@ -41,7 +43,7 @@ def main():
|
||||
|
||||
arg_formats[arg] = func(*args)
|
||||
|
||||
runner = CmdRunner(module, ['echo', '--'], arg_formats=arg_formats)
|
||||
runner = CmdRunner(module, [module.params["cmd"], '--'], arg_formats=arg_formats, path_prefix=module.params["path_prefix"])
|
||||
|
||||
with runner.context(p['arg_order'], check_mode_skip=p['check_mode_skip']) as ctx:
|
||||
result = ctx.run(**p['arg_values'])
|
||||
|
||||
7
tests/integration/targets/cmd_runner/meta/main.yml
Normal file
7
tests/integration/targets/cmd_runner/meta/main.yml
Normal file
@@ -0,0 +1,7 @@
|
||||
---
|
||||
# Copyright (c) Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
dependencies:
|
||||
- setup_remote_tmp_dir
|
||||
@@ -6,3 +6,4 @@
|
||||
ansible.builtin.include_tasks:
|
||||
file: test_cmd_echo.yml
|
||||
loop: "{{ cmd_echo_tests }}"
|
||||
when: item.condition | default(true) | bool
|
||||
|
||||
@@ -3,17 +3,26 @@
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
- name: test cmd_echo [{{ item.name }}]
|
||||
cmd_echo:
|
||||
arg_formats: "{{ item.arg_formats|default(omit) }}"
|
||||
arg_order: "{{ item.arg_order }}"
|
||||
arg_values: "{{ item.arg_values|default(omit) }}"
|
||||
check_mode_skip: "{{ item.check_mode_skip|default(omit) }}"
|
||||
aa: "{{ item.aa|default(omit) }}"
|
||||
register: test_result
|
||||
check_mode: "{{ item.check_mode|default(omit) }}"
|
||||
ignore_errors: "{{ item.expect_error|default(omit) }}"
|
||||
- name: create copy of /bin/echo ({{ item.name }})
|
||||
ansible.builtin.copy:
|
||||
src: /bin/echo
|
||||
dest: "{{ item.copy_to }}/echo"
|
||||
mode: "755"
|
||||
when: item.copy_to is defined
|
||||
|
||||
- name: check results [{{ item.name }}]
|
||||
- name: test cmd_echo module ({{ item.name }})
|
||||
cmd_echo:
|
||||
cmd: "{{ item.cmd | default(omit) }}"
|
||||
path_prefix: "{{ item.path_prefix | default(omit) }}"
|
||||
arg_formats: "{{ item.arg_formats | default(omit) }}"
|
||||
arg_order: "{{ item.arg_order }}"
|
||||
arg_values: "{{ item.arg_values | default(omit) }}"
|
||||
check_mode_skip: "{{ item.check_mode_skip | default(omit) }}"
|
||||
aa: "{{ item.aa | default(omit) }}"
|
||||
register: test_result
|
||||
check_mode: "{{ item.check_mode | default(omit) }}"
|
||||
ignore_errors: "{{ item.expect_error | default(omit) }}"
|
||||
|
||||
- name: check results ({{ item.name }})
|
||||
assert:
|
||||
that: "{{ item.assertions }}"
|
||||
|
||||
@@ -138,3 +138,125 @@ cmd_echo_tests:
|
||||
- test_result.rc == 0
|
||||
- test_result.out == "-- --answer=11 --tt-arg potatoes\n"
|
||||
- test_result.err == ""
|
||||
|
||||
- name: use cmd echo
|
||||
cmd: echo
|
||||
arg_formats:
|
||||
aa:
|
||||
func: as_opt_eq_val
|
||||
args: [--answer]
|
||||
tt:
|
||||
func: as_opt_val
|
||||
args: [--tt-arg]
|
||||
arg_order: 'aa tt'
|
||||
arg_values:
|
||||
tt: potatoes
|
||||
aa: 11
|
||||
assertions:
|
||||
- test_result.rc == 0
|
||||
- test_result.out == "-- --answer=11 --tt-arg potatoes\n"
|
||||
- test_result.err == ""
|
||||
|
||||
- name: use cmd /bin/echo
|
||||
cmd: /bin/echo
|
||||
arg_formats:
|
||||
aa:
|
||||
func: as_opt_eq_val
|
||||
args: [--answer]
|
||||
tt:
|
||||
func: as_opt_val
|
||||
args: [--tt-arg]
|
||||
arg_order: 'aa tt'
|
||||
arg_values:
|
||||
tt: potatoes
|
||||
aa: 11
|
||||
assertions:
|
||||
- test_result.rc == 0
|
||||
- test_result.out == "-- --answer=11 --tt-arg potatoes\n"
|
||||
- test_result.err == ""
|
||||
|
||||
# this will not be in the regular set of paths get_bin_path() searches
|
||||
- name: use cmd {{ remote_tmp_dir }}/echo
|
||||
condition: >
|
||||
{{
|
||||
ansible_distribution != "MacOSX" and
|
||||
not (ansible_distribution == "CentOS" and ansible_distribution_major_version is version('7.0', '<'))
|
||||
}}
|
||||
copy_to: "{{ remote_tmp_dir }}"
|
||||
cmd: "{{ remote_tmp_dir }}/echo"
|
||||
arg_formats:
|
||||
aa:
|
||||
func: as_opt_eq_val
|
||||
args: [--answer]
|
||||
tt:
|
||||
func: as_opt_val
|
||||
args: [--tt-arg]
|
||||
arg_order: 'aa tt'
|
||||
arg_values:
|
||||
tt: potatoes
|
||||
aa: 11
|
||||
assertions:
|
||||
- test_result.rc == 0
|
||||
- test_result.out == "-- --answer=11 --tt-arg potatoes\n"
|
||||
- test_result.err == ""
|
||||
|
||||
- name: use cmd echo with path_prefix {{ remote_tmp_dir }}
|
||||
cmd: echo
|
||||
condition: >
|
||||
{{
|
||||
ansible_distribution != "MacOSX" and
|
||||
not (ansible_distribution == "CentOS" and ansible_distribution_major_version is version('7.0', '<'))
|
||||
}}
|
||||
copy_to: "{{ remote_tmp_dir }}"
|
||||
path_prefix: "{{ remote_tmp_dir }}"
|
||||
arg_formats:
|
||||
aa:
|
||||
func: as_opt_eq_val
|
||||
args: [--answer]
|
||||
tt:
|
||||
func: as_opt_val
|
||||
args: [--tt-arg]
|
||||
arg_order: 'aa tt'
|
||||
arg_values:
|
||||
tt: potatoes
|
||||
aa: 11
|
||||
assertions:
|
||||
- test_result.rc == 0
|
||||
- test_result.out == "-- --answer=11 --tt-arg potatoes\n"
|
||||
- test_result.err == ""
|
||||
|
||||
- name: use cmd never-existed
|
||||
cmd: never-existed
|
||||
arg_formats:
|
||||
aa:
|
||||
func: as_opt_eq_val
|
||||
args: [--answer]
|
||||
tt:
|
||||
func: as_opt_val
|
||||
args: [--tt-arg]
|
||||
arg_order: 'aa tt'
|
||||
arg_values:
|
||||
tt: potatoes
|
||||
aa: 11
|
||||
expect_error: true
|
||||
assertions:
|
||||
- >
|
||||
"Failed to find required executable" in test_result.msg
|
||||
|
||||
- name: use cmd /usr/bin/never-existed
|
||||
cmd: /usr/bin/never-existed
|
||||
arg_formats:
|
||||
aa:
|
||||
func: as_opt_eq_val
|
||||
args: [--answer]
|
||||
tt:
|
||||
func: as_opt_val
|
||||
args: [--tt-arg]
|
||||
arg_order: 'aa tt'
|
||||
arg_values:
|
||||
tt: potatoes
|
||||
aa: 11
|
||||
expect_error: true
|
||||
assertions:
|
||||
- >
|
||||
"No such file or directory" in test_result.msg
|
||||
|
||||
201
tests/integration/targets/consul/tasks/consul_role.yml
Normal file
201
tests/integration/targets/consul/tasks/consul_role.yml
Normal file
@@ -0,0 +1,201 @@
|
||||
---
|
||||
# Copyright (c) Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
- name: Create a policy with rules
|
||||
consul_policy:
|
||||
name: foo-access-for-role
|
||||
rules: |
|
||||
key "foo" {
|
||||
policy = "read"
|
||||
}
|
||||
key "private/foo" {
|
||||
policy = "deny"
|
||||
}
|
||||
token: "{{ consul_management_token }}"
|
||||
register: policy_result
|
||||
|
||||
- name: Create another policy with rules
|
||||
consul_policy:
|
||||
name: bar-access-for-role
|
||||
rules: |
|
||||
key "bar" {
|
||||
policy = "read"
|
||||
}
|
||||
key "private/bar" {
|
||||
policy = "deny"
|
||||
}
|
||||
token: "{{ consul_management_token }}"
|
||||
register: policy_result
|
||||
|
||||
- name: Create a role with policy
|
||||
consul_role:
|
||||
name: foo-role-with-policy
|
||||
policies:
|
||||
- name: "foo-access-for-role"
|
||||
token: "{{ consul_management_token }}"
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result['role']['Name'] == 'foo-role-with-policy'
|
||||
|
||||
- name: Update policy description, in check mode
|
||||
consul_role:
|
||||
name: foo-role-with-policy
|
||||
description: "Testing updating description"
|
||||
token: "{{ consul_management_token }}"
|
||||
check_mode: yes
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result['role']['Description'] == "Testing updating description"
|
||||
- result['role']['Policies'][0]['Name'] == 'foo-access-for-role'
|
||||
|
||||
- name: Update policy to add the description
|
||||
consul_role:
|
||||
name: foo-role-with-policy
|
||||
description: "Role for testing policies"
|
||||
token: "{{ consul_management_token }}"
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result['role']['Description'] == "Role for testing policies"
|
||||
- result['role']['Policies'][0]['Name'] == 'foo-access-for-role'
|
||||
|
||||
- name: Update the role with another policy, also testing leaving description blank
|
||||
consul_role:
|
||||
name: foo-role-with-policy
|
||||
policies:
|
||||
- name: "foo-access-for-role"
|
||||
- name: "bar-access-for-role"
|
||||
token: "{{ consul_management_token }}"
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result['role']['Policies'][0]['Name'] == 'foo-access-for-role'
|
||||
- result['role']['Policies'][1]['Name'] == 'bar-access-for-role'
|
||||
- result['role']['Description'] == "Role for testing policies"
|
||||
|
||||
- name: Create a role with service identity
|
||||
consul_role:
|
||||
token: "{{ consul_management_token }}"
|
||||
name: role-with-service-identity
|
||||
service_identities:
|
||||
- name: web
|
||||
datacenters:
|
||||
- dc1
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result['role']['ServiceIdentities'][0]['ServiceName'] == "web"
|
||||
- result['role']['ServiceIdentities'][0]['Datacenters'][0] == "dc1"
|
||||
|
||||
- name: Update the role with service identity in check mode
|
||||
consul_role:
|
||||
token: "{{ consul_management_token }}"
|
||||
name: role-with-service-identity
|
||||
service_identities:
|
||||
- name: web
|
||||
datacenters:
|
||||
- dc2
|
||||
register: result
|
||||
check_mode: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result['role']['ServiceIdentities'][0]['ServiceName'] == "web"
|
||||
- result['role']['ServiceIdentities'][0]['Datacenters'][0] == "dc2"
|
||||
|
||||
- name: Update the role with service identity to add a policy, leaving the service id unchanged
|
||||
consul_role:
|
||||
token: "{{ consul_management_token }}"
|
||||
name: role-with-service-identity
|
||||
policies:
|
||||
- name: "foo-access-for-role"
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result['role']['ServiceIdentities'][0]['ServiceName'] == "web"
|
||||
- result['role']['ServiceIdentities'][0]['Datacenters'][0] == "dc1"
|
||||
- result['role']['Policies'][0]['Name'] == 'foo-access-for-role'
|
||||
|
||||
- name: Update the role with service identity to remove the policies
|
||||
consul_role:
|
||||
token: "{{ consul_management_token }}"
|
||||
name: role-with-service-identity
|
||||
policies: []
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result['role']['ServiceIdentities'][0]['ServiceName'] == "web"
|
||||
- result['role']['ServiceIdentities'][0]['Datacenters'][0] == "dc1"
|
||||
- result['role']['Policies'] is not defined
|
||||
|
||||
- name: Update the role with service identity to remove the node identities, in check mode
|
||||
consul_role:
|
||||
token: "{{ consul_management_token }}"
|
||||
name: role-with-service-identity
|
||||
node_identities: []
|
||||
register: result
|
||||
check_mode: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result['role']['ServiceIdentities'][0]['ServiceName'] == "web"
|
||||
- result['role']['ServiceIdentities'][0]['Datacenters'][0] == "dc1"
|
||||
- result['role']['Policies'] is not defined
|
||||
- result['role']['NodeIdentities'] == [] # in check mode the cleared field is returned as an emtpy array
|
||||
|
||||
- name: Update the role with service identity to remove the service identities
|
||||
consul_role:
|
||||
token: "{{ consul_management_token }}"
|
||||
name: role-with-service-identity
|
||||
service_identities: []
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result['role']['ServiceIdentities'] is not defined # in normal mode the dictionary is removed from the result
|
||||
- result['role']['Policies'] is not defined
|
||||
|
||||
- name: Create a role with node identity
|
||||
consul_role:
|
||||
token: "{{ consul_management_token }}"
|
||||
name: role-with-node-identity
|
||||
node_identities:
|
||||
- name: node-1
|
||||
datacenter: dc2
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result['role']['NodeIdentities'][0]['NodeName'] == "node-1"
|
||||
- result['role']['NodeIdentities'][0]['Datacenter'] == "dc2"
|
||||
|
||||
- name: Remove the last role
|
||||
consul_role:
|
||||
token: "{{ consul_management_token }}"
|
||||
name: role-with-node-identity
|
||||
state: absent
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
@@ -91,6 +91,7 @@
|
||||
- 3
|
||||
- import_tasks: consul_session.yml
|
||||
- import_tasks: consul_policy.yml
|
||||
- import_tasks: consul_role.yml
|
||||
always:
|
||||
- name: Kill consul process
|
||||
shell: kill $(cat {{ remote_tmp_dir }}/consul.pid)
|
||||
|
||||
11
tests/integration/targets/ejabberd_user/aliases
Normal file
11
tests/integration/targets/ejabberd_user/aliases
Normal file
@@ -0,0 +1,11 @@
|
||||
# Copyright (c) Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
azp/posix/3
|
||||
skip/osx
|
||||
skip/macos
|
||||
skip/freebsd
|
||||
skip/alpine
|
||||
skip/rhel
|
||||
destructive
|
||||
@@ -0,0 +1,9 @@
|
||||
# Copyright (c) Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
---
|
||||
- name: Remove ejabberd
|
||||
ansible.builtin.package:
|
||||
name: ejabberd
|
||||
state: absent
|
||||
7
tests/integration/targets/ejabberd_user/meta/main.yml
Normal file
7
tests/integration/targets/ejabberd_user/meta/main.yml
Normal file
@@ -0,0 +1,7 @@
|
||||
---
|
||||
# Copyright (c) Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
dependencies:
|
||||
- setup_pkg_mgr
|
||||
106
tests/integration/targets/ejabberd_user/tasks/main.yml
Normal file
106
tests/integration/targets/ejabberd_user/tasks/main.yml
Normal file
@@ -0,0 +1,106 @@
|
||||
---
|
||||
####################################################################
|
||||
# WARNING: These are designed specifically for Ansible tests #
|
||||
# and should not be used as examples of how to write Ansible roles #
|
||||
####################################################################
|
||||
|
||||
# Copyright (c) Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
- name: Bail out if not supported
|
||||
ansible.builtin.meta: end_play
|
||||
when: ansible_distribution in ('Alpine', 'openSUSE Leap', 'CentOS', 'Fedora')
|
||||
|
||||
|
||||
- name: Remove ejabberd
|
||||
ansible.builtin.package:
|
||||
name: ejabberd
|
||||
state: absent
|
||||
|
||||
- name: Create user without ejabberdctl installed
|
||||
community.general.ejabberd_user:
|
||||
host: localhost
|
||||
username: alice
|
||||
password: pa$$w0rd
|
||||
state: present
|
||||
register: user_no_ejabberdctl
|
||||
ignore_errors: true
|
||||
|
||||
- name: Install ejabberd
|
||||
ansible.builtin.package:
|
||||
name: ejabberd
|
||||
state: present
|
||||
notify: Remove ejabberd
|
||||
|
||||
- ansible.builtin.service:
|
||||
name: ejabberd
|
||||
state: started
|
||||
|
||||
- name: Create user alice (check)
|
||||
community.general.ejabberd_user:
|
||||
host: localhost
|
||||
username: alice
|
||||
password: pa$$w0rd
|
||||
state: present
|
||||
check_mode: true
|
||||
register: user_alice_check
|
||||
|
||||
- name: Create user alice
|
||||
community.general.ejabberd_user:
|
||||
host: localhost
|
||||
username: alice
|
||||
password: pa$$w0rd
|
||||
state: present
|
||||
register: user_alice
|
||||
|
||||
- name: Create user alice (idempotency)
|
||||
community.general.ejabberd_user:
|
||||
host: localhost
|
||||
username: alice
|
||||
password: pa$$w0rd
|
||||
state: present
|
||||
register: user_alice_idempot
|
||||
|
||||
- name: Create user alice (change password)
|
||||
community.general.ejabberd_user:
|
||||
host: localhost
|
||||
username: alice
|
||||
password: different_pa$$w0rd
|
||||
state: present
|
||||
register: user_alice_chgpw
|
||||
|
||||
- name: Remove user alice (check)
|
||||
community.general.ejabberd_user:
|
||||
host: localhost
|
||||
username: alice
|
||||
state: absent
|
||||
register: remove_alice_check
|
||||
check_mode: true
|
||||
|
||||
- name: Remove user alice
|
||||
community.general.ejabberd_user:
|
||||
host: localhost
|
||||
username: alice
|
||||
state: absent
|
||||
register: remove_alice
|
||||
|
||||
- name: Remove user alice (idempotency)
|
||||
community.general.ejabberd_user:
|
||||
host: localhost
|
||||
username: alice
|
||||
state: absent
|
||||
register: remove_alice_idempot
|
||||
|
||||
- name: Assertions
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- user_no_ejabberdctl is failed
|
||||
- "'Failed to find required executable' in user_no_ejabberdctl.msg"
|
||||
- user_alice_check is changed
|
||||
- user_alice is changed
|
||||
- user_alice_idempot is not changed
|
||||
- user_alice_chgpw is changed
|
||||
- remove_alice_check is changed
|
||||
- remove_alice is changed
|
||||
- remove_alice_idempot is not changed
|
||||
@@ -38,3 +38,9 @@
|
||||
|
||||
- name: include tasks to test regressions
|
||||
include_tasks: tests/03-encoding.yml
|
||||
|
||||
- name: include tasks to test symlink handling
|
||||
include_tasks: tests/04-symlink.yml
|
||||
|
||||
- name: include tasks to test ignore_spaces
|
||||
include_tasks: tests/05-ignore_spaces.yml
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
## basiscs
|
||||
## basics
|
||||
|
||||
- name: test-basic 1 - specify both "value" and "values" and fail
|
||||
ini_file:
|
||||
|
||||
@@ -9,26 +9,26 @@
|
||||
content: |
|
||||
[main]
|
||||
foo=BAR
|
||||
dest: my_original_file.ini
|
||||
dest: "{{ remote_tmp_dir }}/my_original_file.ini"
|
||||
- name: Clean up symlink.ini
|
||||
ansible.builtin.file:
|
||||
path: symlink.ini
|
||||
path: "{{ remote_tmp_dir }}/symlink.ini"
|
||||
state: absent
|
||||
- name: Create a symbolic link
|
||||
ansible.builtin.file:
|
||||
src: my_original_file.ini
|
||||
dest: symlink.ini
|
||||
dest: "{{ remote_tmp_dir }}/symlink.ini"
|
||||
state: link
|
||||
|
||||
- name: Set the proxy key on the symlink which will be converted as a file
|
||||
community.general.ini_file:
|
||||
path: symlink.ini
|
||||
path: "{{ remote_tmp_dir }}/symlink.ini"
|
||||
section: main
|
||||
option: proxy
|
||||
value: 'http://proxy.myorg.org:3128'
|
||||
- name: Set the proxy key on the final file that is still unchanged
|
||||
community.general.ini_file:
|
||||
path: my_original_file.ini
|
||||
path: "{{ remote_tmp_dir }}/my_original_file.ini"
|
||||
section: main
|
||||
option: proxy
|
||||
value: 'http://proxy.myorg.org:3128'
|
||||
@@ -41,7 +41,7 @@
|
||||
- block: *prepare
|
||||
- name: Set the proxy key on the symlink which will be preserved
|
||||
community.general.ini_file:
|
||||
path: symlink.ini
|
||||
path: "{{ remote_tmp_dir }}/symlink.ini"
|
||||
section: main
|
||||
option: proxy
|
||||
value: 'http://proxy.myorg.org:3128'
|
||||
@@ -49,7 +49,7 @@
|
||||
register: result
|
||||
- name: Set the proxy key on the target directly that was changed in the previous step
|
||||
community.general.ini_file:
|
||||
path: my_original_file.ini
|
||||
path: "{{ remote_tmp_dir }}/my_original_file.ini"
|
||||
section: main
|
||||
option: proxy
|
||||
value: 'http://proxy.myorg.org:3128'
|
||||
|
||||
@@ -0,0 +1,123 @@
|
||||
---
|
||||
# Copyright (c) Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
## testing ignore_spaces option
|
||||
|
||||
- name: test-ignore_spaces 1 (commented line updated) - create test file
|
||||
copy:
|
||||
dest: "{{ output_file }}"
|
||||
content: "[foo]\n; bar=baz\n"
|
||||
|
||||
- name: test-ignore_spaces 1 - set new value
|
||||
ini_file:
|
||||
path: "{{ output_file }}"
|
||||
section: foo
|
||||
option: bar
|
||||
value: frelt
|
||||
ignore_spaces: true
|
||||
register: result
|
||||
|
||||
- name: test-ignore_spaces 1 - read content from output file
|
||||
slurp:
|
||||
src: "{{ output_file }}"
|
||||
register: output_content
|
||||
|
||||
- name: test-ignore_spaces 1 - verify results
|
||||
vars:
|
||||
actual_content: "{{ output_content.content | b64decode }}"
|
||||
expected_content: "[foo]\nbar = frelt\n"
|
||||
assert:
|
||||
that:
|
||||
- actual_content == expected_content
|
||||
- result is changed
|
||||
- result.msg == 'option changed'
|
||||
|
||||
- name: test-ignore_spaces 2 (uncommented line updated) - create test file
|
||||
copy:
|
||||
dest: "{{ output_file }}"
|
||||
content: "[foo]\nbar=baz\n"
|
||||
|
||||
- name: test-ignore_spaces 2 - set new value
|
||||
ini_file:
|
||||
path: "{{ output_file }}"
|
||||
section: foo
|
||||
option: bar
|
||||
value: frelt
|
||||
ignore_spaces: true
|
||||
register: result
|
||||
|
||||
- name: test-ignore_spaces 2 - read content from output file
|
||||
slurp:
|
||||
src: "{{ output_file }}"
|
||||
register: output_content
|
||||
|
||||
- name: test-ignore_spaces 2 - verify results
|
||||
vars:
|
||||
actual_content: "{{ output_content.content | b64decode }}"
|
||||
expected_content: "[foo]\nbar = frelt\n"
|
||||
assert:
|
||||
that:
|
||||
- actual_content == expected_content
|
||||
- result is changed
|
||||
- result.msg == 'option changed'
|
||||
|
||||
- name: test-ignore_spaces 3 (spaces on top of no spaces) - create test file
|
||||
copy:
|
||||
dest: "{{ output_file }}"
|
||||
content: "[foo]\nbar=baz\n"
|
||||
|
||||
- name: test-ignore_spaces 3 - try to set value
|
||||
ini_file:
|
||||
path: "{{ output_file }}"
|
||||
section: foo
|
||||
option: bar
|
||||
value: baz
|
||||
ignore_spaces: true
|
||||
register: result
|
||||
|
||||
- name: test-ignore_spaces 3 - read content from output file
|
||||
slurp:
|
||||
src: "{{ output_file }}"
|
||||
register: output_content
|
||||
|
||||
- name: test-ignore_spaces 3 - verify results
|
||||
vars:
|
||||
actual_content: "{{ output_content.content | b64decode }}"
|
||||
expected_content: "[foo]\nbar=baz\n"
|
||||
assert:
|
||||
that:
|
||||
- actual_content == expected_content
|
||||
- result is not changed
|
||||
- result.msg == "OK"
|
||||
|
||||
- name: test-ignore_spaces 4 (no spaces on top of spaces) - create test file
|
||||
copy:
|
||||
dest: "{{ output_file }}"
|
||||
content: "[foo]\nbar = baz\n"
|
||||
|
||||
- name: test-ignore_spaces 4 - try to set value
|
||||
ini_file:
|
||||
path: "{{ output_file }}"
|
||||
section: foo
|
||||
option: bar
|
||||
value: baz
|
||||
ignore_spaces: true
|
||||
no_extra_spaces: true
|
||||
register: result
|
||||
|
||||
- name: test-ignore_spaces 4 - read content from output file
|
||||
slurp:
|
||||
src: "{{ output_file }}"
|
||||
register: output_content
|
||||
|
||||
- name: test-ignore_spaces 4 - verify results
|
||||
vars:
|
||||
actual_content: "{{ output_content.content | b64decode }}"
|
||||
expected_content: "[foo]\nbar = baz\n"
|
||||
assert:
|
||||
that:
|
||||
- actual_content == expected_content
|
||||
- result is not changed
|
||||
- result.msg == "OK"
|
||||
@@ -0,0 +1,5 @@
|
||||
# Copyright (c) Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
unsupported
|
||||
@@ -0,0 +1,4 @@
|
||||
FROM quay.io/keycloak/keycloak:20.0.2
|
||||
|
||||
COPY policy.jar /opt/keycloak/providers/
|
||||
RUN /opt/keycloak/bin/kc.sh build
|
||||
@@ -0,0 +1,3 @@
|
||||
Copyright (c) Ansible Project
|
||||
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
SPDX-License-Identifier: GPL-3.0-or-later
|
||||
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"policies": [
|
||||
{
|
||||
"name": "MyPolicy1",
|
||||
"fileName": "policy-1.js",
|
||||
"description": "My Policy 1"
|
||||
},
|
||||
{
|
||||
"name": "MyPolicy2",
|
||||
"fileName": "policy-2.js",
|
||||
"description": "My Policy 2"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,3 @@
|
||||
Copyright (c) Ansible Project
|
||||
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
SPDX-License-Identifier: GPL-3.0-or-later
|
||||
@@ -0,0 +1,2 @@
|
||||
#!/bin/sh
|
||||
zip -r policy.jar META-INF/keycloak-scripts.json policy-1.js policy-2.js
|
||||
@@ -0,0 +1,3 @@
|
||||
Copyright (c) Ansible Project
|
||||
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
SPDX-License-Identifier: GPL-3.0-or-later
|
||||
@@ -0,0 +1 @@
|
||||
$evaluation.grant();
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user