Mirror of https://github.com/ansible-collections/community.general.git, synced 2026-04-28 17:36:49 +00:00

Compare commits

119 commits
Commit SHA1s:

```
5bd5de4281  4aebefcf9e  62f9a5b0a9  3d03eda99e  c01ce10b4b  16aa776c93
d7d1659e34  5b9b99384f  f898279c8c  2215c6d360  ca3948858a  f14e566cc7
a2c93f5e99  67a2abcab2  2e4864db7f  1f0b2a5173  25482000f0  c0f3aa14cf
1ef104be61  773df88a41  d77e256088  2917389779  59af80235b  aec52198e3
cbe4490c9e  9de059b44d  c72a23a5f1  0b9d9c0fdb  67eaf9405f  5de05a6243
46b4b9a6de  10146aae1c  d2ec7053c5  51fcacae08  29211b970c  5c1fa53558
2348f3d439  46a051d168  b2212bc8ef  e05e3aed67  a13541299e  221067e708
db6458bd93  f342243fb0  37f2b06c3c  0a8a41966d  263c5ba9de  cfc28a3f6a
b495035923  d4637e9b1c  7842dc0dea  314a0bc553  34b7876e4f  65c10de630
a86f31ac0f  bc82fe36be  7c810a6186  9d468fb078  2c79d42eb4  d95c3a738f
839880d711  bc0edf7d55  68458fd8aa  4aa70ab48f  739210c6b9  8c23d0e345
cde4a1a099  10b3381f21  0ccd52b63a  c76e598d61  ad3efa9719  b9c8d2bee5
2c167547f6  73de447489  134f6132ce  f229c800da  52a0970ef8  11e0797650
e6bbbac6a0  d78f3dd7c4  737f8340e4  f87ab7046d  4c100aef47  2b6bbd9f91
0484abdddd  3f119aa9b6  277329a6fe  955eb531a3  9ac2918d49  740180d4a5
5d6c539373  09e2f77289  7aaf8cf496  664bd70294  9651bca396  f3375c638e
14d663029a  9297802089  5b4dc4ace2  15a72418ac  7278bdcf9d  b9aab568f7
e01d014c36  e5bdc028c4  67f7184234  b42ab6b45d  67eafdd20b  3cc11bfd42
a367fba315  00583448e2  c0cae2b27e  07a9efd54f  9dc8f2b05d  baf726b389
2d7302ba12  7caefbd420  1a3c221995  0eecd48ea8  d71c10da27
```
```diff
@@ -68,6 +68,19 @@ stages:
          - test: 3
          - test: 4
          - test: extra
  - stage: Sanity_2_12
    displayName: Sanity 2.12
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: 2.12/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
  - stage: Sanity_2_11
    displayName: Sanity 2.11
    dependsOn: []
@@ -117,7 +130,6 @@ stages:
          nameFormat: Python {0}
          testFormat: devel/units/{0}/1
          targets:
            - test: 2.6
            - test: 2.7
            - test: 3.5
            - test: 3.6
@@ -125,6 +137,22 @@ stages:
            - test: 3.8
            - test: 3.9
            - test: '3.10'
  - stage: Units_2_12
    displayName: Units 2.12
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.12/units/{0}/1
          targets:
            - test: 2.6
            - test: 2.7
            - test: 3.5
            - test: 3.6
            - test: 3.7
            - test: 3.8
            - test: '3.10'
  - stage: Units_2_11
    displayName: Units 2.11
    dependsOn: []
@@ -150,13 +178,8 @@ stages:
          nameFormat: Python {0}
          testFormat: 2.10/units/{0}/1
          targets:
            - test: 2.6
            - test: 2.7
            - test: 3.5
            - test: 3.6
            - test: 3.7
            - test: 3.8
            - test: 3.9
  - stage: Units_2_9
    displayName: Units 2.9
    dependsOn: []
@@ -196,6 +219,23 @@ stages:
            - 1
            - 2
            - 3
  - stage: Remote_2_12
    displayName: Remote 2.12
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.12/{0}
          targets:
            - name: macOS 11.1
              test: macos/11.1
            - name: RHEL 8.4
              test: rhel/8.4
            - name: FreeBSD 13.0
              test: freebsd/13.0
          groups:
            - 1
            - 2
  - stage: Remote_2_11
    displayName: Remote 2.11
    dependsOn: []
@@ -204,8 +244,6 @@ stages:
        parameters:
          testFormat: 2.11/{0}
          targets:
            - name: macOS 11.1
              test: macos/11.1
            - name: RHEL 7.9
              test: rhel/7.9
            - name: RHEL 8.3
@@ -227,14 +265,6 @@ stages:
              test: osx/10.11
            - name: macOS 10.15
              test: macos/10.15
            - name: macOS 11.1
              test: macos/11.1
            - name: RHEL 7.8
              test: rhel/7.8
            - name: RHEL 8.2
              test: rhel/8.2
            - name: FreeBSD 12.1
              test: freebsd/12.1
          groups:
            - 1
            - 2
@@ -248,6 +278,8 @@ stages:
          targets:
            - name: RHEL 8.2
              test: rhel/8.2
            - name: RHEL 7.8
              test: rhel/7.8
            - name: FreeBSD 12.0
              test: freebsd/12.0
          groups:
@@ -263,8 +295,6 @@ stages:
        parameters:
          testFormat: devel/linux/{0}
          targets:
            - name: CentOS 6
              test: centos6
            - name: CentOS 7
              test: centos7
            - name: CentOS 8
@@ -285,6 +315,28 @@ stages:
            - 1
            - 2
            - 3
  - stage: Docker_2_12
    displayName: Docker 2.12
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.12/linux/{0}
          targets:
            - name: CentOS 6
              test: centos6
            - name: CentOS 7
              test: centos7
            - name: Fedora 34
              test: fedora34
            - name: openSUSE 15 py3
              test: opensuse15
            - name: Ubuntu 20.04
              test: ubuntu2004
          groups:
            - 1
            - 2
            - 3
  - stage: Docker_2_11
    displayName: Docker 2.11
    dependsOn: []
@@ -297,10 +349,8 @@ stages:
              test: centos8
            - name: Fedora 33
              test: fedora33
            - name: openSUSE 15 py3
              test: opensuse15
            - name: Ubuntu 20.04
              test: ubuntu2004
            - name: openSUSE 15 py2
              test: opensuse15py2
          groups:
            - 2
            - 3
@@ -312,12 +362,8 @@ stages:
        parameters:
          testFormat: 2.10/linux/{0}
          targets:
            - name: CentOS 8
              test: centos8
            - name: Fedora 32
              test: fedora32
            - name: openSUSE 15 py3
              test: opensuse15
            - name: Ubuntu 16.04
              test: ubuntu1604
          groups:
@@ -331,8 +377,6 @@ stages:
        parameters:
          testFormat: 2.9/linux/{0}
          targets:
            - name: CentOS 8
              test: centos8
            - name: Fedora 31
              test: fedora31
            - name: openSUSE 15 py3
@@ -350,6 +394,17 @@ stages:
        parameters:
          nameFormat: Python {0}
          testFormat: devel/cloud/{0}/1
          targets:
            - test: 2.7
            - test: 3.9
  - stage: Cloud_2_12
    displayName: Cloud 2.12
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.12/cloud/{0}/1
          targets:
            - test: 3.8
  - stage: Cloud_2_11
@@ -361,7 +416,6 @@ stages:
          nameFormat: Python {0}
          testFormat: 2.11/cloud/{0}/1
          targets:
            - test: 2.7
            - test: 3.6
  - stage: Cloud_2_10
    displayName: Cloud 2.10
@@ -372,7 +426,7 @@ stages:
          nameFormat: Python {0}
          testFormat: 2.10/cloud/{0}/1
          targets:
            - test: 3.6
            - test: 3.5
  - stage: Cloud_2_9
    displayName: Cloud 2.9
    dependsOn: []
@@ -382,7 +436,7 @@ stages:
          nameFormat: Python {0}
          testFormat: 2.9/cloud/{0}/1
          targets:
            - test: 3.6
            - test: 2.7
  - stage: Summary
    condition: succeededOrFailed()
    dependsOn:
@@ -390,21 +444,26 @@ stages:
      - Sanity_2_9
      - Sanity_2_10
      - Sanity_2_11
      - Sanity_2_12
      - Units_devel
      - Units_2_9
      - Units_2_10
      - Units_2_11
      - Units_2_12
      - Remote_devel
      - Remote_2_9
      - Remote_2_10
      - Remote_2_11
      - Remote_2_12
      - Docker_devel
      - Docker_2_9
      - Docker_2_10
      - Docker_2_11
      - Docker_2_12
      - Cloud_devel
      - Cloud_2_9
      - Cloud_2_10
      - Cloud_2_11
      - Cloud_2_12
    jobs:
      - template: templates/coverage.yml
```
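Every stage in the pipeline diff above delegates to `templates/matrix.yml`, which fans the `targets` list out into one job per entry by substituting each entry into `testFormat` (and, where given, into `nameFormat` for the display name). The template itself is not part of this diff, so the following expansion is only a sketch; the job fields and the `shippable.sh` entry point are assumptions:

```yaml
# Sketch only: a job the matrix template could generate for the new
# Units_2_12 stage, with target '3.8' substituted into the format strings.
# The exact job layout inside templates/matrix.yml is not shown in this diff.
- job: Python_3_8
  displayName: Python 3.8              # from nameFormat 'Python {0}'
  steps:
    # assumed test entry point; '2.12/units/{0}/1' becomes '2.12/units/3.8/1'
    - script: tests/utils/shippable/shippable.sh 2.12/units/3.8/1
```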
.github/BOTMETA.yml (vendored): 41 lines changed
```diff
@@ -1,7 +1,8 @@
notifications: true
automerge: true
files:
  plugins/:
    supershipit: quidame Ajpantuso
    supershipit: quidame
  changelogs/: {}
  changelogs/fragments/:
    support: community
@@ -48,6 +49,9 @@ files:
    maintainers: dagwieers
  $callbacks/diy.py:
    maintainers: theque5t
  $callbacks/elastic.py:
    maintainers: v1v
    keywords: apm observability
  $callbacks/hipchat.py: {}
  $callbacks/jabber.py: {}
  $callbacks/loganalytics.py:
@@ -62,6 +66,9 @@ files:
  $callbacks/nrdp.py:
    maintainers: rverchere
  $callbacks/null.py: {}
  $callbacks/opentelemetry.py:
    maintainers: v1v
    keywords: opentelemetry observability
  $callbacks/say.py:
    notify: chris-short
    maintainers: $team_macos
@@ -131,6 +138,8 @@ files:
  $filters/random_mac.py: {}
  $filters/time.py:
    maintainers: resmo
  $filters/unicode_normalize.py:
    maintainers: Ajpantuso
  $filters/version_sort.py:
    maintainers: ericzolf
  $inventories/:
@@ -148,8 +157,14 @@ files:
  $inventories/nmap.py: {}
  $inventories/online.py:
    maintainers: sieben
  $inventories/opennebula.py:
    maintainers: feldsam
    labels: cloud opennebula
    keywords: opennebula dynamic inventory script
  $inventories/proxmox.py:
    maintainers: $team_virt ilijamt
  $inventories/icinga2.py:
    maintainers: bongoeadgc6
  $inventories/scaleway.py:
    maintainers: $team_scaleway
    labels: cloud scaleway
@@ -173,7 +188,7 @@ files:
  $lookups/dnstxt.py:
    maintainers: jpmens
  $lookups/dsv.py:
    maintainers: amigus
    maintainers: amigus endlesstrax
  $lookups/etcd3.py:
    maintainers: eric-belhomme
  $lookups/etcd.py:
@@ -209,7 +224,7 @@ files:
    maintainers: $team_ansible_core jpmens
  $lookups/shelvefile.py: {}
  $lookups/tss.py:
    maintainers: amigus
    maintainers: amigus endlesstrax
  $module_utils/:
    labels: module_utils
  $module_utils/gitlab.py:
@@ -458,6 +473,10 @@ files:
    maintainers: slok
  $modules/database/misc/redis_info.py:
    maintainers: levonet
  $modules/database/misc/redis_data_info.py:
    maintainers: paginabianca
  $modules/database/misc/redis_data.py:
    maintainers: paginabianca
  $modules/database/misc/riak.py:
    maintainers: drewkerrigan jsmartin
  $modules/database/mssql/mssql_db.py:
@@ -506,10 +525,14 @@ files:
    maintainers: Gaetan2907
  $modules/identity/keycloak/keycloak_group.py:
    maintainers: adamgoossens
  $modules/identity/keycloak/keycloak_identity_provider.py:
    maintainers: laurpaum
  $modules/identity/keycloak/keycloak_realm.py:
    maintainers: kris2kris
  $modules/identity/keycloak/keycloak_role.py:
    maintainers: laurpaum
  $modules/identity/keycloak/keycloak_user_federation.py:
    maintainers: laurpaum
  $modules/identity/onepassword_info.py:
    maintainers: Rylon
  $modules/identity/opendj/opendj_backendprop.py:
@@ -738,6 +761,8 @@ files:
    ignore: jle64
  $modules/packaging/language/pip_package_info.py:
    maintainers: bcoca matburt maxamillion
  $modules/packaging/language/pipx.py:
    maintainers: russoz
  $modules/packaging/language/yarn.py:
    maintainers: chrishoffman verkaufer
  $modules/packaging/os/apk.py:
@@ -1101,11 +1126,13 @@ files:
  $modules/web_infrastructure/apache2_mod_proxy.py:
    maintainers: oboukili
  $modules/web_infrastructure/apache2_module.py:
    maintainers: berendt n0trax robinro
    maintainers: berendt n0trax
    ignore: robinro
  $modules/web_infrastructure/deploy_helper.py:
    maintainers: ramondelafuente
  $modules/web_infrastructure/django_manage.py:
    maintainers: scottanderson42 russoz tastychutney
    maintainers: russoz
    ignore: scottanderson42 tastychutney
    labels: django_manage
  $modules/web_infrastructure/ejabberd_user.py:
    maintainers: privateip
@@ -1136,6 +1163,10 @@ files:
    maintainers: nerzhul
  $modules/web_infrastructure/rundeck_project.py:
    maintainers: nerzhul
  $modules/web_infrastructure/rundeck_job_run.py:
    maintainers: phsmith
  $modules/web_infrastructure/rundeck_job_executions_info.py:
    maintainers: phsmith
  $modules/web_infrastructure/sophos_utm/:
    maintainers: $team_e_spirit
    keywords: sophos utm
```
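The `$callbacks/`, `$filters/`, `$inventories/`, `$lookups/`, `$module_utils/`, and `$modules/` prefixes above are BOTMETA path macros. The macro definitions live elsewhere in `.github/BOTMETA.yml` and are not part of this hunk, so the following is a sketch of the convention rather than a quote from the file:

```yaml
# Sketch of the BOTMETA macro convention (definitions assumed, not quoted):
macros:
  callbacks: plugins/callback
  filters: plugins/filter
  inventories: plugins/inventory
  lookups: plugins/lookup
  module_utils: plugins/module_utils
  modules: plugins/modules
# With these macros, $callbacks/opentelemetry.py above resolves to
# plugins/callback/opentelemetry.py.
```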
CHANGELOG.rst: 227 lines changed
```diff
@@ -6,6 +6,233 @@ Community General Release Notes

This changelog describes changes after version 2.0.0.

v3.8.0
======

Release Summary
---------------

Regular feature and bugfix release. Please note that this is the last minor 3.x.0 release; afterwards there will only be bugfix releases 3.8.y.

Minor Changes
-------------

- mail - added the ``ehlohost`` parameter which allows for manual override of the host used in SMTP EHLO (https://github.com/ansible-collections/community.general/pull/3425).
- nmcli - the option ``routing_rules4`` can now be specified as a list of strings, instead of as a single string (https://github.com/ansible-collections/community.general/issues/3401).
- open-iscsi - adding support for mutual authentication between target and initiator (https://github.com/ansible-collections/community.general/pull/3422).
- opentelemetry callback plugin - added option ``enable_from_environment`` to support enabling the plugin only if the given environment variable exists and it is set to true (https://github.com/ansible-collections/community.general/pull/3498).
- opentelemetry callback plugin - enriched the stacktrace information with the ``message``, ``exception`` and ``stderr`` fields from the failed task (https://github.com/ansible-collections/community.general/pull/3496).
- pkgng - packages being installed (or upgraded) are acted on in one command (per action) (https://github.com/ansible-collections/community.general/issues/2265).
- pkgng - status message specifies number of packages installed and/or upgraded separately. Previously, all changes were reported as one count of packages "added" (https://github.com/ansible-collections/community.general/pull/3393).
- terraform - add ``parallelism`` parameter (https://github.com/ansible-collections/community.general/pull/3540).
- ufw - if ``delete=true`` and ``insert`` option is present, then ``insert`` is now ignored rather than failing with a syntax error (https://github.com/ansible-collections/community.general/pull/3514).

Bugfixes
--------

- gitlab_deploy_key - fix idempotency on projects with multiple deploy keys (https://github.com/ansible-collections/community.general/pull/3473).
- gitlab_group - avoid passing wrong value for ``require_two_factor_authentication`` on creation when the option has not been specified (https://github.com/ansible-collections/community.general/pull/3453).
- gitlab_group_members - ``get_group_id`` returns the group ID by matching ``full_path``, ``path`` or ``name`` (https://github.com/ansible-collections/community.general/pull/3400).
- jboss - fix the deployment file permission issue when the JBoss server is running under a non-root user. The deployment file is copied with file content only. The file permission is set to ``440`` and belongs to the root user. When the JBoss ``WildFly`` server is running under a non-root user, it is unable to read the deployment file (https://github.com/ansible-collections/community.general/pull/3426).
- keycloak_authentication - fix bug where the requirement was always ``DISABLED`` when creating a new authentication flow (https://github.com/ansible-collections/community.general/pull/3330).
- keycloak_identity_provider - fix change detection when updating identity provider mappers (https://github.com/ansible-collections/community.general/pull/3538, https://github.com/ansible-collections/community.general/issues/3537).
- keycloak_role - quote role name when used in URL path to avoid errors when role names contain special characters (https://github.com/ansible-collections/community.general/issues/3535, https://github.com/ansible-collections/community.general/pull/3536).
- logstash callback plugin - replace ``_option`` with ``context.CLIARGS`` to fix the plugin on ansible-base and ansible-core (https://github.com/ansible-collections/community.general/issues/2692).
- macports - add ``stdout`` and ``stderr`` to return values (https://github.com/ansible-collections/community.general/issues/3499).
- opentelemetry callback plugin - validated the task result exception without crashing. Also simplifying code a bit (https://github.com/ansible-collections/community.general/pull/3450, https://github.com/ansible/ansible/issues/75726).
- yaml callback plugin - avoid modifying PyYAML so that other plugins using it on the controller, like the ``to_yaml`` filter, do not produce different output (https://github.com/ansible-collections/community.general/issues/3471, https://github.com/ansible-collections/community.general/pull/3478).
- zypper_repository - when a URL to a .repo file was provided in option ``repo=`` and ``state=present``, only the first run was successful; future runs failed due to missing checks prior to starting zypper. Usage of ``state=absent`` in combination with a .repo file was not working either (https://github.com/ansible-collections/community.general/issues/1791, https://github.com/ansible-collections/community.general/issues/3466).

New Plugins
-----------

Callback
~~~~~~~~

- elastic - Create distributed traces for each Ansible task in Elastic APM

Inventory
~~~~~~~~~

- opennebula - OpenNebula inventory source

New Modules
-----------

Cloud
~~~~~

misc
^^^^

- proxmox_tasks_info - Retrieve information about one or more Proxmox VE tasks

Packaging
~~~~~~~~~

language
^^^^^^^^

- pipx - Manages applications installed with pipx

Web Infrastructure
~~~~~~~~~~~~~~~~~~

- rundeck_job_executions_info - Query executions for a Rundeck job
- rundeck_job_run - Run a Rundeck job
```
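Of the minor changes above, the new list form of ``routing_rules4`` in nmcli is easiest to see in playbook form. A hypothetical task; the connection name and rule strings are illustrative only:

```yaml
- name: Configure a connection with several IPv4 routing rules (illustrative)
  community.general.nmcli:
    conn_name: my-eth0                  # hypothetical connection name
    type: ethernet
    ip4: 192.168.1.10/24
    routing_rules4:                     # a list of strings is now accepted
      - priority 5 from 192.168.1.0/24 table 5000
      - priority 10 from 192.168.2.0/24 table 5001
    state: present
```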
```diff
v3.7.0
======

Release Summary
---------------

Regular feature and bugfix release.

Minor Changes
-------------

- gitlab_group - add new options ``project_creation_level``, ``auto_devops_enabled``, ``subgroup_creation_level`` (https://github.com/ansible-collections/community.general/pull/3248).
- gitlab_group - add new property ``require_two_factor_authentication`` (https://github.com/ansible-collections/community.general/pull/3367).
- gitlab_project - add new properties ``ci_config_path`` and ``shared_runners_enabled`` (https://github.com/ansible-collections/community.general/pull/3379).
- gitlab_project_members - ``gitlab_user`` can now also be a list of users (https://github.com/ansible-collections/community.general/pull/3319).
- gitlab_project_members - added functionality to set all members exactly as given (https://github.com/ansible-collections/community.general/pull/3319).
- gitlab_runner - support project-scoped gitlab.com runners registration (https://github.com/ansible-collections/community.general/pull/634).
- interfaces_file - minor refactor (https://github.com/ansible-collections/community.general/pull/3328).
- ipa_config - add ``ipaselinuxusermaporder`` option to set the SELinux user map order (https://github.com/ansible-collections/community.general/pull/3178).
- kernel_blacklist - revamped the module using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/3329).
- lxd_container - add ``ignore_volatile_options`` option which allows disabling the behavior that the module ignores options starting with ``volatile.`` (https://github.com/ansible-collections/community.general/pull/3331).
- nmcli - add ``gsm`` support (https://github.com/ansible-collections/community.general/pull/3313).
- pids - refactor to add support for older ``psutil`` versions to the ``pattern`` option (https://github.com/ansible-collections/community.general/pull/3315).
- redfish_command and redfish_config and redfish_utils module utils - add parameter to strip etag of quotes before patch, since some vendors do not properly handle ``If-Match`` etags with quotes (https://github.com/ansible-collections/community.general/pull/3296).
- tss lookup plugin - added ``token`` parameter for token authorization; ``username`` and ``password`` are optional when ``token`` is provided (https://github.com/ansible-collections/community.general/pull/3327).
- zpool_facts - minor refactoring (https://github.com/ansible-collections/community.general/pull/3332).

Bugfixes
--------

- copr - fix chroot naming issues, ``centos-stream`` changed naming to ``centos-stream-<number>`` (for example ``centos-stream-8``) (https://github.com/ansible-collections/community.general/issues/2084, https://github.com/ansible-collections/community.general/pull/3237).
- django_manage - parameters ``apps`` and ``fixtures`` are now split instead of being used as a single argument (https://github.com/ansible-collections/community.general/issues/3333).
- interfaces_file - no longer reporting change when none happened (https://github.com/ansible-collections/community.general/pull/3328).
- linode inventory plugin - fix default value of new option ``ip_style`` (https://github.com/ansible-collections/community.general/issues/3337).
- openbsd_pkg - fix crash from ``KeyError`` exception when a package installs, but ``pkg_add`` returns with a non-zero exit code (https://github.com/ansible-collections/community.general/pull/3336).
- redfish_utils module utils - if given, add account ID of user that should be created to HTTP request (https://github.com/ansible-collections/community.general/pull/3343/).

New Plugins
-----------

Callback
~~~~~~~~

- opentelemetry - Create distributed traces with OpenTelemetry

Filter
~~~~~~

- unicode_normalize - Normalizes unicode strings to facilitate comparison of characters with normalized forms

Inventory
~~~~~~~~~

- icinga2 - Icinga2 inventory source

New Modules
-----------

Database
~~~~~~~~

misc
^^^^

- redis_data - Set key value pairs in Redis
- redis_data_info - Get value of key in Redis database

Identity
~~~~~~~~

keycloak
^^^^^^^^

- keycloak_user_federation - Allows administration of Keycloak user federations via Keycloak API

v3.6.0
======

Release Summary
---------------

Regular feature and bugfix release.

Minor Changes
-------------

- dig lookup plugin - add ``retry_servfail`` option (https://github.com/ansible-collections/community.general/pull/3247).
- gitlab_group_members - ``gitlab_user`` can now also be a list of users (https://github.com/ansible-collections/community.general/pull/3047).
- gitlab_group_members - added functionality to set all members exactly as given (https://github.com/ansible-collections/community.general/pull/3047).
- ini_file - add ability to define multiple options with the same name but different values (https://github.com/ansible-collections/community.general/issues/273, https://github.com/ansible-collections/community.general/issues/1204).
- ini_file - add module option ``exclusive`` (boolean) for the ability to add/remove single ``option=value`` entries without overwriting existing options with the same name but different values (https://github.com/ansible-collections/community.general/pull/3033).
- keycloak_realm - add ``events_enabled`` parameter to allow activation or deactivation of login events (https://github.com/ansible-collections/community.general/pull/3231).
- linode inventory plugin - adds the ``ip_style`` configuration key. Set to ``api`` to get more detailed network details back from the remote Linode host (https://github.com/ansible-collections/community.general/pull/3203).
- module_helper cmd module utils - added the ``ArgFormat`` style ``BOOLEAN_NOT``, to add CLI parameters when the module argument is false-ish (https://github.com/ansible-collections/community.general/pull/3290).
- module_helper module_utils - added classmethod to trigger the execution of MH modules (https://github.com/ansible-collections/community.general/pull/3206).
- nmcli - add ``gre`` tunnel support (https://github.com/ansible-collections/community.general/issues/3105, https://github.com/ansible-collections/community.general/pull/3262).
- nmcli - query ``nmcli`` directly to determine available WiFi options (https://github.com/ansible-collections/community.general/pull/3141).
- open_iscsi - minor refactoring (https://github.com/ansible-collections/community.general/pull/3286).
- openwrt_init - minor refactoring (https://github.com/ansible-collections/community.general/pull/3284).
- pamd - minor refactorings (https://github.com/ansible-collections/community.general/pull/3285).
- redfish_info - include ``Status`` property for Thermal objects when querying Thermal properties via ``GetChassisThermals`` command (https://github.com/ansible-collections/community.general/issues/3232).
- scaleway plugin inventory - parse scw-cli config file for ``oauth_token`` (https://github.com/ansible-collections/community.general/pull/3250).
- slack - minor refactoring (https://github.com/ansible-collections/community.general/pull/3205).
- snap - improved module error handling, especially for the case when the snap server is down (https://github.com/ansible-collections/community.general/issues/2970).
- tss lookup plugin - added new parameter for domain authorization (https://github.com/ansible-collections/community.general/pull/3228).
- tss lookup plugin - refactored to decouple the supporting third-party library (``python-tss-sdk``) (https://github.com/ansible-collections/community.general/pull/3252).
- vdo - minor refactoring of the code (https://github.com/ansible-collections/community.general/pull/3191).
- zfs - added diff mode support (https://github.com/ansible-collections/community.general/pull/502).
- zypper - prefix zypper commands with ``/sbin/transactional-update --continue --drop-if-no-change --quiet run`` if transactional updates are detected (https://github.com/ansible-collections/community.general/issues/3159).

Bugfixes
--------

- apache2_module - fix ``a2enmod``/``a2dismod`` detection, and error message when not found (https://github.com/ansible-collections/community.general/issues/3253).
- django_manage - argument ``command`` is being split again as it should (https://github.com/ansible-collections/community.general/issues/3215).
- keycloak_realm - element type for ``events_listeners`` parameter should be ``string`` instead of ``dict`` (https://github.com/ansible-collections/community.general/pull/3231).
- launchd - use private attribute to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- logdns callback plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- maven_artifact - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- memcached cache plugin - change function argument names to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- netapp module utils - remove always-true conditional to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- nmcli - added ip4/ip6 configuration arguments for ``sit`` and ``ipip`` tunnels (https://github.com/ansible-collections/community.general/issues/3238, https://github.com/ansible-collections/community.general/pull/3239).
- one_template - change function argument name to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- online inventory plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- online module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- open_iscsi - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3286).
- openwrt_init - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3284).
- packet_device - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- packet_sshkey - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- pamd - code for ``state=updated``, when dealing with the pam module arguments, made no distinction between ``None`` and an empty list (https://github.com/ansible-collections/community.general/issues/3260).
- proxmox_kvm - clone operation should return the VMID of the target VM and not that of the source VM. This was failing when the target VM with the chosen name already existed (https://github.com/ansible-collections/community.general/pull/3266).
- saltstack connection plugin - fix function signature (https://github.com/ansible-collections/community.general/pull/3194).
- scaleway inventory script - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3195).
- scaleway module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- tss lookup plugin - fixed backwards compatibility issue with ``python-tss-sdk`` version <=0.0.5 (https://github.com/ansible-collections/community.general/issues/3192, https://github.com/ansible-collections/community.general/pull/3199).
- udm_dns_record - fixed managing of PTR records, which can never have worked before (https://github.com/ansible-collections/community.general/pull/3256).
- ufw - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- vbox inventory script - change function argument name to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3195).
- vdo - boolean arguments now compared with proper ``true`` and ``false`` values instead of string representations like ``"yes"`` or ``"no"`` (https://github.com/ansible-collections/community.general/pull/3191).
- zfs - treated received properties as local (https://github.com/ansible-collections/community.general/pull/502).

New Modules
-----------

Identity
~~~~~~~~

keycloak
^^^^^^^^

- keycloak_identity_provider - Allows administration of Keycloak identity providers via Keycloak API
```
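The ``ini_file`` changes in v3.6.0 above (multiple options with the same name, plus the ``exclusive`` option) are also clearer as a task. A hypothetical sketch; path, section, and values are illustrative:

```yaml
- name: Add another 'alias' entry without removing existing alias lines
  community.general.ini_file:
    path: /etc/example/service.ini      # hypothetical file
    section: aliases
    option: alias
    value: extra.example.com
    exclusive: false                    # keep other alias=... entries intact
    state: present
```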
```diff
v3.5.0
======
```
```diff
@@ -26,6 +26,7 @@ Also, consider taking up a valuable, reviewed, but abandoned pull request which

* Do not squash your commits and force-push to your branch if not needed. Reviews of your pull request are much easier with individual commits to comprehend the pull request history. All commits of your pull request branch will be squashed into one commit by GitHub upon merge.
* Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the repository checkout.
* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#changelogs-how-to). (You must not include a fragment for new modules or new plugins, except for test and filter plugins. Also you shouldn't include one for docs-only changes. If you're not sure, simply don't include one; we'll tell you whether one is needed or not :) )
* Avoid reformatting unrelated parts of the codebase in your PR. These types of changes will likely be requested for reversion, create additional work for reviewers, and may cause approval to be delayed.
```
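The changelog fragment mentioned in the list above is a small YAML file under `changelogs/fragments/` whose top-level keys are change categories; the categories match the ones visible in the `changelogs/changelog.yaml` hunk later in this diff. A minimal sketch with a hypothetical file name, module, and PR number:

```yaml
# changelogs/fragments/0000-mymodule-idempotency.yml (hypothetical)
bugfixes:
  - mymodule - make check mode idempotent (https://github.com/ansible-collections/community.general/pull/0000).
```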
```diff
You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst).

@@ -42,7 +43,12 @@ Creating new modules and plugins requires a bit more work than other Pull Reques
1. Please make sure that your new module or plugin is of interest to a larger audience. Very specialized modules or plugins that
   can only be used by very few people should better be added to more specialized collections.

2. When creating a new module or plugin, please make sure that you follow various guidelines:
2. Please do not add more than one plugin/module in one PR, especially if it is the first plugin/module you are contributing.
   That makes it easier for reviewers, and increases the chance that your PR will get merged. If you plan to contribute a group
   of plugins/modules (say, more than a module and a corresponding ``_info`` module), please mention that in the first PR. In
   such cases, you also have to think whether it is better to publish the group of plugins/modules in a new collection.

3. When creating a new module or plugin, please make sure that you follow various guidelines:

   - Follow [development conventions](https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_best_practices.html);
   - Follow [documentation standards](https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_documenting.html) and
@@ -52,7 +58,7 @@ Creating new modules and plugins requires a bit more work than other Pull Reques
   - Make sure that new plugins and modules have tests (unit tests, integration tests, or both); it is preferable to have some tests
     which run in CI.

3. For modules and action plugins, make sure to create your module/plugin in the correct subdirectory, and create a symbolic link
4. For modules and action plugins, make sure to create your module/plugin in the correct subdirectory, and create a symbolic link
   from `plugins/modules/` respectively `plugins/action/` to the actual module/plugin code. (Other plugin types should not use
   subdirectories.)

@@ -60,7 +66,7 @@ Creating new modules and plugins requires a bit more work than other Pull Reques
   (`DOCUMENTATION`, `EXAMPLES` and `RETURN`). The module must have the same name and directory path in `plugins/modules/`
   as the action plugin has in `plugins/action/`.

4. Make sure to add a BOTMETA entry for your new module/plugin in `.github/BOTMETA.yml`. Search for other plugins/modules in the
5. Make sure to add a BOTMETA entry for your new module/plugin in `.github/BOTMETA.yml`. Search for other plugins/modules in the
   same directory to see how entries could look. You should list all authors either as `maintainers` or under `ignore`. People
   listed as `maintainers` will be pinged for new issues and PRs that modify the module/plugin or its tests.
```
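For step 5, the `.github/BOTMETA.yml` hunks earlier in this diff show what such entries look like: the macro path of the new module plus its people. A hypothetical entry for a new module:

```yaml
# .github/BOTMETA.yml (hypothetical new entry, nested under the files: key,
# following the pattern of the entries shown above)
  $modules/web_infrastructure/my_new_module.py:
    maintainers: my-github-handle       # pinged on issues/PRs touching the module
    ignore: former-author               # listed as author but not pinged
```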
README.md: 33 lines changed
```diff
@@ -17,7 +17,7 @@ If you encounter abusive behavior violating the [Ansible Code of Conduct](https:

## Tested with Ansible

Tested with the current Ansible 2.9, ansible-base 2.10 and ansible-core 2.11 releases and the current development version of ansible-core. Ansible versions before 2.9.10 are not supported.
Tested with the current Ansible 2.9, ansible-base 2.10, ansible-core 2.11, and ansible-core 2.12 releases and the current development version of ansible-core. Ansible versions before 2.9.10 are not supported.

## External requirements

@@ -76,7 +76,21 @@ Also for some notes specific to this collection see [our CONTRIBUTING documentat

See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#testing-collections).

### Communication
## Collection maintenance

To learn how to maintain / become a maintainer of this collection, refer to:

* [Committer guidelines](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md).
* [Maintainer guidelines](https://github.com/ansible/community-docs/blob/main/maintaining.rst).

It is necessary for maintainers of this collection to be subscribed to:

* The collection itself (the `Watch` button → `All Activity` in the upper right corner of the repository's homepage).
* The "Changes Impacting Collection Contributors and Maintainers" [issue](https://github.com/ansible-collections/overview/issues/45).

They also should be subscribed to Ansible's [The Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn).

## Communication

We announce important development changes and releases through Ansible's [The Bullhorn newsletter](https://eepurl.com/gZmiEP). If you are a collection developer, be sure you are subscribed.

@@ -86,16 +100,11 @@ We take part in the global quarterly [Ansible Contributor Summit](https://github

For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community).

For more information about communication, refer to the [Ansible communication guide](https://docs.ansible.com/ansible/devel/community/communication.html).
For more information about communication, refer to Ansible's [Communication guide](https://docs.ansible.com/ansible/devel/community/communication.html).

### Publishing New Version
## Publishing New Version

Basic instructions without release branches:

1. Create `changelogs/fragments/<version>.yml` with `release_summary:` section (which must be a string, not a list).
2. Run `antsibull-changelog release --collection-flatmap yes`
3. Make sure `CHANGELOG.rst` and `changelogs/changelog.yaml` are added to git, and the deleted fragments have been removed.
4. Tag the commit with `<version>`. Push changes and tag to the main repository.
```
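Step 1 of the replaced instructions above requires the `release_summary` in `changelogs/fragments/<version>.yml` to be a string, not a list. A minimal sketch (version and wording illustrative; compare the `release_summary` entries in the `changelogs/changelog.yaml` hunk below):

```yaml
# changelogs/fragments/3.8.0.yml (illustrative)
release_summary: Regular feature and bugfix release.
```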
```diff
See the [Releasing guidelines](https://github.com/ansible/community-docs/blob/main/releasing_collections.rst) to learn how to release this collection.

## Release notes

@@ -103,10 +112,10 @@ See the [changelog](https://github.com/ansible-collections/community.general/blo

## Roadmap

See [this issue](https://github.com/ansible-collections/community.general/issues/582) for information on releasing, versioning and deprecation.

In general, we plan to release a major version every six months, and minor versions every two months. Major versions can contain breaking changes, while minor versions only contain new features and bugfixes.

See [this issue](https://github.com/ansible-collections/community.general/issues/582) for information on releasing, versioning, and deprecation.

## More information

- [Ansible Collection overview](https://github.com/ansible-collections/overview)
```
```diff
@@ -1624,3 +1624,319 @@ releases:
        name: xfconf_info
        namespace: system
    release_date: '2021-08-10'
  3.6.0:
    changes:
      bugfixes:
      - apache2_module - fix ``a2enmod``/``a2dismod`` detection, and error message
        when not found (https://github.com/ansible-collections/community.general/issues/3253).
      - django_manage - argument ``command`` is being split again as it should
        (https://github.com/ansible-collections/community.general/issues/3215).
      - keycloak_realm - element type for ``events_listeners`` parameter should be
        ``string`` instead of ``dict`` (https://github.com/ansible-collections/community.general/pull/3231).
      - launchd - use private attribute to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
      - logdns callback plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
      - maven_artifact - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
      - memcached cache plugin - change function argument names to fix sanity errors
        (https://github.com/ansible-collections/community.general/pull/3194).
      - netapp module utils - remove always-true conditional to fix sanity errors
        (https://github.com/ansible-collections/community.general/pull/3194).
      - nmcli - added ip4/ip6 configuration arguments for ``sit`` and ``ipip`` tunnels
        (https://github.com/ansible-collections/community.general/issues/3238, https://github.com/ansible-collections/community.general/pull/3239).
      - one_template - change function argument name to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
      - online inventory plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
      - online module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
      - open_iscsi - calling ``run_command`` with arguments as ``list`` instead of
        ``str`` (https://github.com/ansible-collections/community.general/pull/3286).
      - openwrt_init - calling ``run_command`` with arguments as ``list`` instead
        of ``str`` (https://github.com/ansible-collections/community.general/pull/3284).
      - packet_device - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
      - packet_sshkey - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
      - pamd - code for ``state=updated`` when dealing with the pam module arguments,
        made no distinction between ``None`` and an empty list (https://github.com/ansible-collections/community.general/issues/3260).
      - proxmox_kvm - clone operation should return the VMID of the target VM and
        not that of the source VM. This was failing when the target VM with the chosen
        name already existed (https://github.com/ansible-collections/community.general/pull/3266).
      - saltstack connection plugin - fix function signature (https://github.com/ansible-collections/community.general/pull/3194).
      - scaleway inventory script - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3195).
      - scaleway module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
      - tss lookup plugin - fixed backwards compatibility issue with ``python-tss-sdk``
        version <=0.0.5 (https://github.com/ansible-collections/community.general/issues/3192,
        https://github.com/ansible-collections/community.general/pull/3199).
      - udm_dns_record - fixed managing of PTR records, which can never have worked
        before (https://github.com/ansible-collections/community.general/pull/3256).
      - ufw - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
      - vbox inventory script - change function argument name to fix sanity errors
        (https://github.com/ansible-collections/community.general/pull/3195).
      - vdo - boolean arguments now compared with proper ``true`` and ``false`` values
        instead of string representations like ``"yes"`` or ``"no"`` (https://github.com/ansible-collections/community.general/pull/3191).
      - zfs - treated received properties as local (https://github.com/ansible-collections/community.general/pull/502).
      minor_changes:
      - dig lookup plugin - add ``retry_servfail`` option (https://github.com/ansible-collections/community.general/pull/3247).
      - gitlab_group_members - ``gitlab_user`` can now also be a list of users (https://github.com/ansible-collections/community.general/pull/3047).
      - gitlab_group_members - added functionality to set all members exactly as given
        (https://github.com/ansible-collections/community.general/pull/3047).
      - ini_file - add ability to define multiple options with the same name but
        different values (https://github.com/ansible-collections/community.general/issues/273,
        https://github.com/ansible-collections/community.general/issues/1204).
      - ini_file - add module option ``exclusive`` (boolean) for the ability to add/remove
        single ``option=value`` entries without overwriting existing options with
        the same name but different values (https://github.com/ansible-collections/community.general/pull/3033).
      - keycloak_realm - add ``events_enabled`` parameter to allow activation or deactivation
        of login events (https://github.com/ansible-collections/community.general/pull/3231).
      - linode inventory plugin - adds the ``ip_style`` configuration key. Set to
        ``api`` to get more detailed network details back from the remote Linode host
        (https://github.com/ansible-collections/community.general/pull/3203).
      - module_helper cmd module utils - added the ``ArgFormat`` style ``BOOLEAN_NOT``,
        to add CLI parameters when the module argument is false-ish (https://github.com/ansible-collections/community.general/pull/3290).
      - module_helper module_utils - added classmethod to trigger the execution of
        MH modules (https://github.com/ansible-collections/community.general/pull/3206).
      - nmcli - add ``gre`` tunnel support (https://github.com/ansible-collections/community.general/issues/3105,
        https://github.com/ansible-collections/community.general/pull/3262).
      - nmcli - query ``nmcli`` directly to determine available WiFi options (https://github.com/ansible-collections/community.general/pull/3141).
      - open_iscsi - minor refactoring (https://github.com/ansible-collections/community.general/pull/3286).
      - openwrt_init - minor refactoring (https://github.com/ansible-collections/community.general/pull/3284).
      - pamd - minor refactorings (https://github.com/ansible-collections/community.general/pull/3285).
      - redfish_info - include ``Status`` property for Thermal objects when querying
        Thermal properties via ``GetChassisThermals`` command (https://github.com/ansible-collections/community.general/issues/3232).
      - scaleway plugin inventory - parse scw-cli config file for ``oauth_token``
        (https://github.com/ansible-collections/community.general/pull/3250).
      - slack - minor refactoring (https://github.com/ansible-collections/community.general/pull/3205).
      - snap - improved module error handling, especially for the case when the snap
        server is down (https://github.com/ansible-collections/community.general/issues/2970).
      - tss lookup plugin - added new parameter for domain authorization (https://github.com/ansible-collections/community.general/pull/3228).
      - tss lookup plugin - refactored to decouple the supporting third-party library
        (``python-tss-sdk``) (https://github.com/ansible-collections/community.general/pull/3252).
      - vdo - minor refactoring of the code (https://github.com/ansible-collections/community.general/pull/3191).
      - zfs - added diff mode support (https://github.com/ansible-collections/community.general/pull/502).
      - zypper - prefix zypper commands with ``/sbin/transactional-update --continue
        --drop-if-no-change --quiet run`` if transactional updates are detected (https://github.com/ansible-collections/community.general/issues/3159).
      release_summary: Regular feature and bugfix release.
    fragments:
    - 273-add_multiple_options_with_same_name_to_ini_file.yml
    - 3.6.0.yml
    - 3041-gitlab_x_members_fix_and_enhancement.yml
    - 3141-disallow-options-unsupported-by-nmcli.yml
    - 3164-zypper-support-transactional-updates.yaml
    - 3191-vdo-refactor.yml
    - 3194-sanity.yml
    - 3199-tss-lookup-plugin-bugfix-for-backwards-compatibility.yml
    - 3203-linode-inventory-return-full-api-ip-data.yml
    - 3205-slack-minor-refactor.yaml
    - 3206-mh-classmethod.yaml
    - 3211-snap-error-handling.yml
    - 3228-tss-domain-authorization.yml
    - 3231-fix-keycloak-realm-events.yml
    - 3233-include-thermal-sensor-status-via-redfish_info.yaml
    - 3239-nmcli-sit-ipip-config-bugfix.yaml
    - 3247-retry_servfail-for-dig.yaml
    - 3250-parse-scw-config.yml
    - 3252-tss_lookup_plugin-refactor.yml
    - 3256-fix-ptr-handling-in-udm_dns_record.yml
    - 3258-apache2_module.yml
    - 3262-nmcli-add-gre-tunnel-support.yaml
    - 3266-vmid-existing-target-clone.yml
    - 3283-django_manage-fix-command-splitting.yaml
    - 3284-openwrt_init-improvements.yaml
    - 3285-pamd-updated-with-empty-args.yaml
    - 3286-open_iscsi-improvements.yaml
    - 3290-mh-cmd-boolean-not.yaml
    - 502-zfs_bugfix_and_diff_mode_support.yaml
    modules:
    - description: Allows administration of Keycloak identity providers via Keycloak
        API
      name: keycloak_identity_provider
      namespace: identity.keycloak
    release_date: '2021-08-31'
  3.7.0:
    changes:
      bugfixes:
      - copr - fix chroot naming issues, ``centos-stream`` changed naming to ``centos-stream-<number>``
        (for example ``centos-stream-8``) (https://github.com/ansible-collections/community.general/issues/2084,
        https://github.com/ansible-collections/community.general/pull/3237).
      - django_manage - parameters ``apps`` and ``fixtures`` are now split instead
        of being used as a single argument (https://github.com/ansible-collections/community.general/issues/3333).
      - interfaces_file - no longer reporting change when none happened (https://github.com/ansible-collections/community.general/pull/3328).
      - linode inventory plugin - fix default value of new option ``ip_style`` (https://github.com/ansible-collections/community.general/issues/3337).
      - openbsd_pkg - fix crash from ``KeyError`` exception when a package installs,
        but ``pkg_add`` returns with a non-zero exit code (https://github.com/ansible-collections/community.general/pull/3336).
      - redfish_utils module utils - if given, add account ID of user that should
        be created to HTTP request (https://github.com/ansible-collections/community.general/pull/3343/).
      minor_changes:
      - gitlab_group - add new options ``project_creation_level``, ``auto_devops_enabled``,
        ``subgroup_creation_level`` (https://github.com/ansible-collections/community.general/pull/3248).
      - gitlab_group - add new property ``require_two_factor_authentication`` (https://github.com/ansible-collections/community.general/pull/3367).
      - gitlab_project - add new properties ``ci_config_path`` and ``shared_runners_enabled``
        (https://github.com/ansible-collections/community.general/pull/3379).
      - gitlab_project_members - ``gitlab_user`` can now also be a list of users (https://github.com/ansible-collections/community.general/pull/3319).
      - gitlab_project_members - added functionality to set all members exactly as
        given (https://github.com/ansible-collections/community.general/pull/3319).
      - gitlab_runner - support project-scoped gitlab.com runners registration (https://github.com/ansible-collections/community.general/pull/634).
      - interfaces_file - minor refactor (https://github.com/ansible-collections/community.general/pull/3328).
      - ipa_config - add ``ipaselinuxusermaporder`` option to set the SELinux user
        map order (https://github.com/ansible-collections/community.general/pull/3178).
      - kernel_blacklist - revamped the module using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/3329).
      - lxd_container - add ``ignore_volatile_options`` option which allows disabling
        the behavior that the module ignores options starting with ``volatile.`` (https://github.com/ansible-collections/community.general/pull/3331).
      - nmcli - add ``gsm`` support (https://github.com/ansible-collections/community.general/pull/3313).
      - pids - refactor to add support for older ``psutil`` versions to the ``pattern``
        option (https://github.com/ansible-collections/community.general/pull/3315).
      - redfish_command and redfish_config and redfish_utils module utils - add parameter
        to strip etag of quotes before patch, since some vendors do not properly handle
        ``If-Match`` etags with quotes (https://github.com/ansible-collections/community.general/pull/3296).
      - tss lookup plugin - added ``token`` parameter for token authorization; ``username``
        and ``password`` are optional when ``token`` is provided (https://github.com/ansible-collections/community.general/pull/3327).
      - zpool_facts - minor refactoring (https://github.com/ansible-collections/community.general/pull/3332).
      release_summary: Regular feature and bugfix release.
    fragments:
    - 3.7.0.yml
    - 3178-add-ipaselinuxusermaporder-to-ipa-config-module.yml
    - 3237-copr-fix_chroot_naming.yml
    - 3248-adds-few-more-gitlab-group-options.yml
    - 3296-clean-etag.yaml
    - 3313-nmcli-add_gsm_support.yml
    - 3315-pids-refactor.yml
    - 3319-gitlab_project_members_enhancement.yml
    - 3327-tss-token-authorization.yml
    - 3328-interfaces_file-improvements.yaml
    - 3329-kernel_blacklist-improvements.yaml
    - 3331-do_not_ignore_volatile_configs_by_option.yml
    - 3332-zpool_facts-pythonify.yaml
    - 3334-django_manage-split-params.yaml
    - 3336-openbsd_pkg-fix-KeyError.yml
    - 3337-linode-fix.yml
    - 3343-redfish_utils-addUser-userId.yml
    - 3359-add-unicode_normalize-filter.yml
    - 3367-add-require_two_factor_authentication-property-to-gitlab-group.yml
    - 3379-gitlab_project-ci_cd_properties.yml
    - 634-gitlab_project_runners.yaml
    modules:
    - description: Allows administration of Keycloak user federations via Keycloak
        API
      name: keycloak_user_federation
      namespace: identity.keycloak
    - description: Set key value pairs in Redis
      name: redis_data
      namespace: database.misc
    - description: Get value of key in Redis database
      name: redis_data_info
      namespace: database.misc
    plugins:
      callback:
      - description: Create distributed traces with OpenTelemetry
        name: opentelemetry
        namespace: null
      filter:
      - description: Normalizes unicode strings to facilitate comparison of characters
          with normalized forms
        name: unicode_normalize
        namespace: null
      inventory:
      - description: Icinga2 inventory source
        name: icinga2
        namespace: null
    release_date: '2021-09-21'
  3.8.0:
    changes:
      bugfixes:
      - gitlab_deploy_key - fix idempotency on projects with multiple deploy keys
        (https://github.com/ansible-collections/community.general/pull/3473).
      - gitlab_group - avoid passing wrong value for ``require_two_factor_authentication``
        on creation when the option has not been specified (https://github.com/ansible-collections/community.general/pull/3453).
      - gitlab_group_members - ``get_group_id`` returns the group ID by matching ``full_path``,
        ``path`` or ``name`` (https://github.com/ansible-collections/community.general/pull/3400).
      - jboss - fix the deployment file permission issue when the JBoss server is running
        under a non-root user. The deployment file is copied with file content only.
        The file permission is set to ``440`` and belongs to the root user. When the JBoss
        ``WildFly`` server is running under a non-root user, it is unable to read the
        deployment file (https://github.com/ansible-collections/community.general/pull/3426).
      - keycloak_authentication - fix bug where the requirement was always ``DISABLED``
        when creating a new authentication flow (https://github.com/ansible-collections/community.general/pull/3330).
      - keycloak_identity_provider - fix change detection when updating identity provider
        mappers (https://github.com/ansible-collections/community.general/pull/3538,
        https://github.com/ansible-collections/community.general/issues/3537).
      - keycloak_role - quote role name when used in URL path to avoid errors when
        role names contain special characters (https://github.com/ansible-collections/community.general/issues/3535,
        https://github.com/ansible-collections/community.general/pull/3536).
      - logstash callback plugin - replace ``_option`` with ``context.CLIARGS`` to
        fix the plugin on ansible-base and ansible-core (https://github.com/ansible-collections/community.general/issues/2692).
      - macports - add ``stdout`` and ``stderr`` to return values (https://github.com/ansible-collections/community.general/issues/3499).
      - opentelemetry callback plugin - validated the task result exception without
        crashing. Also simplifying code a bit (https://github.com/ansible-collections/community.general/pull/3450,
        https://github.com/ansible/ansible/issues/75726).
      - yaml callback plugin - avoid modifying PyYAML so that other plugins using
        it on the controller, like the ``to_yaml`` filter, do not produce different
        output (https://github.com/ansible-collections/community.general/issues/3471,
        https://github.com/ansible-collections/community.general/pull/3478).
      - zypper_repository - when a URL to a .repo file was provided in option ``repo=``
```
|
||||
and ``state=present`` only the first run was successful, future runs failed
|
||||
due to missing checks prior starting zypper. Usage of ``state=absent`` in
|
||||
combination with a .repo file was not working either (https://github.com/ansible-collections/community.general/issues/1791,
|
||||
https://github.com/ansible-collections/community.general/issues/3466).
|
||||
minor_changes:
|
||||
- mail - added the ``ehlohost`` parameter which allows for manual override of
|
||||
the host used in SMTP EHLO (https://github.com/ansible-collections/community.general/pull/3425).
|
||||
- nmcli - the option ``routing_rules4`` can now be specified as a list of strings,
|
||||
instead of as a single string (https://github.com/ansible-collections/community.general/issues/3401).
|
||||
- open-iscsi - adding support for mutual authentication between target and initiator
|
||||
(https://github.com/ansible-collections/community.general/pull/3422).
|
||||
- opentelemetry callback plugin - added option ``enable_from_environment`` to
|
||||
support enabling the plugin only if the given environment variable exists
|
||||
and it is set to true (https://github.com/ansible-collections/community.general/pull/3498).
|
||||
- opentelemetry callback plugin - enriched the stacktrace information with the
|
||||
``message``, ``exception`` and ``stderr`` fields from the failed task (https://github.com/ansible-collections/community.general/pull/3496).
|
||||
- pkgng - packages being installed (or upgraded) are acted on in one command
|
||||
(per action) (https://github.com/ansible-collections/community.general/issues/2265).
|
||||
- pkgng - status message specifies number of packages installed and/or upgraded
|
||||
separately. Previously, all changes were reported as one count of packages
|
||||
"added" (https://github.com/ansible-collections/community.general/pull/3393).
|
||||
- terraform - add ``parallelism`` parameter (https://github.com/ansible-collections/community.general/pull/3540).
|
||||
- ufw - if ``delete=true`` and ``insert`` option is present, then ``insert``
|
||||
is now ignored rather than failing with a syntax error (https://github.com/ansible-collections/community.general/pull/3514).
|
||||
release_summary: Regular feature and bugfix release. Please note that this is
|
||||
the last minor 3.x.0 release; afterwards there will only be bugfix releases
|
||||
3.8.y.
|
||||
fragments:
|
||||
- 2692-logstash-callback-plugin-replacing_options.yml
|
||||
- 3.8.0.yml
|
||||
- 3330-bugfix-keycloak-authentication-flow-requirements-not-set-correctly.yml.yml
|
||||
- 3393-pkgng-many_packages_one_command.yml
|
||||
- 3400-fix-gitLab-api-searches-always-return-first-found-match-3386.yml
|
||||
- 3401-nmcli-needs-type.yml
|
||||
- 3422-open-iscsi-mutual-authentication-support.yaml
|
||||
- 3425-mail_add_configurable_ehlo_hostname.yml
|
||||
- 3426-copy-permissions-along-with-file-for-jboss-module.yml
|
||||
- 3450-callback_opentelemetry-exception_handling.yml
|
||||
- 3453-fix-gitlab_group-require_two_factor_authentication-cant_be_null.yml
|
||||
- 3473-gitlab_deploy_key-fix_idempotency.yml
|
||||
- 3474-zypper_repository_improve_repo_file_idempotency.yml
|
||||
- 3478-yaml-callback.yml
|
||||
- 3496-callback_opentelemetry-enrich_stacktraces.yml
|
||||
- 3498-callback_opentelemetry-only_in_ci.yml
|
||||
- 3500-macports-add-stdout-and-stderr-to-status.yaml
|
||||
- 3514-ufw_insert_or_delete_biased_when_deletion_enabled.yml
|
||||
- 3536-quote-role-name-in-url.yml
|
||||
- 3538-fix-keycloak-idp-mappers-change-detection.yml
|
||||
- 3540-terraform_add_parallelism_parameter.yml
|
||||
modules:
|
||||
- description: Manages applications installed with pipx
|
||||
name: pipx
|
||||
namespace: packaging.language
|
||||
- description: Retrieve information about one or more Proxmox VE tasks
|
||||
name: proxmox_tasks_info
|
||||
namespace: cloud.misc
|
||||
- description: Query executions for a Rundeck job
|
||||
name: rundeck_job_executions_info
|
||||
namespace: web_infrastructure
|
||||
- description: Run a Rundeck job
|
||||
name: rundeck_job_run
|
||||
namespace: web_infrastructure
|
||||
plugins:
|
||||
callback:
|
||||
- description: Create distributed traces for each Ansible task in Elastic APM
|
||||
name: elastic
|
||||
namespace: null
|
||||
inventory:
|
||||
- description: OpenNebula inventory source
|
||||
name: opennebula
|
||||
namespace: null
|
||||
release_date: '2021-10-12'
|
||||
|
||||
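The ``tss`` entry in the 3.7.0 changelog above describes the new token authorization flow. For illustration, a minimal lookup sketch; the base URL, the secret ID ``1``, and the ``tss_token`` variable are placeholder assumptions, only the ``token`` parameter itself comes from the changelog entry:

.. code-block:: yaml+jinja

    - name: Read a secret by ID using token authorization (sketch)
      ansible.builtin.debug:
        msg: "{{ lookup('community.general.tss', 1, base_url='https://secretserver.example.com/SecretServer/', token=tss_token) }}"
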
@@ -69,5 +69,6 @@ Individuals who have been asked to become a part of this group have generally be
| ------------------- | -------------------- | ------------------ | -------------------- |
| Alexei Znamensky    | russoz               | russoz             |                      |
| Andrew Klychkov     | andersson007         | andersson007_      |                      |
| Andrew Pantuso      | Ajpantuso            | ajpantuso          |                      |
| Felix Fontein       | felixfontein         | felixfontein       |                      |
| John R Barker       | gundalow             | gundalow           |                      |

@@ -751,3 +751,34 @@ To extract ports from all clusters with name containing 'server1':
    server_name_query: "domain.server[?contains(name,'server1')].port"

.. note:: While using ``starts_with`` and ``contains``, you have to use the ``to_json | from_json`` filter for correct parsing of the data structure.

Working with Unicode
--------------------

`Unicode <https://unicode.org/main.html>`_ makes it possible to produce two strings which may be visually equivalent, but are composed of distinctly different characters/character sequences. To address this, ``Unicode`` defines `normalization forms <https://unicode.org/reports/tr15/>`_ which avoid these distinctions by choosing a unique character sequence for a given visual representation.

You can use the ``community.general.unicode_normalize`` filter to normalize ``Unicode`` strings within your playbooks.

.. code-block:: yaml+jinja

    - name: Compare Unicode representations
      debug:
        msg: "{{ with_combining_character | community.general.unicode_normalize == without_combining_character }}"
      vars:
        with_combining_character: "{{ 'Mayagu\u0308ez' }}"
        without_combining_character: Mayagüez

This produces:

.. code-block:: ansible-output

    TASK [Compare Unicode representations] ********************************************************
    ok: [localhost] => {
        "msg": true
    }

The ``community.general.unicode_normalize`` filter accepts a keyword argument to select the ``Unicode`` form used to normalize the input string.

:form: One of ``'NFC'`` (default), ``'NFD'``, ``'NFKC'``, or ``'NFKD'``. See the `Unicode reference <https://unicode.org/reports/tr15/>`_ for more information.

.. versionadded:: 3.7.0

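For example, to normalize using compatibility decomposition instead of the default ``NFC`` (a minimal sketch; the ligature input ``ﬁle`` is an illustrative value):

.. code-block:: yaml+jinja

    - name: Normalize using compatibility decomposition
      debug:
        msg: "{{ 'ﬁle' | community.general.unicode_normalize('NFKD') }}"
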
@@ -1,6 +1,6 @@
namespace: community
name: general
version: 3.5.0
version: 3.8.0
readme: README.md
authors:
- Ansible (https://github.com/ansible)

8  plugins/cache/memcached.py  vendored

@@ -154,12 +154,12 @@ class CacheModuleKeys(MutableSet):
    def __len__(self):
        return len(self._keyset)

    def add(self, key):
        self._keyset[key] = time.time()
    def add(self, value):
        self._keyset[value] = time.time()
        self._cache.set(self.PREFIX, self._keyset)

    def discard(self, key):
        del self._keyset[key]
    def discard(self, value):
        del self._keyset[value]
        self._cache.set(self.PREFIX, self._keyset)

    def remove_by_timerange(self, s_min, s_max):

408  plugins/callback/elastic.py  Normal file

@@ -0,0 +1,408 @@
# (C) 2021, Victor Martinez <VictorMartinezRubio@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    author: Victor Martinez (@v1v) <VictorMartinezRubio@gmail.com>
    name: elastic
    type: notification
    short_description: Create distributed traces for each Ansible task in Elastic APM
    version_added: 3.8.0
    description:
      - This callback creates distributed traces for each Ansible task in Elastic APM.
      - You can configure the plugin with environment variables.
      - See U(https://www.elastic.co/guide/en/apm/agent/python/current/configuration.html).
    options:
      hide_task_arguments:
        default: false
        type: bool
        description:
          - Hide the arguments for a task.
        env:
          - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS
      apm_service_name:
        default: ansible
        type: str
        description:
          - The service name resource attribute.
        env:
          - name: ELASTIC_APM_SERVICE_NAME
      apm_server_url:
        type: str
        description:
          - Use the APM server and its environment variables.
        env:
          - name: ELASTIC_APM_SERVER_URL
      apm_secret_token:
        type: str
        description:
          - Use the APM server token.
        env:
          - name: ELASTIC_APM_SECRET_TOKEN
      apm_api_key:
        type: str
        description:
          - Use the APM API key.
        env:
          - name: ELASTIC_APM_API_KEY
      apm_verify_server_cert:
        default: true
        type: bool
        description:
          - Verifies the SSL certificate if an HTTPS connection is used.
        env:
          - name: ELASTIC_APM_VERIFY_SERVER_CERT
      traceparent:
        type: str
        description:
          - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header).
        env:
          - name: TRACEPARENT
    requirements:
      - elastic-apm (Python library)
'''


EXAMPLES = '''
examples: |
  Enable the plugin in ansible.cfg:
    [defaults]
    callbacks_enabled = community.general.elastic

  Set the environment variable:
    export ELASTIC_APM_SERVER_URL=<your APM server URL>
    export ELASTIC_APM_SERVICE_NAME=your_service_name
    export ELASTIC_APM_API_KEY=your_APM_API_KEY
'''

import getpass
import socket
import time
import uuid

from collections import OrderedDict
from os.path import basename

from ansible.errors import AnsibleError, AnsibleRuntimeError
from ansible.module_utils.six import raise_from
from ansible.plugins.callback import CallbackBase

try:
    from elasticapm import Client, capture_span, trace_parent_from_string, instrument, label
except ImportError as imp_exc:
    ELASTIC_LIBRARY_IMPORT_ERROR = imp_exc
else:
    ELASTIC_LIBRARY_IMPORT_ERROR = None


class TaskData:
    """
    Data about an individual task.
    """

    def __init__(self, uuid, name, path, play, action, args):
        self.uuid = uuid
        self.name = name
        self.path = path
        self.play = play
        self.host_data = OrderedDict()
        self.start = time.time()
        self.action = action
        self.args = args

    def add_host(self, host):
        if host.uuid in self.host_data:
            if host.status == 'included':
                # concatenate task include output from multiple items
                host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result)
            else:
                return

        self.host_data[host.uuid] = host


class HostData:
    """
    Data about an individual host.
    """

    def __init__(self, uuid, name, status, result):
        self.uuid = uuid
        self.name = name
        self.status = status
        self.result = result
        self.finish = time.time()


class ElasticSource(object):
    def __init__(self, display):
        self.ansible_playbook = ""
        self.ansible_version = None
        self.session = str(uuid.uuid4())
        self.host = socket.gethostname()
        try:
            self.ip_address = socket.gethostbyname(socket.gethostname())
        except Exception:
            self.ip_address = None
        self.user = getpass.getuser()

        self._display = display

    def start_task(self, tasks_data, hide_task_arguments, play_name, task):
        """ record the start of a task for one or more hosts """

        uuid = task._uuid

        if uuid in tasks_data:
            return

        name = task.get_name().strip()
        path = task.get_path()
        action = task.action
        args = None

        if not task.no_log and not hide_task_arguments:
            args = ', '.join(('%s=%s' % a for a in task.args.items()))

        tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args)

    def finish_task(self, tasks_data, status, result):
        """ record the results of a task for a single host """

        task_uuid = result._task._uuid

        if hasattr(result, '_host') and result._host is not None:
            host_uuid = result._host._uuid
            host_name = result._host.name
        else:
            host_uuid = 'include'
            host_name = 'include'

        task = tasks_data[task_uuid]

        if self.ansible_version is None and result._task_fields['args'].get('_ansible_version'):
            self.ansible_version = result._task_fields['args'].get('_ansible_version')

        task.add_host(HostData(host_uuid, host_name, status, result))

    def generate_distributed_traces(self, tasks_data, status, end_time, traceparent, apm_service_name,
                                    apm_server_url, apm_verify_server_cert, apm_secret_token, apm_api_key):
        """ generate distributed traces from the collected TaskData and HostData """

        tasks = []
        parent_start_time = None
        for task_uuid, task in tasks_data.items():
            if parent_start_time is None:
                parent_start_time = task.start
            tasks.append(task)

        apm_cli = self.init_apm_client(apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key)
        if apm_cli:
            instrument()  # Only call this once, as early as possible.
            if traceparent:
                parent = trace_parent_from_string(traceparent)
                apm_cli.begin_transaction("Session", trace_parent=parent, start=parent_start_time)
            else:
                apm_cli.begin_transaction("Session", start=parent_start_time)
            # Populate trace metadata attributes
            if self.ansible_version is not None:
                label(ansible_version=self.ansible_version)
            label(ansible_session=self.session, ansible_host_name=self.host, ansible_host_user=self.user)
            if self.ip_address is not None:
                label(ansible_host_ip=self.ip_address)

            for task_data in tasks:
                for host_uuid, host_data in task_data.host_data.items():
                    self.create_span_data(apm_cli, task_data, host_data)

            apm_cli.end_transaction(name=__name__, result=status, duration=end_time - parent_start_time)

    def create_span_data(self, apm_cli, task_data, host_data):
        """ create the span with the given TaskData and HostData """

        name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)

        message = "success"
        status = "success"
        if host_data.status == 'included':
            rc = 0
        else:
            res = host_data.result._result
            rc = res.get('rc', 0)
            if host_data.status == 'failed':
                if res.get('exception') is not None:
                    message = res['exception'].strip().split('\n')[-1]
                elif 'msg' in res:
                    message = res['msg']
                else:
                    message = 'failed'
                status = "failure"
            elif host_data.status == 'skipped':
                if 'skip_reason' in res:
                    message = res['skip_reason']
                else:
                    message = 'skipped'
                status = "unknown"

        with capture_span(task_data.name,
                          start=task_data.start,
                          span_type="ansible.task.run",
                          duration=host_data.finish - task_data.start,
                          labels={"ansible.task.args": task_data.args,
                                  "ansible.task.message": message,
                                  "ansible.task.module": task_data.action,
                                  "ansible.task.name": name,
                                  "ansible.task.result": rc,
                                  "ansible.task.host.name": host_data.name,
                                  "ansible.task.host.status": host_data.status}) as span:
            span.outcome = status
            if 'failure' in status:
                exception = AnsibleRuntimeError(message="{0}: {1} failed with error message {2}".format(task_data.action, name, message))
                apm_cli.capture_exception(exc_info=(type(exception), exception, exception.__traceback__), handled=True)

    def init_apm_client(self, apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key):
        if apm_server_url:
            return Client(service_name=apm_service_name,
                          server_url=apm_server_url,
                          verify_server_cert=apm_verify_server_cert,  # honor the apm_verify_server_cert option
                          secret_token=apm_secret_token,
                          api_key=apm_api_key,
                          use_elastic_traceparent_header=True,
                          debug=True)


class CallbackModule(CallbackBase):
    """
    This callback creates distributed traces with Elastic APM.
    """

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'community.general.elastic'
    CALLBACK_NEEDS_ENABLED = True

    def __init__(self, display=None):
        super(CallbackModule, self).__init__(display=display)
        self.hide_task_arguments = None
        self.apm_service_name = None
        self.ansible_playbook = None
        self.traceparent = False
        self.play_name = None
        self.tasks_data = None
        self.errors = 0
        self.disabled = False

        if ELASTIC_LIBRARY_IMPORT_ERROR:
            raise_from(
                AnsibleError('The `elastic-apm` Python library must be installed to use this plugin'),
                ELASTIC_LIBRARY_IMPORT_ERROR)

        self.tasks_data = OrderedDict()

        self.elastic = ElasticSource(display=self._display)

    def set_options(self, task_keys=None, var_options=None, direct=None):
        super(CallbackModule, self).set_options(task_keys=task_keys,
                                                var_options=var_options,
                                                direct=direct)

        self.hide_task_arguments = self.get_option('hide_task_arguments')

        self.apm_service_name = self.get_option('apm_service_name')
        if not self.apm_service_name:
            self.apm_service_name = 'ansible'

        self.apm_server_url = self.get_option('apm_server_url')
        self.apm_secret_token = self.get_option('apm_secret_token')
        self.apm_api_key = self.get_option('apm_api_key')
        self.apm_verify_server_cert = self.get_option('apm_verify_server_cert')
        self.traceparent = self.get_option('traceparent')

    def v2_playbook_on_start(self, playbook):
        self.ansible_playbook = basename(playbook._file_name)

    def v2_playbook_on_play_start(self, play):
        self.play_name = play.get_name()

    def v2_runner_on_no_hosts(self, task):
        self.elastic.start_task(
            self.tasks_data,
            self.hide_task_arguments,
            self.play_name,
            task
        )

    def v2_playbook_on_task_start(self, task, is_conditional):
        self.elastic.start_task(
            self.tasks_data,
            self.hide_task_arguments,
            self.play_name,
            task
        )

    def v2_playbook_on_cleanup_task_start(self, task):
        self.elastic.start_task(
            self.tasks_data,
            self.hide_task_arguments,
            self.play_name,
            task
        )

    def v2_playbook_on_handler_task_start(self, task):
        self.elastic.start_task(
            self.tasks_data,
            self.hide_task_arguments,
            self.play_name,
            task
        )

    def v2_runner_on_failed(self, result, ignore_errors=False):
        self.errors += 1
        self.elastic.finish_task(
            self.tasks_data,
            'failed',
            result
        )

    def v2_runner_on_ok(self, result):
        self.elastic.finish_task(
            self.tasks_data,
            'ok',
            result
        )

    def v2_runner_on_skipped(self, result):
        self.elastic.finish_task(
            self.tasks_data,
            'skipped',
            result
        )

    def v2_playbook_on_include(self, included_file):
        self.elastic.finish_task(
            self.tasks_data,
            'included',
            included_file
        )

    def v2_playbook_on_stats(self, stats):
        if self.errors == 0:
            status = "success"
        else:
            status = "failure"
        self.elastic.generate_distributed_traces(
            self.tasks_data,
            status,
            time.time(),
            self.traceparent,
            self.apm_service_name,
            self.apm_server_url,
            self.apm_verify_server_cert,
            self.apm_secret_token,
            self.apm_api_key
        )

    def v2_runner_on_async_failed(self, result, **kwargs):
        self.errors += 1

@@ -78,7 +78,7 @@ def get_mac():

# Getting hostname of system:
def get_hostname():
    return str(socket.gethostname()).split('.local')[0]
    return str(socket.gethostname()).split('.local', 1)[0]


# Getting IP of system:

@@ -94,6 +94,7 @@ ansible.cfg: |

import os
import json
from ansible import context
import socket
import uuid
import logging

@@ -152,11 +153,11 @@ class CallbackModule(CallbackBase):
        self.base_data['ansible_pre_command_output'] = os.popen(
            self.ls_pre_command).read()

        if self._options is not None:
            self.base_data['ansible_checkmode'] = self._options.check
            self.base_data['ansible_tags'] = self._options.tags
            self.base_data['ansible_skip_tags'] = self._options.skip_tags
            self.base_data['inventory'] = self._options.inventory
        if context.CLIARGS is not None:
            self.base_data['ansible_checkmode'] = context.CLIARGS.get('check')
            self.base_data['ansible_tags'] = context.CLIARGS.get('tags')
            self.base_data['ansible_skip_tags'] = context.CLIARGS.get('skip_tags')
            self.base_data['inventory'] = context.CLIARGS.get('inventory')

    def set_options(self, task_keys=None, var_options=None, direct=None):
        super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)

437  plugins/callback/opentelemetry.py  Normal file

@@ -0,0 +1,437 @@
# (C) 2021, Victor Martinez <VictorMartinezRubio@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    author: Victor Martinez (@v1v) <VictorMartinezRubio@gmail.com>
    name: opentelemetry
    type: notification
    short_description: Create distributed traces with OpenTelemetry
    version_added: 3.7.0
    description:
      - This callback creates distributed traces for each Ansible task with OpenTelemetry.
      - You can configure the OpenTelemetry exporter and SDK with environment variables.
      - See U(https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html).
      - See U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#opentelemetry-sdk-environment-variables).
    options:
      hide_task_arguments:
        default: false
        type: bool
        description:
          - Hide the arguments for a task.
        env:
          - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS
      enable_from_environment:
        type: str
        description:
          - Whether to enable this callback only if the given environment variable exists and it is set to C(true).
          - This is handy when you use Configuration as Code and want to send distributed traces
            if running in the CI rather than when running Ansible locally.
          - To do so, it evaluates the given I(enable_from_environment) value as an environment variable
            and, if set to true, this plugin will be enabled.
        env:
          - name: ANSIBLE_OPENTELEMETRY_ENABLE_FROM_ENVIRONMENT
        version_added: 3.8.0
      otel_service_name:
        default: ansible
        type: str
        description:
          - The service name resource attribute.
        env:
          - name: OTEL_SERVICE_NAME
      traceparent:
        default: None
        type: str
        description:
          - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header).
        env:
          - name: TRACEPARENT
    requirements:
      - opentelemetry-api (Python library)
      - opentelemetry-exporter-otlp (Python library)
      - opentelemetry-sdk (Python library)
'''


EXAMPLES = '''
examples: |
  Enable the plugin in ansible.cfg:
    [defaults]
    callbacks_enabled = community.general.opentelemetry

  Set the environment variable:
    export OTEL_EXPORTER_OTLP_ENDPOINT=<your endpoint (OTLP/HTTP)>
    export OTEL_EXPORTER_OTLP_HEADERS="authorization=Bearer your_otel_token"
    export OTEL_SERVICE_NAME=your_service_name
'''

import getpass
import os
import socket
import sys
import time
import uuid

from collections import OrderedDict
from os.path import basename

from ansible.errors import AnsibleError
from ansible.module_utils.six import raise_from
from ansible.plugins.callback import CallbackBase

try:
    from opentelemetry import trace
    from opentelemetry.trace import SpanKind
    from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
    from opentelemetry.sdk.resources import SERVICE_NAME, Resource
    from opentelemetry.trace.status import Status, StatusCode
    from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import (
        ConsoleSpanExporter,
        SimpleSpanProcessor,
        BatchSpanProcessor
    )
    from opentelemetry.util._time import _time_ns
except ImportError as imp_exc:
    OTEL_LIBRARY_IMPORT_ERROR = imp_exc
else:
    OTEL_LIBRARY_IMPORT_ERROR = None


class TaskData:
    """
    Data about an individual task.
    """

    def __init__(self, uuid, name, path, play, action, args):
        self.uuid = uuid
        self.name = name
        self.path = path
        self.play = play
        self.host_data = OrderedDict()
        if sys.version_info >= (3, 7):
            self.start = time.time_ns()
        else:
            # time.time_ns() only exists on Python >= 3.7
            self.start = _time_ns()
        self.action = action
        self.args = args

    def add_host(self, host):
        if host.uuid in self.host_data:
            if host.status == 'included':
                # concatenate task include output from multiple items
                host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result)
            else:
                return

        self.host_data[host.uuid] = host


class HostData:
    """
    Data about an individual host.
    """

    def __init__(self, uuid, name, status, result):
        self.uuid = uuid
        self.name = name
        self.status = status
        self.result = result
        if sys.version_info >= (3, 7):
            self.finish = time.time_ns()
        else:
            self.finish = _time_ns()


class OpenTelemetrySource(object):
    def __init__(self, display):
        self.ansible_playbook = ""
        self.ansible_version = None
        self.session = str(uuid.uuid4())
        self.host = socket.gethostname()
        try:
            self.ip_address = socket.gethostbyname(socket.gethostname())
        except Exception:
            self.ip_address = None
        self.user = getpass.getuser()

        self._display = display

    def traceparent_context(self, traceparent):
        carrier = dict()
        carrier['traceparent'] = traceparent
        return TraceContextTextMapPropagator().extract(carrier=carrier)

    def start_task(self, tasks_data, hide_task_arguments, play_name, task):
        """ record the start of a task for one or more hosts """

        uuid = task._uuid

        if uuid in tasks_data:
            return

        name = task.get_name().strip()
        path = task.get_path()
        action = task.action
        args = None

        if not task.no_log and not hide_task_arguments:
            args = ', '.join(('%s=%s' % a for a in task.args.items()))

        tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args)

    def finish_task(self, tasks_data, status, result):
        """ record the results of a task for a single host """

        task_uuid = result._task._uuid

        if hasattr(result, '_host') and result._host is not None:
            host_uuid = result._host._uuid
            host_name = result._host.name
        else:
            host_uuid = 'include'
            host_name = 'include'

        task = tasks_data[task_uuid]

        if self.ansible_version is None and result._task_fields['args'].get('_ansible_version'):
            self.ansible_version = result._task_fields['args'].get('_ansible_version')

        task.add_host(HostData(host_uuid, host_name, status, result))

    def generate_distributed_traces(self, otel_service_name, ansible_playbook, tasks_data, status, traceparent):
        """ generate distributed traces from the collected TaskData and HostData """

        tasks = []
        parent_start_time = None
        for task_uuid, task in tasks_data.items():
            if parent_start_time is None:
                parent_start_time = task.start
            tasks.append(task)

        trace.set_tracer_provider(
            TracerProvider(
                resource=Resource.create({SERVICE_NAME: otel_service_name})
            )
        )

        processor = BatchSpanProcessor(OTLPSpanExporter())

        trace.get_tracer_provider().add_span_processor(processor)

        tracer = trace.get_tracer(__name__)

        with tracer.start_as_current_span(ansible_playbook, context=self.traceparent_context(traceparent),
                                          start_time=parent_start_time, kind=SpanKind.SERVER) as parent:
            parent.set_status(status)
            # Populate trace metadata attributes
            if self.ansible_version is not None:
                parent.set_attribute("ansible.version", self.ansible_version)
            parent.set_attribute("ansible.session", self.session)
            parent.set_attribute("ansible.host.name", self.host)
            if self.ip_address is not None:
                parent.set_attribute("ansible.host.ip", self.ip_address)
            parent.set_attribute("ansible.host.user", self.user)
            for task in tasks:
                for host_uuid, host_data in task.host_data.items():
                    with tracer.start_as_current_span(task.name, start_time=task.start, end_on_exit=False) as span:
                        self.update_span_data(task, host_data, span)

    def update_span_data(self, task_data, host_data, span):
        """ update the span with the given TaskData and HostData """

        name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)

        message = 'success'
        status = Status(status_code=StatusCode.OK)
        if host_data.status == 'included':
            rc = 0
        else:
            res = host_data.result._result
            rc = res.get('rc', 0)
            if host_data.status == 'failed':
                message = self.get_error_message(res)
                status = Status(status_code=StatusCode.ERROR, description=message)
                # Record an exception with the task message
                span.record_exception(BaseException(self.enrich_error_message(res)))
            elif host_data.status == 'skipped':
                if 'skip_reason' in res:
                    message = res['skip_reason']
                else:
                    message = 'skipped'
                status = Status(status_code=StatusCode.UNSET)

        span.set_status(status)
        self.set_span_attribute(span, "ansible.task.args", task_data.args)
        self.set_span_attribute(span, "ansible.task.module", task_data.action)
        self.set_span_attribute(span, "ansible.task.message", message)
        self.set_span_attribute(span, "ansible.task.name", name)
        self.set_span_attribute(span, "ansible.task.result", rc)
        self.set_span_attribute(span, "ansible.task.host.name", host_data.name)
        self.set_span_attribute(span, "ansible.task.host.status", host_data.status)
        span.end(end_time=host_data.finish)

    def set_span_attribute(self, span, attributeName, attributeValue):
        """ update the span attribute with the given attribute and value if not None """

        if span is None and self._display is not None:
            self._display.warning('span object is None. Please double check if that is expected.')
        else:
            if attributeValue is not None:
                span.set_attribute(attributeName, attributeValue)

    @staticmethod
    def get_error_message(result):
        if result.get('exception') is not None:
            return OpenTelemetrySource._last_line(result['exception'])
        return result.get('msg', 'failed')

    @staticmethod
    def _last_line(text):
        lines = text.strip().split('\n')
        return lines[-1]

    @staticmethod
    def enrich_error_message(result):
        message = result.get('msg', 'failed')
        exception = result.get('exception')
        stderr = result.get('stderr')
        return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr)


class CallbackModule(CallbackBase):
    """
    This callback creates distributed traces.
    """

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'community.general.opentelemetry'
    CALLBACK_NEEDS_ENABLED = True

    def __init__(self, display=None):
        super(CallbackModule, self).__init__(display=display)
        self.hide_task_arguments = None
        self.otel_service_name = None
        self.ansible_playbook = None
        self.play_name = None
        self.tasks_data = None
        self.errors = 0
        self.disabled = False
        self.traceparent = False

        if OTEL_LIBRARY_IMPORT_ERROR:
            raise_from(
                AnsibleError('The `opentelemetry-api`, `opentelemetry-exporter-otlp` or `opentelemetry-sdk` Python libraries must be installed to use this plugin'),
                OTEL_LIBRARY_IMPORT_ERROR)

        self.tasks_data = OrderedDict()

        self.opentelemetry = OpenTelemetrySource(display=self._display)

    def set_options(self, task_keys=None, var_options=None, direct=None):
        super(CallbackModule, self).set_options(task_keys=task_keys,
                                                var_options=var_options,
                                                direct=direct)

        environment_variable = self.get_option('enable_from_environment')
        if environment_variable is not None and os.environ.get(environment_variable, 'false').lower() != 'true':
            self.disabled = True
            self._display.warning("The `enable_from_environment` option has been set and {0} is not enabled. "
                                  "Disabling the `opentelemetry` callback plugin.".format(environment_variable))

        self.hide_task_arguments = self.get_option('hide_task_arguments')

        self.otel_service_name = self.get_option('otel_service_name')

        if not self.otel_service_name:
            self.otel_service_name = 'ansible'

        # See https://github.com/open-telemetry/opentelemetry-specification/issues/740
        self.traceparent = self.get_option('traceparent')

    def v2_playbook_on_start(self, playbook):
        self.ansible_playbook = basename(playbook._file_name)

    def v2_playbook_on_play_start(self, play):
        self.play_name = play.get_name()

    def v2_runner_on_no_hosts(self, task):
        self.opentelemetry.start_task(
            self.tasks_data,
            self.hide_task_arguments,
            self.play_name,
            task
        )

    def v2_playbook_on_task_start(self, task, is_conditional):
        self.opentelemetry.start_task(
            self.tasks_data,
            self.hide_task_arguments,
            self.play_name,
            task
        )

    def v2_playbook_on_cleanup_task_start(self, task):
        self.opentelemetry.start_task(
            self.tasks_data,
            self.hide_task_arguments,
            self.play_name,
            task
        )

    def v2_playbook_on_handler_task_start(self, task):
        self.opentelemetry.start_task(
            self.tasks_data,
            self.hide_task_arguments,
            self.play_name,
            task
        )

    def v2_runner_on_failed(self, result, ignore_errors=False):
        self.errors += 1
        self.opentelemetry.finish_task(
            self.tasks_data,
            'failed',
            result
        )

    def v2_runner_on_ok(self, result):
        self.opentelemetry.finish_task(
            self.tasks_data,
            'ok',
            result
        )

    def v2_runner_on_skipped(self, result):
        self.opentelemetry.finish_task(
            self.tasks_data,
            'skipped',
            result
        )

    def v2_playbook_on_include(self, included_file):
        self.opentelemetry.finish_task(
            self.tasks_data,
            'included',
            included_file
        )

    def v2_playbook_on_stats(self, stats):
        if self.errors == 0:
            status = Status(status_code=StatusCode.OK)
        else:
            status = Status(status_code=StatusCode.ERROR)
        self.opentelemetry.generate_distributed_traces(
            self.otel_service_name,
            self.ansible_playbook,
            self.tasks_data,
            status,
            self.traceparent
        )

    def v2_runner_on_async_failed(self, result, **kwargs):
        self.errors += 1

@@ -42,28 +42,29 @@ def should_use_block(value):
    return False


def my_represent_scalar(self, tag, value, style=None):
    """Uses block style for multi-line strings"""
    if style is None:
        if should_use_block(value):
            style = '|'
            # we care more about readable than accuracy, so...
            # ...no trailing space
            value = value.rstrip()
            # ...and non-printable characters
            value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
            # ...tabs prevent blocks from expanding
            value = value.expandtabs()
            # ...and odd bits of whitespace
            value = re.sub(r'[\x0b\x0c\r]', '', value)
            # ...as does trailing space
            value = re.sub(r' +\n', '\n', value)
        else:
            style = self.default_style
    node = yaml.representer.ScalarNode(tag, value, style=style)
    if self.alias_key is not None:
        self.represented_objects[self.alias_key] = node
    return node
class MyDumper(AnsibleDumper):
    def represent_scalar(self, tag, value, style=None):
        """Uses block style for multi-line strings"""
        if style is None:
            if should_use_block(value):
                style = '|'
                # we care more about readable than accuracy, so...
                # ...no trailing space
                value = value.rstrip()
                # ...and non-printable characters
                value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
                # ...tabs prevent blocks from expanding
                value = value.expandtabs()
                # ...and odd bits of whitespace
                value = re.sub(r'[\x0b\x0c\r]', '', value)
                # ...as does trailing space
                value = re.sub(r' +\n', '\n', value)
            else:
                style = self.default_style
        node = yaml.representer.ScalarNode(tag, value, style=style)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        return node


class CallbackModule(Default):

@@ -79,7 +80,6 @@ class CallbackModule(Default):

    def __init__(self):
        super(CallbackModule, self).__init__()
        yaml.representer.BaseRepresenter.represent_scalar = my_represent_scalar

    def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
        if result.get('_ansible_no_log', False):

@@ -121,7 +121,7 @@ class CallbackModule(Default):

        if abridged_result:
            dumped += '\n'
            dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False))
            dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=MyDumper, default_flow_style=False))

        # indent by a couple of spaces
        dumped = '\n  '.join(dumped.split('\n')).rstrip()

@@ -51,7 +51,7 @@ class Connection(ConnectionBase):
        self._connected = True
        return self

    def exec_command(self, cmd, sudoable=False, in_data=None):
    def exec_command(self, cmd, in_data=None, sudoable=False):
        """ run a command on the remote minion """
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)


57  plugins/doc_fragments/redis.py  Normal file

@@ -0,0 +1,57 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Andreas Botzner <andreas at botzner dot com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):
    # Common parameters for Redis modules
    DOCUMENTATION = r'''
options:
  login_host:
    description:
      - Specify the target host running the database.
    default: localhost
    type: str
  login_port:
    description:
      - Specify the port to connect to.
    default: 6379
    type: int
  login_user:
    description:
      - Specify the user to authenticate with.
      - Requires L(redis,https://pypi.org/project/redis) >= 3.4.0.
    type: str
  login_password:
    description:
      - Specify the password to authenticate with.
      - Usually not used when target is localhost.
    type: str
  tls:
    description:
      - Specify whether or not to use TLS for the connection.
    type: bool
    default: true
  validate_certs:
    description:
      - Specify whether or not to validate TLS certificates.
      - This should only be turned off for personally controlled sites or with
        C(localhost) as target.
    type: bool
    default: true
  ca_certs:
    description:
      - Path to root certificates file. If not set and I(tls) is
        set to C(true), the C(certifi) CA certificates will be used.
    type: str
requirements: [ "redis", "certifi" ]

notes:
  - Requires the C(redis) Python package on the remote host. You can
    install it with pip (C(pip install redis)) or with a package manager.
    Information on the library can be found at U(https://github.com/andymccurdy/redis-py).
'''

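As a quick illustration of these shared connection options, a minimal sketch of a task using the new ``redis_data`` module from the 3.7.0 changelog (the host and password are placeholder values):

.. code-block:: yaml+jinja

    - name: Set a key in a local Redis instance (sketch)
      community.general.redis_data:
        login_host: localhost
        login_password: supersecret
        key: foo
        value: bar
        state: present
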
31  plugins/doc_fragments/rundeck.py  Normal file

@@ -0,0 +1,31 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Phillipe Smith <phsmithcc@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):

    # Standard Rundeck documentation fragment
    DOCUMENTATION = r'''
options:
  url:
    type: str
    description:
      - Rundeck instance URL.
    required: true
  api_version:
    type: int
    description:
      - Rundeck API version to be used.
      - API version must be at least 14.
    default: 39
  api_token:
    type: str
    description:
      - Rundeck User API Token.
    required: true
'''

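These options are shared by the new Rundeck modules listed in the 3.8.0 changelog. A minimal sketch of running a job with ``rundeck_job_run``; the URL, token variable, and job ID are placeholder assumptions:

.. code-block:: yaml+jinja

    - name: Run a Rundeck job (sketch)
      community.general.rundeck_job_run:
        url: https://rundeck.example.org
        api_version: 39
        api_token: "{{ rundeck_api_token }}"
        job_id: "01234567-89ab-cdef-0123-456789abcdef"  # hypothetical job ID
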
40  plugins/filter/unicode_normalize.py  Normal file

@@ -0,0 +1,40 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) <ajpantuso@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

from unicodedata import normalize

from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError
from ansible.module_utils.six import text_type


def unicode_normalize(data, form='NFC'):
    """Applies normalization to 'unicode' strings.

    Args:
        data: A unicode string piped into the Jinja filter
        form: One of ('NFC', 'NFD', 'NFKC', 'NFKD').
            See https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize for more information.

    Returns:
        A normalized unicode string of the specified 'form'.
    """

    if not isinstance(data, text_type):
        raise AnsibleFilterTypeError("%s is not a valid input type" % type(data))

    if form not in ('NFC', 'NFD', 'NFKC', 'NFKD'):
        raise AnsibleFilterError("%s is not a valid form" % form)

    return normalize(form, data)


class FilterModule(object):
    def filters(self):
        return {
            'unicode_normalize': unicode_normalize,
        }

222
plugins/inventory/icinga2.py
Normal file
222
plugins/inventory/icinga2.py
Normal file
@@ -0,0 +1,222 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2021, Cliff Hults <cliff.hlts@gmail.com>
|
||||
# Copyright (c) 2021 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: icinga2
|
||||
short_description: Icinga2 inventory source
|
||||
version_added: 3.7.0
|
||||
author:
|
||||
- Cliff Hults (@BongoEADGC6) <cliff.hults@gmail.com>
|
||||
description:
|
||||
- Get inventory hosts from the Icinga2 API.
|
||||
- "Uses a configuration file as an inventory source, it must end in
|
||||
C(.icinga2.yml) or C(.icinga2.yaml)."
|
||||
options:
|
||||
plugin:
|
||||
description: Name of the plugin.
|
||||
required: true
|
||||
type: string
|
||||
choices: ['community.general.icinga2']
|
||||
url:
|
||||
description: Root URL of Icinga2 API.
|
||||
type: string
|
||||
required: true
|
||||
user:
|
||||
description: Username to query the API.
|
||||
type: string
|
||||
required: true
|
||||
password:
|
||||
description: Password to query the API.
|
||||
type: string
|
||||
required: true
|
||||
host_filter:
|
||||
description: An Icinga2 API valid host filter.
|
||||
type: string
|
||||
required: false
|
||||
validate_certs:
|
||||
description: Enables or disables SSL certificate verification.
|
||||
type: boolean
|
||||
default: true
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
# my.icinga2.yml
|
||||
plugin: community.general.icinga2
|
||||
url: http://localhost:5665
|
||||
user: ansible
|
||||
password: secure
|
||||
host_filter: \"linux-servers\" in host.groups
|
||||
validate_certs: false
|
||||
'''
|
||||
|
||||
import json
|
||||
|
||||
from ansible.errors import AnsibleParserError
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||
from ansible.module_utils.urls import open_url
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
''' Host inventory parser for ansible using Icinga2 as source. '''
|
||||
|
||||
NAME = 'community.general.icinga2'
|
||||
|
||||
def __init__(self):
|
||||
|
||||
super(InventoryModule, self).__init__()
|
||||
|
||||
# from config
|
||||
self.icinga2_url = None
|
||||
self.icinga2_user = None
|
||||
self.icinga2_password = None
|
||||
self.ssl_verify = None
|
||||
self.host_filter = None
|
||||
|
||||
self.cache_key = None
|
||||
self.use_cache = None
|
||||
|
||||
def verify_file(self, path):
|
||||
valid = False
|
||||
if super(InventoryModule, self).verify_file(path):
|
||||
if path.endswith(('icinga2.yaml', 'icinga2.yml')):
|
||||
valid = True
|
||||
else:
|
||||
self.display.vvv('Skipping due to inventory source not ending in "icinga2.yaml" nor "icinga2.yml"')
|
||||
return valid
|
||||
|
||||
def _api_connect(self):
|
||||
self.headers = {
|
||||
'User-Agent': "ansible-icinga2-inv",
|
||||
'Accept': "application/json",
|
||||
}
|
||||
api_status_url = self.icinga2_url + "/status"
|
||||
request_args = {
|
||||
'headers': self.headers,
|
||||
'url_username': self.icinga2_user,
|
||||
'url_password': self.icinga2_password,
|
||||
'validate_certs': self.ssl_verify
|
||||
}
|
||||
open_url(api_status_url, **request_args)
|
||||
|
||||
def _post_request(self, request_url, data=None):
|
||||
self.display.vvv("Requested URL: %s" % request_url)
|
||||
request_args = {
|
||||
'headers': self.headers,
|
||||
'url_username': self.icinga2_user,
|
||||
            'url_password': self.icinga2_password,
            'validate_certs': self.ssl_verify
        }
        if data is not None:
            request_args['data'] = json.dumps(data)
        self.display.vvv("Request Args: %s" % request_args)
        response = open_url(request_url, **request_args)
        response_body = response.read()
        json_data = json.loads(response_body.decode('utf-8'))
        if 200 <= response.status <= 299:
            return json_data
        if response.status == 404 and json_data['status'] == "No objects found.":
            raise AnsibleParserError(
                "API returned no data -- Response: %s - %s"
                % (response.status, json_data['status']))
        if response.status == 401:
            raise AnsibleParserError(
                "API was unable to complete query -- Response: %s - %s"
                % (response.status, json_data['status']))
        if response.status == 500:
            raise AnsibleParserError(
                "API Response - %s - %s"
                % (json_data['status'], json_data['errors']))
        raise AnsibleParserError(
            "Unexpected data returned - %s - %s"
            % (json_data['status'], json_data['errors']))

    def _query_hosts(self, hosts=None, attrs=None, joins=None, host_filter=None):
        query_hosts_url = "{0}/objects/hosts".format(self.icinga2_url)
        self.headers['X-HTTP-Method-Override'] = 'GET'
        data_dict = dict()
        if hosts:
            data_dict['hosts'] = hosts
        if attrs is not None:
            data_dict['attrs'] = attrs
        if joins is not None:
            data_dict['joins'] = joins
        if host_filter is not None:
            data_dict['filter'] = host_filter.replace("\\\"", "\"")
            self.display.vvv(host_filter)
        host_dict = self._post_request(query_hosts_url, data_dict)
        return host_dict['results']

    def get_inventory_from_icinga(self):
        """Query for all hosts"""
        self.display.vvv("Querying Icinga2 for inventory")
        query_args = {
            "attrs": ["address", "state_type", "state", "groups"],
        }
        if self.host_filter is not None:
            query_args['host_filter'] = self.host_filter
        # Icinga2 API Call
        results_json = self._query_hosts(**query_args)
        # Manipulate returned API data to Ansible inventory spec
        ansible_inv = self._convert_inv(results_json)
        return ansible_inv

    def _populate(self):
        groups = self._to_json(self.get_inventory_from_icinga())
        return groups

    def _to_json(self, in_dict):
        """Convert dictionary to JSON"""
        return json.dumps(in_dict, sort_keys=True, indent=2)

    def _convert_inv(self, json_data):
        """Convert Icinga2 API data to JSON format for Ansible"""
        groups_dict = {"_meta": {"hostvars": {}}}
        for entry in json_data:
            host_name = entry['name']
            host_attrs = entry['attrs']
            if host_attrs['state'] == 0:
                host_attrs['state'] = 'on'
            else:
                host_attrs['state'] = 'off'
            host_groups = host_attrs['groups']
            host_addr = host_attrs['address']
            self.inventory.add_host(host_addr)
            for group in host_groups:
                if group not in self.inventory.groups.keys():
                    self.inventory.add_group(group)
                self.inventory.add_child(group, host_addr)
            self.inventory.set_variable(host_addr, 'address', host_addr)
            self.inventory.set_variable(host_addr, 'hostname', host_name)
            self.inventory.set_variable(host_addr, 'state',
                                        host_attrs['state'])
            self.inventory.set_variable(host_addr, 'state_type',
                                        host_attrs['state_type'])
        return groups_dict

    def parse(self, inventory, loader, path, cache=True):

        super(InventoryModule, self).parse(inventory, loader, path)

        # read config from file, this sets 'options'
        self._read_config_data(path)

        # Store the options from the YAML file
        self.icinga2_url = self.get_option('url').rstrip('/') + '/v1'
        self.icinga2_user = self.get_option('user')
        self.icinga2_password = self.get_option('password')
        self.ssl_verify = self.get_option('validate_certs')
        self.host_filter = self.get_option('host_filter')
        # Not currently enabled
        # self.cache_key = self.get_cache_key(path)
        # self.use_cache = cache and self.get_option('cache')

        # Test connection to API
        self._api_connect()

        # Call our internal helper to populate the dynamic inventory
        self._populate()
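For orientation, here is a minimal sketch (sample data invented for illustration) of the hostvars that `_convert_inv` derives from a single Icinga2 API entry; the real plugin writes these into `self.inventory` rather than a plain dict:

# Illustration only: mirrors the state/group handling of _convert_inv above
# on a hand-written sample entry (not real API output).
sample_entry = {
    'name': 'web01.example.org',
    'attrs': {
        'address': '192.0.2.10',
        'state': 0,            # 0 is mapped to 'on', anything else to 'off'
        'state_type': 1,
        'groups': ['linux-servers', 'web-servers'],
    },
}

attrs = sample_entry['attrs']
hostvars = {
    'address': attrs['address'],
    'hostname': sample_entry['name'],
    'state': 'on' if attrs['state'] == 0 else 'off',
    'state_type': attrs['state_type'],
}
print(hostvars)
# {'address': '192.0.2.10', 'hostname': 'web01.example.org', 'state': 'on', 'state_type': 1}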
@@ -23,9 +23,17 @@ DOCUMENTATION = r'''
        - constructed
    options:
        plugin:
-           description: marks this as an instance of the 'linode' plugin
+           description: Marks this as an instance of the 'linode' plugin.
            required: true
            choices: ['linode', 'community.general.linode']
        ip_style:
            description: Populate hostvars with all information available from the Linode APIv4.
            type: string
            default: plain
            choices:
                - plain
                - api
            version_added: 3.6.0
        access_token:
            description: The Linode account personal access token.
            required: true
@@ -78,7 +86,18 @@ groups:
    webservers: "'web' in (tags|list)"
    mailservers: "'mail' in (tags|list)"
compose:
    # By default, Ansible tries to connect to the label of the instance.
    # Since that might not be a valid name to connect to, you can
    # replace it with the first IPv4 address of the linode as follows:
    ansible_ssh_host: ipv4[0]
    ansible_port: 2222

# Example where control traffic is limited to the internal network
plugin: community.general.linode
access_token: foobar
ip_style: api
compose:
    ansible_host: "ipv4 | community.general.json_query('[?public==`false`].address') | first"
'''

import os
@@ -166,14 +185,44 @@ class InventoryModule(BaseInventoryPlugin, Constructable):

    def _add_hostvars_for_instances(self):
        """Add hostvars for instances in the dynamic inventory."""
        ip_style = self.get_option('ip_style')
        for instance in self.instances:
            hostvars = instance._raw_json
            for hostvar_key in hostvars:
                if ip_style == 'api' and hostvar_key in ['ipv4', 'ipv6']:
                    continue
                self.inventory.set_variable(
                    instance.label,
                    hostvar_key,
                    hostvars[hostvar_key]
                )
            if ip_style == 'api':
                ips = instance.ips.ipv4.public + instance.ips.ipv4.private
                ips += [instance.ips.ipv6.slaac, instance.ips.ipv6.link_local]
                ips += instance.ips.ipv6.pools

                for ip_type in set(ip.type for ip in ips):
                    self.inventory.set_variable(
                        instance.label,
                        ip_type,
                        self._ip_data([ip for ip in ips if ip.type == ip_type])
                    )

    def _ip_data(self, ip_list):
        data = []
        for ip in list(ip_list):
            data.append(
                {
                    'address': ip.address,
                    'subnet_mask': ip.subnet_mask,
                    'gateway': ip.gateway,
                    'public': ip.public,
                    'prefix': ip.prefix,
                    'rdns': ip.rdns,
                    'type': ip.type
                }
            )
        return data

    def _validate_option(self, name, desired_type, option_value):
        """Validate user specified configuration data against types."""
@@ -235,7 +235,7 @@ class InventoryModule(BaseInventoryPlugin):

        self.headers = {
            'Authorization': "Bearer %s" % token,
-           'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ')[0]),
+           'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ', 1)[0]),
            'Content-type': 'application/json'
        }
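The `split(' ', 1)` change here (mirrored in the Online and Scaleway module utils later in this diff) does not alter the result; it only stops splitting after the first space, since the first token of the version string is all that is needed. A quick illustration:

import sys

# sys.version looks like: '3.9.7 (default, Sep 16 2021, 13:09:58) \n[GCC 7.5.0]'
print(sys.version.split(' ')[0])     # e.g. '3.9.7' - splits the whole string first
print(sys.version.split(' ', 1)[0])  # e.g. '3.9.7' - stops after the first space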
plugins/inventory/opennebula.py (new file, 239 lines)
@@ -0,0 +1,239 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2020, FELDSAM s.r.o. - FeldHost™ <support@feldhost.cz>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)

__metaclass__ = type

DOCUMENTATION = r'''
name: opennebula
author:
  - Kristian Feldsam (@feldsam)
short_description: OpenNebula inventory source
version_added: "3.8.0"
extends_documentation_fragment:
  - constructed
description:
  - Get inventory hosts from OpenNebula cloud.
  - Uses a YAML configuration file ending with either I(opennebula.yml) or I(opennebula.yaml)
    to set parameter values.
  - Uses I(api_authfile), C(~/.one/one_auth), or C(ONE_AUTH) pointing to an OpenNebula credentials file.
options:
  plugin:
    description: Token that ensures this is a source file for the 'opennebula' plugin.
    type: string
    required: true
    choices: [ community.general.opennebula ]
  api_url:
    description:
      - URL of the OpenNebula RPC server.
      - It is recommended to use HTTPS so that the username/password are not
        transferred over the network unencrypted.
      - If not set then the value of the C(ONE_URL) environment variable is used.
    env:
      - name: ONE_URL
    required: True
    type: string
  api_username:
    description:
      - Name of the user to login into the OpenNebula RPC server. If not set
        then the value of the C(ONE_USERNAME) environment variable is used.
    env:
      - name: ONE_USERNAME
    type: string
  api_password:
    description:
      - Password or a token of the user to login into OpenNebula RPC server.
      - If not set, the value of the C(ONE_PASSWORD) environment variable is used.
    env:
      - name: ONE_PASSWORD
    required: False
    type: string
  api_authfile:
    description:
      - If both I(api_username) and I(api_password) are not set, then it will try to
        authenticate with the ONE auth file. Default path is C(~/.one/one_auth).
      - Set environment variable C(ONE_AUTH) to override this path.
    env:
      - name: ONE_AUTH
    required: False
    type: string
  hostname:
    description: Field to match the hostname. Note C(v4_first_ip) corresponds to the first IPv4 found on the VM.
    type: string
    default: v4_first_ip
    choices:
      - v4_first_ip
      - v6_first_ip
      - name
  filter_by_label:
    description: Only return servers filtered by this label.
    type: string
  group_by_labels:
    description: Create host groups by VM labels.
    type: bool
    default: True
'''

EXAMPLES = r'''
# inventory_opennebula.yml file in YAML format
# Example command line: ansible-inventory --list -i inventory_opennebula.yml

# Pass a label filter to the API
plugin: community.general.opennebula
api_url: https://opennebula:2633/RPC2
filter_by_label: Cache
'''

try:
    import pyone

    HAS_PYONE = True
except ImportError:
    HAS_PYONE = False

from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible.module_utils._text import to_native

from collections import namedtuple
import os


class InventoryModule(BaseInventoryPlugin, Constructable):
    NAME = 'community.general.opennebula'

    def verify_file(self, path):
        valid = False
        if super(InventoryModule, self).verify_file(path):
            if path.endswith(('opennebula.yaml', 'opennebula.yml')):
                valid = True
        return valid

    def _get_connection_info(self):
        url = self.get_option('api_url')
        username = self.get_option('api_username')
        password = self.get_option('api_password')
        authfile = self.get_option('api_authfile')

        if not username and not password:
            if authfile is None:
                authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth")
            try:
                with open(authfile, "r") as fp:
                    authstring = fp.read().rstrip()
                username, password = authstring.split(":")
            except (OSError, IOError):
                raise AnsibleError("Could not find or read ONE_AUTH file at '{e}'".format(e=authfile))
            except Exception:
                raise AnsibleError("Error occurred while reading ONE_AUTH file at '{e}'".format(e=authfile))

        auth_params = namedtuple('auth', ('url', 'username', 'password'))

        return auth_params(url=url, username=username, password=password)

    def _get_vm_ipv4(self, vm):
        nic = vm.TEMPLATE.get('NIC')

        if isinstance(nic, dict):
            nic = [nic]

        for net in nic:
            return net['IP']

        return False

    def _get_vm_ipv6(self, vm):
        nic = vm.TEMPLATE.get('NIC')

        if isinstance(nic, dict):
            nic = [nic]

        for net in nic:
            if net.get('IP6_GLOBAL'):
                return net['IP6_GLOBAL']

        return False

    def _get_vm_pool(self):
        auth = self._get_connection_info()

        if not (auth.username and auth.password):
            raise AnsibleError('API credentials missing. Check OpenNebula inventory file.')
        else:
            one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)

        # get hosts (VMs)
        try:
            vm_pool = one_client.vmpool.infoextended(-2, -1, -1, 3)
        except Exception as e:
            raise AnsibleError("Something happened during XML-RPC call: {e}".format(e=to_native(e)))

        return vm_pool

    def _retrieve_servers(self, label_filter=None):
        vm_pool = self._get_vm_pool()

        result = []

        # iterate over hosts
        for vm in vm_pool.VM:
            server = vm.USER_TEMPLATE

            labels = []
            if vm.USER_TEMPLATE.get('LABELS'):
                labels = [s for s in vm.USER_TEMPLATE.get('LABELS') if s == ',' or s == '-' or s.isalnum() or s.isspace()]
                labels = ''.join(labels)
                labels = labels.replace(' ', '_')
                labels = labels.replace('-', '_')
                labels = labels.split(',')

            # filter by label
            if label_filter is not None:
                if label_filter not in labels:
                    continue

            server['name'] = vm.NAME
            server['LABELS'] = labels
            server['v4_first_ip'] = self._get_vm_ipv4(vm)
            server['v6_first_ip'] = self._get_vm_ipv6(vm)

            result.append(server)

        return result
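The label handling in `_retrieve_servers` keeps only commas, dashes, alphanumerics and whitespace, then normalizes spaces and dashes to underscores before splitting on commas. A standalone sketch of that transformation (input string invented):

def sanitize_labels(raw):
    # Keep only commas, dashes, alphanumerics and whitespace, as above.
    kept = ''.join(s for s in raw if s == ',' or s == '-' or s.isalnum() or s.isspace())
    return kept.replace(' ', '_').replace('-', '_').split(',')

print(sanitize_labels('web frontend,db-01'))  # ['web_frontend', 'db_01']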
    def _populate(self):
        hostname_preference = self.get_option('hostname')
        group_by_labels = self.get_option('group_by_labels')

        # Add a top group 'all'
        self.inventory.add_group(group='all')

        filter_by_label = self.get_option('filter_by_label')
        for server in self._retrieve_servers(filter_by_label):
            # check for labels
            if group_by_labels and server['LABELS']:
                for label in server['LABELS']:
                    self.inventory.add_group(group=label)
                    self.inventory.add_host(host=server['name'], group=label)

            self.inventory.add_host(host=server['name'], group='all')

            for attribute, value in server.items():
                self.inventory.set_variable(server['name'], attribute, value)

            if hostname_preference != 'name':
                self.inventory.set_variable(server['name'], 'ansible_host', server[hostname_preference])

            if server.get('SSH_PORT'):
                self.inventory.set_variable(server['name'], 'ansible_port', server['SSH_PORT'])

    def parse(self, inventory, loader, path, cache=True):
        if not HAS_PYONE:
            raise AnsibleError('OpenNebula Inventory plugin requires pyone to work!')

        super(InventoryModule, self).parse(inventory, loader, path)
        self._read_config_data(path=path)

        self._populate()
@@ -88,13 +88,24 @@ DOCUMENTATION = '''
'''

EXAMPLES = '''
# Minimal example which will not gather additional facts for QEMU/LXC guests
# By not specifying a URL the plugin will attempt to connect to the controller host on port 8006
# my.proxmox.yml
plugin: community.general.proxmox
url: http://localhost:8006
user: ansible@pve
password: secure
validate_certs: no

# More complete example demonstrating the use of 'want_facts' and the constructed options
# Note that using facts returned by 'want_facts' in constructed options requires 'want_facts=true'
# my.proxmox.yml
plugin: community.general.proxmox
url: http://pve.domain.com:8006
user: ansible@pve
password: secure
validate_certs: false
want_facts: true
keyed_groups:
  # proxmox_tags_parsed is an example of a fact only returned when 'want_facts=true'
  - key: proxmox_tags_parsed
    separator: ""
    prefix: group
@@ -13,6 +13,8 @@ DOCUMENTATION = r'''
    short_description: Scaleway inventory source
    description:
        - Get inventory hosts from Scaleway.
+   requirements:
+       - PyYAML
    options:
        plugin:
            description: Token that ensures this is a source file for the 'scaleway' plugin.
@@ -30,9 +32,10 @@ DOCUMENTATION = r'''
            description: Filter results on a specific tag.
            type: list
        oauth_token:
-           required: True
            description:
                - Scaleway OAuth token.
+               - If not explicitly defined or in environment variables, it will try to lookup in the scaleway-cli configuration file
+                 (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)).
                - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/).
            env:
                # in order of precedence
@@ -95,13 +98,22 @@ variables:
  ansible_user: "'admin'"
'''

import os
import json

try:
    import yaml
except ImportError as exc:
    YAML_IMPORT_ERROR = exc
else:
    YAML_IMPORT_ERROR = None

from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link
from ansible.module_utils.urls import open_url
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.six import raise_from

import ansible.module_utils.six.moves.urllib.parse as urllib_parse
@@ -278,13 +290,38 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
        # Composed variables
        self._set_composite_vars(self.get_option('variables'), host_infos, hostname, strict=False)

    def get_oauth_token(self):
        oauth_token = self.get_option('oauth_token')

        if 'SCW_CONFIG_PATH' in os.environ:
            scw_config_path = os.getenv('SCW_CONFIG_PATH')
        elif 'XDG_CONFIG_HOME' in os.environ:
            scw_config_path = os.path.join(os.getenv('XDG_CONFIG_HOME'), 'scw', 'config.yaml')
        else:
            scw_config_path = os.path.join(os.path.expanduser('~'), '.config', 'scw', 'config.yaml')

        if not oauth_token and os.path.exists(scw_config_path):
            with open(scw_config_path) as fh:
                scw_config = yaml.safe_load(fh)
                active_profile = scw_config.get('active_profile', 'default')
                if active_profile == 'default':
                    oauth_token = scw_config.get('secret_key')
                else:
                    oauth_token = scw_config['profiles'][active_profile].get('secret_key')

        return oauth_token

    def parse(self, inventory, loader, path, cache=True):
        if YAML_IMPORT_ERROR:
            raise_from(AnsibleError('PyYAML is probably missing'), YAML_IMPORT_ERROR)
        super(InventoryModule, self).parse(inventory, loader, path)
        self._read_config_data(path=path)

        config_zones = self.get_option("regions")
        tags = self.get_option("tags")
-       token = self.get_option("oauth_token")
+       token = self.get_oauth_token()
        if not token:
            raise AnsibleError("'oauth_token' value is null, you must configure it either in inventory, envvars or scaleway-cli config.")
        hostname_preference = self.get_option("hostnames")

        for zone in self._get_zones(config_zones):
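`get_oauth_token` only consults the scaleway-cli config file when the option itself is empty, and honours `active_profile` inside that file. A sketch of the same profile resolution against an in-memory config (sample values invented):

import yaml

sample_config = yaml.safe_load("""
secret_key: default-token
active_profile: staging
profiles:
  staging:
    secret_key: staging-token
""")

active_profile = sample_config.get('active_profile', 'default')
if active_profile == 'default':
    token = sample_config.get('secret_key')
else:
    token = sample_config['profiles'][active_profile].get('secret_key')
print(token)  # 'staging-token'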
@@ -35,6 +35,11 @@ DOCUMENTATION = '''
    flat:
        description: If 0 each record is returned as a dictionary, otherwise a string
        default: 1
    retry_servfail:
        description: Retry a nameserver if it returns SERVFAIL.
        default: false
        type: bool
        version_added: 3.6.0
notes:
    - ALL is not a record per-se, merely the listed fields are available for any record results you retrieve in the form of a dictionary.
    - While the 'dig' lookup plugin supports anything which dnspython supports out of the box, only a subset can be converted into a dictionary.
@@ -73,6 +78,10 @@ EXAMPLES = """
- ansible.builtin.debug:
    msg: "XMPP service for gmail.com. is available at {{ item.target }} on port {{ item.port }}"
  with_items: "{{ lookup('community.general.dig', '_xmpp-server._tcp.gmail.com./SRV', 'flat=0', wantlist=True) }}"

- name: Retry nameservers that return SERVFAIL
  ansible.builtin.debug:
    msg: "{{ lookup('community.general.dig', 'example.org./A', 'retry_servfail=True') }}"
"""

RETURN = """
@@ -300,6 +309,8 @@ class LookupModule(LookupBase):
                    rdclass = dns.rdataclass.from_text(arg)
                except Exception as e:
                    raise AnsibleError("dns lookup illegal CLASS: %s" % to_native(e))
            elif opt == 'retry_servfail':
                myres.retry_servfail = bool(arg)

            continue
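The new `retry_servfail` option maps onto dnspython's `Resolver.retry_servfail` flag, as the `myres.retry_servfail = bool(arg)` line above shows; when it is false (the default), a SERVFAIL answer fails the lookup instead of being retried against the next nameserver. A minimal sketch outside Ansible, assuming dnspython is installed:

import dns.resolver

resolver = dns.resolver.Resolver()
resolver.retry_servfail = True  # same flag the plugin sets via 'retry_servfail=True'
answer = resolver.resolve('example.org', 'A')  # resolver.query() on dnspython < 2.0
print([rr.to_text() for rr in answer])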
@@ -36,15 +36,39 @@ options:
        ini:
            - section: tss_lookup
              key: username
        required: true
    password:
-       description: The password associated with the supplied username.
+       description:
+           - The password associated with the supplied username.
+           - Required when I(token) is not provided.
        env:
            - name: TSS_PASSWORD
        ini:
            - section: tss_lookup
              key: password
        required: true
    domain:
        default: ""
        description:
            - The domain with which to request the OAuth2 Access Grant.
            - Optional when I(token) is not provided.
            - Requires C(python-tss-sdk) version 1.0.0 or greater.
        env:
            - name: TSS_DOMAIN
        ini:
            - section: tss_lookup
              key: domain
        required: false
        version_added: 3.6.0
    token:
        description:
            - Existing token for Thycotic authorizer.
            - If provided, I(username) and I(password) are not needed.
            - Requires C(python-tss-sdk) version 1.0.0 or greater.
        env:
            - name: TSS_TOKEN
        ini:
            - section: tss_lookup
              key: token
        version_added: 3.7.0
    api_path_uri:
        default: /api/v1
        description: The path to append to the base URL to form a valid REST
@@ -71,18 +95,6 @@ _list:
"""

EXAMPLES = r"""
-- hosts: localhost
-  vars:
-    secret: "{{ lookup('community.general.tss', 1) }}"
-  tasks:
-    - ansible.builtin.debug:
-        msg: >
-          the password is {{
-            (secret['items']
-              | items2dict(key_name='slug',
-                           value_name='itemValue'))['password']
-          }}
-
- hosts: localhost
  vars:
    secret: >-
@@ -104,79 +116,169 @@ EXAMPLES = r"""
              value_name='itemValue'))['password']
          }}

- hosts: localhost
  vars:
    secret: >-
      {{
        lookup(
          'community.general.tss',
          102,
          base_url='https://secretserver.domain.com/SecretServer/',
          username='user.name',
          password='password',
          domain='domain'
        )
      }}
  tasks:
    - ansible.builtin.debug:
        msg: >
          the password is {{
            (secret['items']
              | items2dict(key_name='slug',
                           value_name='itemValue'))['password']
          }}

- hosts: localhost
  vars:
    secret_password: >-
-     {{ ((lookup('community.general.tss', 1) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password'] }}"
+     {{
+       ((lookup(
+         'community.general.tss',
+         102,
+         base_url='https://secretserver.domain.com/SecretServer/',
+         token='thycotic_access_token',
+       ) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password']
+     }}
  tasks:
    - ansible.builtin.debug:
        msg: the password is {{ secret_password }}
"""
-from distutils.version import LooseVersion
-from ansible.errors import AnsibleError, AnsibleOptionsError

-sdk_is_missing = False
+import abc

+from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.module_utils import six
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.display import Display

try:
-    from thycotic import __version__ as sdk_version
-    from thycotic.secrets.server import (
-        SecretServer,
-        SecretServerError,
-        PasswordGrantAuthorizer,
-    )
-except ImportError:
-    sdk_is_missing = True
+    from thycotic.secrets.server import SecretServer, SecretServerError

-from ansible.utils.display import Display
-from ansible.plugins.lookup import LookupBase
+    HAS_TSS_SDK = True
+except ImportError:
+    SecretServer = None
+    SecretServerError = None
+    HAS_TSS_SDK = False

+try:
+    from thycotic.secrets.server import PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer

+    HAS_TSS_AUTHORIZER = True
+except ImportError:
+    PasswordGrantAuthorizer = None
+    DomainPasswordGrantAuthorizer = None
+    AccessTokenAuthorizer = None
+    HAS_TSS_AUTHORIZER = False


display = Display()


-class LookupModule(LookupBase):
-    @staticmethod
-    def Client(server_parameters):
+@six.add_metaclass(abc.ABCMeta)
+class TSSClient(object):
+    def __init__(self):
+        self._client = None

-        if LooseVersion(sdk_version) < LooseVersion('1.0.0'):
-            return SecretServer(**server_parameters)
+    @staticmethod
+    def from_params(**server_parameters):
+        if HAS_TSS_AUTHORIZER:
+            return TSSClientV1(**server_parameters)
        else:
-            authorizer = PasswordGrantAuthorizer(
+            return TSSClientV0(**server_parameters)

+    def get_secret(self, term):
+        display.debug("tss_lookup term: %s" % term)

+        secret_id = self._term_to_secret_id(term)
+        display.vvv(u"Secret Server lookup of Secret with ID %d" % secret_id)

+        return self._client.get_secret_json(secret_id)

+    @staticmethod
+    def _term_to_secret_id(term):
+        try:
+            return int(term)
+        except ValueError:
+            raise AnsibleOptionsError("Secret ID must be an integer")


+class TSSClientV0(TSSClient):
+    def __init__(self, **server_parameters):
+        super(TSSClientV0, self).__init__()

+        if server_parameters.get("domain"):
+            raise AnsibleError("The 'domain' option requires 'python-tss-sdk' version 1.0.0 or greater")

+        self._client = SecretServer(
+            server_parameters["base_url"],
+            server_parameters["username"],
+            server_parameters["password"],
+            server_parameters["api_path_uri"],
+            server_parameters["token_path_uri"],
+        )


+class TSSClientV1(TSSClient):
+    def __init__(self, **server_parameters):
+        super(TSSClientV1, self).__init__()

+        authorizer = self._get_authorizer(**server_parameters)
+        self._client = SecretServer(
+            server_parameters["base_url"], authorizer, server_parameters["api_path_uri"]
+        )

+    @staticmethod
+    def _get_authorizer(**server_parameters):
+        if server_parameters.get("token"):
+            return AccessTokenAuthorizer(
+                server_parameters["token"],
+            )

+        if server_parameters.get("domain"):
+            return DomainPasswordGrantAuthorizer(
+                server_parameters["base_url"],
+                server_parameters["username"],
+                server_parameters["domain"],
+                server_parameters["password"],
+                server_parameters["token_path_uri"],
+            )

-            return SecretServer(
-                server_parameters["base_url"], authorizer, server_parameters["api_path_uri"]
-            )
+        return PasswordGrantAuthorizer(
+            server_parameters["base_url"],
+            server_parameters["username"],
+            server_parameters["password"],
+            server_parameters["token_path_uri"],
+        )


+class LookupModule(LookupBase):
    def run(self, terms, variables, **kwargs):
-        if sdk_is_missing:
+        if not HAS_TSS_SDK:
            raise AnsibleError("python-tss-sdk must be installed to use this plugin")

        self.set_options(var_options=variables, direct=kwargs)

-        secret_server = LookupModule.Client(
-            {
-                "base_url": self.get_option("base_url"),
-                "username": self.get_option("username"),
-                "password": self.get_option("password"),
-                "api_path_uri": self.get_option("api_path_uri"),
-                "token_path_uri": self.get_option("token_path_uri"),
-            }
+        tss = TSSClient.from_params(
+            base_url=self.get_option("base_url"),
+            username=self.get_option("username"),
+            password=self.get_option("password"),
+            domain=self.get_option("domain"),
+            token=self.get_option("token"),
+            api_path_uri=self.get_option("api_path_uri"),
+            token_path_uri=self.get_option("token_path_uri"),
        )
-        result = []

-        for term in terms:
-            display.debug("tss_lookup term: %s" % term)
-            try:
-                id = int(term)
-                display.vvv(u"Secret Server lookup of Secret with ID %d" % id)
-                result.append(secret_server.get_secret_json(id))
-            except ValueError:
-                raise AnsibleOptionsError("Secret ID must be an integer")
-            except SecretServerError as error:
-                raise AnsibleError("Secret Server lookup failure: %s" % error.message)
-        return result
+        try:
+            return [tss.get_secret(term) for term in terms]
+        except SecretServerError as error:
+            raise AnsibleError("Secret Server lookup failure: %s" % error.message)
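Taken together, the refactor lets callers build a client without caring which SDK generation is installed: `from_params` returns `TSSClientV1` when the authorizer imports above succeeded and falls back to `TSSClientV0` otherwise. A hedged sketch (the URL and credentials are placeholders):

# Sketch only: assumes python-tss-sdk is installed and the values below
# are replaced with real server details.
tss = TSSClient.from_params(
    base_url='https://secretserver.example.com/SecretServer/',
    username='user.name',
    password='password',
    domain='',          # a non-empty domain selects DomainPasswordGrantAuthorizer
    token=None,         # an existing token takes precedence over username/password
    api_path_uri='/api/v1',
    token_path_uri='/oauth2/token',
)
secret = tss.get_secret('102')  # the term is validated and cast to int internally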
@@ -384,8 +384,8 @@ class NetAppESeriesModule(object):
            path = path[1:]
        request_url = self.url + self.DEFAULT_REST_API_PATH + path

-        if self.log_requests or True:
-            self.module.log(pformat(dict(url=request_url, data=data, method=method)))
+        # if self.log_requests:
+        self.module.log(pformat(dict(url=request_url, data=data, method=method)))

        return request(url=request_url, data=data, method=method, headers=headers, use_proxy=True, force=False, last_mod_time=None,
                       timeout=self.DEFAULT_TIMEOUT, http_agent=self.HTTP_AGENT, force_basic_auth=True, ignore_errors=ignore_errors, **self.creds)
@@ -78,6 +78,14 @@ URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY = "{url}/admin/realms/{realm}/authen
URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY = "{url}/admin/realms/{realm}/authentication/executions/{id}/lower-priority"
URL_AUTHENTICATION_CONFIG = "{url}/admin/realms/{realm}/authentication/config/{id}"

URL_IDENTITY_PROVIDERS = "{url}/admin/realms/{realm}/identity-provider/instances"
URL_IDENTITY_PROVIDER = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}"
URL_IDENTITY_PROVIDER_MAPPERS = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}/mappers"
URL_IDENTITY_PROVIDER_MAPPER = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}/mappers/{id}"

URL_COMPONENTS = "{url}/admin/realms/{realm}/components"
URL_COMPONENT = "{url}/admin/realms/{realm}/components/{id}"


def keycloak_argument_spec():
    """
@@ -1023,7 +1031,7 @@ class KeycloakAPI(object):
        :param name: Name of the role to fetch.
        :param realm: Realm in which the role resides; default 'master'.
        """
-        role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=name)
+        role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name))
        try:
            return json.loads(to_native(open_url(role_url, method="GET", headers=self.restheaders,
                                                 validate_certs=self.validate_certs).read()))
@@ -1057,7 +1065,7 @@ class KeycloakAPI(object):
        :param rolerep: A RoleRepresentation of the updated role.
        :return HTTPResponse object on success
        """
-        role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=rolerep['name'])
+        role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(rolerep['name']))
        try:
            return open_url(role_url, method='PUT', headers=self.restheaders,
                            data=json.dumps(rolerep), validate_certs=self.validate_certs)
@@ -1071,7 +1079,7 @@ class KeycloakAPI(object):
        :param name: The name of the role.
        :param realm: The realm in which this role resides, default "master".
        """
-        role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=name)
+        role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name))
        try:
            return open_url(role_url, method='DELETE', headers=self.restheaders,
                            validate_certs=self.validate_certs)
@@ -1114,7 +1122,7 @@ class KeycloakAPI(object):
        if cid is None:
            self.module.fail_json(msg='Could not find client %s in realm %s'
                                      % (clientid, realm))
-        role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=name)
+        role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name))
        try:
            return json.loads(to_native(open_url(role_url, method="GET", headers=self.restheaders,
                                                 validate_certs=self.validate_certs).read()))
@@ -1160,7 +1168,7 @@ class KeycloakAPI(object):
        if cid is None:
            self.module.fail_json(msg='Could not find client %s in realm %s'
                                      % (clientid, realm))
-        role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=rolerep['name'])
+        role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep['name']))
        try:
            return open_url(role_url, method='PUT', headers=self.restheaders,
                            data=json.dumps(rolerep), validate_certs=self.validate_certs)
@@ -1179,7 +1187,7 @@ class KeycloakAPI(object):
        if cid is None:
            self.module.fail_json(msg='Could not find client %s in realm %s'
                                      % (clientid, realm))
-        role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=name)
+        role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name))
        try:
            return open_url(role_url, method='DELETE', headers=self.restheaders,
                            validate_certs=self.validate_certs)
@@ -1437,3 +1445,252 @@ class KeycloakAPI(object):
        except Exception as e:
            self.module.fail_json(msg='Could not get executions for authentication flow %s in realm %s: %s'
                                      % (config["alias"], realm, str(e)))

    def get_identity_providers(self, realm='master'):
        """ Fetch representations for identity providers in a realm
        :param realm: realm to be queried
        :return: list of representations for identity providers
        """
        idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm)
        try:
            return json.loads(to_native(open_url(idps_url, method='GET', headers=self.restheaders,
                                                 validate_certs=self.validate_certs).read()))
        except ValueError as e:
            self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity providers for realm %s: %s'
                                      % (realm, str(e)))
        except Exception as e:
            self.module.fail_json(msg='Could not obtain list of identity providers for realm %s: %s'
                                      % (realm, str(e)))

    def get_identity_provider(self, alias, realm='master'):
        """ Fetch identity provider representation from a realm using the idp's alias.
        If the identity provider does not exist, None is returned.
        :param alias: Alias of the identity provider to fetch.
        :param realm: Realm in which the identity provider resides; default 'master'.
        """
        idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias)
        try:
            return json.loads(to_native(open_url(idp_url, method="GET", headers=self.restheaders,
                                                 validate_certs=self.validate_certs).read()))
        except HTTPError as e:
            if e.code == 404:
                return None
            else:
                self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s'
                                          % (alias, realm, str(e)))
        except Exception as e:
            self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s'
                                      % (alias, realm, str(e)))

    def create_identity_provider(self, idprep, realm='master'):
        """ Create an identity provider.
        :param idprep: Identity provider representation of the idp to be created.
        :param realm: Realm in which this identity provider resides, default "master".
        :return: HTTPResponse object on success
        """
        idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm)
        try:
            return open_url(idps_url, method='POST', headers=self.restheaders,
                            data=json.dumps(idprep), validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Could not create identity provider %s in realm %s: %s'
                                      % (idprep['alias'], realm, str(e)))

    def update_identity_provider(self, idprep, realm='master'):
        """ Update an existing identity provider.
        :param idprep: Identity provider representation of the idp to be updated.
        :param realm: Realm in which this identity provider resides, default "master".
        :return HTTPResponse object on success
        """
        idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=idprep['alias'])
        try:
            return open_url(idp_url, method='PUT', headers=self.restheaders,
                            data=json.dumps(idprep), validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Could not update identity provider %s in realm %s: %s'
                                      % (idprep['alias'], realm, str(e)))

    def delete_identity_provider(self, alias, realm='master'):
        """ Delete an identity provider.
        :param alias: Alias of the identity provider.
        :param realm: Realm in which this identity provider resides, default "master".
        """
        idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias)
        try:
            return open_url(idp_url, method='DELETE', headers=self.restheaders,
                            validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Unable to delete identity provider %s in realm %s: %s'
                                      % (alias, realm, str(e)))
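A hedged sketch of how a module might drive these new identity provider helpers; `kc` stands for an already-constructed `KeycloakAPI` instance and the representation values are placeholders:

# Sketch only: 'kc' is an already-authenticated KeycloakAPI instance.
idprep = {
    'alias': 'github',
    'providerId': 'github',
    'enabled': True,
    'config': {'clientId': 'abc', 'clientSecret': 'secret'},
}

existing = kc.get_identity_provider('github', realm='master')
if existing is None:
    kc.create_identity_provider(idprep, realm='master')   # POST to URL_IDENTITY_PROVIDERS
else:
    kc.update_identity_provider(idprep, realm='master')   # PUT to URL_IDENTITY_PROVIDER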
    def get_identity_provider_mappers(self, alias, realm='master'):
        """ Fetch representations for identity provider mappers
        :param alias: Alias of the identity provider.
        :param realm: realm to be queried
        :return: list of representations for identity provider mappers
        """
        mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias)
        try:
            return json.loads(to_native(open_url(mappers_url, method='GET', headers=self.restheaders,
                                                 validate_certs=self.validate_certs).read()))
        except ValueError as e:
            self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity provider mappers for idp %s in realm %s: %s'
                                      % (alias, realm, str(e)))
        except Exception as e:
            self.module.fail_json(msg='Could not obtain list of identity provider mappers for idp %s in realm %s: %s'
                                      % (alias, realm, str(e)))

    def get_identity_provider_mapper(self, mid, alias, realm='master'):
        """ Fetch an identity provider mapper from a realm using the idp's alias and the mapper's unique ID.
        If the mapper does not exist, None is returned.
        :param mid: Unique ID of the mapper to fetch.
        :param alias: Alias of the identity provider.
        :param realm: Realm in which the identity provider resides; default 'master'.
        """
        mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid)
        try:
            return json.loads(to_native(open_url(mapper_url, method="GET", headers=self.restheaders,
                                                 validate_certs=self.validate_certs).read()))
        except HTTPError as e:
            if e.code == 404:
                return None
            else:
                self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s'
                                          % (mid, alias, realm, str(e)))
        except Exception as e:
            self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s'
                                      % (mid, alias, realm, str(e)))

    def create_identity_provider_mapper(self, mapper, alias, realm='master'):
        """ Create an identity provider mapper.
        :param mapper: IdentityProviderMapperRepresentation of the mapper to be created.
        :param alias: Alias of the identity provider.
        :param realm: Realm in which this identity provider resides, default "master".
        :return: HTTPResponse object on success
        """
        mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias)
        try:
            return open_url(mappers_url, method='POST', headers=self.restheaders,
                            data=json.dumps(mapper), validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Could not create identity provider mapper %s for idp %s in realm %s: %s'
                                      % (mapper['name'], alias, realm, str(e)))

    def update_identity_provider_mapper(self, mapper, alias, realm='master'):
        """ Update an existing identity provider mapper.
        :param mapper: IdentityProviderMapperRepresentation of the mapper to be updated.
        :param alias: Alias of the identity provider.
        :param realm: Realm in which this identity provider resides, default "master".
        :return HTTPResponse object on success
        """
        mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mapper['id'])
        try:
            return open_url(mapper_url, method='PUT', headers=self.restheaders,
                            data=json.dumps(mapper), validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Could not update mapper %s for identity provider %s in realm %s: %s'
                                      % (mapper['id'], alias, realm, str(e)))

    def delete_identity_provider_mapper(self, mid, alias, realm='master'):
        """ Delete an identity provider mapper.
        :param mid: Unique ID of the mapper to delete.
        :param alias: Alias of the identity provider.
        :param realm: Realm in which this identity provider resides, default "master".
        """
        mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid)
        try:
            return open_url(mapper_url, method='DELETE', headers=self.restheaders,
                            validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Unable to delete mapper %s for identity provider %s in realm %s: %s'
                                      % (mid, alias, realm, str(e)))

    def get_components(self, filter=None, realm='master'):
        """ Fetch representations for components in a realm
        :param realm: realm to be queried
        :param filter: search filter
        :return: list of representations for components
        """
        comps_url = URL_COMPONENTS.format(url=self.baseurl, realm=realm)
        if filter is not None:
            comps_url += '?%s' % filter

        try:
            return json.loads(to_native(open_url(comps_url, method='GET', headers=self.restheaders,
                                                 validate_certs=self.validate_certs).read()))
        except ValueError as e:
            self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of components for realm %s: %s'
                                      % (realm, str(e)))
        except Exception as e:
            self.module.fail_json(msg='Could not obtain list of components for realm %s: %s'
                                      % (realm, str(e)))

    def get_component(self, cid, realm='master'):
        """ Fetch component representation from a realm using its cid.
        If the component does not exist, None is returned.
        :param cid: Unique ID of the component to fetch.
        :param realm: Realm in which the component resides; default 'master'.
        """
        comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid)
        try:
            return json.loads(to_native(open_url(comp_url, method="GET", headers=self.restheaders,
                                                 validate_certs=self.validate_certs).read()))
        except HTTPError as e:
            if e.code == 404:
                return None
            else:
                self.module.fail_json(msg='Could not fetch component %s in realm %s: %s'
                                          % (cid, realm, str(e)))
        except Exception as e:
            self.module.fail_json(msg='Could not fetch component %s in realm %s: %s'
                                      % (cid, realm, str(e)))

    def create_component(self, comprep, realm='master'):
        """ Create a component.
        :param comprep: Component representation of the component to be created.
        :param realm: Realm in which this component resides, default "master".
        :return: Component representation of the created component
        """
        comps_url = URL_COMPONENTS.format(url=self.baseurl, realm=realm)
        try:
            resp = open_url(comps_url, method='POST', headers=self.restheaders,
                            data=json.dumps(comprep), validate_certs=self.validate_certs)
            comp_url = resp.getheader('Location')
            if comp_url is None:
                self.module.fail_json(msg='Could not create component in realm %s: %s'
                                          % (realm, 'unexpected response'))
            return json.loads(to_native(open_url(comp_url, method="GET", headers=self.restheaders,
                                                 validate_certs=self.validate_certs).read()))
        except Exception as e:
            self.module.fail_json(msg='Could not create component in realm %s: %s'
                                      % (realm, str(e)))

    def update_component(self, comprep, realm='master'):
        """ Update an existing component.
        :param comprep: Component representation of the component to be updated.
        :param realm: Realm in which this component resides, default "master".
        :return HTTPResponse object on success
        """
        cid = comprep.get('id')
        if cid is None:
            self.module.fail_json(msg='Cannot update component without id')
        comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid)
        try:
            return open_url(comp_url, method='PUT', headers=self.restheaders,
                            data=json.dumps(comprep), validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Could not update component %s in realm %s: %s'
                                      % (cid, realm, str(e)))

    def delete_component(self, cid, realm='master'):
        """ Delete a component.
        :param cid: Unique ID of the component.
        :param realm: Realm in which this component resides, default "master".
        """
        comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid)
        try:
            return open_url(comp_url, method='DELETE', headers=self.restheaders,
                            validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Unable to delete component %s in realm %s: %s'
                                      % (cid, realm, str(e)))
@@ -63,3 +63,7 @@ class ModuleHelperBase(object):
        if 'failed' not in output:
            output['failed'] = False
        self.module.exit_json(changed=self.has_changed(), **output)

    @classmethod
    def execute(cls, module=None):
        cls(module).run()
@@ -16,6 +16,7 @@ class ArgFormat(object):
    BOOLEAN = 0
    PRINTF = 1
    FORMAT = 2
    BOOLEAN_NOT = 3

    @staticmethod
    def stars_deco(num):
@@ -50,12 +51,14 @@ class ArgFormat(object):

        _fmts = {
            ArgFormat.BOOLEAN: lambda _fmt, v: ([_fmt] if bool(v) else []),
            ArgFormat.BOOLEAN_NOT: lambda _fmt, v: ([] if bool(v) else [_fmt]),
            ArgFormat.PRINTF: printf_fmt,
            ArgFormat.FORMAT: lambda _fmt, v: [_fmt.format(v)],
        }

        self.name = name
        self.stars = stars
        self.style = style

        if fmt is None:
            fmt = "{0}"
@@ -76,7 +79,7 @@ class ArgFormat(object):
        self.arg_format = (self.stars_deco(stars))(self.arg_format)

    def to_text(self, value):
-        if value is None:
+        if value is None and self.style != ArgFormat.BOOLEAN_NOT:
            return []
        func = self.arg_format
        return [str(p) for p in func(value)]
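The new BOOLEAN_NOT style is the inverse of BOOLEAN: it emits the flag when the value is falsy, which is why `to_text` now lets a None value through for that style. A standalone illustration of the `_fmts` table above (PRINTF simplified to a plain `%` here; the real code uses the `printf_fmt` helper):

# Standalone illustration of the _fmts mapping shown above.
fmts = {
    'BOOLEAN': lambda fmt, v: ([fmt] if bool(v) else []),
    'BOOLEAN_NOT': lambda fmt, v: ([] if bool(v) else [fmt]),
    'PRINTF': lambda fmt, v: [fmt % v],
    'FORMAT': lambda fmt, v: [fmt.format(v)],
}

print(fmts['BOOLEAN']('--force', True))         # ['--force']
print(fmts['BOOLEAN_NOT']('--no-cache', None))  # ['--no-cache']
print(fmts['PRINTF']('--retries=%d', 3))        # ['--retries=3']
print(fmts['FORMAT']('--name={0}', 'web'))      # ['--name=web']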
@@ -101,7 +101,7 @@ class Online(object):

    @staticmethod
    def get_user_agent_string(module):
-        return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ')[0])
+        return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ', 1)[0])

    def get(self, path, data=None, headers=None):
        return self.send('GET', path, data, headers)
@@ -29,7 +29,7 @@ FAIL_MSG = 'Issuing a data modification command without specifying the '\
class RedfishUtils(object):

    def __init__(self, creds, root_uri, timeout, module, resource_id=None,
-                 data_modification=False):
+                 data_modification=False, strip_etag_quotes=False):
        self.root_uri = root_uri
        self.creds = creds
        self.timeout = timeout
@@ -37,6 +37,7 @@ class RedfishUtils(object):
        self.service_root = '/redfish/v1/'
        self.resource_id = resource_id
        self.data_modification = data_modification
        self.strip_etag_quotes = strip_etag_quotes
        self._init_session()

    def _auth_params(self, headers):
@@ -121,6 +122,8 @@ class RedfishUtils(object):
            if not etag:
                etag = r['data'].get('@odata.etag')
            if etag:
                if self.strip_etag_quotes:
                    etag = etag.strip('"')
                req_headers['If-Match'] = etag
        username, password, basic_auth = self._auth_params(req_headers)
        try:
@@ -976,6 +979,8 @@ class RedfishUtils(object):
            payload['Password'] = user.get('account_password')
        if user.get('account_roleid'):
            payload['RoleId'] = user.get('account_roleid')
        if user.get('account_id'):
            payload['Id'] = user.get('account_id')

        response = self.post_request(self.root_uri + self.accounts_uri, payload)
        if not response['ret']:
@@ -1887,7 +1892,7 @@ class RedfishUtils(object):
                          'LowerThresholdCritical', 'LowerThresholdFatal',
                          'LowerThresholdNonCritical', 'MaxReadingRangeTemp',
                          'MinReadingRangeTemp', 'ReadingCelsius', 'RelatedItem',
-                         'SensorNumber']
+                         'SensorNumber', 'Status']

        # Go through list
        for chassis_uri in self.chassis_uris:
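The `strip_etag_quotes` option exists because some Redfish services reject an `If-Match` header whose ETag keeps its surrounding quotes; the transformation itself is a plain strip:

etag = '"3d7b8a7e"'     # as returned in '@odata.etag' by some services
print(etag.strip('"'))  # 3d7b8a7e - what gets sent when strip_etag_quotes=True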
plugins/module_utils/redis.py (new file, 93 lines)
@@ -0,0 +1,93 @@
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2021, Andreas Botzner <andreas at botzner dot com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function

from ansible.module_utils.basic import missing_required_lib
__metaclass__ = type

import traceback

REDIS_IMP_ERR = None
try:
    from redis import Redis
    from redis import __version__ as redis_version
    HAS_REDIS_PACKAGE = True
except ImportError:
    REDIS_IMP_ERR = traceback.format_exc()
    HAS_REDIS_PACKAGE = False

try:
    import certifi
    HAS_CERTIFI_PACKAGE = True
except ImportError:
    CERTIFI_IMPORT_ERROR = traceback.format_exc()
    HAS_CERTIFI_PACKAGE = False


def fail_imports(module):
    errors = []
    traceback = []
    if not HAS_REDIS_PACKAGE:
        errors.append(missing_required_lib('redis'))
        traceback.append(REDIS_IMP_ERR)
    if not HAS_CERTIFI_PACKAGE:
        errors.append(missing_required_lib('certifi'))
        traceback.append(CERTIFI_IMPORT_ERROR)
    if errors:
        module.fail_json(errors=errors, traceback='\n'.join(traceback))


def redis_auth_argument_spec():
    return dict(
        login_host=dict(type='str',
                        default='localhost',),
        login_user=dict(type='str'),
        login_password=dict(type='str',
                            no_log=True
                            ),
        login_port=dict(type='int', default=6379),
        tls=dict(type='bool',
                 default=True),
        validate_certs=dict(type='bool',
                            default=True
                            ),
        ca_certs=dict(type='str')
    )


class RedisAnsible(object):
    '''Base class for Redis module'''

    def __init__(self, module):
        self.module = module
        self.connection = self._connect()

    def _connect(self):
        login_host = self.module.params['login_host']
        login_user = self.module.params['login_user']
        login_password = self.module.params['login_password']
        login_port = self.module.params['login_port']
        tls = self.module.params['tls']
        validate_certs = 'required' if self.module.params['validate_certs'] else None
        ca_certs = self.module.params['ca_certs']
        if tls and ca_certs is None:
            ca_certs = str(certifi.where())
        if tuple(map(int, redis_version.split('.'))) < (3, 4, 0) and login_user is not None:
            self.module.fail_json(
                msg='The option `username` is only supported with redis >= 3.4.0.')
        params = {'host': login_host,
                  'port': login_port,
                  'password': login_password,
                  'ssl_ca_certs': ca_certs,
                  'ssl_cert_reqs': validate_certs,
                  'ssl': tls}
        if login_user is not None:
            params['username'] = login_user
        try:
            return Redis(**params)
        except Exception as e:
            self.module.fail_json(msg='{0}'.format(str(e)))
        return None
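A hedged sketch of a module wiring these redis helpers together (the module body is illustrative; `info()` is a standard redis-py call on the wrapped client):

# Sketch only: minimal module skeleton using the helpers above.
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.redis import (
    RedisAnsible, fail_imports, redis_auth_argument_spec)


def main():
    argument_spec = redis_auth_argument_spec()
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    fail_imports(module)  # bails out if redis or certifi is missing

    redis = RedisAnsible(module)
    info = redis.connection.info()  # plain redis-py call on the wrapped client
    module.exit_json(changed=False, info=info)


if __name__ == '__main__':
    main()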
plugins/module_utils/rundeck.py (new file, 94 lines)
@@ -0,0 +1,94 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Phillipe Smith <phsmithcc@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

import json

from ansible.module_utils.urls import fetch_url, url_argument_spec
from ansible.module_utils.common.text.converters import to_native


def api_argument_spec():
    '''
    Creates an argument spec that can be used with any module
    that will be requesting content via the Rundeck API
    '''
    api_argument_spec = url_argument_spec()
    api_argument_spec.update(dict(
        url=dict(required=True, type="str"),
        api_version=dict(type="int", default=39),
        api_token=dict(required=True, type="str", no_log=True)
    ))

    return api_argument_spec


def api_request(module, endpoint, data=None, method="GET"):
    """Manages Rundeck API requests via HTTP(S)

    :arg module: The AnsibleModule (used to get url, api_version, api_token, etc).
    :arg endpoint: The API endpoint to be used.
    :kwarg data: The data to be sent (in case of POST/PUT).
    :kwarg method: "POST", "PUT", etc.

    :returns: A tuple of (**response**, **info**). Use ``response.read()`` to read the data.
        The **info** contains the 'status' and other meta data. When a HttpError (status >= 400)
        occurred then ``info['body']`` contains the error response data.

    Example::

        data={...}
        resp, info = fetch_url(module,
                               "http://rundeck.example.org",
                               data=module.jsonify(data),
                               method="POST")
        status_code = info["status"]
        body = resp.read()
        if status_code >= 400:
            body = info['body']
    """

    response, info = fetch_url(
        module=module,
        url="%s/api/%s/%s" % (
            module.params["url"],
            module.params["api_version"],
            endpoint
        ),
        data=json.dumps(data),
        method=method,
        headers={
            "Content-Type": "application/json",
            "Accept": "application/json",
            "X-Rundeck-Auth-Token": module.params["api_token"]
        }
    )

    if info["status"] == 403:
        module.fail_json(msg="Token authorization failed",
                         execution_info=json.loads(info["body"]))
    if info["status"] == 409:
        module.fail_json(msg="Job executions limit reached",
                         execution_info=json.loads(info["body"]))
    elif info["status"] >= 500:
        module.fail_json(msg="Rundeck API error",
                         execution_info=json.loads(info["body"]))

    try:
        content = response.read()
        json_response = json.loads(content)
        return json_response, info
    except AttributeError as error:
        module.fail_json(msg="Rundeck API request error",
                         exception=to_native(error),
                         execution_info=info)
    except ValueError as error:
        module.fail_json(
            msg="No valid JSON response",
            exception=to_native(error),
            execution_info=content
        )
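And a hedged usage sketch from a module's point of view (the endpoint is illustrative; `api_argument_spec` supplies the `url`, `api_version`, and `api_token` options that `api_request` reads):

# Sketch only: listing the jobs of a hypothetical project via the helper above.
from ansible.module_utils.basic import AnsibleModule


def main():
    module = AnsibleModule(argument_spec=api_argument_spec(), supports_check_mode=True)
    jobs, info = api_request(module, endpoint="project/my_project/jobs")
    module.exit_json(changed=False, jobs=jobs)


if __name__ == '__main__':
    main()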
@@ -142,7 +142,7 @@ class Scaleway(object):

    @staticmethod
    def get_user_agent_string(module):
-        return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ')[0])
+        return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ', 1)[0])

    def get(self, path, data=None, headers=None, params=None):
        return self.send(method='GET', path=path, data=data, headers=headers, params=params)
@@ -23,40 +23,48 @@ options:
     required: true
   architecture:
     description:
-      - The architecture for the container (e.g. "x86_64" or "i686").
-        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
+      - 'The architecture for the container (for example C(x86_64) or C(i686)).
+        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
     type: str
     required: false
   config:
     description:
-      - 'The config for the container (e.g. {"limits.cpu": "2"}).
-        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
-      - If the container already exists and its "config" value in metadata
-        obtained from
-        GET /1.0/containers/<name>
+      - 'The config for the container (for example C({"limits.cpu": "2"})).
+        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
+      - If the container already exists and its "config" values in metadata
+        obtained from GET /1.0/containers/<name>
         U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersname)
-        are different, they this module tries to apply the configurations.
-      - The key starts with 'volatile.' are ignored for this comparison.
-      - Not all config values are supported to apply the existing container.
-        Maybe you need to delete and recreate a container.
+        are different, this module tries to apply the configurations.
+      - The keys starting with C(volatile.) are ignored for this comparison when I(ignore_volatile_options=true).
     type: dict
     required: false
+  ignore_volatile_options:
+    description:
+      - If set to C(true), options starting with C(volatile.) are ignored. As a result,
+        they are reapplied for each execution.
+      - This default behavior can be changed by setting this option to C(false).
+      - The default value C(true) will be deprecated in community.general 4.0.0,
+        and will change to C(false) in community.general 5.0.0.
+    type: bool
+    default: true
+    required: false
+    version_added: 3.7.0
   profiles:
     description:
-      - Profile to be used by the container
+      - Profile to be used by the container.
     type: list
     elements: str
   devices:
     description:
       - 'The devices for the container
-        (e.g. { "rootfs": { "path": "/dev/kvm", "type": "unix-char" }).
-        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
+        (for example C({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})).
+        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
     type: dict
     required: false
   ephemeral:
     description:
-      - Whether or not the container is ephemeral (e.g. true or false).
-        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
+      - Whether or not the container is ephemeral (for example C(true) or C(false)).
+        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).
     required: false
     type: bool
   source:
@@ -68,7 +76,7 @@ options:
         "protocol": "lxd",
         "alias": "ubuntu/xenial/amd64" }).'
       - 'See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1) for complete API documentation.'
-      - 'Note that C(protocol) accepts two choices: C(lxd) or C(simplestreams)'
+      - 'Note that C(protocol) accepts two choices: C(lxd) or C(simplestreams).'
     required: false
     type: dict
   state:
@@ -144,10 +152,10 @@ options:
   trust_password:
     description:
       - The client trusted password.
-      - You need to set this password on the LXD server before
-        running this module using the following command.
-        lxc config set core.trust_password <some random password>
-        See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
+      - 'You need to set this password on the LXD server before
+        running this module using the following command:
+        C(lxc config set core.trust_password <some random password>).
+        See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).'
       - If trust_password is set, this module send a request for
        authentication before sending any requests.
     required: false
@@ -176,6 +184,7 @@ EXAMPLES = '''
 - name: Create a started container
   community.general.lxd_container:
     name: mycontainer
+    ignore_volatile_options: true
     state: started
     source:
       type: image
@@ -209,6 +218,7 @@ EXAMPLES = '''
 - name: Create a started container
   community.general.lxd_container:
     name: mycontainer
+    ignore_volatile_options: true
     state: started
     source:
       type: image
@@ -279,6 +289,7 @@ EXAMPLES = '''
 - name: Create LXD container
   community.general.lxd_container:
     name: new-container-1
+    ignore_volatile_options: true
     state: started
     source:
       type: image
@@ -289,6 +300,7 @@ EXAMPLES = '''
 - name: Create container on another node
   community.general.lxd_container:
     name: new-container-2
+    ignore_volatile_options: true
     state: started
     source:
       type: image
@@ -557,7 +569,7 @@ class LXDContainerManagement(object):
     def _needs_to_change_container_config(self, key):
         if key not in self.config:
             return False
-        if key == 'config':
+        if key == 'config' and self.ignore_volatile_options:  # the old behavior is to ignore configurations by keyword "volatile"
             old_configs = dict((k, v) for k, v in self.old_container_json['metadata'][key].items() if not k.startswith('volatile.'))
             for k, v in self.config['config'].items():
                 if k not in old_configs:
@@ -565,6 +577,14 @@ class LXDContainerManagement(object):
                 if old_configs[k] != v:
                     return True
             return False
+        elif key == 'config':  # next default behavior
+            old_configs = dict((k, v) for k, v in self.old_container_json['metadata'][key].items())
+            for k, v in self.config['config'].items():
+                if k not in old_configs:
+                    return True
+                if old_configs[k] != v:
+                    return True
+            return False
         else:
             old_configs = self.old_container_json['metadata'][key]
             return self.config[key] != old_configs
@@ -606,6 +626,7 @@ class LXDContainerManagement(object):
         try:
             if self.trust_password is not None:
                 self.client.authenticate(self.trust_password)
+            self.ignore_volatile_options = self.module.params.get('ignore_volatile_options')

             self.old_container_json = self._get_container_json()
             self.old_state = self._container_json_to_module_state(self.old_container_json)
@@ -651,6 +672,10 @@ def main():
         config=dict(
             type='dict',
         ),
+        ignore_volatile_options=dict(
+            type='bool',
+            default=True
+        ),
         devices=dict(
             type='dict',
         ),
@@ -703,7 +728,13 @@ def main():
         ),
         supports_check_mode=False,
     )

+    # if module.params['ignore_volatile_options'] is None:
+    #     module.params['ignore_volatile_options'] = True
+    #     module.deprecate(
+    #         'If the keyword "volatile" is used in a playbook in the config section, a
+    #         "changed" message will appear with every run, even without a change to the playbook.
+    #         This will change in the future.
+    #         Please test your scripts by "ignore_volatile_options: false"', version='5.0.0', collection_name='community.general')
     lxd_manage = LXDContainerManagement(module=module)
     lxd_manage.run()
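A condensed standalone sketch (hypothetical data, not the module code) of the comparison logic above: with ignore_volatile_options enabled, the runtime-managed volatile.* keys are filtered out of the stored config before it is compared against the requested config.

    def needs_config_change(desired, current, ignore_volatile=True):
        # drop runtime-managed keys such as volatile.eth0.hwaddr before comparing
        if ignore_volatile:
            current = {k: v for k, v in current.items() if not k.startswith('volatile.')}
        # a change is needed if any desired key is missing or holds a different value
        return any(current.get(k) != v for k, v in desired.items())

    current = {'limits.cpu': '2', 'volatile.eth0.hwaddr': '00:16:3e:aa:bb:cc'}
    print(needs_config_change({'limits.cpu': '2'}, current))  # False
    print(needs_config_change({'limits.cpu': '4'}, current))  # True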
@@ -32,7 +32,14 @@ options:
     type: str
   disk:
     description:
-      - hard disk size in GB for instance
+      - This option was previously described as "hard disk size in GB for instance" however several formats describing
+        a lxc mount are permitted.
+      - Older versions of Proxmox will accept a numeric value for size using the I(storage) parameter to automatically
+        choose which storage to allocate from, however new versions enforce the C(<STORAGE>:<SIZE>) syntax.
+      - "Additional options are available by using some combination of the following key-value pairs as a
+        comma-delimited list C([volume=]<volume> [,acl=<1|0>] [,mountoptions=<opt[;opt...]>] [,quota=<1|0>]
+        [,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=<DiskSize>])."
+      - See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description.
       - If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
         option has a default of C(3). Note that the default value of I(proxmox_default_behavior)
         changes in community.general 4.0.0.
@@ -1201,8 +1201,9 @@ def main():
         module.fail_json(vmid=vmid, msg='VM with vmid = %s does not exist in cluster' % vmid)

     # Ensure the choosen VM name doesn't already exist when cloning
-    if get_vmid(proxmox, name):
-        module.exit_json(changed=False, vmid=vmid, msg="VM with name <%s> already exists" % name)
+    existing_vmid = get_vmid(proxmox, name)
+    if existing_vmid:
+        module.exit_json(changed=False, vmid=existing_vmid[0], msg="VM with name <%s> already exists" % name)

     # Ensure the choosen VM id doesn't already exist when cloning
     if get_vm(proxmox, newid):
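The point of the fix: get_vmid returns a list of matching VM ids, so reporting vmid (the clone source) was misleading; the existing VM's own id is reported instead. A reduced illustration with a hypothetical stand-in for get_vmid:

    def get_vmid(proxmox, name):
        # hypothetical stand-in: returns the list of vmids whose name matches
        return [101] if name == 'existing-vm' else []

    existing_vmid = get_vmid(None, 'existing-vm')
    if existing_vmid:
        print("VM with name <existing-vm> already exists, vmid %s" % existing_vmid[0])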
plugins/modules/cloud/misc/proxmox_tasks_info.py (new file, 186 lines)
@@ -0,0 +1,186 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Andreas Botzner (@paginabianca) <andreas at botzner dot com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = r'''
---
module: proxmox_tasks_info
short_description: Retrieve information about one or more Proxmox VE tasks
version_added: 3.8.0
description:
  - Retrieve information about one or more Proxmox VE tasks.
author: 'Andreas Botzner (@paginabianca) <andreas at botzner dot com>'
options:
  node:
    description:
      - Node where to get tasks.
    required: true
    type: str
  task:
    description:
      - Return specific task.
    aliases: ['upid', 'name']
    type: str
extends_documentation_fragment:
  - community.general.proxmox.documentation
'''


EXAMPLES = '''
- name: List tasks on node01
  community.general.proxmox_task_info:
    api_host: proxmoxhost
    api_user: root@pam
    api_password: '{{ password | default(omit) }}'
    api_token_id: '{{ token_id | default(omit) }}'
    api_token_secret: '{{ token_secret | default(omit) }}'
    node: node01
  register: result

- name: Retrieve information about specific tasks on node01
  community.general.proxmox_task_info:
    api_host: proxmoxhost
    api_user: root@pam
    api_password: '{{ password | default(omit) }}'
    api_token_id: '{{ token_id | default(omit) }}'
    api_token_secret: '{{ token_secret | default(omit) }}'
    task: 'UPID:node01:00003263:16167ACE:621EE230:srvreload:networking:root@pam:'
    node: node01
  register: proxmox_tasks
'''


RETURN = '''
proxmox_tasks:
  description: List of tasks.
  returned: on success
  type: list
  elements: dict
  contains:
    id:
      description: ID of the task.
      returned: on success
      type: str
    node:
      description: Node name.
      returned: on success
      type: str
    pid:
      description: PID of the task.
      returned: on success
      type: int
    pstart:
      description: pstart of the task.
      returned: on success
      type: int
    starttime:
      description: Starting time of the task.
      returned: on success
      type: int
    type:
      description: Type of the task.
      returned: on success
      type: str
    upid:
      description: UPID of the task.
      returned: on success
      type: str
    user:
      description: User that owns the task.
      returned: on success
      type: str
    endtime:
      description: Endtime of the task.
      returned: on success, can be absent
      type: int
    status:
      description: Status of the task.
      returned: on success, can be absent
      type: str
    failed:
      description: If the task failed.
      returned: when status is defined
      type: bool
msg:
  description: Short message.
  returned: on failure
  type: str
  sample: 'Task: UPID:xyz:xyz does not exist on node: proxmoxnode'
'''

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible_collections.community.general.plugins.module_utils.proxmox import (
    proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR)


class ProxmoxTaskInfoAnsible(ProxmoxAnsible):
    def get_task(self, upid, node):
        tasks = self.get_tasks(node)
        for task in tasks:
            if task.info['upid'] == upid:
                return [task]

    def get_tasks(self, node):
        tasks = self.proxmox_api.nodes(node).tasks.get()
        return [ProxmoxTask(task) for task in tasks]


class ProxmoxTask:
    def __init__(self, task):
        self.info = dict()
        for k, v in task.items():
            if k == 'status' and isinstance(v, str):
                self.info[k] = v
                if v != 'OK':
                    self.info['failed'] = True
            else:
                self.info[k] = v


def proxmox_task_info_argument_spec():
    return dict(
        task=dict(type='str', aliases=['upid', 'name'], required=False),
        node=dict(type='str', required=True),
    )


def main():
    module_args = proxmox_auth_argument_spec()
    task_info_args = proxmox_task_info_argument_spec()
    module_args.update(task_info_args)

    module = AnsibleModule(
        argument_spec=module_args,
        required_together=[('api_token_id', 'api_token_secret'),
                           ('api_user', 'api_password')],
        required_one_of=[('api_password', 'api_token_id')],
        supports_check_mode=True)
    result = dict(changed=False)

    if not HAS_PROXMOXER:
        module.fail_json(msg=missing_required_lib(
            'proxmoxer'), exception=PROXMOXER_IMP_ERR)
    proxmox = ProxmoxTaskInfoAnsible(module)
    upid = module.params['task']
    node = module.params['node']
    if upid:
        tasks = proxmox.get_task(upid=upid, node=node)
    else:
        tasks = proxmox.get_tasks(node=node)
    if tasks is not None:
        result['proxmox_tasks'] = [task.info for task in tasks]
        module.exit_json(**result)
    else:
        result['msg'] = 'Task: {0} does not exist on node: {1}.'.format(
            upid, node)
        module.fail_json(**result)


if __name__ == '__main__':
    main()
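A reduced illustration (hypothetical task dict) of the status-to-failed mapping ProxmoxTask performs above: any string status other than OK marks the task as failed.

    task = {'upid': 'UPID:node01:00003263:16167ACE:621EE230:srvreload:networking:root@pam:',
            'status': 'stopped: unexpected status'}

    info = {}
    for k, v in task.items():
        info[k] = v
        if k == 'status' and isinstance(v, str) and v != 'OK':
            info['failed'] = True

    print(info['failed'])  # True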
@@ -137,6 +137,11 @@ options:
     type: bool
     default: false
     version_added: '3.3.0'
+  parallelism:
+    description:
+      - Restrict concurrent operations when Terraform applies the plan.
+    type: int
+    version_added: '3.8.0'
 notes:
   - To just run a `terraform plan`, use check mode.
 requirements: [ "terraform" ]
@@ -363,6 +368,7 @@ def main():
             init_reconfigure=dict(type='bool', default=False),
             overwrite_init=dict(type='bool', default=True),
             check_destroy=dict(type='bool', default=False),
+            parallelism=dict(type='int'),
         ),
         required_if=[('state', 'planned', ['plan_file'])],
         supports_check_mode=True,
@@ -415,6 +421,9 @@ def main():
     elif state == 'absent':
         command.extend(DESTROY_ARGS)

+    if state == 'present' and module.params.get('parallelism') is not None:
+        command.append('-parallelism=%d' % module.params.get('parallelism'))

     variables_args = []
     for k, v in variables.items():
         variables_args.extend([
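The effect of the new option is simply one more flag on the terraform command line; a reduced sketch (the surrounding flags are illustrative only, not the module's exact command):

    # hypothetical base command assembled for state=present
    command = ['terraform', 'apply', '-no-color', '-auto-approve']

    parallelism = 5  # stand-in for module.params.get('parallelism')
    if parallelism is not None:
        command.append('-parallelism=%d' % parallelism)

    print(' '.join(command))
    # terraform apply -no-color -auto-approve -parallelism=5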
@@ -213,8 +213,8 @@ class TemplateModule(OpenNebulaModule):
     def get_template_by_id(self, template_id):
         return self.get_template(lambda template: (template.ID == template_id))

-    def get_template_by_name(self, template_name):
-        return self.get_template(lambda template: (template.NAME == template_name))
+    def get_template_by_name(self, name):
+        return self.get_template(lambda template: (template.NAME == name))

     def get_template_instance(self, requested_id, requested_name):
         if requested_id:

@@ -509,11 +509,10 @@ def wait_for_devices_active(module, packet_conn, watched_devices):
 def wait_for_public_IPv(module, packet_conn, created_devices):

     def has_public_ip(addr_list, ip_v):
-        return any([a['public'] and a['address_family'] == ip_v and
-                    a['address'] for a in addr_list])
+        return any(a['public'] and a['address_family'] == ip_v and a['address'] for a in addr_list)

     def all_have_public_ip(ds, ip_v):
-        return all([has_public_ip(d.ip_addresses, ip_v) for d in ds])
+        return all(has_public_ip(d.ip_addresses, ip_v) for d in ds)

     address_family = module.params.get('wait_for_public_IPv')

@@ -168,7 +168,7 @@ def get_sshkey_selector(module):
             return k.key == select_dict['key']
         else:
             # if key string not specified, all the fields must match
-            return all([select_dict[f] == getattr(k, f) for f in select_dict])
+            return all(select_dict[f] == getattr(k, f) for f in select_dict)
     return selector
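Why the brackets were dropped in the three changes above: any() and all() can short-circuit when fed a generator expression, while a list comprehension always evaluates every element before the call sees the list. A small plain-Python illustration:

    def is_positive(n):
        print('checking', n)
        return n > 0

    nums = [-1, 2, 3]

    # list comprehension: is_positive runs for all three elements first
    any([is_positive(n) for n in nums])

    # generator expression: any() stops as soon as it sees a truthy result
    any(is_positive(n) for n in nums)  # checks -1 and 2 only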
@@ -21,6 +21,7 @@ description:
 requirements:
     - Python >= 2.6
     - Univention
+    - ipaddress (for I(type=ptr_record))
 options:
   state:
     type: str
@@ -34,11 +35,13 @@ options:
     description:
       - "Name of the record, this is also the DNS record. E.g. www for
          www.example.com."
+      - For PTR records this has to be the IP address.
   zone:
     type: str
     required: true
     description:
       - Corresponding DNS zone for this record, e.g. example.com.
+      - For PTR records this has to be the full reverse zone (for example C(1.1.192.in-addr.arpa)).
   type:
     type: str
     required: true
@@ -66,12 +69,29 @@ EXAMPLES = '''
     a:
       - 192.0.2.1
       - 2001:0db8::42

+- name: Create a DNS v4 PTR record on a UCS
+  community.general.udm_dns_record:
+    name: 192.0.2.1
+    zone: 2.0.192.in-addr.arpa
+    type: ptr_record
+    data:
+      ptr_record: "www.example.com."
+
+- name: Create a DNS v6 PTR record on a UCS
+  community.general.udm_dns_record:
+    name: 2001:db8:0:0:0:ff00:42:8329
+    zone: 2.4.0.0.0.0.f.f.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa
+    type: ptr_record
+    data:
+      ptr_record: "www.example.com."
 '''


 RETURN = '''#'''

 HAVE_UNIVENTION = False
+HAVE_IPADDRESS = False
 try:
     from univention.admin.handlers.dns import (
         forward_zone,
@@ -82,6 +102,7 @@ except ImportError:
     pass

 from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import missing_required_lib
 from ansible_collections.community.general.plugins.module_utils.univention_umc import (
     umc_module_for_add,
     umc_module_for_edit,
@@ -90,6 +111,11 @@ from ansible_collections.community.general.plugins.module_utils.univention_umc i
     config,
     uldap,
 )
+try:
+    import ipaddress
+    HAVE_IPADDRESS = True
+except ImportError:
+    pass


 def main():
@@ -124,14 +150,30 @@ def main():
     changed = False
     diff = None

+    workname = name
+    if type == 'ptr_record':
+        if not HAVE_IPADDRESS:
+            module.fail_json(msg=missing_required_lib('ipaddress'))
+        try:
+            if 'arpa' not in zone:
+                raise Exception("Zone must be reversed zone for ptr_record. (e.g. 1.1.192.in-addr.arpa)")
+            ipaddr_rev = ipaddress.ip_address(name).reverse_pointer
+            subnet_offset = ipaddr_rev.find(zone)
+            if subnet_offset == -1:
+                raise Exception("reversed IP address {0} is not part of zone.".format(ipaddr_rev))
+            workname = ipaddr_rev[0:subnet_offset - 1]
+        except Exception as e:
+            module.fail_json(
+                msg='handling PTR record for {0} in zone {1} failed: {2}'.format(name, zone, e)
+            )

     obj = list(ldap_search(
-        '(&(objectClass=dNSZone)(zoneName={0})(relativeDomainName={1}))'.format(zone, name),
+        '(&(objectClass=dNSZone)(zoneName={0})(relativeDomainName={1}))'.format(zone, workname),
         attr=['dNSZone']
     ))

     exists = bool(len(obj))
     container = 'zoneName={0},cn=dns,{1}'.format(zone, base_dn())
-    dn = 'relativeDomainName={0},{1}'.format(name, container)
+    dn = 'relativeDomainName={0},{1}'.format(workname, container)

     if state == 'present':
         try:
@@ -144,13 +186,21 @@ def main():
             ) or reverse_zone.lookup(
                 config(),
                 uldap(),
-                '(zone={0})'.format(zone),
+                '(zoneName={0})'.format(zone),
                 scope='domain',
             )
+            if len(so) == 0:
+                raise Exception("Did not find zone '{0}' in Univention".format(zone))
             obj = umc_module_for_add('dns/{0}'.format(type), container, superordinate=so[0])
         else:
             obj = umc_module_for_edit('dns/{0}'.format(type), dn)
-        obj['name'] = name

+        if type == 'ptr_record':
+            obj['ip'] = name
+            obj['address'] = workname
+        else:
+            obj['name'] = name

         for k, v in data.items():
             obj[k] = v
         diff = obj.diff()
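A quick standalone look (plain Python, stdlib ipaddress) at the reverse_pointer arithmetic the PTR handling relies on: the LDAP relativeDomainName is the reversed address with the zone suffix, and the dot joining them, stripped off.

    import ipaddress

    zone = '2.0.192.in-addr.arpa'
    ipaddr_rev = ipaddress.ip_address('192.0.2.1').reverse_pointer
    print(ipaddr_rev)                        # 1.2.0.192.in-addr.arpa

    subnet_offset = ipaddr_rev.find(zone)
    print(ipaddr_rev[0:subnet_offset - 1])   # 1  -> used as relativeDomainName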
plugins/modules/database/misc/redis_data.py (new file, 249 lines)
@@ -0,0 +1,249 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Andreas Botzner <andreas at botzner dot com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: redis_data
short_description: Set key value pairs in Redis
version_added: 3.7.0
description:
  - Set key value pairs in Redis database.
author: "Andreas Botzner (@paginabianca)"
options:
  key:
    description:
      - Database key.
    required: true
    type: str
  value:
    description:
      - Value that key should be set to.
    required: false
    type: str
  expiration:
    description:
      - Expiration time in milliseconds.
        Setting this flag will always result in a change in the database.
    required: false
    type: int
  non_existing:
    description:
      - Only set key if it does not already exist.
    required: false
    type: bool
  existing:
    description:
      - Only set key if it already exists.
    required: false
    type: bool
  keep_ttl:
    description:
      - Retain the time to live associated with the key.
    required: false
    type: bool
  state:
    description:
      - State of the key.
    default: present
    type: str
    choices:
      - present
      - absent

extends_documentation_fragment:
  - community.general.redis.documentation

seealso:
  - module: community.general.redis_data_info
  - module: community.general.redis
'''

EXAMPLES = '''
- name: Set key foo=bar on localhost with no username
  community.general.redis_data:
    login_host: localhost
    login_password: supersecret
    key: foo
    value: bar
    state: present

- name: Set key foo=bar if non existing with expiration of 30s
  community.general.redis_data:
    login_host: localhost
    login_password: supersecret
    key: foo
    value: bar
    non_existing: true
    expiration: 30000
    state: present

- name: Set key foo=bar if existing and keep current TTL
  community.general.redis_data:
    login_host: localhost
    login_password: supersecret
    key: foo
    value: bar
    existing: true
    keep_ttl: true

- name: Set key foo=bar on redishost with custom ca-cert file
  community.general.redis_data:
    login_host: redishost
    login_password: supersecret
    login_user: someuser
    validate_certs: true
    ssl_ca_certs: /path/to/ca/certs
    key: foo
    value: bar

- name: Delete key foo on localhost with no username
  community.general.redis_data:
    login_host: localhost
    login_password: supersecret
    key: foo
    state: absent
'''

RETURN = '''
old_value:
  description: Value of key before setting.
  returned: on_success if state is C(present) and key exists in database.
  type: str
  sample: 'old_value_of_key'
value:
  description: Value key was set to.
  returned: on success if state is C(present).
  type: str
  sample: 'new_value_of_key'
msg:
  description: A short message.
  returned: always
  type: str
  sample: 'Set key: foo to bar'
'''

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.redis import (
    fail_imports, redis_auth_argument_spec, RedisAnsible)


def main():
    redis_auth_args = redis_auth_argument_spec()
    module_args = dict(
        key=dict(type='str', required=True, no_log=False),
        value=dict(type='str', required=False),
        expiration=dict(type='int', required=False),
        non_existing=dict(type='bool', required=False),
        existing=dict(type='bool', required=False),
        keep_ttl=dict(type='bool', required=False),
        state=dict(type='str', default='present',
                   choices=['present', 'absent']),
    )
    module_args.update(redis_auth_args)

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True,
        required_if=[('state', 'present', ('value',))],
        mutually_exclusive=[['non_existing', 'existing'],
                            ['keep_ttl', 'expiration']],)
    fail_imports(module)

    redis = RedisAnsible(module)

    key = module.params['key']
    value = module.params['value']
    px = module.params['expiration']
    nx = module.params['non_existing']
    xx = module.params['existing']
    keepttl = module.params['keep_ttl']
    state = module.params['state']
    set_args = {'name': key, 'value': value, 'px': px,
                'nx': nx, 'xx': xx, 'keepttl': keepttl}

    result = {'changed': False}

    old_value = None
    try:
        old_value = redis.connection.get(key)
    except Exception as e:
        msg = 'Failed to get value of key: {0} with exception: {1}'.format(
            key, str(e))
        result['msg'] = msg
        module.fail_json(**result)

    if state == 'absent':
        if module.check_mode:
            if old_value is None:
                msg = 'Key: {0} not present'.format(key)
                result['msg'] = msg
                module.exit_json(**result)
            else:
                msg = 'Deleted key: {0}'.format(key)
                result['msg'] = msg
                module.exit_json(**result)
        try:
            ret = redis.connection.delete(key)
            if ret == 0:
                msg = 'Key: {0} not present'.format(key)
                result['msg'] = msg
                module.exit_json(**result)
            else:
                msg = 'Deleted key: {0}'.format(key)
                result['msg'] = msg
                result['changed'] = True
                module.exit_json(**result)
        except Exception as e:
            msg = 'Failed to delete key: {0} with exception: {1}'.format(
                key, str(e))
            result['msg'] = msg
            module.fail_json(**result)

    old_value = None
    try:
        old_value = redis.connection.get(key)
    except Exception as e:
        msg = 'Failed to get value of key: {0} with exception: {1}'.format(
            key, str(e))
        result['msg'] = msg
        module.fail_json(**result)

    result['old_value'] = old_value
    if old_value == value and keepttl is not False and px is None:
        msg = 'Key {0} already has desired value'.format(key)
        result['msg'] = msg
        result['value'] = value
        module.exit_json(**result)
    if module.check_mode:
        result['msg'] = 'Set key: {0}'.format(key)
        result['value'] = value
        module.exit_json(**result)
    try:
        ret = redis.connection.set(**set_args)
        if ret is None:
            if nx:
                msg = 'Could not set key: {0}. Key already present.'.format(
                    key)
            else:
                msg = 'Could not set key: {0}. Key not present.'.format(key)
            result['msg'] = msg
            module.fail_json(**result)
        msg = 'Set key: {0}'.format(key)
        result['msg'] = msg
        result['changed'] = True
        result['value'] = value
        module.exit_json(**result)
    except Exception as e:
        msg = 'Failed to set key: {0} with exception: {1}'.format(key, str(e))
        result['msg'] = msg
        module.fail_json(**result)


if __name__ == '__main__':
    main()
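The nx/xx/px/keepttl parameters map directly onto redis-py's set() call, which returns None when the nx or xx condition blocks the write; a minimal sketch (requires the redis package, connection details hypothetical):

    import redis

    r = redis.Redis(host='localhost', port=6379)

    # nx=True: set only if the key does not exist; returns None when it does
    print(r.set('foo', 'bar', nx=True))

    # xx=True with px: set only if the key exists, with a 30000 ms expiration
    print(r.set('foo', 'baz', xx=True, px=30000))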
plugins/modules/database/misc/redis_data_info.py (new file, 111 lines)
@@ -0,0 +1,111 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Andreas Botzner <andreas at botzner dot com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: redis_data_info
short_description: Get value of key in Redis database
version_added: 3.7.0
description:
  - Get value of keys in Redis database.
author: "Andreas Botzner (@paginabianca)"
options:
  key:
    description:
      - Database key.
    type: str
    required: true

extends_documentation_fragment:
  - community.general.redis

seealso:
  - module: community.general.redis_info
  - module: community.general.redis
'''

EXAMPLES = '''
- name: Get key foo=bar from localhost with no username
  community.general.redis_data_info:
    login_host: localhost
    login_password: supersecret
    key: foo

- name: Get key foo=bar on redishost with custom ca-cert file
  community.general.redis_data_info:
    login_host: redishost
    login_password: supersecret
    login_user: someuser
    validate_certs: true
    ssl_ca_certs: /path/to/ca/certs
    key: foo
'''

RETURN = '''
exists:
  description: If the key exists in the database.
  returned: on success
  type: bool
value:
  description: Value key was set to.
  returned: if existing
  type: str
  sample: 'value_of_some_key'
msg:
  description: A short message.
  returned: always
  type: str
  sample: 'Got key: foo with value: bar'
'''

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.redis import (
    fail_imports, redis_auth_argument_spec, RedisAnsible)


def main():
    redis_auth_args = redis_auth_argument_spec()
    module_args = dict(
        key=dict(type='str', required=True, no_log=False),
    )
    module_args.update(redis_auth_args)

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True,
    )
    fail_imports(module)

    redis = RedisAnsible(module)

    key = module.params['key']
    result = {'changed': False}

    value = None
    try:
        value = redis.connection.get(key)
    except Exception as e:
        msg = 'Failed to get value of key "{0}" with exception: {1}'.format(
            key, str(e))
        result['msg'] = msg
        module.fail_json(**result)

    if value is None:
        msg = 'Key "{0}" does not exist in database'.format(key)
        result['exists'] = False
    else:
        msg = 'Got key "{0}"'.format(key)
        result['value'] = value
        result['exists'] = True
    result['msg'] = msg
    module.exit_json(**result)


if __name__ == '__main__':
    main()
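The exists flag falls straight out of redis-py semantics: get() returns None for a missing key. A minimal sketch (requires the redis package, connection details hypothetical):

    import redis

    r = redis.Redis(host='localhost', port=6379)
    value = r.get('some-key')
    print({'exists': value is not None, 'value': value})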
@@ -47,7 +47,18 @@ options:
     description:
       - The string value to be associated with an I(option).
       - May be omitted when removing an I(option).
+      - Mutually exclusive with I(values).
+      - I(value=v) is equivalent to I(values=[v]).
     type: str
+  values:
+    description:
+      - The string value to be associated with an I(option).
+      - May be omitted when removing an I(option).
+      - Mutually exclusive with I(value).
+      - I(value=v) is equivalent to I(values=[v]).
+    type: list
+    elements: str
+    version_added: 3.6.0
   backup:
     description:
       - Create a backup file including the timestamp information so you can get
@@ -56,10 +67,25 @@ options:
     default: no
   state:
     description:
-      - If set to C(absent) the option or section will be removed if present instead of created.
+      - If set to C(absent) and I(exclusive) set to C(yes) all matching I(option) lines are removed.
+      - If set to C(absent) and I(exclusive) set to C(no) the specified C(option=value) lines are removed,
+        but the other I(option)s with the same name are not touched.
+      - If set to C(present) and I(exclusive) set to C(no) the specified C(option=values) lines are added,
+        but the other I(option)s with the same name are not touched.
+      - If set to C(present) and I(exclusive) set to C(yes) all given C(option=values) lines will be
+        added and the other I(option)s with the same name are removed.
     type: str
     choices: [ absent, present ]
     default: present
+  exclusive:
+    description:
+      - If set to C(yes) (default), all matching I(option) lines are removed when I(state=absent),
+        or replaced when I(state=present).
+      - If set to C(no), only the specified I(value(s)) are added when I(state=present),
+        or removed when I(state=absent), and existing ones are not modified.
+    type: bool
+    default: yes
+    version_added: 3.6.0
   no_extra_spaces:
     description:
       - Do not insert spaces before and after '=' symbol.
@@ -103,6 +129,27 @@ EXAMPLES = r'''
     option: temperature
     value: cold
     backup: yes

+- name: Add "beverage=lemon juice" is in section "[drinks]" in specified file
+  community.general.ini_file:
+    path: /etc/conf
+    section: drinks
+    option: beverage
+    value: lemon juice
+    mode: '0600'
+    state: present
+    exclusive: no
+
+- name: Ensure multiple values "beverage=coke" and "beverage=pepsi" are in section "[drinks]" in specified file
+  community.general.ini_file:
+    path: /etc/conf
+    section: drinks
+    option: beverage
+    values:
+      - coke
+      - pepsi
+    mode: '0600'
+    state: present
 '''

 import io
@@ -117,24 +164,37 @@ from ansible.module_utils.common.text.converters import to_bytes, to_text

 def match_opt(option, line):
     option = re.escape(option)
-    return re.match('[#;]?( |\t)*%s( |\t)*(=|$)' % option, line)
+    return re.match('[#;]?( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)


 def match_active_opt(option, line):
     option = re.escape(option)
-    return re.match('( |\t)*%s( |\t)*(=|$)' % option, line)
+    return re.match('( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)


-def do_ini(module, filename, section=None, option=None, value=None,
-           state='present', backup=False, no_extra_spaces=False, create=True,
-           allow_no_value=False):
+def update_section_line(changed, section_lines, index, changed_lines, newline, msg):
+    option_changed = section_lines[index] != newline
+    changed = changed or option_changed
+    if option_changed:
+        msg = 'option changed'
+    section_lines[index] = newline
+    changed_lines[index] = 1
+    return (changed, msg)


+def do_ini(module, filename, section=None, option=None, values=None,
+           state='present', exclusive=True, backup=False, no_extra_spaces=False,
+           create=True, allow_no_value=False):

     if section is not None:
         section = to_text(section)
     if option is not None:
         option = to_text(option)
-    if value is not None:
-        value = to_text(value)

+    # deduplicate entries in values
+    values_unique = []
+    [values_unique.append(to_text(value)) for value in values if value not in values_unique and value is not None]
+    values = values_unique

     diff = dict(
         before='',
@@ -145,7 +205,7 @@ def do_ini(module, filename, section=None, option=None, values=None,

     if not os.path.exists(filename):
         if not create:
-            module.fail_json(rc=257, msg='Destination %s does not exist !' % filename)
+            module.fail_json(rc=257, msg='Destination %s does not exist!' % filename)
         destpath = os.path.dirname(filename)
         if not os.path.exists(destpath) and not module.check_mode:
             os.makedirs(destpath)
@@ -185,74 +245,134 @@ def do_ini(module, filename, section=None, option=None, values=None,
         section = fake_section_name

     within_section = not section
-    section_start = 0
+    section_start = section_end = 0
     msg = 'OK'
     if no_extra_spaces:
         assignment_format = u'%s=%s\n'
     else:
         assignment_format = u'%s = %s\n'

+    option_no_value_present = False

     non_blank_non_comment_pattern = re.compile(to_text(r'^[ \t]*([#;].*)?$'))

+    before = after = []
+    section_lines = []

     for index, line in enumerate(ini_lines):
         # find start and end of section
         if line.startswith(u'[%s]' % section):
             within_section = True
             section_start = index
         elif line.startswith(u'['):
             if within_section:
-                if state == 'present':
-                    # insert missing option line at the end of the section
-                    for i in range(index, 0, -1):
-                        # search backwards for previous non-blank or non-comment line
-                        if not non_blank_non_comment_pattern.match(ini_lines[i - 1]):
-                            if option and value is not None:
-                                ini_lines.insert(i, assignment_format % (option, value))
-                                msg = 'option added'
-                                changed = True
-                            elif option and value is None and allow_no_value:
-                                ini_lines.insert(i, '%s\n' % option)
-                                msg = 'option added'
-                                changed = True
-                            break
-                elif state == 'absent' and not option:
-                    # remove the entire section
-                    del ini_lines[section_start:index]
-                    msg = 'section removed'
+                section_end = index
                 break

+    before = ini_lines[0:section_start]
+    section_lines = ini_lines[section_start:section_end]
+    after = ini_lines[section_end:len(ini_lines)]

+    # Keep track of changed section_lines
+    changed_lines = [0] * len(section_lines)

+    # handling multiple instances of option=value when state is 'present' with/without exclusive is a bit complex
+    #
+    # 1. edit all lines where we have a option=value pair with a matching value in values[]
+    # 2. edit all the remaing lines where we have a matching option
+    # 3. delete remaining lines where we have a matching option
+    # 4. insert missing option line(s) at the end of the section

+    if state == 'present' and option:
+        for index, line in enumerate(section_lines):
+            if match_opt(option, line):
+                match = match_opt(option, line)
+                if values and match.group(6) in values:
+                    matched_value = match.group(6)
+                    if not matched_value and allow_no_value:
+                        # replace existing option with no value line(s)
+                        newline = u'%s\n' % option
+                        option_no_value_present = True
+                    else:
+                        # replace existing option=value line(s)
+                        newline = assignment_format % (option, matched_value)
+                    (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg)
+                    values.remove(matched_value)
+                elif not values and allow_no_value:
+                    # replace existing option with no value line(s)
+                    newline = u'%s\n' % option
+                    (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg)
+                    option_no_value_present = True
+                    break

+    if state == 'present' and exclusive and not allow_no_value:
+        # override option with no value to option with value if not allow_no_value
+        if len(values) > 0:
+            for index, line in enumerate(section_lines):
+                if not changed_lines[index] and match_active_opt(option, section_lines[index]):
+                    newline = assignment_format % (option, values.pop(0))
+                    (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg)
+                    if len(values) == 0:
+                        break
+        # remove all remaining option occurrences from the rest of the section
+        for index in range(len(section_lines) - 1, 0, -1):
+            if not changed_lines[index] and match_active_opt(option, section_lines[index]):
+                del section_lines[index]
+                del changed_lines[index]
+                changed = True
+                msg = 'option changed'

+    if state == 'present':
+        # insert missing option line(s) at the end of the section
+        for index in range(len(section_lines), 0, -1):
+            # search backwards for previous non-blank or non-comment line
+            if not non_blank_non_comment_pattern.match(section_lines[index - 1]):
+                if option and values:
+                    # insert option line(s)
+                    for element in values[::-1]:
+                        # items are added backwards, so traverse the list backwards to not confuse the user
+                        # otherwise some of their options might appear in reverse order for whatever fancy reason ¯\_(ツ)_/¯
+                        if element is not None:
+                            # insert option=value line
+                            section_lines.insert(index, assignment_format % (option, element))
+                            msg = 'option added'
+                            changed = True
+                        elif element is None and allow_no_value:
+                            # insert option with no value line
+                            section_lines.insert(index, u'%s\n' % option)
+                            msg = 'option added'
+                            changed = True
+                elif option and not values and allow_no_value and not option_no_value_present:
+                    # insert option with no value line(s)
+                    section_lines.insert(index, u'%s\n' % option)
+                    msg = 'option added'
+                    changed = True
+                break

+    if state == 'absent':
+        if option:
+            if exclusive:
+                # delete all option line(s) with given option and ignore value
+                new_section_lines = [line for line in section_lines if not (match_active_opt(option, line))]
+                if section_lines != new_section_lines:
+                    changed = True
+                    msg = 'option changed'
+                    section_lines = new_section_lines
+            elif not exclusive and len(values) > 0:
+                # delete specified option=value line(s)
+                new_section_lines = [i for i in section_lines if not (match_active_opt(option, i) and match_active_opt(option, i).group(6) in values)]
+                if section_lines != new_section_lines:
+                    changed = True
+                    msg = 'option changed'
+                    section_lines = new_section_lines
         else:
-            if within_section and option:
-                if state == 'present':
-                    # change the existing option line
-                    if match_opt(option, line):
-                        if value is None and allow_no_value:
-                            newline = u'%s\n' % option
-                        else:
-                            newline = assignment_format % (option, value)
-                        option_changed = ini_lines[index] != newline
-                        changed = changed or option_changed
-                        if option_changed:
-                            msg = 'option changed'
-                        ini_lines[index] = newline
-                        if option_changed:
-                            # remove all possible option occurrences from the rest of the section
-                            index = index + 1
-                            while index < len(ini_lines):
-                                line = ini_lines[index]
-                                if line.startswith(u'['):
-                                    break
-                                if match_active_opt(option, line):
-                                    del ini_lines[index]
-                                else:
-                                    index = index + 1
-                            break
-                elif state == 'absent':
-                    # delete the existing line
-                    if match_active_opt(option, line):
-                        del ini_lines[index]
-                        changed = True
-                        msg = 'option changed'
-                        break
+            # drop the entire section
+            section_lines = []
+            msg = 'section removed'
+            changed = True

+    # reassemble the ini_lines after manipulation
+    ini_lines = before + section_lines + after

     # remove the fake section line
     del ini_lines[0]
@@ -261,9 +381,10 @@ def do_ini(module, filename, section=None, option=None, values=None,
     if not within_section and state == 'present':
         ini_lines.append(u'[%s]\n' % section)
         msg = 'section and option added'
-        if option and value is not None:
-            ini_lines.append(assignment_format % (option, value))
-        elif option and value is None and allow_no_value:
+        if option and values:
+            for value in values:
+                ini_lines.append(assignment_format % (option, value))
+        elif option and not values and allow_no_value:
             ini_lines.append(u'%s\n' % option)
         else:
             msg = 'only section added'
@@ -303,12 +424,17 @@ def main():
             section=dict(type='str', required=True),
             option=dict(type='str'),
             value=dict(type='str'),
+            values=dict(type='list', elements='str'),
             backup=dict(type='bool', default=False),
             state=dict(type='str', default='present', choices=['absent', 'present']),
+            exclusive=dict(type='bool', default=True),
             no_extra_spaces=dict(type='bool', default=False),
             allow_no_value=dict(type='bool', default=False),
             create=dict(type='bool', default=True)
         ),
+        mutually_exclusive=[
+            ['value', 'values']
+        ],
         add_file_common_args=True,
         supports_check_mode=True,
     )
@@ -317,16 +443,23 @@ def main():
     section = module.params['section']
     option = module.params['option']
     value = module.params['value']
+    values = module.params['values']
     state = module.params['state']
+    exclusive = module.params['exclusive']
     backup = module.params['backup']
     no_extra_spaces = module.params['no_extra_spaces']
     allow_no_value = module.params['allow_no_value']
     create = module.params['create']

-    if state == 'present' and not allow_no_value and value is None:
-        module.fail_json("Parameter 'value' must be defined if state=present and allow_no_value=False")
+    if state == 'present' and not allow_no_value and value is None and not values:
+        module.fail_json(msg="Parameter 'value(s)' must be defined if state=present and allow_no_value=False.")

-    (changed, backup_file, diff, msg) = do_ini(module, path, section, option, value, state, backup, no_extra_spaces, create, allow_no_value)
+    if value is not None:
+        values = [value]
+    elif values is None:
+        values = []

+    (changed, backup_file, diff, msg) = do_ini(module, path, section, option, values, state, exclusive, backup, no_extra_spaces, create, allow_no_value)

     if not module.check_mode and os.path.exists(path):
         file_args = module.load_file_common_arguments(module.params)
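A small illustration (plain Python) of what the widened regular expressions capture: group 6 now holds the value portion of an option line, which the multi-value logic above matches against values[].

    import re

    def match_active_opt(option, line):
        option = re.escape(option)
        return re.match('( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)

    m = match_active_opt('beverage', 'beverage = coke')
    print(m.group(6))  # 'coke' -- the value part of the option line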
@@ -72,6 +72,12 @@ options:
     aliases: ["searchtimelimit"]
     type: int
     version_added: '2.5.0'
+  ipaselinuxusermaporder:
+    description: The SELinux user map order (order in increasing priority of SELinux users).
+    aliases: ["selinuxusermaporder"]
+    type: list
+    elements: str
+    version_added: '3.7.0'
   ipauserauthtype:
     description: The authentication type to use by default.
     aliases: ["userauthtype"]
@@ -181,6 +187,18 @@ EXAMPLES = r'''
     ipa_host: localhost
     ipa_user: admin
     ipa_pass: supersecret

+- name: Ensure the SELinux user map order is set
+  community.general.ipa_config:
+    ipaselinuxusermaporder:
+      - "guest_u:s0"
+      - "xguest_u:s0"
+      - "user_u:s0"
+      - "staff_u:s0-s0:c0.c1023"
+      - "unconfined_u:s0-s0:c0.c1023"
+    ipa_host: localhost
+    ipa_user: admin
+    ipa_pass: supersecret
 '''

 RETURN = r'''
@@ -213,8 +231,8 @@ def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None,
                     ipagroupsearchfields=None, ipahomesrootdir=None,
                     ipakrbauthzdata=None, ipamaxusernamelength=None,
                     ipapwdexpadvnotify=None, ipasearchrecordslimit=None,
-                    ipasearchtimelimit=None, ipauserauthtype=None,
-                    ipausersearchfields=None):
+                    ipasearchtimelimit=None, ipaselinuxusermaporder=None,
+                    ipauserauthtype=None, ipausersearchfields=None):
     config = {}
     if ipaconfigstring is not None:
         config['ipaconfigstring'] = ipaconfigstring
@@ -238,6 +256,8 @@ def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None,
         config['ipasearchrecordslimit'] = str(ipasearchrecordslimit)
     if ipasearchtimelimit is not None:
         config['ipasearchtimelimit'] = str(ipasearchtimelimit)
+    if ipaselinuxusermaporder is not None:
+        config['ipaselinuxusermaporder'] = '$'.join(ipaselinuxusermaporder)
     if ipauserauthtype is not None:
         config['ipauserauthtype'] = ipauserauthtype
     if ipausersearchfields is not None:
@@ -263,6 +283,7 @@ def ensure(module, client):
         ipapwdexpadvnotify=module.params.get('ipapwdexpadvnotify'),
         ipasearchrecordslimit=module.params.get('ipasearchrecordslimit'),
         ipasearchtimelimit=module.params.get('ipasearchtimelimit'),
+        ipaselinuxusermaporder=module.params.get('ipaselinuxusermaporder'),
         ipauserauthtype=module.params.get('ipauserauthtype'),
         ipausersearchfields=module.params.get('ipausersearchfields'),
     )
@@ -304,6 +325,8 @@ def main():
         ipapwdexpadvnotify=dict(type='int', aliases=['pwdexpadvnotify']),
         ipasearchrecordslimit=dict(type='int', aliases=['searchrecordslimit']),
         ipasearchtimelimit=dict(type='int', aliases=['searchtimelimit']),
+        ipaselinuxusermaporder=dict(type='list', elements='str',
+                                    aliases=['selinuxusermaporder']),
         ipauserauthtype=dict(type='list', elements='str',
                              aliases=['userauthtype'],
                              choices=["password", "radius", "otp", "pkinit",
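FreeIPA stores the SELinux user map order as a single '$'-separated string, which is exactly what the '$'.join() above produces; a one-line illustration in plain Python:

    order = ["guest_u:s0", "xguest_u:s0", "user_u:s0"]
    print('$'.join(order))  # guest_u:s0$xguest_u:s0$user_u:s0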
@@ -195,7 +195,6 @@ def create_or_update_executions(kc, config, realm='master'):
     :param kc: Keycloak API access.
     :param config: Representation of the authentication flow including it's executions.
     :param realm: Realm
-    :return: True if executions have been modified. False otherwise.
     :return: tuple (changed, dict(before, after)
         WHERE
         bool changed indicates if changes have been made
@@ -235,10 +234,14 @@ def create_or_update_executions(kc, config, realm='master'):
             elif new_exec["providerId"] is not None:
                 kc.create_execution(new_exec, flowAlias=flow_alias_parent, realm=realm)
                 exec_found = True
+                exec_index = new_exec_index
+                id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"]
                 after += str(new_exec) + '\n'
             elif new_exec["displayName"] is not None:
                 kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm)
                 exec_found = True
+                exec_index = new_exec_index
+                id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"]
                 after += str(new_exec) + '\n'
             if exec_found:
                 changed = True
plugins/modules/identity/keycloak/keycloak_identity_provider.py (new file, 645 lines)
@@ -0,0 +1,645 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: keycloak_identity_provider

short_description: Allows administration of Keycloak identity providers via Keycloak API

version_added: 3.6.0

description:
    - This module allows you to add, remove or modify Keycloak identity providers via the Keycloak REST API.
      It requires access to the REST API via OpenID Connect; the user connecting and the client being
      used must have the requisite access rights. In a default Keycloak installation, admin-cli
      and an admin user would work, as would a separate client definition with the scope tailored
      to your needs and a user having the expected roles.

    - The names of module options are snake_cased versions of the camelCase ones found in the
      Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/15.0/rest-api/index.html).


options:
    state:
        description:
            - State of the identity provider.
            - On C(present), the identity provider will be created if it does not yet exist, or updated with the parameters you provide.
            - On C(absent), the identity provider will be removed if it exists.
        default: 'present'
        type: str
        choices:
            - present
            - absent

    realm:
        description:
            - The Keycloak realm under which this identity provider resides.
        default: 'master'
        type: str

    alias:
        description:
            - The alias uniquely identifies an identity provider and it is also used to build the redirect URI.
        required: true
        type: str

    display_name:
        description:
            - Friendly name for identity provider.
        aliases:
            - displayName
        type: str

    enabled:
        description:
            - Enable/disable this identity provider.
        type: bool

    store_token:
        description:
            - Enable/disable whether tokens must be stored after authenticating users.
        aliases:
            - storeToken
        type: bool

    add_read_token_role_on_create:
        description:
            - Enable/disable whether new users can read any stored tokens. This assigns the C(broker.read-token) role.
        aliases:
            - addReadTokenRoleOnCreate
        type: bool

    trust_email:
        description:
            - If enabled, email provided by this provider is not verified even if verification is enabled for the realm.
        aliases:
            - trustEmail
        type: bool

    link_only:
        description:
            - If true, users cannot log in through this provider. They can only link to this provider.
              This is useful if you don't want to allow login from the provider, but want to integrate with a provider.
        aliases:
            - linkOnly
        type: bool

    first_broker_login_flow_alias:
        description:
            - Alias of authentication flow, which is triggered after first login with this identity provider.
        aliases:
            - firstBrokerLoginFlowAlias
        type: str

    post_broker_login_flow_alias:
        description:
            - Alias of authentication flow, which is triggered after each login with this identity provider.
        aliases:
            - postBrokerLoginFlowAlias
        type: str

    authenticate_by_default:
        description:
            - Specifies if this identity provider should be used by default for authentication even before displaying login screen.
        aliases:
            - authenticateByDefault
        type: bool

    provider_id:
        description:
            - Protocol used by this provider (supported values are C(oidc) or C(saml)).
        aliases:
            - providerId
        type: str

    config:
        description:
            - Dict specifying the configuration options for the provider; the contents differ depending on the value of I(providerId).
              Examples are given below for C(oidc) and C(saml). It is easiest to obtain valid config values by dumping an already-existing
              identity provider configuration through check-mode in the I(existing) field.
        type: dict
        suboptions:
            hide_on_login_page:
                description:
                    - If hidden, login with this provider is possible only if requested explicitly, for example using the C(kc_idp_hint) parameter.
                aliases:
                    - hideOnLoginPage
                type: bool

            gui_order:
                description:
                    - Number defining order of the provider in GUI (for example, on Login page).
                aliases:
                    - guiOrder
                type: int

            sync_mode:
                description:
                    - Default sync mode for all mappers. The sync mode determines when user data will be synced using the mappers.
                aliases:
                    - syncMode
                type: str

            issuer:
                description:
                    - The issuer identifier for the issuer of the response. If not provided, no validation will be performed.
                type: str

            authorizationUrl:
                description:
                    - The Authorization URL.
                type: str

            tokenUrl:
                description:
                    - The Token URL.
                type: str

            logoutUrl:
                description:
                    - End session endpoint to use to logout user from external IDP.
                type: str

            userInfoUrl:
                description:
                    - The User Info URL.
                type: str

            clientAuthMethod:
                description:
                    - The client authentication method.
                type: str

            clientId:
                description:
                    - The client or client identifier registered within the identity provider.
                type: str

            clientSecret:
                description:
                    - The client or client secret registered within the identity provider.
                type: str

            defaultScope:
                description:
                    - The scopes to be sent when asking for authorization.
                type: str

            validateSignature:
                description:
                    - Enable/disable signature validation of external IDP signatures.
                type: bool

            useJwksUrl:
                description:
                    - If the switch is on, identity provider public keys will be downloaded from given JWKS URL.
                type: bool

            jwksUrl:
                description:
                    - URL where identity provider keys in JWK format are stored. See JWK specification for more details.
                type: str

            entityId:
                description:
                    - The Entity ID that will be used to uniquely identify this SAML Service Provider.
                type: str

            singleSignOnServiceUrl:
                description:
                    - The URL that must be used to send authentication requests (SAML AuthnRequest).
                type: str

            singleLogoutServiceUrl:
                description:
                    - The URL that must be used to send logout requests.
                type: str

            backchannelSupported:
                description:
                    - Does the external IDP support backchannel logout?
                type: str

            nameIDPolicyFormat:
                description:
                    - Specifies the URI reference corresponding to a name identifier format.
                type: str

            principalType:
                description:
                    - Way to identify and track external users from the assertion.
                type: str

    mappers:
        description:
            - A list of dicts defining mappers associated with this Identity Provider.
        type: list
        elements: dict
        suboptions:
            id:
                description:
                    - Unique ID of this mapper.
                type: str

            name:
                description:
                    - Name of the mapper.
                type: str

            identityProviderAlias:
                description:
                    - Alias of the identity provider for this mapper.
                type: str

            identityProviderMapper:
                description:
                    - Type of mapper.
                type: str

            config:
                description:
                    - Dict specifying the configuration options for the mapper; the contents differ depending on the value of I(identityProviderMapper).
                type: dict

extends_documentation_fragment:
    - community.general.keycloak
|
||||
|
||||
author:
|
||||
- Laurent Paumier (@laurpaum)
|
||||
'''

EXAMPLES = '''
- name: Create OIDC identity provider, authentication with credentials
  community.general.keycloak_identity_provider:
    state: present
    auth_keycloak_url: https://auth.example.com/auth
    auth_realm: master
    auth_username: admin
    auth_password: admin
    realm: myrealm
    alias: oidc-idp
    display_name: OpenID Connect IdP
    enabled: true
    provider_id: oidc
    config:
      issuer: https://idp.example.com
      authorizationUrl: https://idp.example.com/auth
      tokenUrl: https://idp.example.com/token
      userInfoUrl: https://idp.example.com/userinfo
      clientAuthMethod: client_secret_post
      clientId: my-client
      clientSecret: secret
      syncMode: FORCE
    mappers:
      - name: first_name
        identityProviderMapper: oidc-user-attribute-idp-mapper
        config:
          claim: first_name
          user.attribute: first_name
          syncMode: INHERIT
      - name: last_name
        identityProviderMapper: oidc-user-attribute-idp-mapper
        config:
          claim: last_name
          user.attribute: last_name
          syncMode: INHERIT

- name: Create SAML identity provider, authentication with credentials
  community.general.keycloak_identity_provider:
    state: present
    auth_keycloak_url: https://auth.example.com/auth
    auth_realm: master
    auth_username: admin
    auth_password: admin
    realm: myrealm
    alias: saml-idp
    display_name: SAML IdP
    enabled: true
    provider_id: saml
    config:
      entityId: https://auth.example.com/auth/realms/myrealm
      singleSignOnServiceUrl: https://idp.example.com/login
      wantAuthnRequestsSigned: true
      wantAssertionsSigned: true
    mappers:
      - name: roles
        identityProviderMapper: saml-user-attribute-idp-mapper
        config:
          user.attribute: roles
          attribute.friendly.name: User Roles
          attribute.name: roles
          syncMode: INHERIT
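
# The following removal task is not part of the upstream EXAMPLES block; it is a
# minimal sketch based on the documented I(state=absent) behavior of this module.
- name: Delete OIDC identity provider
  community.general.keycloak_identity_provider:
    state: absent
    auth_keycloak_url: https://auth.example.com/auth
    auth_realm: master
    auth_username: admin
    auth_password: admin
    realm: myrealm
    alias: oidc-idp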
'''

RETURN = '''
msg:
    description: Message as to what action was taken
    returned: always
    type: str
    sample: "Identity provider my-idp has been created"

proposed:
    description: Representation of proposed changes to identity provider
    returned: always
    type: dict
    sample: {
        "config": {
            "authorizationUrl": "https://idp.example.com/auth",
            "clientAuthMethod": "client_secret_post",
            "clientId": "my-client",
            "clientSecret": "secret",
            "issuer": "https://idp.example.com",
            "tokenUrl": "https://idp.example.com/token",
            "userInfoUrl": "https://idp.example.com/userinfo"
        },
        "displayName": "OpenID Connect IdP",
        "providerId": "oidc"
    }

existing:
    description: Representation of existing identity provider
    returned: always
    type: dict
    sample: {
        "addReadTokenRoleOnCreate": false,
        "alias": "my-idp",
        "authenticateByDefault": false,
        "config": {
            "authorizationUrl": "https://old.example.com/auth",
            "clientAuthMethod": "client_secret_post",
            "clientId": "my-client",
            "clientSecret": "**********",
            "issuer": "https://old.example.com",
            "syncMode": "FORCE",
            "tokenUrl": "https://old.example.com/token",
            "userInfoUrl": "https://old.example.com/userinfo"
        },
        "displayName": "OpenID Connect IdP",
        "enabled": true,
        "firstBrokerLoginFlowAlias": "first broker login",
        "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c",
        "linkOnly": false,
        "providerId": "oidc",
        "storeToken": false,
        "trustEmail": false
    }

end_state:
    description: Representation of identity provider after module execution
    returned: always
    type: dict
    sample: {
        "addReadTokenRoleOnCreate": false,
        "alias": "my-idp",
        "authenticateByDefault": false,
        "config": {
            "authorizationUrl": "https://idp.example.com/auth",
            "clientAuthMethod": "client_secret_post",
            "clientId": "my-client",
            "clientSecret": "**********",
            "issuer": "https://idp.example.com",
            "tokenUrl": "https://idp.example.com/token",
            "userInfoUrl": "https://idp.example.com/userinfo"
        },
        "displayName": "OpenID Connect IdP",
        "enabled": true,
        "firstBrokerLoginFlowAlias": "first broker login",
        "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c",
        "linkOnly": false,
        "providerId": "oidc",
        "storeToken": false,
        "trustEmail": false
    }

'''

from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
    keycloak_argument_spec, get_token, KeycloakError
from ansible.module_utils.basic import AnsibleModule
from copy import deepcopy


def sanitize(idp):
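    # Redact the client secret (if present) so proposed/existing/diff output does not leak credentials.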
    idpcopy = deepcopy(idp)
    if 'config' in idpcopy:
        if 'clientSecret' in idpcopy['config']:
            idpcopy['config']['clientSecret'] = '**********'
    return idpcopy


def get_identity_provider_with_mappers(kc, alias, realm):
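    # Fetch the IdP representation and attach its mappers, sorted by name so that
    # comparisons against the desired mapper list are independent of API ordering.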
    idp = kc.get_identity_provider(alias, realm)
    if idp is not None:
        idp['mappers'] = sorted(kc.get_identity_provider_mappers(alias, realm), key=lambda x: x.get('name'))
    if idp is None:
        idp = dict()
    return idp


def main():
    """
    Module execution

    :return:
    """
    argument_spec = keycloak_argument_spec()

    mapper_spec = dict(
        id=dict(type='str'),
        name=dict(type='str'),
        identityProviderAlias=dict(type='str'),
        identityProviderMapper=dict(type='str'),
        config=dict(type='dict'),
    )

    meta_args = dict(
        state=dict(type='str', default='present', choices=['present', 'absent']),
        realm=dict(type='str', default='master'),
        alias=dict(type='str', required=True),
        add_read_token_role_on_create=dict(type='bool', aliases=['addReadTokenRoleOnCreate']),
        authenticate_by_default=dict(type='bool', aliases=['authenticateByDefault']),
        config=dict(type='dict'),
        display_name=dict(type='str', aliases=['displayName']),
        enabled=dict(type='bool'),
        first_broker_login_flow_alias=dict(type='str', aliases=['firstBrokerLoginFlowAlias']),
        link_only=dict(type='bool', aliases=['linkOnly']),
        post_broker_login_flow_alias=dict(type='str', aliases=['postBrokerLoginFlowAlias']),
        provider_id=dict(type='str', aliases=['providerId']),
        store_token=dict(type='bool', aliases=['storeToken']),
        trust_email=dict(type='bool', aliases=['trustEmail']),
        mappers=dict(type='list', elements='dict', options=mapper_spec),
    )

    argument_spec.update(meta_args)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
                           required_together=([['auth_realm', 'auth_username', 'auth_password']]))

    result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})

    # Obtain access token, initialize API
    try:
        connection_header = get_token(module.params)
    except KeycloakError as e:
        module.fail_json(msg=str(e))

    kc = KeycloakAPI(module, connection_header)

    realm = module.params.get('realm')
    alias = module.params.get('alias')
    state = module.params.get('state')

    # convert module parameters to client representation parameters (if they belong in there)
    idp_params = [x for x in module.params
                  if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers'] and
                  module.params.get(x) is not None]

    # does the identity provider already exist?
    before_idp = get_identity_provider_with_mappers(kc, alias, realm)

    # build a changeset
    changeset = dict()
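
    # Module params are snake_case while the Keycloak API uses camelCase; camel()
    # bridges the two, for example first_broker_login_flow_alias -> firstBrokerLoginFlowAlias.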
    for param in idp_params:
        new_param_value = module.params.get(param)
        old_value = before_idp[camel(param)] if camel(param) in before_idp else None
        if new_param_value != old_value:
            changeset[camel(param)] = new_param_value

    # special handling of mappers list to allow change detection
    if module.params.get('mappers') is not None:
        for change in module.params['mappers']:
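            # drop keys left at None so a partial mapper spec merges cleanly with the existing mapper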
            change = dict((k, v) for k, v in change.items() if change[k] is not None)
            if change.get('id') is None and change.get('name') is None:
                module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.')
            if before_idp == dict():
                old_mapper = dict()
            elif change.get('id') is not None:
                old_mapper = kc.get_identity_provider_mapper(change['id'], alias, realm)
                if old_mapper is None:
                    old_mapper = dict()
            else:
                found = [x for x in kc.get_identity_provider_mappers(alias, realm) if x['name'] == change['name']]
                if len(found) == 1:
                    old_mapper = found[0]
                else:
                    old_mapper = dict()
            new_mapper = old_mapper.copy()
            new_mapper.update(change)
            if new_mapper != old_mapper:
                if changeset.get('mappers') is None:
                    changeset['mappers'] = list()
                changeset['mappers'].append(new_mapper)

    # prepare the new representation
    updated_idp = before_idp.copy()
    updated_idp.update(changeset)
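    # updated_idp now represents the desired end state; before_idp still holds the current server state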

    result['proposed'] = sanitize(changeset)
    result['existing'] = sanitize(before_idp)

    # if before_idp is empty, the identity provider doesn't exist.
    if before_idp == dict():
        if state == 'absent':
            # nothing to do.
            if module._diff:
                result['diff'] = dict(before='', after='')
            result['changed'] = False
            result['end_state'] = dict()
            result['msg'] = 'Identity provider does not exist; doing nothing.'
            module.exit_json(**result)

        # for 'present', create a new identity provider.
        result['changed'] = True

        if module._diff:
            result['diff'] = dict(before='', after=sanitize(updated_idp))

        if module.check_mode:
            module.exit_json(**result)

        # do it for real!
        updated_idp = updated_idp.copy()
        mappers = updated_idp.pop('mappers', [])
        kc.create_identity_provider(updated_idp, realm)
        for mapper in mappers:
            if mapper.get('identityProviderAlias') is None:
                mapper['identityProviderAlias'] = alias
            kc.create_identity_provider_mapper(mapper, alias, realm)
        after_idp = get_identity_provider_with_mappers(kc, alias, realm)

        result['end_state'] = sanitize(after_idp)

        result['msg'] = 'Identity provider {alias} has been created'.format(alias=alias)
        module.exit_json(**result)

    else:
        if state == 'present':
            # no changes
            if updated_idp == before_idp:
                result['changed'] = False
                result['end_state'] = sanitize(updated_idp)
                result['msg'] = "No changes required to identity provider {alias}.".format(alias=alias)
                module.exit_json(**result)

            # update the existing identity provider
            result['changed'] = True

            if module._diff:
                result['diff'] = dict(before=sanitize(before_idp), after=sanitize(updated_idp))

            if module.check_mode:
                module.exit_json(**result)

            # do the update
            updated_idp = updated_idp.copy()
            updated_mappers = updated_idp.pop('mappers', [])
            kc.update_identity_provider(updated_idp, realm)
            for mapper in updated_mappers:
                if mapper.get('id') is not None:
                    kc.update_identity_provider_mapper(mapper, alias, realm)
                else:
                    if mapper.get('identityProviderAlias') is None:
                        mapper['identityProviderAlias'] = alias
                    kc.create_identity_provider_mapper(mapper, alias, realm)
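            # mappers that exist on the server but are absent from the desired list are deleted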
            for mapper in [x for x in before_idp['mappers']
                           if [y for y in updated_mappers if y['name'] == x['name']] == []]:
                kc.delete_identity_provider_mapper(mapper['id'], alias, realm)

            after_idp = get_identity_provider_with_mappers(kc, alias, realm)

            result['end_state'] = sanitize(after_idp)

            result['msg'] = "Identity provider {alias} has been updated".format(alias=alias)
            module.exit_json(**result)

        elif state == 'absent':
            result['changed'] = True

            if module._diff:
                result['diff'] = dict(before=sanitize(before_idp), after='')

            if module.check_mode:
                module.exit_json(**result)

            # delete for real
            kc.delete_identity_provider(alias, realm)

            result['end_state'] = dict()

            result['msg'] = "Identity provider {alias} has been deleted".format(alias=alias)
            module.exit_json(**result)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
@@ -242,6 +242,13 @@ options:
            - enabledEventTypes
        type: list
        elements: str
    events_enabled:
        description:
            - Enables or disables login events for this realm.
        aliases:
            - eventsEnabled
        type: bool
        version_added: 3.6.0
    events_expiration:
        description:
            - The realm events expiration.
@@ -254,7 +261,7 @@ options:
        aliases:
            - eventsListeners
        type: list
        elements: dict
        elements: str
    failure_factor:
        description:
            - The realm failure factor.
@@ -626,8 +633,9 @@ def main():
        email_theme=dict(type='str', aliases=['emailTheme']),
        enabled=dict(type='bool'),
        enabled_event_types=dict(type='list', elements='str', aliases=['enabledEventTypes']),
        events_enabled=dict(type='bool', aliases=['eventsEnabled']),
        events_expiration=dict(type='int', aliases=['eventsExpiration']),
        events_listeners=dict(type='list', elements='dict', aliases=['eventsListeners']),
        events_listeners=dict(type='list', elements='str', aliases=['eventsListeners']),
        failure_factor=dict(type='int', aliases=['failureFactor']),
        internationalization_enabled=dict(type='bool', aliases=['internationalizationEnabled']),
        login_theme=dict(type='str', aliases=['loginTheme']),
979
plugins/modules/identity/keycloak/keycloak_user_federation.py
Normal file
@@ -0,0 +1,979 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: keycloak_user_federation

short_description: Allows administration of Keycloak user federations via Keycloak API

version_added: 3.7.0

description:
    - This module allows you to add, remove or modify Keycloak user federations via the Keycloak REST API.
      It requires access to the REST API via OpenID Connect; the user connecting and the client being
      used must have the requisite access rights. In a default Keycloak installation, admin-cli
      and an admin user would work, as would a separate client definition with the scope tailored
      to your needs and a user having the expected roles.

    - The names of module options are snake_cased versions of the camelCase ones found in the
      Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/15.0/rest-api/index.html).


options:
    state:
        description:
            - State of the user federation.
            - On C(present), the user federation will be created if it does not yet exist, or updated with
              the parameters you provide.
            - On C(absent), the user federation will be removed if it exists.
        default: 'present'
        type: str
        choices:
            - present
            - absent

    realm:
        description:
            - The Keycloak realm under which this user federation resides.
        default: 'master'
        type: str

    id:
        description:
            - The unique ID for this user federation. If left empty, the user federation will be searched
              by its I(name).
        type: str

    name:
        description:
            - Display name of provider when linked in admin console.
        type: str

    provider_id:
        description:
            - Provider for this user federation.
        aliases:
            - providerId
        type: str
        choices:
            - ldap
            - kerberos

    provider_type:
        description:
            - Component type for user federation (only supported value is C(org.keycloak.storage.UserStorageProvider)).
        aliases:
            - providerType
        default: org.keycloak.storage.UserStorageProvider
        type: str

    parent_id:
        description:
            - Unique ID for the parent of this user federation. Realm ID will be automatically used if left blank.
        aliases:
            - parentId
        type: str

    config:
        description:
            - Dict specifying the configuration options for the provider; the contents differ depending on
              the value of I(provider_id). Examples are given below for C(ldap) and C(kerberos). It is easiest
              to obtain valid config values by dumping an already-existing user federation configuration
              through check-mode in the I(existing) field.
        type: dict
        suboptions:
            enabled:
                description:
                    - Enable/disable this user federation.
                default: true
                type: bool

            priority:
                description:
                    - Priority of provider when doing a user lookup. Lowest first.
                default: 0
                type: int

            importEnabled:
                description:
                    - If C(true), LDAP users will be imported into Keycloak DB and synced by the configured
                      sync policies.
                default: true
                type: bool

            editMode:
                description:
                    - C(READ_ONLY) is a read-only LDAP store. C(WRITABLE) means data will be synced back to LDAP
                      on demand. C(UNSYNCED) means user data will be imported, but not synced back to LDAP.
                type: str
                choices:
                    - READ_ONLY
                    - WRITABLE
                    - UNSYNCED

            syncRegistrations:
                description:
                    - Should newly created users be created within LDAP store? Priority affects which
                      provider is chosen to sync the new user.
                default: false
                type: bool

            vendor:
                description:
                    - LDAP vendor (provider).
                type: str

            usernameLDAPAttribute:
                description:
                    - Name of LDAP attribute, which is mapped as Keycloak username. For many LDAP server
                      vendors it can be C(uid). For Active Directory it can be C(sAMAccountName) or C(cn).
                      The attribute should be filled for all LDAP user records you want to import from
                      LDAP to Keycloak.
                type: str

            rdnLDAPAttribute:
                description:
                    - Name of LDAP attribute, which is used as RDN (top attribute) of typical user DN.
                      Usually it's the same as the Username LDAP attribute, however it is not required. For
                      example for Active Directory, it is common to use C(cn) as RDN attribute when the
                      username attribute might be C(sAMAccountName).
                type: str

            uuidLDAPAttribute:
                description:
                    - Name of LDAP attribute, which is used as unique object identifier (UUID) for objects
                      in LDAP. For many LDAP server vendors, it is C(entryUUID); however some are different.
                      For example for Active Directory it should be C(objectGUID). If your LDAP server does
                      not support the notion of UUID, you can use any other attribute that is supposed to
                      be unique among LDAP users in the tree.
                type: str

            userObjectClasses:
                description:
                    - All values of LDAP objectClass attribute for users in LDAP, separated by commas.
                      For example C(inetOrgPerson, organizationalPerson). Newly created Keycloak users
                      will be written to LDAP with all those object classes and existing LDAP user records
                      are found only if they contain all of those object classes.
                type: str

            connectionUrl:
                description:
                    - Connection URL to your LDAP server.
                type: str

            usersDn:
                description:
                    - Full DN of LDAP tree where your users are. This DN is the parent of LDAP users.
                type: str

            customUserSearchFilter:
                description:
                    - Additional LDAP filter for filtering searched users. Leave this empty if you don't
                      need an additional filter.
                type: str

            searchScope:
                description:
                    - For one level, the search applies only for users in the DNs specified by User DNs.
                      For subtree, the search applies to the whole subtree. See LDAP documentation for
                      more details.
                default: '1'
                type: str
                choices:
                    - '1'
                    - '2'

            authType:
                description:
                    - Type of the authentication method used during the LDAP bind operation. It is used in
                      most of the requests sent to the LDAP server.
                default: 'none'
                type: str
                choices:
                    - none
                    - simple

            bindDn:
                description:
                    - DN of the LDAP user which will be used by Keycloak to access the LDAP server.
                type: str

            bindCredential:
                description:
                    - Password of LDAP admin.
                type: str

            startTls:
                description:
                    - Encrypts the connection to LDAP using STARTTLS, which will disable connection pooling.
                default: false
                type: bool

            usePasswordModifyExtendedOp:
                description:
                    - Use the LDAPv3 Password Modify Extended Operation (RFC-3062). The password modify
                      extended operation usually requires that the LDAP user already has a password in the LDAP
                      server. So when this is used with 'Sync Registrations', it can be good to also add a
                      'Hardcoded LDAP attribute mapper' with a randomly generated initial password.
                default: false
                type: bool

            validatePasswordPolicy:
                description:
                    - Determines if Keycloak should validate the password with the realm password policy
                      before updating it.
                default: false
                type: bool

            trustEmail:
                description:
                    - If enabled, email provided by this provider is not verified even if verification is
                      enabled for the realm.
                default: false
                type: bool

            useTruststoreSpi:
                description:
                    - Specifies whether LDAP connection will use the truststore SPI with the truststore
                      configured in standalone.xml/domain.xml. C(always) means that it will always use it.
                      C(never) means that it will not use it. C(ldapsOnly) means that it will use it if
                      your connection URL uses ldaps. Note even if standalone.xml/domain.xml is not
                      configured, the default Java cacerts or certificate specified by the
                      C(javax.net.ssl.trustStore) property will be used.
                default: ldapsOnly
                type: str
                choices:
                    - always
                    - ldapsOnly
                    - never

            connectionTimeout:
                description:
                    - LDAP connection timeout in milliseconds.
                type: int

            readTimeout:
                description:
                    - LDAP read timeout in milliseconds. This timeout applies for LDAP read operations.
                type: int

            pagination:
                description:
                    - Does the LDAP server support pagination.
                default: true
                type: bool

            connectionPooling:
                description:
                    - Determines if Keycloak should use connection pooling for accessing the LDAP server.
                default: true
                type: bool

            connectionPoolingAuthentication:
                description:
                    - A list of space-separated authentication types of connections that may be pooled.
                type: str
                choices:
                    - none
                    - simple
                    - DIGEST-MD5

            connectionPoolingDebug:
                description:
                    - A string that indicates the level of debug output to produce. Example valid values are
                      C(fine) (trace connection creation and removal) and C(all) (all debugging information).
                type: str

            connectionPoolingInitSize:
                description:
                    - The number of connections per connection identity to create when initially creating a
                      connection for the identity.
                type: int

            connectionPoolingMaxSize:
                description:
                    - The maximum number of connections per connection identity that can be maintained
                      concurrently.
                type: int

            connectionPoolingPrefSize:
                description:
                    - The preferred number of connections per connection identity that should be maintained
                      concurrently.
                type: int

            connectionPoolingProtocol:
                description:
                    - A list of space-separated protocol types of connections that may be pooled.
                      Valid types are C(plain) and C(ssl).
                type: str

            connectionPoolingTimeout:
                description:
                    - The number of milliseconds that an idle connection may remain in the pool without
                      being closed and removed from the pool.
                type: int

            allowKerberosAuthentication:
                description:
                    - Enable/disable HTTP authentication of users with SPNEGO/Kerberos tokens. The data
                      about authenticated users will be provisioned from this LDAP server.
                default: false
                type: bool

            kerberosRealm:
                description:
                    - Name of the Kerberos realm.
                type: str

            serverPrincipal:
                description:
                    - Full name of the server principal for the HTTP service, including server and domain name. For
                      example C(HTTP/host.foo.org@FOO.ORG). Use C(*) to accept any service principal in the
                      KeyTab file.
                type: str

            keyTab:
                description:
                    - Location of the Kerberos KeyTab file containing the credentials of the server principal. For
                      example C(/etc/krb5.keytab).
                type: str

            debug:
                description:
                    - Enable/disable debug logging to standard output for Krb5LoginModule.
                type: bool

            useKerberosForPasswordAuthentication:
                description:
                    - Use the Kerberos login module to authenticate username/password against the Kerberos server
                      instead of authenticating against the LDAP server with the Directory Service API.
                default: false
                type: bool

            allowPasswordAuthentication:
                description:
                    - Enable/disable possibility of username/password authentication against Kerberos database.
                type: bool

            batchSizeForSync:
                description:
                    - Count of LDAP users to be imported from LDAP to Keycloak within a single transaction.
                default: 1000
                type: int

            fullSyncPeriod:
                description:
                    - Period for full synchronization in seconds.
                default: -1
                type: int

            changedSyncPeriod:
                description:
                    - Period for synchronization of changed or newly created LDAP users in seconds.
                default: -1
                type: int

            updateProfileFirstLogin:
                description:
                    - Update profile on first login.
                type: bool

            cachePolicy:
                description:
                    - Cache policy for this storage provider.
                type: str
                default: 'DEFAULT'
                choices:
                    - DEFAULT
                    - EVICT_DAILY
                    - EVICT_WEEKLY
                    - MAX_LIFESPAN
                    - NO_CACHE

            evictionDay:
                description:
                    - Day of the week the entry will become invalid on.
                type: str

            evictionHour:
                description:
                    - Hour of day the entry will become invalid on.
                type: str

            evictionMinute:
                description:
                    - Minute of day the entry will become invalid on.
                type: str

            maxLifespan:
                description:
                    - Max lifespan of cache entry in milliseconds.
                type: int

    mappers:
        description:
            - A list of dicts defining mappers associated with this user federation.
        type: list
        elements: dict
        suboptions:
            id:
                description:
                    - Unique ID of this mapper.
                type: str

            name:
                description:
                    - Name of the mapper. If no ID is given, the mapper will be searched by name.
                type: str

            parentId:
                description:
                    - Unique ID for the parent of this mapper. The ID of the user federation will automatically
                      be used if left blank.
                type: str

            providerId:
                description:
                    - The mapper type for this mapper (for instance C(user-attribute-ldap-mapper)).
                type: str

            providerType:
                description:
                    - Component type for this mapper (only supported value is C(org.keycloak.storage.ldap.mappers.LDAPStorageMapper)).
                type: str

            config:
                description:
                    - Dict specifying the configuration options for the mapper; the contents differ
                      depending on the value of I(providerId).
                type: dict

extends_documentation_fragment:
    - community.general.keycloak

author:
    - Laurent Paumier (@laurpaum)
'''

EXAMPLES = '''
- name: Create LDAP user federation
  community.general.keycloak_user_federation:
    auth_keycloak_url: https://keycloak.example.com/auth
    auth_realm: master
    auth_username: admin
    auth_password: password
    realm: my-realm
    name: my-ldap
    state: present
    provider_id: ldap
    provider_type: org.keycloak.storage.UserStorageProvider
    config:
      priority: 0
      enabled: true
      cachePolicy: DEFAULT
      batchSizeForSync: 1000
      editMode: READ_ONLY
      importEnabled: true
      syncRegistrations: false
      vendor: other
      usernameLDAPAttribute: uid
      rdnLDAPAttribute: uid
      uuidLDAPAttribute: entryUUID
      userObjectClasses: inetOrgPerson, organizationalPerson
      connectionUrl: ldaps://ldap.example.com:636
      usersDn: ou=Users,dc=example,dc=com
      authType: simple
      bindDn: cn=directory reader
      bindCredential: password
      searchScope: 1
      validatePasswordPolicy: false
      trustEmail: false
      useTruststoreSpi: ldapsOnly
      connectionPooling: true
      pagination: true
      allowKerberosAuthentication: false
      debug: false
      useKerberosForPasswordAuthentication: false
    mappers:
      - name: "full name"
        providerId: "full-name-ldap-mapper"
        providerType: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
        config:
          ldap.full.name.attribute: cn
          read.only: true
          write.only: false

- name: Create Kerberos user federation
  community.general.keycloak_user_federation:
    auth_keycloak_url: https://keycloak.example.com/auth
    auth_realm: master
    auth_username: admin
    auth_password: password
    realm: my-realm
    name: my-kerberos
    state: present
    provider_id: kerberos
    provider_type: org.keycloak.storage.UserStorageProvider
    config:
      priority: 0
      enabled: true
      cachePolicy: DEFAULT
      kerberosRealm: EXAMPLE.COM
      serverPrincipal: HTTP/host.example.com@EXAMPLE.COM
      keyTab: keytab
      allowPasswordAuthentication: false
      updateProfileFirstLogin: false

- name: Delete user federation
  community.general.keycloak_user_federation:
    auth_keycloak_url: https://keycloak.example.com/auth
    auth_realm: master
    auth_username: admin
    auth_password: password
    realm: my-realm
    name: my-federation
    state: absent
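
# The following task is not part of the upstream EXAMPLES block; it is a minimal sketch
# showing how to dump an existing federation's configuration via check mode, as suggested
# in the I(config) option description. The I(existing) return value then contains the
# current server-side representation.
- name: Dump existing user federation configuration (check mode)
  community.general.keycloak_user_federation:
    auth_keycloak_url: https://keycloak.example.com/auth
    auth_realm: master
    auth_username: admin
    auth_password: password
    realm: my-realm
    name: my-ldap
    state: present
  check_mode: true
  register: federation_dump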
'''

RETURN = '''
msg:
    description: Message as to what action was taken.
    returned: always
    type: str
    sample: "No changes required to user federation 164bb483-c613-482e-80fe-7f1431308799."

proposed:
    description: Representation of proposed changes to user federation.
    returned: always
    type: dict
    sample: {
        "config": {
            "allowKerberosAuthentication": "false",
            "authType": "simple",
            "batchSizeForSync": "1000",
            "bindCredential": "**********",
            "bindDn": "cn=directory reader",
            "cachePolicy": "DEFAULT",
            "connectionPooling": "true",
            "connectionUrl": "ldaps://ldap.example.com:636",
            "debug": "false",
            "editMode": "READ_ONLY",
            "enabled": "true",
            "importEnabled": "true",
            "pagination": "true",
            "priority": "0",
            "rdnLDAPAttribute": "uid",
            "searchScope": "1",
            "syncRegistrations": "false",
            "trustEmail": "false",
            "useKerberosForPasswordAuthentication": "false",
            "useTruststoreSpi": "ldapsOnly",
            "userObjectClasses": "inetOrgPerson, organizationalPerson",
            "usernameLDAPAttribute": "uid",
            "usersDn": "ou=Users,dc=example,dc=com",
            "uuidLDAPAttribute": "entryUUID",
            "validatePasswordPolicy": "false",
            "vendor": "other"
        },
        "name": "ldap",
        "providerId": "ldap",
        "providerType": "org.keycloak.storage.UserStorageProvider"
    }

existing:
    description: Representation of existing user federation.
    returned: always
    type: dict
    sample: {
        "config": {
            "allowKerberosAuthentication": "false",
            "authType": "simple",
            "batchSizeForSync": "1000",
            "bindCredential": "**********",
            "bindDn": "cn=directory reader",
            "cachePolicy": "DEFAULT",
            "changedSyncPeriod": "-1",
            "connectionPooling": "true",
            "connectionUrl": "ldaps://ldap.example.com:636",
            "debug": "false",
            "editMode": "READ_ONLY",
            "enabled": "true",
            "fullSyncPeriod": "-1",
            "importEnabled": "true",
            "pagination": "true",
            "priority": "0",
            "rdnLDAPAttribute": "uid",
            "searchScope": "1",
            "syncRegistrations": "false",
            "trustEmail": "false",
            "useKerberosForPasswordAuthentication": "false",
            "useTruststoreSpi": "ldapsOnly",
            "userObjectClasses": "inetOrgPerson, organizationalPerson",
            "usernameLDAPAttribute": "uid",
            "usersDn": "ou=Users,dc=example,dc=com",
            "uuidLDAPAttribute": "entryUUID",
            "validatePasswordPolicy": "false",
            "vendor": "other"
        },
        "id": "01122837-9047-4ae4-8ca0-6e2e891a765f",
        "mappers": [
            {
                "config": {
                    "always.read.value.from.ldap": "false",
                    "is.mandatory.in.ldap": "false",
                    "ldap.attribute": "mail",
                    "read.only": "true",
                    "user.model.attribute": "email"
                },
                "id": "17d60ce2-2d44-4c2c-8b1f-1fba601b9a9f",
                "name": "email",
                "parentId": "01122837-9047-4ae4-8ca0-6e2e891a765f",
                "providerId": "user-attribute-ldap-mapper",
                "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
            }
        ],
        "name": "myfed",
        "parentId": "myrealm",
        "providerId": "ldap",
        "providerType": "org.keycloak.storage.UserStorageProvider"
    }

end_state:
    description: Representation of user federation after module execution.
    returned: always
    type: dict
    sample: {
        "config": {
            "allowPasswordAuthentication": "false",
            "cachePolicy": "DEFAULT",
            "enabled": "true",
            "kerberosRealm": "EXAMPLE.COM",
            "keyTab": "/etc/krb5.keytab",
            "priority": "0",
            "serverPrincipal": "HTTP/host.example.com@EXAMPLE.COM",
            "updateProfileFirstLogin": "false"
        },
        "id": "cf52ae4f-4471-4435-a0cf-bb620cadc122",
        "mappers": [],
        "name": "kerberos",
        "parentId": "myrealm",
        "providerId": "kerberos",
        "providerType": "org.keycloak.storage.UserStorageProvider"
    }

'''

from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
    keycloak_argument_spec, get_token, KeycloakError
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlencode
from copy import deepcopy


def sanitize(comp):
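    # Keycloak returns component config values as single-element lists; flatten them
    # for readable output and mask the LDAP bind credential.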
    compcopy = deepcopy(comp)
    if 'config' in compcopy:
        compcopy['config'] = dict((k, v[0]) for k, v in compcopy['config'].items())
        if 'bindCredential' in compcopy['config']:
            compcopy['config']['bindCredential'] = '**********'
    if 'mappers' in compcopy:
        for mapper in compcopy['mappers']:
            if 'config' in mapper:
                mapper['config'] = dict((k, v[0]) for k, v in mapper['config'].items())
    return compcopy


def main():
    """
    Module execution

    :return:
    """
    argument_spec = keycloak_argument_spec()

    config_spec = dict(
        allowKerberosAuthentication=dict(type='bool', default=False),
        allowPasswordAuthentication=dict(type='bool'),
        authType=dict(type='str', choices=['none', 'simple'], default='none'),
        batchSizeForSync=dict(type='int', default=1000),
        bindCredential=dict(type='str', no_log=True),
        bindDn=dict(type='str'),
        cachePolicy=dict(type='str', choices=['DEFAULT', 'EVICT_DAILY', 'EVICT_WEEKLY', 'MAX_LIFESPAN', 'NO_CACHE'], default='DEFAULT'),
        changedSyncPeriod=dict(type='int', default=-1),
        connectionPooling=dict(type='bool', default=True),
        connectionPoolingAuthentication=dict(type='str', choices=['none', 'simple', 'DIGEST-MD5']),
        connectionPoolingDebug=dict(type='str'),
        connectionPoolingInitSize=dict(type='int'),
        connectionPoolingMaxSize=dict(type='int'),
        connectionPoolingPrefSize=dict(type='int'),
        connectionPoolingProtocol=dict(type='str'),
        connectionPoolingTimeout=dict(type='int'),
        connectionTimeout=dict(type='int'),
        connectionUrl=dict(type='str'),
        customUserSearchFilter=dict(type='str'),
        debug=dict(type='bool'),
        editMode=dict(type='str', choices=['READ_ONLY', 'WRITABLE', 'UNSYNCED']),
        enabled=dict(type='bool', default=True),
        evictionDay=dict(type='str'),
        evictionHour=dict(type='str'),
        evictionMinute=dict(type='str'),
        fullSyncPeriod=dict(type='int', default=-1),
        importEnabled=dict(type='bool', default=True),
        kerberosRealm=dict(type='str'),
        keyTab=dict(type='str', no_log=False),
        maxLifespan=dict(type='int'),
        pagination=dict(type='bool', default=True),
        priority=dict(type='int', default=0),
        rdnLDAPAttribute=dict(type='str'),
        readTimeout=dict(type='int'),
        searchScope=dict(type='str', choices=['1', '2'], default='1'),
        serverPrincipal=dict(type='str'),
        startTls=dict(type='bool', default=False),
        syncRegistrations=dict(type='bool', default=False),
        trustEmail=dict(type='bool', default=False),
        updateProfileFirstLogin=dict(type='bool'),
        useKerberosForPasswordAuthentication=dict(type='bool', default=False),
        usePasswordModifyExtendedOp=dict(type='bool', default=False, no_log=False),
        useTruststoreSpi=dict(type='str', choices=['always', 'ldapsOnly', 'never'], default='ldapsOnly'),
        userObjectClasses=dict(type='str'),
        usernameLDAPAttribute=dict(type='str'),
        usersDn=dict(type='str'),
        uuidLDAPAttribute=dict(type='str'),
        validatePasswordPolicy=dict(type='bool', default=False),
        vendor=dict(type='str'),
    )

    mapper_spec = dict(
        id=dict(type='str'),
        name=dict(type='str'),
        parentId=dict(type='str'),
        providerId=dict(type='str'),
        providerType=dict(type='str'),
        config=dict(type='dict'),
    )

    meta_args = dict(
        config=dict(type='dict', options=config_spec),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        realm=dict(type='str', default='master'),
        id=dict(type='str'),
        name=dict(type='str'),
        provider_id=dict(type='str', aliases=['providerId'], choices=['ldap', 'kerberos']),
        provider_type=dict(type='str', aliases=['providerType'], default='org.keycloak.storage.UserStorageProvider'),
        parent_id=dict(type='str', aliases=['parentId']),
        mappers=dict(type='list', elements='dict', options=mapper_spec),
    )

    argument_spec.update(meta_args)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           required_one_of=([['id', 'name'],
                                             ['token', 'auth_realm', 'auth_username', 'auth_password']]),
                           required_together=([['auth_realm', 'auth_username', 'auth_password']]))

    result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})

    # Obtain access token, initialize API
    try:
        connection_header = get_token(module.params)
    except KeycloakError as e:
        module.fail_json(msg=str(e))

    kc = KeycloakAPI(module, connection_header)

    realm = module.params.get('realm')
    state = module.params.get('state')
    config = module.params.get('config')
    mappers = module.params.get('mappers')
    cid = module.params.get('id')
    name = module.params.get('name')

    # Keycloak API expects config parameters to be arrays containing a single string element
    if config is not None:
        module.params['config'] = dict((k, [str(v).lower() if not isinstance(v, str) else v])
                                       for k, v in config.items() if config[k] is not None)
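        # for example, config {'enabled': True, 'priority': 0} becomes {'enabled': ['true'], 'priority': ['0']}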

    if mappers is not None:
        for mapper in mappers:
            if mapper.get('config') is not None:
                mapper['config'] = dict((k, [str(v).lower() if not isinstance(v, str) else v])
                                        for k, v in mapper['config'].items() if mapper['config'][k] is not None)

    # convert module parameters to client representation parameters (if they belong in there)
    comp_params = [x for x in module.params
                   if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers'] and
                   module.params.get(x) is not None]

    # does the user federation already exist?
    if cid is None:
        found = kc.get_components(urlencode(dict(type='org.keycloak.storage.UserStorageProvider', parent=realm, name=name)), realm)
        if len(found) > 1:
            module.fail_json(msg='No ID given and found multiple user federations with name `{name}`. Cannot continue.'.format(name=name))
        before_comp = next(iter(found), None)
        if before_comp is not None:
            cid = before_comp['id']
    else:
        before_comp = kc.get_component(cid, realm)

    if before_comp is None:
        before_comp = dict()

    # if user federation exists, get associated mappers
    if cid is not None:
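        # mappers are child components of the federation; sort by name for order-independent comparison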
        before_comp['mappers'] = sorted(kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get('name'))

    # build a changeset
    changeset = dict()

    for param in comp_params:
        new_param_value = module.params.get(param)
        old_value = before_comp[camel(param)] if camel(param) in before_comp else None
        if param == 'mappers':
            new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value]
        if new_param_value != old_value:
            changeset[camel(param)] = new_param_value

    # special handling of mappers list to allow change detection
    if module.params.get('mappers') is not None:
        if module.params['provider_id'] == 'kerberos':
            module.fail_json(msg='Cannot configure mappers for Kerberos federations.')
        for change in module.params['mappers']:
            change = dict((k, v) for k, v in change.items() if change[k] is not None)
            if change.get('id') is None and change.get('name') is None:
                module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.')
            if cid is None:
                old_mapper = dict()
            elif change.get('id') is not None:
                old_mapper = kc.get_component(change['id'], realm)
                if old_mapper is None:
                    old_mapper = dict()
            else:
                found = kc.get_components(urlencode(dict(parent=cid, name=change['name'])), realm)
                if len(found) > 1:
                    module.fail_json(msg='Found multiple mappers with name `{name}`. Cannot continue.'.format(name=change['name']))
                if len(found) == 1:
                    old_mapper = found[0]
                else:
                    old_mapper = dict()
            new_mapper = old_mapper.copy()
            new_mapper.update(change)
            if new_mapper != old_mapper:
                if changeset.get('mappers') is None:
                    changeset['mappers'] = list()
                changeset['mappers'].append(new_mapper)

    # prepare the new representation
    updated_comp = before_comp.copy()
    updated_comp.update(changeset)

    result['proposed'] = sanitize(changeset)
    result['existing'] = sanitize(before_comp)

    # if before_comp is empty, the user federation doesn't exist.
    if before_comp == dict():
        if state == 'absent':
            # nothing to do.
            if module._diff:
                result['diff'] = dict(before='', after='')
            result['changed'] = False
            result['end_state'] = dict()
            result['msg'] = 'User federation does not exist; doing nothing.'
            module.exit_json(**result)

        # for 'present', create a new user federation.
        result['changed'] = True

        if module._diff:
            result['diff'] = dict(before='', after=sanitize(updated_comp))

        if module.check_mode:
            module.exit_json(**result)

        # do it for real!
        updated_comp = updated_comp.copy()
        updated_mappers = updated_comp.pop('mappers', [])
        after_comp = kc.create_component(updated_comp, realm)

        for mapper in updated_mappers:
            if mapper.get('id') is not None:
                kc.update_component(mapper, realm)
            else:
                if mapper.get('parentId') is None:
                    mapper['parentId'] = after_comp['id']
                mapper = kc.create_component(mapper, realm)

        after_comp['mappers'] = updated_mappers
        result['end_state'] = sanitize(after_comp)

        result['msg'] = "User federation {id} has been created".format(id=after_comp['id'])
        module.exit_json(**result)

    else:
        if state == 'present':
            # no changes
            if updated_comp == before_comp:
                result['changed'] = False
                result['end_state'] = sanitize(updated_comp)
                result['msg'] = "No changes required to user federation {id}.".format(id=cid)
                module.exit_json(**result)

            # update the existing user federation
            result['changed'] = True

            if module._diff:
                result['diff'] = dict(before=sanitize(before_comp), after=sanitize(updated_comp))

            if module.check_mode:
                module.exit_json(**result)

            # do the update
            updated_comp = updated_comp.copy()
            updated_mappers = updated_comp.pop('mappers', [])
            kc.update_component(updated_comp, realm)
            after_comp = kc.get_component(cid, realm)

            for mapper in updated_mappers:
                if mapper.get('id') is not None:
                    kc.update_component(mapper, realm)
                else:
                    if mapper.get('parentId') is None:
                        mapper['parentId'] = updated_comp['id']
                    mapper = kc.create_component(mapper, realm)

            after_comp['mappers'] = updated_mappers
            result['end_state'] = sanitize(after_comp)

            result['msg'] = "User federation {id} has been updated".format(id=cid)
            module.exit_json(**result)

        elif state == 'absent':
            result['changed'] = True

            if module._diff:
                result['diff'] = dict(before=sanitize(before_comp), after='')

            if module.check_mode:
                module.exit_json(**result)

            # delete for real
            kc.delete_component(cid, realm)

            result['end_state'] = dict()

            result['msg'] = "User federation {id} has been deleted".format(id=cid)
            module.exit_json(**result)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
1
plugins/modules/keycloak_identity_provider.py
Symbolic link
@@ -0,0 +1 @@
./identity/keycloak/keycloak_identity_provider.py
1
plugins/modules/keycloak_user_federation.py
Symbolic link
@@ -0,0 +1 @@
identity/keycloak/keycloak_user_federation.py
|
||||
@@ -54,8 +54,9 @@ options:
|
||||
- Type C(dummy) is added in community.general 3.5.0.
|
||||
- Type C(generic) is added in Ansible 2.5.
|
||||
- Type C(infiniband) is added in community.general 2.0.0.
|
||||
- Type C(gsm) is added in community.general 3.7.0.
|
||||
type: str
|
||||
choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi ]
|
||||
choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi, gsm ]
|
||||
mode:
|
||||
description:
|
||||
- This is the type of device or network connection that you wish to create for a bond or bridge.
|
||||
@@ -99,7 +100,8 @@ options:
|
||||
routing_rules4:
|
||||
description:
|
||||
- Is the same as in an C(ip route add) command, except always requires specifying a priority.
|
||||
type: str
|
||||
type: list
|
||||
elements: str
|
||||
version_added: 3.3.0
|
||||
never_default4:
|
||||
description:
|
||||
@@ -183,7 +185,7 @@ options:
|
||||
mtu:
|
||||
description:
|
||||
- The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created.
|
||||
- Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, pppoe, infiniband)
|
||||
- Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, gsm, pppoe, infiniband)
|
||||
- This parameter defaults to C(1500) when unset.
|
||||
type: int
|
||||
dhcp_client_id:
|
||||
@@ -314,16 +316,28 @@ options:
|
||||
type: str
|
||||
ip_tunnel_dev:
|
||||
description:
|
||||
- This is used with IPIP/SIT - parent device this IPIP/SIT tunnel, can use ifname.
|
||||
- This is used with GRE/IPIP/SIT - parent device this GRE/IPIP/SIT tunnel, can use ifname.
|
||||
type: str
|
||||
ip_tunnel_remote:
|
||||
description:
|
||||
- This is used with IPIP/SIT - IPIP/SIT destination IP address.
|
||||
- This is used with GRE/IPIP/SIT - GRE/IPIP/SIT destination IP address.
|
||||
type: str
|
||||
ip_tunnel_local:
|
||||
description:
|
||||
- This is used with IPIP/SIT - IPIP/SIT local IP address.
|
||||
- This is used with GRE/IPIP/SIT - GRE/IPIP/SIT local IP address.
|
||||
type: str
|
||||
ip_tunnel_input_key:
|
||||
description:
|
||||
- The key used for tunnel input packets.
|
||||
- Only used when I(type=gre).
|
||||
type: str
|
||||
version_added: 3.6.0
|
||||
ip_tunnel_output_key:
|
||||
description:
|
||||
- The key used for tunnel output packets.
|
||||
- Only used when I(type=gre).
|
||||
type: str
|
||||
version_added: 3.6.0
zone:
description:
- The trust level of the connection.
@@ -332,11 +346,141 @@ options:
version_added: 2.0.0
wifi_sec:
description:
- 'The security configuration of the WiFi connection. The valid attributes are listed on:
- The security configuration of the WiFi connection.
- Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host.
- 'An up-to-date list of supported attributes can be found here:
U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless-security.html).'
- 'For instance to use common WPA-PSK auth with a password:
C({key-mgmt: wpa-psk, psk: my_password}).'
type: dict
suboptions:
auth-alg:
description:
- When WEP is used (that is, if I(key-mgmt) = C(none) or C(ieee8021x)) indicate the 802.11 authentication algorithm required by the AP here.
- One of C(open) for Open System, C(shared) for Shared Key, or C(leap) for Cisco LEAP.
- When using Cisco LEAP (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)) the I(leap-username) and I(leap-password) properties
must be specified.
type: str
choices: [ open, shared, leap ]
fils:
description:
- Indicates whether Fast Initial Link Setup (802.11ai) must be enabled for the connection.
- One of C(0) (use global default value), C(1) (disable FILS), C(2) (enable FILS if the supplicant and the access point support it) or C(3)
(enable FILS and fail if not supported).
- When set to C(0) and no global default is set, FILS will be optionally enabled.
type: int
choices: [ 0, 1, 2, 3 ]
default: 0
group:
description:
- A list of group/broadcast encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one of the algorithms in
the list.
- For maximum compatibility leave this property empty.
type: list
elements: str
choices: [ wep40, wep104, tkip, ccmp ]
key-mgmt:
description:
- Key management used for the connection.
- One of C(none) (WEP or no password protection), C(ieee8021x) (Dynamic WEP), C(owe) (Opportunistic Wireless Encryption), C(wpa-psk) (WPA2
+ WPA3 personal), C(sae) (WPA3 personal only), C(wpa-eap) (WPA2 + WPA3 enterprise) or C(wpa-eap-suite-b-192) (WPA3 enterprise only).
- This property must be set for any Wi-Fi connection that uses security.
type: str
choices: [ none, ieee8021x, owe, wpa-psk, sae, wpa-eap, wpa-eap-suite-b-192 ]
leap-password-flags:
description: Flags indicating how to handle the I(leap-password) property.
type: list
elements: int
leap-password:
description: The login password for legacy LEAP connections (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)).
type: str
leap-username:
description: The login username for legacy LEAP connections (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)).
type: str
pairwise:
description:
- A list of pairwise encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one of the algorithms in the
list.
- For maximum compatibility leave this property empty.
type: list
elements: str
choices: [ tkip, ccmp ]
pmf:
description:
- Indicates whether Protected Management Frames (802.11w) must be enabled for the connection.
- One of C(0) (use global default value), C(1) (disable PMF), C(2) (enable PMF if the supplicant and the access point support it) or C(3)
(enable PMF and fail if not supported).
- When set to C(0) and no global default is set, PMF will be optionally enabled.
type: int
choices: [ 0, 1, 2, 3 ]
default: 0
proto:
description:
- List of strings specifying the allowed WPA protocol versions to use.
- Each element may be C(wpa) (allow WPA) or C(rsn) (allow WPA2/RSN).
- If not specified, both WPA and RSN connections are allowed.
type: list
elements: str
choices: [ wpa, rsn ]
psk-flags:
description: Flags indicating how to handle the I(psk) property.
type: list
elements: int
psk:
description:
- Pre-Shared-Key for WPA networks.
- For WPA-PSK, it is either an ASCII passphrase of 8 to 63 characters that is (as specified in the 802.11i standard) hashed to derive the
actual key, or the key in the form of 64 hexadecimal characters.
- WPA3-Personal networks use a passphrase of any length for SAE authentication.
type: str
wep-key-flags:
description: Flags indicating how to handle the I(wep-key0), I(wep-key1), I(wep-key2), and I(wep-key3) properties.
type: list
elements: int
wep-key-type:
description:
- Controls the interpretation of WEP keys.
- Allowed values are C(1), in which case the key is either a 10- or 26-character hexadecimal string, or a 5- or 13-character ASCII
password; or C(2), in which case the passphrase is provided as a string and will be hashed using the de-facto MD5 method to derive the
actual WEP key.
type: int
choices: [ 1, 2 ]
wep-key0:
description:
- Index 0 WEP key. This is the WEP key used in most networks.
- See the I(wep-key-type) property for a description of how this key is interpreted.
type: str
wep-key1:
description:
- Index 1 WEP key. This WEP index is not used by most networks.
- See the I(wep-key-type) property for a description of how this key is interpreted.
type: str
wep-key2:
description:
- Index 2 WEP key. This WEP index is not used by most networks.
- See the I(wep-key-type) property for a description of how this key is interpreted.
type: str
wep-key3:
description:
- Index 3 WEP key. This WEP index is not used by most networks.
- See the I(wep-key-type) property for a description of how this key is interpreted.
type: str
wep-tx-keyidx:
description:
- When static WEP is used (that is, if I(key-mgmt=none)) and a non-default WEP key index is used by the AP, put that WEP key index here.
- Valid values are C(0) (default key) through C(3).
- Note that some consumer access points (like the Linksys WRT54G) number the keys C(1) - C(4).
type: int
choices: [ 0, 1, 2, 3 ]
default: 0
wps-method:
description:
- Flags indicating which mode of WPS is to be used, if any.
- There is little point in changing the default setting as NetworkManager will automatically determine whether it is feasible to start WPS
enrollment from the Access Point capabilities.
- WPS can be disabled by setting this property to a value of C(1).
type: int
default: 0
version_added: 3.0.0
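Putting the documented WPA-PSK attributes together, a minimal sketch of a secured WiFi task (SSID, names, and password are illustrative):

    - name: Create a WPA-PSK WiFi connection
      community.general.nmcli:
        type: wifi
        conn_name: office-wifi
        ifname: wlan0
        ssid: OfficeSSID
        wifi_sec:
          key-mgmt: wpa-psk
          psk: my_password
        autoconnect: true
        state: present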
ssid:
description:
@@ -345,12 +489,257 @@ options:
version_added: 3.0.0
wifi:
description:
- 'The configuration of the WiFi connection. The valid attributes are listed on:
- The configuration of the WiFi connection.
- Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host.
- 'An up-to-date list of supported attributes can be found here:
U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless.html).'
- 'For instance to create a hidden AP mode WiFi connection:
C({hidden: true, mode: ap}).'
type: dict
suboptions:
ap-isolation:
description:
- Configures AP isolation, which prevents communication between wireless devices connected to this AP.
- This property can be set to a value different from C(-1) only when the interface is configured in AP mode.
- If set to C(1), devices are not able to communicate with each other. This increases security because it protects devices against attacks
from other clients in the network. At the same time, it prevents devices from accessing resources on the same wireless network, such as file
shares, printers, etc.
- If set to C(0), devices can talk to each other.
- When set to C(-1), the global default is used; in case the global default is unspecified it is assumed to be C(0).
type: int
choices: [ -1, 0, 1 ]
default: -1
assigned-mac-address:
description:
- The new field for the cloned MAC address.
- It can be either a hardware address in ASCII representation, or one of the special values C(preserve), C(permanent), C(random) or
C(stable).
- This field replaces the deprecated I(cloned-mac-address) on D-Bus, which can only contain explicit hardware addresses.
- Note that this property only exists in the D-Bus API. libnm and nmcli continue to call this property I(cloned-mac-address).
type: str
band:
description:
- 802.11 frequency band of the network.
- One of C(a) for 5GHz 802.11a or C(bg) for 2.4GHz 802.11.
- This will lock associations to the Wi-Fi network to the specific band, so for example, if C(a) is specified, the device will not
associate with the same network in the 2.4GHz band even if the network's settings are compatible.
- This setting depends on specific driver capability and may not work with all drivers.
type: str
choices: [ a, bg ]
bssid:
description:
- If specified, directs the device to only associate with the given access point.
- This capability is highly driver dependent and not supported by all devices.
- Note this property does not control the BSSID used when creating an Ad-Hoc network and is unlikely to in the future.
type: str
channel:
description:
- Wireless channel to use for the Wi-Fi connection.
- The device will only join (or create for Ad-Hoc networks) a Wi-Fi network on the specified channel.
- Because channel numbers overlap between bands, this property also requires the I(band) property to be set.
type: int
default: 0
cloned-mac-address:
description:
- This D-Bus field is deprecated in favor of I(assigned-mac-address) which is more flexible and allows specifying special variants like
C(random).
- For libnm and nmcli, this field is called I(cloned-mac-address).
type: str
generate-mac-address-mask:
description:
- With I(cloned-mac-address) setting C(random) or C(stable), by default all bits of the MAC address are scrambled and a
locally-administered, unicast MAC address is created. This property allows specifying that certain bits are fixed.
- Note that the least significant bit of the first MAC address will always be unset to create a unicast MAC address.
- If the property is C(null), it is eligible to be overwritten by a default connection setting.
- If the value is still C(null) or an empty string, the default is to create a locally-administered, unicast MAC address.
- If the value contains one MAC address, this address is used as mask. The set bits of the mask are to be filled with the current MAC
address of the device, while the unset bits are subject to randomization.
- Setting C(FE:FF:FF:00:00:00) means to preserve the OUI of the current MAC address and only randomize the lower 3 bytes using the
C(random) or C(stable) algorithm.
- If the value contains one additional MAC address after the mask, this address is used instead of the current MAC address to fill the bits
that shall not be randomized.
- For example, a value of C(FE:FF:FF:00:00:00 68:F7:28:00:00:00) will set the OUI of the MAC address to 68:F7:28, while the lower bits are
randomized.
- A value of C(02:00:00:00:00:00 00:00:00:00:00:00) will create a fully scrambled globally-administered, burned-in MAC address.
- If the value contains more than one additional MAC address, one of them is chosen randomly. For example,
C(02:00:00:00:00:00 00:00:00:00:00:00 02:00:00:00:00:00) will create a fully scrambled MAC address, randomly locally or globally
administered.
type: str
hidden:
description:
- If C(true), indicates that the network is a non-broadcasting network that hides its SSID. This works both in infrastructure and AP mode.
- In infrastructure mode, various workarounds are used for a more reliable discovery of hidden networks, such as probe-scanning the SSID.
However, these workarounds expose inherent insecurities with hidden SSID networks, and thus hidden SSID networks should be used with
caution.
- In AP mode, the created network does not broadcast its SSID.
- Note that marking the network as hidden may be a privacy issue for you (in infrastructure mode) or client stations (in AP mode), as the
explicit probe-scans are distinctly recognizable on the air.
type: bool
default: false
mac-address-blacklist:
description:
- A list of permanent MAC addresses of Wi-Fi devices to which this connection should never apply.
- Each MAC address should be given in the standard hex-digits-and-colons notation (for example, C(00:11:22:33:44:55)).
type: list
elements: str
mac-address-randomization:
description:
- One of C(0) (never randomize unless the user has set a global default to randomize and the supplicant supports randomization), C(1)
(never randomize the MAC address), or C(2) (always randomize the MAC address).
- This property is deprecated in favor of I(cloned-mac-address).
type: int
default: 0
choices: [ 0, 1, 2 ]
mac-address:
description:
- If specified, this connection will only apply to the Wi-Fi device whose permanent MAC address matches.
- This property does not change the MAC address of the device (for example for MAC spoofing).
type: str
mode:
description: Wi-Fi network mode. If blank, C(infrastructure) is assumed.
type: str
choices: [ infrastructure, mesh, adhoc, ap ]
default: infrastructure
mtu:
description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple Ethernet frames.
type: int
default: 0
powersave:
description:
- One of C(2) (disable Wi-Fi power saving), C(3) (enable Wi-Fi power saving), C(1) (don't touch the currently configured setting) or C(0) (use
the globally configured value).
- All other values are reserved.
type: int
default: 0
choices: [ 0, 1, 2, 3 ]
rate:
description:
- If non-zero, directs the device to only use the specified bitrate for communication with the access point.
- Units are in Kb/s, so for example C(5500) = 5.5 Mbit/s.
- This property is highly driver dependent and not all devices support setting a static bitrate.
type: int
default: 0
tx-power:
description:
- If non-zero, directs the device to use the specified transmit power.
- Units are dBm.
- This property is highly driver dependent and not all devices support setting a static transmit power.
type: int
default: 0
wake-on-wlan:
description:
- The NMSettingWirelessWakeOnWLan options to enable. Not all devices support all options.
- May be any combination of C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_ANY) (C(0x2)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_DISCONNECT) (C(0x4)),
C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_MAGIC) (C(0x8)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_GTK_REKEY_FAILURE) (C(0x10)),
C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_EAP_IDENTITY_REQUEST) (C(0x20)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_4WAY_HANDSHAKE) (C(0x40)),
C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_RFKILL_RELEASE) (C(0x80)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_TCP) (C(0x100)) or the special values
C(0x1) (to use global settings) and C(0x8000) (to disable management of Wake-on-LAN in NetworkManager).
- Note the option values' sum must be specified in order to combine multiple options.
type: int
default: 1
version_added: 3.5.0
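The hidden AP mode combination mentioned above as a full task, a minimal sketch (connection name, interface, and SSID are illustrative):

    - name: Create a hidden AP mode WiFi connection
      community.general.nmcli:
        type: wifi
        conn_name: ap-wifi
        ifname: wlan0
        ssid: MyHiddenAP
        wifi:
          hidden: true
          mode: ap
        state: present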
ignore_unsupported_suboptions:
description:
- Ignore suboptions which are invalid or unsupported by the version of NetworkManager/nmcli installed on the host.
- Only I(wifi) and I(wifi_sec) options are currently affected.
type: bool
default: false
version_added: 3.6.0
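A sketch of this flag in use: C(fils) is a newer security attribute that older NetworkManager builds do not know, and with the flag set it is skipped with a warning instead of failing the task (all names and values are illustrative):

    - name: Apply WiFi security suboptions, ignoring ones the local NetworkManager does not support
      community.general.nmcli:
        type: wifi
        conn_name: laptop-wifi
        ifname: wlan0
        ssid: HomeSSID
        wifi_sec:
          key-mgmt: wpa-psk
          psk: my_password
          fils: 2
        ignore_unsupported_suboptions: true
        state: present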
gsm:
description:
- The configuration of the GSM connection.
- Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host.
- 'An up-to-date list of supported attributes can be found here:
U(https://networkmanager.dev/docs/api/latest/settings-gsm.html).'
- 'For instance to use apn, pin, username and password:
C({apn: provider.apn, pin: 1234, username: apn.username, password: apn.password}).'
type: dict
version_added: 3.7.0
suboptions:
apn:
description:
- The GPRS Access Point Name specifying the APN used when establishing a data session with the GSM-based network.
- The APN often determines how the user will be billed for their network usage and whether the user has access to the Internet or
just a provider-specific walled-garden, so it is important to use the correct APN for the user's mobile broadband plan.
- The APN may only be composed of the characters a-z, 0-9, ., and - per GSM 03.60 Section 14.9.
type: str
auto-config:
description: When C(true), the settings such as I(gsm.apn), I(gsm.username), or I(gsm.password) will default to values that match the network
the modem will register to in the Mobile Broadband Provider database.
type: bool
default: false
device-id:
description:
- The device unique identifier (as given by the C(WWAN) management service) which this connection applies to.
- If given, the connection will only apply to the specified device.
type: str
home-only:
description:
- When C(true), only connections to the home network will be allowed.
- Connections to roaming networks will not be made.
type: bool
default: false
mtu:
description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple Ethernet frames.
type: int
default: 0
network-id:
description:
- The Network ID (GSM LAI format, that is, MCC-MNC) to force specific network registration.
- If the Network ID is specified, NetworkManager will attempt to force the device to register only on the specified network.
- This can be used to ensure that the device does not roam when direct roaming control of the device is not otherwise possible.
type: str
number:
description: Legacy setting that used to help establish PPP data sessions for GSM-based modems.
type: str
password:
description:
- The password used to authenticate with the network, if required.
- Many providers do not require a password, or accept any password.
- But if a password is required, it is specified here.
type: str
password-flags:
description:
- NMSettingSecretFlags indicating how to handle the I(password) property.
- 'The following choices are allowed:
C(0) B(NONE): The system is responsible for providing and storing this secret (default),
C(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when it is required agents will be
asked to retrieve it
C(2) B(NOT_SAVED): This secret should not be saved, but should be requested from the user each time it is needed
C(4) B(NOT_REQUIRED): In situations where it cannot be automatically determined that the secret is required
(some VPNs and PPP providers do not require all secrets) this flag indicates that the specific secret is not required.'
type: int
choices: [ 0, 1, 2, 4 ]
default: 0
pin:
description:
- If the SIM is locked with a PIN it must be unlocked before any other operations are requested.
- Specify the PIN here to allow operation of the device.
type: str
pin-flags:
description:
- NMSettingSecretFlags indicating how to handle the I(gsm.pin) property.
- See I(gsm.password-flags) for NMSettingSecretFlags choices.
type: int
choices: [ 0, 1, 2, 4 ]
default: 0
sim-id:
description:
- The SIM card unique identifier (as given by the C(WWAN) management service) which this connection applies to.
- 'If given, the connection will apply to any device also allowed by I(gsm.device-id) which contains a SIM card matching
the given identifier.'
type: str
sim-operator-id:
description:
- An MCC/MNC string like C(310260) or C(21601) identifying the specific mobile network operator which this connection applies to.
- 'If given, the connection will apply to any device also allowed by I(gsm.device-id) and I(gsm.sim-id) which contains a SIM card
provisioned by the given operator.'
type: str
username:
description:
- The username used to authenticate with the network, if required.
- Many providers do not require a username, or accept any username.
- But if a username is required, it is specified here.
type: str
'''
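Beyond the basic APN/credentials example in EXAMPLES below, the roaming-control suboptions can be combined like this; a sketch only, with illustrative APN and network ID values:

    - name: GSM connection pinned to the home network
      community.general.nmcli:
        type: gsm
        conn_name: gsm-home-only
        ifname: cdc-wdm0
        gsm:
          apn: my.provider.apn
          network-id: '26202'
          home-only: true
        state: present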

EXAMPLES = r'''
@@ -616,6 +1005,14 @@ EXAMPLES = r'''
vxlan_local: 192.168.1.2
vxlan_remote: 192.168.1.5

- name: Add gre
community.general.nmcli:
type: gre
conn_name: gre_test1
ip_tunnel_dev: eth0
ip_tunnel_local: 192.168.1.2
ip_tunnel_remote: 192.168.1.5

- name: Add ipip
community.general.nmcli:
type: ipip
@@ -679,6 +1076,19 @@ EXAMPLES = r'''
autoconnect: true
state: present

- name: Create a gsm connection
community.general.nmcli:
type: gsm
conn_name: my-gsm-provider
ifname: cdc-wdm0
gsm:
apn: my.provider.apn
username: my-provider-username
password: my-provider-password
pin: my-sim-pin
autoconnect: true
state: present

'''

RETURN = r"""#
@@ -699,6 +1109,7 @@ class Nmcli(object):
A subclass may wish to override the following action methods:-
- create_connection()
- delete_connection()
- edit_connection()
- modify_connection()
- show_connection()
- up_connection()
@@ -721,6 +1132,7 @@ class Nmcli(object):
def __init__(self, module):
self.module = module
self.state = module.params['state']
self.ignore_unsupported_suboptions = module.params['ignore_unsupported_suboptions']
self.autoconnect = module.params['autoconnect']
self.conn_name = module.params['conn_name']
self.master = module.params['master']
@@ -776,12 +1188,15 @@ class Nmcli(object):
self.ip_tunnel_dev = module.params['ip_tunnel_dev']
self.ip_tunnel_local = module.params['ip_tunnel_local']
self.ip_tunnel_remote = module.params['ip_tunnel_remote']
self.ip_tunnel_input_key = module.params['ip_tunnel_input_key']
self.ip_tunnel_output_key = module.params['ip_tunnel_output_key']
self.nmcli_bin = self.module.get_bin_path('nmcli', True)
self.dhcp_client_id = module.params['dhcp_client_id']
self.zone = module.params['zone']
self.ssid = module.params['ssid']
self.wifi = module.params['wifi']
self.wifi_sec = module.params['wifi_sec']
self.gsm = module.params['gsm']

if self.method4:
self.ipv4_method = self.method4
@@ -810,6 +1225,12 @@ class Nmcli(object):
cmd = to_text(cmd)
return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)

def execute_edit_commands(self, commands, arguments):
arguments = arguments or []
cmd = [self.nmcli_bin, 'con', 'edit'] + arguments
data = "\n".join(commands)
return self.execute_command(cmd, data=data)

def connection_options(self, detect_change=False):
# Options common to multiple connection types.
options = {
@@ -902,6 +1323,11 @@ class Nmcli(object):
'ip-tunnel.parent': self.ip_tunnel_dev,
'ip-tunnel.remote': self.ip_tunnel_remote,
})
if self.type == 'gre':
options.update({
'ip-tunnel.input-key': self.ip_tunnel_input_key,
'ip-tunnel.output-key': self.ip_tunnel_output_key
})
elif self.type == 'vlan':
options.update({
'vlan.id': self.vlanid,
@@ -920,9 +1346,6 @@ class Nmcli(object):
})
if self.wifi:
for name, value in self.wifi.items():
# Disregard 'ssid' via 'wifi.ssid'
if name == 'ssid':
continue
options.update({
'802-11-wireless.%s' % name: value
})
@@ -931,6 +1354,12 @@ class Nmcli(object):
options.update({
'802-11-wireless-security.%s' % name: value
})
elif self.type == 'gsm':
if self.gsm:
for name, value in self.gsm.items():
options.update({
'gsm.%s' % name: value,
})
# Convert settings values based on the situation.
for setting, value in options.items():
setting_type = self.settings_type(setting)
@@ -962,10 +1391,14 @@ class Nmcli(object):
'dummy',
'ethernet',
'generic',
'gre',
'infiniband',
'ipip',
'sit',
'team',
'vlan',
'wifi'
'wifi',
'gsm',
)

@property
@@ -1006,6 +1439,7 @@ class Nmcli(object):
@property
def tunnel_conn_type(self):
return self.type in (
'gre',
'ipip',
'sit',
)
@@ -1037,9 +1471,17 @@ class Nmcli(object):
elif setting in ('ipv4.dns',
'ipv4.dns-search',
'ipv4.routes',
'ipv4.routing-rules',
'ipv4.route-metric'
'ipv6.dns',
'ipv6.dns-search'):
'ipv6.dns-search',
'802-11-wireless-security.group',
'802-11-wireless-security.leap-password-flags',
'802-11-wireless-security.pairwise',
'802-11-wireless-security.proto',
'802-11-wireless-security.psk-flags',
'802-11-wireless-security.wep-key-flags',
'802-11-wireless.mac-address-blacklist'):
return list
return str

@@ -1127,9 +1569,8 @@ class Nmcli(object):
return status

def edit_connection(self):
data = "\n".join(self.edit_commands + ['save', 'quit'])
cmd = [self.nmcli_bin, 'con', 'edit', self.conn_name]
return self.execute_command(cmd, data=data)
commands = self.edit_commands + ['save', 'quit']
return self.execute_edit_commands(commands, arguments=[self.conn_name])

def show_connection(self):
cmd = [self.nmcli_bin, '--show-secrets', 'con', 'show', self.conn_name]
@@ -1173,6 +1614,60 @@ class Nmcli(object):

return conn_info

def get_supported_properties(self, setting):
properties = []

if setting == '802-11-wireless-security':
set_property = 'psk'
set_value = 'FAKEVALUE'
commands = ['set %s.%s %s' % (setting, set_property, set_value)]
else:
commands = []

commands += ['print %s' % setting, 'quit', 'yes']

(rc, out, err) = self.execute_edit_commands(commands, arguments=['type', self.type])

if rc != 0:
raise NmcliModuleError(err)

for line in out.splitlines():
prefix = '%s.' % setting
if (line.startswith(prefix)):
pair = line.split(':', 1)
property = pair[0].strip().replace(prefix, '')
properties.append(property)

return properties

def check_for_unsupported_properties(self, setting):
if setting == '802-11-wireless':
setting_key = 'wifi'
elif setting == '802-11-wireless-security':
setting_key = 'wifi_sec'
else:
setting_key = setting

supported_properties = self.get_supported_properties(setting)
unsupported_properties = []

for property, value in getattr(self, setting_key).items():
if property not in supported_properties:
unsupported_properties.append(property)

if unsupported_properties:
msg_options = []
for property in unsupported_properties:
msg_options.append('%s.%s' % (setting_key, property))

msg = 'Invalid or unsupported option(s): "%s"' % '", "'.join(msg_options)
if self.ignore_unsupported_suboptions:
self.module.warn(msg)
else:
self.module.fail_json(msg=msg)

return unsupported_properties

def _compare_conn_params(self, conn_info, options):
changed = False
diff_before = dict()
@@ -1197,6 +1692,10 @@ class Nmcli(object):
value = value.upper()
# ensure current_value is also converted to uppercase in case nmcli changes behaviour
current_value = current_value.upper()
if key == 'gsm.apn':
# Depending on version nmcli adds double-quotes to gsm.apn
# Need to strip them in order to compare both
current_value = current_value.strip('"')
else:
# parameter does not exist
current_value = None
@@ -1230,6 +1729,7 @@ def main():
# Parsing argument file
module = AnsibleModule(
argument_spec=dict(
ignore_unsupported_suboptions=dict(type='bool', default=False),
autoconnect=dict(type='bool', default=True),
state=dict(type='str', required=True, choices=['absent', 'present']),
conn_name=dict(type='str', required=True),
@@ -1244,6 +1744,7 @@ def main():
'dummy',
'ethernet',
'generic',
'gre',
'infiniband',
'ipip',
'sit',
@@ -1252,13 +1753,14 @@ def main():
'vlan',
'vxlan',
'wifi',
'gsm',
]),
ip4=dict(type='str'),
gw4=dict(type='str'),
gw4_ignore_auto=dict(type='bool', default=False),
routes4=dict(type='list', elements='str'),
route_metric4=dict(type='int'),
routing_rules4=dict(type='str'),
routing_rules4=dict(type='list', elements='str'),
never_default4=dict(type='bool', default=False),
dns4=dict(type='list', elements='str'),
dns4_search=dict(type='list', elements='str'),
@@ -1315,9 +1817,14 @@ def main():
ip_tunnel_dev=dict(type='str'),
ip_tunnel_local=dict(type='str'),
ip_tunnel_remote=dict(type='str'),
# ip-tunnel type gre specific vars
ip_tunnel_input_key=dict(type='str', no_log=True),
ip_tunnel_output_key=dict(type='str', no_log=True),
# 802-11-wireless* specific vars
ssid=dict(type='str'),
wifi=dict(type='dict'),
wifi_sec=dict(type='dict', no_log=True),
gsm=dict(type='dict'),
),
mutually_exclusive=[['never_default4', 'gw4']],
required_if=[("type", "wifi", [("ssid")])],
@@ -1343,6 +1850,19 @@ def main():
nmcli.module.fail_json(msg="Please specify a name for the master when type is %s" % nmcli.type)
if nmcli.ifname is None:
nmcli.module.fail_json(msg="Please specify an interface name for the connection when type is %s" % nmcli.type)
if nmcli.type == 'wifi':
unsupported_properties = {}
if nmcli.wifi:
if 'ssid' in nmcli.wifi:
module.warn("Ignoring option 'wifi.ssid', it must be specified with option 'ssid'")
del nmcli.wifi['ssid']
unsupported_properties['wifi'] = nmcli.check_for_unsupported_properties('802-11-wireless')
if nmcli.wifi_sec:
unsupported_properties['wifi_sec'] = nmcli.check_for_unsupported_properties('802-11-wireless-security')
if nmcli.ignore_unsupported_suboptions and unsupported_properties:
for setting_key, properties in unsupported_properties.items():
for property in properties:
del getattr(nmcli, setting_key)[property]

try:
if nmcli.state == 'absent':

@@ -125,6 +125,11 @@ options:
- Sets the timeout in seconds for connection attempts.
type: int
default: 20
ehlohost:
description:
- Allows for manual specification of the host for EHLO.
type: str
version_added: 3.8.0
'''

EXAMPLES = r'''
@@ -189,6 +194,16 @@ EXAMPLES = r'''
subject: Ansible-report
body: System {{ ansible_hostname }} has been successfully provisioned.
secure: starttls

- name: Sending an e-mail using StartTLS, remote server, custom EHLO
community.general.mail:
host: some.smtp.host.tld
port: 25
ehlohost: my-resolvable-hostname.tld
to: John Smith <john.smith@example.com>
subject: Ansible-report
body: System {{ ansible_hostname }} has been successfully provisioned.
secure: starttls
'''

import os
@@ -215,6 +230,7 @@ def main():
password=dict(type='str', no_log=True),
host=dict(type='str', default='localhost'),
port=dict(type='int', default=25),
ehlohost=dict(type='str', default=None),
sender=dict(type='str', default='root', aliases=['from']),
to=dict(type='list', elements='str', default=['root'], aliases=['recipients']),
cc=dict(type='list', elements='str', default=[]),
@@ -235,6 +251,7 @@ def main():
password = module.params.get('password')
host = module.params.get('host')
port = module.params.get('port')
local_hostname = module.params.get('ehlohost')
sender = module.params.get('sender')
recipients = module.params.get('to')
copies = module.params.get('cc')
@@ -259,9 +276,9 @@ def main():
if secure != 'never':
try:
if PY3:
smtp = smtplib.SMTP_SSL(host=host, port=port, timeout=timeout)
smtp = smtplib.SMTP_SSL(host=host, port=port, local_hostname=local_hostname, timeout=timeout)
else:
smtp = smtplib.SMTP_SSL(timeout=timeout)
smtp = smtplib.SMTP_SSL(local_hostname=local_hostname, timeout=timeout)
code, smtpmessage = smtp.connect(host, port)
secure_state = True
except ssl.SSLError as e:
@@ -273,9 +290,9 @@ def main():

if not secure_state:
if PY3:
smtp = smtplib.SMTP(host=host, port=port, timeout=timeout)
smtp = smtplib.SMTP(host=host, port=port, local_hostname=local_hostname, timeout=timeout)
else:
smtp = smtplib.SMTP(timeout=timeout)
smtp = smtplib.SMTP(local_hostname=local_hostname, timeout=timeout)
code, smtpmessage = smtp.connect(host, port)

except smtplib.SMTPException as e:

@@ -264,12 +264,12 @@ def is_valid_hex_color(color_choice):


def escape_quotes(text):
'''Backslash any quotes within text.'''
"""Backslash any quotes within text."""
return "".join(escape_table.get(c, c) for c in text)


def recursive_escape_quotes(obj, keys):
'''Recursively escape quotes inside supplied keys inside block kit objects'''
"""Recursively escape quotes inside supplied keys inside block kit objects"""
if isinstance(obj, dict):
escaped = {}
for k, v in obj.items():
@@ -284,7 +284,7 @@ def recursive_escape_quotes(obj, keys):
return escaped


def build_payload_for_slack(module, text, channel, thread_id, username, icon_url, icon_emoji, link_names,
def build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_emoji, link_names,
parse, color, attachments, blocks, message_id):
payload = {}
if color == "normal" and text is not None:
@@ -344,7 +344,7 @@ def build_payload_for_slack(module, text, channel, thread_id, username, icon_url
return payload


def get_slack_message(module, domain, token, channel, ts):
def get_slack_message(module, token, channel, ts):
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
@@ -372,7 +372,7 @@ def do_notify_slack(module, domain, token, payload):
use_webapi = False
if token.count('/') >= 2:
# New style webhook token
slack_uri = SLACK_INCOMING_WEBHOOK % (token)
slack_uri = SLACK_INCOMING_WEBHOOK % token
elif re.match(r'^xox[abp]-\S+$', token):
slack_uri = SLACK_UPDATEMESSAGE_WEBAPI if 'ts' in payload else SLACK_POSTMESSAGE_WEBAPI
use_webapi = True
@@ -396,7 +396,7 @@ def do_notify_slack(module, domain, token, payload):
if use_webapi:
obscured_incoming_webhook = slack_uri
else:
obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % ('[obscured]')
obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % '[obscured]'
module.fail_json(msg=" failed to send %s to %s: %s" % (data, obscured_incoming_webhook, info['msg']))

# each API requires different handling
@@ -409,21 +409,21 @@ def do_notify_slack(module, domain, token, payload):
def main():
module = AnsibleModule(
argument_spec=dict(
domain=dict(type='str', required=False, default=None),
domain=dict(type='str'),
token=dict(type='str', required=True, no_log=True),
msg=dict(type='str', required=False, default=None),
channel=dict(type='str', default=None),
thread_id=dict(type='str', default=None),
msg=dict(type='str'),
channel=dict(type='str'),
thread_id=dict(type='str'),
username=dict(type='str', default='Ansible'),
icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'),
icon_emoji=dict(type='str', default=None),
icon_emoji=dict(type='str'),
link_names=dict(type='int', default=1, choices=[0, 1]),
parse=dict(type='str', default=None, choices=['none', 'full']),
parse=dict(type='str', choices=['none', 'full']),
validate_certs=dict(default=True, type='bool'),
color=dict(type='str', default='normal'),
attachments=dict(type='list', elements='dict', required=False, default=None),
attachments=dict(type='list', elements='dict'),
blocks=dict(type='list', elements='dict'),
message_id=dict(type='str', default=None),
message_id=dict(type='str'),
),
supports_check_mode=True,
)
@@ -453,7 +453,7 @@ def main():
# if updating an existing message, we can check if there's anything to update
if message_id is not None:
changed = False
msg = get_slack_message(module, domain, token, channel, message_id)
msg = get_slack_message(module, token, channel, message_id)
for key in ('icon_url', 'icon_emoji', 'link_names', 'color', 'attachments', 'blocks'):
if msg.get(key) != module.params.get(key):
changed = True
@@ -465,7 +465,7 @@ def main():
elif module.check_mode:
module.exit_json(changed=changed)

payload = build_payload_for_slack(module, text, channel, thread_id, username, icon_url, icon_emoji, link_names,
payload = build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_emoji, link_names,
parse, color, attachments, blocks, message_id)
slack_response = do_notify_slack(module, domain, token, payload)
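The message-update path exercised by get_slack_message() corresponds to a task like the following sketch (token, channel, and message ID are illustrative placeholders):

    - name: Update an existing Slack message in place
      community.general.slack:
        token: xoxb-1234567890-abcdefghijkl
        channel: C012345ABC
        message_id: "1605013325.000500"
        msg: 'Deployment finished: OK'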


@@ -248,8 +248,7 @@ class CPANMinus(CmdMixin, ModuleHelper):


def main():
cpanm = CPANMinus()
cpanm.run()
CPANMinus.execute()


if __name__ == '__main__':

@@ -565,7 +565,7 @@ class MavenDownloader:
return "Cannot find %s checksum from %s" % (checksum_alg, remote_url)
try:
# Check if remote checksum only contains md5/sha1 or md5/sha1 + filename
_remote_checksum = remote_checksum.split(None)[0]
_remote_checksum = remote_checksum.split(None, 1)[0]
remote_checksum = _remote_checksum
# remote_checksum is empty so we continue and keep original checksum string
# This should not happen since we check for remote_checksum before

plugins/modules/packaging/language/pipx.py (new file, 282 lines)
@@ -0,0 +1,282 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2021, Alexei Znamensky <russoz@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: pipx
short_description: Manages applications installed with pipx
version_added: 3.8.0
description:
- Manage Python applications installed in isolated virtualenvs using pipx.
options:
state:
type: str
choices: [present, absent, install, uninstall, uninstall_all, inject, upgrade, upgrade_all, reinstall, reinstall_all]
default: install
description:
- Desired state for the application.
- The states C(present) and C(absent) are aliases to C(install) and C(uninstall), respectively.
name:
type: str
description:
- >
The name of the application to be installed. It must be a simple package name.
For passing package specifications or installing from URLs or directories,
please use the I(source) option.
source:
type: str
description:
- >
The application source, such as a package with version specifier, or a URL,
directory or any other accepted specification. See C(pipx) documentation for more details.
- When specified, the C(pipx) command will use I(source) instead of I(name).
install_deps:
description:
- Include applications of dependent packages.
- Only used when I(state=install) or I(state=upgrade).
type: bool
default: false
inject_packages:
description:
- Packages to be injected into an existing virtual environment.
- Only used when I(state=inject).
type: list
elements: str
force:
description:
- Force modification of the application's virtual environment. See C(pipx) for details.
- Only used when I(state=install), I(state=upgrade), I(state=upgrade_all), or I(state=inject).
type: bool
default: false
include_injected:
description:
- Upgrade the injected packages along with the application.
- Only used when I(state=upgrade) or I(state=upgrade_all).
type: bool
default: false
index_url:
description:
- Base URL of Python Package Index.
- Only used when I(state=install), I(state=upgrade), or I(state=inject).
type: str
python:
description:
- Python version to be used when creating the application virtual environment. Must be 3.6+.
- Only used when I(state=install), I(state=reinstall), or I(state=reinstall_all).
type: str
executable:
description:
- Path to the C(pipx) installed in the system.
- >
If not specified, the module will use C(python -m pipx) to run the tool,
using the same Python interpreter as Ansible itself.
type: path
notes:
- This module does not install the C(pipx) Python package, however that can be easily done with the module M(ansible.builtin.pip).
- This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module.
- Please note that C(pipx) requires Python 3.6 or above.
- >
This first implementation does not verify whether a specified version constraint has been installed or not.
Hence, when using version operators, C(pipx) module will always try to execute the operation,
even when the application was previously installed.
This feature will be added in the future.
- See also the C(pipx) documentation at U(https://pypa.github.io/pipx/).
author:
- "Alexei Znamensky (@russoz)"
'''

EXAMPLES = '''
- name: Install tox
community.general.pipx:
name: tox

- name: Install tox from git repository
community.general.pipx:
name: tox
source: git+https://github.com/tox-dev/tox.git

- name: Upgrade tox
community.general.pipx:
name: tox
state: upgrade

- name: Reinstall black with specific Python version
community.general.pipx:
name: black
state: reinstall
python: 3.7

- name: Uninstall pycowsay
community.general.pipx:
name: pycowsay
state: absent
'''
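The C(inject) state documented above is not covered in EXAMPLES; a sketch of it (the injected package name is illustrative, and I(state=inject) requires both I(name) and I(inject_packages) per the module's required_if):

    - name: Inject an extra package into tox's virtual environment
      community.general.pipx:
        name: tox
        state: inject
        inject_packages:
          - tox-wheel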


import json

from ansible_collections.community.general.plugins.module_utils.module_helper import (
CmdStateModuleHelper, ArgFormat, ModuleHelperException
)
from ansible.module_utils.facts.compat import ansible_facts


_state_map = dict(
present='install',
absent='uninstall',
uninstall_all='uninstall-all',
upgrade_all='upgrade-all',
reinstall_all='reinstall-all',
)


class PipX(CmdStateModuleHelper):
output_params = ['name', 'source', 'index_url', 'force', 'installdeps']
module = dict(
argument_spec=dict(
state=dict(type='str', default='install',
choices=[
'present', 'absent', 'install', 'uninstall', 'uninstall_all',
'inject', 'upgrade', 'upgrade_all', 'reinstall', 'reinstall_all']),
name=dict(type='str'),
source=dict(type='str'),
install_deps=dict(type='bool', default=False),
inject_packages=dict(type='list', elements='str'),
force=dict(type='bool', default=False),
include_injected=dict(type='bool', default=False),
index_url=dict(type='str'),
python=dict(type='str'),
executable=dict(type='path')
),
required_if=[
('state', 'present', ['name']),
('state', 'install', ['name']),
('state', 'absent', ['name']),
('state', 'uninstall', ['name']),
('state', 'inject', ['name', 'inject_packages']),
],
supports_check_mode=True,
)
command_args_formats = dict(
state=dict(fmt=lambda v: [_state_map.get(v, v)]),
name_source=dict(fmt=lambda n, s: [s] if s else [n], stars=1),
install_deps=dict(fmt="--install-deps", style=ArgFormat.BOOLEAN),
inject_packages=dict(fmt=lambda v: v),
force=dict(fmt="--force", style=ArgFormat.BOOLEAN),
include_injected=dict(fmt="--include-injected", style=ArgFormat.BOOLEAN),
index_url=dict(fmt=('--index-url', '{0}'),),
python=dict(fmt=('--python', '{0}'),),
_list=dict(fmt=('list', '--include-injected', '--json'), style=ArgFormat.BOOLEAN),
)
check_rc = True

def _retrieve_installed(self):
def process_list(rc, out, err):
if not out:
return {}

results = {}
raw_data = json.loads(out)
for venv_name, venv in raw_data['venvs'].items():
results[venv_name] = {
'version': venv['metadata']['main_package']['package_version'],
'injected': dict(
(k, v['package_version']) for k, v in venv['metadata']['injected_packages']
),
}
return results

installed = self.run_command(params=[{'_list': True}], process_output=process_list,
publish_rc=False, publish_out=False, publish_err=False)

if self.vars.name is not None:
app_list = installed.get(self.vars.name)
if app_list:
return {self.vars.name: app_list}
else:
return {}

return installed

def __init_module__(self):
if self.vars.executable:
self.command = [self.vars.executable]
else:
facts = ansible_facts(self.module, gather_subset=['python'])
self.command = [facts['python']['executable'], '-m', 'pipx']

self.vars.set('will_change', False, output=False, change=True)
self.vars.set('application', self._retrieve_installed(), change=True, diff=True)

def __quit_module__(self):
self.vars.application = self._retrieve_installed()

def state_install(self):
if not self.vars.application or self.vars.force:
self.vars.will_change = True
if not self.module.check_mode:
self.run_command(params=['state', 'index_url', 'install_deps', 'force', 'python',
{'name_source': [self.vars.name, self.vars.source]}])

state_present = state_install

def state_upgrade(self):
if not self.vars.application:
raise ModuleHelperException(
"Trying to upgrade a non-existent application: {0}".format(self.vars.name))
if self.vars.force:
self.vars.will_change = True
if not self.module.check_mode:
self.run_command(params=['state', 'index_url', 'install_deps', 'force', 'name'])

def state_uninstall(self):
if self.vars.application and not self.module.check_mode:
self.run_command(params=['state', 'name'])

state_absent = state_uninstall

def state_reinstall(self):
if not self.vars.application:
raise ModuleHelperException(
"Trying to reinstall a non-existent application: {0}".format(self.vars.name))
self.vars.will_change = True
if not self.module.check_mode:
self.run_command(params=['state', 'name', 'python'])

def state_inject(self):
if not self.vars.application:
raise ModuleHelperException(
"Trying to inject packages into a non-existent application: {0}".format(self.vars.name))
if self.vars.force:
self.vars.will_change = True
if not self.module.check_mode:
self.run_command(params=['state', 'index_url', 'force', 'name', 'inject_packages'])

def state_uninstall_all(self):
if not self.module.check_mode:
self.run_command(params=['state'])

def state_reinstall_all(self):
if not self.module.check_mode:
self.run_command(params=['state', 'python'])

def state_upgrade_all(self):
if self.vars.force:
self.vars.will_change = True
if not self.module.check_mode:
self.run_command(params=['state', 'include_injected', 'force'])


def main():
PipX.execute()


if __name__ == '__main__':
main()
@@ -120,8 +120,7 @@ class CoprModule(object):
@property
def short_chroot(self):
"""str: Chroot (distribution-version-architecture) shortened to distribution-version."""
chroot_parts = self.chroot.split("-")
return "{0}-{1}".format(chroot_parts[0], chroot_parts[1])
return self.chroot.rsplit('-', 1)[0]

@property
def arch(self):
@@ -193,18 +192,20 @@ class CoprModule(object):
Returns:
Information about the repository.
"""
distribution, version = self.short_chroot.split("-")
distribution, version = self.short_chroot.split('-', 1)
chroot = self.short_chroot
while True:
repo_info, status_code = self._get(chroot)
if repo_info:
return repo_info
if distribution == "rhel":
chroot = "centos-stream"
chroot = "centos-stream-8"
distribution = "centos"
elif distribution == "centos":
if version == "stream":
if version == "stream-8":
version = "8"
elif version == "stream-9":
version = "9"
chroot = "epel-{0}".format(version)
distribution = "epel"
else:
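With the rsplit change, a chroot such as centos-stream-8-x86_64 now shortens correctly to centos-stream-8 and falls through the stream-8/stream-9 handling above. A sketch of a task exercising this path (project name is illustrative):

    - name: Enable a Copr project on CentOS Stream 8
      community.general.copr:
        name: '@copr/integration_tests'
        chroot: centos-stream-8-x86_64
        state: enabled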

@@ -120,7 +120,7 @@ def selfupdate(module, port_path):
changed = False
msg = "Macports already up-to-date"

return (changed, msg)
return (changed, msg, out, err)
else:
module.fail_json(msg="Failed to update Macports", stdout=out, stderr=err)

@@ -134,11 +134,11 @@ def upgrade(module, port_path):
if out.strip() == "Nothing to upgrade.":
changed = False
msg = "Ports already upgraded"
return (changed, msg)
return (changed, msg, out, err)
elif rc == 0:
changed = True
msg = "Outdated ports upgraded successfully"
return (changed, msg)
return (changed, msg, out, err)
else:
module.fail_json(msg="Failed to upgrade outdated ports", stdout=out, stderr=err)

@@ -165,7 +165,7 @@ def query_port(module, port_path, name, state="present"):
return False


def remove_ports(module, port_path, ports):
def remove_ports(module, port_path, ports, stdout, stderr):
""" Uninstalls one or more ports if installed. """

remove_c = 0
@@ -176,20 +176,21 @@ def remove_ports(module, port_path, ports):
continue

rc, out, err = module.run_command("%s uninstall %s" % (port_path, port))

stdout += out
stderr += err
if query_port(module, port_path, port):
module.fail_json(msg="Failed to remove %s: %s" % (port, err))
module.fail_json(msg="Failed to remove %s: %s" % (port, err), stdout=stdout, stderr=stderr)

remove_c += 1

if remove_c > 0:

module.exit_json(changed=True, msg="Removed %s port(s)" % remove_c)
module.exit_json(changed=True, msg="Removed %s port(s)" % remove_c, stdout=stdout, stderr=stderr)

module.exit_json(changed=False, msg="Port(s) already absent")
module.exit_json(changed=False, msg="Port(s) already absent", stdout=stdout, stderr=stderr)


def install_ports(module, port_path, ports, variant):
def install_ports(module, port_path, ports, variant, stdout, stderr):
""" Installs one or more ports if not already installed. """

install_c = 0
@@ -199,66 +200,70 @@ def install_ports(module, port_path, ports, variant):
continue

rc, out, err = module.run_command("%s install %s %s" % (port_path, port, variant))

stdout += out
stderr += err
if not query_port(module, port_path, port):
module.fail_json(msg="Failed to install %s: %s" % (port, err))
module.fail_json(msg="Failed to install %s: %s" % (port, err), stdout=stdout, stderr=stderr)

install_c += 1

if install_c > 0:
module.exit_json(changed=True, msg="Installed %s port(s)" % (install_c))
module.exit_json(changed=True, msg="Installed %s port(s)" % (install_c), stdout=stdout, stderr=stderr)

module.exit_json(changed=False, msg="Port(s) already present")
module.exit_json(changed=False, msg="Port(s) already present", stdout=stdout, stderr=stderr)


def activate_ports(module, port_path, ports):
def activate_ports(module, port_path, ports, stdout, stderr):
""" Activate a port if it's inactive. """

activate_c = 0

for port in ports:
if not query_port(module, port_path, port):
module.fail_json(msg="Failed to activate %s, port(s) not present" % (port))
module.fail_json(msg="Failed to activate %s, port(s) not present" % (port), stdout=stdout, stderr=stderr)

if query_port(module, port_path, port, state="active"):
continue

rc, out, err = module.run_command("%s activate %s" % (port_path, port))
stdout += out
stderr += err

if not query_port(module, port_path, port, state="active"):
module.fail_json(msg="Failed to activate %s: %s" % (port, err))
module.fail_json(msg="Failed to activate %s: %s" % (port, err), stdout=stdout, stderr=stderr)

activate_c += 1

if activate_c > 0:
module.exit_json(changed=True, msg="Activated %s port(s)" % (activate_c))
module.exit_json(changed=True, msg="Activated %s port(s)" % (activate_c), stdout=stdout, stderr=stderr)

module.exit_json(changed=False, msg="Port(s) already active")
module.exit_json(changed=False, msg="Port(s) already active", stdout=stdout, stderr=stderr)


def deactivate_ports(module, port_path, ports):
def deactivate_ports(module, port_path, ports, stdout, stderr):
""" Deactivate a port if it's active. """

deactivated_c = 0

for port in ports:
if not query_port(module, port_path, port):
module.fail_json(msg="Failed to deactivate %s, port(s) not present" % (port))
module.fail_json(msg="Failed to deactivate %s, port(s) not present" % (port), stdout=stdout, stderr=stderr)

if not query_port(module, port_path, port, state="active"):
continue

rc, out, err = module.run_command("%s deactivate %s" % (port_path, port))

stdout += out
stderr += err
if query_port(module, port_path, port, state="active"):
module.fail_json(msg="Failed to deactivate %s: %s" % (port, err))
module.fail_json(msg="Failed to deactivate %s: %s" % (port, err), stdout=stdout, stderr=stderr)

deactivated_c += 1

if deactivated_c > 0:
module.exit_json(changed=True, msg="Deactivated %s port(s)" % (deactivated_c))
module.exit_json(changed=True, msg="Deactivated %s port(s)" % (deactivated_c), stdout=stdout, stderr=stderr)

module.exit_json(changed=False, msg="Port(s) already inactive")
module.exit_json(changed=False, msg="Port(s) already inactive", stdout=stdout, stderr=stderr)


def main():
@@ -272,35 +277,42 @@ def main():
)
)

stdout = ""
stderr = ""

port_path = module.get_bin_path('port', True, ['/opt/local/bin'])

p = module.params

if p["selfupdate"]:
(changed, msg) = selfupdate(module, port_path)
(changed, msg, out, err) = selfupdate(module, port_path)
stdout += out
stderr += err
if not (p["name"] or p["upgrade"]):
module.exit_json(changed=changed, msg=msg)
module.exit_json(changed=changed, msg=msg, stdout=stdout, stderr=stderr)

if p["upgrade"]:
(changed, msg) = upgrade(module, port_path)
(changed, msg, out, err) = upgrade(module, port_path)
stdout += out
stderr += err
if not p["name"]:
module.exit_json(changed=changed, msg=msg)
module.exit_json(changed=changed, msg=msg, stdout=stdout, stderr=stderr)

pkgs = p["name"]

variant = p["variant"]

if p["state"] in ["present", "installed"]:
install_ports(module, port_path, pkgs, variant)
install_ports(module, port_path, pkgs, variant, stdout, stderr)

elif p["state"] in ["absent", "removed"]:
remove_ports(module, port_path, pkgs)
remove_ports(module, port_path, pkgs, stdout, stderr)

elif p["state"] == "active":
activate_ports(module, port_path, pkgs)
activate_ports(module, port_path, pkgs, stdout, stderr)

elif p["state"] == "inactive":
deactivate_ports(module, port_path, pkgs)
deactivate_ports(module, port_path, pkgs, stdout, stderr)


if __name__ == '__main__':
|
||||
|
||||
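The macports diff above applies one pattern throughout: a pair of accumulator strings is threaded through every helper, each run_command's output is appended to them, and the totals are attached to every exit_json/fail_json call so the caller always sees the combined transcript rather than only the last command's output. A minimal sketch of that shape, using a hypothetical uninstall-style helper (not the module's actual code):

    def remove_items(module, port_path, ports, stdout, stderr):
        removed = 0
        for port in ports:
            rc, out, err = module.run_command("%s uninstall %s" % (port_path, port))
            stdout += out   # keep output from every invocation, not just the last
            stderr += err
            if rc != 0:
                module.fail_json(msg="Failed to remove %s" % port, stdout=stdout, stderr=stderr)
            removed += 1
        module.exit_json(changed=removed > 0, msg="Removed %s port(s)" % removed,
                         stdout=stdout, stderr=stderr)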
@@ -246,6 +246,7 @@ def package_present(names, pkg_spec, module):
                 if match:
                     # It turns out we were able to install the package.
                     module.debug("package_present(): we were able to install package for name '%s'" % name)
+                    pkg_spec[name]['changed'] = True
                 else:
                     # We really did fail, fake the return code.
                     module.debug("package_present(): we really did fail for name '%s'" % name)
@@ -134,6 +134,7 @@ EXAMPLES = '''
 '''

+from collections import defaultdict
 import re
 from ansible.module_utils.basic import AnsibleModule

@@ -226,7 +227,8 @@ def remove_packages(module, pkgng_path, packages, dir_arg):


 def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg, state, ignoreosver):
-    install_c = 0
+    action_queue = defaultdict(list)
+    action_count = defaultdict(int)
     stdout = ""
     stderr = ""

@@ -263,29 +265,48 @@ def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg, sta
         if already_installed and state == "present":
             continue

-        update_available = query_update(module, pkgng_path, package, dir_arg, old_pkgng, pkgsite)
-        if not update_available and already_installed and state == "latest":
+        if (
+            already_installed and state == "latest"
+            and not query_update(module, pkgng_path, package, dir_arg, old_pkgng, pkgsite)
+        ):
             continue

-        if not module.check_mode:
-            if already_installed:
-                action = "upgrade"
-            else:
-                action = "install"
+        if already_installed:
+            action_queue["upgrade"].append(package)
+        else:
+            action_queue["install"].append(package)

+    if not module.check_mode:
+        # install/upgrade all named packages with one pkg command
+        for (action, package_list) in action_queue.items():
+            packages = ' '.join(package_list)
             if old_pkgng:
-                rc, out, err = module.run_command("%s %s %s %s -g -U -y %s" % (batch_var, pkgsite, pkgng_path, action, package))
+                rc, out, err = module.run_command("%s %s %s %s -g -U -y %s" % (batch_var, pkgsite, pkgng_path, action, packages))
             else:
-                rc, out, err = module.run_command("%s %s %s %s %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, action, pkgsite, package))
+                rc, out, err = module.run_command("%s %s %s %s %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, action, pkgsite, packages))
             stdout += out
             stderr += err

-        if not module.check_mode and not query_package(module, pkgng_path, package, dir_arg):
-            module.fail_json(msg="failed to %s %s: %s" % (action, package, out), stdout=stdout, stderr=stderr)
+            # individually verify packages are in requested state
+            for package in package_list:
+                verified = False
+                if action == 'install':
+                    verified = query_package(module, pkgng_path, package, dir_arg)
+                elif action == 'upgrade':
+                    verified = not query_update(module, pkgng_path, package, dir_arg, old_pkgng, pkgsite)

-        install_c += 1
+                if verified:
+                    action_count[action] += 1
+                else:
+                    module.fail_json(msg="failed to %s %s" % (action, package), stdout=stdout, stderr=stderr)

-    if install_c > 0:
-        return (True, "added %s package(s)" % (install_c), stdout, stderr)
+    if sum(action_count.values()) > 0:
+        past_tense = {'install': 'installed', 'upgrade': 'upgraded'}
+        messages = []
+        for (action, count) in action_count.items():
+            messages.append("%s %s package%s" % (past_tense.get(action, action), count, "s" if count != 1 else ""))
+
+        return (True, '; '.join(messages), stdout, stderr)

     return (False, "package(s) already %s" % (state), stdout, stderr)
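The pkgng rework above replaces one pkg invocation per package with one invocation per action kind: packages are first bucketed into a defaultdict keyed by 'install' or 'upgrade', each bucket is flushed with a single batched command, and success is then verified per package. A rough standalone sketch of that queue-then-flush shape (run_batch is a hypothetical stand-in for module.run_command):

    from collections import defaultdict

    def plan_and_apply(packages, installed, run_batch):
        # installed: set of names already present; run_batch: executes one batched command
        action_queue = defaultdict(list)
        action_count = defaultdict(int)
        for name in packages:
            action_queue["upgrade" if name in installed else "install"].append(name)
        for action, batch in action_queue.items():
            run_batch("pkg %s -g -U -y %s" % (action, " ".join(batch)))  # one command per action
            action_count[action] += len(batch)  # the real module verifies each package here
        return dict(action_count)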
@@ -1,6 +1,7 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-

+# Copyright: (c) 2021, Alexei Znamensky (russoz) <russoz@gmail.com>
 # Copyright: (c) 2018, Stanislas Lange (angristan) <angristan@pm.me>
 # Copyright: (c) 2018, Victor Carceler <vcarceler@iespuigcastellar.xeill.net>

@@ -12,17 +13,13 @@ __metaclass__ = type
 DOCUMENTATION = '''
 ---
 module: snap
-
 short_description: Manages snaps
-
-
 description:
     - "Manages snaps packages."
-
 options:
     name:
         description:
-            - Name of the snap to install or remove. Can be a list of snaps.
+            - Name of the snaps.
        required: true
        type: list
        elements: str

@@ -117,10 +114,10 @@ from ansible_collections.community.general.plugins.module_utils.module_helper im
 __state_map = dict(
     present='install',
     absent='remove',
-    info='info',  # not public
-    list='list',  # not public
     enabled='enable',
     disabled='disable',
+    info='info',  # not public
+    list='list',  # not public
 )

@@ -171,9 +168,6 @@ class Snap(CmdStateModuleHelper):
             '\n'.join(results[3]),
         ]

-    def snap_exists(self, snap_name):
-        return 0 == self.run_command(params=[{'state': 'info'}, {'name': snap_name}])[0]
-
     def is_snap_installed(self, snap_name):
         return 0 == self.run_command(params=[{'state': 'list'}, {'name': snap_name}])[0]

@@ -188,14 +182,7 @@ class Snap(CmdStateModuleHelper):
         notes = match.group('notes')
         return "disabled" not in notes.split(',')

-    def validate_input_snaps(self):
-        """Ensure that all exist."""
-        for snap_name in self.vars.name:
-            if not self.snap_exists(snap_name):
-                raise ModuleHelperException(msg="No snap matching '%s' available." % snap_name)
-
     def state_present(self):
-        self.validate_input_snaps()  # if snap doesnt exist, it will explode when trying to install
         self.vars.meta('classic').set(output=True)
         self.vars.meta('channel').set(output=True)
         actionable_snaps = [s for s in self.vars.name if not self.is_snap_installed(s)]

@@ -227,59 +214,32 @@ class Snap(CmdStateModuleHelper):
               "error output for more details.".format(cmd=self.vars.cmd)
         raise ModuleHelperException(msg=msg)

-    def state_absent(self):
-        self.validate_input_snaps()  # if snap doesnt exist, it will be absent by definition
-        actionable_snaps = [s for s in self.vars.name if self.is_snap_installed(s)]
+    def _generic_state_action(self, actionable_func, actionable_var, params=None):
+        actionable_snaps = [s for s in self.vars.name if actionable_func(s)]
         if not actionable_snaps:
             return
         self.changed = True
-        self.vars.snaps_removed = actionable_snaps
+        self.vars[actionable_var] = actionable_snaps
         if self.module.check_mode:
             return
-        params = ['classic', 'channel', 'state']  # get base cmd parts
+        if params is None:
+            params = ['state']
         commands = [params + [{'actionable_snaps': actionable_snaps}]]
         self.vars.cmd, rc, out, err = self._run_multiple_commands(commands)
         if rc == 0:
             return
-        msg = "Ooops! Snap removal failed while executing '{cmd}', please examine logs and " \
+        msg = "Ooops! Snap operation failed while executing '{cmd}', please examine logs and " \
              "error output for more details.".format(cmd=self.vars.cmd)
         raise ModuleHelperException(msg=msg)

+    def state_absent(self):
+        self._generic_state_action(self.is_snap_installed, "snaps_removed", ['classic', 'channel', 'state'])
+
     def state_enabled(self):
-        self.validate_input_snaps()
-        actionable_snaps = [s for s in self.vars.name if self.is_snap_enabled(s) is False]
-        if not actionable_snaps:
-            return
-        self.changed = True
-        self.vars.snaps_enabled = actionable_snaps
-        if self.module.check_mode:
-            return
-        params = ['classic', 'channel', 'state']  # get base cmd parts
-        commands = [params + [{'actionable_snaps': actionable_snaps}]]
-        self.vars.cmd, rc, out, err = self._run_multiple_commands(commands)
-        if rc == 0:
-            return
-        msg = "Ooops! Snap enabling failed while executing '{cmd}', please examine logs and " \
-              "error output for more details.".format(cmd=self.vars.cmd)
-        raise ModuleHelperException(msg=msg)
+        self._generic_state_action(lambda s: not self.is_snap_enabled(s), "snaps_enabled", ['classic', 'channel', 'state'])

     def state_disabled(self):
-        self.validate_input_snaps()
-        actionable_snaps = [s for s in self.vars.name if self.is_snap_enabled(s) is True]
-        if not actionable_snaps:
-            return
-        self.changed = True
-        self.vars.snaps_enabled = actionable_snaps
-        if self.module.check_mode:
-            return
-        params = ['classic', 'channel', 'state']  # get base cmd parts
-        commands = [params + [{'actionable_snaps': actionable_snaps}]]
-        self.vars.cmd, rc, out, err = self._run_multiple_commands(commands)
-        if rc == 0:
-            return
-        msg = "Ooops! Snap disabling failed while executing '{cmd}', please examine logs and " \
-              "error output for more details.".format(cmd=self.vars.cmd)
-        raise ModuleHelperException(msg=msg)
+        self._generic_state_action(self.is_snap_enabled, "snaps_disabled", ['classic', 'channel', 'state'])


 def main():
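The snap refactor above folds three nearly identical state methods into one parameterized helper: a predicate picks the snaps that still need work, a result-variable name records them, and the same command pipeline runs for whichever batch remains. Detached from the module-helper machinery, the idea is roughly:

    def generic_state_action(snaps, needs_work, results, key, execute):
        # needs_work: predicate selecting actionable snaps; results: dict reported to the user
        todo = [s for s in snaps if needs_work(s)]
        if not todo:
            return False  # nothing to change
        results[key] = todo
        execute(todo)  # one command covers the whole batch
        return True

    # state_absent  ~ generic_state_action(names, is_installed, results, "snaps_removed", remove_cmd)
    # state_enabled ~ generic_state_action(names, lambda s: not is_enabled(s), results, "snaps_enabled", enable_cmd)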
@@ -29,6 +29,7 @@ author:
 short_description: Manage packages on SUSE and openSUSE
 description:
     - Manage packages on SUSE and openSUSE using the zypper and rpm tools.
+    - Also supports transactional updates, by running zypper inside C(/sbin/transactional-update --continue --drop-if-no-change --quiet run).
 options:
     name:
         description:

@@ -213,6 +214,7 @@ EXAMPLES = '''
     ZYPP_LOCK_TIMEOUT: 20
 '''

+import os.path
 import xml
 import re
 from xml.dom.minidom import parseString as parseXML

@@ -337,6 +339,8 @@ def get_cmd(m, subcommand):
     is_install = subcommand in ['install', 'update', 'patch', 'dist-upgrade']
     is_refresh = subcommand == 'refresh'
     cmd = [m.get_bin_path('zypper', required=True), '--quiet', '--non-interactive', '--xmlout']
+    if transactional_updates():
+        cmd = [m.get_bin_path('transactional-update', required=True), '--continue', '--drop-if-no-change', '--quiet', 'run'] + cmd
     if m.params['extra_args_precommand']:
         args_list = m.params['extra_args_precommand'].split()
         cmd.extend(args_list)

@@ -491,6 +495,10 @@ def repo_refresh(m):

     return retvals


+def transactional_updates():
+    return os.path.exists('/var/lib/misc/transactional-update.state')
+
+
 # ===========================================
 # Main control flow
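The transactional-updates support above boils down to a conditional command prefix: if the state file that transactional-update maintains exists, the assembled zypper command list is wrapped in transactional-update ... run. As a self-contained sketch using the same paths the diff adds (binary paths are illustrative):

    import os.path

    def build_cmd(zypper_cmd):
        # on transactional systems (e.g. openSUSE MicroOS) wrap zypper in transactional-update
        if os.path.exists('/var/lib/misc/transactional-update.state'):
            return ['/sbin/transactional-update', '--continue',
                    '--drop-if-no-change', '--quiet', 'run'] + zypper_cmd
        return zypper_cmd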
@@ -137,6 +137,10 @@ from distutils.version import LooseVersion

 from ansible.module_utils.basic import AnsibleModule, missing_required_lib

+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils.six.moves import configparser, StringIO
+from io import open

 REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck']

@@ -382,12 +386,62 @@ def main():
     if not alias and state == "present":
         module.fail_json(msg='Name required when adding non-repo files.')

+    # Download / Open and parse .repo file to ensure idempotency
+    if repo and repo.endswith('.repo'):
+        if repo.startswith(('http://', 'https://')):
+            response, info = fetch_url(module=module, url=repo, force=True)
+            if not response or info['status'] != 200:
+                module.fail_json(msg='Error downloading .repo file from provided URL')
+            repofile_text = to_text(response.read(), errors='surrogate_or_strict')
+        else:
+            try:
+                with open(repo, encoding='utf-8') as file:
+                    repofile_text = file.read()
+            except IOError:
+                module.fail_json(msg='Error opening .repo file from provided path')
+
+        repofile = configparser.ConfigParser()
+        try:
+            repofile.readfp(StringIO(repofile_text))
+        except configparser.Error:
+            module.fail_json(msg='Invalid format, .repo file could not be parsed')
+
+        # No support for .repo file with zero or more than one repository
+        if len(repofile.sections()) != 1:
+            err = "Invalid format, .repo file contains %s repositories, expected 1" % len(repofile.sections())
+            module.fail_json(msg=err)
+
+        section = repofile.sections()[0]
+        repofile_items = dict(repofile.items(section))
+        # Only proceed if at least baseurl is available
+        if 'baseurl' not in repofile_items:
+            module.fail_json(msg='No baseurl found in .repo file')
+
+        # Set alias (name) and url based on values from .repo file
+        alias = section
+        repodata['alias'] = section
+        repodata['url'] = repofile_items['baseurl']
+
+        # If gpgkey is part of the .repo file, auto import key
+        if 'gpgkey' in repofile_items:
+            auto_import_keys = True
+
+        # Map additional values, if available
+        if 'name' in repofile_items:
+            repodata['name'] = repofile_items['name']
+        if 'enabled' in repofile_items:
+            repodata['enabled'] = repofile_items['enabled']
+        if 'autorefresh' in repofile_items:
+            repodata['autorefresh'] = repofile_items['autorefresh']
+        if 'gpgcheck' in repofile_items:
+            repodata['gpgcheck'] = repofile_items['gpgcheck']
+
     exists, mod, old_repos = repo_exists(module, repodata, overwrite_multiple)

-    if repo:
-        shortname = repo
-    else:
+    if alias:
         shortname = alias
+    else:
+        shortname = repo

     if state == 'present':
         if exists and not mod:
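The .repo handling added above rests on a small amount of INI parsing: the file must contain exactly one section, and that section must provide at least baseurl, which then becomes the repository URL while the section name becomes the alias. A compact sketch of the same validation with the stdlib configparser (read_file is the Python 3 spelling of the readfp call the diff uses for Python 2 compatibility):

    import configparser
    import io

    def parse_repo_file(text):
        parser = configparser.ConfigParser()
        parser.read_file(io.StringIO(text))  # raises configparser.Error on bad input
        sections = parser.sections()
        if len(sections) != 1:
            raise ValueError("expected exactly 1 repository, got %d" % len(sections))
        items = dict(parser.items(sections[0]))
        if 'baseurl' not in items:
            raise ValueError("no baseurl found in .repo file")
        return sections[0], items  # section name becomes the alias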
plugins/modules/pipx.py (new symbolic link)
@@ -0,0 +1 @@
+./packaging/language/pipx.py

plugins/modules/proxmox_tasks_info.py (new symbolic link)
@@ -0,0 +1 @@
+cloud/misc/proxmox_tasks_info.py

plugins/modules/redis_data.py (new symbolic link)
@@ -0,0 +1 @@
+./database/misc/redis_data.py

plugins/modules/redis_data_info.py (new symbolic link)
@@ -0,0 +1 @@
+database/misc/redis_data_info.py
@@ -168,7 +168,9 @@ EXAMPLES = '''
         password: "{{ password }}"
         resource_uri: "/redfish/v1/Managers/1/NetworkProtocol/Oem/Lenovo/DNS"
       register: result
-    - ansible.builtin.debug:
+
+    - name: Print fetched information
+      ansible.builtin.debug:
         msg: "{{ result.redfish_facts.data }}"

     - name: Get Lenovo FoD key collection resource via GetCollectionResource command

@@ -180,7 +182,9 @@ EXAMPLES = '''
         password: "{{ password }}"
         resource_uri: "/redfish/v1/Managers/1/Oem/Lenovo/FoD/Keys"
      register: result
-    - ansible.builtin.debug:
+
+    - name: Print fetched information
+      ansible.builtin.debug:
         msg: "{{ result.redfish_facts.data_list }}"

     - name: Update ComputeSystem property AssetTag via PatchResource command
@@ -56,7 +56,8 @@ options:
     required: false
     aliases: [ account_id ]
     description:
-      - ID of account to delete/modify
+      - ID of account to delete/modify.
+      - Can also be used in account creation to work around vendor issues where the ID of the new user is required in the POST request.
     type: str
   new_username:
     required: false

@@ -207,6 +208,15 @@ options:
     description:
       - The transfer method to use with the image
     type: str
+  strip_etag_quotes:
+    description:
+      - Removes surrounding quotes of etag used in C(If-Match) header
+        of C(PATCH) requests.
+      - Only use this option to resolve bad vendor implementation where
+        C(If-Match) only matches the unquoted etag string.
+    type: bool
+    default: false
+    version_added: 3.7.0

 author: "Jose Delarosa (@jose-delarosa)"
 '''

@@ -631,7 +641,8 @@ def main():
                 transfer_protocol_type=dict(),
                 transfer_method=dict(),
             )
-        )
+        ),
+        strip_etag_quotes=dict(type='bool', default=False),
     ),
     required_together=[
         ('username', 'password'),

@@ -686,10 +697,13 @@ def main():
     # VirtualMedia options
     virtual_media = module.params['virtual_media']

+    # Etag options
+    strip_etag_quotes = module.params['strip_etag_quotes']
+
     # Build root URI
     root_uri = "https://" + module.params['baseuri']
     rf_utils = RedfishUtils(creds, root_uri, timeout, module,
-                            resource_id=resource_id, data_modification=True)
+                            resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes)

     # Check that Category is valid
     if category not in CATEGORY_COMMANDS_ALL:
@@ -91,6 +91,15 @@ options:
       - setting dict of EthernetInterface on OOB controller
     type: dict
     version_added: '0.2.0'
+  strip_etag_quotes:
+    description:
+      - Removes surrounding quotes of etag used in C(If-Match) header
+        of C(PATCH) requests.
+      - Only use this option to resolve bad vendor implementation where
+        C(If-Match) only matches the unquoted etag string.
+    type: bool
+    default: false
+    version_added: 3.7.0

 author: "Jose Delarosa (@jose-delarosa)"
 '''

@@ -237,7 +246,8 @@ def main():
         nic_config=dict(
             type='dict',
             default={}
-        )
+        ),
+        strip_etag_quotes=dict(type='bool', default=False),
     ),
     required_together=[
         ('username', 'password'),

@@ -275,10 +285,13 @@ def main():
     nic_addr = module.params['nic_addr']
     nic_config = module.params['nic_config']

+    # Etag options
+    strip_etag_quotes = module.params['strip_etag_quotes']
+
     # Build root URI
     root_uri = "https://" + module.params['baseuri']
     rf_utils = RedfishUtils(creds, root_uri, timeout, module,
-                            resource_id=resource_id, data_modification=True)
+                            resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes)

     # Check that Category is valid
     if category not in CATEGORY_COMMANDS_ALL:
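Both Redfish modules above only plumb the new strip_etag_quotes flag through to RedfishUtils; the stripping itself happens when the If-Match header is built for PATCH requests. A hedged sketch of what that amounts to (the real logic lives in the shared redfish_utils module, so the function name here is illustrative):

    def if_match_value(etag, strip_etag_quotes=False):
        # HTTP etags are normally quoted; some vendor firmware only matches the
        # bare string, so stripping is opt-in rather than the default
        if strip_etag_quotes and etag.startswith('"') and etag.endswith('"'):
            return etag[1:-1]
        return etag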
@@ -67,7 +67,9 @@ EXAMPLES = '''
         username: "{{ username }}"
         password: "{{ password }}"
       register: result
-    - ansible.builtin.debug:
+
+    - name: Print fetched information
+      ansible.builtin.debug:
         msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}"

     - name: Get CPU model

@@ -78,7 +80,9 @@ EXAMPLES = '''
         username: "{{ username }}"
         password: "{{ password }}"
       register: result
-    - ansible.builtin.debug:
+
+    - name: Print fetched information
+      ansible.builtin.debug:
         msg: "{{ result.redfish_facts.cpu.entries.0.Model }}"

     - name: Get memory inventory

@@ -108,7 +112,9 @@ EXAMPLES = '''
         username: "{{ username }}"
         password: "{{ password }}"
       register: result
-    - ansible.builtin.debug:
+
+    - name: Print fetched information
+      ansible.builtin.debug:
         msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}"

     - name: Get Volume Inventory

@@ -119,7 +125,8 @@ EXAMPLES = '''
         username: "{{ username }}"
         password: "{{ password }}"
       register: result
-    - ansible.builtin.debug:
+    - name: Print fetched information
+      ansible.builtin.debug:
         msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}"

     - name: Get Session information

@@ -130,7 +137,9 @@ EXAMPLES = '''
         username: "{{ username }}"
         password: "{{ password }}"
       register: result
-    - ansible.builtin.debug:
+
+    - name: Print fetched information
+      ansible.builtin.debug:
         msg: "{{ result.redfish_facts.session.entries | to_nice_json }}"

     - name: Get default inventory information

@@ -139,7 +148,8 @@ EXAMPLES = '''
         username: "{{ username }}"
         password: "{{ password }}"
       register: result
-    - ansible.builtin.debug:
+    - name: Print fetched information
+      ansible.builtin.debug:
         msg: "{{ result.redfish_facts | to_nice_json }}"

     - name: Get several inventories
plugins/modules/rundeck_job_executions_info.py (new symbolic link)
@@ -0,0 +1 @@
+./web_infrastructure/rundeck_job_executions_info.py

plugins/modules/rundeck_job_run.py (new symbolic link)
@@ -0,0 +1 @@
+./web_infrastructure/rundeck_job_run.py
@@ -211,7 +211,7 @@ class GitLabDeployKey(object):
     @param key_title Title of the key
     '''
     def findDeployKey(self, project, key_title):
-        deployKeys = project.keys.list()
+        deployKeys = project.keys.list(all=True)
        for deployKey in deployKeys:
            if (deployKey.title == key_title):
                return deployKey
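The one-line gitlab_deploy_key fix above is a pagination bug: python-gitlab's list() returns only the first page of results by default, so a deploy key beyond page one was never found and got re-created on every run. Passing all=True makes the client fetch every page:

    def find_deploy_key(project, title):
        # all=True disables pagination; without it only the first page of keys is scanned
        for key in project.keys.list(all=True):
            if key.title == title:
                return key
        return None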
@@ -61,6 +61,28 @@ options:
     choices: ["private", "internal", "public"]
     default: private
     type: str
+  project_creation_level:
+    description:
+      - Determine if developers can create projects in the group.
+    choices: ["developer", "maintainer", "noone"]
+    type: str
+    version_added: 3.7.0
+  auto_devops_enabled:
+    description:
+      - Default to Auto DevOps pipeline for all projects within this group.
+    type: bool
+    version_added: 3.7.0
+  subgroup_creation_level:
+    description:
+      - Allowed to create subgroups.
+    choices: ["maintainer", "owner"]
+    type: str
+    version_added: 3.7.0
+  require_two_factor_authentication:
+    description:
+      - Require all users in this group to setup two-factor authentication.
+    type: bool
+    version_added: 3.7.0
 '''

 EXAMPLES = '''

@@ -93,6 +115,20 @@ EXAMPLES = '''
     path: my_first_group
     state: present
     parent: "super_parent/parent"
+
+# Other group which only allows sub-groups - no projects
+- name: "Create GitLab Group for SubGroups only"
+  community.general.gitlab_group:
+    api_url: https://gitlab.example.com/
+    validate_certs: True
+    api_username: dj-wasabi
+    api_password: "MySecretPassword"
+    name: my_main_group
+    path: my_main_group
+    state: present
+    project_creation_level: noone
+    auto_devops_enabled: false
+    subgroup_creation_level: maintainer
 '''

 RETURN = '''

@@ -166,17 +202,27 @@ class GitLabGroup(object):
                 'name': name,
                 'path': options['path'],
                 'parent_id': parent_id,
-                'visibility': options['visibility']
+                'visibility': options['visibility'],
+                'project_creation_level': options['project_creation_level'],
+                'auto_devops_enabled': options['auto_devops_enabled'],
+                'subgroup_creation_level': options['subgroup_creation_level'],
             }
             if options.get('description'):
                 payload['description'] = options['description']
+            if options.get('require_two_factor_authentication'):
+                payload['require_two_factor_authentication'] = options['require_two_factor_authentication']
             group = self.createGroup(payload)
             changed = True
         else:
             changed, group = self.updateGroup(self.groupObject, {
                 'name': name,
                 'description': options['description'],
-                'visibility': options['visibility']})
+                'visibility': options['visibility'],
+                'project_creation_level': options['project_creation_level'],
+                'auto_devops_enabled': options['auto_devops_enabled'],
+                'subgroup_creation_level': options['subgroup_creation_level'],
+                'require_two_factor_authentication': options['require_two_factor_authentication'],
+            })

         self.groupObject = group
         if changed:

@@ -258,6 +304,10 @@ def main():
         state=dict(type='str', default="present", choices=["absent", "present"]),
         parent=dict(type='str'),
         visibility=dict(type='str', default="private", choices=["internal", "private", "public"]),
+        project_creation_level=dict(type='str', choices=['developer', 'maintainer', 'noone']),
+        auto_devops_enabled=dict(type='bool'),
+        subgroup_creation_level=dict(type='str', choices=['maintainer', 'owner']),
+        require_two_factor_authentication=dict(type='bool'),
     ))

     module = AnsibleModule(

@@ -281,6 +331,10 @@ def main():
     state = module.params['state']
     parent_identifier = module.params['parent']
     group_visibility = module.params['visibility']
+    project_creation_level = module.params['project_creation_level']
+    auto_devops_enabled = module.params['auto_devops_enabled']
+    subgroup_creation_level = module.params['subgroup_creation_level']
+    require_two_factor_authentication = module.params['require_two_factor_authentication']

     if not HAS_GITLAB_PACKAGE:
         module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)

@@ -314,7 +368,12 @@ def main():
         if gitlab_group.createOrUpdateGroup(group_name, parent_group, {
                                             "path": group_path,
                                             "description": description,
-                                            "visibility": group_visibility}):
+                                            "visibility": group_visibility,
+                                            "project_creation_level": project_creation_level,
+                                            "auto_devops_enabled": auto_devops_enabled,
+                                            "subgroup_creation_level": subgroup_creation_level,
+                                            "require_two_factor_authentication": require_two_factor_authentication,
+                                            }):
             module.exit_json(changed=True, msg="Successfully created or updated the group %s" % group_name, group=gitlab_group.groupObject._attrs)
         else:
             module.exit_json(changed=False, msg="No need to update the group %s" % group_name, group=gitlab_group.groupObject._attrs)
@@ -32,15 +32,38 @@ options:
     type: str
   gitlab_user:
     description:
-      - The username of the member to add to/remove from the GitLab group.
-    required: true
-    type: str
+      - A username or a list of usernames to add to/remove from the GitLab group.
+      - Mutually exclusive with I(gitlab_users_access).
+    type: list
+    elements: str
   access_level:
     description:
       - The access level for the user.
       - Required if I(state=present), user state is set to present.
+      - Mutually exclusive with I(gitlab_users_access).
     type: str
     choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
+  gitlab_users_access:
+    description:
+      - Provide a list of user to access level mappings.
+      - Every dictionary in this list specifies a user (by username) and the access level the user should have.
+      - Mutually exclusive with I(gitlab_user) and I(access_level).
+      - Use together with I(purge_users) to remove all users not specified here from the group.
+    type: list
+    elements: dict
+    suboptions:
+      name:
+        description: A username or a list of usernames to add to/remove from the GitLab group.
+        type: str
+        required: true
+      access_level:
+        description:
+          - The access level for the user.
+          - Required if I(state=present), user state is set to present.
+        type: str
+        choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
+        required: true
+    version_added: 3.6.0
   state:
     description:
       - State of the member in the group.

@@ -49,6 +72,15 @@ options:
     choices: ['present', 'absent']
     default: 'present'
     type: str
+  purge_users:
+    description:
+      - Adds/removes users of the given access_level to match the given I(gitlab_user)/I(gitlab_users_access) list.
+        If omitted do not purge orphaned members.
+      - Is only used when I(state=present).
+    type: list
+    elements: str
+    choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
+    version_added: 3.6.0
 notes:
   - Supports C(check_mode).
 '''

@@ -70,6 +102,51 @@ EXAMPLES = r'''
     gitlab_group: groupname
     gitlab_user: username
     state: absent

+- name: Add a list of Users to A GitLab Group
+  community.general.gitlab_group_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    gitlab_group: groupname
+    gitlab_user:
+      - user1
+      - user2
+    access_level: developer
+    state: present
+
+- name: Add a list of Users with Dedicated Access Levels to A GitLab Group
+  community.general.gitlab_group_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    gitlab_group: groupname
+    gitlab_users_access:
+      - name: user1
+        access_level: developer
+      - name: user2
+        access_level: maintainer
+    state: present
+
+- name: Add a user, remove all others which might be on this access level
+  community.general.gitlab_group_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    gitlab_group: groupname
+    gitlab_user: username
+    access_level: developer
+    purge_users: developer
+    state: present
+
+- name: Remove a list of Users with Dedicated Access Levels to A GitLab Group
+  community.general.gitlab_group_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    gitlab_group: groupname
+    gitlab_users_access:
+      - name: user1
+        access_level: developer
+      - name: user2
+        access_level: maintainer
+    state: absent
 '''

 RETURN = r''' # '''

@@ -102,15 +179,30 @@ class GitLabGroup(object):

     # get group id if group exists
     def get_group_id(self, gitlab_group):
-        group_exists = self._gitlab.groups.list(search=gitlab_group)
-        if group_exists:
-            return group_exists[0].id
+        groups = self._gitlab.groups.list(search=gitlab_group)
+        for group in groups:
+            if group.full_path == gitlab_group:
+                return group.id
+        for group in groups:
+            if group.path == gitlab_group or group.name == gitlab_group:
+                return group.id

     # get all members in a group
     def get_members_in_a_group(self, gitlab_group_id):
         group = self._gitlab.groups.get(gitlab_group_id)
         return group.members.list(all=True)

+    # get single member in a group by user name
+    def get_member_in_a_group(self, gitlab_group_id, gitlab_user_id):
+        member = None
+        group = self._gitlab.groups.get(gitlab_group_id)
+        try:
+            member = group.members.get(gitlab_user_id)
+            if member:
+                return member
+        except gitlab.exceptions.GitlabGetError as e:
+            return None
+
     # check if the user is a member of the group
     def is_user_a_member(self, members, gitlab_user_id):
         for member in members:

@@ -120,27 +212,14 @@ class GitLabGroup(object):

     # add user to a group
     def add_member_to_group(self, gitlab_user_id, gitlab_group_id, access_level):
-        try:
-            group = self._gitlab.groups.get(gitlab_group_id)
-            add_member = group.members.create(
-                {'user_id': gitlab_user_id, 'access_level': access_level})
-
-            if add_member:
-                return add_member.username
-
-        except (gitlab.exceptions.GitlabCreateError) as e:
-            self._module.fail_json(
-                msg="Failed to add member to the Group, Group ID %s: %s" % (gitlab_group_id, e))
+        group = self._gitlab.groups.get(gitlab_group_id)
+        add_member = group.members.create(
+            {'user_id': gitlab_user_id, 'access_level': access_level})

     # remove user from a group
     def remove_user_from_group(self, gitlab_user_id, gitlab_group_id):
-        try:
-            group = self._gitlab.groups.get(gitlab_group_id)
-            group.members.delete(gitlab_user_id)
-
-        except (gitlab.exceptions.GitlabDeleteError) as e:
-            self._module.fail_json(
-                msg="Failed to remove member from GitLab group, ID %s: %s" % (gitlab_group_id, e))
+        group = self._gitlab.groups.get(gitlab_group_id)
+        group.members.delete(gitlab_user_id)

     # get user's access level
     def get_user_access_level(self, members, gitlab_user_id):

@@ -152,12 +231,8 @@ class GitLabGroup(object):
     def update_user_access_level(self, members, gitlab_user_id, access_level):
         for member in members:
             if member.id == gitlab_user_id:
-                try:
-                    member.access_level = access_level
-                    member.save()
-                except (gitlab.exceptions.GitlabCreateError) as e:
-                    self._module.fail_json(
-                        msg="Failed to update the access level for the member, %s: %s" % (gitlab_user_id, e))
+                member.access_level = access_level
+                member.save()


 def main():

@@ -165,9 +240,18 @@ def main():
     argument_spec.update(dict(
         api_token=dict(type='str', required=True, no_log=True),
         gitlab_group=dict(type='str', required=True),
-        gitlab_user=dict(type='str', required=True),
+        gitlab_user=dict(type='list', elements='str'),
         state=dict(type='str', default='present', choices=['present', 'absent']),
-        access_level=dict(type='str', required=False, choices=['guest', 'reporter', 'developer', 'maintainer', 'owner'])
+        access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']),
+        purge_users=dict(type='list', elements='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']),
+        gitlab_users_access=dict(
+            type='list',
+            elements='dict',
+            options=dict(
+                name=dict(type='str', required=True),
+                access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner'], required=True),
+            )
+        ),
     ))

     module = AnsibleModule(

@@ -175,15 +259,19 @@ def main():
         mutually_exclusive=[
             ['api_username', 'api_token'],
             ['api_password', 'api_token'],
+            ['gitlab_user', 'gitlab_users_access'],
+            ['access_level', 'gitlab_users_access'],
         ],
         required_together=[
             ['api_username', 'api_password'],
+            ['gitlab_user', 'access_level'],
         ],
         required_one_of=[
             ['api_username', 'api_token'],
+            ['gitlab_user', 'gitlab_users_access'],
         ],
         required_if=[
-            ['state', 'present', ['access_level']],
+            ['state', 'present', ['access_level', 'gitlab_users_access'], True],
         ],
         supports_check_mode=True,
     )

@@ -191,72 +279,166 @@ def main():
     if not HAS_PY_GITLAB:
         module.fail_json(msg=missing_required_lib('python-gitlab', url='https://python-gitlab.readthedocs.io/en/stable/'), exception=GITLAB_IMP_ERR)

+    access_level_int = {
+        'guest': gitlab.GUEST_ACCESS,
+        'reporter': gitlab.REPORTER_ACCESS,
+        'developer': gitlab.DEVELOPER_ACCESS,
+        'maintainer': gitlab.MAINTAINER_ACCESS,
+        'owner': gitlab.OWNER_ACCESS
+    }
+
     gitlab_group = module.params['gitlab_group']
     gitlab_user = module.params['gitlab_user']
     state = module.params['state']
     access_level = module.params['access_level']
+    purge_users = module.params['purge_users']

-    # convert access level string input to int
-    if access_level:
-        access_level_int = {
-            'guest': gitlab.GUEST_ACCESS,
-            'reporter': gitlab.REPORTER_ACCESS,
-            'developer': gitlab.DEVELOPER_ACCESS,
-            'maintainer': gitlab.MAINTAINER_ACCESS,
-            'owner': gitlab.OWNER_ACCESS
-        }
-
-        access_level = access_level_int[access_level]
+    if purge_users:
+        purge_users = [access_level_int[level] for level in purge_users]

     # connect to gitlab server
     gl = gitlabAuthentication(module)

     group = GitLabGroup(module, gl)

-    gitlab_user_id = group.get_user_id(gitlab_user)
     gitlab_group_id = group.get_group_id(gitlab_group)

     # group doesn't exist
     if not gitlab_group_id:
         module.fail_json(msg="group '%s' not found." % gitlab_group)

-    # user doesn't exist
-    if not gitlab_user_id:
-        if state == 'absent':
-            module.exit_json(changed=False, result="user '%s' not found, and thus also not part of the group" % gitlab_user)
-        else:
-            module.fail_json(msg="user '%s' not found." % gitlab_user)
+    members = []
+    if module.params['gitlab_user'] is not None:
+        gitlab_users_access = []
+        gitlab_users = module.params['gitlab_user']
+        for gl_user in gitlab_users:
+            gitlab_users_access.append({'name': gl_user, 'access_level': access_level_int[access_level] if access_level else None})
+    elif module.params['gitlab_users_access'] is not None:
+        gitlab_users_access = module.params['gitlab_users_access']
+        for user_level in gitlab_users_access:
+            user_level['access_level'] = access_level_int[user_level['access_level']]

-    members = group.get_members_in_a_group(gitlab_group_id)
-    is_user_a_member = group.is_user_a_member(members, gitlab_user_id)
-
-    # check if the user is a member in the group
-    if not is_user_a_member:
-        if state == 'present':
-            # add user to the group
-            if not module.check_mode:
-                group.add_member_to_group(gitlab_user_id, gitlab_group_id, access_level)
-            module.exit_json(changed=True, result="Successfully added user '%s' to the group." % gitlab_user)
-        # state as absent
-        else:
-            module.exit_json(changed=False, result="User, '%s', is not a member in the group. No change to report" % gitlab_user)
-    # in case that a user is a member
+    if len(gitlab_users_access) == 1 and not purge_users:
+        # only single user given
+        members = [group.get_member_in_a_group(gitlab_group_id, group.get_user_id(gitlab_users_access[0]['name']))]
+        if members[0] is None:
+            members = []
+    elif len(gitlab_users_access) > 1 or purge_users:
+        # list of users given
+        members = group.get_members_in_a_group(gitlab_group_id)
     else:
-        if state == 'present':
-            # compare the access level
-            user_access_level = group.get_user_access_level(members, gitlab_user_id)
-            if user_access_level == access_level:
-                module.exit_json(changed=False, result="User, '%s', is already a member in the group. No change to report" % gitlab_user)
+        module.exit_json(changed='OK', result="Nothing to do, please give at least one user or set purge_users true.",
+                         result_data=[])
+
+    changed = False
+    error = False
+    changed_users = []
+    changed_data = []
+
+    for gitlab_user in gitlab_users_access:
+        gitlab_user_id = group.get_user_id(gitlab_user['name'])
+
+        # user doesn't exist
+        if not gitlab_user_id:
+            if state == 'absent':
+                changed_users.append("user '%s' not found, and thus also not part of the group" % gitlab_user['name'])
+                changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
+                                     'msg': "user '%s' not found, and thus also not part of the group" % gitlab_user['name']})
             else:
-                # update the access level for the user
-                if not module.check_mode:
-                    group.update_user_access_level(members, gitlab_user_id, access_level)
-                module.exit_json(changed=True, result="Successfully updated the access level for the user, '%s'" % gitlab_user)
+                error = True
+                changed_users.append("user '%s' not found." % gitlab_user['name'])
+                changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+                                     'msg': "user '%s' not found." % gitlab_user['name']})
+            continue
+
+        is_user_a_member = group.is_user_a_member(members, gitlab_user_id)
+
+        # check if the user is a member in the group
+        if not is_user_a_member:
+            if state == 'present':
+                # add user to the group
+                try:
+                    if not module.check_mode:
+                        group.add_member_to_group(gitlab_user_id, gitlab_group_id, gitlab_user['access_level'])
+                    changed = True
+                    changed_users.append("Successfully added user '%s' to group" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
+                                         'msg': "Successfully added user '%s' to group" % gitlab_user['name']})
+                except (gitlab.exceptions.GitlabCreateError) as e:
+                    error = True
+                    changed_users.append("Failed to update the access level for the user, '%s'" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+                                         'msg': "Not allowed to add the access level for the member, %s: %s" % (gitlab_user['name'], e)})
+            # state as absent
+            else:
+                changed_users.append("User, '%s', is not a member in the group. No change to report" % gitlab_user['name'])
+                changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
+                                     'msg': "User, '%s', is not a member in the group. No change to report" % gitlab_user['name']})
+        # in case that a user is a member
         else:
-            # remove the user from the group
-            if not module.check_mode:
-                group.remove_user_from_group(gitlab_user_id, gitlab_group_id)
-            module.exit_json(changed=True, result="Successfully removed user, '%s', from the group" % gitlab_user)
+            if state == 'present':
+                # compare the access level
+                user_access_level = group.get_user_access_level(members, gitlab_user_id)
+                if user_access_level == gitlab_user['access_level']:
+                    changed_users.append("User, '%s', is already a member in the group. No change to report" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
+                                         'msg': "User, '%s', is already a member in the group. No change to report" % gitlab_user['name']})
+                else:
+                    # update the access level for the user
+                    try:
+                        if not module.check_mode:
+                            group.update_user_access_level(members, gitlab_user_id, gitlab_user['access_level'])
+                        changed = True
+                        changed_users.append("Successfully updated the access level for the user, '%s'" % gitlab_user['name'])
+                        changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
+                                             'msg': "Successfully updated the access level for the user, '%s'" % gitlab_user['name']})
+                    except (gitlab.exceptions.GitlabUpdateError) as e:
+                        error = True
+                        changed_users.append("Failed to update the access level for the user, '%s'" % gitlab_user['name'])
+                        changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+                                             'msg': "Not allowed to update the access level for the member, %s: %s" % (gitlab_user['name'], e)})
+            else:
+                # remove the user from the group
+                try:
+                    if not module.check_mode:
+                        group.remove_user_from_group(gitlab_user_id, gitlab_group_id)
+                    changed = True
+                    changed_users.append("Successfully removed user, '%s', from the group" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
+                                         'msg': "Successfully removed user, '%s', from the group" % gitlab_user['name']})
+                except (gitlab.exceptions.GitlabDeleteError) as e:
+                    error = True
+                    changed_users.append("Failed to remove user, '%s', from the group" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+                                         'msg': "Failed to remove user, '%s' from the group: %s" % (gitlab_user['name'], e)})
+
+    # if state == present and purge_users is set, remove members with one of the given access levels who are not in gitlab_users_access
+    if state == 'present' and purge_users:
+        uppercase_names_in_gitlab_users_access = []
+        for name in gitlab_users_access:
+            uppercase_names_in_gitlab_users_access.append(name['name'].upper())
+
+        for member in members:
+            if member.access_level in purge_users and member.username.upper() not in uppercase_names_in_gitlab_users_access:
+                try:
+                    if not module.check_mode:
+                        group.remove_user_from_group(member.id, gitlab_group_id)
+                    changed = True
+                    changed_users.append("Successfully removed user '%s', from group. Was not in given list" % member.username)
+                    changed_data.append({'gitlab_user': member.username, 'result': 'CHANGED',
+                                         'msg': "Successfully removed user '%s', from group. Was not in given list" % member.username})
+                except (gitlab.exceptions.GitlabDeleteError) as e:
+                    error = True
+                    changed_users.append("Failed to remove user, '%s', from the group" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+                                         'msg': "Failed to remove user, '%s' from the group: %s" % (gitlab_user['name'], e)})
+
+    if len(gitlab_users_access) == 1 and error:
+        # if a single user was given and an error occurred, fail; for a list, errors are reported per user
+        module.fail_json(msg="FAILED: '%s'" % changed_users[0], result_data=changed_data)
+    elif error:
+        module.fail_json(msg='FAILED: At least one given user/permission could not be set', result_data=changed_data)
+
+    module.exit_json(changed=changed, msg='Successfully set memberships', result="\n".join(changed_users), result_data=changed_data)


 if __name__ == '__main__':
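The gitlab_group_members rewrite above (mirrored for projects in the gitlab_project_members diff further below) normalizes every input form into one list of {name, access_level} dicts, maps level names to python-gitlab integer constants up front, and then reconciles that list against the current members; purge_users additionally removes members at the listed levels that are absent from the input. A condensed, illustrative sketch of that reconciliation step (not the module's actual code):

    def reconcile(desired, current, purge_levels):
        # desired/current: {username: access_level_int}; purge_levels: levels eligible for pruning
        actions = []
        for user, level in desired.items():
            if user not in current:
                actions.append(('add', user, level))
            elif current[user] != level:
                actions.append(('update', user, level))
        for user, level in current.items():
            if level in purge_levels and user not in desired:
                actions.append(('remove', user, level))
        return actions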
@@ -145,7 +145,16 @@ options:
     type: str
     choices: ["never", "always", "default_off", "default_on"]
     version_added: "3.4.0"
+  ci_config_path:
+    description:
+      - Custom path to the CI configuration file for this project.
+    type: str
+    version_added: "3.7.0"
+  shared_runners_enabled:
+    description:
+      - Enable shared runners for this project.
+    type: bool
+    version_added: "3.7.0"
 '''

 EXAMPLES = r'''

@@ -252,6 +261,8 @@ class GitLabProject(object):
             'packages_enabled': options['packages_enabled'],
             'remove_source_branch_after_merge': options['remove_source_branch_after_merge'],
             'squash_option': options['squash_option'],
+            'ci_config_path': options['ci_config_path'],
+            'shared_runners_enabled': options['shared_runners_enabled'],
         }
         # Because we have already call userExists in main()
         if self.projectObject is None:

@@ -364,6 +375,8 @@ def main():
         packages_enabled=dict(type='bool'),
         remove_source_branch_after_merge=dict(type='bool'),
         squash_option=dict(type='str', choices=['never', 'always', 'default_off', 'default_on']),
+        ci_config_path=dict(type='str'),
+        shared_runners_enabled=dict(type='bool'),
     ))

     module = AnsibleModule(

@@ -402,6 +415,8 @@ def main():
     packages_enabled = module.params['packages_enabled']
     remove_source_branch_after_merge = module.params['remove_source_branch_after_merge']
     squash_option = module.params['squash_option']
+    ci_config_path = module.params['ci_config_path']
+    shared_runners_enabled = module.params['shared_runners_enabled']

     if not HAS_GITLAB_PACKAGE:
         module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)

@@ -466,6 +481,8 @@ def main():
         "packages_enabled": packages_enabled,
         "remove_source_branch_after_merge": remove_source_branch_after_merge,
         "squash_option": squash_option,
+        "ci_config_path": ci_config_path,
+        "shared_runners_enabled": shared_runners_enabled,
     }):

         module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.projectObject._attrs)
@@ -53,15 +53,37 @@ options:
|
||||
type: str
|
||||
gitlab_user:
|
||||
description:
|
||||
- The username of the member to add to/remove from the GitLab project.
|
||||
required: true
|
||||
type: str
|
||||
- A username or a list of usernames to add to/remove from the GitLab project.
|
||||
- Mutually exclusive with I(gitlab_users_access).
|
||||
type: list
|
||||
elements: str
|
||||
access_level:
|
||||
description:
|
||||
- The access level for the user.
|
||||
- Required if I(state=present), user state is set to present.
|
||||
type: str
|
||||
choices: ['guest', 'reporter', 'developer', 'maintainer']
|
||||
gitlab_users_access:
|
||||
description:
|
||||
- Provide a list of user to access level mappings.
|
||||
- Every dictionary in this list specifies a user (by username) and the access level the user should have.
|
||||
- Mutually exclusive with I(gitlab_user) and I(access_level).
|
||||
- Use together with I(purge_users) to remove all users not specified here from the project.
|
||||
type: list
|
||||
elements: dict
|
||||
suboptions:
|
||||
name:
|
||||
description: A username or a list of usernames to add to/remove from the GitLab project.
|
||||
type: str
|
||||
required: true
|
||||
access_level:
|
||||
description:
|
||||
- The access level for the user.
|
||||
- Required if I(state=present), user state is set to present.
|
||||
type: str
|
||||
choices: ['guest', 'reporter', 'developer', 'maintainer']
|
||||
required: true
|
||||
version_added: 3.7.0
|
||||
state:
|
||||
description:
|
||||
- State of the member in the project.
|
||||
@@ -70,6 +92,15 @@ options:
|
||||
choices: ['present', 'absent']
|
||||
default: 'present'
|
||||
type: str
|
||||
purge_users:
|
||||
description:
|
||||
- Adds/remove users of the given access_level to match the given I(gitlab_user)/I(gitlab_users_access) list.
|
||||
If omitted do not purge orphaned members.
|
||||
- Is only used when I(state=present).
|
||||
type: list
|
||||
elements: str
|
||||
choices: ['guest', 'reporter', 'developer', 'maintainer']
|
||||
version_added: 3.7.0
|
||||
notes:
|
||||
- Supports C(check_mode).
|
||||
'''
|
||||
@@ -93,6 +124,51 @@ EXAMPLES = r'''
|
||||
project: projectname
|
||||
gitlab_user: username
|
||||
state: absent
|
||||
|
||||
- name: Add a list of Users to A GitLab project
|
||||
community.general.gitlab_project_members:
|
||||
api_url: 'https://gitlab.example.com'
|
||||
api_token: 'Your-Private-Token'
|
||||
gitlab_project: projectname
|
||||
gitlab_user:
|
||||
- user1
|
||||
- user2
|
||||
access_level: developer
|
||||
state: present
|
||||
|
||||
- name: Add a list of Users with Dedicated Access Levels to A GitLab project
|
||||
community.general.gitlab_project_members:
|
||||
api_url: 'https://gitlab.example.com'
|
||||
api_token: 'Your-Private-Token'
|
||||
project: projectname
|
||||
gitlab_users_access:
|
||||
- name: user1
|
||||
access_level: developer
|
||||
- name: user2
|
||||
access_level: maintainer
|
||||
state: present
|
||||
|
||||
- name: Add a user, remove all others which might be on this access level
|
||||
community.general.gitlab_project_members:
|
||||
api_url: 'https://gitlab.example.com'
|
||||
api_token: 'Your-Private-Token'
|
||||
project: projectname
|
||||
gitlab_user: username
|
||||
access_level: developer
|
||||
pruge_users: developer
|
||||
state: present
|
||||
|
||||
- name: Remove a list of Users with Dedicated Access Levels to A GitLab project
|
||||
community.general.gitlab_project_members:
|
||||
api_url: 'https://gitlab.example.com'
|
||||
api_token: 'Your-Private-Token'
|
||||
project: projectname
|
||||
gitlab_users_access:
|
||||
- name: user1
|
||||
access_level: developer
|
||||
- name: user2
|
||||
access_level: maintainer
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = r''' # '''
|
||||
@@ -132,6 +208,17 @@ class GitLabProjectMembers(object):
|
||||
project = self._gitlab.projects.get(gitlab_project_id)
|
||||
return project.members.list(all=True)
|
||||
|
||||
# get single member in a project by user name
|
||||
def get_member_in_a_project(self, gitlab_project_id, gitlab_user_id):
|
||||
member = None
|
||||
project = self._gitlab.projects.get(gitlab_project_id)
|
||||
try:
|
||||
member = project.members.get(gitlab_user_id)
|
||||
if member:
|
||||
return member
|
||||
except gitlab.exceptions.GitlabGetError as e:
|
||||
return None
|
||||
|
||||
# check if the user is a member of the project
|
||||
def is_user_a_member(self, members, gitlab_user_id):
|
||||
for member in members:
|
||||
@@ -141,27 +228,14 @@ class GitLabProjectMembers(object):

    # add user to a project
    def add_member_to_project(self, gitlab_user_id, gitlab_project_id, access_level):
        try:
            project = self._gitlab.projects.get(gitlab_project_id)
            add_member = project.members.create(
                {'user_id': gitlab_user_id, 'access_level': access_level})

            if add_member:
                return add_member.username

        except (gitlab.exceptions.GitlabCreateError) as e:
            self._module.fail_json(
                msg="Failed to add member to the project, project ID %s: %s" % (gitlab_project_id, e))
        project = self._gitlab.projects.get(gitlab_project_id)
        add_member = project.members.create(
            {'user_id': gitlab_user_id, 'access_level': access_level})

    # remove user from a project
    def remove_user_from_project(self, gitlab_user_id, gitlab_project_id):
        try:
            project = self._gitlab.projects.get(gitlab_project_id)
            project.members.delete(gitlab_user_id)

        except (gitlab.exceptions.GitlabDeleteError) as e:
            self._module.fail_json(
                msg="Failed to remove member from GitLab project, ID %s: %s" % (gitlab_project_id, e))
        project = self._gitlab.projects.get(gitlab_project_id)
        project.members.delete(gitlab_user_id)

    # get user's access level
    def get_user_access_level(self, members, gitlab_user_id):
@@ -173,12 +247,8 @@ class GitLabProjectMembers(object):
    def update_user_access_level(self, members, gitlab_user_id, access_level):
        for member in members:
            if member.id == gitlab_user_id:
                try:
                    member.access_level = access_level
                    member.save()
                except (gitlab.exceptions.GitlabCreateError) as e:
                    self._module.fail_json(
                        msg="Failed to update the access level for the member, %s: %s" % (gitlab_user_id, e))
                member.access_level = access_level
                member.save()


def main():
@@ -186,9 +256,20 @@ def main():
    argument_spec.update(dict(
        api_token=dict(type='str', required=True, no_log=True),
        project=dict(type='str', required=True),
        gitlab_user=dict(type='str', required=True),
        gitlab_user=dict(type='list', elements='str'),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        access_level=dict(type='str', required=False, choices=['guest', 'reporter', 'developer', 'maintainer'])
        access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer']),
        purge_users=dict(type='list', elements='str', choices=[
            'guest', 'reporter', 'developer', 'maintainer']),
        gitlab_users_access=dict(
            type='list',
            elements='dict',
            options=dict(
                name=dict(type='str', required=True),
                access_level=dict(type='str', choices=[
                    'guest', 'reporter', 'developer', 'maintainer'], required=True),
            )
        ),
    ))

    module = AnsibleModule(
@@ -196,15 +277,19 @@ def main():
        mutually_exclusive=[
            ['api_username', 'api_token'],
            ['api_password', 'api_token'],
            ['gitlab_user', 'gitlab_users_access'],
            ['access_level', 'gitlab_users_access'],
        ],
        required_together=[
            ['api_username', 'api_password'],
            ['gitlab_user', 'access_level'],
        ],
        required_one_of=[
            ['api_username', 'api_token'],
            ['gitlab_user', 'gitlab_users_access'],
        ],
        required_if=[
            ['state', 'present', ['access_level']],
            ['state', 'present', ['access_level', 'gitlab_users_access'], True],
        ],
        supports_check_mode=True,
    )
@@ -212,71 +297,168 @@ def main():
    if not HAS_PY_GITLAB:
        module.fail_json(msg=missing_required_lib('python-gitlab', url='https://python-gitlab.readthedocs.io/en/stable/'), exception=GITLAB_IMP_ERR)

    access_level_int = {
        'guest': gitlab.GUEST_ACCESS,
        'reporter': gitlab.REPORTER_ACCESS,
        'developer': gitlab.DEVELOPER_ACCESS,
        'maintainer': gitlab.MAINTAINER_ACCESS,
    }

    gitlab_project = module.params['project']
    gitlab_user = module.params['gitlab_user']
    state = module.params['state']
    access_level = module.params['access_level']
    purge_users = module.params['purge_users']

    # convert access level string input to int
    if access_level:
        access_level_int = {
            'guest': gitlab.GUEST_ACCESS,
            'reporter': gitlab.REPORTER_ACCESS,
            'developer': gitlab.DEVELOPER_ACCESS,
            'maintainer': gitlab.MAINTAINER_ACCESS
        }

        access_level = access_level_int[access_level]
    if purge_users:
        purge_users = [access_level_int[level] for level in purge_users]

    # connect to gitlab server
    gl = gitlabAuthentication(module)

    project = GitLabProjectMembers(module, gl)

    gitlab_user_id = project.get_user_id(gitlab_user)
    gitlab_project_id = project.get_project(gitlab_project)

    # project doesn't exist
    if not gitlab_project_id:
        module.fail_json(msg="project '%s' not found." % gitlab_project)

    # user doesn't exist
    if not gitlab_user_id:
        if state == 'absent':
            module.exit_json(changed=False, result="user '%s' not found, and thus also not part of the project" % gitlab_user)
        else:
            module.fail_json(msg="user '%s' not found." % gitlab_user)
    members = []
    if module.params['gitlab_user'] is not None:
        gitlab_users_access = []
        gitlab_users = module.params['gitlab_user']
        for gl_user in gitlab_users:
            gitlab_users_access.append(
                {'name': gl_user, 'access_level': access_level_int[access_level] if access_level else None})
    elif module.params['gitlab_users_access'] is not None:
        gitlab_users_access = module.params['gitlab_users_access']
        for user_level in gitlab_users_access:
            user_level['access_level'] = access_level_int[user_level['access_level']]

    members = project.get_members_in_a_project(gitlab_project_id)
    is_user_a_member = project.is_user_a_member(members, gitlab_user_id)

    # check if the user is a member in the project
    if not is_user_a_member:
        if state == 'present':
            # add user to the project
            if not module.check_mode:
                project.add_member_to_project(gitlab_user_id, gitlab_project_id, access_level)
            module.exit_json(changed=True, result="Successfully added user '%s' to the project." % gitlab_user)
        # state as absent
        else:
            module.exit_json(changed=False, result="User, '%s', is not a member in the project. No change to report" % gitlab_user)
    # in case that a user is a member
    if len(gitlab_users_access) == 1 and not purge_users:
        # only single user given
        members = [project.get_member_in_a_project(
            gitlab_project_id, project.get_user_id(gitlab_users_access[0]['name']))]
        if members[0] is None:
            members = []
    elif len(gitlab_users_access) > 1 or purge_users:
        # list of users given
        members = project.get_members_in_a_project(gitlab_project_id)
    else:
        if state == 'present':
            # compare the access level
            user_access_level = project.get_user_access_level(members, gitlab_user_id)
            if user_access_level == access_level:
                module.exit_json(changed=False, result="User, '%s', is already a member in the project. No change to report" % gitlab_user)
        module.exit_json(changed='OK', result="Nothing to do, please give at least one user or set purge_users true.",
                         result_data=[])

    changed = False
    error = False
    changed_users = []
    changed_data = []

    for gitlab_user in gitlab_users_access:
        gitlab_user_id = project.get_user_id(gitlab_user['name'])

        # user doesn't exist
        if not gitlab_user_id:
            if state == 'absent':
                changed_users.append("user '%s' not found, and thus also not part of the project" % gitlab_user['name'])
                changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
                                     'msg': "user '%s' not found, and thus also not part of the project" % gitlab_user['name']})
            else:
                # update the access level for the user
                if not module.check_mode:
                    project.update_user_access_level(members, gitlab_user_id, access_level)
                module.exit_json(changed=True, result="Successfully updated the access level for the user, '%s'" % gitlab_user)
                error = True
                changed_users.append("user '%s' not found." % gitlab_user['name'])
                changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
                                     'msg': "user '%s' not found." % gitlab_user['name']})
            continue

        is_user_a_member = project.is_user_a_member(members, gitlab_user_id)

        # check if the user is a member in the project
        if not is_user_a_member:
            if state == 'present':
                # add user to the project
                try:
                    if not module.check_mode:
                        project.add_member_to_project(gitlab_user_id, gitlab_project_id, gitlab_user['access_level'])
                    changed = True
                    changed_users.append("Successfully added user '%s' to project" % gitlab_user['name'])
                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
                                         'msg': "Successfully added user '%s' to project" % gitlab_user['name']})
                except (gitlab.exceptions.GitlabCreateError) as e:
                    error = True
                    changed_users.append("Failed to update the access level for the user, '%s'" % gitlab_user['name'])
                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
                                         'msg': "Not allowed to add the access level for the member, %s: %s" % (gitlab_user['name'], e)})
            # state as absent
            else:
                changed_users.append("User, '%s', is not a member in the project. No change to report" % gitlab_user['name'])
                changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
                                     'msg': "User, '%s', is not a member in the project. No change to report" % gitlab_user['name']})
        # in case that a user is a member
        else:
            # remove the user from the project
            if not module.check_mode:
                project.remove_user_from_project(gitlab_user_id, gitlab_project_id)
            module.exit_json(changed=True, result="Successfully removed user, '%s', from the project" % gitlab_user)
            if state == 'present':
                # compare the access level
                user_access_level = project.get_user_access_level(members, gitlab_user_id)
                if user_access_level == gitlab_user['access_level']:
                    changed_users.append("User, '%s', is already a member in the project. No change to report" % gitlab_user['name'])
                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
                                         'msg': "User, '%s', is already a member in the project. No change to report" % gitlab_user['name']})
                else:
                    # update the access level for the user
                    try:
                        if not module.check_mode:
                            project.update_user_access_level(members, gitlab_user_id, gitlab_user['access_level'])
                        changed = True
                        changed_users.append("Successfully updated the access level for the user, '%s'" % gitlab_user['name'])
                        changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
                                             'msg': "Successfully updated the access level for the user, '%s'" % gitlab_user['name']})
                    except (gitlab.exceptions.GitlabUpdateError) as e:
                        error = True
                        changed_users.append("Failed to update the access level for the user, '%s'" % gitlab_user['name'])
                        changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
                                             'msg': "Not allowed to update the access level for the member, %s: %s" % (gitlab_user['name'], e)})
            else:
                # remove the user from the project
                try:
                    if not module.check_mode:
                        project.remove_user_from_project(gitlab_user_id, gitlab_project_id)
                    changed = True
                    changed_users.append("Successfully removed user, '%s', from the project" % gitlab_user['name'])
                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
                                         'msg': "Successfully removed user, '%s', from the project" % gitlab_user['name']})
                except (gitlab.exceptions.GitlabDeleteError) as e:
                    error = True
                    changed_users.append("Failed to remove user, '%s', from the project" % gitlab_user['name'])
                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
                                         'msg': "Failed to remove user, '%s' from the project: %s" % (gitlab_user['name'], e)})

    # if state is present and purge_users is set, delete members that hold one of the
    # given access levels but are not in the given list of users
    if state == 'present' and purge_users:
        uppercase_names_in_gitlab_users_access = []
        for name in gitlab_users_access:
            uppercase_names_in_gitlab_users_access.append(name['name'].upper())

        for member in members:
            if member.access_level in purge_users and member.username.upper() not in uppercase_names_in_gitlab_users_access:
                try:
                    if not module.check_mode:
                        project.remove_user_from_project(member.id, gitlab_project_id)
                    changed = True
                    changed_users.append("Successfully removed user '%s', from project. Was not in given list" % member.username)
                    changed_data.append({'gitlab_user': member.username, 'result': 'CHANGED',
                                         'msg': "Successfully removed user '%s', from project. Was not in given list" % member.username})
                except (gitlab.exceptions.GitlabDeleteError) as e:
                    error = True
                    changed_users.append("Failed to remove user, '%s', from the project" % gitlab_user['name'])
                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
                                         'msg': "Failed to remove user, '%s' from the project: %s" % (gitlab_user['name'], e)})

    # if a single user was given and an error occurred, fail immediately; for a list, errors are reported per user
    if len(gitlab_users_access) == 1 and error:
        module.fail_json(msg="FAILED: '%s '" % changed_users[0], result_data=changed_data)
    elif error:
        module.fail_json(
            msg='FAILED: At least one given user/permission could not be set', result_data=changed_data)

    module.exit_json(changed=changed, msg='Successfully set memberships', result="\n".join(changed_users), result_data=changed_data)


if __name__ == '__main__':

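The purge path above removes every member whose access level matches one of the requested purge_users levels but whose name is absent from the requested user list. The following is a minimal standalone sketch of that decision, using made-up names and an assumed name-to-integer mapping that mirrors gitlab.GUEST_ACCESS through gitlab.MAINTAINER_ACCESS; it illustrates the logic and is not the module's own code.

# Assumed mapping; python-gitlab exposes these as integer constants.
ACCESS_LEVELS = {'guest': 10, 'reporter': 20, 'developer': 30, 'maintainer': 40}

def members_to_purge(members, wanted_names, purge_levels):
    # members: list of (username, access_level_int) pairs
    wanted = {name.upper() for name in wanted_names}
    return [name for name, level in members
            if level in purge_levels and name.upper() not in wanted]

# 'bob' holds a purged level and is not in the wanted list, so only he is removed.
print(members_to_purge([('alice', 30), ('bob', 30), ('carol', 40)],
                       ['alice'], {ACCESS_LEVELS['developer']}))  # ['bob']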
@@ -1,6 +1,7 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Raphaël Droz (raphael.droz@gmail.com)
# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
# Copyright: (c) 2018, Samy Coenen <samy.coenen@nubera.be>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -38,6 +39,11 @@ options:
    description:
      - Your private token to interact with the GitLab API.
    type: str
  project:
    description:
      - ID or full path of the project in the form of group/name.
    type: str
    version_added: '3.7.0'
  description:
    description:
      - The unique name of the runner.
@@ -131,6 +137,15 @@ EXAMPLES = '''
    description: Docker Machine t1
    owned: yes
    state: absent

- name: Register runner for a specific project
  community.general.gitlab_runner:
    api_url: https://gitlab.example.com/
    api_token: "{{ access_token }}"
    registration_token: 4gfdsg345
    description: MyProject runner
    state: present
    project: mygroup/mysubgroup/myproject
'''

RETURN = '''
@@ -181,9 +196,13 @@ except NameError:


class GitLabRunner(object):
    def __init__(self, module, gitlab_instance):
    def __init__(self, module, gitlab_instance, project=None):
        self._module = module
        self._gitlab = gitlab_instance
        # Whether to operate on GitLab-instance-wide or project-wide runners
        # See https://gitlab.com/gitlab-org/gitlab-ce/issues/60774
        # for group runner token access
        self._runners_endpoint = project.runners if project else gitlab_instance.runners
        self.runnerObject = None

    def createOrUpdateRunner(self, description, options):
@@ -230,7 +249,7 @@ class GitLabRunner(object):
            return True

        try:
            runner = self._gitlab.runners.create(arguments)
            runner = self._runners_endpoint.create(arguments)
        except (gitlab.exceptions.GitlabCreateError) as e:
            self._module.fail_json(msg="Failed to create runner: %s " % to_native(e))
@@ -265,19 +284,19 @@ class GitLabRunner(object):
    '''
    def findRunner(self, description, owned=False):
        if owned:
            runners = self._gitlab.runners.list(as_list=False)
            runners = self._runners_endpoint.list(as_list=False)
        else:
            runners = self._gitlab.runners.all(as_list=False)
            runners = self._runners_endpoint.all(as_list=False)

        for runner in runners:
            # python-gitlab 2.2 through at least 2.5 returns a list of dicts for list() instead of a Runner
            # object, so we need to handle both
            if hasattr(runner, "description"):
                if (runner.description == description):
                    return self._gitlab.runners.get(runner.id)
                    return self._runners_endpoint.get(runner.id)
            else:
                if (runner['description'] == description):
                    return self._gitlab.runners.get(runner['id'])
                    return self._runners_endpoint.get(runner['id'])

    '''
    @param description Description of the runner
@@ -313,6 +332,7 @@ def main():
        access_level=dict(type='str', default='ref_protected', choices=["not_protected", "ref_protected"]),
        maximum_timeout=dict(type='int', default=3600),
        registration_token=dict(type='str', no_log=True),
        project=dict(type='str'),
        state=dict(type='str', default="present", choices=["absent", "present"]),
    ))

@@ -344,13 +364,20 @@ def main():
    access_level = module.params['access_level']
    maximum_timeout = module.params['maximum_timeout']
    registration_token = module.params['registration_token']
    project = module.params['project']

    if not HAS_GITLAB_PACKAGE:
        module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)

    gitlab_instance = gitlabAuthentication(module)
    gitlab_project = None
    if project:
        try:
            gitlab_project = gitlab_instance.projects.get(project)
        except gitlab.exceptions.GitlabGetError as e:
            module.fail_json(msg='No such project %s' % project, exception=to_native(e))

    gitlab_runner = GitLabRunner(module, gitlab_instance)
    gitlab_runner = GitLabRunner(module, gitlab_instance, gitlab_project)
    runner_exists = gitlab_runner.existsRunner(runner_description, owned)

    if state == 'absent':

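The key change in this runner diff is a single dispatch expression: all runner operations go through a project-scoped endpoint when a project is given, and through the instance-wide endpoint otherwise. A hedged sketch of that dispatch with python-gitlab, using an invented server URL, token, and project path:

import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='secret-token')  # assumed credentials
project = gl.projects.get('mygroup/myproject')  # or None to manage instance-wide runners
# Same attribute-selection pattern as the diff above.
runners_endpoint = project.runners if project else gl.runners
for runner in runners_endpoint.list(as_list=False):
    print(runner.id, getattr(runner, 'description', None))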
@@ -194,12 +194,16 @@ class Zfs(object):
            self.module.fail_json(msg=err)

    def set_properties_if_changed(self):
        diff = {'before': {'extra_zfs_properties': {}}, 'after': {'extra_zfs_properties': {}}}
        current_properties = self.get_current_properties()
        for prop, value in self.properties.items():
            if current_properties.get(prop, None) != value:
            current_value = current_properties.get(prop, None)
            if current_value != value:
                self.set_property(prop, value)
                diff['before']['extra_zfs_properties'][prop] = current_value
                diff['after']['extra_zfs_properties'][prop] = value
        if self.module.check_mode:
            return
            return diff
        updated_properties = self.get_current_properties()
        for prop in self.properties:
            value = updated_properties.get(prop, None)
@@ -207,6 +211,9 @@ class Zfs(object):
                self.module.fail_json(msg="zfsprop was not present after being successfully set: %s" % prop)
            if current_properties.get(prop, None) != value:
                self.changed = True
            if prop in diff['after']['extra_zfs_properties']:
                diff['after']['extra_zfs_properties'][prop] = value
        return diff

    def get_current_properties(self):
        cmd = [self.zfs_cmd, 'get', '-H', '-p', '-o', "property,value,source"]
@@ -220,7 +227,7 @@ class Zfs(object):
            # include source '-' so that creation-only properties are not removed
            # to avoid errors when the dataset already exists and the property is not changed
            # this scenario is most likely when the same playbook is run more than once
            if source == 'local' or source == '-':
            if source == 'local' or source == 'received' or source == '-':
                properties[prop] = value
        # Add alias for enhanced sharing properties
        if self.enhanced_sharing:
@@ -266,13 +273,20 @@ def main():

    if state == 'present':
        if zfs.exists():
            zfs.set_properties_if_changed()
            result['diff'] = zfs.set_properties_if_changed()
        else:
            zfs.create()
            result['diff'] = {'before': {'state': 'absent'}, 'after': {'state': state}}

    elif state == 'absent':
        if zfs.exists():
            zfs.destroy()
            result['diff'] = {'before': {'state': 'present'}, 'after': {'state': state}}
        else:
            result['diff'] = {}

    result['diff']['before_header'] = name
    result['diff']['after_header'] = name

    result.update(zfs.properties)
    result['changed'] = zfs.changed

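The zfs change above threads a before/after diff dict through set_properties_if_changed() so --diff mode can show exactly which properties moved. A reduced standalone sketch of how that dict is assembled, with plain dicts standing in for real dataset properties:

def build_diff(current, wanted):
    # Mirrors the 'extra_zfs_properties' before/after structure from the diff above.
    diff = {'before': {'extra_zfs_properties': {}}, 'after': {'extra_zfs_properties': {}}}
    for prop, value in wanted.items():
        if current.get(prop) != value:
            diff['before']['extra_zfs_properties'][prop] = current.get(prop)
            diff['after']['extra_zfs_properties'][prop] = value
    return diff

print(build_diff({'compression': 'off'}, {'compression': 'lz4'}))
# {'before': {'extra_zfs_properties': {'compression': 'off'}},
#  'after': {'extra_zfs_properties': {'compression': 'lz4'}}}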
@@ -125,23 +125,16 @@ class ZPoolFacts(object):
    def __init__(self, module):

        self.module = module

        self.name = module.params['name']
        self.parsable = module.params['parsable']
        self.properties = module.params['properties']

        self._pools = defaultdict(dict)
        self.facts = []

    def pool_exists(self):
        cmd = [self.module.get_bin_path('zpool'), 'list', self.name]

        (rc, out, err) = self.module.run_command(cmd)

        if rc == 0:
            return True
        else:
            return False
        rc, dummy, dummy = self.module.run_command(cmd)
        return rc == 0

    def get_facts(self):
        cmd = [self.module.get_bin_path('zpool'), 'get', '-H']
@@ -153,41 +146,36 @@ class ZPoolFacts(object):
        if self.name:
            cmd.append(self.name)

        (rc, out, err) = self.module.run_command(cmd)
        rc, out, err = self.module.run_command(cmd, check_rc=True)

        if rc == 0:
            for line in out.splitlines():
                pool, property, value = line.split('\t')
        for line in out.splitlines():
            pool, prop, value = line.split('\t')

                self._pools[pool].update({property: value})
            self._pools[pool].update({prop: value})

            for k, v in iteritems(self._pools):
                v.update({'name': k})
                self.facts.append(v)
        for k, v in iteritems(self._pools):
            v.update({'name': k})
            self.facts.append(v)

            return {'ansible_zfs_pools': self.facts}
        else:
            self.module.fail_json(msg='Error while trying to get facts about ZFS pool: %s' % self.name,
                                  stderr=err,
                                  rc=rc)
        return {'ansible_zfs_pools': self.facts}


def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=False, aliases=['pool', 'zpool'], type='str'),
            parsable=dict(required=False, default=False, type='bool'),
            properties=dict(required=False, default='all', type='str'),
            name=dict(aliases=['pool', 'zpool'], type='str'),
            parsable=dict(default=False, type='bool'),
            properties=dict(default='all', type='str'),
        ),
        supports_check_mode=True
    )

    zpool_facts = ZPoolFacts(module)

    result = {}
    result['changed'] = False
    result['name'] = zpool_facts.name

    result = {
        'changed': False,
        'name': zpool_facts.name,
    }
    if zpool_facts.parsable:
        result['parsable'] = zpool_facts.parsable

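The fact-gathering loop above splits each line of `zpool get -H` output on tabs into pool, property, and value. A self-contained sketch of that parsing with a hypothetical output sample (the pool name and values are invented):

from collections import defaultdict

# Invented sample of `zpool get -H` output: pool, property, value per line.
out = "rpool\tsize\t960G\nrpool\thealth\tONLINE\n"
pools = defaultdict(dict)
for line in out.splitlines():
    pool, prop, value = line.split('\t')
    pools[pool][prop] = value
print(dict(pools))  # {'rpool': {'size': '960G', 'health': 'ONLINE'}}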
@@ -148,57 +148,48 @@ from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes


def lineDict(line):
def line_dict(line):
    return {'line': line, 'line_type': 'unknown'}


def optionDict(line, iface, option, value, address_family):
def make_option_dict(line, iface, option, value, address_family):
    return {'line': line, 'iface': iface, 'option': option, 'value': value, 'line_type': 'option', 'address_family': address_family}


def getValueFromLine(s):
    spaceRe = re.compile(r'\s+')
    for m in spaceRe.finditer(s):
        pass
    valueEnd = m.start()
    option = s.split()[0]
    optionStart = s.find(option)
    optionLen = len(option)
    valueStart = re.search(r'\s', s[optionLen + optionStart:]).end() + optionLen + optionStart
    return s[valueStart:valueEnd]
def get_option_value(line):
    patt = re.compile(r'^\s+(?P<option>\S+)\s+(?P<value>\S?.*\S)\s*$')
    match = patt.match(line)
    if not match:
        return None, None
    return match.group("option"), match.group("value")


def read_interfaces_file(module, filename):
    f = open(filename, 'r')
    return read_interfaces_lines(module, f)
    with open(filename, 'r') as f:
        return read_interfaces_lines(module, f)


def _is_line_processing_none(first_word):
    return first_word in ("source", "source-dir", "source-directory", "auto", "no-auto-down", "no-scripts") or first_word.startswith("allow-")


def read_interfaces_lines(module, line_strings):
    lines = []
    ifaces = {}
    iface_name = None
    address_family = None
    currif = {}
    currently_processing = None
    i = 0
    for line in line_strings:
        i += 1
    for i, line in enumerate(line_strings):
        words = line.split()
        if len(words) < 1:
            lines.append(lineDict(line))
            continue
        if words[0][0] == "#":
            lines.append(lineDict(line))
        if not words or words[0].startswith("#"):
            lines.append(line_dict(line))
            continue
        if words[0] == "mapping":
            # currmap = calloc(1, sizeof *currmap);
            lines.append(lineDict(line))
            lines.append(line_dict(line))
            currently_processing = "MAPPING"
        elif words[0] == "source":
            lines.append(lineDict(line))
            currently_processing = "NONE"
        elif words[0] == "source-dir":
            lines.append(lineDict(line))
            currently_processing = "NONE"
        elif words[0] == "source-directory":
            lines.append(lineDict(line))
        elif _is_line_processing_none(words[0]):
            lines.append(line_dict(line))
            currently_processing = "NONE"
        elif words[0] == "iface":
            currif = {
@@ -221,39 +212,26 @@ def read_interfaces_lines(module, line_strings):
            ifaces[iface_name] = currif
            lines.append({'line': line, 'iface': iface_name, 'line_type': 'iface', 'params': currif, 'address_family': address_family})
            currently_processing = "IFACE"
        elif words[0] == "auto":
            lines.append(lineDict(line))
            currently_processing = "NONE"
        elif words[0].startswith("allow-"):
            lines.append(lineDict(line))
            currently_processing = "NONE"
        elif words[0] == "no-auto-down":
            lines.append(lineDict(line))
            currently_processing = "NONE"
        elif words[0] == "no-scripts":
            lines.append(lineDict(line))
            currently_processing = "NONE"
        else:
            if currently_processing == "IFACE":
                option_name = words[0]
                option_name, value = get_option_value(line)
                # TODO: if option_name in currif.options
                value = getValueFromLine(line)
                lines.append(optionDict(line, iface_name, option_name, value, address_family))
                lines.append(make_option_dict(line, iface_name, option_name, value, address_family))
                if option_name in ["pre-up", "up", "down", "post-up"]:
                    currif[option_name].append(value)
                else:
                    currif[option_name] = value
            elif currently_processing == "MAPPING":
                lines.append(lineDict(line))
                lines.append(line_dict(line))
            elif currently_processing == "NONE":
                lines.append(lineDict(line))
                lines.append(line_dict(line))
            else:
                module.fail_json(msg="misplaced option %s in line %d" % (line, i))
                return None, None
                module.fail_json(msg="misplaced option %s in line %d" % (line, i + 1))

    return lines, ifaces


def setInterfaceOption(module, lines, iface, option, raw_value, state, address_family=None):
def set_interface_option(module, lines, iface, option, raw_value, state, address_family=None):
    value = str(raw_value)
    changed = False

@@ -262,57 +240,54 @@ def setInterfaceOption(module, lines, iface, option, raw_value, state, address_f
        iface_lines = [item for item in iface_lines
                       if "address_family" in item and item["address_family"] == address_family]

    if len(iface_lines) < 1:
    if not iface_lines:
        # interface not found
        module.fail_json(msg="Error: interface %s not found" % iface)
        return changed, None

    iface_options = list(filter(lambda i: i['line_type'] == 'option', iface_lines))
    target_options = list(filter(lambda i: i['option'] == option, iface_options))
    iface_options = [il for il in iface_lines if il['line_type'] == 'option']
    target_options = [io for io in iface_options if io['option'] == option]

    if state == "present":
        if len(target_options) < 1:
            changed = True
        if not target_options:
            # add new option
            last_line_dict = iface_lines[-1]
            changed, lines = addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family)
        else:
            if option in ["pre-up", "up", "down", "post-up"]:
                if len(list(filter(lambda i: i['value'] == value, target_options))) < 1:
                    changed, lines = addOptionAfterLine(option, value, iface, lines, target_options[-1], iface_options, address_family)
            else:
                # if more than one option found edit the last one
                if target_options[-1]['value'] != value:
                    changed = True
                    target_option = target_options[-1]
                    old_line = target_option['line']
                    old_value = target_option['value']
                    address_family = target_option['address_family']
                    prefix_start = old_line.find(option)
                    optionLen = len(option)
                    old_value_position = re.search(r"\s+".join(map(re.escape, old_value.split())), old_line[prefix_start + optionLen:])
                    start = old_value_position.start() + prefix_start + optionLen
                    end = old_value_position.end() + prefix_start + optionLen
                    line = old_line[:start] + value + old_line[end:]
                    index = len(lines) - lines[::-1].index(target_option) - 1
                    lines[index] = optionDict(line, iface, option, value, address_family)
    elif state == "absent":
        if len(target_options) >= 1:
            return add_option_after_line(option, value, iface, lines, last_line_dict, iface_options, address_family)

        if option in ["pre-up", "up", "down", "post-up"] and all(ito['value'] != value for ito in target_options):
            return add_option_after_line(option, value, iface, lines, target_options[-1], iface_options, address_family)

        # if more than one option found edit the last one
        if target_options[-1]['value'] != value:
            changed = True
            target_option = target_options[-1]
            old_line = target_option['line']
            old_value = target_option['value']
            address_family = target_option['address_family']
            prefix_start = old_line.find(option)
            option_len = len(option)
            old_value_position = re.search(r"\s+".join(map(re.escape, old_value.split())), old_line[prefix_start + option_len:])
            start = old_value_position.start() + prefix_start + option_len
            end = old_value_position.end() + prefix_start + option_len
            line = old_line[:start] + value + old_line[end:]
            index = len(lines) - lines[::-1].index(target_option) - 1
            lines[index] = make_option_dict(line, iface, option, value, address_family)
        return changed, lines

    if state == "absent":
        if target_options:
            if option in ["pre-up", "up", "down", "post-up"] and value is not None and value != "None":
                for target_option in filter(lambda i: i['value'] == value, target_options):
                for target_option in [ito for ito in target_options if ito['value'] == value]:
                    changed = True
                    lines = list(filter(lambda ln: ln != target_option, lines))
                    lines = [ln for ln in lines if ln != target_option]
            else:
                changed = True
                for target_option in target_options:
                    lines = list(filter(lambda ln: ln != target_option, lines))
    else:
        module.fail_json(msg="Error: unsupported state %s, has to be either present or absent" % state)
                    lines = [ln for ln in lines if ln != target_option]

    return changed, lines


def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family):
def add_option_after_line(option, value, iface, lines, last_line_dict, iface_options, address_family):
    # Changing method of interface is not an addition
    if option == 'method':
        changed = False
@@ -328,23 +303,21 @@ def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_option
    suffix_start = last_line.rfind(last_line.split()[-1]) + len(last_line.split()[-1])
    prefix = last_line[:prefix_start]

    if len(iface_options) < 1:
    if not iface_options:
        # interface has no options, indent
        prefix += "    "

    line = prefix + "%s %s" % (option, value) + last_line[suffix_start:]
    option_dict = optionDict(line, iface, option, value, address_family)
    option_dict = make_option_dict(line, iface, option, value, address_family)
    index = len(lines) - lines[::-1].index(last_line_dict)
    lines.insert(index, option_dict)
    return True, lines


def write_changes(module, lines, dest):

    tmpfd, tmpfile = tempfile.mkstemp()
    f = os.fdopen(tmpfd, 'wb')
    f.write(to_bytes(''.join(lines), errors='surrogate_or_strict'))
    f.close()
    with os.fdopen(tmpfd, 'wb') as f:
        f.write(to_bytes(''.join(lines), errors='surrogate_or_strict'))
    module.atomic_move(tmpfile, os.path.realpath(dest))


@@ -382,7 +355,7 @@ def main():
    changed = False

    if option is not None:
        changed, lines = setInterfaceOption(module, lines, iface, option, value, state, address_family)
        changed, lines = set_interface_option(module, lines, iface, option, value, state, address_family)

    if changed:
        dummy, ifaces = read_interfaces_lines(module, [d['line'] for d in lines if 'line' in d])

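The refactor above replaces the iterate-and-backtrack value extraction with a single anchored regex, get_option_value(). A quick demonstration of that exact pattern against a sample interfaces stanza line (the address value is invented):

import re

# Same pattern as get_option_value() in the diff above.
patt = re.compile(r'^\s+(?P<option>\S+)\s+(?P<value>\S?.*\S)\s*$')
m = patt.match("    address 192.168.1.10")
print(m.group("option"), m.group("value"))  # address 192.168.1.10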
@@ -1,6 +1,7 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Alexei Znamensky (@russoz) <russoz@gmail.com>
# Copyright: (c) 2013, Matthias Vogelgesang <matthias.vogelgesang@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

@@ -32,6 +33,7 @@ options:
    description:
      - If specified, use this blacklist file instead of
        C(/etc/modprobe.d/blacklist-ansible.conf).
    default: /etc/modprobe.d/blacklist-ansible.conf
'''

EXAMPLES = '''
@@ -43,110 +45,73 @@ EXAMPLES = '''

import os
import re
import tempfile

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper


class Blacklist(object):
    def __init__(self, module, filename, checkmode):
        self.filename = filename
        self.module = module
        self.checkmode = checkmode

    def create_file(self):
        if not self.checkmode and not os.path.exists(self.filename):
            open(self.filename, 'a').close()
            return True
        elif self.checkmode and not os.path.exists(self.filename):
            self.filename = os.devnull
            return True
        else:
            return False

    def get_pattern(self):
        return r'^blacklist\s*' + self.module + '$'

    def readlines(self):
        f = open(self.filename, 'r')
        lines = f.readlines()
        f.close()
        return lines

    def module_listed(self):
        lines = self.readlines()
        pattern = self.get_pattern()

        for line in lines:
            stripped = line.strip()
            if stripped.startswith('#'):
                continue

            if re.match(pattern, stripped):
                return True

        return False

    def remove_module(self):
        lines = self.readlines()
        pattern = self.get_pattern()

        if self.checkmode:
            f = open(os.devnull, 'w')
        else:
            f = open(self.filename, 'w')

        for line in lines:
            if not re.match(pattern, line.strip()):
                f.write(line)

        f.close()

    def add_module(self):
        if self.checkmode:
            f = open(os.devnull, 'a')
        else:
            f = open(self.filename, 'a')

        f.write('blacklist %s\n' % self.module)

        f.close()


def main():
    module = AnsibleModule(
class Blacklist(StateModuleHelper):
    output_params = ('name', 'state')
    module = dict(
        argument_spec=dict(
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            blacklist_file=dict(type='str')
            blacklist_file=dict(type='str', default='/etc/modprobe.d/blacklist-ansible.conf'),
        ),
        supports_check_mode=True,
    )

    args = dict(changed=False, failed=False,
                name=module.params['name'], state=module.params['state'])
    def __init_module__(self):
        self.pattern = re.compile(r'^blacklist\s+{0}$'.format(re.escape(self.vars.name)))
        self.vars.filename = self.vars.blacklist_file
        self.vars.set('file_exists', os.path.exists(self.vars.filename), output=False, change=True)
        if not self.vars.file_exists:
            with open(self.vars.filename, 'a'):
                pass
            self.vars.file_exists = True
            self.vars.set('lines', [], change=True, diff=True)
        else:
            with open(self.vars.filename) as fd:
                self.vars.set('lines', [x.rstrip() for x in fd.readlines()], change=True, diff=True)
        self.vars.set('is_blacklisted', self._is_module_blocked(), change=True)

    filename = '/etc/modprobe.d/blacklist-ansible.conf'
    def _is_module_blocked(self):
        for line in self.vars.lines:
            stripped = line.strip()
            if stripped.startswith('#'):
                continue
            if self.pattern.match(stripped):
                return True
        return False

    if module.params['blacklist_file']:
        filename = module.params['blacklist_file']
    def state_absent(self):
        if not self.vars.is_blacklisted:
            return
        self.vars.is_blacklisted = False
        self.vars.lines = [line for line in self.vars.lines if not self.pattern.match(line.strip())]

    blacklist = Blacklist(args['name'], filename, module.check_mode)
    def state_present(self):
        if self.vars.is_blacklisted:
            return
        self.vars.is_blacklisted = True
        self.vars.lines = self.vars.lines + ['blacklist %s' % self.vars.name]

    if blacklist.create_file():
        args['changed'] = True
    else:
        args['changed'] = False
    def __quit_module__(self):
        if self.has_changed() and not self.module.check_mode:
            dummy, tmpfile = tempfile.mkstemp()
            try:
                os.remove(tmpfile)
                self.module.preserved_copy(self.vars.filename, tmpfile)  # ensure right perms/ownership
                with open(tmpfile, 'w') as fd:
                    fd.writelines(["{0}\n".format(x) for x in self.vars.lines])
                self.module.atomic_move(tmpfile, self.vars.filename)
            finally:
                if os.path.exists(tmpfile):
                    os.remove(tmpfile)

    if blacklist.module_listed():
        if args['state'] == 'absent':
            blacklist.remove_module()
            args['changed'] = True
    else:
        if args['state'] == 'present':
            blacklist.add_module()
            args['changed'] = True

    module.exit_json(**args)
def main():
    Blacklist.execute()


if __name__ == '__main__':

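The rewrite above also tightens the match: the old pattern used \s* (so "blacklistnouveau" could match) and interpolated the raw name, while the new one requires whitespace and escapes the module name. A small demonstration of the new anchored pattern on sample lines:

import re

# Same construction as the StateModuleHelper version above; 'nouveau' is an example name.
pattern = re.compile(r'^blacklist\s+{0}$'.format(re.escape('nouveau')))
for line in ['blacklist nouveau', '# blacklist nouveau', 'blacklist nouveau_extra']:
    print(line, '->', bool(pattern.match(line.strip())))
# True, False (comment), False (different module name)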
@@ -141,14 +141,14 @@ class Plist:
        self.__changed = False
        self.__service = service

        state, pid, dummy, dummy = LaunchCtlList(module, service).run()
        state, pid, dummy, dummy = LaunchCtlList(module, self.__service).run()

        # Check if readPlist is available or not
        self.old_plistlib = hasattr(plistlib, 'readPlist')

        self.__file = self.__find_service_plist(service)
        self.__file = self.__find_service_plist(self.__service)
        if self.__file is None:
            msg = 'Unable to infer the path of %s service plist file' % service
            msg = 'Unable to infer the path of %s service plist file' % self.__service
            if pid is None and state == ServiceState.UNLOADED:
                msg += ' and it was not found among active services'
            module.fail_json(msg=msg)

@@ -41,17 +41,27 @@ options:
        aliases: [ state ]
    node_auth:
        description:
        - The value for C(discovery.sendtargets.auth.authmethod).
        - The value for C(node.session.auth.authmethod).
        type: str
        default: CHAP
    node_user:
        description:
        - The value for C(discovery.sendtargets.auth.username).
        - The value for C(node.session.auth.username).
        type: str
    node_pass:
        description:
        - The value for C(discovery.sendtargets.auth.password).
        - The value for C(node.session.auth.password).
        type: str
    node_user_in:
        description:
        - The value for C(node.session.auth.username_in).
        type: str
        version_added: 3.8.0
    node_pass_in:
        description:
        - The value for C(node.session.auth.password_in).
        type: str
        version_added: 3.8.0
    auto_node_startup:
        description:
        - Whether the target node should be automatically connected at startup.
@@ -125,6 +135,7 @@ import time

from ansible.module_utils.basic import AnsibleModule

ISCSIADM = 'iscsiadm'
iscsiadm_cmd = None


def compare_nodelists(l1, l2):
@@ -134,12 +145,12 @@ def compare_nodelists(l1, l2):


def iscsi_get_cached_nodes(module, portal=None):
    cmd = '%s --mode node' % iscsiadm_cmd
    (rc, out, err) = module.run_command(cmd)
    cmd = [iscsiadm_cmd, '--mode', 'node']
    rc, out, err = module.run_command(cmd)

    nodes = []
    if rc == 0:
        lines = out.splitlines()
        nodes = []
        for line in lines:
            # line format is "ip:port,target_portal_group_tag targetname"
            parts = line.split()
@@ -156,7 +167,7 @@ def iscsi_get_cached_nodes(module, portal=None):
    # for newer versions see iscsiadm(8); also usr/iscsiadm.c for details
    # err can contain [N|n]o records...
    elif rc == 21 or (rc == 255 and "o records found" in err):
        nodes = []
        pass
    else:
        module.fail_json(cmd=cmd, rc=rc, msg=err)

@@ -164,16 +175,13 @@ def iscsi_get_cached_nodes(module, portal=None):


def iscsi_discover(module, portal, port):
    cmd = '%s --mode discovery --type sendtargets --portal %s:%s' % (iscsiadm_cmd, portal, port)
    (rc, out, err) = module.run_command(cmd)

    if rc > 0:
        module.fail_json(cmd=cmd, rc=rc, msg=err)
    cmd = [iscsiadm_cmd, '--mode', 'discovery', '--type', 'sendtargets', '--portal', '%s:%s' % (portal, port)]
    module.run_command(cmd, check_rc=True)


def target_loggedon(module, target, portal=None, port=None):
    cmd = '%s --mode session' % iscsiadm_cmd
    (rc, out, err) = module.run_command(cmd)
    cmd = [iscsiadm_cmd, '--mode', 'session']
    rc, out, err = module.run_command(cmd)

    if portal is None:
        portal = ""
@@ -193,36 +201,38 @@ def target_login(module, target, portal=None, port=None):
    node_auth = module.params['node_auth']
    node_user = module.params['node_user']
    node_pass = module.params['node_pass']
    node_user_in = module.params['node_user_in']
    node_pass_in = module.params['node_pass_in']

    if node_user:
        params = [('node.session.auth.authmethod', node_auth),
                  ('node.session.auth.username', node_user),
                  ('node.session.auth.password', node_pass)]
        for (name, value) in params:
            cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', name, '--value', value]
            module.run_command(cmd, check_rc=True)

    if node_user_in:
        params = [('node.session.auth.username_in', node_user_in),
                  ('node.session.auth.password_in', node_pass_in)]
        for (name, value) in params:
            cmd = '%s --mode node --targetname %s --op=update --name %s --value %s' % (iscsiadm_cmd, target, name, value)
            (rc, out, err) = module.run_command(cmd)
            if rc > 0:
                module.fail_json(cmd=cmd, rc=rc, msg=err)
            module.run_command(cmd, check_rc=True)

    cmd = '%s --mode node --targetname %s --login' % (iscsiadm_cmd, target)
    cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--login']
    if portal is not None and port is not None:
        cmd += ' --portal %s:%s' % (portal, port)
        cmd.append('--portal')
        cmd.append('%s:%s' % (portal, port))

    (rc, out, err) = module.run_command(cmd)

    if rc > 0:
        module.fail_json(cmd=cmd, rc=rc, msg=err)
    module.run_command(cmd, check_rc=True)


def target_logout(module, target):
    cmd = '%s --mode node --targetname %s --logout' % (iscsiadm_cmd, target)
    (rc, out, err) = module.run_command(cmd)

    if rc > 0:
        module.fail_json(cmd=cmd, rc=rc, msg=err)
    cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--logout']
    module.run_command(cmd, check_rc=True)


def target_device_node(module, target):
def target_device_node(target):
    # if anyone knows a better way to find out which devicenodes get created for
    # a given target...

@@ -239,51 +249,39 @@ def target_device_node(target):


def target_isauto(module, target, portal=None, port=None):
    cmd = '%s --mode node --targetname %s' % (iscsiadm_cmd, target)
    cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target]

    if portal is not None:
        if port is not None:
            portal = '%s:%s' % (portal, port)
        cmd = '%s --portal %s' % (cmd, portal)
    if portal is not None and port is not None:
        cmd.append('--portal')
        cmd.append('%s:%s' % (portal, port))

    (rc, out, err) = module.run_command(cmd)
    dummy, out, dummy = module.run_command(cmd, check_rc=True)

    if rc == 0:
        lines = out.splitlines()
        for line in lines:
            if 'node.startup' in line:
                return 'automatic' in line
        return False
    else:
        module.fail_json(cmd=cmd, rc=rc, msg=err)
    lines = out.splitlines()
    for line in lines:
        if 'node.startup' in line:
            return 'automatic' in line
    return False


def target_setauto(module, target, portal=None, port=None):
    cmd = '%s --mode node --targetname %s --op=update --name node.startup --value automatic' % (iscsiadm_cmd, target)
    cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', 'node.startup', '--value', 'automatic']

    if portal is not None:
        if port is not None:
            portal = '%s:%s' % (portal, port)
        cmd = '%s --portal %s' % (cmd, portal)
    if portal is not None and port is not None:
        cmd.append('--portal')
        cmd.append('%s:%s' % (portal, port))

    (rc, out, err) = module.run_command(cmd)

    if rc > 0:
        module.fail_json(cmd=cmd, rc=rc, msg=err)
    module.run_command(cmd, check_rc=True)


def target_setmanual(module, target, portal=None, port=None):
    cmd = '%s --mode node --targetname %s --op=update --name node.startup --value manual' % (iscsiadm_cmd, target)
    cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', 'node.startup', '--value', 'manual']

    if portal is not None:
        if port is not None:
            portal = '%s:%s' % (portal, port)
        cmd = '%s --portal %s' % (cmd, portal)
    if portal is not None and port is not None:
        cmd.append('--portal')
        cmd.append('%s:%s' % (portal, port))

    (rc, out, err) = module.run_command(cmd)

    if rc > 0:
        module.fail_json(cmd=cmd, rc=rc, msg=err)
    module.run_command(cmd, check_rc=True)


def main():
@@ -298,6 +296,8 @@ def main():
            node_auth=dict(type='str', default='CHAP'),
            node_user=dict(type='str'),
            node_pass=dict(type='str', no_log=True),
            node_user_in=dict(type='str'),
            node_pass_in=dict(type='str', no_log=True),

            # actions
            login=dict(type='bool', aliases=['state']),
@@ -307,7 +307,8 @@ def main():
            show_nodes=dict(type='bool', default=False),
        ),

        required_together=[['node_user', 'node_pass']],
        required_together=[['node_user', 'node_pass'], ['node_user_in', 'node_pass_in']],
        required_if=[('discover', True, ['portal'])],
        supports_check_mode=True,
    )

@@ -335,13 +336,10 @@ def main():
    cached = iscsi_get_cached_nodes(module, portal)

    # return json dict
    result = {}
    result['changed'] = False
    result = {'changed': False}

    if discover:
        if portal is None:
            module.fail_json(msg="Need to specify at least the portal (ip) to discover")
        elif check:
        if check:
            nodes = cached
        else:
            iscsi_discover(module, portal, port)
@@ -376,13 +374,13 @@ def main():
        if (login and loggedon) or (not login and not loggedon):
            result['changed'] |= False
            if login:
                result['devicenodes'] = target_device_node(module, target)
                result['devicenodes'] = target_device_node(target)
        elif not check:
            if login:
                target_login(module, target, portal, port)
                # give udev some time
                time.sleep(1)
                result['devicenodes'] = target_device_node(module, target)
                result['devicenodes'] = target_device_node(target)
            else:
                target_logout(module, target)
            result['changed'] |= True

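A recurring pattern in the open_iscsi diff above is replacing interpolated command strings with argv lists, which sidesteps shell quoting entirely. A reduced sketch of the login-command construction, with an invented target and portal:

# Illustrative values only; real targets come from discovery.
target = 'iqn.2003-01.org.example:storage.disk1'
portal, port = '10.0.0.5', '3260'

cmd = ['iscsiadm', '--mode', 'node', '--targetname', target, '--login']
if portal is not None and port is not None:
    cmd.append('--portal')
    cmd.append('%s:%s' % (portal, port))
print(cmd)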
@@ -70,9 +70,7 @@ RETURN = '''
'''

import os
import glob
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes, to_native

module = None
init_script = None
@@ -81,15 +79,12 @@ init_script = None
# ===============================
# Check if service is enabled
def is_enabled():
    (rc, out, err) = module.run_command("%s enabled" % init_script)
    if rc == 0:
        return True
    return False
    rc, dummy, dummy = module.run_command([init_script, 'enabled'])
    return rc == 0


# ===========================================
# Main control flow

def main():
    global module, init_script
    # init
@@ -98,22 +93,19 @@ def main():
            name=dict(required=True, type='str', aliases=['service']),
            state=dict(type='str', choices=['started', 'stopped', 'restarted', 'reloaded']),
            enabled=dict(type='bool'),
            pattern=dict(type='str', required=False, default=None),
            pattern=dict(type='str'),
        ),
        supports_check_mode=True,
        required_one_of=[['state', 'enabled']],
        required_one_of=[('state', 'enabled')],
    )

    # initialize
    service = module.params['name']
    init_script = '/etc/init.d/' + service
    rc = 0
    out = err = ''
    result = {
        'name': service,
        'changed': False,
    }

    # check if service exists
    if not os.path.exists(init_script):
        module.fail_json(msg='service %s does not exist' % service)
@@ -129,13 +121,10 @@ def main():
    # Change enable/disable if needed
    if enabled != module.params['enabled']:
        result['changed'] = True
        if module.params['enabled']:
            action = 'enable'
        else:
            action = 'disable'
        action = 'enable' if module.params['enabled'] else 'disable'

        if not module.check_mode:
            (rc, out, err) = module.run_command("%s %s" % (init_script, action))
            rc, dummy, err = module.run_command([init_script, action])
            # openwrt init scripts can return a non-zero exit code on a successful 'enable'
            # command if the init script doesn't contain a STOP value, so we ignore the exit
            # code and explicitly check if the service is now in the desired state
@@ -153,17 +142,13 @@ def main():
            psbin = module.get_bin_path('ps', True)

            # this should be busybox ps, so we only want/need the 'w' option
            (rc, psout, pserr) = module.run_command('%s w' % psbin)
            rc, psout, dummy = module.run_command([psbin, 'w'])
            # If rc is 0, set running as appropriate
            if rc == 0:
                lines = psout.split("\n")
                for line in lines:
                    if module.params['pattern'] in line and "pattern=" not in line:
                        # so as to not confuse ./hacking/test-module.py
                        running = True
                        break
                running = any((module.params['pattern'] in line and "pattern=" not in line) for line in lines)
        else:
            (rc, out, err) = module.run_command("%s running" % init_script)
            rc, dummy, dummy = module.run_command([init_script, 'running'])
            if rc == 0:
                running = True

@@ -187,7 +172,7 @@ def main():

    if action:
        if not module.check_mode:
            (rc, out, err) = module.run_command("%s %s" % (init_script, action))
            rc, dummy, err = module.run_command([init_script, action])
            if rc != 0:
                module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))


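The any() rewrite above collapses the manual loop-and-break into one expression over the `ps w` output. A self-contained demonstration with fabricated process output and a sample pattern:

# Invented busybox-style `ps w` output and pattern, just to exercise the expression.
psout = "  42 root     1234 S  /usr/sbin/dropbear -p 22\n"
pattern = 'dropbear'
running = any((pattern in line and "pattern=" not in line) for line in psout.split("\n"))
print(running)  # True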
@@ -274,8 +274,7 @@ RULE_REGEX = re.compile(r"""(?P<rule_type>-?(?:auth|account|session|password))\s
                        (?P<control>\[.*\]|\S*)\s+
                        (?P<path>\S*)\s*
                        (?P<args>.*)\s*""", re.X)

RULE_ARG_REGEX = re.compile(r"""(\[.*\]|\S*)""")
RULE_ARG_REGEX = re.compile(r"(\[.*\]|\S*)")

VALID_TYPES = ['account', '-account', 'auth', '-auth', 'password', '-password', 'session', '-session']

@@ -358,11 +357,9 @@ class PamdRule(PamdLine):

    # Method to check if a rule matches the type, control and path.
    def matches(self, rule_type, rule_control, rule_path, rule_args=None):
        if (rule_type == self.rule_type and
        return (rule_type == self.rule_type and
                rule_control == self.rule_control and
                rule_path == self.rule_path):
            return True
        return False
                rule_path == self.rule_path)

    @classmethod
    def rule_from_string(cls, line):
@@ -507,25 +504,25 @@ class PamdService(object):
        # Get a list of rules we want to change
        rules_to_find = self.get(rule_type, rule_control, rule_path)

        new_args = parse_module_arguments(new_args)
        new_args = parse_module_arguments(new_args, return_none=True)

        changes = 0
        for current_rule in rules_to_find:
            rule_changed = False
            if new_type:
                if(current_rule.rule_type != new_type):
                if current_rule.rule_type != new_type:
                    rule_changed = True
                    current_rule.rule_type = new_type
            if new_control:
                if(current_rule.rule_control != new_control):
                if current_rule.rule_control != new_control:
                    rule_changed = True
                    current_rule.rule_control = new_control
            if new_path:
                if(current_rule.rule_path != new_path):
                if current_rule.rule_path != new_path:
                    rule_changed = True
                    current_rule.rule_path = new_path
            if new_args:
                if(current_rule.rule_args != new_args):
            if new_args is not None:
                if current_rule.rule_args != new_args:
                    rule_changed = True
                    current_rule.rule_args = new_args

@@ -724,8 +721,9 @@ class PamdService(object):
|
||||
current_line = self._head
|
||||
|
||||
while current_line is not None:
|
||||
if not current_line.validate()[0]:
|
||||
return current_line.validate()
|
||||
curr_validate = current_line.validate()
|
||||
if not curr_validate[0]:
|
||||
return curr_validate
|
||||
current_line = current_line.next
|
||||
return True, "Module is valid"
|
||||
|
||||
@@ -750,22 +748,25 @@ class PamdService(object):
|
||||
return '\n'.join(lines) + '\n'
|
||||
|
||||
|
||||
def parse_module_arguments(module_arguments):
|
||||
# Return empty list if we have no args to parse
|
||||
if not module_arguments:
|
||||
return []
|
||||
elif isinstance(module_arguments, list) and len(module_arguments) == 1 and not module_arguments[0]:
|
||||
def parse_module_arguments(module_arguments, return_none=False):
|
||||
# If args is None, return empty list by default.
|
||||
# But if return_none is True, then return None
|
||||
if module_arguments is None:
|
||||
return None if return_none else []
|
||||
if isinstance(module_arguments, list) and len(module_arguments) == 1 and not module_arguments[0]:
|
||||
return []
|
||||
|
||||
if not isinstance(module_arguments, list):
|
||||
module_arguments = [module_arguments]
|
||||
|
||||
parsed_args = list()
|
||||
# From this point on, module_arguments is guaranteed to be a list, empty or not
|
||||
parsed_args = []
|
||||
|
||||
re_clear_spaces = re.compile(r"\s*=\s*")
|
||||
for arg in module_arguments:
|
||||
for item in filter(None, RULE_ARG_REGEX.findall(arg)):
|
||||
if not item.startswith("["):
|
||||
re.sub("\\s*=\\s*", "=", item)
|
||||
re_clear_spaces.sub("=", item)
|
||||
parsed_args.append(item)
|
||||
|
||||
return parsed_args
|
||||
@@ -861,8 +862,7 @@ def main():
|
||||
fd.write(str(service))
|
||||
|
||||
except IOError:
|
||||
module.fail_json(msg='Unable to create temporary \
|
||||
file %s' % temp_file)
|
||||
module.fail_json(msg='Unable to create temporary file %s' % temp_file)
|
||||
|
||||
module.atomic_move(temp_file.name, os.path.realpath(fname))
|
||||
|
||||
|
||||
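The update_rule hunk above needs to tell "no new arguments supplied" apart from "arguments happen to be empty", which is what the new return_none flag provides. A condensed, runnable sketch of the resulting contract (simplified from the module; unlike the diff, the sub() result is assigned here so the sketch is self-consistent):

    import re

    RULE_ARG_REGEX = re.compile(r"(\[.*\]|\S*)")

    def parse_module_arguments(module_arguments, return_none=False):
        # None means "caller did not supply args" and can be preserved as None.
        if module_arguments is None:
            return None if return_none else []
        if isinstance(module_arguments, list) and len(module_arguments) == 1 and not module_arguments[0]:
            return []
        if not isinstance(module_arguments, list):
            module_arguments = [module_arguments]

        parsed_args = []
        re_clear_spaces = re.compile(r"\s*=\s*")
        for arg in module_arguments:
            for item in filter(None, RULE_ARG_REGEX.findall(arg)):
                if not item.startswith("["):
                    item = re_clear_spaces.sub("=", item)
                parsed_args.append(item)
        return parsed_args

    print(parse_module_arguments(None, return_none=True))   # None -> leave rule args untouched
    print(parse_module_arguments(None))                      # []   -> old behavior
    print(parse_module_arguments("try_first_pass retry=3"))  # ['try_first_pass', 'retry=3']
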
@@ -54,9 +54,12 @@ pids:
  sample: [100,200]
'''

import abc
import re
from distutils.version import LooseVersion
from os.path import basename

from ansible.module_utils import six
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native

@@ -68,6 +71,100 @@ except ImportError:
    HAS_PSUTIL = False


class PSAdapterError(Exception):
    pass


@six.add_metaclass(abc.ABCMeta)
class PSAdapter(object):
    NAME_ATTRS = ('name', 'cmdline')
    PATTERN_ATTRS = ('name', 'exe', 'cmdline')

    def __init__(self, psutil):
        self._psutil = psutil

    @staticmethod
    def from_package(psutil):
        version = LooseVersion(psutil.__version__)
        if version < LooseVersion('2.0.0'):
            return PSAdapter100(psutil)
        elif version < LooseVersion('5.3.0'):
            return PSAdapter200(psutil)
        else:
            return PSAdapter530(psutil)

    def get_pids_by_name(self, name):
        return [p.pid for p in self._process_iter(*self.NAME_ATTRS) if self._has_name(p, name)]

    def _process_iter(self, *attrs):
        return self._psutil.process_iter()

    def _has_name(self, proc, name):
        attributes = self._get_proc_attributes(proc, *self.NAME_ATTRS)
        return (compare_lower(attributes['name'], name) or
                attributes['cmdline'] and compare_lower(attributes['cmdline'][0], name))

    def _get_proc_attributes(self, proc, *attributes):
        return dict((attribute, self._get_attribute_from_proc(proc, attribute)) for attribute in attributes)

    @staticmethod
    @abc.abstractmethod
    def _get_attribute_from_proc(proc, attribute):
        pass

    def get_pids_by_pattern(self, pattern, ignore_case):
        flags = 0
        if ignore_case:
            flags |= re.I

        try:
            regex = re.compile(pattern, flags)
        except re.error as e:
            raise PSAdapterError("'%s' is not a valid regular expression: %s" % (pattern, to_native(e)))

        return [p.pid for p in self._process_iter(*self.PATTERN_ATTRS) if self._matches_regex(p, regex)]

    def _matches_regex(self, proc, regex):
        # See https://psutil.readthedocs.io/en/latest/#find-process-by-name for more information
        attributes = self._get_proc_attributes(proc, *self.PATTERN_ATTRS)
        matches_name = regex.search(to_native(attributes['name']))
        matches_exe = attributes['exe'] and regex.search(basename(to_native(attributes['exe'])))
        matches_cmd = attributes['cmdline'] and regex.search(to_native(' '.join(attributes['cmdline'])))

        return any([matches_name, matches_exe, matches_cmd])


class PSAdapter100(PSAdapter):
    def __init__(self, psutil):
        super(PSAdapter100, self).__init__(psutil)

    @staticmethod
    def _get_attribute_from_proc(proc, attribute):
        return getattr(proc, attribute)


class PSAdapter200(PSAdapter):
    def __init__(self, psutil):
        super(PSAdapter200, self).__init__(psutil)

    @staticmethod
    def _get_attribute_from_proc(proc, attribute):
        method = getattr(proc, attribute)
        return method()


class PSAdapter530(PSAdapter):
    def __init__(self, psutil):
        super(PSAdapter530, self).__init__(psutil)

    def _process_iter(self, *attrs):
        return self._psutil.process_iter(attrs=attrs)

    @staticmethod
    def _get_attribute_from_proc(proc, attribute):
        return proc.info[attribute]


def compare_lower(a, b):
    if a is None or b is None:
        # this could just be "return False" but would lead to surprising behavior if both a and b are None

@@ -76,38 +173,36 @@ def compare_lower(a, b):
    return a.lower() == b.lower()


def get_pid(name):
    pids = []
class Pids(object):
    def __init__(self, module):
        if not HAS_PSUTIL:
            module.fail_json(msg=missing_required_lib('psutil'))

    try:
        for proc in psutil.process_iter(attrs=['name', 'cmdline']):
            if compare_lower(proc.info['name'], name) or \
                    proc.info['cmdline'] and compare_lower(proc.info['cmdline'][0], name):
                pids.append(proc.pid)
    except TypeError:  # EL6, EL7: process_iter() takes no arguments (1 given)
        for proc in psutil.process_iter():
            try:  # EL7
                proc_name, proc_cmdline = proc.name(), proc.cmdline()
            except TypeError:  # EL6: 'str' object is not callable
                proc_name, proc_cmdline = proc.name, proc.cmdline
            if compare_lower(proc_name, name) or \
                    proc_cmdline and compare_lower(proc_cmdline[0], name):
                pids.append(proc.pid)
    return pids
        self._ps = PSAdapter.from_package(psutil)

        self._module = module
        self._name = module.params['name']
        self._pattern = module.params['pattern']
        self._ignore_case = module.params['ignore_case']

def get_matching_command_pids(pattern, ignore_case):
    flags = 0
    if ignore_case:
        flags |= re.I
        self._pids = []

    regex = re.compile(pattern, flags)
    # See https://psutil.readthedocs.io/en/latest/#find-process-by-name for more information
    return [p.pid for p in psutil.process_iter(["name", "exe", "cmdline"])
            if regex.search(to_native(p.info["name"]))
            or (p.info["exe"] and regex.search(basename(to_native(p.info["exe"]))))
            or (p.info["cmdline"] and regex.search(to_native(' '.join(p.cmdline()))))
            ]
    def execute(self):
        if self._name:
            self._pids = self._ps.get_pids_by_name(self._name)
        else:
            try:
                self._pids = self._ps.get_pids_by_pattern(self._pattern, self._ignore_case)
            except PSAdapterError as e:
                self._module.fail_json(msg=to_native(e))

        return self._module.exit_json(**self.result)

    @property
    def result(self):
        return {
            'pids': self._pids,
        }


def main():

@@ -126,22 +221,7 @@ def main():
        supports_check_mode=True,
    )

    if not HAS_PSUTIL:
        module.fail_json(msg=missing_required_lib('psutil'))

    name = module.params["name"]
    pattern = module.params["pattern"]
    ignore_case = module.params["ignore_case"]

    if name:
        response = dict(pids=get_pid(name))
    else:
        try:
            response = dict(pids=get_matching_command_pids(pattern, ignore_case))
        except re.error as e:
            module.fail_json(msg="'%s' is not a valid regular expression: %s" % (pattern, to_native(e)))

    module.exit_json(**response)
    Pids(module).execute()


if __name__ == '__main__':

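The pids rewrite replaces try/except probing of psutil with adapters picked once by version. A minimal runnable sketch of the same three-way dispatch used by PSAdapter.from_package (the version strings below are illustrative):

    from distutils.version import LooseVersion

    def pick_adapter(version_string):
        # Same three-way split as PSAdapter.from_package above.
        version = LooseVersion(version_string)
        if version < LooseVersion('2.0.0'):
            return 'attribute style (psutil < 2.0.0)'
        elif version < LooseVersion('5.3.0'):
            return 'method style (2.0.0 <= psutil < 5.3.0)'
        return 'process_iter(attrs=...) style (psutil >= 5.3.0)'

    print(pick_adapter('1.2.1'))  # attribute style (psutil < 2.0.0)
    print(pick_adapter('5.4.2'))  # process_iter(attrs=...) style (psutil >= 5.3.0)
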
@@ -54,6 +54,8 @@ options:
    description:
      - Insert the corresponding rule as rule number NUM.
      - Note that ufw numbers rules starting with 1.
      - If I(delete=true) and a value is provided for I(insert),
        then I(insert) is ignored.
    type: int
  insert_relative_to:
    description:

@@ -120,6 +122,8 @@ options:
  delete:
    description:
      - Delete rule.
      - If I(delete=true) and a value is provided for I(insert),
        then I(insert) is ignored.
    type: bool
    default: false
  interface:

@@ -511,12 +515,12 @@ def main():
                             'interface_in and interface_out')
    # Rules are constructed according to the long format
    #
    # ufw [--dry-run] [route] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
    # ufw [--dry-run] [route] [delete | insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
    #     [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
    #     [proto protocol] [app application] [comment COMMENT]
    cmd.append([module.boolean(params['route']), 'route'])
    cmd.append([module.boolean(params['delete']), 'delete'])
    if params['insert'] is not None:
    if params['insert'] is not None and not params['delete']:
        relative_to_cmd = params['insert_relative_to']
        if relative_to_cmd == 'zero':
            insert_to = params['insert']

@@ -526,8 +530,8 @@ def main():
            lines = [(numbered_line_re.match(line), '(v6)' in line) for line in numbered_state.splitlines()]
            lines = [(int(matcher.group(1)), ipv6) for (matcher, ipv6) in lines if matcher]
            last_number = max([no for (no, ipv6) in lines]) if lines else 0
            has_ipv4 = any([not ipv6 for (no, ipv6) in lines])
            has_ipv6 = any([ipv6 for (no, ipv6) in lines])
            has_ipv4 = any(not ipv6 for (no, ipv6) in lines)
            has_ipv6 = any(ipv6 for (no, ipv6) in lines)
            if relative_to_cmd == 'first-ipv4':
                relative_to = 1
            elif relative_to_cmd == 'last-ipv4':

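The ufw change makes delete take precedence over insert, matching the documentation text added above. A tiny sketch of the new guard (the parameter values are hypothetical):

    params = {'insert': 3, 'delete': True}

    # Old: the insert branch ran even for deletions, yielding "delete insert 3 ...".
    # New: insert is skipped whenever a delete is requested.
    if params['insert'] is not None and not params['delete']:
        cmd_fragment = 'insert %d' % params['insert']
    else:
        cmd_fragment = 'delete' if params['delete'] else ''
    print(cmd_fragment)  # 'delete'
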
@@ -315,7 +315,7 @@ except ImportError:
#
# @return vdolist A list of currently created VDO volumes.
def inventory_vdos(module, vdocmd):
    rc, vdostatusout, err = module.run_command("%s status" % (vdocmd))
    rc, vdostatusout, err = module.run_command([vdocmd, "status"])

    # if rc != 0:
    #     module.fail_json(msg="Inventorying VDOs failed: %s"

@@ -323,15 +323,13 @@ def inventory_vdos(module, vdocmd):

    vdolist = []

    if (rc == 2 and
            re.findall(r"vdoconf.yml does not exist", err, re.MULTILINE)):
    if rc == 2 and re.findall(r"vdoconf\.yml does not exist", err, re.MULTILINE):
        # If there is no /etc/vdoconf.yml file, assume there are no
        # VDO volumes. Return an empty list of VDO volumes.
        return vdolist

    if rc != 0:
        module.fail_json(msg="Inventorying VDOs failed: %s"
                         % vdostatusout, rc=rc, err=err)
        module.fail_json(msg="Inventorying VDOs failed: %s" % vdostatusout, rc=rc, err=err)

    vdostatusyaml = yaml.load(vdostatusout)
    if vdostatusyaml is None:

@@ -346,7 +344,7 @@ def inventory_vdos(module, vdocmd):


def list_running_vdos(module, vdocmd):
    rc, vdolistout, err = module.run_command("%s list" % (vdocmd))
    rc, vdolistout, err = module.run_command([vdocmd, "list"])
    runningvdolist = filter(None, vdolistout.split('\n'))
    return runningvdolist

@@ -360,36 +358,30 @@ def list_running_vdos(module, vdocmd):
#
# @return vdocmdoptions A string to be used in a 'vdo <action>' command.
def start_vdo(module, vdoname, vdocmd):
    rc, out, err = module.run_command("%s start --name=%s" % (vdocmd, vdoname))
    rc, out, err = module.run_command([vdocmd, "start", "--name=%s" % vdoname])
    if rc == 0:
        module.log("started VDO volume %s" % vdoname)

    return rc


def stop_vdo(module, vdoname, vdocmd):
    rc, out, err = module.run_command("%s stop --name=%s" % (vdocmd, vdoname))
    rc, out, err = module.run_command([vdocmd, "stop", "--name=%s" % vdoname])
    if rc == 0:
        module.log("stopped VDO volume %s" % vdoname)

    return rc


def activate_vdo(module, vdoname, vdocmd):
    rc, out, err = module.run_command("%s activate --name=%s"
                                      % (vdocmd, vdoname))
    rc, out, err = module.run_command([vdocmd, "activate", "--name=%s" % vdoname])
    if rc == 0:
        module.log("activated VDO volume %s" % vdoname)

    return rc


def deactivate_vdo(module, vdoname, vdocmd):
    rc, out, err = module.run_command("%s deactivate --name=%s"
                                      % (vdocmd, vdoname))
    rc, out, err = module.run_command([vdocmd, "deactivate", "--name=%s" % vdoname])
    if rc == 0:
        module.log("deactivated VDO volume %s" % vdoname)

    return rc


@@ -397,32 +389,31 @@ def add_vdooptions(params):
    vdocmdoptions = ""
    options = []

    if ('logicalsize' in params) and (params['logicalsize'] is not None):
    if params.get('logicalsize') is not None:
        options.append("--vdoLogicalSize=" + params['logicalsize'])

    if (('blockmapcachesize' in params) and
            (params['blockmapcachesize'] is not None)):
    if params.get('blockmapcachesize') is not None:
        options.append("--blockMapCacheSize=" + params['blockmapcachesize'])

    if ('readcache' in params) and (params['readcache'] == 'enabled'):
    if params.get('readcache') == 'enabled':
        options.append("--readCache=enabled")

    if ('readcachesize' in params) and (params['readcachesize'] is not None):
    if params.get('readcachesize') is not None:
        options.append("--readCacheSize=" + params['readcachesize'])

    if ('slabsize' in params) and (params['slabsize'] is not None):
    if params.get('slabsize') is not None:
        options.append("--vdoSlabSize=" + params['slabsize'])

    if ('emulate512' in params) and (params['emulate512']):
    if params.get('emulate512'):
        options.append("--emulate512=enabled")

    if ('indexmem' in params) and (params['indexmem'] is not None):
    if params.get('indexmem') is not None:
        options.append("--indexMem=" + params['indexmem'])

    if ('indexmode' in params) and (params['indexmode'] == 'sparse'):
    if params.get('indexmode') == 'sparse':
        options.append("--sparseIndex=enabled")

    if ('force' in params) and (params['force']):
    if params.get('force'):
        options.append("--force")

    # Entering an invalid thread config results in a cryptic

@@ -431,23 +422,21 @@ def add_vdooptions(params):
    # output a more helpful message, but one would have to log
    # onto that system to read the error. For now, heed the thread
    # limit warnings in the DOCUMENTATION section above.
    if ('ackthreads' in params) and (params['ackthreads'] is not None):
    if params.get('ackthreads') is not None:
        options.append("--vdoAckThreads=" + params['ackthreads'])

    if ('biothreads' in params) and (params['biothreads'] is not None):
    if params.get('biothreads') is not None:
        options.append("--vdoBioThreads=" + params['biothreads'])

    if ('cputhreads' in params) and (params['cputhreads'] is not None):
    if params.get('cputhreads') is not None:
        options.append("--vdoCpuThreads=" + params['cputhreads'])

    if ('logicalthreads' in params) and (params['logicalthreads'] is not None):
    if params.get('logicalthreads') is not None:
        options.append("--vdoLogicalThreads=" + params['logicalthreads'])

    if (('physicalthreads' in params) and
            (params['physicalthreads'] is not None)):
    if params.get('physicalthreads') is not None:
        options.append("--vdoPhysicalThreads=" + params['physicalthreads'])

    vdocmdoptions = ' '.join(options)
    return vdocmdoptions

@@ -531,31 +520,24 @@ def run_module():
        # Since this is a creation of a new VDO volume, it will contain
        # all of the parameters given by the playbook; the rest will
        # assume default values.
        options = module.params
        vdocmdoptions = add_vdooptions(options)
        rc, out, err = module.run_command("%s create --name=%s --device=%s %s"
                                          % (vdocmd, desiredvdo, device,
                                             vdocmdoptions))
        vdocmdoptions = add_vdooptions(module.params)
        rc, out, err = module.run_command(
            [vdocmd, "create", "--name=%s" % desiredvdo, "--device=%s" % device] + vdocmdoptions)
        if rc == 0:
            result['changed'] = True
        else:
            module.fail_json(msg="Creating VDO %s failed."
                             % desiredvdo, rc=rc, err=err)
            module.fail_json(msg="Creating VDO %s failed." % desiredvdo, rc=rc, err=err)

        if (module.params['compression'] == 'disabled'):
            rc, out, err = module.run_command("%s disableCompression --name=%s"
                                              % (vdocmd, desiredvdo))
        if module.params['compression'] == 'disabled':
            rc, out, err = module.run_command([vdocmd, "disableCompression", "--name=%s" % desiredvdo])

        if ((module.params['deduplication'] is not None) and
                module.params['deduplication'] == 'disabled'):
            rc, out, err = module.run_command("%s disableDeduplication "
                                              "--name=%s"
                                              % (vdocmd, desiredvdo))
        if module.params['deduplication'] == 'disabled':
            rc, out, err = module.run_command([vdocmd, "disableDeduplication", "--name=%s" % desiredvdo])

        if module.params['activated'] == 'no':
        if module.params['activated'] is False:
            deactivate_vdo(module, desiredvdo, vdocmd)

        if module.params['running'] == 'no':
        if module.params['running'] is False:
            stop_vdo(module, desiredvdo, vdocmd)

        # Print a post-run list of VDO volumes in the result object.

@@ -564,8 +546,8 @@ def run_module():
        module.exit_json(**result)

    # Modify the current parameters of a VDO that exists.
    if (desiredvdo in vdolist) and (state == 'present'):
        rc, vdostatusoutput, err = module.run_command("%s status" % (vdocmd))
    if desiredvdo in vdolist and state == 'present':
        rc, vdostatusoutput, err = module.run_command([vdocmd, "status"])
        vdostatusyaml = yaml.load(vdostatusoutput)

        # An empty dictionary to contain dictionaries of VDO statistics

@@ -630,7 +612,7 @@ def run_module():
        diffparams = {}

        # Check for differences between the playbook parameters and the
        # current parameters. This will need a comparison function;
        # since AnsibleModule params are all strings, compare them as
        # strings (but if it's None; skip).
        for key in currentparams.keys():

@@ -641,10 +623,7 @@ def run_module():
        if diffparams:
            vdocmdoptions = add_vdooptions(diffparams)
            if vdocmdoptions:
                rc, out, err = module.run_command("%s modify --name=%s %s"
                                                  % (vdocmd,
                                                     desiredvdo,
                                                     vdocmdoptions))
                rc, out, err = module.run_command([vdocmd, "modify", "--name=%s" % desiredvdo] + vdocmdoptions)
                if rc == 0:
                    result['changed'] = True
                else:

@@ -653,107 +632,36 @@ def run_module():

            if 'deduplication' in diffparams.keys():
                dedupemod = diffparams['deduplication']
                if dedupemod == 'disabled':
                    rc, out, err = module.run_command("%s "
                                                      "disableDeduplication "
                                                      "--name=%s"
                                                      % (vdocmd, desiredvdo))
                dedupeparam = "disableDeduplication" if dedupemod == 'disabled' else "enableDeduplication"
                rc, out, err = module.run_command([vdocmd, dedupeparam, "--name=%s" % desiredvdo])

                    if rc == 0:
                        result['changed'] = True
                    else:
                        module.fail_json(msg="Changing deduplication on "
                                             "VDO volume %s failed."
                                         % desiredvdo, rc=rc, err=err)

                if dedupemod == 'enabled':
                    rc, out, err = module.run_command("%s "
                                                      "enableDeduplication "
                                                      "--name=%s"
                                                      % (vdocmd, desiredvdo))

                    if rc == 0:
                        result['changed'] = True
                    else:
                        module.fail_json(msg="Changing deduplication on "
                                             "VDO volume %s failed."
                                         % desiredvdo, rc=rc, err=err)
                if rc == 0:
                    result['changed'] = True
                else:
                    module.fail_json(msg="Changing deduplication on VDO volume %s failed." % desiredvdo, rc=rc, err=err)

            if 'compression' in diffparams.keys():
                compressmod = diffparams['compression']
                if compressmod == 'disabled':
                    rc, out, err = module.run_command("%s disableCompression "
                                                      "--name=%s"
                                                      % (vdocmd, desiredvdo))

                    if rc == 0:
                        result['changed'] = True
                    else:
                        module.fail_json(msg="Changing compression on "
                                             "VDO volume %s failed."
                                         % desiredvdo, rc=rc, err=err)

                if compressmod == 'enabled':
                    rc, out, err = module.run_command("%s enableCompression "
                                                      "--name=%s"
                                                      % (vdocmd, desiredvdo))

                    if rc == 0:
                        result['changed'] = True
                    else:
                        module.fail_json(msg="Changing compression on "
                                             "VDO volume %s failed."
                                         % desiredvdo, rc=rc, err=err)
                compressparam = "disableCompression" if compressmod == 'disabled' else "enableCompression"
                rc, out, err = module.run_command([vdocmd, compressparam, "--name=%s" % desiredvdo])
                if rc == 0:
                    result['changed'] = True
                else:
                    module.fail_json(msg="Changing compression on VDO volume %s failed." % desiredvdo, rc=rc, err=err)

            if 'writepolicy' in diffparams.keys():
                writepolmod = diffparams['writepolicy']
                if writepolmod == 'auto':
                    rc, out, err = module.run_command("%s "
                                                      "changeWritePolicy "
                                                      "--name=%s "
                                                      "--writePolicy=%s"
                                                      % (vdocmd,
                                                         desiredvdo,
                                                         writepolmod))
                rc, out, err = module.run_command([
                    vdocmd,
                    "changeWritePolicy",
                    "--name=%s" % desiredvdo,
                    "--writePolicy=%s" % writepolmod,
                ])

                    if rc == 0:
                        result['changed'] = True
                    else:
                        module.fail_json(msg="Changing write policy on "
                                             "VDO volume %s failed."
                                         % desiredvdo, rc=rc, err=err)

                if writepolmod == 'sync':
                    rc, out, err = module.run_command("%s "
                                                      "changeWritePolicy "
                                                      "--name=%s "
                                                      "--writePolicy=%s"
                                                      % (vdocmd,
                                                         desiredvdo,
                                                         writepolmod))

                    if rc == 0:
                        result['changed'] = True
                    else:
                        module.fail_json(msg="Changing write policy on "
                                             "VDO volume %s failed."
                                         % desiredvdo, rc=rc, err=err)

                if writepolmod == 'async':
                    rc, out, err = module.run_command("%s "
                                                      "changeWritePolicy "
                                                      "--name=%s "
                                                      "--writePolicy=%s"
                                                      % (vdocmd,
                                                         desiredvdo,
                                                         writepolmod))

                    if rc == 0:
                        result['changed'] = True
                    else:
                        module.fail_json(msg="Changing write policy on "
                                             "VDO volume %s failed."
                                         % desiredvdo, rc=rc, err=err)
                if rc == 0:
                    result['changed'] = True
                else:
                    module.fail_json(msg="Changing write policy on VDO volume %s failed." % desiredvdo, rc=rc, err=err)

        # Process the size parameters, to determine if a growPhysical or
        # growLogical operation needs to occur.

@@ -771,19 +679,15 @@ def run_module():

        diffsizeparams = {}
        for key in sizeparams.keys():
            if module.params[key] is not None:
                if str(sizeparams[key]) != module.params[key]:
                    diffsizeparams[key] = module.params[key]
            if module.params[key] is not None and str(sizeparams[key]) != module.params[key]:
                diffsizeparams[key] = module.params[key]

        if module.params['growphysical']:
            physdevice = module.params['device']
            rc, devsectors, err = module.run_command("blockdev --getsz %s"
                                                     % (physdevice))
            rc, devsectors, err = module.run_command([module.get_bin_path("blockdev"), "--getsz", physdevice])
            devblocks = (int(devsectors) / 8)
            dmvdoname = ('/dev/mapper/' + desiredvdo)
            currentvdostats = (processedvdos[desiredvdo]
                               ['VDO statistics']
                               [dmvdoname])
            currentvdostats = processedvdos[desiredvdo]['VDO statistics'][dmvdoname]
            currentphysblocks = currentvdostats['physical blocks']

            # Set a growPhysical threshold to grow only when there is

@@ -795,34 +699,25 @@ def run_module():

            if currentphysblocks > growthresh:
                result['changed'] = True
                rc, out, err = module.run_command("%s growPhysical --name=%s"
                                                  % (vdocmd, desiredvdo))
                rc, out, err = module.run_command([vdocmd, "growPhysical", "--name=%s" % desiredvdo])

        if 'logicalsize' in diffsizeparams.keys():
            result['changed'] = True
            vdocmdoptions = ("--vdoLogicalSize=" +
                             diffsizeparams['logicalsize'])
            rc, out, err = module.run_command("%s growLogical --name=%s %s"
                                              % (vdocmd,
                                                 desiredvdo,
                                                 vdocmdoptions))
            rc, out, err = module.run_command([vdocmd, "growLogical", "--name=%s" % desiredvdo, "--vdoLogicalSize=%s" % diffsizeparams['logicalsize']])

        vdoactivatestatus = processedvdos[desiredvdo]['Activate']

        if ((module.params['activated'] == 'no') and
                (vdoactivatestatus == 'enabled')):
        if module.params['activated'] is False and vdoactivatestatus == 'enabled':
            deactivate_vdo(module, desiredvdo, vdocmd)
            if not result['changed']:
                result['changed'] = True

        if ((module.params['activated'] == 'yes') and
                (vdoactivatestatus == 'disabled')):
        if module.params['activated'] and vdoactivatestatus == 'disabled':
            activate_vdo(module, desiredvdo, vdocmd)
            if not result['changed']:
                result['changed'] = True

        if ((module.params['running'] == 'no') and
                (desiredvdo in runningvdolist)):
        if module.params['running'] is False and desiredvdo in runningvdolist:
            stop_vdo(module, desiredvdo, vdocmd)
            if not result['changed']:
                result['changed'] = True

@@ -834,10 +729,7 @@ def run_module():
        # the activate_vdo() operation succeeded, as 'vdoactivatestatus'
        # will have the activated status prior to the activate_vdo()
        # call.
        if (((vdoactivatestatus == 'enabled') or
                (module.params['activated'] == 'yes')) and
                (module.params['running'] == 'yes') and
                (desiredvdo not in runningvdolist)):
        if (vdoactivatestatus == 'enabled' or module.params['activated']) and module.params['running'] and desiredvdo not in runningvdolist:
            start_vdo(module, desiredvdo, vdocmd)
            if not result['changed']:
                result['changed'] = True

@@ -850,14 +742,12 @@ def run_module():
        module.exit_json(**result)

    # Remove a desired VDO that currently exists.
    if (desiredvdo in vdolist) and (state == 'absent'):
        rc, out, err = module.run_command("%s remove --name=%s"
                                          % (vdocmd, desiredvdo))
    if desiredvdo in vdolist and state == 'absent':
        rc, out, err = module.run_command([vdocmd, "remove", "--name=%s" % desiredvdo])
        if rc == 0:
            result['changed'] = True
        else:
            module.fail_json(msg="Removing VDO %s failed."
                             % desiredvdo, rc=rc, err=err)
            module.fail_json(msg="Removing VDO %s failed." % desiredvdo, rc=rc, err=err)

    # Print a post-run list of VDO volumes in the result object.
    vdolist = inventory_vdos(module, vdocmd)

@@ -869,8 +759,7 @@ def run_module():
    # not exist. Print a post-run list of VDO volumes in the result
    # object.
    vdolist = inventory_vdos(module, vdocmd)
    module.log("received request to remove non-existent VDO volume %s"
               % desiredvdo)
    module.log("received request to remove non-existent VDO volume %s" % desiredvdo)

    module.exit_json(**result)

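The vdo hunks above fold the duplicated enable/disable branches into a single computed subcommand and build argv lists instead of multi-line format strings. A standalone sketch of the folded pattern (the vdo binary path and volume name are placeholders; the subcommand names follow the vdo CLI as used above):

    def toggle_subcommand(feature, mode):
        # e.g. ('Deduplication', 'disabled') -> 'disableDeduplication'
        return ('disable' if mode == 'disabled' else 'enable') + feature

    argv = ['/usr/bin/vdo', toggle_subcommand('Compression', 'enabled'), '--name=%s' % 'vdo1']
    print(argv)  # ['/usr/bin/vdo', 'enableCompression', '--name=vdo1']

One if/else branch per feature collapses into one run_command call, so the rc handling that follows it is written only once.
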
@@ -272,8 +272,7 @@ class XFConfProperty(CmdMixin, StateMixin, ModuleHelper):


def main():
    xfconf = XFConfProperty()
    xfconf.run()
    XFConfProperty.execute()


if __name__ == '__main__':

@@ -202,15 +202,17 @@ def _set_state(module, state):
                             result=success_msg,
                             warnings=module.warnings)

        a2mod_binary = [module.get_bin_path(a2mod_binary)]
        if a2mod_binary is None:
        a2mod_binary_path = module.get_bin_path(a2mod_binary)
        if a2mod_binary_path is None:
            module.fail_json(msg="%s not found. Perhaps this system does not use %s to manage apache" % (a2mod_binary, a2mod_binary))

        a2mod_binary_cmd = [a2mod_binary_path]

        if not want_enabled and force:
            # force exists only for a2dismod on debian
            a2mod_binary.append('-f')
            a2mod_binary_cmd.append('-f')

        result, stdout, stderr = module.run_command(a2mod_binary + [name])
        result, stdout, stderr = module.run_command(a2mod_binary_cmd + [name])

        if _module_is_enabled(module) == want_enabled:
            module.exit_json(changed=True,

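The apache2_module fix matters because the old code wrapped get_bin_path()'s result in a list before the None check, so a one-element list never compared equal to None and the "not found" branch could never fire. A reduced sketch of the corrected ordering (get_bin_path is stubbed with a lambda here):

    def set_state(get_bin_path, name):
        path = get_bin_path(name)  # returns None when the binary is absent
        if path is None:
            raise RuntimeError('%s not found' % name)
        return [path]              # build the argv list only after the check

    print(set_state(lambda n: '/usr/sbin/a2enmod', 'a2enmod'))  # ['/usr/sbin/a2enmod']
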
@@ -62,7 +62,7 @@ options:
  clear:
    description:
      - Clear the existing files before trying to copy or link the original file.
      - Used only with the 'collectstatic' command. The C(--noinput) argument will be added automatically.
      - Used only with the C(collectstatic) command. The C(--noinput) argument will be added automatically.
    required: false
    default: no
    type: bool

@@ -109,9 +109,9 @@ options:
    required: false
    aliases: [test_runner]
notes:
  - C(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter
  - C(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the I(virtualenv) parameter
    is specified.
  - This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already
  - This module will create a virtualenv if the I(virtualenv) parameter is specified and a virtual environment does not already
    exist at the given location.
  - This module assumes English error messages for the C(createcachetable) command to detect table existence,
    unfortunately.

@@ -158,6 +158,7 @@ EXAMPLES = """

import os
import sys
import shlex

from ansible.module_utils.basic import AnsibleModule

@@ -273,61 +274,65 @@ def main():
        ),
    )

    command = module.params['command']
    command_split = shlex.split(module.params['command'])
    command_bin = command_split[0]
    project_path = module.params['project_path']
    virtualenv = module.params['virtualenv']

    for param in specific_params:
        value = module.params[param]
        if value and param not in command_allowed_param_map[command]:
            module.fail_json(msg='%s param is incompatible with command=%s' % (param, command))
        if value and param not in command_allowed_param_map[command_bin]:
            module.fail_json(msg='%s param is incompatible with command=%s' % (param, command_bin))

    for param in command_required_param_map.get(command, ()):
    for param in command_required_param_map.get(command_bin, ()):
        if not module.params[param]:
            module.fail_json(msg='%s param is required for command=%s' % (param, command))
            module.fail_json(msg='%s param is required for command=%s' % (param, command_bin))

    _ensure_virtualenv(module)

    cmd = ["./manage.py", command]
    run_cmd_args = ["./manage.py"] + command_split

    if command in noinput_commands:
        cmd.append("--noinput")
    if command_bin in noinput_commands and '--noinput' not in command_split:
        run_cmd_args.append("--noinput")

    for param in general_params:
        if module.params[param]:
            cmd.append('--%s=%s' % (param, module.params[param]))
            run_cmd_args.append('--%s=%s' % (param, module.params[param]))

    for param in specific_boolean_params:
        if module.params[param]:
            cmd.append('--%s' % param)
            run_cmd_args.append('--%s' % param)

    # these params always get tacked on the end of the command
    for param in end_of_command_params:
        if module.params[param]:
            cmd.append(module.params[param])
            if param in ('fixtures', 'apps'):
                run_cmd_args.extend(shlex.split(module.params[param]))
            else:
                run_cmd_args.append(module.params[param])

    rc, out, err = module.run_command(cmd, cwd=project_path)
    rc, out, err = module.run_command(run_cmd_args, cwd=project_path)
    if rc != 0:
        if command == 'createcachetable' and 'table' in err and 'already exists' in err:
        if command_bin == 'createcachetable' and 'table' in err and 'already exists' in err:
            out = 'already exists.'
        else:
            if "Unknown command:" in err:
                _fail(module, cmd, err, "Unknown django command: %s" % command)
            _fail(module, cmd, out, err, path=os.environ["PATH"], syspath=sys.path)
                _fail(module, run_cmd_args, err, "Unknown django command: %s" % command_bin)
            _fail(module, run_cmd_args, out, err, path=os.environ["PATH"], syspath=sys.path)

    changed = False

    lines = out.split('\n')
    filt = globals().get(command + "_filter_output", None)
    filt = globals().get(command_bin + "_filter_output", None)
    if filt:
        filtered_output = list(filter(filt, lines))
        if len(filtered_output):
            changed = True
    check_changed = globals().get("{0}_check_changed".format(command), None)
    check_changed = globals().get("{0}_check_changed".format(command_bin), None)
    if check_changed:
        changed = check_changed(out)

    module.exit_json(changed=changed, out=out, cmd=cmd, app_path=project_path, project_path=project_path,
    module.exit_json(changed=changed, out=out, cmd=run_cmd_args, app_path=project_path, project_path=project_path,
                     virtualenv=virtualenv, settings=module.params['settings'], pythonpath=module.params['pythonpath'])

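The django_manage rework splits the command option with shlex so users may pass inline arguments, while validation still keys off the bare command name. A short runnable sketch (the command value and the noinput list below are illustrative subsets):

    import shlex

    command = 'collectstatic --dry-run'   # hypothetical module input
    command_split = shlex.split(command)
    command_bin = command_split[0]         # 'collectstatic' drives the checks

    run_cmd_args = ['./manage.py'] + command_split
    noinput_commands = ['collectstatic']
    if command_bin in noinput_commands and '--noinput' not in command_split:
        run_cmd_args.append('--noinput')
    print(run_cmd_args)
    # ['./manage.py', 'collectstatic', '--dry-run', '--noinput']

The '--noinput' membership test also keeps the flag from being appended twice when the user already supplied it inline.
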
@@ -142,7 +142,7 @@ def main():
            # Clean up old failed deployment
            os.remove(os.path.join(deploy_path, "%s.failed" % deployment))

        shutil.copyfile(src, os.path.join(deploy_path, deployment))
        module.preserved_copy(src, os.path.join(deploy_path, deployment))
        while not deployed:
            deployed = is_deployed(deploy_path, deployment)
            if is_failed(deploy_path, deployment):

@@ -153,7 +153,7 @@ def main():
    if state == 'present' and deployed:
        if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
            os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
            shutil.copyfile(src, os.path.join(deploy_path, deployment))
            module.preserved_copy(src, os.path.join(deploy_path, deployment))
            deployed = False
            while not deployed:
                deployed = is_deployed(deploy_path, deployment)

@@ -0,0 +1,193 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Phillipe Smith <phsmithcc@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: rundeck_job_executions_info
short_description: Query executions for a Rundeck job
description:
    - This module gets the list of executions for a specified Rundeck job.
author: "Phillipe Smith (@phsmith)"
version_added: 3.8.0
options:
    job_id:
        type: str
        description:
            - The job unique ID.
        required: true
    status:
        type: str
        description:
            - The job status to filter.
        choices: [succeeded, failed, aborted, running]
    max:
        type: int
        description:
            - Max results to return.
        default: 20
    offset:
        type: int
        description:
            - The start point to return the results.
        default: 0
extends_documentation_fragment:
    - community.general.rundeck
    - url
'''

EXAMPLES = '''
- name: Get Rundeck job executions info
  community.general.rundeck_job_executions_info:
    url: "https://rundeck.example.org"
    api_version: 39
    api_token: "mytoken"
    job_id: "xxxxxxxxxxxxxxxxx"
  register: rundeck_job_executions_info

- name: Show Rundeck job executions info
  ansible.builtin.debug:
    var: rundeck_job_executions_info.executions
'''

RETURN = '''
paging:
    description: Results pagination info.
    returned: success
    type: dict
    contains:
        count:
            description: Number of results in the response.
            type: int
            returned: success
        total:
            description: Total number of results.
            type: int
            returned: success
        offset:
            description: Offset from first of all results.
            type: int
            returned: success
        max:
            description: Maximum number of results per page.
            type: int
            returned: success
    sample: {
        "count": 20,
        "total": 100,
        "offset": 0,
        "max": 20
    }
executions:
    description: Job executions list.
    returned: always
    type: list
    elements: dict
    sample: [
        {
            "id": 1,
            "href": "https://rundeck.example.org/api/39/execution/1",
            "permalink": "https://rundeck.example.org/project/myproject/execution/show/1",
            "status": "succeeded",
            "project": "myproject",
            "executionType": "user",
            "user": "admin",
            "date-started": {
                "unixtime": 1633525515026,
                "date": "2021-10-06T13:05:15Z"
            },
            "date-ended": {
                "unixtime": 1633525518386,
                "date": "2021-10-06T13:05:18Z"
            },
            "job": {
                "id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
                "averageDuration": 6381,
                "name": "Test",
                "group": "",
                "project": "myproject",
                "description": "",
                "options": {
                    "exit_code": "0"
                },
                "href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
                "permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a"
            },
            "description": "Plugin[com.batix.rundeck.plugins.AnsiblePlaybookInlineWorkflowStep, nodeStep: false]",
            "argstring": "-exit_code 0",
            "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068"
        }
    ]
'''

# Modules import
import json

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six.moves.urllib.parse import quote
from ansible_collections.community.general.plugins.module_utils.rundeck import (
    api_argument_spec,
    api_request
)


class RundeckJobExecutionsInfo(object):
    def __init__(self, module):
        self.module = module
        self.url = self.module.params["url"]
        self.api_version = self.module.params["api_version"]
        self.job_id = self.module.params["job_id"]
        self.offset = self.module.params["offset"]
        self.max = self.module.params["max"]
        self.status = self.module.params["status"] or ""

    def job_executions(self):
        response, info = api_request(
            module=self.module,
            endpoint="job/%s/executions?offset=%s&max=%s&status=%s"
                     % (quote(self.job_id), self.offset, self.max, self.status),
            method="GET"
        )

        if info["status"] != 200:
            self.module.fail_json(
                msg=info["msg"],
                executions=response
            )

        self.module.exit_json(msg="Executions info result", **response)


def main():
    argument_spec = api_argument_spec()
    argument_spec.update(dict(
        job_id=dict(required=True, type="str"),
        offset=dict(type="int", default=0),
        max=dict(type="int", default=20),
        status=dict(
            type="str",
            choices=["succeeded", "failed", "aborted", "running"]
        )
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    if module.params["api_version"] < 14:
        module.fail_json(msg="API version should be at least 14")

    rundeck = RundeckJobExecutionsInfo(module)
    rundeck.job_executions()


if __name__ == "__main__":
    main()
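
For the new rundeck_job_executions_info module, offset and max map directly onto Rundeck's paging query string, as the endpoint built in job_executions() shows. A hypothetical sketch of issuing the same request with plain urllib (host, token, and job ID are placeholders; the network calls are left commented out):

    import json
    from urllib.request import Request, urlopen

    base = "https://rundeck.example.org/api/39"              # placeholder host
    headers = {"X-Rundeck-Auth-Token": "mytoken",            # placeholder token
               "Accept": "application/json"}

    def fetch(job_id, offset, page_max=20):
        # Same query-string shape as the module's endpoint above.
        url = "%s/job/%s/executions?offset=%d&max=%d" % (base, job_id, offset, page_max)
        with urlopen(Request(url, headers=headers)) as resp:
            return json.load(resp)

    # Walking all pages: advance offset by the page size until
    # paging['total'] is exhausted.
    # first = fetch("697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", 0)
    # total = first["paging"]["total"]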