mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-04-29 01:46:53 +00:00
Compare commits
42 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5bd5de4281 | ||
|
|
4aebefcf9e | ||
|
|
62f9a5b0a9 | ||
|
|
3d03eda99e | ||
|
|
c01ce10b4b | ||
|
|
16aa776c93 | ||
|
|
d7d1659e34 | ||
|
|
5b9b99384f | ||
|
|
f898279c8c | ||
|
|
2215c6d360 | ||
|
|
ca3948858a | ||
|
|
f14e566cc7 | ||
|
|
a2c93f5e99 | ||
|
|
67a2abcab2 | ||
|
|
2e4864db7f | ||
|
|
1f0b2a5173 | ||
|
|
25482000f0 | ||
|
|
c0f3aa14cf | ||
|
|
1ef104be61 | ||
|
|
773df88a41 | ||
|
|
d77e256088 | ||
|
|
2917389779 | ||
|
|
59af80235b | ||
|
|
aec52198e3 | ||
|
|
cbe4490c9e | ||
|
|
9de059b44d | ||
|
|
c72a23a5f1 | ||
|
|
0b9d9c0fdb | ||
|
|
67eaf9405f | ||
|
|
5de05a6243 | ||
|
|
46b4b9a6de | ||
|
|
10146aae1c | ||
|
|
d2ec7053c5 | ||
|
|
51fcacae08 | ||
|
|
29211b970c | ||
|
|
5c1fa53558 | ||
|
|
2348f3d439 | ||
|
|
46a051d168 | ||
|
|
b2212bc8ef | ||
|
|
e05e3aed67 | ||
|
|
a13541299e | ||
|
|
221067e708 |
@@ -68,6 +68,19 @@ stages:
|
||||
- test: 3
|
||||
- test: 4
|
||||
- test: extra
|
||||
- stage: Sanity_2_12
|
||||
displayName: Sanity 2.12
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Test {0}
|
||||
testFormat: 2.12/sanity/{0}
|
||||
targets:
|
||||
- test: 1
|
||||
- test: 2
|
||||
- test: 3
|
||||
- test: 4
|
||||
- stage: Sanity_2_11
|
||||
displayName: Sanity 2.11
|
||||
dependsOn: []
|
||||
@@ -117,7 +130,6 @@ stages:
|
||||
nameFormat: Python {0}
|
||||
testFormat: devel/units/{0}/1
|
||||
targets:
|
||||
- test: 2.6
|
||||
- test: 2.7
|
||||
- test: 3.5
|
||||
- test: 3.6
|
||||
@@ -125,6 +137,22 @@ stages:
|
||||
- test: 3.8
|
||||
- test: 3.9
|
||||
- test: '3.10'
|
||||
- stage: Units_2_12
|
||||
displayName: Units 2.12
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.12/units/{0}/1
|
||||
targets:
|
||||
- test: 2.6
|
||||
- test: 2.7
|
||||
- test: 3.5
|
||||
- test: 3.6
|
||||
- test: 3.7
|
||||
- test: 3.8
|
||||
- test: '3.10'
|
||||
- stage: Units_2_11
|
||||
displayName: Units 2.11
|
||||
dependsOn: []
|
||||
@@ -150,13 +178,8 @@ stages:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.10/units/{0}/1
|
||||
targets:
|
||||
- test: 2.6
|
||||
- test: 2.7
|
||||
- test: 3.5
|
||||
- test: 3.6
|
||||
- test: 3.7
|
||||
- test: 3.8
|
||||
- test: 3.9
|
||||
- stage: Units_2_9
|
||||
displayName: Units 2.9
|
||||
dependsOn: []
|
||||
@@ -196,6 +219,23 @@ stages:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Remote_2_12
|
||||
displayName: Remote 2.12
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: 2.12/{0}
|
||||
targets:
|
||||
- name: macOS 11.1
|
||||
test: macos/11.1
|
||||
- name: RHEL 8.4
|
||||
test: rhel/8.4
|
||||
- name: FreeBSD 13.0
|
||||
test: freebsd/13.0
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- stage: Remote_2_11
|
||||
displayName: Remote 2.11
|
||||
dependsOn: []
|
||||
@@ -204,8 +244,6 @@ stages:
|
||||
parameters:
|
||||
testFormat: 2.11/{0}
|
||||
targets:
|
||||
- name: macOS 11.1
|
||||
test: macos/11.1
|
||||
- name: RHEL 7.9
|
||||
test: rhel/7.9
|
||||
- name: RHEL 8.3
|
||||
@@ -227,14 +265,6 @@ stages:
|
||||
test: osx/10.11
|
||||
- name: macOS 10.15
|
||||
test: macos/10.15
|
||||
- name: macOS 11.1
|
||||
test: macos/11.1
|
||||
- name: RHEL 7.8
|
||||
test: rhel/7.8
|
||||
- name: RHEL 8.2
|
||||
test: rhel/8.2
|
||||
- name: FreeBSD 12.1
|
||||
test: freebsd/12.1
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
@@ -248,6 +278,8 @@ stages:
|
||||
targets:
|
||||
- name: RHEL 8.2
|
||||
test: rhel/8.2
|
||||
- name: RHEL 7.8
|
||||
test: rhel/7.8
|
||||
- name: FreeBSD 12.0
|
||||
test: freebsd/12.0
|
||||
groups:
|
||||
@@ -263,8 +295,6 @@ stages:
|
||||
parameters:
|
||||
testFormat: devel/linux/{0}
|
||||
targets:
|
||||
- name: CentOS 6
|
||||
test: centos6
|
||||
- name: CentOS 7
|
||||
test: centos7
|
||||
- name: CentOS 8
|
||||
@@ -285,6 +315,28 @@ stages:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Docker_2_12
|
||||
displayName: Docker 2.12
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: 2.12/linux/{0}
|
||||
targets:
|
||||
- name: CentOS 6
|
||||
test: centos6
|
||||
- name: CentOS 7
|
||||
test: centos7
|
||||
- name: Fedora 34
|
||||
test: fedora34
|
||||
- name: openSUSE 15 py3
|
||||
test: opensuse15
|
||||
- name: Ubuntu 20.04
|
||||
test: ubuntu2004
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Docker_2_11
|
||||
displayName: Docker 2.11
|
||||
dependsOn: []
|
||||
@@ -297,10 +349,8 @@ stages:
|
||||
test: centos8
|
||||
- name: Fedora 33
|
||||
test: fedora33
|
||||
- name: openSUSE 15 py3
|
||||
test: opensuse15
|
||||
- name: Ubuntu 20.04
|
||||
test: ubuntu2004
|
||||
- name: openSUSE 15 py2
|
||||
test: opensuse15py2
|
||||
groups:
|
||||
- 2
|
||||
- 3
|
||||
@@ -312,12 +362,8 @@ stages:
|
||||
parameters:
|
||||
testFormat: 2.10/linux/{0}
|
||||
targets:
|
||||
- name: CentOS 8
|
||||
test: centos8
|
||||
- name: Fedora 32
|
||||
test: fedora32
|
||||
- name: openSUSE 15 py3
|
||||
test: opensuse15
|
||||
- name: Ubuntu 16.04
|
||||
test: ubuntu1604
|
||||
groups:
|
||||
@@ -331,8 +377,6 @@ stages:
|
||||
parameters:
|
||||
testFormat: 2.9/linux/{0}
|
||||
targets:
|
||||
- name: CentOS 8
|
||||
test: centos8
|
||||
- name: Fedora 31
|
||||
test: fedora31
|
||||
- name: openSUSE 15 py3
|
||||
@@ -350,6 +394,17 @@ stages:
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: devel/cloud/{0}/1
|
||||
targets:
|
||||
- test: 2.7
|
||||
- test: 3.9
|
||||
- stage: Cloud_2_12
|
||||
displayName: Cloud 2.12
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.12/cloud/{0}/1
|
||||
targets:
|
||||
- test: 3.8
|
||||
- stage: Cloud_2_11
|
||||
@@ -361,7 +416,6 @@ stages:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.11/cloud/{0}/1
|
||||
targets:
|
||||
- test: 2.7
|
||||
- test: 3.6
|
||||
- stage: Cloud_2_10
|
||||
displayName: Cloud 2.10
|
||||
@@ -372,7 +426,7 @@ stages:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.10/cloud/{0}/1
|
||||
targets:
|
||||
- test: 3.6
|
||||
- test: 3.5
|
||||
- stage: Cloud_2_9
|
||||
displayName: Cloud 2.9
|
||||
dependsOn: []
|
||||
@@ -382,7 +436,7 @@ stages:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.9/cloud/{0}/1
|
||||
targets:
|
||||
- test: 3.6
|
||||
- test: 2.7
|
||||
- stage: Summary
|
||||
condition: succeededOrFailed()
|
||||
dependsOn:
|
||||
@@ -390,21 +444,26 @@ stages:
|
||||
- Sanity_2_9
|
||||
- Sanity_2_10
|
||||
- Sanity_2_11
|
||||
- Sanity_2_12
|
||||
- Units_devel
|
||||
- Units_2_9
|
||||
- Units_2_10
|
||||
- Units_2_11
|
||||
- Units_2_12
|
||||
- Remote_devel
|
||||
- Remote_2_9
|
||||
- Remote_2_10
|
||||
- Remote_2_11
|
||||
- Remote_2_12
|
||||
- Docker_devel
|
||||
- Docker_2_9
|
||||
- Docker_2_10
|
||||
- Docker_2_11
|
||||
- Docker_2_12
|
||||
- Cloud_devel
|
||||
- Cloud_2_9
|
||||
- Cloud_2_10
|
||||
- Cloud_2_11
|
||||
- Cloud_2_12
|
||||
jobs:
|
||||
- template: templates/coverage.yml
|
||||
|
||||
14
.github/BOTMETA.yml
vendored
14
.github/BOTMETA.yml
vendored
@@ -1,3 +1,4 @@
|
||||
notifications: true
|
||||
automerge: true
|
||||
files:
|
||||
plugins/:
|
||||
@@ -48,6 +49,9 @@ files:
|
||||
maintainers: dagwieers
|
||||
$callbacks/diy.py:
|
||||
maintainers: theque5t
|
||||
$callbacks/elastic.py:
|
||||
maintainers: v1v
|
||||
keywords: apm observability
|
||||
$callbacks/hipchat.py: {}
|
||||
$callbacks/jabber.py: {}
|
||||
$callbacks/loganalytics.py:
|
||||
@@ -153,6 +157,10 @@ files:
|
||||
$inventories/nmap.py: {}
|
||||
$inventories/online.py:
|
||||
maintainers: sieben
|
||||
$inventories/opennebula.py:
|
||||
maintainers: feldsam
|
||||
labels: cloud opennebula
|
||||
keywords: opennebula dynamic inventory script
|
||||
$inventories/proxmox.py:
|
||||
maintainers: $team_virt ilijamt
|
||||
$inventories/icinga2.py:
|
||||
@@ -753,6 +761,8 @@ files:
|
||||
ignore: jle64
|
||||
$modules/packaging/language/pip_package_info.py:
|
||||
maintainers: bcoca matburt maxamillion
|
||||
$modules/packaging/language/pipx.py:
|
||||
maintainers: russoz
|
||||
$modules/packaging/language/yarn.py:
|
||||
maintainers: chrishoffman verkaufer
|
||||
$modules/packaging/os/apk.py:
|
||||
@@ -1153,6 +1163,10 @@ files:
|
||||
maintainers: nerzhul
|
||||
$modules/web_infrastructure/rundeck_project.py:
|
||||
maintainers: nerzhul
|
||||
$modules/web_infrastructure/rundeck_job_run.py:
|
||||
maintainers: phsmith
|
||||
$modules/web_infrastructure/rundeck_job_executions_info.py:
|
||||
maintainers: phsmith
|
||||
$modules/web_infrastructure/sophos_utm/:
|
||||
maintainers: $team_e_spirit
|
||||
keywords: sophos utm
|
||||
|
||||
@@ -6,6 +6,81 @@ Community General Release Notes
|
||||
|
||||
This changelog describes changes after version 2.0.0.
|
||||
|
||||
v3.8.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular feature and bugfix release. Please note that this is the last minor 3.x.0 release; afterwards there will only be bugfix releases 3.8.y.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- mail - added the ``ehlohost`` parameter which allows for manual override of the host used in SMTP EHLO (https://github.com/ansible-collections/community.general/pull/3425).
|
||||
- nmcli - the option ``routing_rules4`` can now be specified as a list of strings, instead of as a single string (https://github.com/ansible-collections/community.general/issues/3401).
|
||||
- open-iscsi - adding support for mutual authentication between target and initiator (https://github.com/ansible-collections/community.general/pull/3422).
|
||||
- opentelemetry callback plugin - added option ``enable_from_environment`` to support enabling the plugin only if the given environment variable exists and it is set to true (https://github.com/ansible-collections/community.general/pull/3498).
|
||||
- opentelemetry callback plugin - enriched the stacktrace information with the ``message``, ``exception`` and ``stderr`` fields from the failed task (https://github.com/ansible-collections/community.general/pull/3496).
|
||||
- pkgng - packages being installed (or upgraded) are acted on in one command (per action) (https://github.com/ansible-collections/community.general/issues/2265).
|
||||
- pkgng - status message specifies number of packages installed and/or upgraded separately. Previously, all changes were reported as one count of packages "added" (https://github.com/ansible-collections/community.general/pull/3393).
|
||||
- terraform - add ``parallelism`` parameter (https://github.com/ansible-collections/community.general/pull/3540).
|
||||
- ufw - if ``delete=true`` and ``insert`` option is present, then ``insert`` is now ignored rather than failing with a syntax error (https://github.com/ansible-collections/community.general/pull/3514).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- gitlab_deploy_key - fix idempotency on projects with multiple deploy keys (https://github.com/ansible-collections/community.general/pull/3473).
|
||||
- gitlab_group - avoid passing wrong value for ``require_two_factor_authentication`` on creation when the option has not been specified (https://github.com/ansible-collections/community.general/pull/3453).
|
||||
- gitlab_group_members - ``get_group_id`` return the group ID by matching ``full_path``, ``path`` or ``name`` (https://github.com/ansible-collections/community.general/pull/3400).
|
||||
- jboss - fix the deployment file permission issue when Jboss server is running under non-root user. The deployment file is copied with file content only. The file permission is set to ``440`` and belongs to root user. When the JBoss ``WildFly`` server is running under non-root user, it is unable to read the deployment file (https://github.com/ansible-collections/community.general/pull/3426).
|
||||
- keycloak_authentication - fix bug, the requirement was always on ``DISABLED`` when creating a new authentication flow (https://github.com/ansible-collections/community.general/pull/3330).
|
||||
- keycloak_identity_provider - fix change detection when updating identity provider mappers (https://github.com/ansible-collections/community.general/pull/3538, https://github.com/ansible-collections/community.general/issues/3537).
|
||||
- keycloak_role - quote role name when used in URL path to avoid errors when role names contain special characters (https://github.com/ansible-collections/community.general/issues/3535, https://github.com/ansible-collections/community.general/pull/3536).
|
||||
- logstash callback plugin - replace ``_option`` with ``context.CLIARGS`` to fix the plugin on ansible-base and ansible-core (https://github.com/ansible-collections/community.general/issues/2692).
|
||||
- macports - add ``stdout`` and ``stderr`` to return values (https://github.com/ansible-collections/community.general/issues/3499).
|
||||
- opentelemetry callback plugin - validated the task result exception without crashing. Also simplifying code a bit (https://github.com/ansible-collections/community.general/pull/3450, https://github.com/ansible/ansible/issues/75726).
|
||||
- yaml callback plugin - avoid modifying PyYAML so that other plugins using it on the controller, like the ``to_yaml`` filter, do not produce different output (https://github.com/ansible-collections/community.general/issues/3471, https://github.com/ansible-collections/community.general/pull/3478).
|
||||
- zypper_repository - when an URL to a .repo file was provided in option ``repo=`` and ``state=present`` only the first run was successful, future runs failed due to missing checks prior starting zypper. Usage of ``state=absent`` in combination with a .repo file was not working either (https://github.com/ansible-collections/community.general/issues/1791, https://github.com/ansible-collections/community.general/issues/3466).
|
||||
|
||||
New Plugins
|
||||
-----------
|
||||
|
||||
Callback
|
||||
~~~~~~~~
|
||||
|
||||
- elastic - Create distributed traces for each Ansible task in Elastic APM
|
||||
|
||||
Inventory
|
||||
~~~~~~~~~
|
||||
|
||||
- opennebula - OpenNebula inventory source
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
Cloud
|
||||
~~~~~
|
||||
|
||||
misc
|
||||
^^^^
|
||||
|
||||
- proxmox_tasks_info - Retrieve information about one or more Proxmox VE tasks
|
||||
|
||||
Packaging
|
||||
~~~~~~~~~
|
||||
|
||||
language
|
||||
^^^^^^^^
|
||||
|
||||
- pipx - Manages applications installed with pipx
|
||||
|
||||
Web Infrastructure
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
- rundeck_job_executions_info - Query executions for a Rundeck job
|
||||
- rundeck_job_run - Run a Rundeck job
|
||||
|
||||
v3.7.0
|
||||
======
|
||||
|
||||
|
||||
@@ -26,6 +26,7 @@ Also, consider taking up a valuable, reviewed, but abandoned pull request which
|
||||
* Do not squash your commits and force-push to your branch if not needed. Reviews of your pull request are much easier with individual commits to comprehend the pull request history. All commits of your pull request branch will be squashed into one commit by GitHub upon merge.
|
||||
* Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the respository checkout.
|
||||
* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#changelogs-how-to). (You must not include a fragment for new modules or new plugins, except for test and filter plugins. Also you shouldn't include one for docs-only changes. If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) )
|
||||
* Avoid reformatting unrelated parts of the codebase in your PR. These types of changes will likely be requested for reversion, create additional work for reviewers, and may cause approval to be delayed.
|
||||
|
||||
You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst).
|
||||
|
||||
@@ -42,7 +43,12 @@ Creating new modules and plugins requires a bit more work than other Pull Reques
|
||||
1. Please make sure that your new module or plugin is of interest to a larger audience. Very specialized modules or plugins that
|
||||
can only be used by very few people should better be added to more specialized collections.
|
||||
|
||||
2. When creating a new module or plugin, please make sure that you follow various guidelines:
|
||||
2. Please do not add more than one plugin/module in one PR, especially if it is the first plugin/module you are contributing.
|
||||
That makes it easier for reviewers, and increases the chance that your PR will get merged. If you plan to contribute a group
|
||||
of plugins/modules (say, more than a module and a corresponding ``_info`` module), please mention that in the first PR. In
|
||||
such cases, you also have to think whether it is better to publish the group of plugins/modules in a new collection.
|
||||
|
||||
3. When creating a new module or plugin, please make sure that you follow various guidelines:
|
||||
|
||||
- Follow [development conventions](https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_best_practices.html);
|
||||
- Follow [documentation standards](https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_documenting.html) and
|
||||
@@ -52,7 +58,7 @@ Creating new modules and plugins requires a bit more work than other Pull Reques
|
||||
- Make sure that new plugins and modules have tests (unit tests, integration tests, or both); it is preferable to have some tests
|
||||
which run in CI.
|
||||
|
||||
3. For modules and action plugins, make sure to create your module/plugin in the correct subdirectory, and create a symbolic link
|
||||
4. For modules and action plugins, make sure to create your module/plugin in the correct subdirectory, and create a symbolic link
|
||||
from `plugins/modules/` respectively `plugins/action/` to the actual module/plugin code. (Other plugin types should not use
|
||||
subdirectories.)
|
||||
|
||||
@@ -60,7 +66,7 @@ Creating new modules and plugins requires a bit more work than other Pull Reques
|
||||
(`DOCUMENTATION`, `EXAMPLES` and `RETURN`). The module must have the same name and directory path in `plugins/modules/`
|
||||
than the action plugin has in `plugins/action/`.
|
||||
|
||||
4. Make sure to add a BOTMETA entry for your new module/plugin in `.github/BOTMETA.yml`. Search for other plugins/modules in the
|
||||
5. Make sure to add a BOTMETA entry for your new module/plugin in `.github/BOTMETA.yml`. Search for other plugins/modules in the
|
||||
same directory to see how entries could look. You should list all authors either as `maintainers` or under `ignore`. People
|
||||
listed as `maintainers` will be pinged for new issues and PRs that modify the module/plugin or its tests.
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ If you encounter abusive behavior violating the [Ansible Code of Conduct](https:
|
||||
|
||||
## Tested with Ansible
|
||||
|
||||
Tested with the current Ansible 2.9, ansible-base 2.10 and ansible-core 2.11 releases and the current development version of ansible-core. Ansible versions before 2.9.10 are not supported.
|
||||
Tested with the current Ansible 2.9, ansible-base 2.10, ansible-core 2.11, ansible-core 2.12 releases and the current development version of ansible-core. Ansible versions before 2.9.10 are not supported.
|
||||
|
||||
## External requirements
|
||||
|
||||
|
||||
@@ -1835,3 +1835,108 @@ releases:
|
||||
name: icinga2
|
||||
namespace: null
|
||||
release_date: '2021-09-21'
|
||||
3.8.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- gitlab_deploy_key - fix idempotency on projects with multiple deploy keys
|
||||
(https://github.com/ansible-collections/community.general/pull/3473).
|
||||
- gitlab_group - avoid passing wrong value for ``require_two_factor_authentication``
|
||||
on creation when the option has not been specified (https://github.com/ansible-collections/community.general/pull/3453).
|
||||
- gitlab_group_members - ``get_group_id`` return the group ID by matching ``full_path``,
|
||||
``path`` or ``name`` (https://github.com/ansible-collections/community.general/pull/3400).
|
||||
- jboss - fix the deployment file permission issue when Jboss server is running
|
||||
under non-root user. The deployment file is copied with file content only.
|
||||
The file permission is set to ``440`` and belongs to root user. When the JBoss
|
||||
``WildFly`` server is running under non-root user, it is unable to read the
|
||||
deployment file (https://github.com/ansible-collections/community.general/pull/3426).
|
||||
- keycloak_authentication - fix bug, the requirement was always on ``DISABLED``
|
||||
when creating a new authentication flow (https://github.com/ansible-collections/community.general/pull/3330).
|
||||
- keycloak_identity_provider - fix change detection when updating identity provider
|
||||
mappers (https://github.com/ansible-collections/community.general/pull/3538,
|
||||
https://github.com/ansible-collections/community.general/issues/3537).
|
||||
- keycloak_role - quote role name when used in URL path to avoid errors when
|
||||
role names contain special characters (https://github.com/ansible-collections/community.general/issues/3535,
|
||||
https://github.com/ansible-collections/community.general/pull/3536).
|
||||
- logstash callback plugin - replace ``_option`` with ``context.CLIARGS`` to
|
||||
fix the plugin on ansible-base and ansible-core (https://github.com/ansible-collections/community.general/issues/2692).
|
||||
- macports - add ``stdout`` and ``stderr`` to return values (https://github.com/ansible-collections/community.general/issues/3499).
|
||||
- opentelemetry callback plugin - validated the task result exception without
|
||||
crashing. Also simplifying code a bit (https://github.com/ansible-collections/community.general/pull/3450,
|
||||
https://github.com/ansible/ansible/issues/75726).
|
||||
- yaml callback plugin - avoid modifying PyYAML so that other plugins using
|
||||
it on the controller, like the ``to_yaml`` filter, do not produce different
|
||||
output (https://github.com/ansible-collections/community.general/issues/3471,
|
||||
https://github.com/ansible-collections/community.general/pull/3478).
|
||||
- zypper_repository - when an URL to a .repo file was provided in option ``repo=``
|
||||
and ``state=present`` only the first run was successful, future runs failed
|
||||
due to missing checks prior starting zypper. Usage of ``state=absent`` in
|
||||
combination with a .repo file was not working either (https://github.com/ansible-collections/community.general/issues/1791,
|
||||
https://github.com/ansible-collections/community.general/issues/3466).
|
||||
minor_changes:
|
||||
- mail - added the ``ehlohost`` parameter which allows for manual override of
|
||||
the host used in SMTP EHLO (https://github.com/ansible-collections/community.general/pull/3425).
|
||||
- nmcli - the option ``routing_rules4`` can now be specified as a list of strings,
|
||||
instead of as a single string (https://github.com/ansible-collections/community.general/issues/3401).
|
||||
- open-iscsi - adding support for mutual authentication between target and initiator
|
||||
(https://github.com/ansible-collections/community.general/pull/3422).
|
||||
- opentelemetry callback plugin - added option ``enable_from_environment`` to
|
||||
support enabling the plugin only if the given environment variable exists
|
||||
and it is set to true (https://github.com/ansible-collections/community.general/pull/3498).
|
||||
- opentelemetry callback plugin - enriched the stacktrace information with the
|
||||
``message``, ``exception`` and ``stderr`` fields from the failed task (https://github.com/ansible-collections/community.general/pull/3496).
|
||||
- pkgng - packages being installed (or upgraded) are acted on in one command
|
||||
(per action) (https://github.com/ansible-collections/community.general/issues/2265).
|
||||
- pkgng - status message specifies number of packages installed and/or upgraded
|
||||
separately. Previously, all changes were reported as one count of packages
|
||||
"added" (https://github.com/ansible-collections/community.general/pull/3393).
|
||||
- terraform - add ``parallelism`` parameter (https://github.com/ansible-collections/community.general/pull/3540).
|
||||
- ufw - if ``delete=true`` and ``insert`` option is present, then ``insert``
|
||||
is now ignored rather than failing with a syntax error (https://github.com/ansible-collections/community.general/pull/3514).
|
||||
release_summary: Regular feature and bugfix release. Please note that this is
|
||||
the last minor 3.x.0 release; afterwards there will only be bugfix releases
|
||||
3.8.y.
|
||||
fragments:
|
||||
- 2692-logstash-callback-plugin-replacing_options.yml
|
||||
- 3.8.0.yml
|
||||
- 3330-bugfix-keycloak-authentication-flow-requirements-not-set-correctly.yml.yml
|
||||
- 3393-pkgng-many_packages_one_command.yml
|
||||
- 3400-fix-gitLab-api-searches-always-return-first-found-match-3386.yml
|
||||
- 3401-nmcli-needs-type.yml
|
||||
- 3422-open-iscsi-mutual-authentication-support.yaml
|
||||
- 3425-mail_add_configurable_ehlo_hostname.yml
|
||||
- 3426-copy-permissions-along-with-file-for-jboss-module.yml
|
||||
- 3450-callback_opentelemetry-exception_handling.yml
|
||||
- 3453-fix-gitlab_group-require_two_factor_authentication-cant_be_null.yml
|
||||
- 3473-gitlab_deploy_key-fix_idempotency.yml
|
||||
- 3474-zypper_repository_improve_repo_file_idempotency.yml
|
||||
- 3478-yaml-callback.yml
|
||||
- 3496-callback_opentelemetry-enrich_stacktraces.yml
|
||||
- 3498-callback_opentelemetry-only_in_ci.yml
|
||||
- 3500-macports-add-stdout-and-stderr-to-status.yaml
|
||||
- 3514-ufw_insert_or_delete_biased_when_deletion_enabled.yml
|
||||
- 3536-quote-role-name-in-url.yml
|
||||
- 3538-fix-keycloak-idp-mappers-change-detection.yml
|
||||
- 3540-terraform_add_parallelism_parameter.yml
|
||||
modules:
|
||||
- description: Manages applications installed with pipx
|
||||
name: pipx
|
||||
namespace: packaging.language
|
||||
- description: Retrieve information about one or more Proxmox VE tasks
|
||||
name: proxmox_tasks_info
|
||||
namespace: cloud.misc
|
||||
- description: Query executions for a Rundeck job
|
||||
name: rundeck_job_executions_info
|
||||
namespace: web_infrastructure
|
||||
- description: Run a Rundeck job
|
||||
name: rundeck_job_run
|
||||
namespace: web_infrastructure
|
||||
plugins:
|
||||
callback:
|
||||
- description: Create distributed traces for each Ansible task in Elastic APM
|
||||
name: elastic
|
||||
namespace: null
|
||||
inventory:
|
||||
- description: OpenNebula inventory source
|
||||
name: opennebula
|
||||
namespace: null
|
||||
release_date: '2021-10-12'
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
namespace: community
|
||||
name: general
|
||||
version: 3.7.0
|
||||
version: 3.8.0
|
||||
readme: README.md
|
||||
authors:
|
||||
- Ansible (https://github.com/ansible)
|
||||
|
||||
408
plugins/callback/elastic.py
Normal file
408
plugins/callback/elastic.py
Normal file
@@ -0,0 +1,408 @@
|
||||
# (C) 2021, Victor Martinez <VictorMartinezRubio@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Victor Martinez (@v1v) <VictorMartinezRubio@gmail.com>
|
||||
name: elastic
|
||||
type: notification
|
||||
short_description: Create distributed traces for each Ansible task in Elastic APM
|
||||
version_added: 3.8.0
|
||||
description:
|
||||
- This callback creates distributed traces for each Ansible task in Elastic APM.
|
||||
- You can configure the plugin with environment variables.
|
||||
- See U(https://www.elastic.co/guide/en/apm/agent/python/current/configuration.html).
|
||||
options:
|
||||
hide_task_arguments:
|
||||
default: false
|
||||
type: bool
|
||||
description:
|
||||
- Hide the arguments for a task.
|
||||
env:
|
||||
- name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS
|
||||
apm_service_name:
|
||||
default: ansible
|
||||
type: str
|
||||
description:
|
||||
- The service name resource attribute.
|
||||
env:
|
||||
- name: ELASTIC_APM_SERVICE_NAME
|
||||
apm_server_url:
|
||||
type: str
|
||||
description:
|
||||
- Use the APM server and its environment variables.
|
||||
env:
|
||||
- name: ELASTIC_APM_SERVER_URL
|
||||
apm_secret_token:
|
||||
type: str
|
||||
description:
|
||||
- Use the APM server token
|
||||
env:
|
||||
- name: ELASTIC_APM_SECRET_TOKEN
|
||||
apm_api_key:
|
||||
type: str
|
||||
description:
|
||||
- Use the APM API key
|
||||
env:
|
||||
- name: ELASTIC_APM_API_KEY
|
||||
apm_verify_server_cert:
|
||||
default: true
|
||||
type: bool
|
||||
description:
|
||||
- Verifies the SSL certificate if an HTTPS connection.
|
||||
env:
|
||||
- name: ELASTIC_APM_VERIFY_SERVER_CERT
|
||||
traceparent:
|
||||
type: str
|
||||
description:
|
||||
- The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header).
|
||||
env:
|
||||
- name: TRACEPARENT
|
||||
requirements:
|
||||
- elastic-apm (Python library)
|
||||
'''
|
||||
|
||||
|
||||
EXAMPLES = '''
|
||||
examples: |
|
||||
Enable the plugin in ansible.cfg:
|
||||
[defaults]
|
||||
callbacks_enabled = community.general.elastic
|
||||
|
||||
Set the environment variable:
|
||||
export ELASTIC_APM_SERVER_URL=<your APM server URL)>
|
||||
export ELASTIC_APM_SERVICE_NAME=your_service_name
|
||||
export ELASTIC_APM_API_KEY=your_APM_API_KEY
|
||||
'''
|
||||
|
||||
import getpass
|
||||
import socket
|
||||
import time
|
||||
import uuid
|
||||
|
||||
from collections import OrderedDict
|
||||
from os.path import basename
|
||||
|
||||
from ansible.errors import AnsibleError, AnsibleRuntimeError
|
||||
from ansible.module_utils.six import raise_from
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
|
||||
try:
|
||||
from elasticapm import Client, capture_span, trace_parent_from_string, instrument, label
|
||||
except ImportError as imp_exc:
|
||||
ELASTIC_LIBRARY_IMPORT_ERROR = imp_exc
|
||||
else:
|
||||
ELASTIC_LIBRARY_IMPORT_ERROR = None
|
||||
|
||||
|
||||
class TaskData:
|
||||
"""
|
||||
Data about an individual task.
|
||||
"""
|
||||
|
||||
def __init__(self, uuid, name, path, play, action, args):
|
||||
self.uuid = uuid
|
||||
self.name = name
|
||||
self.path = path
|
||||
self.play = play
|
||||
self.host_data = OrderedDict()
|
||||
self.start = time.time()
|
||||
self.action = action
|
||||
self.args = args
|
||||
|
||||
def add_host(self, host):
|
||||
if host.uuid in self.host_data:
|
||||
if host.status == 'included':
|
||||
# concatenate task include output from multiple items
|
||||
host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result)
|
||||
else:
|
||||
return
|
||||
|
||||
self.host_data[host.uuid] = host
|
||||
|
||||
|
||||
class HostData:
|
||||
"""
|
||||
Data about an individual host.
|
||||
"""
|
||||
|
||||
def __init__(self, uuid, name, status, result):
|
||||
self.uuid = uuid
|
||||
self.name = name
|
||||
self.status = status
|
||||
self.result = result
|
||||
self.finish = time.time()
|
||||
|
||||
|
||||
class ElasticSource(object):
|
||||
def __init__(self, display):
|
||||
self.ansible_playbook = ""
|
||||
self.ansible_version = None
|
||||
self.session = str(uuid.uuid4())
|
||||
self.host = socket.gethostname()
|
||||
try:
|
||||
self.ip_address = socket.gethostbyname(socket.gethostname())
|
||||
except Exception as e:
|
||||
self.ip_address = None
|
||||
self.user = getpass.getuser()
|
||||
|
||||
self._display = display
|
||||
|
||||
def start_task(self, tasks_data, hide_task_arguments, play_name, task):
|
||||
""" record the start of a task for one or more hosts """
|
||||
|
||||
uuid = task._uuid
|
||||
|
||||
if uuid in tasks_data:
|
||||
return
|
||||
|
||||
name = task.get_name().strip()
|
||||
path = task.get_path()
|
||||
action = task.action
|
||||
args = None
|
||||
|
||||
if not task.no_log and not hide_task_arguments:
|
||||
args = ', '.join(('%s=%s' % a for a in task.args.items()))
|
||||
|
||||
tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args)
|
||||
|
||||
def finish_task(self, tasks_data, status, result):
|
||||
""" record the results of a task for a single host """
|
||||
|
||||
task_uuid = result._task._uuid
|
||||
|
||||
if hasattr(result, '_host') and result._host is not None:
|
||||
host_uuid = result._host._uuid
|
||||
host_name = result._host.name
|
||||
else:
|
||||
host_uuid = 'include'
|
||||
host_name = 'include'
|
||||
|
||||
task = tasks_data[task_uuid]
|
||||
|
||||
if self.ansible_version is None and result._task_fields['args'].get('_ansible_version'):
|
||||
self.ansible_version = result._task_fields['args'].get('_ansible_version')
|
||||
|
||||
task.add_host(HostData(host_uuid, host_name, status, result))
|
||||
|
||||
def generate_distributed_traces(self, tasks_data, status, end_time, traceparent, apm_service_name,
|
||||
apm_server_url, apm_verify_server_cert, apm_secret_token, apm_api_key):
|
||||
""" generate distributed traces from the collected TaskData and HostData """
|
||||
|
||||
tasks = []
|
||||
parent_start_time = None
|
||||
for task_uuid, task in tasks_data.items():
|
||||
if parent_start_time is None:
|
||||
parent_start_time = task.start
|
||||
tasks.append(task)
|
||||
|
||||
apm_cli = self.init_apm_client(apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key)
|
||||
if apm_cli:
|
||||
instrument() # Only call this once, as early as possible.
|
||||
if traceparent:
|
||||
parent = trace_parent_from_string(traceparent)
|
||||
apm_cli.begin_transaction("Session", trace_parent=parent, start=parent_start_time)
|
||||
else:
|
||||
apm_cli.begin_transaction("Session", start=parent_start_time)
|
||||
# Populate trace metadata attributes
|
||||
if self.ansible_version is not None:
|
||||
label(ansible_version=self.ansible_version)
|
||||
label(ansible_session=self.session, ansible_host_name=self.host, ansible_host_user=self.user)
|
||||
if self.ip_address is not None:
|
||||
label(ansible_host_ip=self.ip_address)
|
||||
|
||||
for task_data in tasks:
|
||||
for host_uuid, host_data in task_data.host_data.items():
|
||||
self.create_span_data(apm_cli, task_data, host_data)
|
||||
|
||||
apm_cli.end_transaction(name=__name__, result=status, duration=end_time - parent_start_time)
|
||||
|
||||
def create_span_data(self, apm_cli, task_data, host_data):
|
||||
""" create the span with the given TaskData and HostData """
|
||||
|
||||
name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
|
||||
|
||||
message = "success"
|
||||
status = "success"
|
||||
if host_data.status == 'included':
|
||||
rc = 0
|
||||
else:
|
||||
res = host_data.result._result
|
||||
rc = res.get('rc', 0)
|
||||
if host_data.status == 'failed':
|
||||
if res.get('exception') is not None:
|
||||
message = res['exception'].strip().split('\n')[-1]
|
||||
elif 'msg' in res:
|
||||
message = res['msg']
|
||||
else:
|
||||
message = 'failed'
|
||||
status = "failure"
|
||||
elif host_data.status == 'skipped':
|
||||
if 'skip_reason' in res:
|
||||
message = res['skip_reason']
|
||||
else:
|
||||
message = 'skipped'
|
||||
status = "unknown"
|
||||
|
||||
with capture_span(task_data.name,
|
||||
start=task_data.start,
|
||||
span_type="ansible.task.run",
|
||||
duration=host_data.finish - task_data.start,
|
||||
labels={"ansible.task.args": task_data.args,
|
||||
"ansible.task.message": message,
|
||||
"ansible.task.module": task_data.action,
|
||||
"ansible.task.name": name,
|
||||
"ansible.task.result": rc,
|
||||
"ansible.task.host.name": host_data.name,
|
||||
"ansible.task.host.status": host_data.status}) as span:
|
||||
span.outcome = status
|
||||
if 'failure' in status:
|
||||
exception = AnsibleRuntimeError(message="{0}: {1} failed with error message {2}".format(task_data.action, name, message))
|
||||
apm_cli.capture_exception(exc_info=(type(exception), exception, exception.__traceback__), handled=True)
|
||||
|
||||
def init_apm_client(self, apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key):
|
||||
if apm_server_url:
|
||||
return Client(service_name=apm_service_name,
|
||||
server_url=apm_server_url,
|
||||
verify_server_cert=False,
|
||||
secret_token=apm_secret_token,
|
||||
api_key=apm_api_key,
|
||||
use_elastic_traceparent_header=True,
|
||||
debug=True)
|
||||
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
"""
|
||||
This callback creates distributed traces with Elastic APM.
|
||||
"""
|
||||
|
||||
CALLBACK_VERSION = 2.0
|
||||
CALLBACK_TYPE = 'notification'
|
||||
CALLBACK_NAME = 'community.general.elastic'
|
||||
CALLBACK_NEEDS_ENABLED = True
|
||||
|
||||
def __init__(self, display=None):
|
||||
super(CallbackModule, self).__init__(display=display)
|
||||
self.hide_task_arguments = None
|
||||
self.apm_service_name = None
|
||||
self.ansible_playbook = None
|
||||
self.traceparent = False
|
||||
self.play_name = None
|
||||
self.tasks_data = None
|
||||
self.errors = 0
|
||||
self.disabled = False
|
||||
|
||||
if ELASTIC_LIBRARY_IMPORT_ERROR:
|
||||
raise_from(
|
||||
AnsibleError('The `elastic-apm` must be installed to use this plugin'),
|
||||
ELASTIC_LIBRARY_IMPORT_ERROR)
|
||||
|
||||
self.tasks_data = OrderedDict()
|
||||
|
||||
self.elastic = ElasticSource(display=self._display)
|
||||
|
||||
def set_options(self, task_keys=None, var_options=None, direct=None):
|
||||
super(CallbackModule, self).set_options(task_keys=task_keys,
|
||||
var_options=var_options,
|
||||
direct=direct)
|
||||
|
||||
self.hide_task_arguments = self.get_option('hide_task_arguments')
|
||||
|
||||
self.apm_service_name = self.get_option('apm_service_name')
|
||||
if not self.apm_service_name:
|
||||
self.apm_service_name = 'ansible'
|
||||
|
||||
self.apm_server_url = self.get_option('apm_server_url')
|
||||
self.apm_secret_token = self.get_option('apm_secret_token')
|
||||
self.apm_api_key = self.get_option('apm_api_key')
|
||||
self.apm_verify_server_cert = self.get_option('apm_verify_server_cert')
|
||||
self.traceparent = self.get_option('traceparent')
|
||||
|
||||
def v2_playbook_on_start(self, playbook):
|
||||
self.ansible_playbook = basename(playbook._file_name)
|
||||
|
||||
def v2_playbook_on_play_start(self, play):
|
||||
self.play_name = play.get_name()
|
||||
|
||||
def v2_runner_on_no_hosts(self, task):
|
||||
self.elastic.start_task(
|
||||
self.tasks_data,
|
||||
self.hide_task_arguments,
|
||||
self.play_name,
|
||||
task
|
||||
)
|
||||
|
||||
def v2_playbook_on_task_start(self, task, is_conditional):
|
||||
self.elastic.start_task(
|
||||
self.tasks_data,
|
||||
self.hide_task_arguments,
|
||||
self.play_name,
|
||||
task
|
||||
)
|
||||
|
||||
def v2_playbook_on_cleanup_task_start(self, task):
|
||||
self.elastic.start_task(
|
||||
self.tasks_data,
|
||||
self.hide_task_arguments,
|
||||
self.play_name,
|
||||
task
|
||||
)
|
||||
|
||||
def v2_playbook_on_handler_task_start(self, task):
|
||||
self.elastic.start_task(
|
||||
self.tasks_data,
|
||||
self.hide_task_arguments,
|
||||
self.play_name,
|
||||
task
|
||||
)
|
||||
|
||||
def v2_runner_on_failed(self, result, ignore_errors=False):
|
||||
self.errors += 1
|
||||
self.elastic.finish_task(
|
||||
self.tasks_data,
|
||||
'failed',
|
||||
result
|
||||
)
|
||||
|
||||
def v2_runner_on_ok(self, result):
|
||||
self.elastic.finish_task(
|
||||
self.tasks_data,
|
||||
'ok',
|
||||
result
|
||||
)
|
||||
|
||||
def v2_runner_on_skipped(self, result):
|
||||
self.elastic.finish_task(
|
||||
self.tasks_data,
|
||||
'skipped',
|
||||
result
|
||||
)
|
||||
|
||||
def v2_playbook_on_include(self, included_file):
|
||||
self.elastic.finish_task(
|
||||
self.tasks_data,
|
||||
'included',
|
||||
included_file
|
||||
)
|
||||
|
||||
def v2_playbook_on_stats(self, stats):
|
||||
if self.errors == 0:
|
||||
status = "success"
|
||||
else:
|
||||
status = "failure"
|
||||
self.elastic.generate_distributed_traces(
|
||||
self.tasks_data,
|
||||
status,
|
||||
time.time(),
|
||||
self.traceparent,
|
||||
self.apm_service_name,
|
||||
self.apm_server_url,
|
||||
self.apm_verify_server_cert,
|
||||
self.apm_secret_token,
|
||||
self.apm_api_key
|
||||
)
|
||||
|
||||
def v2_runner_on_async_failed(self, result, **kwargs):
|
||||
self.errors += 1
|
||||
@@ -94,6 +94,7 @@ ansible.cfg: |
|
||||
|
||||
import os
|
||||
import json
|
||||
from ansible import context
|
||||
import socket
|
||||
import uuid
|
||||
import logging
|
||||
@@ -152,11 +153,11 @@ class CallbackModule(CallbackBase):
|
||||
self.base_data['ansible_pre_command_output'] = os.popen(
|
||||
self.ls_pre_command).read()
|
||||
|
||||
if self._options is not None:
|
||||
self.base_data['ansible_checkmode'] = self._options.check
|
||||
self.base_data['ansible_tags'] = self._options.tags
|
||||
self.base_data['ansible_skip_tags'] = self._options.skip_tags
|
||||
self.base_data['inventory'] = self._options.inventory
|
||||
if context.CLIARGS is not None:
|
||||
self.base_data['ansible_checkmode'] = context.CLIARGS.get('check')
|
||||
self.base_data['ansible_tags'] = context.CLIARGS.get('tags')
|
||||
self.base_data['ansible_skip_tags'] = context.CLIARGS.get('skip_tags')
|
||||
self.base_data['inventory'] = context.CLIARGS.get('inventory')
|
||||
|
||||
def set_options(self, task_keys=None, var_options=None, direct=None):
|
||||
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
|
||||
|
||||
@@ -23,6 +23,17 @@ DOCUMENTATION = '''
|
||||
- Hide the arguments for a task.
|
||||
env:
|
||||
- name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS
|
||||
enable_from_environment:
|
||||
type: str
|
||||
description:
|
||||
- Whether to enable this callback only if the given environment variable exists and it is set to C(true).
|
||||
- This is handy when you use Configuration as Code and want to send distributed traces
|
||||
if running in the CI rather when running Ansible locally.
|
||||
- For such, it evaluates the given I(enable_from_environment) value as environment variable
|
||||
and if set to true this plugin will be enabled.
|
||||
env:
|
||||
- name: ANSIBLE_OPENTELEMETRY_ENABLE_FROM_ENVIRONMENT
|
||||
version_added: 3.8.0
|
||||
otel_service_name:
|
||||
default: ansible
|
||||
type: str
|
||||
@@ -38,9 +49,9 @@ DOCUMENTATION = '''
|
||||
env:
|
||||
- name: TRACEPARENT
|
||||
requirements:
|
||||
- opentelemetry-api (python lib)
|
||||
- opentelemetry-exporter-otlp (python lib)
|
||||
- opentelemetry-sdk (python lib)
|
||||
- opentelemetry-api (Python library)
|
||||
- opentelemetry-exporter-otlp (Python library)
|
||||
- opentelemetry-sdk (Python library)
|
||||
'''
|
||||
|
||||
|
||||
@@ -63,6 +74,7 @@ import sys
|
||||
import time
|
||||
import uuid
|
||||
|
||||
from collections import OrderedDict
|
||||
from os.path import basename
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
@@ -88,18 +100,6 @@ except ImportError as imp_exc:
|
||||
else:
|
||||
OTEL_LIBRARY_IMPORT_ERROR = None
|
||||
|
||||
try:
|
||||
from collections import OrderedDict
|
||||
except ImportError:
|
||||
try:
|
||||
from ordereddict import OrderedDict
|
||||
except ImportError as imp_exc:
|
||||
ORDER_LIBRARY_IMPORT_ERROR = imp_exc
|
||||
else:
|
||||
ORDER_LIBRARY_IMPORT_ERROR = None
|
||||
else:
|
||||
ORDER_LIBRARY_IMPORT_ERROR = None
|
||||
|
||||
|
||||
class TaskData:
|
||||
"""
|
||||
@@ -253,15 +253,10 @@ class OpenTelemetrySource(object):
|
||||
res = host_data.result._result
|
||||
rc = res.get('rc', 0)
|
||||
if host_data.status == 'failed':
|
||||
if 'exception' in res:
|
||||
message = res['exception'].strip().split('\n')[-1]
|
||||
elif 'msg' in res:
|
||||
message = res['msg']
|
||||
else:
|
||||
message = 'failed'
|
||||
message = self.get_error_message(res)
|
||||
status = Status(status_code=StatusCode.ERROR, description=message)
|
||||
# Record an exception with the task message
|
||||
span.record_exception(BaseException(message))
|
||||
span.record_exception(BaseException(self.enrich_error_message(res)))
|
||||
elif host_data.status == 'skipped':
|
||||
if 'skip_reason' in res:
|
||||
message = res['skip_reason']
|
||||
@@ -288,6 +283,24 @@ class OpenTelemetrySource(object):
|
||||
if attributeValue is not None:
|
||||
span.set_attribute(attributeName, attributeValue)
|
||||
|
||||
@staticmethod
|
||||
def get_error_message(result):
|
||||
if result.get('exception') is not None:
|
||||
return OpenTelemetrySource._last_line(result['exception'])
|
||||
return result.get('msg', 'failed')
|
||||
|
||||
@staticmethod
|
||||
def _last_line(text):
|
||||
lines = text.strip().split('\n')
|
||||
return lines[-1]
|
||||
|
||||
@staticmethod
|
||||
def enrich_error_message(result):
|
||||
message = result.get('msg', 'failed')
|
||||
exception = result.get('exception')
|
||||
stderr = result.get('stderr')
|
||||
return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr)
|
||||
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
"""
|
||||
@@ -315,12 +328,7 @@ class CallbackModule(CallbackBase):
|
||||
AnsibleError('The `opentelemetry-api`, `opentelemetry-exporter-otlp` or `opentelemetry-sdk` must be installed to use this plugin'),
|
||||
OTEL_LIBRARY_IMPORT_ERROR)
|
||||
|
||||
if ORDER_LIBRARY_IMPORT_ERROR:
|
||||
raise_from(
|
||||
AnsibleError('The `ordereddict` must be installed to use this plugin'),
|
||||
ORDER_LIBRARY_IMPORT_ERROR)
|
||||
else:
|
||||
self.tasks_data = OrderedDict()
|
||||
self.tasks_data = OrderedDict()
|
||||
|
||||
self.opentelemetry = OpenTelemetrySource(display=self._display)
|
||||
|
||||
@@ -329,6 +337,12 @@ class CallbackModule(CallbackBase):
|
||||
var_options=var_options,
|
||||
direct=direct)
|
||||
|
||||
environment_variable = self.get_option('enable_from_environment')
|
||||
if environment_variable is not None and os.environ.get(environment_variable, 'false').lower() != 'true':
|
||||
self.disabled = True
|
||||
self._display.warning("The `enable_from_environment` option has been set and {0} is not enabled. "
|
||||
"Disabling the `opentelemetry` callback plugin.".format(environment_variable))
|
||||
|
||||
self.hide_task_arguments = self.get_option('hide_task_arguments')
|
||||
|
||||
self.otel_service_name = self.get_option('otel_service_name')
|
||||
|
||||
@@ -42,28 +42,29 @@ def should_use_block(value):
|
||||
return False
|
||||
|
||||
|
||||
def my_represent_scalar(self, tag, value, style=None):
|
||||
"""Uses block style for multi-line strings"""
|
||||
if style is None:
|
||||
if should_use_block(value):
|
||||
style = '|'
|
||||
# we care more about readable than accuracy, so...
|
||||
# ...no trailing space
|
||||
value = value.rstrip()
|
||||
# ...and non-printable characters
|
||||
value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
|
||||
# ...tabs prevent blocks from expanding
|
||||
value = value.expandtabs()
|
||||
# ...and odd bits of whitespace
|
||||
value = re.sub(r'[\x0b\x0c\r]', '', value)
|
||||
# ...as does trailing space
|
||||
value = re.sub(r' +\n', '\n', value)
|
||||
else:
|
||||
style = self.default_style
|
||||
node = yaml.representer.ScalarNode(tag, value, style=style)
|
||||
if self.alias_key is not None:
|
||||
self.represented_objects[self.alias_key] = node
|
||||
return node
|
||||
class MyDumper(AnsibleDumper):
|
||||
def represent_scalar(self, tag, value, style=None):
|
||||
"""Uses block style for multi-line strings"""
|
||||
if style is None:
|
||||
if should_use_block(value):
|
||||
style = '|'
|
||||
# we care more about readable than accuracy, so...
|
||||
# ...no trailing space
|
||||
value = value.rstrip()
|
||||
# ...and non-printable characters
|
||||
value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
|
||||
# ...tabs prevent blocks from expanding
|
||||
value = value.expandtabs()
|
||||
# ...and odd bits of whitespace
|
||||
value = re.sub(r'[\x0b\x0c\r]', '', value)
|
||||
# ...as does trailing space
|
||||
value = re.sub(r' +\n', '\n', value)
|
||||
else:
|
||||
style = self.default_style
|
||||
node = yaml.representer.ScalarNode(tag, value, style=style)
|
||||
if self.alias_key is not None:
|
||||
self.represented_objects[self.alias_key] = node
|
||||
return node
|
||||
|
||||
|
||||
class CallbackModule(Default):
|
||||
@@ -79,7 +80,6 @@ class CallbackModule(Default):
|
||||
|
||||
def __init__(self):
|
||||
super(CallbackModule, self).__init__()
|
||||
yaml.representer.BaseRepresenter.represent_scalar = my_represent_scalar
|
||||
|
||||
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
|
||||
if result.get('_ansible_no_log', False):
|
||||
@@ -121,7 +121,7 @@ class CallbackModule(Default):
|
||||
|
||||
if abridged_result:
|
||||
dumped += '\n'
|
||||
dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False))
|
||||
dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=MyDumper, default_flow_style=False))
|
||||
|
||||
# indent by a couple of spaces
|
||||
dumped = '\n '.join(dumped.split('\n')).rstrip()
|
||||
|
||||
31
plugins/doc_fragments/rundeck.py
Normal file
31
plugins/doc_fragments/rundeck.py
Normal file
@@ -0,0 +1,31 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Phillipe Smith <phsmithcc@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
class ModuleDocFragment(object):
|
||||
|
||||
# Standard files documentation fragment
|
||||
DOCUMENTATION = r'''
|
||||
options:
|
||||
url:
|
||||
type: str
|
||||
description:
|
||||
- Rundeck instance URL.
|
||||
required: true
|
||||
api_version:
|
||||
type: int
|
||||
description:
|
||||
- Rundeck API version to be used.
|
||||
- API version must be at least 14.
|
||||
default: 39
|
||||
api_token:
|
||||
type: str
|
||||
description:
|
||||
- Rundeck User API Token.
|
||||
required: true
|
||||
'''
|
||||
239
plugins/inventory/opennebula.py
Normal file
239
plugins/inventory/opennebula.py
Normal file
@@ -0,0 +1,239 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2020, FELDSAM s.r.o. - FeldHost™ <support@feldhost.cz>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
name: opennebula
|
||||
author:
|
||||
- Kristian Feldsam (@feldsam)
|
||||
short_description: OpenNebula inventory source
|
||||
version_added: "3.8.0"
|
||||
extends_documentation_fragment:
|
||||
- constructed
|
||||
description:
|
||||
- Get inventory hosts from OpenNebula cloud.
|
||||
- Uses an YAML configuration file ending with either I(opennebula.yml) or I(opennebula.yaml)
|
||||
to set parameter values.
|
||||
- Uses I(api_authfile), C(~/.one/one_auth), or C(ONE_AUTH) pointing to a OpenNebula credentials file.
|
||||
options:
|
||||
plugin:
|
||||
description: Token that ensures this is a source file for the 'opennebula' plugin.
|
||||
type: string
|
||||
required: true
|
||||
choices: [ community.general.opennebula ]
|
||||
api_url:
|
||||
description:
|
||||
- URL of the OpenNebula RPC server.
|
||||
- It is recommended to use HTTPS so that the username/password are not
|
||||
transferred over the network unencrypted.
|
||||
- If not set then the value of the C(ONE_URL) environment variable is used.
|
||||
env:
|
||||
- name: ONE_URL
|
||||
required: True
|
||||
type: string
|
||||
api_username:
|
||||
description:
|
||||
- Name of the user to login into the OpenNebula RPC server. If not set
|
||||
then the value of the C(ONE_USERNAME) environment variable is used.
|
||||
env:
|
||||
- name: ONE_USERNAME
|
||||
type: string
|
||||
api_password:
|
||||
description:
|
||||
- Password or a token of the user to login into OpenNebula RPC server.
|
||||
- If not set, the value of the C(ONE_PASSWORD) environment variable is used.
|
||||
env:
|
||||
- name: ONE_PASSWORD
|
||||
required: False
|
||||
type: string
|
||||
api_authfile:
|
||||
description:
|
||||
- If both I(api_username) or I(api_password) are not set, then it will try
|
||||
authenticate with ONE auth file. Default path is C(~/.one/one_auth).
|
||||
- Set environment variable C(ONE_AUTH) to override this path.
|
||||
env:
|
||||
- name: ONE_AUTH
|
||||
required: False
|
||||
type: string
|
||||
hostname:
|
||||
description: Field to match the hostname. Note C(v4_first_ip) corresponds to the first IPv4 found on VM.
|
||||
type: string
|
||||
default: v4_first_ip
|
||||
choices:
|
||||
- v4_first_ip
|
||||
- v6_first_ip
|
||||
- name
|
||||
filter_by_label:
|
||||
description: Only return servers filtered by this label.
|
||||
type: string
|
||||
group_by_labels:
|
||||
description: Create host groups by vm labels
|
||||
type: bool
|
||||
default: True
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
# inventory_opennebula.yml file in YAML format
|
||||
# Example command line: ansible-inventory --list -i inventory_opennebula.yml
|
||||
|
||||
# Pass a label filter to the API
|
||||
plugin: community.general.opennebula
|
||||
api_url: https://opennebula:2633/RPC2
|
||||
filter_by_label: Cache
|
||||
'''
|
||||
|
||||
try:
|
||||
import pyone
|
||||
|
||||
HAS_PYONE = True
|
||||
except ImportError:
|
||||
HAS_PYONE = False
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
from collections import namedtuple
|
||||
import os
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
NAME = 'community.general.opennebula'
|
||||
|
||||
def verify_file(self, path):
|
||||
valid = False
|
||||
if super(InventoryModule, self).verify_file(path):
|
||||
if path.endswith(('opennebula.yaml', 'opennebula.yml')):
|
||||
valid = True
|
||||
return valid
|
||||
|
||||
def _get_connection_info(self):
|
||||
url = self.get_option('api_url')
|
||||
username = self.get_option('api_username')
|
||||
password = self.get_option('api_password')
|
||||
authfile = self.get_option('api_authfile')
|
||||
|
||||
if not username and not password:
|
||||
if authfile is None:
|
||||
authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth")
|
||||
try:
|
||||
with open(authfile, "r") as fp:
|
||||
authstring = fp.read().rstrip()
|
||||
username, password = authstring.split(":")
|
||||
except (OSError, IOError):
|
||||
raise AnsibleError("Could not find or read ONE_AUTH file at '{e}'".format(e=authfile))
|
||||
except Exception:
|
||||
raise AnsibleError("Error occurs when reading ONE_AUTH file at '{e}'".format(e=authfile))
|
||||
|
||||
auth_params = namedtuple('auth', ('url', 'username', 'password'))
|
||||
|
||||
return auth_params(url=url, username=username, password=password)
|
||||
|
||||
def _get_vm_ipv4(self, vm):
|
||||
nic = vm.TEMPLATE.get('NIC')
|
||||
|
||||
if isinstance(nic, dict):
|
||||
nic = [nic]
|
||||
|
||||
for net in nic:
|
||||
return net['IP']
|
||||
|
||||
return False
|
||||
|
||||
def _get_vm_ipv6(self, vm):
|
||||
nic = vm.TEMPLATE.get('NIC')
|
||||
|
||||
if isinstance(nic, dict):
|
||||
nic = [nic]
|
||||
|
||||
for net in nic:
|
||||
if net.get('IP6_GLOBAL'):
|
||||
return net['IP6_GLOBAL']
|
||||
|
||||
return False
|
||||
|
||||
def _get_vm_pool(self):
|
||||
auth = self._get_connection_info()
|
||||
|
||||
if not (auth.username and auth.password):
|
||||
raise AnsibleError('API Credentials missing. Check OpenNebula inventory file.')
|
||||
else:
|
||||
one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
|
||||
|
||||
# get hosts (VMs)
|
||||
try:
|
||||
vm_pool = one_client.vmpool.infoextended(-2, -1, -1, 3)
|
||||
except Exception as e:
|
||||
raise AnsibleError("Something happened during XML-RPC call: {e}".format(e=to_native(e)))
|
||||
|
||||
return vm_pool
|
||||
|
||||
def _retrieve_servers(self, label_filter=None):
|
||||
vm_pool = self._get_vm_pool()
|
||||
|
||||
result = []
|
||||
|
||||
# iterate over hosts
|
||||
for vm in vm_pool.VM:
|
||||
server = vm.USER_TEMPLATE
|
||||
|
||||
labels = []
|
||||
if vm.USER_TEMPLATE.get('LABELS'):
|
||||
labels = [s for s in vm.USER_TEMPLATE.get('LABELS') if s == ',' or s == '-' or s.isalnum() or s.isspace()]
|
||||
labels = ''.join(labels)
|
||||
labels = labels.replace(' ', '_')
|
||||
labels = labels.replace('-', '_')
|
||||
labels = labels.split(',')
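# Worked example of the sanitisation above (assumed label string): a raw LABELS
# value of "Web Server,db-01" keeps only alphanumerics, spaces, hyphens and commas,
# then becomes ['Web_Server', 'db_01'] after the replacements and split.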
|
||||
|
||||
# filter by label
|
||||
if label_filter is not None:
|
||||
if label_filter not in labels:
|
||||
continue
|
||||
|
||||
server['name'] = vm.NAME
|
||||
server['LABELS'] = labels
|
||||
server['v4_first_ip'] = self._get_vm_ipv4(vm)
|
||||
server['v6_first_ip'] = self._get_vm_ipv6(vm)
|
||||
|
||||
result.append(server)
|
||||
|
||||
return result
|
||||
|
||||
def _populate(self):
|
||||
hostname_preference = self.get_option('hostname')
|
||||
group_by_labels = self.get_option('group_by_labels')
|
||||
|
||||
# Add a top group 'all'
|
||||
self.inventory.add_group(group='all')
|
||||
|
||||
filter_by_label = self.get_option('filter_by_label')
|
||||
for server in self._retrieve_servers(filter_by_label):
|
||||
# check for labels
|
||||
if group_by_labels and server['LABELS']:
|
||||
for label in server['LABELS']:
|
||||
self.inventory.add_group(group=label)
|
||||
self.inventory.add_host(host=server['name'], group=label)
|
||||
|
||||
self.inventory.add_host(host=server['name'], group='all')
|
||||
|
||||
for attribute, value in server.items():
|
||||
self.inventory.set_variable(server['name'], attribute, value)
|
||||
|
||||
if hostname_preference != 'name':
|
||||
self.inventory.set_variable(server['name'], 'ansible_host', server[hostname_preference])
|
||||
|
||||
if server.get('SSH_PORT'):
|
||||
self.inventory.set_variable(server['name'], 'ansible_port', server['SSH_PORT'])
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
|
||||
if not HAS_PYONE:
|
||||
raise AnsibleError('OpenNebula Inventory plugin requires pyone to work!')
|
||||
|
||||
super(InventoryModule, self).parse(inventory, loader, path)
|
||||
self._read_config_data(path=path)
|
||||
|
||||
self._populate()
|
||||
@@ -1031,7 +1031,7 @@ class KeycloakAPI(object):
|
||||
:param name: Name of the role to fetch.
|
||||
:param realm: Realm in which the role resides; default 'master'.
|
||||
"""
|
||||
role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=name)
|
||||
role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name))
|
||||
try:
|
||||
return json.loads(to_native(open_url(role_url, method="GET", headers=self.restheaders,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
@@ -1065,7 +1065,7 @@ class KeycloakAPI(object):
|
||||
:param rolerep: A RoleRepresentation of the updated role.
|
||||
:return HTTPResponse object on success
|
||||
"""
|
||||
role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=rolerep['name'])
|
||||
role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(rolerep['name']))
|
||||
try:
|
||||
return open_url(role_url, method='PUT', headers=self.restheaders,
|
||||
data=json.dumps(rolerep), validate_certs=self.validate_certs)
|
||||
@@ -1079,7 +1079,7 @@ class KeycloakAPI(object):
|
||||
:param name: The name of the role.
|
||||
:param realm: The realm in which this role resides, default "master".
|
||||
"""
|
||||
role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=name)
|
||||
role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name))
|
||||
try:
|
||||
return open_url(role_url, method='DELETE', headers=self.restheaders,
|
||||
validate_certs=self.validate_certs)
|
||||
@@ -1122,7 +1122,7 @@ class KeycloakAPI(object):
|
||||
if cid is None:
|
||||
self.module.fail_json(msg='Could not find client %s in realm %s'
|
||||
% (clientid, realm))
|
||||
role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=name)
|
||||
role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name))
|
||||
try:
|
||||
return json.loads(to_native(open_url(role_url, method="GET", headers=self.restheaders,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
@@ -1168,7 +1168,7 @@ class KeycloakAPI(object):
|
||||
if cid is None:
|
||||
self.module.fail_json(msg='Could not find client %s in realm %s'
|
||||
% (clientid, realm))
|
||||
role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=rolerep['name'])
|
||||
role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep['name']))
|
||||
try:
|
||||
return open_url(role_url, method='PUT', headers=self.restheaders,
|
||||
data=json.dumps(rolerep), validate_certs=self.validate_certs)
|
||||
@@ -1187,7 +1187,7 @@ class KeycloakAPI(object):
|
||||
if cid is None:
|
||||
self.module.fail_json(msg='Could not find client %s in realm %s'
|
||||
% (clientid, realm))
|
||||
role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=name)
|
||||
role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name))
|
||||
try:
|
||||
return open_url(role_url, method='DELETE', headers=self.restheaders,
|
||||
validate_certs=self.validate_certs)
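For context on the change above: quote() percent-encodes role names before they are embedded in the request URL, so names containing spaces or other unsafe characters no longer produce malformed paths. A minimal illustration (the role names are made up; the plain stdlib import is used only for this sketch, the module_utils keep their own compat imports):

from urllib.parse import quote

print(quote("my realm role"))          # -> 'my%20realm%20role'
print(quote("roles/admin", safe=""))   # -> 'roles%2Fadmin' (encode '/' as well, if ever needed)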
|
||||
|
||||
plugins/module_utils/rundeck.py (new file, 94 lines)
@@ -0,0 +1,94 @@
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Phillipe Smith <phsmithcc@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
|
||||
from ansible.module_utils.urls import fetch_url, url_argument_spec
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
|
||||
def api_argument_spec():
|
||||
'''
|
||||
Creates an argument spec that can be used with any module
|
||||
that will be requesting content via Rundeck API
|
||||
'''
|
||||
api_argument_spec = url_argument_spec()
|
||||
api_argument_spec.update(dict(
|
||||
url=dict(required=True, type="str"),
|
||||
api_version=dict(type="int", default=39),
|
||||
api_token=dict(required=True, type="str", no_log=True)
|
||||
))
|
||||
|
||||
return api_argument_spec
|
||||
|
||||
|
||||
def api_request(module, endpoint, data=None, method="GET"):
|
||||
"""Manages Rundeck API requests via HTTP(S)
|
||||
|
||||
:arg module: The AnsibleModule (used to get url, api_version, api_token, etc).
|
||||
:arg endpoint: The API endpoint to be used.
|
||||
:kwarg data: The data to be sent (in case of POST/PUT).
|
||||
:kwarg method: "POST", "PUT", etc.
|
||||
|
||||
:returns: A tuple of (**response**, **info**). Use ``response.read()`` to read the data.
|
||||
The **info** contains the 'status' and other metadata. When an HTTP error (status >= 400)
occurs, ``info['body']`` contains the error response data.
|
||||
|
||||
Example::
|
||||
|
||||
data={...}
|
||||
resp, info = fetch_url(module,
|
||||
"http://rundeck.example.org",
|
||||
data=module.jsonify(data),
|
||||
method="POST")
|
||||
status_code = info["status"]
|
||||
body = resp.read()
|
||||
if status_code >= 400 :
|
||||
body = info['body']
|
||||
"""
|
||||
|
||||
response, info = fetch_url(
|
||||
module=module,
|
||||
url="%s/api/%s/%s" % (
|
||||
module.params["url"],
|
||||
module.params["api_version"],
|
||||
endpoint
|
||||
),
|
||||
data=json.dumps(data),
|
||||
method=method,
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"Accept": "application/json",
|
||||
"X-Rundeck-Auth-Token": module.params["api_token"]
|
||||
}
|
||||
)
|
||||
|
||||
if info["status"] == 403:
|
||||
module.fail_json(msg="Token authorization failed",
|
||||
execution_info=json.loads(info["body"]))
|
||||
if info["status"] == 409:
|
||||
module.fail_json(msg="Job executions limit reached",
|
||||
execution_info=json.loads(info["body"]))
|
||||
elif info["status"] >= 500:
|
||||
module.fail_json(msg="Rundeck API error",
|
||||
execution_info=json.loads(info["body"]))
|
||||
|
||||
try:
|
||||
content = response.read()
|
||||
json_response = json.loads(content)
|
||||
return json_response, info
|
||||
except AttributeError as error:
|
||||
module.fail_json(msg="Rundeck API request error",
|
||||
exception=to_native(error),
|
||||
execution_info=info)
|
||||
except ValueError as error:
|
||||
module.fail_json(
|
||||
msg="No valid JSON response",
|
||||
exception=to_native(error),
|
||||
execution_info=content
|
||||
)
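A minimal sketch of how a module might consume these helpers (the project option and the endpoint below are assumptions for illustration, not part of this file):

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.rundeck import (
    api_argument_spec, api_request)


def main():
    argument_spec = api_argument_spec()
    argument_spec.update(dict(project=dict(type="str", required=True)))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # GET /api/<version>/project/<project>/jobs (illustrative endpoint)
    response, info = api_request(module, "project/%s/jobs" % module.params["project"])
    module.exit_json(changed=False, jobs=response)


if __name__ == "__main__":
    main()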
|
||||
@@ -23,14 +23,14 @@ options:
|
||||
required: true
|
||||
architecture:
|
||||
description:
|
||||
- The architecture for the container (e.g. "x86_64" or "i686").
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
|
||||
- 'The architecture for the container (for example C(x86_64) or C(i686)).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
|
||||
type: str
|
||||
required: false
|
||||
config:
|
||||
description:
|
||||
- 'The config for the container (e.g. {"limits.cpu": "2"}).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
|
||||
- 'The config for the container (for example C({"limits.cpu": "2"})).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
|
||||
- If the container already exists and its "config" values in metadata
|
||||
obtained from GET /1.0/containers/<name>
|
||||
U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersname)
|
||||
@@ -51,20 +51,20 @@ options:
|
||||
version_added: 3.7.0
|
||||
profiles:
|
||||
description:
|
||||
- Profile to be used by the container
|
||||
- Profile to be used by the container.
|
||||
type: list
|
||||
elements: str
|
||||
devices:
|
||||
description:
|
||||
- 'The devices for the container
|
||||
(e.g. { "rootfs": { "path": "/dev/kvm", "type": "unix-char" }).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
|
||||
(for example C({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
|
||||
type: dict
|
||||
required: false
|
||||
ephemeral:
|
||||
description:
|
||||
- Whether or not the container is ephemeral (e.g. true or false).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
|
||||
- Whether or not the container is ephemeral (for example C(true) or C(false)).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).
|
||||
required: false
|
||||
type: bool
|
||||
source:
|
||||
@@ -76,7 +76,7 @@ options:
|
||||
"protocol": "lxd",
|
||||
"alias": "ubuntu/xenial/amd64" }).'
|
||||
- 'See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1) for complete API documentation.'
|
||||
- 'Note that C(protocol) accepts two choices: C(lxd) or C(simplestreams)'
|
||||
- 'Note that C(protocol) accepts two choices: C(lxd) or C(simplestreams).'
|
||||
required: false
|
||||
type: dict
|
||||
state:
|
||||
@@ -152,10 +152,10 @@ options:
|
||||
trust_password:
|
||||
description:
|
||||
- The client trusted password.
|
||||
- You need to set this password on the LXD server before
|
||||
running this module using the following command.
|
||||
lxc config set core.trust_password <some random password>
|
||||
See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
|
||||
- 'You need to set this password on the LXD server before
|
||||
running this module using the following command:
|
||||
C(lxc config set core.trust_password <some random password>).
|
||||
See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).'
|
||||
- If trust_password is set, this module send a request for
|
||||
authentication before sending any requests.
|
||||
required: false
|
||||
|
||||
plugins/modules/cloud/misc/proxmox_tasks_info.py (new file, 186 lines)
@@ -0,0 +1,186 @@
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Andreas Botzner (@paginabianca) <andreas at botzner dot com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: proxmox_tasks_info
|
||||
short_description: Retrieve information about one or more Proxmox VE tasks
|
||||
version_added: 3.8.0
|
||||
description:
|
||||
- Retrieve information about one or more Proxmox VE tasks.
|
||||
author: 'Andreas Botzner (@paginabianca) <andreas at botzner dot com>'
|
||||
options:
|
||||
node:
|
||||
description:
|
||||
- Node where to get tasks.
|
||||
required: true
|
||||
type: str
|
||||
task:
|
||||
description:
|
||||
- Return specific task.
|
||||
aliases: ['upid', 'name']
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- community.general.proxmox.documentation
|
||||
'''
|
||||
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: List tasks on node01
|
||||
community.general.proxmox_tasks_info:
|
||||
api_host: proxmoxhost
|
||||
api_user: root@pam
|
||||
api_password: '{{ password | default(omit) }}'
|
||||
api_token_id: '{{ token_id | default(omit) }}'
|
||||
api_token_secret: '{{ token_secret | default(omit) }}'
|
||||
node: node01
|
||||
register: result
|
||||
|
||||
- name: Retrieve information about specific tasks on node01
|
||||
community.general.proxmox_tasks_info:
|
||||
api_host: proxmoxhost
|
||||
api_user: root@pam
|
||||
api_password: '{{ password | default(omit) }}'
|
||||
api_token_id: '{{ token_id | default(omit) }}'
|
||||
api_token_secret: '{{ token_secret | default(omit) }}'
|
||||
task: 'UPID:node01:00003263:16167ACE:621EE230:srvreload:networking:root@pam:'
|
||||
node: node01
|
||||
register: proxmox_tasks
|
||||
'''
RETURN = '''
|
||||
proxmox_tasks:
|
||||
description: List of tasks.
|
||||
returned: on success
|
||||
type: list
|
||||
elements: dict
|
||||
contains:
|
||||
id:
|
||||
description: ID of the task.
|
||||
returned: on success
|
||||
type: str
|
||||
node:
|
||||
description: Node name.
|
||||
returned: on success
|
||||
type: str
|
||||
pid:
|
||||
description: PID of the task.
|
||||
returned: on success
|
||||
type: int
|
||||
pstart:
|
||||
description: pstart of the task.
|
||||
returned: on success
|
||||
type: int
|
||||
starttime:
|
||||
description: Starting time of the task.
|
||||
returned: on success
|
||||
type: int
|
||||
type:
|
||||
description: Type of the task.
|
||||
returned: on success
|
||||
type: str
|
||||
upid:
|
||||
description: UPID of the task.
|
||||
returned: on success
|
||||
type: str
|
||||
user:
|
||||
description: User that owns the task.
|
||||
returned: on success
|
||||
type: str
|
||||
endtime:
|
||||
description: End time of the task.
|
||||
returned: on success, can be absent
|
||||
type: int
|
||||
status:
|
||||
description: Status of the task.
|
||||
returned: on success, can be absent
|
||||
type: str
|
||||
failed:
|
||||
description: If the task failed.
|
||||
returned: when status is defined
|
||||
type: bool
|
||||
msg:
|
||||
description: Short message.
|
||||
returned: on failure
|
||||
type: str
|
||||
sample: 'Task: UPID:xyz:xyz does not exist on node: proxmoxnode'
|
||||
'''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible_collections.community.general.plugins.module_utils.proxmox import (
|
||||
proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
|
||||
|
||||
|
||||
class ProxmoxTaskInfoAnsible(ProxmoxAnsible):
|
||||
def get_task(self, upid, node):
|
||||
tasks = self.get_tasks(node)
|
||||
for task in tasks:
|
||||
if task.info['upid'] == upid:
|
||||
return [task]
|
||||
|
||||
def get_tasks(self, node):
|
||||
tasks = self.proxmox_api.nodes(node).tasks.get()
|
||||
return [ProxmoxTask(task) for task in tasks]
|
||||
|
||||
|
||||
class ProxmoxTask:
|
||||
def __init__(self, task):
|
||||
self.info = dict()
|
||||
for k, v in task.items():
|
||||
if k == 'status' and isinstance(v, str):
|
||||
self.info[k] = v
|
||||
if v != 'OK':
|
||||
self.info['failed'] = True
|
||||
else:
|
||||
self.info[k] = v
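# Illustrative behaviour of the loop above (made-up task dict): a raw task such as
# {'status': 'stopped: command failed', 'type': 'vzdump'} ends up as
# {'status': 'stopped: command failed', 'failed': True, 'type': 'vzdump'},
# while a task whose status is exactly 'OK' gets no 'failed' key.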
|
||||
|
||||
|
||||
def proxmox_task_info_argument_spec():
|
||||
return dict(
|
||||
task=dict(type='str', aliases=['upid', 'name'], required=False),
|
||||
node=dict(type='str', required=True),
|
||||
)
|
||||
|
||||
|
||||
def main():
|
||||
module_args = proxmox_auth_argument_spec()
|
||||
task_info_args = proxmox_task_info_argument_spec()
|
||||
module_args.update(task_info_args)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=module_args,
|
||||
required_together=[('api_token_id', 'api_token_secret'),
|
||||
('api_user', 'api_password')],
|
||||
required_one_of=[('api_password', 'api_token_id')],
|
||||
supports_check_mode=True)
|
||||
result = dict(changed=False)
|
||||
|
||||
if not HAS_PROXMOXER:
|
||||
module.fail_json(msg=missing_required_lib(
|
||||
'proxmoxer'), exception=PROXMOXER_IMP_ERR)
|
||||
proxmox = ProxmoxTaskInfoAnsible(module)
|
||||
upid = module.params['task']
|
||||
node = module.params['node']
|
||||
if upid:
|
||||
tasks = proxmox.get_task(upid=upid, node=node)
|
||||
else:
|
||||
tasks = proxmox.get_tasks(node=node)
|
||||
if tasks is not None:
|
||||
result['proxmox_tasks'] = [task.info for task in tasks]
|
||||
module.exit_json(**result)
|
||||
else:
|
||||
result['msg'] = 'Task: {0} does not exist on node: {1}.'.format(
|
||||
upid, node)
|
||||
module.fail_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -137,6 +137,11 @@ options:
|
||||
type: bool
|
||||
default: false
|
||||
version_added: '3.3.0'
|
||||
parallelism:
|
||||
description:
|
||||
- Restrict concurrent operations when Terraform applies the plan.
|
||||
type: int
|
||||
version_added: '3.8.0'
|
||||
notes:
|
||||
- To just run a `terraform plan`, use check mode.
|
||||
requirements: [ "terraform" ]
|
||||
@@ -363,6 +368,7 @@ def main():
|
||||
init_reconfigure=dict(type='bool', default=False),
|
||||
overwrite_init=dict(type='bool', default=True),
|
||||
check_destroy=dict(type='bool', default=False),
|
||||
parallelism=dict(type='int'),
|
||||
),
|
||||
required_if=[('state', 'planned', ['plan_file'])],
|
||||
supports_check_mode=True,
|
||||
@@ -415,6 +421,9 @@ def main():
|
||||
elif state == 'absent':
|
||||
command.extend(DESTROY_ARGS)
|
||||
|
||||
if state == 'present' and module.params.get('parallelism') is not None:
|
||||
command.append('-parallelism=%d' % module.params.get('parallelism'))
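# e.g. (illustrative) parallelism=5 appends '-parallelism=5' to the apply/destroy command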
|
||||
|
||||
variables_args = []
|
||||
for k, v in variables.items():
|
||||
variables_args.extend([
|
||||
|
||||
@@ -195,7 +195,6 @@ def create_or_update_executions(kc, config, realm='master'):
|
||||
:param kc: Keycloak API access.
|
||||
:param config: Representation of the authentication flow including its executions.
|
||||
:param realm: Realm
|
||||
:return: True if executions have been modified. False otherwise.
|
||||
:return: tuple (changed, dict(before, after)
|
||||
WHERE
|
||||
bool changed indicates if changes have been made
|
||||
@@ -235,10 +234,14 @@ def create_or_update_executions(kc, config, realm='master'):
|
||||
elif new_exec["providerId"] is not None:
|
||||
kc.create_execution(new_exec, flowAlias=flow_alias_parent, realm=realm)
|
||||
exec_found = True
|
||||
exec_index = new_exec_index
|
||||
id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"]
|
||||
after += str(new_exec) + '\n'
|
||||
elif new_exec["displayName"] is not None:
|
||||
kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm)
|
||||
exec_found = True
|
||||
exec_index = new_exec_index
|
||||
id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"]
|
||||
after += str(new_exec) + '\n'
|
||||
if exec_found:
|
||||
changed = True
|
||||
|
||||
@@ -295,6 +295,20 @@ EXAMPLES = '''
|
||||
clientAuthMethod: client_secret_post
|
||||
clientId: my-client
|
||||
clientSecret: secret
|
||||
syncMode: FORCE
|
||||
mappers:
|
||||
- name: first_name
|
||||
identityProviderMapper: oidc-user-attribute-idp-mapper
|
||||
config:
|
||||
claim: first_name
|
||||
user.attribute: first_name
|
||||
syncMode: INHERIT
|
||||
- name: last_name
|
||||
identityProviderMapper: oidc-user-attribute-idp-mapper
|
||||
config:
|
||||
claim: last_name
|
||||
user.attribute: last_name
|
||||
syncMode: INHERIT
|
||||
|
||||
- name: Create SAML identity provider, authentication with credentials
|
||||
community.general.keycloak_identity_provider:
|
||||
@@ -313,6 +327,14 @@ EXAMPLES = '''
|
||||
singleSignOnServiceUrl: https://idp.example.com/login
|
||||
wantAuthnRequestsSigned: true
|
||||
wantAssertionsSigned: true
|
||||
mappers:
|
||||
- name: roles
|
||||
identityProviderMapper: saml-user-attribute-idp-mapper
|
||||
config:
|
||||
user.attribute: roles
|
||||
attribute.friendly.name: User Roles
|
||||
attribute.name: roles
|
||||
syncMode: INHERIT
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
@@ -400,15 +422,15 @@ end_state:
|
||||
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
|
||||
keycloak_argument_spec, get_token, KeycloakError
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from copy import deepcopy
|
||||
|
||||
|
||||
def sanitize(idp):
|
||||
result = idp.copy()
|
||||
if 'config' in result:
|
||||
result['config'] = sanitize(result['config'])
|
||||
if 'clientSecret' in result:
|
||||
result['clientSecret'] = '**********'
|
||||
return result
|
||||
idpcopy = deepcopy(idp)
|
||||
if 'config' in idpcopy:
|
||||
if 'clientSecret' in idpcopy['config']:
|
||||
idpcopy['clientSecret'] = '**********'
|
||||
return idpcopy
|
||||
|
||||
|
||||
def get_identity_provider_with_mappers(kc, alias, realm):
|
||||
@@ -493,18 +515,29 @@ def main():
|
||||
changeset[camel(param)] = new_param_value
|
||||
|
||||
# special handling of mappers list to allow change detection
|
||||
changeset['mappers'] = before_idp.get('mappers', list())
|
||||
if module.params.get('mappers') is not None:
|
||||
for new_mapper in module.params.get('mappers'):
|
||||
old_mapper = next((x for x in changeset['mappers'] if x['name'] == new_mapper['name']), None)
|
||||
new_mapper = dict((k, v) for k, v in new_mapper.items() if new_mapper[k] is not None)
|
||||
if old_mapper is not None:
|
||||
old_mapper.update(new_mapper)
|
||||
for change in module.params['mappers']:
|
||||
change = dict((k, v) for k, v in change.items() if change[k] is not None)
|
||||
if change.get('id') is None and change.get('name') is None:
|
||||
module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.')
|
||||
if before_idp == dict():
|
||||
old_mapper = dict()
|
||||
elif change.get('id') is not None:
|
||||
old_mapper = kc.get_identity_provider_mapper(change['id'], alias, realm)
|
||||
if old_mapper is None:
|
||||
old_mapper = dict()
|
||||
else:
|
||||
found = [x for x in kc.get_identity_provider_mappers(alias, realm) if x['name'] == change['name']]
|
||||
if len(found) == 1:
|
||||
old_mapper = found[0]
|
||||
else:
|
||||
old_mapper = dict()
|
||||
new_mapper = old_mapper.copy()
|
||||
new_mapper.update(change)
|
||||
if new_mapper != old_mapper:
|
||||
if changeset.get('mappers') is None:
|
||||
changeset['mappers'] = list()
|
||||
changeset['mappers'].append(new_mapper)
|
||||
# remove mappers if not present in module params
|
||||
changeset['mappers'] = [x for x in changeset['mappers']
|
||||
if [y for y in module.params.get('mappers', []) if y['name'] == x['name']] != []]
|
||||
|
||||
# prepare the new representation
|
||||
updated_idp = before_idp.copy()
|
||||
@@ -538,6 +571,8 @@ def main():
|
||||
mappers = updated_idp.pop('mappers', [])
|
||||
kc.create_identity_provider(updated_idp, realm)
|
||||
for mapper in mappers:
|
||||
if mapper.get('identityProviderAlias') is None:
|
||||
mapper['identityProviderAlias'] = alias
|
||||
kc.create_identity_provider_mapper(mapper, alias, realm)
|
||||
after_idp = get_identity_provider_with_mappers(kc, alias, realm)
|
||||
|
||||
@@ -572,6 +607,8 @@ def main():
|
||||
if mapper.get('id') is not None:
|
||||
kc.update_identity_provider_mapper(mapper, alias, realm)
|
||||
else:
|
||||
if mapper.get('identityProviderAlias') is None:
|
||||
mapper['identityProviderAlias'] = alias
|
||||
kc.create_identity_provider_mapper(mapper, alias, realm)
|
||||
for mapper in [x for x in before_idp['mappers']
|
||||
if [y for y in updated_mappers if y["name"] == x['name']] == []]:
|
||||
|
||||
@@ -100,7 +100,8 @@ options:
|
||||
routing_rules4:
|
||||
description:
|
||||
- Is the same as in an C(ip rule add) command, except it always requires specifying a priority.
|
||||
type: str
|
||||
type: list
|
||||
elements: str
|
||||
version_added: 3.3.0
|
||||
never_default4:
|
||||
description:
|
||||
@@ -1470,6 +1471,7 @@ class Nmcli(object):
|
||||
elif setting in ('ipv4.dns',
|
||||
'ipv4.dns-search',
|
||||
'ipv4.routes',
|
||||
'ipv4.routing-rules',
|
||||
'ipv4.route-metric',
|
||||
'ipv6.dns',
|
||||
'ipv6.dns-search',
|
||||
@@ -1758,7 +1760,7 @@ def main():
|
||||
gw4_ignore_auto=dict(type='bool', default=False),
|
||||
routes4=dict(type='list', elements='str'),
|
||||
route_metric4=dict(type='int'),
|
||||
routing_rules4=dict(type='str'),
|
||||
routing_rules4=dict(type='list', elements='str'),
|
||||
never_default4=dict(type='bool', default=False),
|
||||
dns4=dict(type='list', elements='str'),
|
||||
dns4_search=dict(type='list', elements='str'),
|
||||
|
||||
@@ -125,6 +125,11 @@ options:
|
||||
- Sets the timeout in seconds for connection attempts.
|
||||
type: int
|
||||
default: 20
|
||||
ehlohost:
|
||||
description:
|
||||
- Allows for manual specification of host for EHLO.
|
||||
type: str
|
||||
version_added: 3.8.0
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
@@ -189,6 +194,16 @@ EXAMPLES = r'''
|
||||
subject: Ansible-report
|
||||
body: System {{ ansible_hostname }} has been successfully provisioned.
|
||||
secure: starttls
|
||||
|
||||
- name: Sending an e-mail using StartTLS, remote server, custom EHLO
|
||||
community.general.mail:
|
||||
host: some.smtp.host.tld
|
||||
port: 25
|
||||
ehlohost: my-resolvable-hostname.tld
|
||||
to: John Smith <john.smith@example.com>
|
||||
subject: Ansible-report
|
||||
body: System {{ ansible_hostname }} has been successfully provisioned.
|
||||
secure: starttls
|
||||
'''
|
||||
|
||||
import os
|
||||
@@ -215,6 +230,7 @@ def main():
|
||||
password=dict(type='str', no_log=True),
|
||||
host=dict(type='str', default='localhost'),
|
||||
port=dict(type='int', default=25),
|
||||
ehlohost=dict(type='str', default=None),
|
||||
sender=dict(type='str', default='root', aliases=['from']),
|
||||
to=dict(type='list', elements='str', default=['root'], aliases=['recipients']),
|
||||
cc=dict(type='list', elements='str', default=[]),
|
||||
@@ -235,6 +251,7 @@ def main():
|
||||
password = module.params.get('password')
|
||||
host = module.params.get('host')
|
||||
port = module.params.get('port')
|
||||
local_hostname = module.params.get('ehlohost')
|
||||
sender = module.params.get('sender')
|
||||
recipients = module.params.get('to')
|
||||
copies = module.params.get('cc')
|
||||
@@ -259,9 +276,9 @@ def main():
|
||||
if secure != 'never':
|
||||
try:
|
||||
if PY3:
|
||||
smtp = smtplib.SMTP_SSL(host=host, port=port, timeout=timeout)
|
||||
smtp = smtplib.SMTP_SSL(host=host, port=port, local_hostname=local_hostname, timeout=timeout)
|
||||
else:
|
||||
smtp = smtplib.SMTP_SSL(timeout=timeout)
|
||||
smtp = smtplib.SMTP_SSL(local_hostname=local_hostname, timeout=timeout)
|
||||
code, smtpmessage = smtp.connect(host, port)
|
||||
secure_state = True
|
||||
except ssl.SSLError as e:
|
||||
@@ -273,9 +290,9 @@ def main():
|
||||
|
||||
if not secure_state:
|
||||
if PY3:
|
||||
smtp = smtplib.SMTP(host=host, port=port, timeout=timeout)
|
||||
smtp = smtplib.SMTP(host=host, port=port, local_hostname=local_hostname, timeout=timeout)
|
||||
else:
|
||||
smtp = smtplib.SMTP(timeout=timeout)
|
||||
smtp = smtplib.SMTP(local_hostname=local_hostname, timeout=timeout)
|
||||
code, smtpmessage = smtp.connect(host, port)
|
||||
|
||||
except smtplib.SMTPException as e:
|
||||
|
||||
plugins/modules/packaging/language/pipx.py (new file, 282 lines)
@@ -0,0 +1,282 @@
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2021, Alexei Znamensky <russoz@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: pipx
|
||||
short_description: Manages applications installed with pipx
|
||||
version_added: 3.8.0
|
||||
description:
|
||||
- Manage Python applications installed in isolated virtualenvs using pipx.
|
||||
options:
|
||||
state:
|
||||
type: str
|
||||
choices: [present, absent, install, uninstall, uninstall_all, inject, upgrade, upgrade_all, reinstall, reinstall_all]
|
||||
default: install
|
||||
description:
|
||||
- Desired state for the application.
|
||||
- The states C(present) and C(absent) are aliases to C(install) and C(uninstall), respectively.
|
||||
name:
|
||||
type: str
|
||||
description:
|
||||
- >
|
||||
The name of the application to be installed. It must be a simple package name.
For passing package specifications or installing from URLs or directories,
please use the I(source) option.
|
||||
source:
|
||||
type: str
|
||||
description:
|
||||
- >
|
||||
The source of the application, such as a package with a version specifier, a URL,
a directory, or any other specification accepted by C(pipx). See the C(pipx) documentation for more details.
|
||||
- When specified, the C(pipx) command will use I(source) instead of I(name).
|
||||
install_deps:
|
||||
description:
|
||||
- Include applications of dependent packages.
|
||||
- Only used when I(state=install) or I(state=upgrade).
|
||||
type: bool
|
||||
default: false
|
||||
inject_packages:
|
||||
description:
|
||||
- Packages to be injected into an existing virtual environment.
|
||||
- Only used when I(state=inject).
|
||||
type: list
|
||||
elements: str
|
||||
force:
|
||||
description:
|
||||
- Force modification of the application's virtual environment. See C(pipx) for details.
|
||||
- Only used when I(state=install), I(state=upgrade), I(state=upgrade_all), or I(state=inject).
|
||||
type: bool
|
||||
default: false
|
||||
include_injected:
|
||||
description:
|
||||
- Upgrade the injected packages along with the application.
|
||||
- Only used when I(state=upgrade) or I(state=upgrade_all).
|
||||
type: bool
|
||||
default: false
|
||||
index_url:
|
||||
description:
|
||||
- Base URL of Python Package Index.
|
||||
- Only used when I(state=install), I(state=upgrade), or I(state=inject).
|
||||
type: str
|
||||
python:
|
||||
description:
|
||||
- Python version to be used when creating the application virtual environment. Must be 3.6+.
|
||||
- Only used when I(state=install), I(state=reinstall), or I(state=reinstall_all).
|
||||
type: str
|
||||
executable:
|
||||
description:
|
||||
- Path to the C(pipx) installed in the system.
|
||||
- >
|
||||
If not specified, the module will use C(python -m pipx) to run the tool,
using the same Python interpreter as Ansible itself.
|
||||
type: path
|
||||
notes:
|
||||
- This module does not install the C(pipx) Python package; however, that can easily be done with the module M(ansible.builtin.pip).
|
||||
- This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module.
|
||||
- Please note that C(pipx) requires Python 3.6 or above.
|
||||
- >
|
||||
This first implementation does not verify whether a specified version constraint is already satisfied.
Hence, when using version operators, the C(pipx) module will always try to execute the operation,
even when the application was previously installed.
This check will be added in the future.
|
||||
- See also the C(pipx) documentation at U(https://pypa.github.io/pipx/).
|
||||
author:
|
||||
- "Alexei Znamensky (@russoz)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Install tox
|
||||
community.general.pipx:
|
||||
name: tox
|
||||
|
||||
- name: Install tox from git repository
|
||||
community.general.pipx:
|
||||
name: tox
|
||||
source: git+https://github.com/tox-dev/tox.git
|
||||
|
||||
- name: Upgrade tox
|
||||
community.general.pipx:
|
||||
name: tox
|
||||
state: upgrade
|
||||
|
||||
- name: Reinstall black with specific Python version
|
||||
community.general.pipx:
|
||||
name: black
|
||||
state: reinstall
|
||||
python: 3.7
|
||||
|
||||
- name: Uninstall pycowsay
|
||||
community.general.pipx:
|
||||
name: pycowsay
|
||||
state: absent
|
||||
'''
import json
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.module_helper import (
|
||||
CmdStateModuleHelper, ArgFormat, ModuleHelperException
|
||||
)
|
||||
from ansible.module_utils.facts.compat import ansible_facts
|
||||
|
||||
|
||||
_state_map = dict(
|
||||
present='install',
|
||||
absent='uninstall',
|
||||
uninstall_all='uninstall-all',
|
||||
upgrade_all='upgrade-all',
|
||||
reinstall_all='reinstall-all',
|
||||
)
|
||||
|
||||
|
||||
class PipX(CmdStateModuleHelper):
|
||||
output_params = ['name', 'source', 'index_url', 'force', 'install_deps']
|
||||
module = dict(
|
||||
argument_spec=dict(
|
||||
state=dict(type='str', default='install',
|
||||
choices=[
|
||||
'present', 'absent', 'install', 'uninstall', 'uninstall_all',
|
||||
'inject', 'upgrade', 'upgrade_all', 'reinstall', 'reinstall_all']),
|
||||
name=dict(type='str'),
|
||||
source=dict(type='str'),
|
||||
install_deps=dict(type='bool', default=False),
|
||||
inject_packages=dict(type='list', elements='str'),
|
||||
force=dict(type='bool', default=False),
|
||||
include_injected=dict(type='bool', default=False),
|
||||
index_url=dict(type='str'),
|
||||
python=dict(type='str'),
|
||||
executable=dict(type='path')
|
||||
),
|
||||
required_if=[
|
||||
('state', 'present', ['name']),
|
||||
('state', 'install', ['name']),
|
||||
('state', 'absent', ['name']),
|
||||
('state', 'uninstall', ['name']),
|
||||
('state', 'inject', ['name', 'inject_packages']),
|
||||
],
|
||||
supports_check_mode=True,
|
||||
)
|
||||
command_args_formats = dict(
|
||||
state=dict(fmt=lambda v: [_state_map.get(v, v)]),
|
||||
name_source=dict(fmt=lambda n, s: [s] if s else [n], stars=1),
|
||||
install_deps=dict(fmt="--install-deps", style=ArgFormat.BOOLEAN),
|
||||
inject_packages=dict(fmt=lambda v: v),
|
||||
force=dict(fmt="--force", style=ArgFormat.BOOLEAN),
|
||||
include_injected=dict(fmt="--include-injected", style=ArgFormat.BOOLEAN),
|
||||
index_url=dict(fmt=('--index-url', '{0}'),),
|
||||
python=dict(fmt=('--python', '{0}'),),
|
||||
_list=dict(fmt=('list', '--include-injected', '--json'), style=ArgFormat.BOOLEAN),
|
||||
)
|
||||
check_rc = True
|
||||
|
||||
def _retrieve_installed(self):
|
||||
def process_list(rc, out, err):
|
||||
if not out:
|
||||
return {}
|
||||
|
||||
results = {}
|
||||
raw_data = json.loads(out)
|
||||
for venv_name, venv in raw_data['venvs'].items():
|
||||
results[venv_name] = {
|
||||
'version': venv['metadata']['main_package']['package_version'],
|
||||
'injected': dict(
|
||||
(k, v['package_version']) for k, v in venv['metadata']['injected_packages'].items()
|
||||
),
|
||||
}
|
||||
return results
|
||||
|
||||
installed = self.run_command(params=[{'_list': True}], process_output=process_list,
|
||||
publish_rc=False, publish_out=False, publish_err=False)
|
||||
|
||||
if self.vars.name is not None:
|
||||
app_list = installed.get(self.vars.name)
|
||||
if app_list:
|
||||
return {self.vars.name: app_list}
|
||||
else:
|
||||
return {}
|
||||
|
||||
return installed
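# For reference, a trimmed, illustrative `pipx list --include-injected --json`
# payload that process_list() above understands looks roughly like:
# {"venvs": {"tox": {"metadata": {"main_package": {"package_version": "3.24.0"},
#                                 "injected_packages": {}}}}}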
|
||||
|
||||
def __init_module__(self):
|
||||
if self.vars.executable:
|
||||
self.command = [self.vars.executable]
|
||||
else:
|
||||
facts = ansible_facts(self.module, gather_subset=['python'])
|
||||
self.command = [facts['python']['executable'], '-m', 'pipx']
|
||||
|
||||
self.vars.set('will_change', False, output=False, change=True)
|
||||
self.vars.set('application', self._retrieve_installed(), change=True, diff=True)
|
||||
|
||||
def __quit_module__(self):
|
||||
self.vars.application = self._retrieve_installed()
|
||||
|
||||
def state_install(self):
|
||||
if not self.vars.application or self.vars.force:
|
||||
self.vars.will_change = True
|
||||
if not self.module.check_mode:
|
||||
self.run_command(params=['state', 'index_url', 'install_deps', 'force', 'python',
|
||||
{'name_source': [self.vars.name, self.vars.source]}])
|
||||
|
||||
state_present = state_install
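# Illustrative expansion of the run_command() call in state_install above
# (assumed values): with name=tox, force=True and everything else left at its
# default, the helper runs roughly: <python> -m pipx install --force tox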
|
||||
|
||||
def state_upgrade(self):
|
||||
if not self.vars.application:
|
||||
raise ModuleHelperException(
|
||||
"Trying to upgrade a non-existent application: {0}".format(self.vars.name))
|
||||
if self.vars.force:
|
||||
self.vars.will_change = True
|
||||
if not self.module.check_mode:
|
||||
self.run_command(params=['state', 'index_url', 'install_deps', 'force', 'name'])
|
||||
|
||||
def state_uninstall(self):
|
||||
if self.vars.application and not self.module.check_mode:
|
||||
self.run_command(params=['state', 'name'])
|
||||
|
||||
state_absent = state_uninstall
|
||||
|
||||
def state_reinstall(self):
|
||||
if not self.vars.application:
|
||||
raise ModuleHelperException(
|
||||
"Trying to reinstall a non-existent application: {0}".format(self.vars.name))
|
||||
self.vars.will_change = True
|
||||
if not self.module.check_mode:
|
||||
self.run_command(params=['state', 'name', 'python'])
|
||||
|
||||
def state_inject(self):
|
||||
if not self.vars.application:
|
||||
raise ModuleHelperException(
|
||||
"Trying to inject packages into a non-existent application: {0}".format(self.vars.name))
|
||||
if self.vars.force:
|
||||
self.vars.will_change = True
|
||||
if not self.module.check_mode:
|
||||
self.run_command(params=['state', 'index_url', 'force', 'name', 'inject_packages'])
|
||||
|
||||
def state_uninstall_all(self):
|
||||
if not self.module.check_mode:
|
||||
self.run_command(params=['state'])
|
||||
|
||||
def state_reinstall_all(self):
|
||||
if not self.module.check_mode:
|
||||
self.run_command(params=['state', 'python'])
|
||||
|
||||
def state_upgrade_all(self):
|
||||
if self.vars.force:
|
||||
self.vars.will_change = True
|
||||
if not self.module.check_mode:
|
||||
self.run_command(params=['state', 'include_injected', 'force'])
|
||||
|
||||
|
||||
def main():
|
||||
PipX.execute()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -120,7 +120,7 @@ def selfupdate(module, port_path):
|
||||
changed = False
|
||||
msg = "Macports already up-to-date"
|
||||
|
||||
return (changed, msg)
|
||||
return (changed, msg, out, err)
|
||||
else:
|
||||
module.fail_json(msg="Failed to update Macports", stdout=out, stderr=err)
|
||||
|
||||
@@ -134,11 +134,11 @@ def upgrade(module, port_path):
|
||||
if out.strip() == "Nothing to upgrade.":
|
||||
changed = False
|
||||
msg = "Ports already upgraded"
|
||||
return (changed, msg)
|
||||
return (changed, msg, out, err)
|
||||
elif rc == 0:
|
||||
changed = True
|
||||
msg = "Outdated ports upgraded successfully"
|
||||
return (changed, msg)
|
||||
return (changed, msg, out, err)
|
||||
else:
|
||||
module.fail_json(msg="Failed to upgrade outdated ports", stdout=out, stderr=err)
|
||||
|
||||
@@ -165,7 +165,7 @@ def query_port(module, port_path, name, state="present"):
|
||||
return False
|
||||
|
||||
|
||||
def remove_ports(module, port_path, ports):
|
||||
def remove_ports(module, port_path, ports, stdout, stderr):
|
||||
""" Uninstalls one or more ports if installed. """
|
||||
|
||||
remove_c = 0
|
||||
@@ -176,20 +176,21 @@ def remove_ports(module, port_path, ports):
|
||||
continue
|
||||
|
||||
rc, out, err = module.run_command("%s uninstall %s" % (port_path, port))
|
||||
|
||||
stdout += out
|
||||
stderr += err
|
||||
if query_port(module, port_path, port):
|
||||
module.fail_json(msg="Failed to remove %s: %s" % (port, err))
|
||||
module.fail_json(msg="Failed to remove %s: %s" % (port, err), stdout=stdout, stderr=stderr)
|
||||
|
||||
remove_c += 1
|
||||
|
||||
if remove_c > 0:
|
||||
|
||||
module.exit_json(changed=True, msg="Removed %s port(s)" % remove_c)
|
||||
module.exit_json(changed=True, msg="Removed %s port(s)" % remove_c, stdout=stdout, stderr=stderr)
|
||||
|
||||
module.exit_json(changed=False, msg="Port(s) already absent")
|
||||
module.exit_json(changed=False, msg="Port(s) already absent", stdout=stdout, stderr=stderr)
|
||||
|
||||
|
||||
def install_ports(module, port_path, ports, variant):
|
||||
def install_ports(module, port_path, ports, variant, stdout, stderr):
|
||||
""" Installs one or more ports if not already installed. """
|
||||
|
||||
install_c = 0
|
||||
@@ -199,66 +200,70 @@ def install_ports(module, port_path, ports, variant):
|
||||
continue
|
||||
|
||||
rc, out, err = module.run_command("%s install %s %s" % (port_path, port, variant))
|
||||
|
||||
stdout += out
|
||||
stderr += err
|
||||
if not query_port(module, port_path, port):
|
||||
module.fail_json(msg="Failed to install %s: %s" % (port, err))
|
||||
module.fail_json(msg="Failed to install %s: %s" % (port, err), stdout=stdout, stderr=stderr)
|
||||
|
||||
install_c += 1
|
||||
|
||||
if install_c > 0:
|
||||
module.exit_json(changed=True, msg="Installed %s port(s)" % (install_c))
|
||||
module.exit_json(changed=True, msg="Installed %s port(s)" % (install_c), stdout=stdout, stderr=stderr)
|
||||
|
||||
module.exit_json(changed=False, msg="Port(s) already present")
|
||||
module.exit_json(changed=False, msg="Port(s) already present", stdout=stdout, stderr=stderr)
|
||||
|
||||
|
||||
def activate_ports(module, port_path, ports):
|
||||
def activate_ports(module, port_path, ports, stdout, stderr):
|
||||
""" Activate a port if it's inactive. """
|
||||
|
||||
activate_c = 0
|
||||
|
||||
for port in ports:
|
||||
if not query_port(module, port_path, port):
|
||||
module.fail_json(msg="Failed to activate %s, port(s) not present" % (port))
|
||||
module.fail_json(msg="Failed to activate %s, port(s) not present" % (port), stdout=stdout, stderr=stderr)
|
||||
|
||||
if query_port(module, port_path, port, state="active"):
|
||||
continue
|
||||
|
||||
rc, out, err = module.run_command("%s activate %s" % (port_path, port))
|
||||
stdout += out
|
||||
stderr += err
|
||||
|
||||
if not query_port(module, port_path, port, state="active"):
|
||||
module.fail_json(msg="Failed to activate %s: %s" % (port, err))
|
||||
module.fail_json(msg="Failed to activate %s: %s" % (port, err), stdout=stdout, stderr=stderr)
|
||||
|
||||
activate_c += 1
|
||||
|
||||
if activate_c > 0:
|
||||
module.exit_json(changed=True, msg="Activated %s port(s)" % (activate_c))
|
||||
module.exit_json(changed=True, msg="Activated %s port(s)" % (activate_c), stdout=stdout, stderr=stderr)
|
||||
|
||||
module.exit_json(changed=False, msg="Port(s) already active")
|
||||
module.exit_json(changed=False, msg="Port(s) already active", stdout=stdout, stderr=stderr)
|
||||
|
||||
|
||||
def deactivate_ports(module, port_path, ports):
|
||||
def deactivate_ports(module, port_path, ports, stdout, stderr):
|
||||
""" Deactivate a port if it's active. """
|
||||
|
||||
deactivated_c = 0
|
||||
|
||||
for port in ports:
|
||||
if not query_port(module, port_path, port):
|
||||
module.fail_json(msg="Failed to deactivate %s, port(s) not present" % (port))
|
||||
module.fail_json(msg="Failed to deactivate %s, port(s) not present" % (port), stdout=stdout, stderr=stderr)
|
||||
|
||||
if not query_port(module, port_path, port, state="active"):
|
||||
continue
|
||||
|
||||
rc, out, err = module.run_command("%s deactivate %s" % (port_path, port))
|
||||
|
||||
stdout += out
|
||||
stderr += err
|
||||
if query_port(module, port_path, port, state="active"):
|
||||
module.fail_json(msg="Failed to deactivate %s: %s" % (port, err))
|
||||
module.fail_json(msg="Failed to deactivate %s: %s" % (port, err), stdout=stdout, stderr=stderr)
|
||||
|
||||
deactivated_c += 1
|
||||
|
||||
if deactivated_c > 0:
|
||||
module.exit_json(changed=True, msg="Deactivated %s port(s)" % (deactivated_c))
|
||||
module.exit_json(changed=True, msg="Deactivated %s port(s)" % (deactivated_c), stdout=stdout, stderr=stderr)
|
||||
|
||||
module.exit_json(changed=False, msg="Port(s) already inactive")
|
||||
module.exit_json(changed=False, msg="Port(s) already inactive", stdout=stdout, stderr=stderr)
|
||||
|
||||
|
||||
def main():
|
||||
@@ -272,35 +277,42 @@ def main():
|
||||
)
|
||||
)
|
||||
|
||||
stdout = ""
|
||||
stderr = ""
|
||||
|
||||
port_path = module.get_bin_path('port', True, ['/opt/local/bin'])
|
||||
|
||||
p = module.params
|
||||
|
||||
if p["selfupdate"]:
|
||||
(changed, msg) = selfupdate(module, port_path)
|
||||
(changed, msg, out, err) = selfupdate(module, port_path)
|
||||
stdout += out
|
||||
stderr += err
|
||||
if not (p["name"] or p["upgrade"]):
|
||||
module.exit_json(changed=changed, msg=msg)
|
||||
module.exit_json(changed=changed, msg=msg, stdout=stdout, stderr=stderr)
|
||||
|
||||
if p["upgrade"]:
|
||||
(changed, msg) = upgrade(module, port_path)
|
||||
(changed, msg, out, err) = upgrade(module, port_path)
|
||||
stdout += out
|
||||
stderr += err
|
||||
if not p["name"]:
|
||||
module.exit_json(changed=changed, msg=msg)
|
||||
module.exit_json(changed=changed, msg=msg, stdout=stdout, stderr=stderr)
|
||||
|
||||
pkgs = p["name"]
|
||||
|
||||
variant = p["variant"]
|
||||
|
||||
if p["state"] in ["present", "installed"]:
|
||||
install_ports(module, port_path, pkgs, variant)
|
||||
install_ports(module, port_path, pkgs, variant, stdout, stderr)
|
||||
|
||||
elif p["state"] in ["absent", "removed"]:
|
||||
remove_ports(module, port_path, pkgs)
|
||||
remove_ports(module, port_path, pkgs, stdout, stderr)
|
||||
|
||||
elif p["state"] == "active":
|
||||
activate_ports(module, port_path, pkgs)
|
||||
activate_ports(module, port_path, pkgs, stdout, stderr)
|
||||
|
||||
elif p["state"] == "inactive":
|
||||
deactivate_ports(module, port_path, pkgs)
|
||||
deactivate_ports(module, port_path, pkgs, stdout, stderr)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
@@ -134,6 +134,7 @@ EXAMPLES = '''
|
||||
'''
|
||||
|
||||
|
||||
from collections import defaultdict
|
||||
import re
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
@@ -226,7 +227,8 @@ def remove_packages(module, pkgng_path, packages, dir_arg):
|
||||
|
||||
|
||||
def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg, state, ignoreosver):
|
||||
install_c = 0
|
||||
action_queue = defaultdict(list)
|
||||
action_count = defaultdict(int)
|
||||
stdout = ""
|
||||
stderr = ""
|
||||
|
||||
@@ -263,29 +265,48 @@ def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg, sta
|
||||
if already_installed and state == "present":
|
||||
continue
|
||||
|
||||
update_available = query_update(module, pkgng_path, package, dir_arg, old_pkgng, pkgsite)
|
||||
if not update_available and already_installed and state == "latest":
|
||||
if (
|
||||
already_installed and state == "latest"
|
||||
and not query_update(module, pkgng_path, package, dir_arg, old_pkgng, pkgsite)
|
||||
):
|
||||
continue
|
||||
|
||||
if not module.check_mode:
|
||||
if already_installed:
|
||||
action = "upgrade"
|
||||
else:
|
||||
action = "install"
|
||||
if already_installed:
|
||||
action_queue["upgrade"].append(package)
|
||||
else:
|
||||
action_queue["install"].append(package)
|
||||
|
||||
if not module.check_mode:
|
||||
# install/upgrade all named packages with one pkg command
|
||||
for (action, package_list) in action_queue.items():
|
||||
packages = ' '.join(package_list)
|
||||
if old_pkgng:
|
||||
rc, out, err = module.run_command("%s %s %s %s -g -U -y %s" % (batch_var, pkgsite, pkgng_path, action, package))
|
||||
rc, out, err = module.run_command("%s %s %s %s -g -U -y %s" % (batch_var, pkgsite, pkgng_path, action, packages))
|
||||
else:
|
||||
rc, out, err = module.run_command("%s %s %s %s %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, action, pkgsite, package))
|
||||
rc, out, err = module.run_command("%s %s %s %s %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, action, pkgsite, packages))
|
||||
stdout += out
|
||||
stderr += err
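# Illustrative command built by the loop above (package names made up):
# '<batch_var> pkg <dir_arg> install <pkgsite> -g -U -y nginx curl'
# when both packages need installing in the same run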
|
||||
|
||||
if not module.check_mode and not query_package(module, pkgng_path, package, dir_arg):
|
||||
module.fail_json(msg="failed to %s %s: %s" % (action, package, out), stdout=stdout, stderr=stderr)
|
||||
# individually verify packages are in requested state
|
||||
for package in package_list:
|
||||
verified = False
|
||||
if action == 'install':
|
||||
verified = query_package(module, pkgng_path, package, dir_arg)
|
||||
elif action == 'upgrade':
|
||||
verified = not query_update(module, pkgng_path, package, dir_arg, old_pkgng, pkgsite)
|
||||
|
||||
install_c += 1
|
||||
if verified:
|
||||
action_count[action] += 1
|
||||
else:
|
||||
module.fail_json(msg="failed to %s %s" % (action, package), stdout=stdout, stderr=stderr)
|
||||
|
||||
if install_c > 0:
|
||||
return (True, "added %s package(s)" % (install_c), stdout, stderr)
|
||||
if sum(action_count.values()) > 0:
|
||||
past_tense = {'install': 'installed', 'upgrade': 'upgraded'}
|
||||
messages = []
|
||||
for (action, count) in action_count.items():
|
||||
messages.append("%s %s package%s" % (past_tense.get(action, action), count, "s" if count != 1 else ""))
|
||||
|
||||
return (True, '; '.join(messages), stdout, stderr)
|
||||
|
||||
return (False, "package(s) already %s" % (state), stdout, stderr)
|
||||
|
||||
|
||||
@@ -137,6 +137,10 @@ from distutils.version import LooseVersion
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
|
||||
from ansible.module_utils.urls import fetch_url
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
from ansible.module_utils.six.moves import configparser, StringIO
|
||||
from io import open
|
||||
|
||||
REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck']
|
||||
|
||||
@@ -382,12 +386,62 @@ def main():
|
||||
if not alias and state == "present":
|
||||
module.fail_json(msg='Name required when adding non-repo files.')
|
||||
|
||||
# Download / Open and parse .repo file to ensure idempotency
|
||||
if repo and repo.endswith('.repo'):
|
||||
if repo.startswith(('http://', 'https://')):
|
||||
response, info = fetch_url(module=module, url=repo, force=True)
|
||||
if not response or info['status'] != 200:
|
||||
module.fail_json(msg='Error downloading .repo file from provided URL')
|
||||
repofile_text = to_text(response.read(), errors='surrogate_or_strict')
|
||||
else:
|
||||
try:
|
||||
with open(repo, encoding='utf-8') as file:
|
||||
repofile_text = file.read()
|
||||
except IOError:
|
||||
module.fail_json(msg='Error opening .repo file from provided path')
|
||||
|
||||
repofile = configparser.ConfigParser()
|
||||
try:
|
||||
repofile.readfp(StringIO(repofile_text))
|
||||
except configparser.Error:
|
||||
module.fail_json(msg='Invalid format, .repo file could not be parsed')
|
||||
|
||||
# No support for .repo file with zero or more than one repository
|
||||
if len(repofile.sections()) != 1:
|
||||
err = "Invalid format, .repo file contains %s repositories, expected 1" % len(repofile.sections())
|
||||
module.fail_json(msg=err)
|
||||
|
||||
section = repofile.sections()[0]
|
||||
repofile_items = dict(repofile.items(section))
|
||||
# Only proceed if at least baseurl is available
|
||||
if 'baseurl' not in repofile_items:
|
||||
module.fail_json(msg='No baseurl found in .repo file')
|
||||
|
||||
# Set alias (name) and url based on values from .repo file
|
||||
alias = section
|
||||
repodata['alias'] = section
|
||||
repodata['url'] = repofile_items['baseurl']
|
||||
|
||||
# If gpgkey is part of the .repo file, auto import key
|
||||
if 'gpgkey' in repofile_items:
|
||||
auto_import_keys = True
|
||||
|
||||
# Map additional values, if available
|
||||
if 'name' in repofile_items:
|
||||
repodata['name'] = repofile_items['name']
|
||||
if 'enabled' in repofile_items:
|
||||
repodata['enabled'] = repofile_items['enabled']
|
||||
if 'autorefresh' in repofile_items:
|
||||
repodata['autorefresh'] = repofile_items['autorefresh']
|
||||
if 'gpgcheck' in repofile_items:
|
||||
repodata['gpgcheck'] = repofile_items['gpgcheck']
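# Illustrative .repo file that the parsing above accepts (values are made up):
#
#   [example-repo]
#   name=example-repo
#   baseurl=https://repo.example.com/stable/x86_64
#   enabled=1
#   autorefresh=1
#   gpgcheck=1
#   gpgkey=https://repo.example.com/signing_key.pub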
|
||||
|
||||
exists, mod, old_repos = repo_exists(module, repodata, overwrite_multiple)
|
||||
|
||||
if repo:
|
||||
shortname = repo
|
||||
else:
|
||||
if alias:
|
||||
shortname = alias
|
||||
else:
|
||||
shortname = repo
|
||||
|
||||
if state == 'present':
|
||||
if exists and not mod:
|
||||
|
||||
plugins/modules/pipx.py (symbolic link, 1 line)
@@ -0,0 +1 @@
./packaging/language/pipx.py
plugins/modules/proxmox_tasks_info.py (symbolic link, 1 line)
@@ -0,0 +1 @@
cloud/misc/proxmox_tasks_info.py
@@ -168,7 +168,9 @@ EXAMPLES = '''
|
||||
password: "{{ password }}"
|
||||
resource_uri: "/redfish/v1/Managers/1/NetworkProtocol/Oem/Lenovo/DNS"
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.redfish_facts.data }}"
|
||||
|
||||
- name: Get Lenovo FoD key collection resource via GetCollectionResource command
|
||||
@@ -180,7 +182,9 @@ EXAMPLES = '''
|
||||
password: "{{ password }}"
|
||||
resource_uri: "/redfish/v1/Managers/1/Oem/Lenovo/FoD/Keys"
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.redfish_facts.data_list }}"
|
||||
|
||||
- name: Update ComputeSystem property AssetTag via PatchResource command
|
||||
|
||||
@@ -67,7 +67,9 @@ EXAMPLES = '''
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}"
|
||||
|
||||
- name: Get CPU model
|
||||
@@ -78,7 +80,9 @@ EXAMPLES = '''
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.redfish_facts.cpu.entries.0.Model }}"
|
||||
|
||||
- name: Get memory inventory
|
||||
@@ -108,7 +112,9 @@ EXAMPLES = '''
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}"
|
||||
|
||||
- name: Get Volume Inventory
|
||||
@@ -119,7 +125,8 @@ EXAMPLES = '''
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}"
|
||||
|
||||
- name: Get Session information
|
||||
@@ -130,7 +137,9 @@ EXAMPLES = '''
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
|
||||
- name: Print fetched information
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.redfish_facts.session.entries | to_nice_json }}"
|
||||
|
||||
- name: Get default inventory information
|
||||
@@ -139,7 +148,8 @@ EXAMPLES = '''
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
register: result
|
||||
- ansible.builtin.debug:
|
||||
- name: Print fetched information
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ result.redfish_facts | to_nice_json }}"
|
||||
|
||||
- name: Get several inventories
|
||||
|
||||
plugins/modules/rundeck_job_executions_info.py (symbolic link, 1 line)
@@ -0,0 +1 @@
./web_infrastructure/rundeck_job_executions_info.py
1
plugins/modules/rundeck_job_run.py
Symbolic link
1
plugins/modules/rundeck_job_run.py
Symbolic link
@@ -0,0 +1 @@
|
||||
./web_infrastructure/rundeck_job_run.py
|
||||
@@ -211,7 +211,7 @@ class GitLabDeployKey(object):
    @param key_title Title of the key
    '''
    def findDeployKey(self, project, key_title):
-        deployKeys = project.keys.list()
+        deployKeys = project.keys.list(all=True)
        for deployKey in deployKeys:
            if (deployKey.title == key_title):
                return deployKey
@@ -206,10 +206,11 @@ class GitLabGroup(object):
                'project_creation_level': options['project_creation_level'],
                'auto_devops_enabled': options['auto_devops_enabled'],
                'subgroup_creation_level': options['subgroup_creation_level'],
-                'require_two_factor_authentication': options['require_two_factor_authentication'],
            }
            if options.get('description'):
                payload['description'] = options['description']
+            if options.get('require_two_factor_authentication'):
+                payload['require_two_factor_authentication'] = options['require_two_factor_authentication']
            group = self.createGroup(payload)
            changed = True
        else:
@@ -179,9 +179,13 @@ class GitLabGroup(object):

    # get group id if group exists
    def get_group_id(self, gitlab_group):
-        group_exists = self._gitlab.groups.list(search=gitlab_group)
-        if group_exists:
-            return group_exists[0].id
+        groups = self._gitlab.groups.list(search=gitlab_group)
+        for group in groups:
+            if group.full_path == gitlab_group:
+                return group.id
+        for group in groups:
+            if group.path == gitlab_group or group.name == gitlab_group:
+                return group.id

    # get all members in a group
    def get_members_in_a_group(self, gitlab_group_id):
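The rewritten get_group_id prefers an exact full_path match and only then falls back to path or name, so a group can be addressed unambiguously even when several groups share a name across namespaces. A hedged sketch of what this enables for a caller such as gitlab_group_members (host, token, and names are placeholders):

- name: Add a user to a subgroup addressed by its full path
  community.general.gitlab_group_members:
    api_url: https://gitlab.example.com
    api_token: "{{ gitlab_token }}"
    gitlab_group: parent-group/my-subgroup
    gitlab_user: some.user
    access_level: developer
    state: present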
@@ -41,17 +41,27 @@ options:
    aliases: [ state ]
  node_auth:
    description:
-      - The value for C(discovery.sendtargets.auth.authmethod).
+      - The value for C(node.session.auth.authmethod).
    type: str
    default: CHAP
  node_user:
    description:
-      - The value for C(discovery.sendtargets.auth.username).
+      - The value for C(node.session.auth.username).
    type: str
  node_pass:
    description:
-      - The value for C(discovery.sendtargets.auth.password).
+      - The value for C(node.session.auth.password).
    type: str
+  node_user_in:
+    description:
+      - The value for C(node.session.auth.username_in).
+    type: str
+    version_added: 3.8.0
+  node_pass_in:
+    description:
+      - The value for C(node.session.auth.password_in).
+    type: str
+    version_added: 3.8.0
  auto_node_startup:
    description:
      - Whether the target node should be automatically connected at startup.
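The new node_user_in/node_pass_in pair carries the incoming (mutual) CHAP credentials next to the existing outgoing ones. A minimal sketch of a login task using the options documented above (target and portal values are placeholders):

- name: Log in to a target with mutual CHAP authentication
  community.general.open_iscsi:
    target: iqn.2021-10.com.example:storage.disk1
    portal: 10.0.0.10
    login: true
    node_user: outgoing_user
    node_pass: "{{ outgoing_secret }}"
    node_user_in: incoming_user
    node_pass_in: "{{ incoming_secret }}"

The module enforces the pairing via required_together, as the argument_spec change below shows.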
@@ -191,6 +201,8 @@ def target_login(module, target, portal=None, port=None):
    node_auth = module.params['node_auth']
    node_user = module.params['node_user']
    node_pass = module.params['node_pass']
+    node_user_in = module.params['node_user_in']
+    node_pass_in = module.params['node_pass_in']

    if node_user:
        params = [('node.session.auth.authmethod', node_auth),
@@ -200,6 +212,13 @@ def target_login(module, target, portal=None, port=None):
            cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', name, '--value', value]
            module.run_command(cmd, check_rc=True)

+    if node_user_in:
+        params = [('node.session.auth.username_in', node_user_in),
+                  ('node.session.auth.password_in', node_pass_in)]
+        for (name, value) in params:
+            cmd = '%s --mode node --targetname %s --op=update --name %s --value %s' % (iscsiadm_cmd, target, name, value)
+            module.run_command(cmd, check_rc=True)
+
    cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--login']
    if portal is not None and port is not None:
        cmd.append('--portal')
@@ -277,6 +296,8 @@ def main():
            node_auth=dict(type='str', default='CHAP'),
            node_user=dict(type='str'),
            node_pass=dict(type='str', no_log=True),
+            node_user_in=dict(type='str'),
+            node_pass_in=dict(type='str', no_log=True),

            # actions
            login=dict(type='bool', aliases=['state']),
@@ -286,7 +307,7 @@ def main():
            show_nodes=dict(type='bool', default=False),
        ),

-        required_together=[['node_user', 'node_pass']],
+        required_together=[['node_user', 'node_pass'], ['node_user_in', 'node_pass_in']],
        required_if=[('discover', True, ['portal'])],
        supports_check_mode=True,
    )
@@ -54,6 +54,8 @@ options:
    description:
      - Insert the corresponding rule as rule number NUM.
      - Note that ufw numbers rules starting with 1.
+      - If I(delete=true) and a value is provided for I(insert),
+        then I(insert) is ignored.
    type: int
  insert_relative_to:
    description:
@@ -120,6 +122,8 @@ options:
  delete:
    description:
      - Delete rule.
+      - If I(delete=true) and a value is provided for I(insert),
+        then I(insert) is ignored.
    type: bool
    default: false
  interface:
@@ -511,12 +515,12 @@ def main():
                             'interface_in and interface_out')
    # Rules are constructed according to the long format
    #
-    # ufw [--dry-run] [route] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
+    # ufw [--dry-run] [route] [delete | insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
    #     [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
    #     [proto protocol] [app application] [comment COMMENT]
    cmd.append([module.boolean(params['route']), 'route'])
    cmd.append([module.boolean(params['delete']), 'delete'])
-    if params['insert'] is not None:
+    if params['insert'] is not None and not params['delete']:
        relative_to_cmd = params['insert_relative_to']
        if relative_to_cmd == 'zero':
            insert_to = params['insert']
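Taken together, the documentation and code changes mean a rule task can be flipped to delete=true without first removing its insert value. A short sketch of the documented behaviour (port and position are placeholders):

- name: Insert an allow rule at position 1
  community.general.ufw:
    rule: allow
    port: '22'
    proto: tcp
    insert: 1

- name: Delete the same rule; the insert value is now simply ignored
  community.general.ufw:
    rule: allow
    port: '22'
    proto: tcp
    insert: 1
    delete: true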
@@ -142,7 +142,7 @@ def main():
            # Clean up old failed deployment
            os.remove(os.path.join(deploy_path, "%s.failed" % deployment))

-        shutil.copyfile(src, os.path.join(deploy_path, deployment))
+        module.preserved_copy(src, os.path.join(deploy_path, deployment))
        while not deployed:
            deployed = is_deployed(deploy_path, deployment)
            if is_failed(deploy_path, deployment):
@@ -153,7 +153,7 @@ def main():
    if state == 'present' and deployed:
        if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
            os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
-            shutil.copyfile(src, os.path.join(deploy_path, deployment))
+            module.preserved_copy(src, os.path.join(deploy_path, deployment))
            deployed = False
            while not deployed:
                deployed = is_deployed(deploy_path, deployment)
@@ -0,0 +1,193 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Phillipe Smith <phsmithcc@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rundeck_job_executions_info
|
||||
short_description: Query executions for a Rundeck job
|
||||
description:
|
||||
- This module gets the list of executions for a specified Rundeck job.
|
||||
author: "Phillipe Smith (@phsmith)"
|
||||
version_added: 3.8.0
|
||||
options:
|
||||
job_id:
|
||||
type: str
|
||||
description:
|
||||
- The job unique ID.
|
||||
required: true
|
||||
status:
|
||||
type: str
|
||||
description:
|
||||
- The job status to filter.
|
||||
choices: [succeeded, failed, aborted, running]
|
||||
max:
|
||||
type: int
|
||||
description:
|
||||
- Max results to return.
|
||||
default: 20
|
||||
offset:
|
||||
type: int
|
||||
description:
|
||||
- The start point to return the results.
|
||||
default: 0
|
||||
extends_documentation_fragment:
|
||||
- community.general.rundeck
|
||||
- url
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get Rundeck job executions info
|
||||
community.general.rundeck_job_executions_info:
|
||||
url: "https://rundeck.example.org"
|
||||
api_version: 39
|
||||
api_token: "mytoken"
|
||||
job_id: "xxxxxxxxxxxxxxxxx"
|
||||
register: rundeck_job_executions_info
|
||||
|
||||
- name: Show Rundeck job executions info
|
||||
ansible.builtin.debug:
|
||||
var: rundeck_job_executions_info.executions
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
paging:
|
||||
description: Results pagination info.
|
||||
returned: success
|
||||
type: dict
|
||||
contains:
|
||||
count:
|
||||
description: Number of results in the response.
|
||||
type: int
|
||||
returned: success
|
||||
total:
|
||||
description: Total number of results.
|
||||
type: int
|
||||
returned: success
|
||||
offset:
|
||||
description: Offset from first of all results.
|
||||
type: int
|
||||
returned: success
|
||||
max:
|
||||
description: Maximum number of results per page.
|
||||
type: int
|
||||
returned: success
|
||||
sample: {
|
||||
"count": 20,
|
||||
"total": 100,
|
||||
"offset": 0,
|
||||
"max": 20
|
||||
}
|
||||
executions:
|
||||
description: Job executions list.
|
||||
returned: always
|
||||
type: list
|
||||
elements: dict
|
||||
sample: [
|
||||
{
|
||||
"id": 1,
|
||||
"href": "https://rundeck.example.org/api/39/execution/1",
|
||||
"permalink": "https://rundeck.example.org/project/myproject/execution/show/1",
|
||||
"status": "succeeded",
|
||||
"project": "myproject",
|
||||
"executionType": "user",
|
||||
"user": "admin",
|
||||
"date-started": {
|
||||
"unixtime": 1633525515026,
|
||||
"date": "2021-10-06T13:05:15Z"
|
||||
},
|
||||
"date-ended": {
|
||||
"unixtime": 1633525518386,
|
||||
"date": "2021-10-06T13:05:18Z"
|
||||
},
|
||||
"job": {
|
||||
"id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
|
||||
"averageDuration": 6381,
|
||||
"name": "Test",
|
||||
"group": "",
|
||||
"project": "myproject",
|
||||
"description": "",
|
||||
"options": {
|
||||
"exit_code": "0"
|
||||
},
|
||||
"href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
|
||||
"permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a"
|
||||
},
|
||||
"description": "Plugin[com.batix.rundeck.plugins.AnsiblePlaybookInlineWorkflowStep, nodeStep: false]",
|
||||
"argstring": "-exit_code 0",
|
||||
"serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068"
|
||||
}
|
||||
]
|
||||
'''
|
||||
|
||||
# Modules import
|
||||
import json
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
from ansible.module_utils.six.moves.urllib.parse import quote
|
||||
from ansible_collections.community.general.plugins.module_utils.rundeck import (
|
||||
api_argument_spec,
|
||||
api_request
|
||||
)
|
||||
|
||||
|
||||
class RundeckJobExecutionsInfo(object):
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
self.url = self.module.params["url"]
|
||||
self.api_version = self.module.params["api_version"]
|
||||
self.job_id = self.module.params["job_id"]
|
||||
self.offset = self.module.params["offset"]
|
||||
self.max = self.module.params["max"]
|
||||
self.status = self.module.params["status"] or ""
|
||||
|
||||
def job_executions(self):
|
||||
response, info = api_request(
|
||||
module=self.module,
|
||||
endpoint="job/%s/executions?offset=%s&max=%s&status=%s"
|
||||
% (quote(self.job_id), self.offset, self.max, self.status),
|
||||
method="GET"
|
||||
)
|
||||
|
||||
if info["status"] != 200:
|
||||
self.module.fail_json(
|
||||
msg=info["msg"],
|
||||
executions=response
|
||||
)
|
||||
|
||||
self.module.exit_json(msg="Executions info result", **response)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = api_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
job_id=dict(required=True, type="str"),
|
||||
offset=dict(type="int", default=0),
|
||||
max=dict(type="int", default=20),
|
||||
status=dict(
|
||||
type="str",
|
||||
choices=["succeeded", "failed", "aborted", "running"]
|
||||
)
|
||||
))
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
if module.params["api_version"] < 14:
|
||||
module.fail_json(msg="API version should be at least 14")
|
||||
|
||||
rundeck = RundeckJobExecutionsInfo(module)
|
||||
rundeck.job_executions()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
plugins/modules/web_infrastructure/rundeck_job_run.py (new file, 317 lines)
@@ -0,0 +1,317 @@
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Phillipe Smith <phsmithcc@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: rundeck_job_run
|
||||
short_description: Run a Rundeck job
|
||||
description:
|
||||
- This module runs a Rundeck job specified by ID.
|
||||
author: "Phillipe Smith (@phsmith)"
|
||||
version_added: 3.8.0
|
||||
options:
|
||||
job_id:
|
||||
type: str
|
||||
description:
|
||||
- The job unique ID.
|
||||
required: true
|
||||
job_options:
|
||||
type: dict
|
||||
description:
|
||||
- The job options for the steps.
|
||||
- Numeric values must be quoted.
|
||||
filter_nodes:
|
||||
type: str
|
||||
description:
|
||||
- Filter the nodes where the jobs must run.
|
||||
- See U(https://docs.rundeck.com/docs/manual/11-node-filters.html#node-filter-syntax).
|
||||
run_at_time:
|
||||
type: str
|
||||
description:
|
||||
- Schedule the job execution to run at specific date and time.
|
||||
- ISO-8601 date and time format like C(2021-10-05T15:45:00-03:00).
|
||||
loglevel:
|
||||
type: str
|
||||
description:
|
||||
- Log level configuration.
|
||||
choices: [debug, verbose, info, warn, error]
|
||||
default: info
|
||||
wait_execution:
|
||||
type: bool
|
||||
description:
|
||||
- Wait until the job finished the execution.
|
||||
default: true
|
||||
wait_execution_delay:
|
||||
type: int
|
||||
description:
|
||||
- Delay, in seconds, between job execution status check requests.
|
||||
default: 5
|
||||
wait_execution_timeout:
|
||||
type: int
|
||||
description:
|
||||
- Job execution wait timeout in seconds.
|
||||
- If the timeout is reached, the job will be aborted.
|
||||
- Keep in mind that there is a sleep based on I(wait_execution_delay) after each job status check.
|
||||
default: 120
|
||||
abort_on_timeout:
|
||||
type: bool
|
||||
description:
|
||||
- Send a job abort request if exceeded the I(wait_execution_timeout) specified.
|
||||
default: false
|
||||
extends_documentation_fragment:
|
||||
- community.general.rundeck
|
||||
- url
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Run a Rundeck job
|
||||
community.general.rundeck_job_run:
|
||||
url: "https://rundeck.example.org"
|
||||
api_version: 39
|
||||
api_token: "mytoken"
|
||||
job_id: "xxxxxxxxxxxxxxxxx"
|
||||
register: rundeck_job_run
|
||||
|
||||
- name: Show execution info
|
||||
ansible.builtin.debug:
|
||||
var: rundeck_job_run.execution_info
|
||||
|
||||
- name: Run a Rundeck job with options
|
||||
community.general.rundeck_job_run:
|
||||
url: "https://rundeck.example.org"
|
||||
api_version: 39
|
||||
api_token: "mytoken"
|
||||
job_id: "xxxxxxxxxxxxxxxxx"
|
||||
job_options:
|
||||
option_1: "value_1"
|
||||
option_2: "value_3"
|
||||
option_3: "value_3"
|
||||
register: rundeck_job_run
|
||||
|
||||
- name: Run a Rundeck job with timeout, delay between status check and abort on timeout
|
||||
community.general.rundeck_job_run:
|
||||
url: "https://rundeck.example.org"
|
||||
api_version: 39
|
||||
api_token: "mytoken"
|
||||
job_id: "xxxxxxxxxxxxxxxxx"
|
||||
wait_execution_timeout: 30
|
||||
wait_execution_delay: 10
|
||||
abort_on_timeout: true
|
||||
register: rundeck_job_run
|
||||
|
||||
- name: Schedule a Rundeck job
|
||||
community.general.rundeck_job_run:
|
||||
url: "https://rundeck.example.org"
|
||||
api_version: 39
|
||||
api_token: "mytoken"
|
||||
job_id: "xxxxxxxxxxxxxxxxx"
|
||||
run_at_time: "2021-10-05T15:45:00-03:00"
|
||||
register: rundeck_job_schedule
|
||||
|
||||
- name: Fire-and-forget a Rundeck job
|
||||
community.general.rundeck_job_run:
|
||||
url: "https://rundeck.example.org"
|
||||
api_version: 39
|
||||
api_token: "mytoken"
|
||||
job_id: "xxxxxxxxxxxxxxxxx"
|
||||
wait_execution: false
|
||||
register: rundeck_job_run
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
execution_info:
|
||||
description: Rundeck job execution metadata.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"msg": "Job execution succeeded!",
|
||||
"execution_info": {
|
||||
"id": 1,
|
||||
"href": "https://rundeck.example.org/api/39/execution/1",
|
||||
"permalink": "https://rundeck.example.org/project/myproject/execution/show/1",
|
||||
"status": "succeeded",
|
||||
"project": "myproject",
|
||||
"executionType": "user",
|
||||
"user": "admin",
|
||||
"date-started": {
|
||||
"unixtime": 1633449020784,
|
||||
"date": "2021-10-05T15:50:20Z"
|
||||
},
|
||||
"date-ended": {
|
||||
"unixtime": 1633449026358,
|
||||
"date": "2021-10-05T15:50:26Z"
|
||||
},
|
||||
"job": {
|
||||
"id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
|
||||
"averageDuration": 4917,
|
||||
"name": "Test",
|
||||
"group": "",
|
||||
"project": "myproject",
|
||||
"description": "",
|
||||
"options": {
|
||||
"exit_code": "0"
|
||||
},
|
||||
"href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
|
||||
"permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a"
|
||||
},
|
||||
"description": "sleep 5 && echo 'Test!' && exit ${option.exit_code}",
|
||||
"argstring": "-exit_code 0",
|
||||
"serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068",
|
||||
"successfulNodes": [
|
||||
"localhost"
|
||||
],
|
||||
"output": "Test!"
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
# Modules import
|
||||
import json
|
||||
from datetime import datetime, timedelta
|
||||
from time import sleep
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
from ansible.module_utils.six.moves.urllib.parse import quote
|
||||
from ansible_collections.community.general.plugins.module_utils.rundeck import (
|
||||
api_argument_spec,
|
||||
api_request
|
||||
)
|
||||
|
||||
|
||||
class RundeckJobRun(object):
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
self.url = self.module.params["url"]
|
||||
self.api_version = self.module.params["api_version"]
|
||||
self.job_id = self.module.params["job_id"]
|
||||
self.job_options = self.module.params["job_options"] or {}
|
||||
self.filter_nodes = self.module.params["filter_nodes"] or ""
|
||||
self.run_at_time = self.module.params["run_at_time"] or ""
|
||||
self.loglevel = self.module.params["loglevel"].upper()
|
||||
self.wait_execution = self.module.params['wait_execution']
|
||||
self.wait_execution_delay = self.module.params['wait_execution_delay']
|
||||
self.wait_execution_timeout = self.module.params['wait_execution_timeout']
|
||||
self.abort_on_timeout = self.module.params['abort_on_timeout']
|
||||
|
||||
for k, v in self.job_options.items():
|
||||
if not isinstance(v, str):
|
||||
self.module.exit_json(
|
||||
msg="Job option '%s' value must be a string" % k,
|
||||
execution_info={}
|
||||
)
|
||||
|
||||
def job_status_check(self, execution_id):
|
||||
response = dict()
|
||||
timeout = False
|
||||
due = datetime.now() + timedelta(seconds=self.wait_execution_timeout)
|
||||
|
||||
while not timeout:
|
||||
endpoint = "execution/%d" % execution_id
|
||||
response = api_request(module=self.module, endpoint=endpoint)[0]
|
||||
output = api_request(module=self.module,
|
||||
endpoint="execution/%d/output" % execution_id)
|
||||
log_output = "\n".join([x["log"] for x in output[0]["entries"]])
|
||||
response.update({"output": log_output})
|
||||
|
||||
if response["status"] == "aborted":
|
||||
break
|
||||
elif response["status"] == "scheduled":
|
||||
self.module.exit_json(msg="Job scheduled to run at %s" % self.run_at_time,
|
||||
execution_info=response,
|
||||
changed=True)
|
||||
elif response["status"] == "failed":
|
||||
self.module.fail_json(msg="Job execution failed",
|
||||
execution_info=response)
|
||||
elif response["status"] == "succeeded":
|
||||
self.module.exit_json(msg="Job execution succeeded!",
|
||||
execution_info=response)
|
||||
|
||||
if datetime.now() >= due:
|
||||
timeout = True
|
||||
break
|
||||
|
||||
# Wait for 5s before continue
|
||||
sleep(self.wait_execution_delay)
|
||||
|
||||
response.update({"timed_out": timeout})
|
||||
return response
|
||||
|
||||
def job_run(self):
|
||||
response, info = api_request(
|
||||
module=self.module,
|
||||
endpoint="job/%s/run" % quote(self.job_id),
|
||||
method="POST",
|
||||
data={
|
||||
"loglevel": self.loglevel,
|
||||
"options": self.job_options,
|
||||
"runAtTime": self.run_at_time,
|
||||
"filter": self.filter_nodes
|
||||
}
|
||||
)
|
||||
|
||||
if info["status"] != 200:
|
||||
self.module.fail_json(msg=info["msg"])
|
||||
|
||||
if not self.wait_execution:
|
||||
self.module.exit_json(msg="Job run send successfully!",
|
||||
execution_info=response)
|
||||
|
||||
job_status = self.job_status_check(response["id"])
|
||||
|
||||
if job_status["timed_out"]:
|
||||
if self.abort_on_timeout:
|
||||
api_request(
|
||||
module=self.module,
|
||||
endpoint="execution/%s/abort" % response['id'],
|
||||
method="GET"
|
||||
)
|
||||
|
||||
abort_status = self.job_status_check(response["id"])
|
||||
|
||||
self.module.fail_json(msg="Job execution aborted due the timeout specified",
|
||||
execution_info=abort_status)
|
||||
|
||||
self.module.fail_json(msg="Job execution timed out",
|
||||
execution_info=job_status)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = api_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
job_id=dict(required=True, type="str"),
|
||||
job_options=dict(type="dict"),
|
||||
filter_nodes=dict(type="str"),
|
||||
run_at_time=dict(type="str"),
|
||||
wait_execution=dict(type="bool", default=True),
|
||||
wait_execution_delay=dict(type="int", default=5),
|
||||
wait_execution_timeout=dict(type="int", default=120),
|
||||
abort_on_timeout=dict(type="bool", default=False),
|
||||
loglevel=dict(
|
||||
type="str",
|
||||
choices=["debug", "verbose", "info", "warn", "error"],
|
||||
default="info"
|
||||
)
|
||||
))
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=False
|
||||
)
|
||||
|
||||
if module.params["api_version"] < 14:
|
||||
module.fail_json(msg="API version should be at least 14")
|
||||
|
||||
rundeck = RundeckJobRun(module)
|
||||
rundeck.job_run()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
@@ -2,3 +2,4 @@ needs/root
shippable/posix/group2
destructive
skip/aix
skip/osx # FIXME
@@ -58,3 +58,40 @@
|
||||
"PLAY RECAP *********************************************************************",
|
||||
"testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
|
||||
]
|
||||
- name: Test to_yaml
|
||||
environment:
|
||||
ANSIBLE_NOCOLOR: 'true'
|
||||
ANSIBLE_FORCE_COLOR: 'false'
|
||||
ANSIBLE_STDOUT_CALLBACK: community.general.yaml
|
||||
playbook: |
|
||||
- hosts: testhost
|
||||
gather_facts: false
|
||||
vars:
|
||||
data: |
|
||||
line 1
|
||||
line 2
|
||||
line 3
|
||||
tasks:
|
||||
- name: Test to_yaml
|
||||
debug:
|
||||
msg: "{{ '{{' }}'{{ '{{' }}'{{ '}}' }} data | to_yaml {{ '{{' }}'{{ '}}' }}'{{ '}}' }}"
|
||||
# The above should be: msg: "{{ data | to_yaml }}"
|
||||
# Unfortunately, the way Ansible handles templating, we need to do some funny 'escaping' tricks...
|
||||
expected_output: [
|
||||
"",
|
||||
"PLAY [testhost] ****************************************************************",
|
||||
"",
|
||||
"TASK [Test to_yaml] ************************************************************",
|
||||
"ok: [testhost] => ",
|
||||
" msg: |-",
|
||||
" 'line 1",
|
||||
" ",
|
||||
" line 2",
|
||||
" ",
|
||||
" line 3",
|
||||
" ",
|
||||
" '",
|
||||
"",
|
||||
"PLAY RECAP *********************************************************************",
|
||||
"testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
|
||||
]
|
||||
|
||||
@@ -32,54 +32,66 @@
|
||||
# that:
|
||||
# - upgrade_option_result.changed
|
||||
|
||||
- name: Install xz package using homebrew
|
||||
homebrew:
|
||||
name: xz
|
||||
state: present
|
||||
update_homebrew: no
|
||||
become: yes
|
||||
become_user: "{{ brew_stat.stat.pw_name }}"
|
||||
register: xz_result
|
||||
- vars:
|
||||
package_name: gnu-tar
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- xz_result.changed
|
||||
block:
|
||||
- name: Make sure {{ package_name }} package is not installed
|
||||
homebrew:
|
||||
name: "{{ package_name }}"
|
||||
state: absent
|
||||
update_homebrew: no
|
||||
become: yes
|
||||
become_user: "{{ brew_stat.stat.pw_name }}"
|
||||
|
||||
- name: Again install xz package using homebrew
|
||||
homebrew:
|
||||
name: xz
|
||||
state: present
|
||||
update_homebrew: no
|
||||
become: yes
|
||||
become_user: "{{ brew_stat.stat.pw_name }}"
|
||||
register: xz_result
|
||||
- name: Install {{ package_name }} package using homebrew
|
||||
homebrew:
|
||||
name: "{{ package_name }}"
|
||||
state: present
|
||||
update_homebrew: no
|
||||
become: yes
|
||||
become_user: "{{ brew_stat.stat.pw_name }}"
|
||||
register: package_result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- not xz_result.changed
|
||||
- assert:
|
||||
that:
|
||||
- package_result.changed
|
||||
|
||||
- name: Uninstall xz package using homebrew
|
||||
homebrew:
|
||||
name: xz
|
||||
state: absent
|
||||
update_homebrew: no
|
||||
become: yes
|
||||
become_user: "{{ brew_stat.stat.pw_name }}"
|
||||
register: xz_result
|
||||
- name: Again install {{ package_name }} package using homebrew
|
||||
homebrew:
|
||||
name: "{{ package_name }}"
|
||||
state: present
|
||||
update_homebrew: no
|
||||
become: yes
|
||||
become_user: "{{ brew_stat.stat.pw_name }}"
|
||||
register: package_result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- xz_result.changed
|
||||
- assert:
|
||||
that:
|
||||
- not package_result.changed
|
||||
|
||||
- name: Again uninstall xz package using homebrew
|
||||
homebrew:
|
||||
name: xz
|
||||
state: absent
|
||||
update_homebrew: no
|
||||
become: yes
|
||||
become_user: "{{ brew_stat.stat.pw_name }}"
|
||||
register: xz_result
|
||||
- name: Uninstall {{ package_name }} package using homebrew
|
||||
homebrew:
|
||||
name: "{{ package_name }}"
|
||||
state: absent
|
||||
update_homebrew: no
|
||||
become: yes
|
||||
become_user: "{{ brew_stat.stat.pw_name }}"
|
||||
register: package_result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- not xz_result.changed
|
||||
- assert:
|
||||
that:
|
||||
- package_result.changed
|
||||
|
||||
- name: Again uninstall {{ package_name }} package using homebrew
|
||||
homebrew:
|
||||
name: "{{ package_name }}"
|
||||
state: absent
|
||||
update_homebrew: no
|
||||
become: yes
|
||||
become_user: "{{ brew_stat.stat.pw_name }}"
|
||||
register: package_result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- not package_result.changed
@@ -1,3 +1,4 @@
shippable/posix/group1
destructive
skip/aix
skip/osx # FIXME
@@ -26,7 +26,7 @@
# This would require either dumping the content, or registering async task output
- name: Start test smtpserver
  shell: '{{ ansible_python.executable }} {{ remote_tmp_dir }}/smtpserver.py 10025:10465'
-  async: 30
+  async: 45
  poll: 0
  register: smtpserver
@@ -88,3 +88,13 @@
- fail:
    msg: Send mail using TLS failed.
  when: smtpd_tls is succeeded and tls_support is failed and starttls_support is succeeded
+
+- name: Send a test-mail with body, specific recipient and specific ehlohost
+  mail:
+    port: 10025
+    ehlohost: some.domain.tld
+    from: ansible@localhost
+    to: root@localhost
+    subject: Test mail 6 (smtp + body + ehlohost)
+    body: Test body 6
+    secure: never
tests/integration/targets/pipx/aliases (new file, 4 lines)
@@ -0,0 +1,4 @@
destructive
shippable/posix/group2
skip/python2
skip/python3.5
tests/integration/targets/pipx/tasks/main.yml (new file, 92 lines)
@@ -0,0 +1,92 @@
---
|
||||
- name: install pipx
|
||||
pip:
|
||||
name: pipx
|
||||
extra_args: --user
|
||||
|
||||
##############################################################################
|
||||
- name: ensure application tox is uninstalled
|
||||
community.general.pipx:
|
||||
state: absent
|
||||
name: tox
|
||||
register: uninstall_tox
|
||||
|
||||
- name: install application tox
|
||||
community.general.pipx:
|
||||
name: tox
|
||||
register: install_tox
|
||||
|
||||
- name: install application tox again
|
||||
community.general.pipx:
|
||||
name: tox
|
||||
register: install_tox_again
|
||||
ignore_errors: yes
|
||||
|
||||
- name: install application tox again force
|
||||
community.general.pipx:
|
||||
name: tox
|
||||
force: yes
|
||||
register: install_tox_again_force
|
||||
|
||||
- name: uninstall application tox
|
||||
community.general.pipx:
|
||||
state: absent
|
||||
name: tox
|
||||
register: uninstall_tox
|
||||
|
||||
- name: check assertions tox
|
||||
assert:
|
||||
that:
|
||||
- install_tox is changed
|
||||
- "'tox' in install_tox.application"
|
||||
- install_tox_again is not changed
|
||||
- install_tox_again_force is changed
|
||||
- uninstall_tox is changed
|
||||
- "'tox' not in uninstall_tox.application"
|
||||
|
||||
##############################################################################
|
||||
- name: install application tox 3.24.0
|
||||
community.general.pipx:
|
||||
name: tox
|
||||
source: tox==3.24.0
|
||||
register: install_tox_324
|
||||
|
||||
- name: reinstall tox 3.24.0
|
||||
community.general.pipx:
|
||||
name: tox
|
||||
state: reinstall
|
||||
register: reinstall_tox_324
|
||||
|
||||
- name: upgrade tox 3.24.0
|
||||
community.general.pipx:
|
||||
name: tox
|
||||
state: upgrade
|
||||
register: upgrade_tox_324
|
||||
|
||||
- name: downgrade tox 3.24.0
|
||||
community.general.pipx:
|
||||
name: tox
|
||||
source: tox==3.24.0
|
||||
force: yes
|
||||
register: downgrade_tox_324
|
||||
|
||||
- name: cleanup tox 3.24.0
|
||||
community.general.pipx:
|
||||
state: absent
|
||||
name: tox
|
||||
register: uninstall_tox_324
|
||||
|
||||
- name: check assertions tox 3.24.0
|
||||
assert:
|
||||
that:
|
||||
- install_tox_324 is changed
|
||||
- "'tox' in install_tox_324.application"
|
||||
- install_tox_324.application.tox.version == '3.24.0'
|
||||
- reinstall_tox_324 is changed
|
||||
- reinstall_tox_324.application.tox.version == '3.24.0'
|
||||
- upgrade_tox_324 is changed
|
||||
- upgrade_tox_324.application.tox.version != '3.24.0'
|
||||
- downgrade_tox_324 is changed
|
||||
- downgrade_tox_324.application.tox.version == '3.24.0'
|
||||
- uninstall_tox_324 is changed
|
||||
- "'tox' not in uninstall_tox_324.application"
tests/integration/targets/rundeck/aliases (new file, 8 lines)
@@ -0,0 +1,8 @@
destructive
shippable/posix/group1
skip/aix
skip/osx
skip/macos
skip/windows
skip/freebsd
unsupported
tests/integration/targets/rundeck/defaults/main.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
rundeck_url: http://localhost:4440
rundeck_api_version: 39
rundeck_job_id: 3b8a6e54-69fb-42b7-b98f-f82e59238478
tests/integration/targets/rundeck/files/test_job.yaml (new file, 23 lines)
@@ -0,0 +1,23 @@
- defaultTab: nodes
  description: ''
  executionEnabled: true
  id: 3b8a6e54-69fb-42b7-b98f-f82e59238478
  loglevel: INFO
  name: test_job
  nodeFilterEditable: false
  options:
  - label: Exit Code
    name: exit_code
    value: '0'
  - label: Sleep
    name: sleep
    value: '1'
  plugins:
    ExecutionLifecycle: null
  scheduleEnabled: true
  sequence:
    commands:
    - exec: sleep $RD_OPTION_SLEEP && echo "Test done!" && exit $RD_OPTION_EXIT_CODE
    keepgoing: false
    strategy: node-first
  uuid: 3b8a6e54-69fb-42b7-b98f-f82e59238478
tests/integration/targets/rundeck/meta/main.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
dependencies:
  - setup_rundeck
tests/integration/targets/rundeck/tasks/main.yml (new file, 123 lines)
@@ -0,0 +1,123 @@
####################################################################
|
||||
# WARNING: These are designed specifically for Ansible tests #
|
||||
# and should not be used as examples of how to write Ansible roles #
|
||||
####################################################################
|
||||
|
||||
|
||||
- name: Generate a Rundeck API Token
|
||||
ansible.builtin.command: java -jar {{ rdeck_base }}/rundeck-cli.jar tokens create -u admin -d 24h -r admin
|
||||
environment:
|
||||
RD_URL: "{{ rundeck_url }}"
|
||||
RD_USER: admin
|
||||
RD_PASSWORD: admin
|
||||
register: rundeck_api_token
|
||||
|
||||
- name: Create a Rundeck project
|
||||
community.general.rundeck_project:
|
||||
name: "test_project"
|
||||
api_version: "{{ rundeck_api_version }}"
|
||||
url: "{{ rundeck_url }}"
|
||||
token: "{{ rundeck_api_token.stdout_lines[-1] }}"
|
||||
state: present
|
||||
|
||||
- name: Copy test_job definition to /tmp
|
||||
copy:
|
||||
src: test_job.yaml
|
||||
dest: /tmp/test_job.yaml
|
||||
|
||||
- name: Create Rundeck job Test
|
||||
ansible.builtin.command: java -jar {{ rdeck_base }}/rundeck-cli.jar jobs load -f /tmp/test_job.yaml -F yaml -p test_project
|
||||
environment:
|
||||
RD_URL: "{{ rundeck_url }}"
|
||||
RD_USER: admin
|
||||
RD_PASSWORD: admin
|
||||
|
||||
- name: Wrong Rundeck API Token
|
||||
community.general.rundeck_job_run:
|
||||
url: "{{ rundeck_url }}"
|
||||
api_version: "{{ rundeck_api_version }}"
|
||||
api_token: wrong_token
|
||||
job_id: "{{ rundeck_job_id }}"
|
||||
ignore_errors: true
|
||||
register: rundeck_job_run_wrong_token
|
||||
|
||||
- name: Assert that Rundeck authorization failed
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- rundeck_job_run_wrong_token.msg == "Token authorization failed"
|
||||
|
||||
- name: Success run Rundeck job test_job
|
||||
community.general.rundeck_job_run:
|
||||
url: "{{ rundeck_url }}"
|
||||
api_version: "{{ rundeck_api_version }}"
|
||||
api_token: "{{ rundeck_api_token.stdout_lines[-1] }}"
|
||||
job_id: "{{ rundeck_job_id }}"
|
||||
register: rundeck_job_run_success
|
||||
|
||||
- name: Assert that Rundeck job test_job runs successfully
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- rundeck_job_run_success.execution_info.status == "succeeded"
|
||||
|
||||
- name: Fail run Rundeck job test_job
|
||||
community.general.rundeck_job_run:
|
||||
url: "{{ rundeck_url }}"
|
||||
api_version: "{{ rundeck_api_version }}"
|
||||
api_token: "{{ rundeck_api_token.stdout_lines[-1] }}"
|
||||
job_id: "{{ rundeck_job_id }}"
|
||||
job_options:
|
||||
exit_code: "1"
|
||||
ignore_errors: true
|
||||
register: rundeck_job_run_fail
|
||||
|
||||
- name: Assert that Rundeck job test_job failed
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- rundeck_job_run_fail.execution_info.status == "failed"
|
||||
|
||||
- name: Abort run Rundeck job test_job due timeout
|
||||
community.general.rundeck_job_run:
|
||||
url: "{{ rundeck_url }}"
|
||||
api_version: "{{ rundeck_api_version }}"
|
||||
api_token: "{{ rundeck_api_token.stdout_lines[-1] }}"
|
||||
job_id: "{{ rundeck_job_id }}"
|
||||
job_options:
|
||||
sleep: "5"
|
||||
wait_execution_timeout: 2
|
||||
abort_on_timeout: true
|
||||
ignore_errors: true
|
||||
register: rundeck_job_run_aborted
|
||||
|
||||
- name: Assert that Rundeck job test_job is aborted
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- rundeck_job_run_aborted.execution_info.status == "aborted"
|
||||
|
||||
- name: Fire-and-forget run Rundeck job test_job
|
||||
community.general.rundeck_job_run:
|
||||
url: "{{ rundeck_url }}"
|
||||
api_version: "{{ rundeck_api_version }}"
|
||||
api_token: "{{ rundeck_api_token.stdout_lines[-1] }}"
|
||||
job_id: "{{ rundeck_job_id }}"
|
||||
job_options:
|
||||
sleep: "5"
|
||||
wait_execution: False
|
||||
register: rundeck_job_run_forget
|
||||
|
||||
- name: Assert that Rundeck job test_job is running
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- rundeck_job_run_forget.execution_info.status == "running"
|
||||
|
||||
- name: Get Rundeck job test_job executions info
|
||||
community.general.rundeck_job_executions_info:
|
||||
url: "{{ rundeck_url }}"
|
||||
api_version: "{{ rundeck_api_version }}"
|
||||
api_token: "{{ rundeck_api_token.stdout_lines[-1] }}"
|
||||
job_id: "{{ rundeck_job_id }}"
|
||||
register: rundeck_job_executions_info
|
||||
|
||||
- name: Assert that Rundeck job executions info has 4 registers
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- rundeck_job_executions_info.paging.total | int == 4
|
||||
@@ -0,0 +1,2 @@
rundeck_war_url: https://packagecloud.io/pagerduty/rundeck/packages/java/org.rundeck/rundeck-3.4.4-20210920.war/artifacts/rundeck-3.4.4-20210920.war/download
rundeck_cli_url: https://github.com/rundeck/rundeck-cli/releases/download/v1.3.10/rundeck-cli-1.3.10-all.jar
tests/integration/targets/setup_rundeck/tasks/main.yml (new file, 37 lines)
@@ -0,0 +1,37 @@
---
|
||||
####################################################################
|
||||
# WARNING: These are designed specifically for Ansible tests #
|
||||
# and should not be used as examples of how to write Ansible roles #
|
||||
####################################################################
|
||||
|
||||
- name: Skip unsupported platforms
|
||||
meta: end_play
|
||||
when: ansible_distribution not in ['CentOS', 'Fedora', 'Debian', 'Ubuntu']
|
||||
|
||||
- name: Include OS-specific variables
|
||||
include_vars: '{{ ansible_os_family }}.yml'
|
||||
when: ansible_os_family in ['Debian', 'RedHat']
|
||||
|
||||
- name: Set Rundeck base dir
|
||||
set_fact:
|
||||
rdeck_base: /home/rundeck
|
||||
|
||||
- name: Install OpenJDK
|
||||
package:
|
||||
name: "{{ openjdk_pkg }}"
|
||||
state: present
|
||||
|
||||
- name: Install Rundeck
|
||||
shell: |
|
||||
mkdir -p $RDECK_BASE;
|
||||
curl -k -o $RDECK_BASE/rundeck.war -L '{{ rundeck_war_url }}';
|
||||
curl -k -o $RDECK_BASE/rundeck-cli.jar -L '{{ rundeck_cli_url }}'
|
||||
cd $RDECK_BASE;
|
||||
java -Xmx4g -jar rundeck.war &
|
||||
environment:
|
||||
RDECK_BASE: "{{ rdeck_base }}"
|
||||
|
||||
- name: Wait for Rundeck port 4440
|
||||
wait_for:
|
||||
host: localhost
|
||||
port: 4440
|
||||
tests/integration/targets/setup_rundeck/vars/Debian.yml (new file, 1 line)
@@ -0,0 +1 @@
openjdk_pkg: openjdk-8-jre-headless
tests/integration/targets/setup_rundeck/vars/RedHat.yml (new file, 1 line)
@@ -0,0 +1 @@
openjdk_pkg: java-1.8.0-openjdk
@@ -419,35 +419,35 @@
|
||||
- zypper_result_update_cache_check is successful
|
||||
- zypper_result_update_cache_check is not changed
|
||||
|
||||
- name: ensure no previous netcat package still exists
|
||||
zypper:
|
||||
name:
|
||||
- netcat-openbsd
|
||||
- gnu-netcat
|
||||
state: absent
|
||||
|
||||
- name: install netcat-openbsd which conflicts with gnu-netcat
|
||||
zypper:
|
||||
name: netcat-openbsd
|
||||
state: present
|
||||
|
||||
- name: try installation of gnu-netcat which should fail due to the conflict
|
||||
zypper:
|
||||
name: gnu-netcat
|
||||
state: present
|
||||
ignore_errors: yes
|
||||
register: zypper_pkg_conflict
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- zypper_pkg_conflict is failed
|
||||
- "'conflicts with netcat-openbsd provided' in zypper_pkg_conflict.stdout"
|
||||
|
||||
- name: retry installation of gnu-netcat with force_resolution set to choose a resolution
|
||||
zypper:
|
||||
name: gnu-netcat
|
||||
state: present
|
||||
force_resolution: True
|
||||
# - name: ensure no previous netcat package still exists
|
||||
# zypper:
|
||||
# name:
|
||||
# - netcat-openbsd
|
||||
# - gnu-netcat
|
||||
# state: absent
|
||||
#
|
||||
# - name: install netcat-openbsd which conflicts with gnu-netcat
|
||||
# zypper:
|
||||
# name: netcat-openbsd
|
||||
# state: present
|
||||
#
|
||||
# - name: try installation of gnu-netcat which should fail due to the conflict
|
||||
# zypper:
|
||||
# name: gnu-netcat
|
||||
# state: present
|
||||
# ignore_errors: yes
|
||||
# register: zypper_pkg_conflict
|
||||
#
|
||||
# - assert:
|
||||
# that:
|
||||
# - zypper_pkg_conflict is failed
|
||||
# - "'conflicts with netcat-openbsd provided' in zypper_pkg_conflict.stdout"
|
||||
#
|
||||
# - name: retry installation of gnu-netcat with force_resolution set to choose a resolution
|
||||
# zypper:
|
||||
# name: gnu-netcat
|
||||
# state: present
|
||||
# force_resolution: True
|
||||
|
||||
- name: duplicate rpms block
|
||||
vars:
@@ -0,0 +1,7 @@
[systemsmanagement_Uyuni_Utils]
name=Several utilities to develop, build or release Uyuni (openSUSE_Leap_15.3)
type=rpm-md
baseurl=https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Utils/openSUSE_Leap_15.3/
gpgcheck=1
gpgkey=https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Utils/openSUSE_Leap_15.3/repodata/repomd.xml.key
enabled=1

@@ -0,0 +1,2 @@
dependencies:
  - setup_remote_tmp_dir
@@ -19,6 +19,8 @@
      - testrefresh
      - testprio
      - Apache_PHP_Modules
+      - systemsmanagement_Uyuni_Stable
+      - systemsmanagement_Uyuni_Utils

- name: collect repo configuration after test
  shell: "grep . /etc/zypp/repos.d/*"
@@ -4,6 +4,11 @@
|
||||
state: absent
|
||||
register: zypper_result
|
||||
|
||||
- name: verify no change on test repo deletion
|
||||
assert:
|
||||
that:
|
||||
- "not zypper_result.changed"
|
||||
|
||||
- name: Add test repo
|
||||
community.general.zypper_repository:
|
||||
name: test
|
||||
@@ -51,7 +56,8 @@
|
||||
command: zypper -x lr testrefresh
|
||||
register: zypper_result
|
||||
|
||||
- assert:
|
||||
- name: verify autorefresh option set properly
|
||||
assert:
|
||||
that:
|
||||
- '"autorefresh=\"0\"" in zypper_result.stdout'
|
||||
|
||||
@@ -66,7 +72,8 @@
|
||||
command: zypper -x lr testprio
|
||||
register: zypper_result
|
||||
|
||||
- assert:
|
||||
- name: verify priority option set properly
|
||||
assert:
|
||||
that:
|
||||
- '"priority=\"55\"" in zypper_result.stdout'
|
||||
|
||||
@@ -88,7 +95,8 @@
|
||||
command: zypper lr chrome2
|
||||
register: zypper_result2
|
||||
|
||||
- assert:
|
||||
- name: ensure same url cause update of existing repo even if name differ
|
||||
assert:
|
||||
that:
|
||||
- "zypper_result1.rc != 0"
|
||||
- "'not found' in zypper_result1.stderr"
|
||||
@@ -108,7 +116,8 @@
|
||||
command: zypper lr samename
|
||||
register: zypper_result
|
||||
|
||||
- assert:
|
||||
- name: ensure url get updated on repo with same name
|
||||
assert:
|
||||
that:
|
||||
- "'/science/' not in zypper_result.stdout"
|
||||
- "'/devel:/languages:/ruby/' in zypper_result.stdout"
|
||||
@@ -140,7 +149,8 @@
|
||||
state: present
|
||||
register: add_repo_again
|
||||
|
||||
- assert:
|
||||
- name: no update in case of $releasever usage in url
|
||||
assert:
|
||||
that:
|
||||
- add_repo is changed
|
||||
- add_repo_again is not changed
|
||||
@@ -151,10 +161,21 @@
|
||||
state: absent
|
||||
register: remove_repo
|
||||
|
||||
- assert:
|
||||
- name: verify repo was removed
|
||||
assert:
|
||||
that:
|
||||
- remove_repo is changed
|
||||
|
||||
- name: get list of files in /etc/zypp/repos.d/
|
||||
command: ls /etc/zypp/repos.d/
|
||||
changed_when: false
|
||||
register: releaseverrepo_etc_zypp_reposd
|
||||
|
||||
- name: verify removal of file releaseverrepo.repo in /etc/zypp/repos.d/
|
||||
assert:
|
||||
that:
|
||||
- "'releaseverrepo' not in releaseverrepo_etc_zypp_reposd.stdout"
|
||||
|
||||
- name: add a repo by basearch
|
||||
community.general.zypper_repository:
|
||||
name: basearchrepo
|
||||
@@ -169,7 +190,8 @@
|
||||
state: present
|
||||
register: add_repo_again
|
||||
|
||||
- assert:
|
||||
- name: no update in case of $basearch usage in url
|
||||
assert:
|
||||
that:
|
||||
- add_repo is changed
|
||||
- add_repo_again is not changed
|
||||
@@ -180,6 +202,74 @@
|
||||
state: absent
|
||||
register: remove_repo
|
||||
|
||||
- assert:
|
||||
- name: verify repo was removed
|
||||
assert:
|
||||
that:
|
||||
- remove_repo is changed
|
||||
|
||||
- name: add new repository via url to .repo file
|
||||
community.general.zypper_repository:
|
||||
repo: http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/openSUSE_Leap_{{ ansible_distribution_version }}/systemsmanagement:Uyuni:Stable.repo
|
||||
state: present
|
||||
register: added_by_repo_file
|
||||
|
||||
- name: get repository details from zypper
|
||||
command: zypper lr systemsmanagement_Uyuni_Stable
|
||||
register: get_repository_details_from_zypper
|
||||
|
||||
- name: verify adding via .repo file was successful
|
||||
assert:
|
||||
that:
|
||||
- "added_by_repo_file is changed"
|
||||
- "get_repository_details_from_zypper.rc == 0"
|
||||
- "'/systemsmanagement:/Uyuni:/Stable/' in get_repository_details_from_zypper.stdout"
|
||||
|
||||
- name: add same repository via url to .repo file again to verify idempotency
|
||||
community.general.zypper_repository:
|
||||
repo: http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/openSUSE_Leap_{{ ansible_distribution_version }}/systemsmanagement:Uyuni:Stable.repo
|
||||
state: present
|
||||
register: added_again_by_repo_file
|
||||
|
||||
- name: verify nothing was changed adding a repo with the same .repo file
|
||||
assert:
|
||||
that:
|
||||
- added_again_by_repo_file is not changed
|
||||
|
||||
- name: remove repository via url to .repo file
|
||||
community.general.zypper_repository:
|
||||
repo: http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/openSUSE_Leap_{{ ansible_distribution_version }}/systemsmanagement:Uyuni:Stable.repo
|
||||
state: absent
|
||||
register: removed_by_repo_file
|
||||
|
||||
- name: get list of files in /etc/zypp/repos.d/
|
||||
command: ls /etc/zypp/repos.d/
|
||||
changed_when: false
|
||||
register: etc_zypp_reposd
|
||||
|
||||
- name: verify removal via .repo file was successful, including cleanup of local .repo file in /etc/zypp/repos.d/
|
||||
assert:
|
||||
that:
|
||||
- "removed_by_repo_file"
|
||||
- "'/systemsmanagement:/Uyuni:/Stable/' not in etc_zypp_reposd.stdout"
|
||||
|
||||
- name: Copy test .repo file
|
||||
copy:
|
||||
src: 'files/systemsmanagement_Uyuni_Utils.repo'
|
||||
dest: '{{ remote_tmp_dir }}'
|
||||
|
||||
- name: add new repository via local path to .repo file
|
||||
community.general.zypper_repository:
|
||||
repo: "{{ remote_tmp_dir }}/systemsmanagement_Uyuni_Utils.repo"
|
||||
state: present
|
||||
register: added_by_repo_local_file
|
||||
|
||||
- name: get repository details for systemsmanagement_Uyuni_Utils from zypper
|
||||
command: zypper lr systemsmanagement_Uyuni_Utils
|
||||
register: get_repository_details_from_zypper_for_systemsmanagement_Uyuni_Utils
|
||||
|
||||
- name: verify adding repository via local .repo file was successful
|
||||
assert:
|
||||
that:
|
||||
- "added_by_repo_local_file is changed"
|
||||
- "get_repository_details_from_zypper_for_systemsmanagement_Uyuni_Utils.rc == 0"
|
||||
- "'/systemsmanagement:/Uyuni:/Utils/' in get_repository_details_from_zypper_for_systemsmanagement_Uyuni_Utils.stdout"
@@ -154,6 +154,7 @@ def main():
    }, extra=PREVENT_EXTRA)

    schema = Schema({
        ('notifications'): bool,
        ('automerge'): bool,
        ('macros'): MacroSchema,
        ('files'): FilesSchema,
tests/sanity/ignore-2.13.txt (new file, 53 lines)
@@ -0,0 +1,53 @@
plugins/module_utils/compat/ipaddress.py no-assert
|
||||
plugins/module_utils/compat/ipaddress.py no-unicode-literals
|
||||
plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path
|
||||
plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen
|
||||
plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice
|
||||
plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed
|
||||
plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice
|
||||
plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path
|
||||
plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values
|
||||
plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error
|
||||
plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc
|
||||
plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter
|
||||
plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-list-no-elements
|
||||
plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-type-not-in-doc
|
||||
plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:undocumented-parameter
|
||||
plugins/modules/cloud/univention/udm_share.py validate-modules:parameter-list-no-elements
|
||||
plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no-elements
|
||||
plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type
|
||||
plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter
|
||||
plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice
|
||||
plugins/modules/notification/grove.py validate-modules:invalid-argument-name # invalid alias - removed in 4.0.0
|
||||
plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0
|
||||
plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0
|
||||
plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0
|
||||
plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0
|
||||
plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0
|
||||
plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0
|
||||
plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-syntax-error
|
||||
plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0
|
||||
plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0
|
||||
plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0
|
||||
plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc
|
||||
plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc
|
||||
plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc
|
||||
plugins/modules/remote_management/manageiq/manageiq_policies.py validate-modules:parameter-state-invalid-choice
|
||||
plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-choices-do-not-match-spec # missing docs on suboptions
|
||||
plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-missing-type # missing docs on suboptions
|
||||
plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions
|
||||
plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions
|
||||
plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:parameter-state-invalid-choice
|
||||
plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid
|
||||
plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice
|
||||
plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter
|
||||
plugins/modules/system/osx_defaults.py validate-modules:parameter-state-invalid-choice
|
||||
plugins/modules/system/parted.py validate-modules:parameter-state-invalid-choice
|
||||
plugins/modules/system/puppet.py use-argspec-type-path
|
||||
plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spec # show_diff is not documented
|
||||
plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc
|
||||
plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc # param removed in 4.0.0
|
||||
plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path
|
||||
plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice
|
||||
plugins/modules/system/xfconf.py validate-modules:return-syntax-error
|
||||
plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path
|
||||
tests/unit/plugins/callback/test_elastic.py (new file, 91 lines)
@@ -0,0 +1,91 @@
# (C) 2021, Victor Martinez <VictorMartinezRubio@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.playbook.task import Task
|
||||
from ansible.executor.task_result import TaskResult
|
||||
from ansible_collections.community.general.tests.unit.compat import unittest
|
||||
from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock
|
||||
from ansible_collections.community.general.plugins.callback.elastic import ElasticSource, TaskData
|
||||
from collections import OrderedDict
|
||||
import sys
|
||||
|
||||
ELASTIC_MINIMUM_PYTHON_VERSION = (3, 6)
|
||||
|
||||
|
||||
class TestOpentelemetry(unittest.TestCase):
|
||||
@patch('ansible_collections.community.general.plugins.callback.elastic.socket')
|
||||
def setUp(self, mock_socket):
|
||||
if sys.version_info < ELASTIC_MINIMUM_PYTHON_VERSION:
|
||||
self.skipTest("Python %s+ is needed for Elastic" %
|
||||
",".join(map(str, ELASTIC_MINIMUM_PYTHON_VERSION)))
|
||||
mock_socket.gethostname.return_value = 'my-host'
|
||||
mock_socket.gethostbyname.return_value = '1.2.3.4'
|
||||
self.elastic = ElasticSource(display=None)
|
||||
self.task_fields = {'args': {}}
|
||||
self.mock_host = Mock('MockHost')
|
||||
self.mock_host.name = 'myhost'
|
||||
self.mock_host._uuid = 'myhost_uuid'
|
||||
self.mock_task = Task()
|
||||
self.mock_task.action = 'myaction'
|
||||
self.mock_task.no_log = False
|
||||
self.mock_task._role = 'myrole'
|
||||
self.mock_task._uuid = 'myuuid'
|
||||
self.mock_task.args = {}
|
||||
self.mock_task.get_name = MagicMock(return_value='mytask')
|
||||
self.mock_task.get_path = MagicMock(return_value='/mypath')
|
||||
self.my_task = TaskData('myuuid', 'mytask', '/mypath', 'myplay', 'myaction', '')
|
||||
self.my_task_result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields)
|
||||
|
||||
def test_start_task(self):
|
||||
tasks_data = OrderedDict()
|
||||
|
||||
self.elastic.start_task(
|
||||
tasks_data,
|
||||
False,
|
||||
'myplay',
|
||||
self.mock_task
|
||||
)
|
||||
|
||||
task_data = tasks_data['myuuid']
|
||||
self.assertEqual(task_data.uuid, 'myuuid')
|
||||
self.assertEqual(task_data.name, 'mytask')
|
||||
self.assertEqual(task_data.path, '/mypath')
|
||||
self.assertEqual(task_data.play, 'myplay')
|
||||
self.assertEqual(task_data.action, 'myaction')
|
||||
self.assertEqual(task_data.args, '')
|
||||
|
||||
def test_finish_task_with_a_host_match(self):
|
||||
tasks_data = OrderedDict()
|
||||
tasks_data['myuuid'] = self.my_task
|
||||
|
||||
self.elastic.finish_task(
|
||||
tasks_data,
|
||||
'ok',
|
||||
self.my_task_result
|
||||
)
|
||||
|
||||
task_data = tasks_data['myuuid']
|
||||
host_data = task_data.host_data['myhost_uuid']
|
||||
self.assertEqual(host_data.uuid, 'myhost_uuid')
|
||||
self.assertEqual(host_data.name, 'myhost')
|
||||
self.assertEqual(host_data.status, 'ok')
|
||||
|
||||
def test_finish_task_without_a_host_match(self):
|
||||
result = TaskResult(host=None, task=self.mock_task, return_data={}, task_fields=self.task_fields)
|
||||
tasks_data = OrderedDict()
|
||||
tasks_data['myuuid'] = self.my_task
|
||||
|
||||
self.elastic.finish_task(
|
||||
tasks_data,
|
||||
'ok',
|
||||
result
|
||||
)
|
||||
|
||||
task_data = tasks_data['myuuid']
|
||||
host_data = task_data.host_data['include']
|
||||
self.assertEqual(host_data.uuid, 'include')
|
||||
self.assertEqual(host_data.name, 'include')
|
||||
self.assertEqual(host_data.status, 'ok')
|
||||
@@ -91,3 +91,38 @@ class TestOpentelemetry(unittest.TestCase):
|
||||
self.assertEqual(host_data.uuid, 'include')
|
||||
self.assertEqual(host_data.name, 'include')
|
||||
self.assertEqual(host_data.status, 'ok')
|
||||
|
||||
def test_get_error_message(self):
|
||||
test_cases = (
|
||||
('my-exception', 'my-msg', None, 'my-exception'),
|
||||
(None, 'my-msg', None, 'my-msg'),
|
||||
(None, None, None, 'failed'),
|
||||
)
|
||||
|
||||
for tc in test_cases:
|
||||
result = self.opentelemetry.get_error_message(generate_test_data(tc[0], tc[1], tc[2]))
|
||||
self.assertEqual(result, tc[3])
|
||||
|
||||
def test_enrich_error_message(self):
|
||||
test_cases = (
|
||||
('my-exception', 'my-msg', 'my-stderr', 'message: "my-msg"\nexception: "my-exception"\nstderr: "my-stderr"'),
|
||||
('my-exception', None, 'my-stderr', 'message: "failed"\nexception: "my-exception"\nstderr: "my-stderr"'),
|
||||
(None, 'my-msg', 'my-stderr', 'message: "my-msg"\nexception: "None"\nstderr: "my-stderr"'),
|
||||
('my-exception', 'my-msg', None, 'message: "my-msg"\nexception: "my-exception"\nstderr: "None"'),
|
||||
('my-exception', 'my-msg', '\nline1\nline2', 'message: "my-msg"\nexception: "my-exception"\nstderr: "\nline1\nline2"')
|
||||
)
|
||||
|
||||
for tc in test_cases:
|
||||
result = self.opentelemetry.enrich_error_message(generate_test_data(tc[0], tc[1], tc[2]))
|
||||
self.assertEqual(result, tc[3])
|
||||
|
||||
|
||||
def generate_test_data(exception=None, msg=None, stderr=None):
|
||||
res_data = OrderedDict()
|
||||
if exception:
|
||||
res_data['exception'] = exception
|
||||
if msg:
|
||||
res_data['msg'] = msg
|
||||
if stderr:
|
||||
res_data['stderr'] = stderr
|
||||
return res_data
|
||||
|
||||
262
tests/unit/plugins/inventory/test_opennebula.py
Normal file
262
tests/unit/plugins/inventory/test_opennebula.py
Normal file
@@ -0,0 +1,262 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2020, FELDSAM s.r.o. - FeldHost™ <support@feldhost.cz>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
#
|
||||
# The API responses used in these tests were recorded from OpenNebula version 5.10.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
from collections import OrderedDict
|
||||
|
||||
import pytest
|
||||
|
||||
from ansible.inventory.data import InventoryData
|
||||
from ansible_collections.community.general.plugins.inventory.opennebula import InventoryModule
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def inventory():
|
||||
r = InventoryModule()
|
||||
r.inventory = InventoryData()
|
||||
return r
|
||||
|
||||
|
||||
def test_verify_file(tmp_path, inventory):
|
||||
file = tmp_path / "foobar.opennebula.yml"
|
||||
file.touch()
|
||||
assert inventory.verify_file(str(file)) is True
|
||||
|
||||
|
||||
def test_verify_file_bad_config(inventory):
|
||||
assert inventory.verify_file('foobar.opennebula.yml') is False
|
||||
|
||||
|
||||
def get_vm_pool():
|
||||
data = type('pyone.bindings.VM_POOLSub', (object,), {'VM': []})()
|
||||
|
||||
vm = type('pyone.bindings.VMType90Sub', (object,), {
|
||||
'DEPLOY_ID': 'one-7157',
|
||||
'ETIME': 0,
|
||||
'GID': 132,
|
||||
'GNAME': 'CSApparelVDC',
|
||||
'HISTORY_RECORDS': {},
|
||||
'ID': 7157,
|
||||
'LAST_POLL': 1632762935,
|
||||
'LCM_STATE': 3,
|
||||
'MONITORING': {},
|
||||
'NAME': 'sam-691-sam',
|
||||
'RESCHED': 0,
|
||||
'SNAPSHOTS': [],
|
||||
'STATE': 3,
|
||||
'STIME': 1632755245,
|
||||
'TEMPLATE': OrderedDict({
|
||||
'NIC': OrderedDict({
|
||||
'AR_ID': '0',
|
||||
'BRIDGE': 'onebr80',
|
||||
'BRIDGE_TYPE': 'linux',
|
||||
'CLUSTER_ID': '0',
|
||||
'IP': '172.22.4.187',
|
||||
'MAC': '02:00:ac:16:04:bb',
|
||||
'MTU': '8192',
|
||||
'NAME': 'NIC0',
|
||||
'NETWORK': 'Private Net CSApparel',
|
||||
'NETWORK_ID': '80',
|
||||
'NETWORK_UNAME': 'CSApparelVDC-admin',
|
||||
'NIC_ID': '0',
|
||||
'PHYDEV': 'team0',
|
||||
'SECURITY_GROUPS': '0',
|
||||
'TARGET': 'one-7157-0',
|
||||
'VLAN_ID': '480',
|
||||
'VN_MAD': '802.1Q'
|
||||
})
|
||||
}),
|
||||
'USER_TEMPLATE': OrderedDict({
|
||||
'HYPERVISOR': 'kvm',
|
||||
'INPUTS_ORDER': '',
|
||||
'LOGO': 'images/logos/centos.png',
|
||||
'MEMORY_UNIT_COST': 'MB',
|
||||
'SCHED_REQUIREMENTS': 'CLUSTER_ID="0"'
|
||||
})
|
||||
})()
|
||||
data.VM.append(vm)
|
||||
|
||||
vm = type('pyone.bindings.VMType90Sub', (object,), {
|
||||
'DEPLOY_ID': 'one-327',
|
||||
'ETIME': 0,
|
||||
'GID': 0,
|
||||
'GNAME': 'oneadmin',
|
||||
'HISTORY_RECORDS': {},
|
||||
'ID': 327,
|
||||
'LAST_POLL': 1632763543,
|
||||
'LCM_STATE': 3,
|
||||
'MONITORING': {},
|
||||
'NAME': 'zabbix-327',
|
||||
'RESCHED': 0,
|
||||
'SNAPSHOTS': [],
|
||||
'STATE': 3,
|
||||
'STIME': 1575410106,
|
||||
'TEMPLATE': OrderedDict({
|
||||
'NIC': [
|
||||
OrderedDict({
|
||||
'AR_ID': '0',
|
||||
'BRIDGE': 'onerb.103',
|
||||
'BRIDGE_TYPE': 'linux',
|
||||
'IP': '185.165.1.1',
|
||||
'IP6_GLOBAL': '2000:a001::b9ff:feae:aa0d',
|
||||
'IP6_LINK': 'fe80::b9ff:feae:aa0d',
|
||||
'MAC': '02:00:b9:ae:aa:0d',
|
||||
'NAME': 'NIC0',
|
||||
'NETWORK': 'Public',
|
||||
'NETWORK_ID': '7',
|
||||
'NIC_ID': '0',
|
||||
'PHYDEV': 'team0',
|
||||
'SECURITY_GROUPS': '0',
|
||||
'TARGET': 'one-327-0',
|
||||
'VLAN_ID': '100',
|
||||
'VN_MAD': '802.1Q'
|
||||
}),
|
||||
OrderedDict({
|
||||
'AR_ID': '0',
|
||||
'BRIDGE': 'br0',
|
||||
'BRIDGE_TYPE': 'linux',
|
||||
'CLUSTER_ID': '0',
|
||||
'IP': '192.168.1.1',
|
||||
'MAC': '02:00:c0:a8:3b:01',
|
||||
'NAME': 'NIC1',
|
||||
'NETWORK': 'Management',
|
||||
'NETWORK_ID': '11',
|
||||
'NIC_ID': '1',
|
||||
'SECURITY_GROUPS': '0',
|
||||
'TARGET': 'one-327-1',
|
||||
'VN_MAD': 'bridge'
|
||||
})
|
||||
]
|
||||
}),
|
||||
'USER_TEMPLATE': OrderedDict({
|
||||
'HYPERVISOR': 'kvm',
|
||||
'INPUTS_ORDER': '',
|
||||
'LABELS': 'Oracle Linux',
|
||||
'LOGO': 'images/logos/centos.png',
|
||||
'MEMORY_UNIT_COST': 'MB',
|
||||
'SAVED_TEMPLATE_ID': '29'
|
||||
})
|
||||
})()
|
||||
data.VM.append(vm)
|
||||
|
||||
vm = type('pyone.bindings.VMType90Sub', (object,), {
|
||||
'DEPLOY_ID': 'one-107',
|
||||
'ETIME': 0,
|
||||
'GID': 0,
|
||||
'GNAME': 'oneadmin',
|
||||
'HISTORY_RECORDS': {},
|
||||
'ID': 107,
|
||||
'LAST_POLL': 1632764186,
|
||||
'LCM_STATE': 3,
|
||||
'MONITORING': {},
|
||||
'NAME': 'gitlab-107',
|
||||
'RESCHED': 0,
|
||||
'SNAPSHOTS': [],
|
||||
'STATE': 3,
|
||||
'STIME': 1572485522,
|
||||
'TEMPLATE': OrderedDict({
|
||||
'NIC': OrderedDict({
|
||||
'AR_ID': '0',
|
||||
'BRIDGE': 'onerb.103',
|
||||
'BRIDGE_TYPE': 'linux',
|
||||
'IP': '185.165.1.3',
|
||||
'IP6_GLOBAL': '2000:a001::b9ff:feae:aa03',
|
||||
'IP6_LINK': 'fe80::b9ff:feae:aa03',
|
||||
'MAC': '02:00:b9:ae:aa:03',
|
||||
'NAME': 'NIC0',
|
||||
'NETWORK': 'Public',
|
||||
'NETWORK_ID': '7',
|
||||
'NIC_ID': '0',
|
||||
'PHYDEV': 'team0',
|
||||
'SECURITY_GROUPS': '0',
|
||||
'TARGET': 'one-107-0',
|
||||
'VLAN_ID': '100',
|
||||
'VN_MAD': '802.1Q'
|
||||
})
|
||||
}),
|
||||
'USER_TEMPLATE': OrderedDict({
|
||||
'HYPERVISOR': 'kvm',
|
||||
'INPUTS_ORDER': '',
|
||||
'LABELS': 'Gitlab,Centos',
|
||||
'LOGO': 'images/logos/centos.png',
|
||||
'MEMORY_UNIT_COST': 'MB',
|
||||
'SCHED_REQUIREMENTS': 'ID="0" | ID="1" | ID="2"',
|
||||
'SSH_PORT': '8822'
|
||||
})
|
||||
})()
|
||||
data.VM.append(vm)
|
||||
|
||||
return data
|
||||
|
||||
|
||||
def get_option(option):
|
||||
if option == 'api_url':
|
||||
return 'https://opennebula:2633/RPC2'
|
||||
if option == 'api_username':
|
||||
return 'username'
|
||||
elif option == 'api_password':
|
||||
return 'password'
|
||||
elif option == 'api_authfile':
|
||||
return '~/.one/one_auth'
|
||||
elif option == 'hostname':
|
||||
return 'v4_first_ip'
|
||||
elif option == 'group_by_labels':
|
||||
return True
|
||||
elif option == 'filter_by_label':
|
||||
return None
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def test_get_connection_info(inventory, mocker):
|
||||
inventory.get_option = mocker.MagicMock(side_effect=get_option)
|
||||
|
||||
auth = inventory._get_connection_info()
|
||||
assert (auth.username and auth.password)
|
||||
|
||||
|
||||
def test_populate(inventory, mocker):
|
||||
# bypass API fetch call
|
||||
inventory._get_vm_pool = mocker.MagicMock(side_effect=get_vm_pool)
|
||||
inventory.get_option = mocker.MagicMock(side_effect=get_option)
|
||||
inventory._populate()
|
||||
|
||||
# get different hosts
|
||||
host_sam = inventory.inventory.get_host('sam-691-sam')
|
||||
host_zabbix = inventory.inventory.get_host('zabbix-327')
|
||||
host_gitlab = inventory.inventory.get_host('gitlab-107')
|
||||
|
||||
# test if groups exists
|
||||
assert 'Gitlab' in inventory.inventory.groups
|
||||
assert 'Centos' in inventory.inventory.groups
|
||||
assert 'Oracle_Linux' in inventory.inventory.groups
|
||||
|
||||
# check if host_zabbix is in Oracle_Linux group
|
||||
group_oracle_linux = inventory.inventory.groups['Oracle_Linux']
|
||||
assert group_oracle_linux.hosts == [host_zabbix]
|
||||
|
||||
# check if host_gitlab is in Gitlab and Centos group
|
||||
group_gitlab = inventory.inventory.groups['Gitlab']
|
||||
group_centos = inventory.inventory.groups['Centos']
|
||||
assert group_gitlab.hosts == [host_gitlab]
|
||||
assert group_centos.hosts == [host_gitlab]
|
||||
|
||||
# check IPv4 address
|
||||
assert '172.22.4.187' == host_sam.get_vars()['v4_first_ip']
|
||||
|
||||
# check IPv6 address
|
||||
assert '2000:a001::b9ff:feae:aa0d' == host_zabbix.get_vars()['v6_first_ip']
|
||||
|
||||
# check ansible_hosts
|
||||
assert '172.22.4.187' == host_sam.get_vars()['ansible_host']
|
||||
assert '185.165.1.1' == host_zabbix.get_vars()['ansible_host']
|
||||
assert '185.165.1.3' == host_gitlab.get_vars()['ansible_host']
|
||||
|
||||
# check for custom ssh port
|
||||
assert '8822' == host_gitlab.get_vars()['ansible_port']
|
||||
210
tests/unit/plugins/modules/cloud/misc/test_proxmox_tasks_info.py
Normal file
210
tests/unit/plugins/modules/cloud/misc/test_proxmox_tasks_info.py
Normal file
@@ -0,0 +1,210 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2021, Andreas Botzner (@paginabianca) <andreas at botzner dot com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
#
|
||||
# Proxmox Tasks module unit tests.
|
||||
# The API responses used in these tests were recorded from PVE version 6.4-8
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import pytest
|
||||
import json
|
||||
|
||||
from ansible_collections.community.general.plugins.modules.cloud.misc import proxmox_tasks_info
|
||||
from ansible_collections.community.general.plugins.module_utils.proxmox import ProxmoxAnsible
|
||||
from ansible_collections.community.general.tests.unit.compat.mock import MagicMock, patch
|
||||
from ansible_collections.community.general.tests.unit.plugins.modules.utils import (
|
||||
AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
|
||||
)
|
||||
from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args
|
||||
from ansible_collections.community.general.plugins.module_utils import proxmox
|
||||
|
||||
NODE = 'node01'
|
||||
TASK_UPID = 'UPID:iaclab-01-01:000029DD:1599528B:6108F068:srvreload:networking:root@pam:'
|
||||
TASKS = [
|
||||
{
|
||||
"endtime": 1629092710,
|
||||
"id": "networking",
|
||||
"node": "iaclab-01-01",
|
||||
"pid": 3539,
|
||||
"pstart": 474062216,
|
||||
"starttime": 1629092709,
|
||||
"status": "OK",
|
||||
"type": "srvreload",
|
||||
"upid": "UPID:iaclab-01-01:00000DD3:1C419D88:6119FB65:srvreload:networking:root@pam:",
|
||||
"user": "root@pam"
|
||||
},
|
||||
{
|
||||
"endtime": 1627975785,
|
||||
"id": "networking",
|
||||
"node": "iaclab-01-01",
|
||||
"pid": 10717,
|
||||
"pstart": 362369675,
|
||||
"starttime": 1627975784,
|
||||
"status": "command 'ifreload -a' failed: exit code 1",
|
||||
"type": "srvreload",
|
||||
"upid": "UPID:iaclab-01-01:000029DD:1599528B:6108F068:srvreload:networking:root@pam:",
|
||||
"user": "root@pam"
|
||||
},
|
||||
{
|
||||
"endtime": 1627975503,
|
||||
"id": "networking",
|
||||
"node": "iaclab-01-01",
|
||||
"pid": 6778,
|
||||
"pstart": 362341540,
|
||||
"starttime": 1627975503,
|
||||
"status": "OK",
|
||||
"type": "srvreload",
|
||||
"upid": "UPID:iaclab-01-01:00001A7A:1598E4A4:6108EF4F:srvreload:networking:root@pam:",
|
||||
"user": "root@pam"
|
||||
}
|
||||
]
|
||||
EXPECTED_TASKS = [
|
||||
{
|
||||
"endtime": 1629092710,
|
||||
"id": "networking",
|
||||
"node": "iaclab-01-01",
|
||||
"pid": 3539,
|
||||
"pstart": 474062216,
|
||||
"starttime": 1629092709,
|
||||
"status": "OK",
|
||||
"type": "srvreload",
|
||||
"upid": "UPID:iaclab-01-01:00000DD3:1C419D88:6119FB65:srvreload:networking:root@pam:",
|
||||
"user": "root@pam",
|
||||
"failed": False
|
||||
},
|
||||
{
|
||||
"endtime": 1627975785,
|
||||
"id": "networking",
|
||||
"node": "iaclab-01-01",
|
||||
"pid": 10717,
|
||||
"pstart": 362369675,
|
||||
"starttime": 1627975784,
|
||||
"status": "command 'ifreload -a' failed: exit code 1",
|
||||
"type": "srvreload",
|
||||
"upid": "UPID:iaclab-01-01:000029DD:1599528B:6108F068:srvreload:networking:root@pam:",
|
||||
"user": "root@pam",
|
||||
"failed": True
|
||||
},
|
||||
{
|
||||
"endtime": 1627975503,
|
||||
"id": "networking",
|
||||
"node": "iaclab-01-01",
|
||||
"pid": 6778,
|
||||
"pstart": 362341540,
|
||||
"starttime": 1627975503,
|
||||
"status": "OK",
|
||||
"type": "srvreload",
|
||||
"upid": "UPID:iaclab-01-01:00001A7A:1598E4A4:6108EF4F:srvreload:networking:root@pam:",
|
||||
"user": "root@pam",
|
||||
"failed": False
|
||||
}
|
||||
]
|
||||
|
||||
EXPECTED_SINGLE_TASK = [
|
||||
{
|
||||
"endtime": 1627975785,
|
||||
"id": "networking",
|
||||
"node": "iaclab-01-01",
|
||||
"pid": 10717,
|
||||
"pstart": 362369675,
|
||||
"starttime": 1627975784,
|
||||
"status": "command 'ifreload -a' failed: exit code 1",
|
||||
"type": "srvreload",
|
||||
"upid": "UPID:iaclab-01-01:000029DD:1599528B:6108F068:srvreload:networking:root@pam:",
|
||||
"user": "root@pam",
|
||||
"failed": True
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect')
|
||||
def test_without_required_parameters(connect_mock, capfd, mocker):
|
||||
set_module_args({})
|
||||
with pytest.raises(SystemExit):
|
||||
proxmox_tasks_info.main()
|
||||
out, err = capfd.readouterr()
|
||||
assert not err
|
||||
assert json.loads(out)['failed']
|
||||
|
||||
|
||||
@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect')
|
||||
def test_get_tasks(connect_mock, capfd, mocker):
|
||||
set_module_args({'api_host': 'proxmoxhost',
|
||||
'api_user': 'root@pam',
|
||||
'api_password': 'supersecret',
|
||||
'node': NODE})
|
||||
|
||||
def f():
|
||||
m = mocker.MagicMock()
|
||||
g = mocker.MagicMock()
|
||||
m.nodes = mocker.MagicMock(return_value=g)
|
||||
g.tasks.get = mocker.MagicMock(return_value=TASKS)
|
||||
return m
|
||||
|
||||
connect_mock.side_effect = f
|
||||
proxmox_tasks_info.HAS_PROXMOXER = True
|
||||
|
||||
with pytest.raises(SystemExit):
|
||||
proxmox_tasks_info.main()
|
||||
out, err = capfd.readouterr()
|
||||
assert not err
|
||||
assert len(json.loads(out)['proxmox_tasks']) != 0
|
||||
assert not json.loads(out)['changed']
|
||||
|
||||
|
||||
@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect')
|
||||
def test_get_single_task(connect_mock, capfd, mocker):
|
||||
set_module_args({'api_host': 'proxmoxhost',
|
||||
'api_user': 'root@pam',
|
||||
'api_password': 'supersecret',
|
||||
'node': NODE,
|
||||
'task': TASK_UPID})
|
||||
|
||||
def f():
|
||||
m = mocker.MagicMock()
|
||||
g = mocker.MagicMock()
|
||||
m.nodes = mocker.MagicMock(return_value=g)
|
||||
g.tasks.get = mocker.MagicMock(return_value=TASKS)
|
||||
return m
|
||||
|
||||
connect_mock.side_effect = f
|
||||
proxmox_tasks_info.HAS_PROXMOXER = True
|
||||
|
||||
with pytest.raises(SystemExit):
|
||||
proxmox_tasks_info.main()
|
||||
out, err = capfd.readouterr()
|
||||
assert not err
|
||||
assert len(json.loads(out)['proxmox_tasks']) == 1
|
||||
assert json.loads(out)
|
||||
assert not json.loads(out)['changed']
|
||||
|
||||
|
||||
@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect')
|
||||
def test_get_non_existent_task(connect_mock, capfd, mocker):
|
||||
set_module_args({'api_host': 'proxmoxhost',
|
||||
'api_user': 'root@pam',
|
||||
'api_password': 'supersecret',
|
||||
'node': NODE,
|
||||
'task': 'UPID:nonexistent'})
|
||||
|
||||
def f():
|
||||
m = mocker.MagicMock()
|
||||
g = mocker.MagicMock()
|
||||
m.nodes = mocker.MagicMock(return_value=g)
|
||||
g.tasks.get = mocker.MagicMock(return_value=TASKS)
|
||||
return m
|
||||
|
||||
connect_mock.side_effect = f
|
||||
proxmox_tasks_info.HAS_PROXMOXER = True
|
||||
|
||||
with pytest.raises(SystemExit):
|
||||
proxmox_tasks_info.main()
|
||||
out, err = capfd.readouterr()
|
||||
assert not err
|
||||
assert json.loads(out)['failed']
|
||||
assert 'proxmox_tasks' not in json.loads(out)
|
||||
assert not json.loads(out)['changed']
|
||||
assert json.loads(
|
||||
out)['msg'] == 'Task: UPID:nonexistent does not exist on node: node01.'
|
||||
@@ -224,7 +224,7 @@ class TestKeycloakIdentityProvider(ModuleTestCase):
|
||||
# Verify that the module's changed status matches what is expected
|
||||
self.assertIs(exec_info.exception.args[0]['changed'], changed)
|
||||
|
||||
def test_create_when_present(self):
|
||||
def test_update_when_present(self):
|
||||
"""Update existing identity provider"""
|
||||
|
||||
module_args = {
|
||||
@@ -250,6 +250,15 @@ class TestKeycloakIdentityProvider(ModuleTestCase):
|
||||
'syncMode': "FORCE"
|
||||
},
|
||||
'mappers': [{
|
||||
'name': "username",
|
||||
'identityProviderAlias': "oidc-idp",
|
||||
'identityProviderMapper': "oidc-user-attribute-idp-mapper",
|
||||
'config': {
|
||||
'claim': "username",
|
||||
'user.attribute': "username",
|
||||
'syncMode': "INHERIT",
|
||||
}
|
||||
}, {
|
||||
'name': "first_name",
|
||||
'identityProviderAlias': "oidc-idp",
|
||||
'identityProviderMapper': "oidc-user-attribute-idp-mapper",
|
||||
@@ -319,10 +328,20 @@ class TestKeycloakIdentityProvider(ModuleTestCase):
|
||||
]
|
||||
return_value_mappers_get = [
|
||||
[{
|
||||
'config': {
|
||||
'claim': "username",
|
||||
'syncMode': "INHERIT",
|
||||
'user.attribute': "username"
|
||||
},
|
||||
"id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b",
|
||||
'identityProviderAlias': "oidc-idp",
|
||||
'identityProviderMapper': "oidc-user-attribute-idp-mapper",
|
||||
'name': "username"
|
||||
}, {
|
||||
"config": {
|
||||
"claim": "first_name_changeme",
|
||||
"syncMode": "INHERIT",
|
||||
"user.attribute": "first_name_changeme"
|
||||
"user.attribute": "first_name"
|
||||
},
|
||||
"id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef",
|
||||
"identityProviderAlias": "oidc-idp",
|
||||
@@ -330,6 +349,79 @@ class TestKeycloakIdentityProvider(ModuleTestCase):
|
||||
"name": "first_name"
|
||||
}],
|
||||
[{
|
||||
'config': {
|
||||
'claim': "username",
|
||||
'syncMode': "INHERIT",
|
||||
'user.attribute': "username"
|
||||
},
|
||||
"id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b",
|
||||
'identityProviderAlias': "oidc-idp",
|
||||
'identityProviderMapper': "oidc-user-attribute-idp-mapper",
|
||||
'name': "username"
|
||||
}, {
|
||||
"config": {
|
||||
"claim": "first_name_changeme",
|
||||
"syncMode": "INHERIT",
|
||||
"user.attribute": "first_name"
|
||||
},
|
||||
"id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef",
|
||||
"identityProviderAlias": "oidc-idp",
|
||||
"identityProviderMapper": "oidc-user-attribute-idp-mapper",
|
||||
"name": "first_name"
|
||||
}],
|
||||
[{
|
||||
'config': {
|
||||
'claim': "username",
|
||||
'syncMode': "INHERIT",
|
||||
'user.attribute': "username"
|
||||
},
|
||||
"id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b",
|
||||
'identityProviderAlias': "oidc-idp",
|
||||
'identityProviderMapper': "oidc-user-attribute-idp-mapper",
|
||||
'name': "username"
|
||||
}, {
|
||||
"config": {
|
||||
"claim": "first_name_changeme",
|
||||
"syncMode": "INHERIT",
|
||||
"user.attribute": "first_name"
|
||||
},
|
||||
"id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef",
|
||||
"identityProviderAlias": "oidc-idp",
|
||||
"identityProviderMapper": "oidc-user-attribute-idp-mapper",
|
||||
"name": "first_name"
|
||||
}],
|
||||
[{
|
||||
'config': {
|
||||
'claim': "username",
|
||||
'syncMode': "INHERIT",
|
||||
'user.attribute': "username"
|
||||
},
|
||||
"id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b",
|
||||
'identityProviderAlias': "oidc-idp",
|
||||
'identityProviderMapper': "oidc-user-attribute-idp-mapper",
|
||||
'name': "username"
|
||||
}, {
|
||||
"config": {
|
||||
"claim": "first_name_changeme",
|
||||
"syncMode": "INHERIT",
|
||||
"user.attribute": "first_name"
|
||||
},
|
||||
"id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef",
|
||||
"identityProviderAlias": "oidc-idp",
|
||||
"identityProviderMapper": "oidc-user-attribute-idp-mapper",
|
||||
"name": "first_name"
|
||||
}],
|
||||
[{
|
||||
'config': {
|
||||
'claim': "username",
|
||||
'syncMode': "INHERIT",
|
||||
'user.attribute': "username"
|
||||
},
|
||||
"id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b",
|
||||
'identityProviderAlias': "oidc-idp",
|
||||
'identityProviderMapper': "oidc-user-attribute-idp-mapper",
|
||||
'name': "username"
|
||||
}, {
|
||||
"config": {
|
||||
"claim": "first_name",
|
||||
"syncMode": "INHERIT",
|
||||
@@ -371,7 +463,7 @@ class TestKeycloakIdentityProvider(ModuleTestCase):
|
||||
self.module.main()
|
||||
|
||||
self.assertEqual(len(mock_get_identity_provider.mock_calls), 2)
|
||||
self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 2)
|
||||
self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 5)
|
||||
self.assertEqual(len(mock_update_identity_provider.mock_calls), 1)
|
||||
self.assertEqual(len(mock_update_identity_provider_mapper.mock_calls), 1)
|
||||
self.assertEqual(len(mock_create_identity_provider_mapper.mock_calls), 1)
|
||||
|
||||
@@ -122,6 +122,37 @@ ipv6.ignore-auto-dns: no
|
||||
ipv6.ignore-auto-routes: no
|
||||
"""
|
||||
|
||||
TESTCASE_GENERIC_MODIFY_ROUTING_RULES = [
|
||||
{
|
||||
'type': 'generic',
|
||||
'conn_name': 'non_existent_nw_device',
|
||||
'ifname': 'generic_non_existant',
|
||||
'ip4': '10.10.10.10/24',
|
||||
'gw4': '10.10.10.1',
|
||||
'routing_rules4': ['priority 5 from 10.0.0.0/24 table 5000', 'priority 10 from 10.0.1.0/24 table 5001'],
|
||||
'state': 'present',
|
||||
'_ansible_check_mode': False,
|
||||
},
|
||||
]
|
||||
|
||||
TESTCASE_GENERIC_MODIFY_ROUTING_RULES_SHOW_OUTPUT = """\
|
||||
connection.id: non_existent_nw_device
|
||||
connection.interface-name: generic_non_existant
|
||||
connection.autoconnect: yes
|
||||
ipv4.method: manual
|
||||
ipv4.addresses: 10.10.10.10/24
|
||||
ipv4.gateway: 10.10.10.1
|
||||
ipv4.routing-rules: priority 5 from 10.0.0.0/24 table 5000, priority 10 from 10.0.1.0/24 table 5001
|
||||
ipv4.ignore-auto-dns: no
|
||||
ipv4.ignore-auto-routes: no
|
||||
ipv4.never-default: no
|
||||
ipv4.may-fail: yes
|
||||
ipv6.method: auto
|
||||
ipv6.ignore-auto-dns: no
|
||||
ipv6.ignore-auto-routes: no
|
||||
"""
|
||||
|
||||
|
||||
TESTCASE_GENERIC_DNS4_SEARCH = [
|
||||
{
|
||||
'type': 'generic',
|
||||
@@ -738,6 +769,13 @@ def mocked_generic_connection_unchanged(mocker):
|
||||
execute_return=(0, TESTCASE_GENERIC_SHOW_OUTPUT, ""))
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mocked_generic_connection_unchanged(mocker):
|
||||
mocker_set(mocker,
|
||||
connection_exists=True,
|
||||
execute_return=(0, TESTCASE_GENERIC_MODIFY_ROUTING_RULES_SHOW_OUTPUT, ""))
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mocked_generic_connection_dns_search_unchanged(mocker):
|
||||
mocker_set(mocker,
|
||||
@@ -1038,6 +1076,26 @@ def test_generic_connection_unchanged(mocked_generic_connection_unchanged, capfd
|
||||
assert not results['changed']
|
||||
|
||||
|
||||
@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_MODIFY_ROUTING_RULES, indirect=['patch_ansible_module'])
|
||||
def test_generic_connection_modify_routing_rules4(mocked_generic_connection_create, capfd):
|
||||
"""
|
||||
Test : Generic connection modified with routing-rules4
|
||||
"""
|
||||
with pytest.raises(SystemExit):
|
||||
nmcli.main()
|
||||
|
||||
assert nmcli.Nmcli.execute_command.call_count == 1
|
||||
arg_list = nmcli.Nmcli.execute_command.call_args_list
|
||||
args, kwargs = arg_list[0]
|
||||
|
||||
assert 'ipv4.routing-rules' in args[0]
|
||||
|
||||
out, err = capfd.readouterr()
|
||||
results = json.loads(out)
|
||||
assert not results.get('failed')
|
||||
assert results['changed']
|
||||
|
||||
|
||||
@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_DNS4_SEARCH, indirect=['patch_ansible_module'])
|
||||
def test_generic_connection_create_dns_search(mocked_generic_connection_create, capfd):
|
||||
"""
|
||||
|
||||
@@ -54,6 +54,7 @@ dry_mode_cmd_with_port_700 = {
|
||||
"ufw status verbose": ufw_status_verbose_with_port_7000,
|
||||
"ufw --version": ufw_version_35,
|
||||
"ufw --dry-run allow from any to any port 7000 proto tcp": skippg_adding_existing_rules,
|
||||
"ufw --dry-run insert 1 allow from any to any port 7000 proto tcp": skippg_adding_existing_rules,
|
||||
"ufw --dry-run delete allow from any to any port 7000 proto tcp": "",
|
||||
"ufw --dry-run delete allow from any to any port 7001 proto tcp": user_rules_with_port_7000,
|
||||
"ufw --dry-run route allow in on foo out on bar from 1.1.1.1 port 7000 to 8.8.8.8 port 7001 proto tcp": "",
|
||||
@@ -178,6 +179,17 @@ class TestUFW(unittest.TestCase):
|
||||
result = self.__getResult(do_nothing_func_port_7000)
|
||||
self.assertFalse(result.exception.args[0]['changed'])
|
||||
|
||||
def test_check_mode_add_insert_rules(self):
|
||||
set_module_args({
|
||||
'insert': '1',
|
||||
'rule': 'allow',
|
||||
'proto': 'tcp',
|
||||
'port': '7000',
|
||||
'_ansible_check_mode': True
|
||||
})
|
||||
result = self.__getResult(do_nothing_func_port_7000)
|
||||
self.assertFalse(result.exception.args[0]['changed'])
|
||||
|
||||
def test_check_mode_add_detailed_route(self):
|
||||
set_module_args({
|
||||
'rule': 'allow',
|
||||
@@ -318,6 +330,19 @@ class TestUFW(unittest.TestCase):
|
||||
|
||||
self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
|
||||
|
||||
def test_check_mode_delete_existing_insert_rules(self):
|
||||
|
||||
set_module_args({
|
||||
'insert': '1',
|
||||
'rule': 'allow',
|
||||
'proto': 'tcp',
|
||||
'port': '7000',
|
||||
'delete': 'yes',
|
||||
'_ansible_check_mode': True,
|
||||
})
|
||||
|
||||
self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
|
||||
|
||||
def test_check_mode_delete_not_existing_rules(self):
|
||||
|
||||
set_module_args({
|
||||
@@ -330,6 +355,19 @@ class TestUFW(unittest.TestCase):
|
||||
|
||||
self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
|
||||
|
||||
def test_check_mode_delete_not_existing_insert_rules(self):
|
||||
|
||||
set_module_args({
|
||||
'insert': '1',
|
||||
'rule': 'allow',
|
||||
'proto': 'tcp',
|
||||
'port': '7001',
|
||||
'delete': 'yes',
|
||||
'_ansible_check_mode': True,
|
||||
})
|
||||
|
||||
self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
|
||||
|
||||
def test_enable_mode(self):
|
||||
set_module_args({
|
||||
'state': 'enabled',
|
||||
|
||||
@@ -17,7 +17,8 @@ PyGithub
|
||||
httmock
|
||||
|
||||
# requirement for maven_artifact module
|
||||
lxml
|
||||
lxml < 4.3.0 ; python_version < '2.7' # lxml 4.3.0 and later require python 2.7 or later
|
||||
lxml ; python_version >= '2.7'
|
||||
semantic_version
|
||||
|
||||
# requirement for datadog_downtime module
|
||||
@@ -28,6 +29,10 @@ dnsimple >= 2 ; python_version >= '3.6'
|
||||
dataclasses ; python_version == '3.6'
|
||||
|
||||
# requirement for the opentelemetry callback plugin
|
||||
opentelemetry-api ; python_version >= '3.6'
|
||||
opentelemetry-exporter-otlp ; python_version >= '3.6'
|
||||
opentelemetry-sdk ; python_version >= '3.6'
|
||||
# WARNING: these libraries depend on grpcio, which takes 7 minutes (!) to build in CI on Python 3.10
|
||||
opentelemetry-api ; python_version >= '3.6' and python_version < '3.10'
|
||||
opentelemetry-exporter-otlp ; python_version >= '3.6' and python_version < '3.10'
|
||||
opentelemetry-sdk ; python_version >= '3.6' and python_version < '3.10'
|
||||
|
||||
# requirement for the elastic callback plugin
|
||||
elastic-apm ; python_version >= '3.6'
|
||||
|
||||
@@ -50,7 +50,7 @@ function retry
|
||||
echo "@* -> ${result}"
|
||||
done
|
||||
echo "Command '@*' failed 3 times!"
|
||||
exit -1
|
||||
exit 255
|
||||
}
|
||||
|
||||
command -v pip
|
||||
@@ -87,9 +87,13 @@ if [ "${script}" != "sanity" ] || [ "${test}" == "sanity/extra" ]; then
|
||||
fi
|
||||
|
||||
if [ "${script}" != "sanity" ] && [ "${script}" != "units" ]; then
|
||||
CRYPTO_BRANCH=main
|
||||
if [ "${script}" == "linux" ] && [[ "${test}" =~ "ubuntu1604/" ]]; then
|
||||
CRYPTO_BRANCH=stable-1
|
||||
fi
|
||||
# To prevent Python dependencies on other collections only install other collections for integration tests
|
||||
retry git clone --depth=1 --single-branch https://github.com/ansible-collections/ansible.posix.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/ansible/posix"
|
||||
retry git clone --depth=1 --single-branch https://github.com/ansible-collections/community.crypto.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/crypto"
|
||||
retry git clone --depth=1 --branch "${CRYPTO_BRANCH}" --single-branch https://github.com/ansible-collections/community.crypto.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/crypto"
|
||||
# NOTE: we're installing with git to work around Galaxy being a huge PITA (https://github.com/ansible/galaxy/issues/2429)
|
||||
# retry ansible-galaxy -vvv collection install ansible.posix
|
||||
# retry ansible-galaxy -vvv collection install community.crypto
|
||||
|
||||
Reference in New Issue
Block a user