mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-05-01 10:53:20 +00:00
Compare commits
52 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
d483fd9482 | ||
|
|
8da9cf3276 | ||
|
|
3c5c3a0113 | ||
|
|
7def57a71f | ||
|
|
e5930aabcb | ||
|
|
48bfba435f | ||
|
|
9740b76f3c | ||
|
|
24cf561135 | ||
|
|
61324ed9eb | ||
|
|
99336ba5fe | ||
|
|
9d99ccef2d | ||
|
|
a146eb3118 | ||
|
|
c7f7bd6050 | ||
|
|
54099d77ff | ||
|
|
ee07d8320a | ||
|
|
0729f0c262 | ||
|
|
57cd48f3cf | ||
|
|
afd2151672 | ||
|
|
ea9b272043 | ||
|
|
60addb332d | ||
|
|
1ade62c5bc | ||
|
|
7c8cc96d8b | ||
|
|
ca177a0ceb | ||
|
|
c0e769e5f5 | ||
|
|
585dbc3171 | ||
|
|
b400491ef3 | ||
|
|
490baed566 | ||
|
|
811c4a304a | ||
|
|
c0fde76b79 | ||
|
|
16c7615b82 | ||
|
|
474364c862 | ||
|
|
1da5f7dc54 | ||
|
|
559c914e36 | ||
|
|
91cca4ae49 | ||
|
|
82a9db9738 | ||
|
|
3fd84d71b8 | ||
|
|
a17124f3c4 | ||
|
|
efc2cbf840 | ||
|
|
aa136aca4c | ||
|
|
a1ca89b058 | ||
|
|
dd70419d18 | ||
|
|
ef5ac023cf | ||
|
|
8bc5494ad5 | ||
|
|
d95a821d5b | ||
|
|
b7697fe3de | ||
|
|
16e05ab5f3 | ||
|
|
5cf7ce705a | ||
|
|
c8b8668212 | ||
|
|
2d450a5a36 | ||
|
|
e08412c345 | ||
|
|
c355f93d62 | ||
|
|
80206b5a53 |
@@ -197,10 +197,10 @@ stages:
|
||||
parameters:
|
||||
testFormat: devel/{0}
|
||||
targets:
|
||||
- name: Alpine 3.16
|
||||
test: alpine/3.16
|
||||
# - name: Fedora 36
|
||||
# test: fedora/36
|
||||
- name: Alpine 3.17
|
||||
test: alpine/3.17
|
||||
# - name: Fedora 37
|
||||
# test: fedora/37
|
||||
# - name: Ubuntu 20.04
|
||||
# test: ubuntu/20.04
|
||||
- name: Ubuntu 22.04
|
||||
@@ -219,10 +219,12 @@ stages:
|
||||
test: macos/12.0
|
||||
- name: RHEL 7.9
|
||||
test: rhel/7.9
|
||||
- name: RHEL 9.0
|
||||
test: rhel/9.0
|
||||
- name: RHEL 9.1
|
||||
test: rhel/9.1
|
||||
- name: FreeBSD 13.1
|
||||
test: freebsd/13.1
|
||||
- name: FreeBSD 12.4
|
||||
test: freebsd/12.4
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
@@ -305,8 +307,8 @@ stages:
|
||||
targets:
|
||||
- name: CentOS 7
|
||||
test: centos7
|
||||
- name: Fedora 36
|
||||
test: fedora36
|
||||
- name: Fedora 37
|
||||
test: fedora37
|
||||
- name: openSUSE 15
|
||||
test: opensuse15
|
||||
- name: Ubuntu 20.04
|
||||
@@ -327,8 +329,8 @@ stages:
|
||||
parameters:
|
||||
testFormat: 2.14/linux/{0}
|
||||
targets:
|
||||
- name: Ubuntu 20.04
|
||||
test: ubuntu2004
|
||||
- name: Fedora 36
|
||||
test: fedora36
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
@@ -487,10 +489,11 @@ stages:
|
||||
- Docker_2_13
|
||||
- Docker_2_14
|
||||
- Docker_community_devel
|
||||
- Generic_devel
|
||||
- Generic_2_11
|
||||
- Generic_2_12
|
||||
- Generic_2_13
|
||||
- Generic_2_14
|
||||
# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
|
||||
# - Generic_devel
|
||||
# - Generic_2_11
|
||||
# - Generic_2_12
|
||||
# - Generic_2_13
|
||||
# - Generic_2_14
|
||||
jobs:
|
||||
- template: templates/coverage.yml
|
||||
|
||||
4
.github/BOTMETA.yml
vendored
4
.github/BOTMETA.yml
vendored
@@ -830,6 +830,10 @@ files:
|
||||
maintainers: shane-walker xcambar
|
||||
$modules/nsupdate.py:
|
||||
maintainers: nerzhul
|
||||
$modules/ocapi_command.py:
|
||||
maintainers: $team_wdc
|
||||
$modules/ocapi_info.py:
|
||||
maintainers: $team_wdc
|
||||
$modules/oci_vcn.py:
|
||||
maintainers: $team_oracle rohitChaware
|
||||
$modules/odbc.py:
|
||||
|
||||
@@ -6,6 +6,76 @@ Community General Release Notes
|
||||
|
||||
This changelog describes changes after version 5.0.0.
|
||||
|
||||
v6.3.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular bugfix and feature release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- apache2_module - add module argument ``warn_mpm_absent`` to control whether warning are raised in some edge cases (https://github.com/ansible-collections/community.general/pull/5793).
|
||||
- bitwarden lookup plugin - can now retrieve secrets from custom fields (https://github.com/ansible-collections/community.general/pull/5694).
|
||||
- bitwarden lookup plugin - implement filtering results by ``collection_id`` parameter (https://github.com/ansible-collections/community.general/issues/5849).
|
||||
- dig lookup plugin - support CAA record type (https://github.com/ansible-collections/community.general/pull/5913).
|
||||
- gitlab_project - add ``builds_access_level``, ``container_registry_access_level`` and ``forking_access_level`` options (https://github.com/ansible-collections/community.general/pull/5706).
|
||||
- gitlab_runner - add new boolean option ``access_level_on_creation``. It controls, whether the value of ``access_level`` is used for runner registration or not. The option ``access_level`` has been ignored on registration so far and was only used on updates (https://github.com/ansible-collections/community.general/issues/5907, https://github.com/ansible-collections/community.general/pull/5908).
|
||||
- ilo_redfish_utils module utils - change implementation of DNS Server IP and NTP Server IP update (https://github.com/ansible-collections/community.general/pull/5804).
|
||||
- ipa_group - allow to add and remove external users with the ``external_user`` option (https://github.com/ansible-collections/community.general/pull/5897).
|
||||
- iptables_state - minor refactoring within the module (https://github.com/ansible-collections/community.general/pull/5844).
|
||||
- one_vm - add a new ``updateconf`` option which implements the ``one.vm.updateconf`` API call (https://github.com/ansible-collections/community.general/pull/5812).
|
||||
- opkg - refactored module to use ``CmdRunner`` for executing ``opkg`` (https://github.com/ansible-collections/community.general/pull/5718).
|
||||
- redhat_subscription - adds ``token`` parameter for subscription-manager authentication using Red Hat API token (https://github.com/ansible-collections/community.general/pull/5725).
|
||||
- snap - minor refactor when executing module (https://github.com/ansible-collections/community.general/pull/5773).
|
||||
- snap_alias - refactored module to use ``CmdRunner`` to execute ``snap`` (https://github.com/ansible-collections/community.general/pull/5486).
|
||||
- sudoers - add ``setenv`` parameters to support passing environment variables via sudo. (https://github.com/ansible-collections/community.general/pull/5883)
|
||||
|
||||
Breaking Changes / Porting Guide
|
||||
--------------------------------
|
||||
|
||||
- ModuleHelper module utils - when the module sets output variables named ``msg``, ``exception``, ``output``, ``vars``, or ``changed``, the actual output will prefix those names with ``_`` (underscore symbol) only when they clash with output variables generated by ModuleHelper itself, which only occurs when handling exceptions. Please note that this breaking change does not require a new major release since before this release, it was not possible to add such variables to the output `due to a bug <https://github.com/ansible-collections/community.general/pull/5755>`__ (https://github.com/ansible-collections/community.general/pull/5765).
|
||||
|
||||
Deprecated Features
|
||||
-------------------
|
||||
|
||||
- consul - deprecate using parameters unused for ``state=absent`` (https://github.com/ansible-collections/community.general/pull/5772).
|
||||
- gitlab_runner - the default of the new option ``access_level_on_creation`` will change from ``false`` to ``true`` in community.general 7.0.0. This will cause ``access_level`` to be used during runner registration as well, and not only during updates (https://github.com/ansible-collections/community.general/pull/5908).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- ModuleHelper - fix bug when adjusting the name of reserved output variables (https://github.com/ansible-collections/community.general/pull/5755).
|
||||
- alternatives - support subcommands on Fedora 37, which uses ``follower`` instead of ``slave`` (https://github.com/ansible-collections/community.general/pull/5794).
|
||||
- bitwarden lookup plugin - clarify what to do, if the bitwarden vault is not unlocked (https://github.com/ansible-collections/community.general/pull/5811).
|
||||
- dig lookup plugin - correctly handle DNSKEY record type's ``algorithm`` field (https://github.com/ansible-collections/community.general/pull/5914).
|
||||
- gem - fix force parameter not being passed to gem command when uninstalling (https://github.com/ansible-collections/community.general/pull/5822).
|
||||
- gem - fix hang due to interactive prompt for confirmation on specific version uninstall (https://github.com/ansible-collections/community.general/pull/5751).
|
||||
- gitlab_deploy_key - also update ``title`` and not just ``can_push`` (https://github.com/ansible-collections/community.general/pull/5888).
|
||||
- keycloak_user_federation - fixes federation creation issue. When a new federation was created and at the same time a default / standard mapper was also changed / updated the creation process failed as a bad None set variable led to a bad malformed url request (https://github.com/ansible-collections/community.general/pull/5750).
|
||||
- keycloak_user_federation - fixes idempotency detection issues. In some cases the module could fail to properly detect already existing user federations because of a buggy seemingly superflous extra query parameter (https://github.com/ansible-collections/community.general/pull/5732).
|
||||
- loganalytics callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
|
||||
- logdna callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
|
||||
- logstash callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
|
||||
- nsupdate - fix zone lookup. The SOA record for an existing zone is returned as an answer RR and not as an authority RR (https://github.com/ansible-collections/community.general/issues/5817, https://github.com/ansible-collections/community.general/pull/5818).
|
||||
- proxmox_disk - fixed issue with read timeout on import action (https://github.com/ansible-collections/community.general/pull/5803).
|
||||
- redfish_utils - removed basic auth HTTP header when performing a GET on the service root resource and when performing a POST to the session collection (https://github.com/ansible-collections/community.general/issues/5886).
|
||||
- splunk callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
|
||||
- sumologic callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
|
||||
- syslog_json callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
|
||||
- terraform - fix ``current`` workspace never getting appended to the ``all`` key in the ``workspace_ctf`` object (https://github.com/ansible-collections/community.general/pull/5735).
|
||||
- terraform - fix ``terraform init`` failure when there are multiple workspaces on the remote backend and when ``default`` workspace is missing by setting ``TF_WORKSPACE`` environmental variable to the value of ``workspace`` when used (https://github.com/ansible-collections/community.general/pull/5735).
|
||||
- terraform module - disable ANSI escape sequences during validation phase (https://github.com/ansible-collections/community.general/pull/5843).
|
||||
- xml - fixed a bug where empty ``children`` list would not be set (https://github.com/ansible-collections/community.general/pull/5808).
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
- ocapi_command - Manages Out-Of-Band controllers using Open Composable API (OCAPI)
|
||||
- ocapi_info - Manages Out-Of-Band controllers using Open Composable API (OCAPI)
|
||||
|
||||
v6.2.0
|
||||
======
|
||||
|
||||
|
||||
@@ -883,3 +883,144 @@ releases:
|
||||
- 5744-unixy-callback-fix-config-manager-typo.yml
|
||||
- 6.2.0.yml
|
||||
release_date: '2023-01-04'
|
||||
6.3.0:
|
||||
changes:
|
||||
breaking_changes:
|
||||
- 'ModuleHelper module utils - when the module sets output variables named ``msg``,
|
||||
``exception``, ``output``, ``vars``, or ``changed``, the actual output will
|
||||
prefix those names with ``_`` (underscore symbol) only when they clash with
|
||||
output variables generated by ModuleHelper itself, which only occurs when
|
||||
handling exceptions. Please note that this breaking change does not require
|
||||
a new major release since before this release, it was not possible to add
|
||||
such variables to the output `due to a bug <https://github.com/ansible-collections/community.general/pull/5755>`__
|
||||
(https://github.com/ansible-collections/community.general/pull/5765).
|
||||
|
||||
'
|
||||
bugfixes:
|
||||
- ModuleHelper - fix bug when adjusting the name of reserved output variables
|
||||
(https://github.com/ansible-collections/community.general/pull/5755).
|
||||
- alternatives - support subcommands on Fedora 37, which uses ``follower`` instead
|
||||
of ``slave`` (https://github.com/ansible-collections/community.general/pull/5794).
|
||||
- bitwarden lookup plugin - clarify what to do, if the bitwarden vault is not
|
||||
unlocked (https://github.com/ansible-collections/community.general/pull/5811).
|
||||
- dig lookup plugin - correctly handle DNSKEY record type's ``algorithm`` field
|
||||
(https://github.com/ansible-collections/community.general/pull/5914).
|
||||
- gem - fix force parameter not being passed to gem command when uninstalling
|
||||
(https://github.com/ansible-collections/community.general/pull/5822).
|
||||
- gem - fix hang due to interactive prompt for confirmation on specific version
|
||||
uninstall (https://github.com/ansible-collections/community.general/pull/5751).
|
||||
- gitlab_deploy_key - also update ``title`` and not just ``can_push`` (https://github.com/ansible-collections/community.general/pull/5888).
|
||||
- keycloak_user_federation - fixes federation creation issue. When a new federation
|
||||
was created and at the same time a default / standard mapper was also changed
|
||||
/ updated the creation process failed as a bad None set variable led to a
|
||||
bad malformed url request (https://github.com/ansible-collections/community.general/pull/5750).
|
||||
- 'keycloak_user_federation - fixes idempotency detection issues. In some cases
|
||||
the module could fail to properly detect already existing user federations
|
||||
because of a buggy seemingly superflous extra query parameter (https://github.com/ansible-collections/community.general/pull/5732).
|
||||
|
||||
'
|
||||
- loganalytics callback plugin - adjust type of callback to ``notification``,
|
||||
it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
|
||||
- logdna callback plugin - adjust type of callback to ``notification``, it was
|
||||
incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
|
||||
- logstash callback plugin - adjust type of callback to ``notification``, it
|
||||
was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
|
||||
- nsupdate - fix zone lookup. The SOA record for an existing zone is returned
|
||||
as an answer RR and not as an authority RR (https://github.com/ansible-collections/community.general/issues/5817,
|
||||
https://github.com/ansible-collections/community.general/pull/5818).
|
||||
- proxmox_disk - fixed issue with read timeout on import action (https://github.com/ansible-collections/community.general/pull/5803).
|
||||
- redfish_utils - removed basic auth HTTP header when performing a GET on the
|
||||
service root resource and when performing a POST to the session collection
|
||||
(https://github.com/ansible-collections/community.general/issues/5886).
|
||||
- splunk callback plugin - adjust type of callback to ``notification``, it was
|
||||
incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
|
||||
- sumologic callback plugin - adjust type of callback to ``notification``, it
|
||||
was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
|
||||
- syslog_json callback plugin - adjust type of callback to ``notification``,
|
||||
it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
|
||||
- terraform - fix ``current`` workspace never getting appended to the ``all``
|
||||
key in the ``workspace_ctf`` object (https://github.com/ansible-collections/community.general/pull/5735).
|
||||
- terraform - fix ``terraform init`` failure when there are multiple workspaces
|
||||
on the remote backend and when ``default`` workspace is missing by setting
|
||||
``TF_WORKSPACE`` environmental variable to the value of ``workspace`` when
|
||||
used (https://github.com/ansible-collections/community.general/pull/5735).
|
||||
- terraform module - disable ANSI escape sequences during validation phase (https://github.com/ansible-collections/community.general/pull/5843).
|
||||
- xml - fixed a bug where empty ``children`` list would not be set (https://github.com/ansible-collections/community.general/pull/5808).
|
||||
deprecated_features:
|
||||
- consul - deprecate using parameters unused for ``state=absent`` (https://github.com/ansible-collections/community.general/pull/5772).
|
||||
- gitlab_runner - the default of the new option ``access_level_on_creation``
|
||||
will change from ``false`` to ``true`` in community.general 7.0.0. This will
|
||||
cause ``access_level`` to be used during runner registration as well, and
|
||||
not only during updates (https://github.com/ansible-collections/community.general/pull/5908).
|
||||
minor_changes:
|
||||
- apache2_module - add module argument ``warn_mpm_absent`` to control whether
|
||||
warning are raised in some edge cases (https://github.com/ansible-collections/community.general/pull/5793).
|
||||
- bitwarden lookup plugin - can now retrieve secrets from custom fields (https://github.com/ansible-collections/community.general/pull/5694).
|
||||
- bitwarden lookup plugin - implement filtering results by ``collection_id``
|
||||
parameter (https://github.com/ansible-collections/community.general/issues/5849).
|
||||
- dig lookup plugin - support CAA record type (https://github.com/ansible-collections/community.general/pull/5913).
|
||||
- gitlab_project - add ``builds_access_level``, ``container_registry_access_level``
|
||||
and ``forking_access_level`` options (https://github.com/ansible-collections/community.general/pull/5706).
|
||||
- gitlab_runner - add new boolean option ``access_level_on_creation``. It controls,
|
||||
whether the value of ``access_level`` is used for runner registration or not.
|
||||
The option ``access_level`` has been ignored on registration so far and was
|
||||
only used on updates (https://github.com/ansible-collections/community.general/issues/5907,
|
||||
https://github.com/ansible-collections/community.general/pull/5908).
|
||||
- ilo_redfish_utils module utils - change implementation of DNS Server IP and
|
||||
NTP Server IP update (https://github.com/ansible-collections/community.general/pull/5804).
|
||||
- ipa_group - allow to add and remove external users with the ``external_user``
|
||||
option (https://github.com/ansible-collections/community.general/pull/5897).
|
||||
- iptables_state - minor refactoring within the module (https://github.com/ansible-collections/community.general/pull/5844).
|
||||
- one_vm - add a new ``updateconf`` option which implements the ``one.vm.updateconf``
|
||||
API call (https://github.com/ansible-collections/community.general/pull/5812).
|
||||
- opkg - refactored module to use ``CmdRunner`` for executing ``opkg`` (https://github.com/ansible-collections/community.general/pull/5718).
|
||||
- redhat_subscription - adds ``token`` parameter for subscription-manager authentication
|
||||
using Red Hat API token (https://github.com/ansible-collections/community.general/pull/5725).
|
||||
- snap - minor refactor when executing module (https://github.com/ansible-collections/community.general/pull/5773).
|
||||
- snap_alias - refactored module to use ``CmdRunner`` to execute ``snap`` (https://github.com/ansible-collections/community.general/pull/5486).
|
||||
- sudoers - add ``setenv`` parameters to support passing environment variables
|
||||
via sudo. (https://github.com/ansible-collections/community.general/pull/5883)
|
||||
release_summary: Regular bugfix and feature release.
|
||||
fragments:
|
||||
- 5486-snap-alias-cmd-runner.yml
|
||||
- 5694-add-custom-fields-to-bitwarden.yml
|
||||
- 5706-add-builds-forks-container-registry.yml
|
||||
- 5718-opkg-refactor.yaml
|
||||
- 5725-redhat_subscription-add-red-hat-api-token.yml
|
||||
- 5732-bugfix-keycloak-userfed-idempotency.yml
|
||||
- 5735-terraform-init-fix-when-default-workspace-doesnt-exists.yaml
|
||||
- 5750-bugfixing-keycloak-usrfed-fail-when-update-default-mapper-simultaneously.yml
|
||||
- 5751-gem-fix-uninstall-hang.yml
|
||||
- 5755-mh-fix-output-conflict.yml
|
||||
- 5761-callback-types.yml
|
||||
- 5765-mh-lax-output-conflict.yml
|
||||
- 5772-consul-deprecate-params-when-absent.yml
|
||||
- 5773-snap-mh-execute.yml
|
||||
- 5793-apache2-module-npm-warnings.yml
|
||||
- 5794-alternatives-fedora37.yml
|
||||
- 5803-proxmox-read-timeout.yml
|
||||
- 5804-minor-changes-to-hpe-ilo-collection.yml
|
||||
- 5808-xml-children-parameter-does-not-exist.yml
|
||||
- 5811-clarify-bitwarden-error.yml
|
||||
- 5812-implement-updateconf-api-call.yml
|
||||
- 5818-nsupdate-fix-zone-lookup.yml
|
||||
- 5822-gem-uninstall-force.yml
|
||||
- 5843-terraform-validate-no-color.yml
|
||||
- 5844-iptables-state-refactor.yml
|
||||
- 5851-lookup-bitwarden-add-filter-by-collection-id-parameter.yml
|
||||
- 5883-sudoers-add-support-for-setenv-parameter.yml
|
||||
- 5886-redfish-correct-basic-auth-usage-on-session-creation.yml
|
||||
- 5888-update-key-title.yml
|
||||
- 5897-ipa_group-add-external-users.yml
|
||||
- 5907-fix-gitlab_runner-not-idempotent.yml
|
||||
- 5913-dig-caa.yml
|
||||
- 5914-dig-dnskey.yml
|
||||
- 6.3.0.yml
|
||||
modules:
|
||||
- description: Manages Out-Of-Band controllers using Open Composable API (OCAPI)
|
||||
name: ocapi_command
|
||||
namespace: ''
|
||||
- description: Manages Out-Of-Band controllers using Open Composable API (OCAPI)
|
||||
name: ocapi_info
|
||||
namespace: ''
|
||||
release_date: '2023-01-31'
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
|
||||
namespace: community
|
||||
name: general
|
||||
version: 6.2.0
|
||||
version: 6.3.0
|
||||
readme: README.md
|
||||
authors:
|
||||
- Ansible (https://github.com/ansible)
|
||||
|
||||
@@ -16,15 +16,15 @@ DOCUMENTATION = '''
|
||||
- cgroups
|
||||
short_description: Profiles maximum memory usage of tasks and full execution using cgroups
|
||||
description:
|
||||
- This is an ansible callback plugin that profiles maximum memory usage of ansible and individual tasks, and displays a recap at the end using cgroups
|
||||
- This is an ansible callback plugin that profiles maximum memory usage of ansible and individual tasks, and displays a recap at the end using cgroups.
|
||||
notes:
|
||||
- Requires ansible to be run from within a cgroup, such as with C(cgexec -g memory:ansible_profile ansible-playbook ...)
|
||||
- This cgroup should only be used by ansible to get accurate results
|
||||
- To create the cgroup, first use a command such as C(sudo cgcreate -a ec2-user:ec2-user -t ec2-user:ec2-user -g memory:ansible_profile)
|
||||
- Requires ansible to be run from within a cgroup, such as with C(cgexec -g memory:ansible_profile ansible-playbook ...).
|
||||
- This cgroup should only be used by ansible to get accurate results.
|
||||
- To create the cgroup, first use a command such as C(sudo cgcreate -a ec2-user:ec2-user -t ec2-user:ec2-user -g memory:ansible_profile).
|
||||
options:
|
||||
max_mem_file:
|
||||
required: true
|
||||
description: Path to cgroups C(memory.max_usage_in_bytes) file. Example C(/sys/fs/cgroup/memory/ansible_profile/memory.max_usage_in_bytes)
|
||||
description: Path to cgroups C(memory.max_usage_in_bytes) file. Example C(/sys/fs/cgroup/memory/ansible_profile/memory.max_usage_in_bytes).
|
||||
env:
|
||||
- name: CGROUP_MAX_MEM_FILE
|
||||
ini:
|
||||
@@ -32,7 +32,7 @@ DOCUMENTATION = '''
|
||||
key: max_mem_file
|
||||
cur_mem_file:
|
||||
required: true
|
||||
description: Path to C(memory.usage_in_bytes) file. Example C(/sys/fs/cgroup/memory/ansible_profile/memory.usage_in_bytes)
|
||||
description: Path to C(memory.usage_in_bytes) file. Example C(/sys/fs/cgroup/memory/ansible_profile/memory.usage_in_bytes).
|
||||
env:
|
||||
- name: CGROUP_CUR_MEM_FILE
|
||||
ini:
|
||||
|
||||
@@ -13,8 +13,8 @@ DOCUMENTATION = '''
|
||||
type: aggregate
|
||||
short_description: demo callback that adds play/task context
|
||||
description:
|
||||
- Displays some play and task context along with normal output
|
||||
- This is mostly for demo purposes
|
||||
- Displays some play and task context along with normal output.
|
||||
- This is mostly for demo purposes.
|
||||
requirements:
|
||||
- whitelist in configuration
|
||||
'''
|
||||
|
||||
@@ -21,7 +21,7 @@ DOCUMENTATION = '''
|
||||
extends_documentation_fragment:
|
||||
- default_callback
|
||||
requirements:
|
||||
- set as stdout callback in ansible.cfg (stdout_callback = counter_enabled)
|
||||
- set as stdout callback in C(ansible.cfg) (C(stdout_callback = counter_enabled))
|
||||
'''
|
||||
|
||||
from ansible import constants as C
|
||||
|
||||
@@ -14,7 +14,7 @@ short_description: minimal stdout output
|
||||
extends_documentation_fragment:
|
||||
- default_callback
|
||||
description:
|
||||
- When in verbose mode it will act the same as the default callback
|
||||
- When in verbose mode it will act the same as the default callback.
|
||||
author:
|
||||
- Dag Wieers (@dagwieers)
|
||||
requirements:
|
||||
|
||||
@@ -13,10 +13,10 @@ DOCUMENTATION = '''
|
||||
type: notification
|
||||
short_description: post task events to a jabber server
|
||||
description:
|
||||
- The chatty part of ChatOps with a Hipchat server as a target
|
||||
- The chatty part of ChatOps with a Hipchat server as a target.
|
||||
- This callback plugin sends status updates to a HipChat channel during playbook execution.
|
||||
requirements:
|
||||
- xmpp (python lib https://github.com/ArchipelProject/xmpppy)
|
||||
- xmpp (Python library U(https://github.com/ArchipelProject/xmpppy))
|
||||
options:
|
||||
server:
|
||||
description: connection info to jabber server
|
||||
|
||||
@@ -13,10 +13,10 @@ DOCUMENTATION = '''
|
||||
type: notification
|
||||
short_description: write playbook output to log file
|
||||
description:
|
||||
- This callback writes playbook output to a file per host in the C(/var/log/ansible/hosts) directory
|
||||
- This callback writes playbook output to a file per host in the C(/var/log/ansible/hosts) directory.
|
||||
requirements:
|
||||
- Whitelist in configuration
|
||||
- A writeable /var/log/ansible/hosts directory by the user executing Ansible on the controller
|
||||
- A writeable C(/var/log/ansible/hosts) directory by the user executing Ansible on the controller
|
||||
options:
|
||||
log_folder:
|
||||
default: /var/log/ansible/hosts
|
||||
|
||||
@@ -8,7 +8,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: loganalytics
|
||||
type: aggregate
|
||||
type: notification
|
||||
short_description: Posts task results to Azure Log Analytics
|
||||
author: "Cyrus Li (@zhcli) <cyrus1006@gmail.com>"
|
||||
description:
|
||||
@@ -155,7 +155,7 @@ class AzureLogAnalyticsSource(object):
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
CALLBACK_VERSION = 2.0
|
||||
CALLBACK_TYPE = 'aggregate'
|
||||
CALLBACK_TYPE = 'notification'
|
||||
CALLBACK_NAME = 'loganalytics'
|
||||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
|
||||
@@ -9,17 +9,17 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
name: logdna
|
||||
type: aggregate
|
||||
type: notification
|
||||
short_description: Sends playbook logs to LogDNA
|
||||
description:
|
||||
- This callback will report logs from playbook actions, tasks, and events to LogDNA (https://app.logdna.com)
|
||||
- This callback will report logs from playbook actions, tasks, and events to LogDNA (U(https://app.logdna.com)).
|
||||
requirements:
|
||||
- LogDNA Python Library (https://github.com/logdna/python)
|
||||
- LogDNA Python Library (U(https://github.com/logdna/python))
|
||||
- whitelisting in configuration
|
||||
options:
|
||||
conf_key:
|
||||
required: true
|
||||
description: LogDNA Ingestion Key
|
||||
description: LogDNA Ingestion Key.
|
||||
type: string
|
||||
env:
|
||||
- name: LOGDNA_INGESTION_KEY
|
||||
@@ -28,7 +28,7 @@ DOCUMENTATION = '''
|
||||
key: conf_key
|
||||
plugin_ignore_errors:
|
||||
required: false
|
||||
description: Whether to ignore errors on failing or not
|
||||
description: Whether to ignore errors on failing or not.
|
||||
type: boolean
|
||||
env:
|
||||
- name: ANSIBLE_IGNORE_ERRORS
|
||||
@@ -38,7 +38,7 @@ DOCUMENTATION = '''
|
||||
default: false
|
||||
conf_hostname:
|
||||
required: false
|
||||
description: Alternative Host Name; the current host name by default
|
||||
description: Alternative Host Name; the current host name by default.
|
||||
type: string
|
||||
env:
|
||||
- name: LOGDNA_HOSTNAME
|
||||
@@ -47,7 +47,7 @@ DOCUMENTATION = '''
|
||||
key: conf_hostname
|
||||
conf_tags:
|
||||
required: false
|
||||
description: Tags
|
||||
description: Tags.
|
||||
type: string
|
||||
env:
|
||||
- name: LOGDNA_TAGS
|
||||
@@ -111,7 +111,7 @@ def isJSONable(obj):
|
||||
class CallbackModule(CallbackBase):
|
||||
|
||||
CALLBACK_VERSION = 0.1
|
||||
CALLBACK_TYPE = 'aggregate'
|
||||
CALLBACK_TYPE = 'notification'
|
||||
CALLBACK_NAME = 'community.general.logdna'
|
||||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
|
||||
@@ -13,15 +13,15 @@ DOCUMENTATION = '''
|
||||
short_description: Sends events to Logentries
|
||||
description:
|
||||
- This callback plugin will generate JSON objects and send them to Logentries via TCP for auditing/debugging purposes.
|
||||
- Before 2.4, if you wanted to use an ini configuration, the file must be placed in the same directory as this plugin and named logentries.ini
|
||||
- Before 2.4, if you wanted to use an ini configuration, the file must be placed in the same directory as this plugin and named C(logentries.ini).
|
||||
- In 2.4 and above you can just put it in the main Ansible configuration file.
|
||||
requirements:
|
||||
- whitelisting in configuration
|
||||
- certifi (python library)
|
||||
- flatdict (python library), if you want to use the 'flatten' option
|
||||
- certifi (Python library)
|
||||
- flatdict (Python library), if you want to use the 'flatten' option
|
||||
options:
|
||||
api:
|
||||
description: URI to the Logentries API
|
||||
description: URI to the Logentries API.
|
||||
env:
|
||||
- name: LOGENTRIES_API
|
||||
default: data.logentries.com
|
||||
@@ -29,7 +29,7 @@ DOCUMENTATION = '''
|
||||
- section: callback_logentries
|
||||
key: api
|
||||
port:
|
||||
description: HTTP port to use when connecting to the API
|
||||
description: HTTP port to use when connecting to the API.
|
||||
env:
|
||||
- name: LOGENTRIES_PORT
|
||||
default: 80
|
||||
@@ -37,7 +37,7 @@ DOCUMENTATION = '''
|
||||
- section: callback_logentries
|
||||
key: port
|
||||
tls_port:
|
||||
description: Port to use when connecting to the API when TLS is enabled
|
||||
description: Port to use when connecting to the API when TLS is enabled.
|
||||
env:
|
||||
- name: LOGENTRIES_TLS_PORT
|
||||
default: 443
|
||||
@@ -45,7 +45,7 @@ DOCUMENTATION = '''
|
||||
- section: callback_logentries
|
||||
key: tls_port
|
||||
token:
|
||||
description: The logentries "TCP token"
|
||||
description: The logentries C(TCP token).
|
||||
env:
|
||||
- name: LOGENTRIES_ANSIBLE_TOKEN
|
||||
required: true
|
||||
@@ -54,7 +54,7 @@ DOCUMENTATION = '''
|
||||
key: token
|
||||
use_tls:
|
||||
description:
|
||||
- Toggle to decide whether to use TLS to encrypt the communications with the API server
|
||||
- Toggle to decide whether to use TLS to encrypt the communications with the API server.
|
||||
env:
|
||||
- name: LOGENTRIES_USE_TLS
|
||||
default: false
|
||||
@@ -63,7 +63,7 @@ DOCUMENTATION = '''
|
||||
- section: callback_logentries
|
||||
key: use_tls
|
||||
flatten:
|
||||
description: flatten complex data structures into a single dictionary with complex keys
|
||||
description: Flatten complex data structures into a single dictionary with complex keys.
|
||||
type: boolean
|
||||
default: false
|
||||
env:
|
||||
|
||||
@@ -13,13 +13,13 @@ DOCUMENTATION = r'''
|
||||
type: notification
|
||||
short_description: Sends events to Logstash
|
||||
description:
|
||||
- This callback will report facts and task events to Logstash https://www.elastic.co/products/logstash
|
||||
- This callback will report facts and task events to Logstash U(https://www.elastic.co/products/logstash).
|
||||
requirements:
|
||||
- whitelisting in configuration
|
||||
- logstash (python library)
|
||||
- logstash (Python library)
|
||||
options:
|
||||
server:
|
||||
description: Address of the Logstash server
|
||||
description: Address of the Logstash server.
|
||||
env:
|
||||
- name: LOGSTASH_SERVER
|
||||
ini:
|
||||
@@ -28,7 +28,7 @@ DOCUMENTATION = r'''
|
||||
version_added: 1.0.0
|
||||
default: localhost
|
||||
port:
|
||||
description: Port on which logstash is listening
|
||||
description: Port on which logstash is listening.
|
||||
env:
|
||||
- name: LOGSTASH_PORT
|
||||
ini:
|
||||
@@ -37,7 +37,7 @@ DOCUMENTATION = r'''
|
||||
version_added: 1.0.0
|
||||
default: 5000
|
||||
type:
|
||||
description: Message type
|
||||
description: Message type.
|
||||
env:
|
||||
- name: LOGSTASH_TYPE
|
||||
ini:
|
||||
@@ -54,7 +54,7 @@ DOCUMENTATION = r'''
|
||||
env:
|
||||
- name: LOGSTASH_PRE_COMMAND
|
||||
format_version:
|
||||
description: Logging format
|
||||
description: Logging format.
|
||||
type: str
|
||||
version_added: 2.0.0
|
||||
ini:
|
||||
@@ -113,7 +113,7 @@ from ansible.plugins.callback import CallbackBase
|
||||
class CallbackModule(CallbackBase):
|
||||
|
||||
CALLBACK_VERSION = 2.0
|
||||
CALLBACK_TYPE = 'aggregate'
|
||||
CALLBACK_TYPE = 'notification'
|
||||
CALLBACK_NAME = 'community.general.logstash'
|
||||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ DOCUMENTATION = '''
|
||||
- set as main display callback
|
||||
short_description: Don't display stuff to screen
|
||||
description:
|
||||
- This callback prevents outputing events to screen
|
||||
- This callback prevents outputing events to screen.
|
||||
'''
|
||||
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
|
||||
@@ -14,12 +14,12 @@ DOCUMENTATION = '''
|
||||
type: notification
|
||||
requirements:
|
||||
- whitelisting in configuration
|
||||
- the '/usr/bin/say' command line program (standard on macOS) or 'espeak' command line program
|
||||
- the C(/usr/bin/say) command line program (standard on macOS) or C(espeak) command line program
|
||||
short_description: notify using software speech synthesizer
|
||||
description:
|
||||
- This plugin will use the 'say' or 'espeak' program to "speak" about play events.
|
||||
- This plugin will use the C(say) or C(espeak) program to "speak" about play events.
|
||||
notes:
|
||||
- In 2.8, this callback has been renamed from C(osx_say) into M(community.general.say).
|
||||
- In Ansible 2.8, this callback has been renamed from C(osx_say) into M(community.general.say).
|
||||
'''
|
||||
|
||||
import platform
|
||||
|
||||
@@ -22,7 +22,7 @@ DOCUMENTATION = '''
|
||||
options:
|
||||
nocolor:
|
||||
default: false
|
||||
description: This setting allows suppressing colorizing output
|
||||
description: This setting allows suppressing colorizing output.
|
||||
env:
|
||||
- name: ANSIBLE_NOCOLOR
|
||||
- name: ANSIBLE_SELECTIVE_DONT_COLORIZE
|
||||
|
||||
@@ -18,11 +18,11 @@ DOCUMENTATION = '''
|
||||
short_description: Sends play events to a Slack channel
|
||||
description:
|
||||
- This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution.
|
||||
- Before 2.4 only environment variables were available for configuring this plugin
|
||||
- Before Ansible 2.4 only environment variables were available for configuring this plugin.
|
||||
options:
|
||||
webhook_url:
|
||||
required: true
|
||||
description: Slack Webhook URL
|
||||
description: Slack Webhook URL.
|
||||
env:
|
||||
- name: SLACK_WEBHOOK_URL
|
||||
ini:
|
||||
@@ -45,7 +45,7 @@ DOCUMENTATION = '''
|
||||
- section: callback_slack
|
||||
key: username
|
||||
validate_certs:
|
||||
description: validate the SSL certificate of the Slack server. (For HTTPS URLs)
|
||||
description: Validate the SSL certificate of the Slack server for HTTPS URLs.
|
||||
env:
|
||||
- name: SLACK_VALIDATE_CERTS
|
||||
ini:
|
||||
|
||||
@@ -8,27 +8,27 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: splunk
|
||||
type: aggregate
|
||||
type: notification
|
||||
short_description: Sends task result events to Splunk HTTP Event Collector
|
||||
author: "Stuart Hirst (!UNKNOWN) <support@convergingdata.com>"
|
||||
description:
|
||||
- This callback plugin will send task results as JSON formatted events to a Splunk HTTP collector.
|
||||
- The companion Splunk Monitoring & Diagnostics App is available here "https://splunkbase.splunk.com/app/4023/"
|
||||
- The companion Splunk Monitoring & Diagnostics App is available here U(https://splunkbase.splunk.com/app/4023/).
|
||||
- Credit to "Ryan Currah (@ryancurrah)" for original source upon which this is based.
|
||||
requirements:
|
||||
- Whitelisting this callback plugin
|
||||
- 'Create a HTTP Event Collector in Splunk'
|
||||
- 'Define the url and token in ansible.cfg'
|
||||
- 'Define the URL and token in C(ansible.cfg)'
|
||||
options:
|
||||
url:
|
||||
description: URL to the Splunk HTTP collector source
|
||||
description: URL to the Splunk HTTP collector source.
|
||||
env:
|
||||
- name: SPLUNK_URL
|
||||
ini:
|
||||
- section: callback_splunk
|
||||
key: url
|
||||
authtoken:
|
||||
description: Token to authenticate the connection to the Splunk HTTP collector
|
||||
description: Token to authenticate the connection to the Splunk HTTP collector.
|
||||
env:
|
||||
- name: SPLUNK_AUTHTOKEN
|
||||
ini:
|
||||
@@ -48,7 +48,7 @@ DOCUMENTATION = '''
|
||||
version_added: '1.0.0'
|
||||
include_milliseconds:
|
||||
description: Whether to include milliseconds as part of the generated timestamp field in the event
|
||||
sent to the Splunk HTTP collector
|
||||
sent to the Splunk HTTP collector.
|
||||
env:
|
||||
- name: SPLUNK_INCLUDE_MILLISECONDS
|
||||
ini:
|
||||
@@ -165,7 +165,7 @@ class SplunkHTTPCollectorSource(object):
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
CALLBACK_VERSION = 2.0
|
||||
CALLBACK_TYPE = 'aggregate'
|
||||
CALLBACK_TYPE = 'notification'
|
||||
CALLBACK_NAME = 'community.general.splunk'
|
||||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
|
||||
@@ -8,18 +8,18 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: sumologic
|
||||
type: aggregate
|
||||
type: notification
|
||||
short_description: Sends task result events to Sumologic
|
||||
author: "Ryan Currah (@ryancurrah)"
|
||||
description:
|
||||
- This callback plugin will send task results as JSON formatted events to a Sumologic HTTP collector source
|
||||
- This callback plugin will send task results as JSON formatted events to a Sumologic HTTP collector source.
|
||||
requirements:
|
||||
- Whitelisting this callback plugin
|
||||
- 'Create a HTTP collector source in Sumologic and specify a custom timestamp format of C(yyyy-MM-dd HH:mm:ss ZZZZ) and a custom timestamp locator
|
||||
of C("timestamp": "(.*)")'
|
||||
options:
|
||||
url:
|
||||
description: URL to the Sumologic HTTP collector source
|
||||
description: URL to the Sumologic HTTP collector source.
|
||||
env:
|
||||
- name: SUMOLOGIC_URL
|
||||
ini:
|
||||
@@ -28,7 +28,7 @@ options:
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
examples: >
|
||||
examples: |
|
||||
To enable, add this to your ansible.cfg file in the defaults block
|
||||
[defaults]
|
||||
callback_whitelist = community.general.sumologic
|
||||
@@ -111,7 +111,7 @@ class SumologicHTTPCollectorSource(object):
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
CALLBACK_VERSION = 2.0
|
||||
CALLBACK_TYPE = 'aggregate'
|
||||
CALLBACK_TYPE = 'notification'
|
||||
CALLBACK_NAME = 'community.general.sumologic'
|
||||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
|
||||
@@ -15,11 +15,11 @@ DOCUMENTATION = '''
|
||||
- whitelist in configuration
|
||||
short_description: sends JSON events to syslog
|
||||
description:
|
||||
- This plugin logs ansible-playbook and ansible runs to a syslog server in JSON format
|
||||
- Before Ansible 2.9 only environment variables were available for configuration
|
||||
- This plugin logs ansible-playbook and ansible runs to a syslog server in JSON format.
|
||||
- Before Ansible 2.9 only environment variables were available for configuration.
|
||||
options:
|
||||
server:
|
||||
description: syslog server that will receive the event
|
||||
description: Syslog server that will receive the event.
|
||||
env:
|
||||
- name: SYSLOG_SERVER
|
||||
default: localhost
|
||||
@@ -27,7 +27,7 @@ DOCUMENTATION = '''
|
||||
- section: callback_syslog_json
|
||||
key: syslog_server
|
||||
port:
|
||||
description: port on which the syslog server is listening
|
||||
description: Port on which the syslog server is listening.
|
||||
env:
|
||||
- name: SYSLOG_PORT
|
||||
default: 514
|
||||
@@ -35,7 +35,7 @@ DOCUMENTATION = '''
|
||||
- section: callback_syslog_json
|
||||
key: syslog_port
|
||||
facility:
|
||||
description: syslog facility to log as
|
||||
description: Syslog facility to log as.
|
||||
env:
|
||||
- name: SYSLOG_FACILITY
|
||||
default: user
|
||||
@@ -71,7 +71,7 @@ class CallbackModule(CallbackBase):
|
||||
"""
|
||||
|
||||
CALLBACK_VERSION = 2.0
|
||||
CALLBACK_TYPE = 'aggregate'
|
||||
CALLBACK_TYPE = 'notification'
|
||||
CALLBACK_NAME = 'community.general.syslog_json'
|
||||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@ DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
name: yaml
|
||||
type: stdout
|
||||
short_description: yaml-ized Ansible screen output
|
||||
short_description: YAML-ized Ansible screen output
|
||||
description:
|
||||
- Ansible output that can be quite a bit easier to read than the
|
||||
default JSON formatting.
|
||||
|
||||
@@ -20,9 +20,13 @@ attributes:
|
||||
description: Will return details on what has changed (or possibly needs changing in C(check_mode)), when in diff mode.
|
||||
'''
|
||||
|
||||
# platform:
|
||||
# description: Target OS/families that can be operated against.
|
||||
# support: N/A
|
||||
PLATFORM = r'''
|
||||
options: {}
|
||||
attributes:
|
||||
platform:
|
||||
description: Target OS/families that can be operated against.
|
||||
support: N/A
|
||||
'''
|
||||
|
||||
# Should be used together with the standard fragment
|
||||
INFO_MODULE = r'''
|
||||
|
||||
@@ -60,7 +60,7 @@ options:
|
||||
sasl_class:
|
||||
description:
|
||||
- The class to use for SASL authentication.
|
||||
- possible choices are C(external), C(gssapi).
|
||||
- Possible choices are C(external), C(gssapi).
|
||||
type: str
|
||||
choices: ['external', 'gssapi']
|
||||
default: external
|
||||
|
||||
@@ -28,8 +28,12 @@ DOCUMENTATION = """
|
||||
default: name
|
||||
version_added: 5.7.0
|
||||
field:
|
||||
description: Field to fetch; leave unset to fetch whole response.
|
||||
description: Field to fetch. Leave unset to fetch whole response.
|
||||
type: str
|
||||
collection_id:
|
||||
description: Collection ID to filter results by collection. Leave unset to skip filtering.
|
||||
type: str
|
||||
version_added: 6.3.0
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
@@ -43,10 +47,20 @@ EXAMPLES = """
|
||||
msg: >-
|
||||
{{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') }}
|
||||
|
||||
- name: "Get 'password' from Bitwarden record named 'a_test' from collection"
|
||||
ansible.builtin.debug:
|
||||
msg: >-
|
||||
{{ lookup('community.general.bitwarden', 'a_test', field='password', collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }}
|
||||
|
||||
- name: "Get full Bitwarden record named 'a_test'"
|
||||
ansible.builtin.debug:
|
||||
msg: >-
|
||||
{{ lookup('community.general.bitwarden', 'a_test') }}
|
||||
|
||||
- name: "Get custom field 'api_key' from Bitwarden record named 'a_test'"
|
||||
ansible.builtin.debug:
|
||||
msg: >-
|
||||
{{ lookup('community.general.bitwarden', 'a_test', field='api_key') }}
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
@@ -78,7 +92,7 @@ class Bitwarden(object):
|
||||
return self._cli_path
|
||||
|
||||
@property
|
||||
def logged_in(self):
|
||||
def unlocked(self):
|
||||
out, err = self._run(['status'], stdin="")
|
||||
decoded = AnsibleJSONDecoder().raw_decode(out)[0]
|
||||
return decoded['status'] == 'unlocked'
|
||||
@@ -91,10 +105,17 @@ class Bitwarden(object):
|
||||
raise BitwardenException(err)
|
||||
return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')
|
||||
|
||||
def _get_matches(self, search_value, search_field):
|
||||
def _get_matches(self, search_value, search_field, collection_id):
|
||||
"""Return matching records whose search_field is equal to key.
|
||||
"""
|
||||
out, err = self._run(['list', 'items', '--search', search_value])
|
||||
|
||||
# Prepare set of params for Bitwarden CLI
|
||||
params = ['list', 'items', '--search', search_value]
|
||||
|
||||
if collection_id:
|
||||
params.extend(['--collectionid', collection_id])
|
||||
|
||||
out, err = self._run(params)
|
||||
|
||||
# This includes things that matched in different fields.
|
||||
initial_matches = AnsibleJSONDecoder().raw_decode(out)[0]
|
||||
@@ -102,17 +123,27 @@ class Bitwarden(object):
|
||||
# Filter to only include results from the right field.
|
||||
return [item for item in initial_matches if item[search_field] == search_value]
|
||||
|
||||
def get_field(self, field, search_value, search_field="name"):
|
||||
"""Return a list of the specified field for records whose search_field match search_value.
|
||||
def get_field(self, field, search_value, search_field="name", collection_id=None):
|
||||
"""Return a list of the specified field for records whose search_field match search_value
|
||||
and filtered by collection if collection has been provided.
|
||||
|
||||
If field is None, return the whole record for each match.
|
||||
"""
|
||||
matches = self._get_matches(search_value, search_field)
|
||||
matches = self._get_matches(search_value, search_field, collection_id)
|
||||
|
||||
if field:
|
||||
if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']:
|
||||
return [match['login'][field] for match in matches]
|
||||
|
||||
return matches
|
||||
elif not field:
|
||||
return matches
|
||||
else:
|
||||
custom_field_matches = []
|
||||
for match in matches:
|
||||
for custom_field in match['fields']:
|
||||
if custom_field['name'] == field:
|
||||
custom_field_matches.append(custom_field['value'])
|
||||
if matches and not custom_field_matches:
|
||||
raise AnsibleError("Custom field {field} does not exist in {search_value}".format(field=field, search_value=search_value))
|
||||
return custom_field_matches
|
||||
|
||||
|
||||
class LookupModule(LookupBase):
|
||||
@@ -121,10 +152,11 @@ class LookupModule(LookupBase):
|
||||
self.set_options(var_options=variables, direct=kwargs)
|
||||
field = self.get_option('field')
|
||||
search_field = self.get_option('search')
|
||||
if not _bitwarden.logged_in:
|
||||
raise AnsibleError("Not logged into Bitwarden. Run 'bw login'.")
|
||||
collection_id = self.get_option('collection_id')
|
||||
if not _bitwarden.unlocked:
|
||||
raise AnsibleError("Bitwarden Vault locked. Run 'bw unlock'.")
|
||||
|
||||
return [_bitwarden.get_field(field, term, search_field) for term in terms]
|
||||
return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]
|
||||
|
||||
|
||||
_bitwarden = Bitwarden()
|
||||
|
||||
@@ -35,9 +35,10 @@ DOCUMENTATION = '''
|
||||
description:
|
||||
- Record type to query.
|
||||
- C(DLV) has been removed in community.general 6.0.0.
|
||||
- C(CAA) has been added in community.general 6.3.0.
|
||||
type: str
|
||||
default: 'A'
|
||||
choices: [A, ALL, AAAA, CNAME, DNAME, DNSKEY, DS, HINFO, LOC, MX, NAPTR, NS, NSEC3PARAM, PTR, RP, RRSIG, SOA, SPF, SRV, SSHFP, TLSA, TXT]
|
||||
choices: [A, ALL, AAAA, CAA, CNAME, DNAME, DNSKEY, DS, HINFO, LOC, MX, NAPTR, NS, NSEC3PARAM, PTR, RP, RRSIG, SOA, SPF, SRV, SSHFP, TLSA, TXT]
|
||||
flat:
|
||||
description: If 0 each record is returned as a dictionary, otherwise a string.
|
||||
type: int
|
||||
@@ -129,6 +130,12 @@ RETURN = """
|
||||
AAAA:
|
||||
description:
|
||||
- address
|
||||
CAA:
|
||||
description:
|
||||
- flags
|
||||
- tag
|
||||
- value
|
||||
version_added: 6.3.0
|
||||
CNAME:
|
||||
description:
|
||||
- target
|
||||
@@ -198,7 +205,7 @@ try:
|
||||
import dns.resolver
|
||||
import dns.reversename
|
||||
import dns.rdataclass
|
||||
from dns.rdatatype import (A, AAAA, CNAME, DNAME, DNSKEY, DS, HINFO, LOC,
|
||||
from dns.rdatatype import (A, AAAA, CAA, CNAME, DNAME, DNSKEY, DS, HINFO, LOC,
|
||||
MX, NAPTR, NS, NSEC3PARAM, PTR, RP, SOA, SPF, SRV, SSHFP, TLSA, TXT)
|
||||
HAVE_DNS = True
|
||||
except ImportError:
|
||||
@@ -218,6 +225,7 @@ def make_rdata_dict(rdata):
|
||||
supported_types = {
|
||||
A: ['address'],
|
||||
AAAA: ['address'],
|
||||
CAA: ['flags', 'tag', 'value'],
|
||||
CNAME: ['target'],
|
||||
DNAME: ['target'],
|
||||
DNSKEY: ['flags', 'algorithm', 'protocol', 'key'],
|
||||
@@ -230,7 +238,7 @@ def make_rdata_dict(rdata):
|
||||
NSEC3PARAM: ['algorithm', 'flags', 'iterations', 'salt'],
|
||||
PTR: ['target'],
|
||||
RP: ['mbox', 'txt'],
|
||||
# RRSIG: ['algorithm', 'labels', 'original_ttl', 'expiration', 'inception', 'signature'],
|
||||
# RRSIG: ['type_covered', 'algorithm', 'labels', 'original_ttl', 'expiration', 'inception', 'key_tag', 'signer', 'signature'],
|
||||
SOA: ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire', 'minimum'],
|
||||
SPF: ['strings'],
|
||||
SRV: ['priority', 'weight', 'port', 'target'],
|
||||
@@ -251,6 +259,8 @@ def make_rdata_dict(rdata):
|
||||
|
||||
if rdata.rdtype == DS and f == 'digest':
|
||||
val = dns.rdata._hexify(rdata.digest).replace(' ', '')
|
||||
if rdata.rdtype == DNSKEY and f == 'algorithm':
|
||||
val = int(val)
|
||||
if rdata.rdtype == DNSKEY and f == 'key':
|
||||
val = dns.rdata._base64ify(rdata.key).replace(' ', '')
|
||||
if rdata.rdtype == NSEC3PARAM and f == 'salt':
|
||||
|
||||
@@ -85,17 +85,16 @@ class iLORedfishUtils(RedfishUtils):
|
||||
|
||||
datetime_uri = self.manager_uri + "DateTime"
|
||||
|
||||
response = self.get_request(self.root_uri + datetime_uri)
|
||||
if not response['ret']:
|
||||
return response
|
||||
listofips = mgr_attributes['mgr_attr_value'].split(" ")
|
||||
if len(listofips) > 2:
|
||||
return {'ret': False, 'changed': False, 'msg': "More than 2 NTP Servers mentioned"}
|
||||
|
||||
data = response['data']
|
||||
ntp_list = []
|
||||
for ips in listofips:
|
||||
ntp_list.append(ips)
|
||||
|
||||
ntp_list = data[setkey]
|
||||
if len(ntp_list) == 2:
|
||||
ntp_list.pop(0)
|
||||
|
||||
ntp_list.append(mgr_attributes['mgr_attr_value'])
|
||||
while len(ntp_list) < 2:
|
||||
ntp_list.append("0.0.0.0")
|
||||
|
||||
payload = {setkey: ntp_list}
|
||||
|
||||
@@ -137,18 +136,16 @@ class iLORedfishUtils(RedfishUtils):
|
||||
nic_info = self.get_manager_ethernet_uri()
|
||||
uri = nic_info["nic_addr"]
|
||||
|
||||
response = self.get_request(self.root_uri + uri)
|
||||
if not response['ret']:
|
||||
return response
|
||||
listofips = attr['mgr_attr_value'].split(" ")
|
||||
if len(listofips) > 3:
|
||||
return {'ret': False, 'changed': False, 'msg': "More than 3 DNS Servers mentioned"}
|
||||
|
||||
data = response['data']
|
||||
dns_list = []
|
||||
for ips in listofips:
|
||||
dns_list.append(ips)
|
||||
|
||||
dns_list = data["Oem"]["Hpe"]["IPv4"][key]
|
||||
|
||||
if len(dns_list) == 3:
|
||||
dns_list.pop(0)
|
||||
|
||||
dns_list.append(attr['mgr_attr_value'])
|
||||
while len(dns_list) < 3:
|
||||
dns_list.append("0.0.0.0")
|
||||
|
||||
payload = {
|
||||
"Oem": {
|
||||
|
||||
@@ -37,8 +37,17 @@ def cause_changes(on_success=None, on_failure=None):
|
||||
|
||||
|
||||
def module_fails_on_exception(func):
|
||||
conflict_list = ('msg', 'exception', 'output', 'vars', 'changed')
|
||||
|
||||
@wraps(func)
|
||||
def wrapper(self, *args, **kwargs):
|
||||
def fix_var_conflicts(output):
|
||||
result = dict([
|
||||
(k if k not in conflict_list else "_" + k, v)
|
||||
for k, v in output.items()
|
||||
])
|
||||
return result
|
||||
|
||||
try:
|
||||
func(self, *args, **kwargs)
|
||||
except SystemExit:
|
||||
@@ -46,12 +55,16 @@ def module_fails_on_exception(func):
|
||||
except ModuleHelperException as e:
|
||||
if e.update_output:
|
||||
self.update_output(e.update_output)
|
||||
# patchy solution to resolve conflict with output variables
|
||||
output = fix_var_conflicts(self.output)
|
||||
self.module.fail_json(msg=e.msg, exception=traceback.format_exc(),
|
||||
output=self.output, vars=self.vars.output(), **self.output)
|
||||
output=self.output, vars=self.vars.output(), **output)
|
||||
except Exception as e:
|
||||
# patchy solution to resolve conflict with output variables
|
||||
output = fix_var_conflicts(self.output)
|
||||
msg = "Module failed with exception: {0}".format(str(e).strip())
|
||||
self.module.fail_json(msg=msg, exception=traceback.format_exc(),
|
||||
output=self.output, vars=self.vars.output(), **self.output)
|
||||
output=self.output, vars=self.vars.output(), **output)
|
||||
return wrapper
|
||||
|
||||
|
||||
|
||||
@@ -18,7 +18,6 @@ from ansible_collections.community.general.plugins.module_utils.mh.mixins.deprec
|
||||
|
||||
|
||||
class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelperBase):
|
||||
_output_conflict_list = ('msg', 'exception', 'output', 'vars', 'changed')
|
||||
facts_name = None
|
||||
output_params = ()
|
||||
diff_params = ()
|
||||
@@ -60,10 +59,6 @@ class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelper
|
||||
vars_diff = self.vars.diff() or {}
|
||||
result['diff'] = dict_merge(dict(diff), vars_diff)
|
||||
|
||||
for varname in result:
|
||||
if varname in self._output_conflict_list:
|
||||
result["_" + varname] = result[varname]
|
||||
del result[varname]
|
||||
return result
|
||||
|
||||
|
||||
|
||||
502
plugins/module_utils/ocapi_utils.py
Normal file
502
plugins/module_utils/ocapi_utils.py
Normal file
@@ -0,0 +1,502 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2022 Western Digital Corporation
|
||||
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
import os
|
||||
import uuid
|
||||
|
||||
from ansible.module_utils.urls import open_url
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlparse
|
||||
|
||||
|
||||
GET_HEADERS = {'accept': 'application/json'}
|
||||
PUT_HEADERS = {'content-type': 'application/json', 'accept': 'application/json'}
|
||||
POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json'}
|
||||
DELETE_HEADERS = {'accept': 'application/json'}
|
||||
|
||||
HEALTH_OK = 5
|
||||
|
||||
|
||||
class OcapiUtils(object):
|
||||
|
||||
def __init__(self, creds, base_uri, proxy_slot_number, timeout, module):
|
||||
self.root_uri = base_uri
|
||||
self.proxy_slot_number = proxy_slot_number
|
||||
self.creds = creds
|
||||
self.timeout = timeout
|
||||
self.module = module
|
||||
|
||||
def _auth_params(self):
|
||||
"""
|
||||
Return tuple of required authentication params based on the username and password.
|
||||
|
||||
:return: tuple of username, password
|
||||
"""
|
||||
username = self.creds['user']
|
||||
password = self.creds['pswd']
|
||||
force_basic_auth = True
|
||||
return username, password, force_basic_auth
|
||||
|
||||
def get_request(self, uri):
|
||||
req_headers = dict(GET_HEADERS)
|
||||
username, password, basic_auth = self._auth_params()
|
||||
try:
|
||||
resp = open_url(uri, method="GET", headers=req_headers,
|
||||
url_username=username, url_password=password,
|
||||
force_basic_auth=basic_auth, validate_certs=False,
|
||||
follow_redirects='all',
|
||||
use_proxy=True, timeout=self.timeout)
|
||||
data = json.loads(to_native(resp.read()))
|
||||
headers = dict((k.lower(), v) for (k, v) in resp.info().items())
|
||||
except HTTPError as e:
|
||||
return {'ret': False,
|
||||
'msg': "HTTP Error %s on GET request to '%s'"
|
||||
% (e.code, uri),
|
||||
'status': e.code}
|
||||
except URLError as e:
|
||||
return {'ret': False, 'msg': "URL Error on GET request to '%s': '%s'"
|
||||
% (uri, e.reason)}
|
||||
# Almost all errors should be caught above, but just in case
|
||||
except Exception as e:
|
||||
return {'ret': False,
|
||||
'msg': "Failed GET request to '%s': '%s'" % (uri, to_text(e))}
|
||||
return {'ret': True, 'data': data, 'headers': headers}
|
||||
|
||||
def delete_request(self, uri, etag=None):
|
||||
req_headers = dict(DELETE_HEADERS)
|
||||
if etag is not None:
|
||||
req_headers['If-Match'] = etag
|
||||
username, password, basic_auth = self._auth_params()
|
||||
try:
|
||||
resp = open_url(uri, method="DELETE", headers=req_headers,
|
||||
url_username=username, url_password=password,
|
||||
force_basic_auth=basic_auth, validate_certs=False,
|
||||
follow_redirects='all',
|
||||
use_proxy=True, timeout=self.timeout)
|
||||
if resp.status != 204:
|
||||
data = json.loads(to_native(resp.read()))
|
||||
else:
|
||||
data = ""
|
||||
headers = dict((k.lower(), v) for (k, v) in resp.info().items())
|
||||
except HTTPError as e:
|
||||
return {'ret': False,
|
||||
'msg': "HTTP Error %s on DELETE request to '%s'"
|
||||
% (e.code, uri),
|
||||
'status': e.code}
|
||||
except URLError as e:
|
||||
return {'ret': False, 'msg': "URL Error on DELETE request to '%s': '%s'"
|
||||
% (uri, e.reason)}
|
||||
# Almost all errors should be caught above, but just in case
|
||||
except Exception as e:
|
||||
return {'ret': False,
|
||||
'msg': "Failed DELETE request to '%s': '%s'" % (uri, to_text(e))}
|
||||
return {'ret': True, 'data': data, 'headers': headers}
|
||||
|
||||
def put_request(self, uri, payload, etag=None):
|
||||
req_headers = dict(PUT_HEADERS)
|
||||
if etag is not None:
|
||||
req_headers['If-Match'] = etag
|
||||
username, password, basic_auth = self._auth_params()
|
||||
try:
|
||||
resp = open_url(uri, data=json.dumps(payload),
|
||||
headers=req_headers, method="PUT",
|
||||
url_username=username, url_password=password,
|
||||
force_basic_auth=basic_auth, validate_certs=False,
|
||||
follow_redirects='all',
|
||||
use_proxy=True, timeout=self.timeout)
|
||||
headers = dict((k.lower(), v) for (k, v) in resp.info().items())
|
||||
except HTTPError as e:
|
||||
return {'ret': False,
|
||||
'msg': "HTTP Error %s on PUT request to '%s'"
|
||||
% (e.code, uri),
|
||||
'status': e.code}
|
||||
except URLError as e:
|
||||
return {'ret': False, 'msg': "URL Error on PUT request to '%s': '%s'"
|
||||
% (uri, e.reason)}
|
||||
# Almost all errors should be caught above, but just in case
|
||||
except Exception as e:
|
||||
return {'ret': False,
|
||||
'msg': "Failed PUT request to '%s': '%s'" % (uri, to_text(e))}
|
||||
return {'ret': True, 'headers': headers, 'resp': resp}
|
||||
|
||||
def post_request(self, uri, payload, content_type="application/json", timeout=None):
|
||||
req_headers = dict(POST_HEADERS)
|
||||
if content_type != "application/json":
|
||||
req_headers["content-type"] = content_type
|
||||
username, password, basic_auth = self._auth_params()
|
||||
if content_type == "application/json":
|
||||
request_data = json.dumps(payload)
|
||||
else:
|
||||
request_data = payload
|
||||
try:
|
||||
resp = open_url(uri, data=request_data,
|
||||
headers=req_headers, method="POST",
|
||||
url_username=username, url_password=password,
|
||||
force_basic_auth=basic_auth, validate_certs=False,
|
||||
follow_redirects='all',
|
||||
use_proxy=True, timeout=self.timeout if timeout is None else timeout)
|
||||
headers = dict((k.lower(), v) for (k, v) in resp.info().items())
|
||||
except HTTPError as e:
|
||||
return {'ret': False,
|
||||
'msg': "HTTP Error %s on POST request to '%s'"
|
||||
% (e.code, uri),
|
||||
'status': e.code}
|
||||
except URLError as e:
|
||||
return {'ret': False, 'msg': "URL Error on POST request to '%s': '%s'"
|
||||
% (uri, e.reason)}
|
||||
# Almost all errors should be caught above, but just in case
|
||||
except Exception as e:
|
||||
return {'ret': False,
|
||||
'msg': "Failed POST request to '%s': '%s'" % (uri, to_text(e))}
|
||||
return {'ret': True, 'headers': headers, 'resp': resp}
|
||||
|
||||
def get_uri_with_slot_number_query_param(self, uri):
|
||||
"""Return the URI with proxy slot number added as a query param, if there is one.
|
||||
|
||||
If a proxy slot number is provided, to access it, we must append it as a query parameter.
|
||||
This method returns the given URI with the slotnumber query param added, if there is one.
|
||||
If there is not a proxy slot number, it just returns the URI as it was passed in.
|
||||
"""
|
||||
if self.proxy_slot_number is not None:
|
||||
parsed_url = urlparse(uri)
|
||||
return parsed_url._replace(query="slotnumber=" + str(self.proxy_slot_number)).geturl()
|
||||
else:
|
||||
return uri
|
||||
|
||||
def manage_system_power(self, command):
    """Process a command to manage the system power.

    :param str command: The Ansible command being processed.
    :returns: dict with ``ret`` (bool) and optionally ``changed``/``msg``.
    """
    if command == "PowerGracefulRestart":
        resource_uri = self.get_uri_with_slot_number_query_param(self.root_uri)

        # Get the resource so that we have the Etag
        response = self.get_request(resource_uri)
        # Check the request outcome before touching headers: a failed request
        # dict has no 'headers' key and would raise KeyError otherwise.
        if response['ret'] is False:
            return response
        if 'etag' not in response['headers']:
            return {'ret': False, 'msg': 'Etag not found in response.'}
        etag = response['headers']['etag']

        # Issue the PUT to do the reboot (unless we are in check mode)
        if self.module.check_mode:
            return {
                'ret': True,
                'changed': True,
                'msg': 'Update not performed in check mode.'
            }
        payload = {'Reboot': True}
        response = self.put_request(resource_uri, payload, etag)
        if response['ret'] is False:
            return response
    elif command.startswith("PowerMode"):
        return self.manage_power_mode(command)
    else:
        return {'ret': False, 'msg': 'Invalid command: ' + command}

    return {'ret': True}
|
||||
|
||||
def manage_chassis_indicator_led(self, command):
    """Process a command to manage the chassis indicator LED.

    Thin wrapper that delegates to :meth:`manage_indicator_led` with the
    chassis root URI as the target resource.

    :param string command: The Ansible command being processed.
    """
    return self.manage_indicator_led(command, self.root_uri)
|
||||
|
||||
def manage_indicator_led(self, command, resource_uri=None):
    """Process a command to manage an indicator LED.

    :param string command: The Ansible command being processed
        ("IndicatorLedOn" or "IndicatorLedOff").
    :param string resource_uri: URI of the resource whose indicator LED is being
        managed; defaults to the root URI.
    :returns: dict with ``ret`` (bool) and optionally ``changed``/``msg``.
    """
    key = "IndicatorLED"
    if resource_uri is None:
        resource_uri = self.root_uri
    resource_uri = self.get_uri_with_slot_number_query_param(resource_uri)

    # OCAPI numeric IDs for the LED states we can request.
    payloads = {
        'IndicatorLedOn': {
            'ID': 2
        },
        'IndicatorLedOff': {
            'ID': 4
        }
    }

    response = self.get_request(resource_uri)
    # Check the request outcome before reading headers/data: a failed request
    # dict has no 'headers' key and would raise KeyError otherwise.
    if response['ret'] is False:
        return response
    if 'etag' not in response['headers']:
        return {'ret': False, 'msg': 'Etag not found in response.'}
    etag = response['headers']['etag']
    data = response['data']
    if key not in data:
        return {'ret': False, 'msg': "Key %s not found" % key}
    if 'ID' not in data[key]:
        return {'ret': False, 'msg': 'IndicatorLED for resource has no ID.'}

    if command in payloads.keys():
        # See if the LED is already set as requested.
        current_led_status = data[key]['ID']
        if current_led_status == payloads[command]['ID']:
            return {'ret': True, 'changed': False}

        # Set the LED (unless we are in check mode)
        if self.module.check_mode:
            return {
                'ret': True,
                'changed': True,
                'msg': 'Update not performed in check mode.'
            }
        payload = {'IndicatorLED': payloads[command]}
        response = self.put_request(resource_uri, payload, etag)
        if response['ret'] is False:
            return response
    else:
        return {'ret': False, 'msg': 'Invalid command'}

    return {'ret': True}
|
||||
|
||||
def manage_power_mode(self, command):
    """Process a command to set the system power mode.

    :param str command: The Ansible command being processed
        ("PowerModeNormal" or "PowerModeLow").
    :returns: dict with ``ret`` (bool) and optionally ``changed``/``msg``.
    """
    key = "PowerState"
    resource_uri = self.get_uri_with_slot_number_query_param(self.root_uri)

    # OCAPI numeric IDs for the power states we can request.
    payloads = {
        "PowerModeNormal": 2,
        "PowerModeLow": 4
    }

    response = self.get_request(resource_uri)
    # Check the request outcome before reading headers/data: a failed request
    # dict has no 'headers' key and would raise KeyError otherwise.
    if response['ret'] is False:
        return response
    if 'etag' not in response['headers']:
        return {'ret': False, 'msg': 'Etag not found in response.'}
    etag = response['headers']['etag']
    data = response['data']
    if key not in data:
        return {'ret': False, 'msg': "Key %s not found" % key}
    if 'ID' not in data[key]:
        return {'ret': False, 'msg': 'PowerState for resource has no ID.'}

    if command in payloads.keys():
        # See if the PowerState is already set as requested.
        current_power_state = data[key]['ID']
        if current_power_state == payloads[command]:
            return {'ret': True, 'changed': False}

        # Set the Power State (unless we are in check mode)
        if self.module.check_mode:
            return {
                'ret': True,
                'changed': True,
                'msg': 'Update not performed in check mode.'
            }
        payload = {'PowerState': {"ID": payloads[command]}}
        response = self.put_request(resource_uri, payload, etag)
        if response['ret'] is False:
            return response
    else:
        return {'ret': False, 'msg': 'Invalid command: ' + command}

    return {'ret': True}
|
||||
|
||||
def prepare_multipart_firmware_upload(self, filename):
    """Prepare a multipart/form-data body for OCAPI firmware upload.

    :arg filename: The name of the file to upload.
    :returns: tuple of (content_type, body) where ``content_type`` is
        the ``multipart/form-data`` ``Content-Type`` header including
        ``boundary`` and ``body`` is the prepared bytestring body

    Prepares the body to include "FirmwareFile" field with the contents of the file.
    Because some OCAPI targets do not support Base-64 encoding for multipart/form-data,
    this method sends the file as binary.
    """
    boundary = str(uuid.uuid4())  # Random boundary, so it cannot collide with content.
    # Build the part header; the trailing empty strings produce the blank
    # line (CRLF CRLF) that separates headers from the binary payload.
    part_header = '\r\n'.join([
        "--" + boundary,
        'Content-Disposition: form-data; name="FirmwareFile"; filename="%s"' % to_native(os.path.basename(filename)),
        'Content-Type: application/octet-stream',
        '',
        '',
    ])
    body_bytes = bytearray(part_header, 'utf-8')
    with open(filename, 'rb') as f:
        body_bytes += f.read()
    # Closing boundary marker terminates the multipart body.
    body_bytes += bytearray("\r\n--%s--" % boundary, 'utf-8')
    content_type = "multipart/form-data; boundary=%s" % boundary
    return (content_type, body_bytes)
|
||||
|
||||
def upload_firmware_image(self, update_image_path):
    """Perform Firmware Upload to the OCAPI storage device.

    :param str update_image_path: The path/filename of the firmware image, on the local filesystem.
    :returns: dict with ``ret`` (bool) and optionally ``changed``/``msg``.
    """
    # os.path.isfile() is False for missing paths, so it covers the
    # existence check as well.
    if not os.path.isfile(update_image_path):
        return {'ret': False, 'msg': 'File does not exist.'}
    url = self.get_uri_with_slot_number_query_param(self.root_uri + "OperatingSystem")
    content_type, b_form_data = self.prepare_multipart_firmware_upload(update_image_path)

    # Post the firmware (unless we are in check mode)
    if self.module.check_mode:
        return {
            'ret': True,
            'changed': True,
            'msg': 'Update not performed in check mode.'
        }
    # Firmware images are large; allow a generous 300 second timeout.
    result = self.post_request(url, b_form_data, content_type=content_type, timeout=300)
    if result['ret'] is False:
        return result
    return {'ret': True}
|
||||
|
||||
def update_firmware_image(self):
    """Perform a Firmware Update on the OCAPI storage device."""
    target_uri = self.get_uri_with_slot_number_query_param(self.root_uri)
    # We have to do a GET to obtain the Etag. It's required on the PUT.
    response = self.get_request(target_uri)
    if response['ret'] is False:
        return response
    resp_headers = response['headers']
    if 'etag' not in resp_headers:
        return {'ret': False, 'msg': 'Etag not found in response.'}
    etag = resp_headers['etag']

    # Issue the PUT (unless we are in check mode)
    if self.module.check_mode:
        return {
            'ret': True,
            'changed': True,
            'msg': 'Update not performed in check mode.'
        }
    response = self.put_request(target_uri, {'FirmwareUpdate': True}, etag)
    if response['ret'] is False:
        return response

    # The Location header points at the job monitor for the async update.
    return {'ret': True, 'jobUri': response["headers"]["location"]}
|
||||
|
||||
def activate_firmware_image(self):
    """Perform a Firmware Activate on the OCAPI storage device."""
    resource_uri = self.get_uri_with_slot_number_query_param(self.root_uri)
    # We have to do a GET to obtain the Etag. It's required on the PUT.
    response = self.get_request(resource_uri)
    # Check the request outcome before reading headers (a failed request
    # dict has no 'headers' key); this matches update_firmware_image().
    if response['ret'] is False:
        return response
    if 'etag' not in response['headers']:
        return {'ret': False, 'msg': 'Etag not found in response.'}
    etag = response['headers']['etag']

    # Issue the PUT (unless we are in check mode)
    if self.module.check_mode:
        return {
            'ret': True,
            'changed': True,
            'msg': 'Update not performed in check mode.'
        }
    payload = {'FirmwareActivate': True}
    response = self.put_request(resource_uri, payload, etag)
    if response['ret'] is False:
        return response

    # The Location header points at the job monitor for the async activate.
    return {'ret': True, 'jobUri': response["headers"]["location"]}
|
||||
|
||||
def get_job_status(self, job_uri):
    """Get the status of a job.

    :param str job_uri: The URI of the job's status monitor.
    :returns: dict with ``ret``, ``percentComplete``, ``operationStatus``,
        ``operationStatusId``, ``operationHealth``, ``operationHealthId``,
        ``details`` and ``jobExists`` keys.
    """
    job_uri = self.get_uri_with_slot_number_query_param(job_uri)
    response = self.get_request(job_uri)
    if response['ret'] is False:
        if response.get('status') == 404:
            # Job not found -- assume 0%
            return {
                "ret": True,
                "percentComplete": 0,
                "operationStatus": "Not Available",
                "operationStatusId": 1,
                "operationHealth": None,
                "operationHealthId": None,
                "details": "Job does not exist.",
                "jobExists": False
            }
        else:
            return response
    details = response["data"]["Status"].get("Details")
    if isinstance(details, str):
        # Normalize a single detail string into a one-element list.
        details = [details]
    health_list = response["data"]["Status"]["Health"]
    return_value = {
        "ret": True,
        "percentComplete": response["data"]["PercentComplete"],
        "operationStatus": response["data"]["Status"]["State"]["Name"],
        "operationStatusId": response["data"]["Status"]["State"]["ID"],
        "operationHealth": health_list[0]["Name"] if health_list else None,
        "operationHealthId": health_list[0]["ID"] if health_list else None,
        "details": details,
        "jobExists": True
    }
    return return_value
|
||||
|
||||
def delete_job(self, job_uri):
    """Delete the OCAPI job referenced by the specified job_uri.

    :param str job_uri: The URI of the job's status monitor.
    :returns: dict with ``ret`` (bool) and optionally ``changed``/``msg``.
    """
    job_uri = self.get_uri_with_slot_number_query_param(job_uri)
    # We have to do a GET to obtain the Etag. It's required on the DELETE.
    response = self.get_request(job_uri)

    if response['ret'] is True:
        if 'etag' not in response['headers']:
            return {'ret': False, 'msg': 'Etag not found in response.'}
        else:
            etag = response['headers']['etag']

        # In-progress jobs cannot be deleted.
        if response['data']['PercentComplete'] != 100:
            return {
                'ret': False,
                'changed': False,
                'msg': 'Cannot delete job because it is in progress.'
            }

    if response['ret'] is False:
        # Use .get(): URL-level failures carry no 'status' key, so indexing
        # response['status'] directly would raise KeyError.
        if response.get('status') == 404:
            return {
                'ret': True,
                'changed': False,
                'msg': 'Job already deleted.'
            }
        return response

    if self.module.check_mode:
        return {
            'ret': True,
            'changed': True,
            'msg': 'Update not performed in check mode.'
        }

    # Do the DELETE (unless we are in check mode)
    response = self.delete_request(job_uri, etag)
    if response['ret'] is False:
        status = response.get('status')
        if status == 404:
            # Already gone -- treat as success without change.
            return {
                'ret': True,
                'changed': False
            }
        elif status == 409:
            return {
                'ret': False,
                'changed': False,
                'msg': 'Cannot delete job because it is in progress.'
            }
        return response
    return {
        'ret': True,
        'changed': True
    }
|
||||
@@ -26,6 +26,36 @@ except ImportError:
|
||||
HAS_PYONE = False
|
||||
|
||||
|
||||
# A helper function to mitigate https://github.com/OpenNebula/one/issues/6064.
|
||||
# It allows for easily handling lists like "NIC" or "DISK" in the JSON-like template representation.
|
||||
# There are either lists of dictionaries (length > 1) or just dictionaries.
|
||||
def flatten(to_flatten, extract=False):
    """Flattens nested lists (with optional value extraction)."""
    def walk(node):
        # Non-list nodes are leaves; lists are flattened depth-first.
        if not isinstance(node, list):
            return [node]
        result = []
        for child in node:
            result.extend(walk(child))
        return result

    flat = walk(to_flatten)
    if extract and len(flat) == 1:
        # Unwrap a singleton list to the bare value when requested.
        return flat[0]
    return flat
|
||||
|
||||
|
||||
# A helper function to mitigate https://github.com/OpenNebula/one/issues/6064.
|
||||
# It renders JSON-like template representation into OpenNebula's template syntax (string).
|
||||
def render(to_render):
    """Converts dictionary to OpenNebula template."""
    def emit(mapping):
        # Sort keys so output is deterministic regardless of dict order.
        for key in sorted(mapping):
            value = mapping[key]
            if isinstance(value, dict):
                # Nested dict becomes a bracketed section: KEY=[k="v",...]
                yield '{0:}=[{1:}]'.format(key, ','.join(emit(value)))
            elif isinstance(value, list):
                # Each list element becomes its own bracketed section.
                for element in value:
                    yield '{0:}=[{1:}]'.format(key, ','.join(emit(element)))
            else:
                # Scalar values are rendered as KEY="value".
                yield '{0:}="{1:}"'.format(key, value)

    return '\n'.join(emit(to_render))
|
||||
|
||||
|
||||
class OpenNebulaModule:
|
||||
"""
|
||||
Base class for all OpenNebula Ansible Modules.
|
||||
|
||||
@@ -38,6 +38,8 @@ class RedfishUtils(object):
|
||||
self.timeout = timeout
|
||||
self.module = module
|
||||
self.service_root = '/redfish/v1/'
|
||||
self.session_service_uri = '/redfish/v1/SessionService'
|
||||
self.sessions_uri = '/redfish/v1/SessionService/Sessions'
|
||||
self.resource_id = resource_id
|
||||
self.data_modification = data_modification
|
||||
self.strip_etag_quotes = strip_etag_quotes
|
||||
@@ -125,6 +127,10 @@ class RedfishUtils(object):
|
||||
req_headers = dict(GET_HEADERS)
|
||||
username, password, basic_auth = self._auth_params(req_headers)
|
||||
try:
|
||||
# Service root is an unauthenticated resource; remove credentials
|
||||
# in case the caller will be using sessions later.
|
||||
if uri == (self.root_uri + self.service_root):
|
||||
basic_auth = False
|
||||
resp = open_url(uri, method="GET", headers=req_headers,
|
||||
url_username=username, url_password=password,
|
||||
force_basic_auth=basic_auth, validate_certs=False,
|
||||
@@ -151,6 +157,11 @@ class RedfishUtils(object):
|
||||
req_headers = dict(POST_HEADERS)
|
||||
username, password, basic_auth = self._auth_params(req_headers)
|
||||
try:
|
||||
# When performing a POST to the session collection, credentials are
|
||||
# provided in the request body. Do not provide the basic auth
|
||||
# header since this can cause conflicts with some services
|
||||
if self.sessions_uri is not None and uri == (self.root_uri + self.sessions_uri):
|
||||
basic_auth = False
|
||||
resp = open_url(uri, data=json.dumps(pyld),
|
||||
headers=req_headers, method="POST",
|
||||
url_username=username, url_password=password,
|
||||
@@ -363,23 +374,23 @@ class RedfishUtils(object):
|
||||
return {'ret': True}
|
||||
|
||||
def _find_sessionservice_resource(self):
|
||||
# Get the service root
|
||||
response = self.get_request(self.root_uri + self.service_root)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
if 'SessionService' not in data:
|
||||
|
||||
# Check for the session service and session collection. Well-known
|
||||
# defaults are provided in the constructor, but services that predate
|
||||
# Redfish 1.6.0 might contain different values.
|
||||
self.session_service_uri = data.get('SessionService', {}).get('@odata.id')
|
||||
self.sessions_uri = data.get('Links', {}).get('Sessions', {}).get('@odata.id')
|
||||
|
||||
# If one isn't found, return an error
|
||||
if self.session_service_uri is None:
|
||||
return {'ret': False, 'msg': "SessionService resource not found"}
|
||||
else:
|
||||
session_service = data["SessionService"]["@odata.id"]
|
||||
self.session_service_uri = session_service
|
||||
response = self.get_request(self.root_uri + session_service)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
sessions = data['Sessions']['@odata.id']
|
||||
if sessions[-1:] == '/':
|
||||
sessions = sessions[:-1]
|
||||
self.sessions_uri = sessions
|
||||
if self.sessions_uri is None:
|
||||
return {'ret': False, 'msg': "SessionCollection resource not found"}
|
||||
return {'ret': True}
|
||||
|
||||
def _get_resource_uri_by_id(self, uris, id_prop):
|
||||
|
||||
@@ -84,6 +84,10 @@ def parse_pagination_link(header):
|
||||
|
||||
|
||||
def filter_sensitive_attributes(container, attributes):
|
||||
'''
|
||||
WARNING: This function is effectively private, **do not use it**!
|
||||
It will be removed or renamed once changing its name no longer triggers a pylint bug.
|
||||
'''
|
||||
for attr in attributes:
|
||||
container[attr] = "SENSITIVE_VALUE"
|
||||
|
||||
|
||||
@@ -60,6 +60,8 @@ options:
|
||||
description:
|
||||
- A list of subcommands.
|
||||
- Each subcommand needs a name, a link and a path parameter.
|
||||
- Subcommands are also named 'slaves' or 'followers', depending on the version
|
||||
of alternatives.
|
||||
type: list
|
||||
elements: dict
|
||||
aliases: ['slaves']
|
||||
@@ -310,10 +312,10 @@ class AlternativesModule(object):
|
||||
current_mode_regex = re.compile(r'\s-\s(?:status\sis\s)?(\w*)(?:\smode|.)$', re.MULTILINE)
|
||||
current_path_regex = re.compile(r'^\s*link currently points to (.*)$', re.MULTILINE)
|
||||
current_link_regex = re.compile(r'^\s*link \w+ is (.*)$', re.MULTILINE)
|
||||
subcmd_path_link_regex = re.compile(r'^\s*slave (\S+) is (.*)$', re.MULTILINE)
|
||||
subcmd_path_link_regex = re.compile(r'^\s*(?:slave|follower) (\S+) is (.*)$', re.MULTILINE)
|
||||
|
||||
alternative_regex = re.compile(r'^(\/.*)\s-\s(?:family\s\S+\s)?priority\s(\d+)((?:\s+slave.*)*)', re.MULTILINE)
|
||||
subcmd_regex = re.compile(r'^\s+slave (.*): (.*)$', re.MULTILINE)
|
||||
alternative_regex = re.compile(r'^(\/.*)\s-\s(?:family\s\S+\s)?priority\s(\d+)((?:\s+(?:slave|follower).*)*)', re.MULTILINE)
|
||||
subcmd_regex = re.compile(r'^\s+(?:slave|follower) (.*): (.*)$', re.MULTILINE)
|
||||
|
||||
match = current_mode_regex.search(display_output)
|
||||
if not match:
|
||||
|
||||
@@ -49,6 +49,12 @@ options:
|
||||
- Ignore configuration checks about inconsistent module configuration. Especially for mpm_* modules.
|
||||
type: bool
|
||||
default: false
|
||||
warn_mpm_absent:
|
||||
description:
|
||||
- Control the behavior of the warning process for MPM modules.
|
||||
type: bool
|
||||
default: true
|
||||
version_added: 6.3.0
|
||||
requirements: ["a2enmod","a2dismod"]
|
||||
notes:
|
||||
- This does not work on RedHat-based distributions. It does work on Debian- and SuSE-based distributions.
|
||||
@@ -78,6 +84,18 @@ EXAMPLES = '''
|
||||
name: mpm_worker
|
||||
ignore_configcheck: true
|
||||
|
||||
- name: Disable mpm_event, enable mpm_prefork and ignore warnings about missing mpm module
|
||||
community.general.apache2_module:
|
||||
name: "{{ item.module }}"
|
||||
state: "{{ item.state }}"
|
||||
warn_mpm_absent: false
|
||||
ignore_configcheck: true
|
||||
loop:
|
||||
- module: mpm_event
|
||||
state: absent
|
||||
- module: mpm_prefork
|
||||
state: present
|
||||
|
||||
- name: Enable dump_io module, which is identified as dumpio_module inside apache2
|
||||
community.general.apache2_module:
|
||||
state: present
|
||||
@@ -140,10 +158,11 @@ def _module_is_enabled(module):
|
||||
error_msg = "Error executing %s: %s" % (control_binary, stderr)
|
||||
if module.params['ignore_configcheck']:
|
||||
if 'AH00534' in stderr and 'mpm_' in module.params['name']:
|
||||
module.warnings.append(
|
||||
"No MPM module loaded! apache2 reload AND other module actions"
|
||||
" will fail if no MPM module is loaded immediately."
|
||||
)
|
||||
if module.params['warn_mpm_absent']:
|
||||
module.warnings.append(
|
||||
"No MPM module loaded! apache2 reload AND other module actions"
|
||||
" will fail if no MPM module is loaded immediately."
|
||||
)
|
||||
else:
|
||||
module.warnings.append(error_msg)
|
||||
return False
|
||||
@@ -249,6 +268,7 @@ def main():
|
||||
force=dict(type='bool', default=False),
|
||||
state=dict(default='present', choices=['absent', 'present']),
|
||||
ignore_configcheck=dict(type='bool', default=False),
|
||||
warn_mpm_absent=dict(type='bool', default=True),
|
||||
),
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
@@ -23,7 +23,7 @@ description:
|
||||
by Consul from the Service name and id respectively by appending 'service:'
|
||||
Node level checks require a I(check_name) and optionally a I(check_id)."
|
||||
- Currently, there is no complete way to retrieve the script, interval or ttl
|
||||
metadata for a registered check. Without this metadata it is not possible to
|
||||
metadata for a registered check. Without this metadata it is not possible to
|
||||
tell if the data supplied with ansible represents a change to a check. As a
|
||||
result this does not attempt to determine changes and will always report a
|
||||
changed occurred. An API method is planned to supply this metadata so at that
|
||||
@@ -37,7 +37,7 @@ options:
|
||||
state:
|
||||
type: str
|
||||
description:
|
||||
- register or deregister the consul service, defaults to present
|
||||
- Register or deregister the consul service, defaults to present.
|
||||
default: present
|
||||
choices: ['present', 'absent']
|
||||
service_name:
|
||||
@@ -45,30 +45,30 @@ options:
|
||||
description:
|
||||
- Unique name for the service on a node, must be unique per node,
|
||||
required if registering a service. May be omitted if registering
|
||||
a node level check
|
||||
a node level check.
|
||||
service_id:
|
||||
type: str
|
||||
description:
|
||||
- the ID for the service, must be unique per node. If I(state=absent),
|
||||
- The ID for the service, must be unique per node. If I(state=absent),
|
||||
defaults to the service name if supplied.
|
||||
host:
|
||||
type: str
|
||||
description:
|
||||
- host of the consul agent defaults to localhost
|
||||
- Host of the consul agent defaults to localhost.
|
||||
default: localhost
|
||||
port:
|
||||
type: int
|
||||
description:
|
||||
- the port on which the consul agent is running
|
||||
- The port on which the consul agent is running.
|
||||
default: 8500
|
||||
scheme:
|
||||
type: str
|
||||
description:
|
||||
- the protocol scheme on which the consul agent is running
|
||||
- The protocol scheme on which the consul agent is running.
|
||||
default: http
|
||||
validate_certs:
|
||||
description:
|
||||
- whether to verify the TLS certificate of the consul agent
|
||||
- Whether to verify the TLS certificate of the consul agent.
|
||||
type: bool
|
||||
default: true
|
||||
notes:
|
||||
@@ -78,12 +78,12 @@ options:
|
||||
service_port:
|
||||
type: int
|
||||
description:
|
||||
- the port on which the service is listening. Can optionally be supplied for
|
||||
registration of a service, i.e. if I(service_name) or I(service_id) is set
|
||||
- The port on which the service is listening. Can optionally be supplied for
|
||||
registration of a service, i.e. if I(service_name) or I(service_id) is set.
|
||||
service_address:
|
||||
type: str
|
||||
description:
|
||||
- the address to advertise that the service will be listening on.
|
||||
- The address to advertise that the service will be listening on.
|
||||
This value will be passed as the I(address) parameter to Consul's
|
||||
C(/v1/agent/service/register) API method, so refer to the Consul API
|
||||
documentation for further details.
|
||||
@@ -91,63 +91,68 @@ options:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- tags that will be attached to the service registration.
|
||||
- Tags that will be attached to the service registration.
|
||||
script:
|
||||
type: str
|
||||
description:
|
||||
- the script/command that will be run periodically to check the health
|
||||
of the service. Scripts require I(interval) and vice versa.
|
||||
- The script/command that will be run periodically to check the health of the service.
|
||||
- Requires I(interval) to be provided.
|
||||
interval:
|
||||
type: str
|
||||
description:
|
||||
- the interval at which the service check will be run. This is a number
|
||||
with a s or m suffix to signify the units of seconds or minutes e.g
|
||||
C(15s) or C(1m). If no suffix is supplied, m will be used by default e.g.
|
||||
C(1) will be C(1m). Required if the I(script) parameter is specified.
|
||||
- The interval at which the service check will be run.
|
||||
This is a number with a C(s) or C(m) suffix to signify the units of seconds or minutes e.g C(15s) or C(1m).
|
||||
If no suffix is supplied C(s) will be used by default, e.g. C(10) will be C(10s).
|
||||
- Required if one of the parameters I(script), I(http), or I(tcp) is specified.
|
||||
check_id:
|
||||
type: str
|
||||
description:
|
||||
- an ID for the service check. If I(state=absent), defaults to
|
||||
- An ID for the service check. If I(state=absent), defaults to
|
||||
I(check_name). Ignored if part of a service definition.
|
||||
check_name:
|
||||
type: str
|
||||
description:
|
||||
- a name for the service check. Required if standalone, ignored if
|
||||
- Name for the service check. Required if standalone, ignored if
|
||||
part of service definition.
|
||||
ttl:
|
||||
type: str
|
||||
description:
|
||||
- checks can be registered with a ttl instead of a I(script) and I(interval)
|
||||
- Checks can be registered with a ttl instead of a I(script) and I(interval)
|
||||
this means that the service will check in with the agent before the
|
||||
ttl expires. If it doesn't the check will be considered failed.
|
||||
Required if registering a check and the script an interval are missing
|
||||
Similar to the interval this is a number with a s or m suffix to
|
||||
signify the units of seconds or minutes e.g C(15s) or C(1m). If no suffix
|
||||
is supplied, C(m) will be used by default e.g. C(1) will be C(1m)
|
||||
Similar to the interval this is a number with a C(s) or C(m) suffix to
|
||||
signify the units of seconds or minutes e.g C(15s) or C(1m).
|
||||
If no suffix is supplied C(s) will be used by default, e.g. C(10) will be C(10s).
|
||||
tcp:
|
||||
type: str
|
||||
description:
|
||||
- Checks can be registered with a TCP port. This means that consul
|
||||
will check if the connection attempt to that port is successful (that is, the port is currently accepting connections).
|
||||
The format is C(host:port), for example C(localhost:80).
|
||||
I(interval) must also be provided with this option.
|
||||
- Requires I(interval) to be provided.
|
||||
version_added: '1.3.0'
|
||||
http:
|
||||
type: str
|
||||
description:
|
||||
- checks can be registered with an HTTP endpoint. This means that consul
|
||||
- Checks can be registered with an HTTP endpoint. This means that consul
|
||||
will check that the http endpoint returns a successful HTTP status.
|
||||
I(interval) must also be provided with this option.
|
||||
- Requires I(interval) to be provided.
|
||||
timeout:
|
||||
type: str
|
||||
description:
|
||||
- A custom HTTP check timeout. The consul default is 10 seconds.
|
||||
Similar to the interval this is a number with a C(s) or C(m) suffix to
|
||||
signify the units of seconds or minutes, e.g. C(15s) or C(1m).
|
||||
If no suffix is supplied C(s) will be used by default, e.g. C(10) will be C(10s).
|
||||
token:
|
||||
type: str
|
||||
description:
|
||||
- the token key identifying an ACL rule set. May be required to register services.
|
||||
- The token key identifying an ACL rule set. May be required to register services.
|
||||
ack_params_state_absent:
|
||||
type: bool
|
||||
description:
|
||||
- Disable deprecation warning when using parameters incompatible with I(state=absent).
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
@@ -583,7 +588,8 @@ def main():
|
||||
http=dict(type='str'),
|
||||
timeout=dict(type='str'),
|
||||
tags=dict(type='list', elements='str'),
|
||||
token=dict(no_log=True)
|
||||
token=dict(no_log=True),
|
||||
ack_params_state_absent=dict(type='bool'),
|
||||
),
|
||||
required_if=[
|
||||
('state', 'present', ['service_name']),
|
||||
@@ -591,14 +597,29 @@ def main():
|
||||
],
|
||||
supports_check_mode=False,
|
||||
)
|
||||
p = module.params
|
||||
|
||||
test_dependencies(module)
|
||||
if p['state'] == 'absent' and any(p[x] for x in ['script', 'ttl', 'tcp', 'http', 'interval']) and not p['ack_params_state_absent']:
|
||||
module.deprecate(
|
||||
"The use of parameters 'script', 'ttl', 'tcp', 'http', 'interval' along with 'state=absent' is deprecated. "
|
||||
"In community.general 8.0.0 their use will become an error. "
|
||||
"To suppress this deprecation notice, set parameter ack_params_state_absent=true.",
|
||||
version="8.0.0",
|
||||
collection_name="community.general",
|
||||
)
|
||||
# When reaching c.g 8.0.0:
|
||||
# - Replace the deprecation with a fail_json(), remove the "ack_params_state_absent" condition from the "if"
|
||||
# - Add mutually_exclusive for ('script', 'ttl', 'tcp', 'http'), then remove that validation from parse_check()
|
||||
# - Add required_by {'script': 'interval', 'http': 'interval', 'tcp': 'interval'}, then remove checks for 'interval' in ConsulCheck.__init__()
|
||||
# - Deprecate the parameter ack_params_state_absent
|
||||
|
||||
try:
|
||||
register_with_consul(module)
|
||||
except SystemExit:
|
||||
raise
|
||||
except ConnectionError as e:
|
||||
module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
|
||||
module.params['host'], module.params['port'], str(e)))
|
||||
module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (p['host'], p['port'], str(e)))
|
||||
except Exception as e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
@@ -105,7 +105,7 @@ options:
|
||||
required: false
|
||||
force:
|
||||
description:
|
||||
- Force gem to install, bypassing dependency checks.
|
||||
- Force gem to (un-)install, bypassing dependency checks.
|
||||
required: false
|
||||
default: false
|
||||
type: bool
|
||||
@@ -234,7 +234,9 @@ def uninstall(module):
|
||||
cmd.extend(['--version', module.params['version']])
|
||||
else:
|
||||
cmd.append('--all')
|
||||
cmd.append('--executable')
|
||||
cmd.append('--executable')
|
||||
if module.params['force']:
|
||||
cmd.append('--force')
|
||||
cmd.append(module.params['name'])
|
||||
module.run_command(cmd, environ_update=environ, check_rc=True)
|
||||
|
||||
|
||||
@@ -151,6 +151,7 @@ class GitLabDeployKey(object):
|
||||
changed = True
|
||||
else:
|
||||
changed, deploy_key = self.update_deploy_key(self.deploy_key_object, {
|
||||
'title': key_title,
|
||||
'can_push': options['can_push']})
|
||||
|
||||
self.deploy_key_object = deploy_key
|
||||
|
||||
@@ -172,6 +172,30 @@ options:
|
||||
- This option is only used on creation, not for updates. This is also only used if I(initialize_with_readme=true).
|
||||
type: str
|
||||
version_added: "4.2.0"
|
||||
builds_access_level:
|
||||
description:
|
||||
- C(private) means that repository CI/CD is allowed only to project members.
|
||||
- C(disabled) means that repository CI/CD is disabled.
|
||||
- C(enabled) means that repository CI/CD is enabled.
|
||||
type: str
|
||||
choices: ["private", "disabled", "enabled"]
|
||||
version_added: "6.2.0"
|
||||
forking_access_level:
|
||||
description:
|
||||
- C(private) means that repository forks is allowed only to project members.
|
||||
- C(disabled) means that repository forks are disabled.
|
||||
- C(enabled) means that repository forks are enabled.
|
||||
type: str
|
||||
choices: ["private", "disabled", "enabled"]
|
||||
version_added: "6.2.0"
|
||||
container_registry_access_level:
|
||||
description:
|
||||
- C(private) means that container registry is allowed only to project members.
|
||||
- C(disabled) means that container registry is disabled.
|
||||
- C(enabled) means that container registry is enabled.
|
||||
type: str
|
||||
choices: ["private", "disabled", "enabled"]
|
||||
version_added: "6.2.0"
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
@@ -287,6 +311,9 @@ class GitLabProject(object):
|
||||
'squash_option': options['squash_option'],
|
||||
'ci_config_path': options['ci_config_path'],
|
||||
'shared_runners_enabled': options['shared_runners_enabled'],
|
||||
'builds_access_level': options['builds_access_level'],
|
||||
'forking_access_level': options['forking_access_level'],
|
||||
'container_registry_access_level': options['container_registry_access_level'],
|
||||
}
|
||||
# Because we have already call userExists in main()
|
||||
if self.project_object is None:
|
||||
@@ -417,6 +444,9 @@ def main():
|
||||
ci_config_path=dict(type='str'),
|
||||
shared_runners_enabled=dict(type='bool'),
|
||||
avatar_path=dict(type='path'),
|
||||
builds_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']),
|
||||
forking_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']),
|
||||
container_registry_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']),
|
||||
))
|
||||
|
||||
module = AnsibleModule(
|
||||
@@ -464,6 +494,9 @@ def main():
|
||||
shared_runners_enabled = module.params['shared_runners_enabled']
|
||||
avatar_path = module.params['avatar_path']
|
||||
default_branch = module.params['default_branch']
|
||||
builds_access_level = module.params['builds_access_level']
|
||||
forking_access_level = module.params['forking_access_level']
|
||||
container_registry_access_level = module.params['container_registry_access_level']
|
||||
|
||||
if default_branch and not initialize_with_readme:
|
||||
module.fail_json(msg="Param default_branch need param initialize_with_readme set to true")
|
||||
@@ -533,6 +566,9 @@ def main():
|
||||
"ci_config_path": ci_config_path,
|
||||
"shared_runners_enabled": shared_runners_enabled,
|
||||
"avatar_path": avatar_path,
|
||||
"builds_access_level": builds_access_level,
|
||||
"forking_access_level": forking_access_level,
|
||||
"container_registry_access_level": container_registry_access_level,
|
||||
}):
|
||||
|
||||
module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.project_object._attrs)
|
||||
|
||||
@@ -84,12 +84,23 @@ options:
|
||||
access_level:
|
||||
description:
|
||||
- Determines if a runner can pick up jobs only from protected branches.
|
||||
- If I(access_level_on_creation) is not explicitly set to C(true), this option is ignored on registration and
|
||||
is only applied on updates.
|
||||
- If set to C(ref_protected), runner can pick up jobs only from protected branches.
|
||||
- If set to C(not_protected), runner can pick up jobs from both protected and unprotected branches.
|
||||
required: false
|
||||
default: ref_protected
|
||||
choices: ["ref_protected", "not_protected"]
|
||||
type: str
|
||||
access_level_on_creation:
|
||||
description:
|
||||
- Whether the runner should be registered with an access level or not.
|
||||
- If set to C(true), the value of I(access_level) is used for runner registration.
|
||||
- If set to C(false), GitLab registers the runner with the default access level.
|
||||
- The current default of this option is C(false). This default is deprecated and will change to C(true) in commuinty.general 7.0.0.
|
||||
required: false
|
||||
type: bool
|
||||
version_added: 6.3.0
|
||||
maximum_timeout:
|
||||
description:
|
||||
- The maximum time that a runner has to complete a specific job.
|
||||
@@ -207,27 +218,34 @@ class GitLabRunner(object):
|
||||
def create_or_update_runner(self, description, options):
|
||||
changed = False
|
||||
|
||||
arguments = {
|
||||
'active': options['active'],
|
||||
'locked': options['locked'],
|
||||
'run_untagged': options['run_untagged'],
|
||||
'maximum_timeout': options['maximum_timeout'],
|
||||
'tag_list': options['tag_list'],
|
||||
}
|
||||
# Because we have already call userExists in main()
|
||||
if self.runner_object is None:
|
||||
runner = self.create_runner({
|
||||
'description': description,
|
||||
'active': options['active'],
|
||||
'token': options['registration_token'],
|
||||
'locked': options['locked'],
|
||||
'run_untagged': options['run_untagged'],
|
||||
'maximum_timeout': options['maximum_timeout'],
|
||||
'tag_list': options['tag_list'],
|
||||
})
|
||||
arguments['description'] = description
|
||||
arguments['token'] = options['registration_token']
|
||||
|
||||
access_level_on_creation = self._module.params['access_level_on_creation']
|
||||
if access_level_on_creation is None:
|
||||
message = "The option 'access_level_on_creation' is unspecified, so 'false' is assumed. "\
|
||||
"That means any value of 'access_level' is ignored and GitLab registers the runner with its default value. "\
|
||||
"The option 'access_level_on_creation' will switch to 'true' in community.general 7.0.0"
|
||||
self._module.deprecate(message, version='7.0.0', collection_name='community.general')
|
||||
access_level_on_creation = False
|
||||
|
||||
if access_level_on_creation:
|
||||
arguments['access_level'] = options['access_level']
|
||||
|
||||
runner = self.create_runner(arguments)
|
||||
changed = True
|
||||
else:
|
||||
changed, runner = self.update_runner(self.runner_object, {
|
||||
'active': options['active'],
|
||||
'locked': options['locked'],
|
||||
'run_untagged': options['run_untagged'],
|
||||
'maximum_timeout': options['maximum_timeout'],
|
||||
'access_level': options['access_level'],
|
||||
'tag_list': options['tag_list'],
|
||||
})
|
||||
arguments['access_level'] = options['access_level']
|
||||
changed, runner = self.update_runner(self.runner_object, arguments)
|
||||
|
||||
self.runner_object = runner
|
||||
if changed:
|
||||
@@ -328,6 +346,7 @@ def main():
|
||||
run_untagged=dict(type='bool', default=True),
|
||||
locked=dict(type='bool', default=False),
|
||||
access_level=dict(type='str', default='ref_protected', choices=["not_protected", "ref_protected"]),
|
||||
access_level_on_creation=dict(type='bool'),
|
||||
maximum_timeout=dict(type='int', default=3600),
|
||||
registration_token=dict(type='str', no_log=True),
|
||||
project=dict(type='str'),
|
||||
|
||||
@@ -125,7 +125,7 @@ def main():
|
||||
password=dict(no_log=True),
|
||||
auth_token=dict(no_log=True),
|
||||
attribute_name=dict(required=True),
|
||||
attribute_value=dict(),
|
||||
attribute_value=dict(type='str'),
|
||||
timeout=dict(type='int', default=10)
|
||||
),
|
||||
required_together=[
|
||||
|
||||
@@ -64,6 +64,17 @@ options:
|
||||
- If option is omitted assigned users will not be checked or changed.
|
||||
type: list
|
||||
elements: str
|
||||
external_user:
|
||||
description:
|
||||
- List of external users assigned to this group.
|
||||
- Behaves identically to I(user) with respect to I(append) attribute.
|
||||
- List entries can be in C(DOMAIN\\username) or SID format.
|
||||
- Unless SIDs are provided, the module will always attempt to make changes even if the group already has all the users.
|
||||
This is because only SIDs are returned by IPA query.
|
||||
- I(external=true) is needed for this option to work.
|
||||
type: list
|
||||
elements: str
|
||||
version_added: 6.3.0
|
||||
state:
|
||||
description:
|
||||
- State to ensure
|
||||
@@ -116,6 +127,28 @@ EXAMPLES = r'''
|
||||
ipa_user: admin
|
||||
ipa_pass: topsecret
|
||||
|
||||
- name: Add external user to a group
|
||||
community.general.ipa_group:
|
||||
name: developers
|
||||
external: true
|
||||
append: true
|
||||
external_user:
|
||||
- S-1-5-21-123-1234-12345-63421
|
||||
ipa_host: ipa.example.com
|
||||
ipa_user: admin
|
||||
ipa_pass: topsecret
|
||||
|
||||
- name: Add a user from MYDOMAIN
|
||||
community.general.ipa_group:
|
||||
name: developers
|
||||
external: true
|
||||
append: true
|
||||
external_user:
|
||||
- MYDOMAIN\\john
|
||||
ipa_host: ipa.example.com
|
||||
ipa_user: admin
|
||||
ipa_pass: topsecret
|
||||
|
||||
- name: Ensure group is absent
|
||||
community.general.ipa_group:
|
||||
name: sysops
|
||||
@@ -164,6 +197,9 @@ class GroupIPAClient(IPAClient):
|
||||
def group_add_member_user(self, name, item):
|
||||
return self.group_add_member(name=name, item={'user': item})
|
||||
|
||||
def group_add_member_externaluser(self, name, item):
|
||||
return self.group_add_member(name=name, item={'ipaexternalmember': item})
|
||||
|
||||
def group_remove_member(self, name, item):
|
||||
return self._post_json(method='group_remove_member', name=name, item=item)
|
||||
|
||||
@@ -173,6 +209,9 @@ class GroupIPAClient(IPAClient):
|
||||
def group_remove_member_user(self, name, item):
|
||||
return self.group_remove_member(name=name, item={'user': item})
|
||||
|
||||
def group_remove_member_externaluser(self, name, item):
|
||||
return self.group_remove_member(name=name, item={'ipaexternalmember': item})
|
||||
|
||||
|
||||
def get_group_dict(description=None, external=None, gid=None, nonposix=None):
|
||||
group = {}
|
||||
@@ -208,12 +247,19 @@ def ensure(module, client):
|
||||
name = module.params['cn']
|
||||
group = module.params['group']
|
||||
user = module.params['user']
|
||||
external = module.params['external']
|
||||
external_user = module.params['external_user']
|
||||
append = module.params['append']
|
||||
|
||||
module_group = get_group_dict(description=module.params['description'], external=module.params['external'],
|
||||
gid=module.params['gidnumber'], nonposix=module.params['nonposix'])
|
||||
module_group = get_group_dict(description=module.params['description'],
|
||||
external=external,
|
||||
gid=module.params['gidnumber'],
|
||||
nonposix=module.params['nonposix'])
|
||||
ipa_group = client.group_find(name=name)
|
||||
|
||||
if (not (external or external_user is None)):
|
||||
module.fail_json("external_user can only be set if external = True")
|
||||
|
||||
changed = False
|
||||
if state == 'present':
|
||||
if not ipa_group:
|
||||
@@ -242,6 +288,11 @@ def ensure(module, client):
|
||||
client.group_remove_member_user,
|
||||
append=append) or changed
|
||||
|
||||
if external_user is not None:
|
||||
changed = client.modify_if_diff(name, ipa_group.get('ipaexternalmember', []), external_user,
|
||||
client.group_add_member_externaluser,
|
||||
client.group_remove_member_externaluser,
|
||||
append=append) or changed
|
||||
else:
|
||||
if ipa_group:
|
||||
changed = True
|
||||
@@ -256,6 +307,7 @@ def main():
|
||||
argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
|
||||
description=dict(type='str'),
|
||||
external=dict(type='bool'),
|
||||
external_user=dict(type='list', elements='str'),
|
||||
gidnumber=dict(type='str', aliases=['gid']),
|
||||
group=dict(type='list', elements='str'),
|
||||
nonposix=dict(type='bool'),
|
||||
|
||||
@@ -260,10 +260,7 @@ def read_state(b_path):
|
||||
'''
|
||||
with open(b_path, 'r') as f:
|
||||
text = f.read()
|
||||
lines = text.splitlines()
|
||||
while '' in lines:
|
||||
lines.remove('')
|
||||
return lines
|
||||
return [t for t in text.splitlines() if t != '']
|
||||
|
||||
|
||||
def write_state(b_path, lines, changed):
|
||||
@@ -273,8 +270,7 @@ def write_state(b_path, lines, changed):
|
||||
# Populate a temporary file
|
||||
tmpfd, tmpfile = tempfile.mkstemp()
|
||||
with os.fdopen(tmpfd, 'w') as f:
|
||||
for line in lines:
|
||||
f.write('%s\n' % line)
|
||||
f.write("{0}\n".format("\n".join(lines)))
|
||||
|
||||
# Prepare to copy temporary file to the final destination
|
||||
if not os.path.exists(b_path):
|
||||
@@ -335,9 +331,7 @@ def filter_and_format_state(string):
|
||||
string = re.sub(r'((^|\n)# (Generated|Completed)[^\n]*) on [^\n]*', r'\1', string)
|
||||
if not module.params['counters']:
|
||||
string = re.sub(r'\[[0-9]+:[0-9]+\]', r'[0:0]', string)
|
||||
lines = string.splitlines()
|
||||
while '' in lines:
|
||||
lines.remove('')
|
||||
lines = [line for line in string.splitlines() if line != '']
|
||||
return lines
|
||||
|
||||
|
||||
@@ -354,10 +348,7 @@ def per_table_state(command, state):
|
||||
dummy, out, dummy = module.run_command(COMMAND, check_rc=True)
|
||||
out = re.sub(r'(^|\n)(# Generated|# Completed|[*]%s|COMMIT)[^\n]*' % t, r'', out)
|
||||
out = re.sub(r' *\[[0-9]+:[0-9]+\] *', r'', out)
|
||||
table = out.splitlines()
|
||||
while '' in table:
|
||||
table.remove('')
|
||||
tables[t] = table
|
||||
tables[t] = [tt for tt in out.splitlines() if tt != '']
|
||||
return tables
|
||||
|
||||
|
||||
@@ -548,8 +539,7 @@ def main():
|
||||
if module.check_mode:
|
||||
tmpfd, tmpfile = tempfile.mkstemp()
|
||||
with os.fdopen(tmpfd, 'w') as f:
|
||||
for line in initial_state:
|
||||
f.write('%s\n' % line)
|
||||
f.write("{0}\n".format("\n".join(initial_state)))
|
||||
|
||||
if filecmp.cmp(tmpfile, b_path):
|
||||
restored_state = initial_state
|
||||
|
||||
@@ -24,7 +24,7 @@ description:
|
||||
to your needs and a user having the expected roles.
|
||||
|
||||
- The names of module options are snake_cased versions of the camelCase ones found in the
|
||||
Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/15.0/rest-api/index.html).
|
||||
Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/20.0.2/rest-api/index.html).
|
||||
|
||||
|
||||
options:
|
||||
@@ -835,7 +835,7 @@ def main():
|
||||
|
||||
# See if it already exists in Keycloak
|
||||
if cid is None:
|
||||
found = kc.get_components(urlencode(dict(type='org.keycloak.storage.UserStorageProvider', parent=realm, name=name)), realm)
|
||||
found = kc.get_components(urlencode(dict(type='org.keycloak.storage.UserStorageProvider', name=name)), realm)
|
||||
if len(found) > 1:
|
||||
module.fail_json(msg='No ID given and found multiple user federations with name `{name}`. Cannot continue.'.format(name=name))
|
||||
before_comp = next(iter(found), None)
|
||||
@@ -923,6 +923,8 @@ def main():
|
||||
updated_mappers = desired_comp.pop('mappers', [])
|
||||
after_comp = kc.create_component(desired_comp, realm)
|
||||
|
||||
cid = after_comp['id']
|
||||
|
||||
for mapper in updated_mappers:
|
||||
found = kc.get_components(urlencode(dict(parent=cid, name=mapper['name'])), realm)
|
||||
if len(found) > 1:
|
||||
|
||||
@@ -269,12 +269,16 @@ class RecordManager(object):
|
||||
if lookup.rcode() in [dns.rcode.SERVFAIL, dns.rcode.REFUSED]:
|
||||
self.module.fail_json(msg='Zone lookup failure: \'%s\' will not respond to queries regarding \'%s\'.' % (
|
||||
self.module.params['server'], self.module.params['record']))
|
||||
try:
|
||||
zone = lookup.authority[0].name
|
||||
if zone == name:
|
||||
return zone.to_text()
|
||||
except IndexError:
|
||||
pass
|
||||
# If the response contains an Answer SOA RR whose name matches the queried name,
|
||||
# this is the name of the zone in which the record needs to be inserted.
|
||||
for rr in lookup.answer:
|
||||
if rr.rdtype == dns.rdatatype.SOA and rr.name == name:
|
||||
return rr.name.to_text()
|
||||
# If the response contains an Authority SOA RR whose name is a subdomain of the queried name,
|
||||
# this SOA name is the zone in which the record needs to be inserted.
|
||||
for rr in lookup.authority:
|
||||
if rr.rdtype == dns.rdatatype.SOA and name.fullcompare(rr.name)[0] == dns.name.NAMERELN_SUBDOMAIN:
|
||||
return rr.name.to_text()
|
||||
try:
|
||||
name = name.parent()
|
||||
except dns.name.NoParent:
|
||||
|
||||
267
plugins/modules/ocapi_command.py
Normal file
267
plugins/modules/ocapi_command.py
Normal file
@@ -0,0 +1,267 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2022 Western Digital Corporation
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ocapi_command
|
||||
version_added: 6.3.0
|
||||
short_description: Manages Out-Of-Band controllers using Open Composable API (OCAPI)
|
||||
description:
|
||||
- Builds OCAPI URIs locally and sends them to remote OOB controllers to
|
||||
perform an action.
|
||||
- Manages OOB controller such as Indicator LED, Reboot, Power Mode, Firmware Update.
|
||||
options:
|
||||
category:
|
||||
required: true
|
||||
description:
|
||||
- Category to execute on OOB controller.
|
||||
type: str
|
||||
command:
|
||||
required: true
|
||||
description:
|
||||
- Command to execute on OOB controller.
|
||||
type: str
|
||||
baseuri:
|
||||
required: true
|
||||
description:
|
||||
- Base URI of OOB controller.
|
||||
type: str
|
||||
proxy_slot_number:
|
||||
description: For proxied inband requests, the slot number of the IOM. Only applies if I(baseuri) is a proxy server.
|
||||
type: int
|
||||
update_image_path:
|
||||
required: false
|
||||
description:
|
||||
- For C(FWUpload), the path on the local filesystem of the firmware update image.
|
||||
type: str
|
||||
job_name:
|
||||
required: false
|
||||
description:
|
||||
- For C(DeleteJob) command, the name of the job to delete.
|
||||
type: str
|
||||
username:
|
||||
required: true
|
||||
description:
|
||||
- Username for authenticating to OOB controller.
|
||||
type: str
|
||||
password:
|
||||
required: true
|
||||
description:
|
||||
- Password for authenticating to OOB controller.
|
||||
type: str
|
||||
timeout:
|
||||
description:
|
||||
- Timeout in seconds for URL requests to OOB controller.
|
||||
default: 10
|
||||
type: int
|
||||
|
||||
author: "Mike Moerk (@mikemoerk)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Set the power state to low
|
||||
community.general.ocapi_command:
|
||||
category: Chassis
|
||||
command: PowerModeLow
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Set the power state to normal
|
||||
community.general.ocapi_command:
|
||||
category: Chassis
|
||||
command: PowerModeNormal
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
- name: Set chassis indicator LED to on
|
||||
community.general.ocapi_command:
|
||||
category: Chassis
|
||||
command: IndicatorLedOn
|
||||
baseuri: "{{ baseuri }}"
|
||||
proxy_slot_number: 2
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
- name: Set chassis indicator LED to off
|
||||
community.general.ocapi_command:
|
||||
category: Chassis
|
||||
command: IndicatorLedOff
|
||||
baseuri: "{{ baseuri }}"
|
||||
proxy_slot_number: 2
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
- name: Reset Enclosure
|
||||
community.general.ocapi_command:
|
||||
category: Systems
|
||||
command: PowerGracefulRestart
|
||||
baseuri: "{{ baseuri }}"
|
||||
proxy_slot_number: 2
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
- name: Firmware Upload
|
||||
community.general.ocapi_command:
|
||||
category: Update
|
||||
command: FWUpload
|
||||
baseuri: "iom1.wdc.com"
|
||||
proxy_slot_number: 2
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
update_image_path: "/path/to/firmware.tar.gz"
|
||||
- name: Firmware Update
|
||||
community.general.ocapi_command:
|
||||
category: Update
|
||||
command: FWUpdate
|
||||
baseuri: "iom1.wdc.com"
|
||||
proxy_slot_number: 2
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
- name: Firmware Activate
|
||||
community.general.ocapi_command:
|
||||
category: Update
|
||||
command: FWActivate
|
||||
baseuri: "iom1.wdc.com"
|
||||
proxy_slot_number: 2
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
- name: Delete Job
|
||||
community.general.ocapi_command:
|
||||
category: Jobs
|
||||
command: DeleteJob
|
||||
job_name: FirmwareUpdate
|
||||
baseuri: "{{ baseuri }}"
|
||||
proxy_slot_number: 2
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
msg:
|
||||
description: Message with action result or error description.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "Action was successful"
|
||||
|
||||
jobUri:
|
||||
description: URI to use to monitor status of the operation. Returned for async commands such as Firmware Update, Firmware Activate.
|
||||
returned: when supported
|
||||
type: str
|
||||
sample: "https://ioma.wdc.com/Storage/Devices/openflex-data24-usalp03020qb0003/Jobs/FirmwareUpdate/"
|
||||
|
||||
operationStatusId:
|
||||
description: OCAPI State ID (see OCAPI documentation for possible values).
|
||||
returned: when supported
|
||||
type: int
|
||||
sample: 2
|
||||
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.ocapi_utils import OcapiUtils
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
from ansible.module_utils.six.moves.urllib.parse import quote_plus, urljoin
|
||||
|
||||
# More will be added as module features are expanded
|
||||
CATEGORY_COMMANDS_ALL = {
|
||||
"Chassis": ["IndicatorLedOn", "IndicatorLedOff", "PowerModeLow", "PowerModeNormal"],
|
||||
"Systems": ["PowerGracefulRestart"],
|
||||
"Update": ["FWUpload", "FWUpdate", "FWActivate"],
|
||||
"Jobs": ["DeleteJob"]
|
||||
}
|
||||
|
||||
|
||||
def main():
|
||||
result = {}
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
category=dict(required=True),
|
||||
command=dict(required=True, type='str'),
|
||||
job_name=dict(type='str'),
|
||||
baseuri=dict(required=True, type='str'),
|
||||
proxy_slot_number=dict(type='int'),
|
||||
update_image_path=dict(type='str'),
|
||||
username=dict(required=True),
|
||||
password=dict(required=True, no_log=True),
|
||||
timeout=dict(type='int', default=10)
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
category = module.params['category']
|
||||
command = module.params['command']
|
||||
|
||||
# admin credentials used for authentication
|
||||
creds = {
|
||||
'user': module.params['username'],
|
||||
'pswd': module.params['password']
|
||||
}
|
||||
|
||||
# timeout
|
||||
timeout = module.params['timeout']
|
||||
|
||||
base_uri = "https://" + module.params["baseuri"]
|
||||
proxy_slot_number = module.params.get("proxy_slot_number")
|
||||
ocapi_utils = OcapiUtils(creds, base_uri, proxy_slot_number, timeout, module)
|
||||
|
||||
# Check that Category is valid
|
||||
if category not in CATEGORY_COMMANDS_ALL:
|
||||
module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys()))))
|
||||
|
||||
# Check that the command is valid
|
||||
if command not in CATEGORY_COMMANDS_ALL[category]:
|
||||
module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (command, CATEGORY_COMMANDS_ALL[category])))
|
||||
|
||||
# Organize by Categories / Commands
|
||||
if category == "Chassis":
|
||||
if command.startswith("IndicatorLed"):
|
||||
result = ocapi_utils.manage_chassis_indicator_led(command)
|
||||
elif command.startswith("PowerMode"):
|
||||
result = ocapi_utils.manage_system_power(command)
|
||||
elif category == "Systems":
|
||||
if command.startswith("Power"):
|
||||
result = ocapi_utils.manage_system_power(command)
|
||||
elif category == "Update":
|
||||
if command == "FWUpload":
|
||||
update_image_path = module.params.get("update_image_path")
|
||||
if update_image_path is None:
|
||||
module.fail_json(msg=to_native("Missing update_image_path."))
|
||||
result = ocapi_utils.upload_firmware_image(update_image_path)
|
||||
elif command == "FWUpdate":
|
||||
result = ocapi_utils.update_firmware_image()
|
||||
elif command == "FWActivate":
|
||||
result = ocapi_utils.activate_firmware_image()
|
||||
elif category == "Jobs":
|
||||
if command == "DeleteJob":
|
||||
job_name = module.params.get("job_name")
|
||||
if job_name is None:
|
||||
module.fail_json("Missing job_name")
|
||||
job_uri = urljoin(base_uri, "Jobs/" + job_name)
|
||||
result = ocapi_utils.delete_job(job_uri)
|
||||
|
||||
if result['ret'] is False:
|
||||
module.fail_json(msg=to_native(result['msg']))
|
||||
else:
|
||||
del result['ret']
|
||||
changed = result.get('changed', True)
|
||||
session = result.get('session', dict())
|
||||
kwargs = {
|
||||
"changed": changed,
|
||||
"session": session,
|
||||
"msg": "Action was successful." if not module.check_mode else result.get(
|
||||
"msg", "No action performed in check mode."
|
||||
)
|
||||
}
|
||||
result_keys = [result_key for result_key in result if result_key not in kwargs]
|
||||
for result_key in result_keys:
|
||||
kwargs[result_key] = result[result_key]
|
||||
module.exit_json(**kwargs)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
221
plugins/modules/ocapi_info.py
Normal file
221
plugins/modules/ocapi_info.py
Normal file
@@ -0,0 +1,221 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2022 Western Digital Corporation
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ocapi_info
|
||||
version_added: 6.3.0
|
||||
short_description: Manages Out-Of-Band controllers using Open Composable API (OCAPI)
|
||||
description:
|
||||
- Builds OCAPI URIs locally and sends them to remote OOB controllers to
|
||||
get information back.
|
||||
options:
|
||||
category:
|
||||
required: true
|
||||
description:
|
||||
- Category to execute on OOB controller.
|
||||
type: str
|
||||
command:
|
||||
required: true
|
||||
description:
|
||||
- Command to execute on OOB controller.
|
||||
type: str
|
||||
baseuri:
|
||||
required: true
|
||||
description:
|
||||
- Base URI of OOB controller.
|
||||
type: str
|
||||
proxy_slot_number:
|
||||
description: For proxied inband requests, the slot number of the IOM. Only applies if I(baseuri) is a proxy server.
|
||||
type: int
|
||||
username:
|
||||
required: true
|
||||
description:
|
||||
- Username for authenticating to OOB controller.
|
||||
type: str
|
||||
password:
|
||||
required: true
|
||||
description:
|
||||
- Password for authenticating to OOB controller.
|
||||
type: str
|
||||
timeout:
|
||||
description:
|
||||
- Timeout in seconds for URL requests to OOB controller.
|
||||
default: 10
|
||||
type: int
|
||||
job_name:
|
||||
description:
|
||||
- Name of job for fetching status.
|
||||
type: str
|
||||
|
||||
|
||||
author: "Mike Moerk (@mikemoerk)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get job status
|
||||
community.general.ocapi_info:
|
||||
category: Status
|
||||
command: JobStatus
|
||||
baseuri: "http://iom1.wdc.com"
|
||||
jobName: FirmwareUpdate
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
msg:
|
||||
description: Message with action result or error description.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "Action was successful"
|
||||
|
||||
percentComplete:
|
||||
description: Percent complete of the relevant operation. Applies to C(JobStatus) command.
|
||||
returned: when supported
|
||||
type: int
|
||||
sample: 99
|
||||
|
||||
operationStatus:
|
||||
description: Status of the relevant operation. Applies to C(JobStatus) command. See OCAPI documentation for details.
|
||||
returned: when supported
|
||||
type: str
|
||||
sample: "Activate needed"
|
||||
|
||||
operationStatusId:
|
||||
description: Integer value of status (corresponds to operationStatus). Applies to C(JobStatus) command. See OCAPI documentation for details.
|
||||
returned: when supported
|
||||
type: int
|
||||
sample: 65540
|
||||
|
||||
operationHealth:
|
||||
description: Health of the operation. Applies to C(JobStatus) command. See OCAPI documentation for details.
|
||||
returned: when supported
|
||||
type: str
|
||||
sample: "OK"
|
||||
|
||||
operationHealthId:
|
||||
description: >
|
||||
Integer value for health of the operation (corresponds to C(operationHealth)). Applies to C(JobStatus) command.
|
||||
See OCAPI documentation for details.
|
||||
returned: when supported
|
||||
type: str
|
||||
sample: "OK"
|
||||
|
||||
details:
|
||||
description: Details of the relevant operation. Applies to C(JobStatus) command.
|
||||
returned: when supported
|
||||
type: list
|
||||
elements: str
|
||||
|
||||
status:
|
||||
description: Dict containing status information. See OCAPI documentation for details.
|
||||
returned: when supported
|
||||
type: dict
|
||||
sample: {
|
||||
"Details": [
|
||||
"None"
|
||||
],
|
||||
"Health": [
|
||||
{
|
||||
"ID": 5,
|
||||
"Name": "OK"
|
||||
}
|
||||
],
|
||||
"State": {
|
||||
"ID": 16,
|
||||
"Name": "In service"
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.ocapi_utils import OcapiUtils
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
from ansible.module_utils.six.moves.urllib.parse import quote_plus, urljoin
|
||||
|
||||
# More will be added as module features are expanded
|
||||
CATEGORY_COMMANDS_ALL = {
|
||||
"Jobs": ["JobStatus"]
|
||||
}
|
||||
|
||||
|
||||
def main():
|
||||
result = {}
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
category=dict(required=True),
|
||||
command=dict(required=True, type='str'),
|
||||
job_name=dict(type='str'),
|
||||
baseuri=dict(required=True, type='str'),
|
||||
proxy_slot_number=dict(type='int'),
|
||||
username=dict(required=True),
|
||||
password=dict(required=True, no_log=True),
|
||||
timeout=dict(type='int', default=10)
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
category = module.params['category']
|
||||
command = module.params['command']
|
||||
|
||||
# admin credentials used for authentication
|
||||
creds = {
|
||||
'user': module.params['username'],
|
||||
'pswd': module.params['password']
|
||||
}
|
||||
|
||||
# timeout
|
||||
timeout = module.params['timeout']
|
||||
|
||||
base_uri = "https://" + module.params["baseuri"]
|
||||
proxy_slot_number = module.params.get("proxy_slot_number")
|
||||
ocapi_utils = OcapiUtils(creds, base_uri, proxy_slot_number, timeout, module)
|
||||
|
||||
# Check that Category is valid
|
||||
if category not in CATEGORY_COMMANDS_ALL:
|
||||
module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys()))))
|
||||
|
||||
# Check that the command is valid
|
||||
if command not in CATEGORY_COMMANDS_ALL[category]:
|
||||
module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (command, CATEGORY_COMMANDS_ALL[category])))
|
||||
|
||||
# Organize by Categories / Commands
|
||||
if category == "Jobs":
|
||||
if command == "JobStatus":
|
||||
if module.params.get("job_name") is None:
|
||||
module.fail_json(msg=to_native(
|
||||
"job_name required for JobStatus command."))
|
||||
job_uri = urljoin(base_uri, 'Jobs/' + module.params["job_name"])
|
||||
result = ocapi_utils.get_job_status(job_uri)
|
||||
|
||||
if result['ret'] is False:
|
||||
module.fail_json(msg=to_native(result['msg']))
|
||||
else:
|
||||
del result['ret']
|
||||
changed = False
|
||||
session = result.get('session', dict())
|
||||
kwargs = {
|
||||
"changed": changed,
|
||||
"session": session,
|
||||
"msg": "Action was successful." if not module.check_mode else result.get(
|
||||
"msg", "No action performed in check mode."
|
||||
)
|
||||
}
|
||||
result_keys = [result_key for result_key in result if result_key not in kwargs]
|
||||
for result_key in result_keys:
|
||||
kwargs[result_key] = result[result_key]
|
||||
module.exit_json(**kwargs)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -196,6 +196,13 @@ options:
|
||||
- Name of Datastore to use to create a new instace
|
||||
version_added: '0.2.0'
|
||||
type: str
|
||||
updateconf:
|
||||
description:
|
||||
- When I(instance_ids) is provided, updates running VMs with the C(updateconf) API call.
|
||||
- When new VMs are being created, emulates the C(updateconf) API call via direct template merge.
|
||||
- Allows for complete modifications of the C(CONTEXT) attribute.
|
||||
type: dict
|
||||
version_added: 6.3.0
|
||||
author:
|
||||
- "Milan Ilic (@ilicmilan)"
|
||||
- "Jan Meerkamp (@meerkampdvv)"
|
||||
@@ -403,6 +410,30 @@ EXAMPLES = '''
|
||||
disk_saveas:
|
||||
name: bar-image
|
||||
disk_id: 1
|
||||
|
||||
- name: "Deploy 2 new instances with a custom 'start script'"
|
||||
community.general.one_vm:
|
||||
template_name: app_template
|
||||
count: 2
|
||||
updateconf:
|
||||
CONTEXT:
|
||||
START_SCRIPT: ip r r 169.254.16.86/32 dev eth0
|
||||
|
||||
- name: "Add a custom 'start script' to a running VM"
|
||||
community.general.one_vm:
|
||||
instance_ids: 351
|
||||
updateconf:
|
||||
CONTEXT:
|
||||
START_SCRIPT: ip r r 169.254.16.86/32 dev eth0
|
||||
|
||||
- name: "Update SSH public keys inside the VM's context"
|
||||
community.general.one_vm:
|
||||
instance_ids: 351
|
||||
updateconf:
|
||||
CONTEXT:
|
||||
SSH_PUBLIC_KEY: |-
|
||||
ssh-rsa ...
|
||||
ssh-ed25519 ...
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
@@ -510,6 +541,17 @@ instances:
|
||||
"TE_GALAXY": "bar",
|
||||
"USER_INPUTS": null
|
||||
}
|
||||
updateconf:
|
||||
description: A dictionary of key/values attributes that are set with the updateconf API call.
|
||||
type: dict
|
||||
version_added: 6.3.0
|
||||
sample: {
|
||||
"OS": { "ARCH": "x86_64" },
|
||||
"CONTEXT": {
|
||||
"START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0",
|
||||
"SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..."
|
||||
}
|
||||
}
|
||||
tagged_instances:
|
||||
description:
|
||||
- A list of instances info based on a specific attributes and/or
|
||||
@@ -615,6 +657,17 @@ tagged_instances:
|
||||
"TE_GALAXY": "bar",
|
||||
"USER_INPUTS": null
|
||||
}
|
||||
updateconf:
|
||||
description: A dictionary of key/values attributes that are set with the updateconf API call
|
||||
type: dict
|
||||
version_added: 6.3.0
|
||||
sample: {
|
||||
"OS": { "ARCH": "x86_64" },
|
||||
"CONTEXT": {
|
||||
"START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0",
|
||||
"SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..."
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
try:
|
||||
@@ -623,9 +676,52 @@ try:
|
||||
except ImportError:
|
||||
HAS_PYONE = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
import os
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.common.dict_transformations import dict_merge
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.opennebula import flatten, render
|
||||
|
||||
|
||||
UPDATECONF_ATTRIBUTES = {
|
||||
"OS": ["ARCH", "MACHINE", "KERNEL", "INITRD", "BOOTLOADER", "BOOT", "SD_DISK_BUS", "UUID"],
|
||||
"FEATURES": ["ACPI", "PAE", "APIC", "LOCALTIME", "HYPERV", "GUEST_AGENT"],
|
||||
"INPUT": ["TYPE", "BUS"],
|
||||
"GRAPHICS": ["TYPE", "LISTEN", "PASSWD", "KEYMAP"],
|
||||
"RAW": ["DATA", "DATA_VMX", "TYPE"],
|
||||
"CONTEXT": [],
|
||||
}
|
||||
|
||||
|
||||
def check_updateconf(module, to_check):
|
||||
'''Checks if attributes are compatible with one.vm.updateconf API call.'''
|
||||
for attr, subattributes in to_check.items():
|
||||
if attr not in UPDATECONF_ATTRIBUTES:
|
||||
module.fail_json(msg="'{0:}' is not a valid VM attribute.".format(attr))
|
||||
if not UPDATECONF_ATTRIBUTES[attr]:
|
||||
continue
|
||||
for subattr in subattributes:
|
||||
if subattr not in UPDATECONF_ATTRIBUTES[attr]:
|
||||
module.fail_json(msg="'{0:}' is not a valid VM subattribute of '{1:}'".format(subattr, attr))
|
||||
|
||||
|
||||
def parse_updateconf(vm_template):
|
||||
'''Extracts 'updateconf' attributes from a VM template.'''
|
||||
updateconf = {}
|
||||
for attr, subattributes in vm_template.items():
|
||||
if attr not in UPDATECONF_ATTRIBUTES:
|
||||
continue
|
||||
tmp = {}
|
||||
for subattr, value in subattributes.items():
|
||||
if UPDATECONF_ATTRIBUTES[attr] and subattr not in UPDATECONF_ATTRIBUTES[attr]:
|
||||
continue
|
||||
tmp[subattr] = value
|
||||
if tmp:
|
||||
updateconf[attr] = tmp
|
||||
return updateconf
|
||||
|
||||
|
||||
def get_template(module, client, predicate):
|
||||
|
||||
@@ -767,6 +863,8 @@ def get_vm_info(client, vm):
|
||||
|
||||
vm_labels, vm_attributes = get_vm_labels_and_attributes_dict(client, vm.ID)
|
||||
|
||||
updateconf = parse_updateconf(vm.TEMPLATE)
|
||||
|
||||
info = {
|
||||
'template_id': int(vm.TEMPLATE['TEMPLATE_ID']),
|
||||
'vm_id': vm.ID,
|
||||
@@ -785,7 +883,8 @@ def get_vm_info(client, vm):
|
||||
'uptime_h': int(vm_uptime),
|
||||
'attributes': vm_attributes,
|
||||
'mode': permissions_str,
|
||||
'labels': vm_labels
|
||||
'labels': vm_labels,
|
||||
'updateconf': updateconf,
|
||||
}
|
||||
|
||||
return info
|
||||
@@ -844,6 +943,28 @@ def set_vm_ownership(module, client, vms, owner_id, group_id):
|
||||
return changed
|
||||
|
||||
|
||||
def update_vm(module, client, vm, updateconf_dict):
|
||||
changed = False
|
||||
if not updateconf_dict:
|
||||
return changed
|
||||
|
||||
before = client.vm.info(vm.ID).TEMPLATE
|
||||
|
||||
client.vm.updateconf(vm.ID, render(updateconf_dict), 1) # 1: Merge new template with the existing one.
|
||||
|
||||
after = client.vm.info(vm.ID).TEMPLATE
|
||||
|
||||
changed = before != after
|
||||
return changed
|
||||
|
||||
|
||||
def update_vms(module, client, vms, *args):
|
||||
changed = False
|
||||
for vm in vms:
|
||||
changed = update_vm(module, client, vm, *args) or changed
|
||||
return changed
|
||||
|
||||
|
||||
def get_size_in_MB(module, size_str):
|
||||
|
||||
SYMBOLS = ['B', 'KB', 'MB', 'GB', 'TB']
|
||||
@@ -871,81 +992,46 @@ def get_size_in_MB(module, size_str):
|
||||
return size_in_MB
|
||||
|
||||
|
||||
def create_disk_str(module, client, template_id, disk_size_list):
|
||||
|
||||
if not disk_size_list:
|
||||
return ''
|
||||
|
||||
template = client.template.info(template_id)
|
||||
if isinstance(template.TEMPLATE['DISK'], list):
|
||||
# check if the number of disks is correct
|
||||
if len(template.TEMPLATE['DISK']) != len(disk_size_list):
|
||||
module.fail_json(msg='This template has ' + str(len(template.TEMPLATE['DISK'])) + ' disks but you defined ' + str(len(disk_size_list)))
|
||||
result = ''
|
||||
index = 0
|
||||
for DISKS in template.TEMPLATE['DISK']:
|
||||
disk = {}
|
||||
diskresult = ''
|
||||
# Get all info about existed disk e.g. IMAGE_ID,...
|
||||
for key, value in DISKS.items():
|
||||
disk[key] = value
|
||||
# copy disk attributes if it is not the size attribute
|
||||
diskresult += 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE')
|
||||
# Set the Disk Size
|
||||
diskresult += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[index]))) + ']\n'
|
||||
result += diskresult
|
||||
index += 1
|
||||
else:
|
||||
if len(disk_size_list) > 1:
|
||||
module.fail_json(msg='This template has one disk but you defined ' + str(len(disk_size_list)))
|
||||
disk = {}
|
||||
# Get all info about existed disk e.g. IMAGE_ID,...
|
||||
for key, value in template.TEMPLATE['DISK'].items():
|
||||
disk[key] = value
|
||||
# copy disk attributes if it is not the size attribute
|
||||
result = 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE')
|
||||
# Set the Disk Size
|
||||
result += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[0]))) + ']\n'
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def create_attributes_str(attributes_dict, labels_list):
|
||||
|
||||
attributes_str = ''
|
||||
|
||||
if labels_list:
|
||||
attributes_str += 'LABELS="' + ','.join('{label}'.format(label=label) for label in labels_list) + '"\n'
|
||||
if attributes_dict:
|
||||
attributes_str += '\n'.join('{key}="{val}"'.format(key=key.upper(), val=val) for key, val in attributes_dict.items()) + '\n'
|
||||
|
||||
return attributes_str
|
||||
|
||||
|
||||
def create_nics_str(network_attrs_list):
|
||||
nics_str = ''
|
||||
|
||||
for network in network_attrs_list:
|
||||
# Packing key-value dict in string with format key="value", key="value"
|
||||
network_str = ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in network.items())
|
||||
nics_str = nics_str + 'NIC = [' + network_str + ']\n'
|
||||
|
||||
return nics_str
|
||||
|
||||
|
||||
def create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent):
|
||||
|
||||
def create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent, updateconf_dict):
|
||||
if attributes_dict:
|
||||
vm_name = attributes_dict.get('NAME', '')
|
||||
|
||||
disk_str = create_disk_str(module, client, template_id, disk_size)
|
||||
vm_extra_template_str = create_attributes_str(attributes_dict, labels_list) + create_nics_str(network_attrs_list) + disk_str
|
||||
template = client.template.info(template_id).TEMPLATE
|
||||
|
||||
disk_count = len(flatten(template.get('DISK', [])))
|
||||
if disk_size:
|
||||
size_count = len(flatten(disk_size))
|
||||
# check if the number of disks is correct
|
||||
if disk_count != size_count:
|
||||
module.fail_json(msg='This template has ' + str(disk_count) + ' disks but you defined ' + str(size_count))
|
||||
|
||||
vm_extra_template = dict_merge(template or {}, attributes_dict or {})
|
||||
vm_extra_template = dict_merge(vm_extra_template, {
|
||||
'LABELS': ','.join(labels_list),
|
||||
'NIC': flatten(network_attrs_list, extract=True),
|
||||
'DISK': flatten([
|
||||
disk if not size else dict_merge(disk, {
|
||||
'SIZE': str(int(get_size_in_MB(module, size))),
|
||||
})
|
||||
for disk, size in zip(
|
||||
flatten(template.get('DISK', [])),
|
||||
flatten(disk_size or [None] * disk_count),
|
||||
)
|
||||
if disk is not None
|
||||
], extract=True)
|
||||
})
|
||||
vm_extra_template = dict_merge(vm_extra_template, updateconf_dict or {})
|
||||
|
||||
try:
|
||||
vm_id = client.template.instantiate(template_id, vm_name, vm_start_on_hold, vm_extra_template_str, vm_persistent)
|
||||
vm_id = client.template.instantiate(template_id,
|
||||
vm_name,
|
||||
vm_start_on_hold,
|
||||
render(vm_extra_template),
|
||||
vm_persistent)
|
||||
except pyone.OneException as e:
|
||||
module.fail_json(msg=str(e))
|
||||
vm = get_vm_by_id(client, vm_id)
|
||||
|
||||
vm = get_vm_by_id(client, vm_id)
|
||||
return get_vm_info(client, vm)
|
||||
|
||||
|
||||
@@ -1028,8 +1114,10 @@ def get_all_vms_by_attributes(client, attributes_dict, labels_list):
|
||||
return vm_list
|
||||
|
||||
|
||||
def create_count_of_vms(
|
||||
module, client, template_id, count, attributes_dict, labels_list, disk_size, network_attrs_list, wait, wait_timeout, vm_start_on_hold, vm_persistent):
|
||||
def create_count_of_vms(module, client,
|
||||
template_id, count,
|
||||
attributes_dict, labels_list, disk_size, network_attrs_list,
|
||||
wait, wait_timeout, vm_start_on_hold, vm_persistent, updateconf_dict):
|
||||
new_vms_list = []
|
||||
|
||||
vm_name = ''
|
||||
@@ -1058,7 +1146,9 @@ def create_count_of_vms(
|
||||
new_vm_name += next_index
|
||||
# Update NAME value in the attributes in case there is index
|
||||
attributes_dict['NAME'] = new_vm_name
|
||||
new_vm_dict = create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent)
|
||||
new_vm_dict = create_vm(module, client,
|
||||
template_id, attributes_dict, labels_list, disk_size, network_attrs_list,
|
||||
vm_start_on_hold, vm_persistent, updateconf_dict)
|
||||
new_vm_id = new_vm_dict.get('vm_id')
|
||||
new_vm = get_vm_by_id(client, new_vm_id)
|
||||
new_vms_list.append(new_vm)
|
||||
@@ -1076,9 +1166,10 @@ def create_count_of_vms(
|
||||
return True, new_vms_list, []
|
||||
|
||||
|
||||
def create_exact_count_of_vms(module, client, template_id, exact_count, attributes_dict, count_attributes_dict,
|
||||
labels_list, count_labels_list, disk_size, network_attrs_list, hard, wait, wait_timeout, vm_start_on_hold, vm_persistent):
|
||||
|
||||
def create_exact_count_of_vms(module, client,
|
||||
template_id, exact_count, attributes_dict, count_attributes_dict,
|
||||
labels_list, count_labels_list, disk_size, network_attrs_list,
|
||||
hard, wait, wait_timeout, vm_start_on_hold, vm_persistent, updateconf_dict):
|
||||
vm_list = get_all_vms_by_attributes(client, count_attributes_dict, count_labels_list)
|
||||
|
||||
vm_count_diff = exact_count - len(vm_list)
|
||||
@@ -1095,7 +1186,7 @@ def create_exact_count_of_vms(module, client, template_id, exact_count, attribut
|
||||
# Add more VMs
|
||||
changed, instances_list, tagged_instances = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict,
|
||||
labels_list, disk_size, network_attrs_list, wait, wait_timeout,
|
||||
vm_start_on_hold, vm_persistent)
|
||||
vm_start_on_hold, vm_persistent, updateconf_dict)
|
||||
|
||||
tagged_instances_list += instances_list
|
||||
elif vm_count_diff < 0:
|
||||
@@ -1398,7 +1489,8 @@ def main():
|
||||
"labels": {"default": [], "type": "list", "elements": "str"},
|
||||
"count_labels": {"required": False, "type": "list", "elements": "str"},
|
||||
"disk_saveas": {"type": "dict"},
|
||||
"persistent": {"default": False, "type": "bool"}
|
||||
"persistent": {"default": False, "type": "bool"},
|
||||
"updateconf": {"type": "dict"},
|
||||
}
|
||||
|
||||
module = AnsibleModule(argument_spec=fields,
|
||||
@@ -1452,6 +1544,7 @@ def main():
|
||||
count_labels = params.get('count_labels')
|
||||
disk_saveas = params.get('disk_saveas')
|
||||
persistent = params.get('persistent')
|
||||
updateconf = params.get('updateconf')
|
||||
|
||||
if not (auth.username and auth.password):
|
||||
module.warn("Credentials missing")
|
||||
@@ -1470,6 +1563,9 @@ def main():
|
||||
attributes = copy.copy(count_attributes)
|
||||
check_attributes(module, count_attributes)
|
||||
|
||||
if updateconf:
|
||||
check_updateconf(module, updateconf)
|
||||
|
||||
if count_labels and not labels:
|
||||
module.warn('When you pass `count_labels` without `labels` option when deploying, `labels` option will have same values implicitly.')
|
||||
labels = count_labels
|
||||
@@ -1529,13 +1625,13 @@ def main():
|
||||
# Deploy an exact count of VMs
|
||||
changed, instances_list, tagged_instances_list = create_exact_count_of_vms(module, one_client, template_id, exact_count, attributes,
|
||||
count_attributes, labels, count_labels, disk_size,
|
||||
networks, hard, wait, wait_timeout, put_vm_on_hold, persistent)
|
||||
networks, hard, wait, wait_timeout, put_vm_on_hold, persistent, updateconf)
|
||||
vms = tagged_instances_list
|
||||
elif template_id is not None and state == 'present':
|
||||
# Deploy count VMs
|
||||
changed, instances_list, tagged_instances_list = create_count_of_vms(module, one_client, template_id, count,
|
||||
attributes, labels, disk_size, networks, wait, wait_timeout,
|
||||
put_vm_on_hold, persistent)
|
||||
put_vm_on_hold, persistent, updateconf)
|
||||
# instances_list - new instances
|
||||
# tagged_instances_list - all instances with specified `count_attributes` and `count_labels`
|
||||
vms = instances_list
|
||||
@@ -1587,6 +1683,9 @@ def main():
|
||||
if owner_id is not None or group_id is not None:
|
||||
changed = set_vm_ownership(module, one_client, vms, owner_id, group_id) or changed
|
||||
|
||||
if template_id is None and updateconf is not None:
|
||||
changed = update_vms(module, one_client, vms, updateconf) or changed
|
||||
|
||||
if wait and not module.check_mode and state != 'present':
|
||||
wait_for = {
|
||||
'absent': wait_for_done,
|
||||
|
||||
@@ -15,16 +15,17 @@ DOCUMENTATION = '''
|
||||
---
|
||||
module: opkg
|
||||
author: "Patrick Pelletier (@skinp)"
|
||||
short_description: Package manager for OpenWrt
|
||||
short_description: Package manager for OpenWrt and Openembedded/Yocto based Linux distributions
|
||||
description:
|
||||
- Manages OpenWrt packages
|
||||
- Manages ipk packages for OpenWrt and Openembedded/Yocto based Linux distributions
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of package(s) to install/remove.
|
||||
- C(NAME=VERSION) syntax is also supported to install a package
|
||||
in a certain version. See the examples. This is supported since
|
||||
community.general 6.2.0.
|
||||
in a certain version. See the examples. This only works on Yocto based
|
||||
Linux distributions (opkg>=0.3.2) and not for OpenWrt. This is
|
||||
supported since community.general 6.2.0.
|
||||
aliases: [pkg]
|
||||
required: true
|
||||
type: list
|
||||
@@ -67,7 +68,7 @@ EXAMPLES = '''
|
||||
name: foo
|
||||
state: present
|
||||
|
||||
- name: Install foo in version 1.2
|
||||
- name: Install foo in version 1.2 (opkg>=0.3.2 on Yocto based Linux distributions)
|
||||
community.general.opkg:
|
||||
name: foo=1.2
|
||||
state: present
|
||||
@@ -97,144 +98,106 @@ EXAMPLES = '''
|
||||
force: overwrite
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.six.moves import shlex_quote
|
||||
from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
|
||||
from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
|
||||
|
||||
|
||||
def update_package_db(module, opkg_path):
|
||||
""" Updates packages list. """
|
||||
|
||||
rc, out, err = module.run_command([opkg_path, "update"])
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(msg="could not update package db")
|
||||
|
||||
|
||||
def query_package(module, opkg_path, name, version=None, state="present"):
|
||||
""" Returns whether a package is installed or not. """
|
||||
|
||||
if state == "present":
|
||||
rc, out, err = module.run_command([opkg_path, "list-installed", name])
|
||||
if rc != 0:
|
||||
return False
|
||||
# variable out is one line if the package is installed:
|
||||
# "NAME - VERSION - DESCRIPTION"
|
||||
if version is not None:
|
||||
if not out.startswith("%s - %s " % (name, version)):
|
||||
return False
|
||||
else:
|
||||
if not out.startswith(name + " "):
|
||||
return False
|
||||
return True
|
||||
else:
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
def split_name_and_version(module, package):
|
||||
""" Split the name and the version when using the NAME=VERSION syntax """
|
||||
splitted = package.split('=', 1)
|
||||
if len(splitted) == 1:
|
||||
return splitted[0], None
|
||||
else:
|
||||
return splitted[0], splitted[1]
|
||||
|
||||
|
||||
def remove_packages(module, opkg_path, packages):
|
||||
""" Uninstalls one or more packages if installed. """
|
||||
|
||||
p = module.params
|
||||
force = p["force"]
|
||||
if force:
|
||||
force = "--force-%s" % force
|
||||
|
||||
remove_c = 0
|
||||
# Using a for loop in case of error, we can report the package that failed
|
||||
for package in packages:
|
||||
package, version = split_name_and_version(module, package)
|
||||
|
||||
# Query the package first, to see if we even need to remove
|
||||
if not query_package(module, opkg_path, package):
|
||||
continue
|
||||
|
||||
if force:
|
||||
rc, out, err = module.run_command([opkg_path, "remove", force, package])
|
||||
else:
|
||||
rc, out, err = module.run_command([opkg_path, "remove", package])
|
||||
|
||||
if query_package(module, opkg_path, package):
|
||||
module.fail_json(msg="failed to remove %s: %s" % (package, out))
|
||||
|
||||
remove_c += 1
|
||||
|
||||
if remove_c > 0:
|
||||
|
||||
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
|
||||
|
||||
module.exit_json(changed=False, msg="package(s) already absent")
|
||||
|
||||
|
||||
def install_packages(module, opkg_path, packages):
|
||||
""" Installs one or more packages if not already installed. """
|
||||
|
||||
p = module.params
|
||||
force = p["force"]
|
||||
if force:
|
||||
force = "--force-%s" % force
|
||||
|
||||
install_c = 0
|
||||
|
||||
for package in packages:
|
||||
package, version = split_name_and_version(module, package)
|
||||
|
||||
if query_package(module, opkg_path, package, version) and (force != '--force-reinstall'):
|
||||
continue
|
||||
|
||||
if version is not None:
|
||||
version_str = "=%s" % version
|
||||
else:
|
||||
version_str = ""
|
||||
|
||||
if force:
|
||||
rc, out, err = module.run_command([opkg_path, "install", force, package + version_str])
|
||||
else:
|
||||
rc, out, err = module.run_command([opkg_path, "install", package + version_str])
|
||||
|
||||
if not query_package(module, opkg_path, package, version):
|
||||
module.fail_json(msg="failed to install %s%s: %s" % (package, version_str, out))
|
||||
|
||||
install_c += 1
|
||||
|
||||
if install_c > 0:
|
||||
module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
|
||||
|
||||
module.exit_json(changed=False, msg="package(s) already present")
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
class Opkg(StateModuleHelper):
|
||||
module = dict(
|
||||
argument_spec=dict(
|
||||
name=dict(aliases=["pkg"], required=True, type="list", elements="str"),
|
||||
state=dict(default="present", choices=["present", "installed", "absent", "removed"]),
|
||||
force=dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove",
|
||||
"checksum", "removal-of-dependent-packages"]),
|
||||
update_cache=dict(default=False, type='bool'),
|
||||
)
|
||||
),
|
||||
)
|
||||
|
||||
opkg_path = module.get_bin_path('opkg', True, ['/bin'])
|
||||
def __init_module__(self):
|
||||
self.vars.set("install_c", 0, output=False, change=True)
|
||||
self.vars.set("remove_c", 0, output=False, change=True)
|
||||
|
||||
p = module.params
|
||||
state_map = dict(
|
||||
query="list-installed",
|
||||
present="install",
|
||||
installed="install",
|
||||
absent="remove",
|
||||
removed="remove",
|
||||
)
|
||||
|
||||
if p["update_cache"]:
|
||||
update_package_db(module, opkg_path)
|
||||
def _force(value):
|
||||
if value == "":
|
||||
value = None
|
||||
return cmd_runner_fmt.as_optval("--force-")(value, ctx_ignore_none=True)
|
||||
|
||||
pkgs = p["name"]
|
||||
self.runner = CmdRunner(
|
||||
self.module,
|
||||
command="opkg",
|
||||
arg_formats=dict(
|
||||
package=cmd_runner_fmt.as_list(),
|
||||
state=cmd_runner_fmt.as_map(state_map),
|
||||
force=cmd_runner_fmt.as_func(_force),
|
||||
update_cache=cmd_runner_fmt.as_bool("update")
|
||||
),
|
||||
)
|
||||
|
||||
if p["state"] in ["present", "installed"]:
|
||||
install_packages(module, opkg_path, pkgs)
|
||||
@staticmethod
|
||||
def split_name_and_version(package):
|
||||
""" Split the name and the version when using the NAME=VERSION syntax """
|
||||
splitted = package.split('=', 1)
|
||||
if len(splitted) == 1:
|
||||
return splitted[0], None
|
||||
else:
|
||||
return splitted[0], splitted[1]
|
||||
|
||||
elif p["state"] in ["absent", "removed"]:
|
||||
remove_packages(module, opkg_path, pkgs)
|
||||
def _package_in_desired_state(self, name, want_installed, version=None):
|
||||
dummy, out, dummy = self.runner("state package").run(state="query", package=name)
|
||||
|
||||
has_package = out.startswith(name + " - %s" % ("" if not version else (version + " ")))
|
||||
return want_installed == has_package
|
||||
|
||||
def state_present(self):
|
||||
if self.vars.update_cache:
|
||||
dummy, rc, dummy = self.runner("update_cache").run()
|
||||
if rc != 0:
|
||||
self.do_raise("could not update package db")
|
||||
with self.runner("state force package") as ctx:
|
||||
for package in self.vars.name:
|
||||
pkg_name, pkg_version = self.split_name_and_version(package)
|
||||
if not self._package_in_desired_state(pkg_name, want_installed=True, version=pkg_version) or self.vars.force == "reinstall":
|
||||
ctx.run(package=package)
|
||||
if not self._package_in_desired_state(pkg_name, want_installed=True, version=pkg_version):
|
||||
self.do_raise("failed to install %s" % package)
|
||||
self.vars.install_c += 1
|
||||
if self.vars.install_c > 0:
|
||||
self.vars.msg = "installed %s package(s)" % (self.vars.install_c)
|
||||
else:
|
||||
self.vars.msg = "package(s) already present"
|
||||
|
||||
def state_absent(self):
|
||||
if self.vars.update_cache:
|
||||
dummy, rc, dummy = self.runner("update_cache").run()
|
||||
if rc != 0:
|
||||
self.do_raise("could not update package db")
|
||||
with self.runner("state force package") as ctx:
|
||||
for package in self.vars.name:
|
||||
package, dummy = self.split_name_and_version(package)
|
||||
if not self._package_in_desired_state(package, want_installed=False):
|
||||
ctx.run(package=package)
|
||||
if not self._package_in_desired_state(package, want_installed=False):
|
||||
self.do_raise("failed to remove %s" % package)
|
||||
self.vars.remove_c += 1
|
||||
if self.vars.remove_c > 0:
|
||||
self.vars.msg = "removed %s package(s)" % (self.vars.remove_c)
|
||||
else:
|
||||
self.vars.msg = "package(s) already absent"
|
||||
|
||||
state_installed = state_present
|
||||
state_removed = state_absent
|
||||
|
||||
|
||||
def main():
|
||||
Opkg.execute()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
@@ -95,6 +95,9 @@ options:
|
||||
notes:
|
||||
- This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip).
|
||||
- This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module.
|
||||
- >
|
||||
This module will honor C(pipx) environment variables such as but not limited to C(PIPX_HOME) and C(PIPX_BIN_DIR)
|
||||
passed using the R(environment Ansible keyword, playbooks_environment).
|
||||
- Please note that C(pipx) requires Python 3.6 or above.
|
||||
- >
|
||||
This first implementation does not verify whether a specified version constraint has been installed or not.
|
||||
|
||||
@@ -50,6 +50,9 @@ options:
|
||||
notes:
|
||||
- This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip).
|
||||
- This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module.
|
||||
- >
|
||||
This module will honor C(pipx) environment variables such as but not limited to C(PIPX_HOME) and C(PIPX_BIN_DIR)
|
||||
passed using the R(environment Ansible keyword, playbooks_environment).
|
||||
- Please note that C(pipx) requires Python 3.6 or above.
|
||||
- See also the C(pipx) documentation at U(https://pypa.github.io/pipx/).
|
||||
author:
|
||||
|
||||
@@ -104,6 +104,7 @@ options:
|
||||
- Move the disk to this storage when I(state=moved).
|
||||
- You can move between storages only in scope of one VM.
|
||||
- Mutually exclusive with I(target_vmid).
|
||||
- Consider increasing I(timeout) in case of large disk images or slow storage backend.
|
||||
type: str
|
||||
target_vmid:
|
||||
description:
|
||||
@@ -113,8 +114,8 @@ options:
|
||||
type: int
|
||||
timeout:
|
||||
description:
|
||||
- Timeout in seconds to wait when moving disk.
|
||||
- Used only when I(state=moved).
|
||||
- Timeout in seconds to wait for slow operations such as importing disk or moving disk between storages.
|
||||
- Used only when I(state) is C(present) or C(moved).
|
||||
type: int
|
||||
default: 600
|
||||
aio:
|
||||
@@ -172,6 +173,7 @@ options:
|
||||
- C(<STORAGE>:<VMID>/<FULL_NAME>) or C(<ABSOLUTE_PATH>/<FULL_NAME>)
|
||||
- Attention! Only root can use absolute paths.
|
||||
- This parameter is mutually exclusive with I(size).
|
||||
- Increase I(timeout) parameter when importing large disk images or using slow storage.
|
||||
type: str
|
||||
iops:
|
||||
description:
|
||||
@@ -471,6 +473,16 @@ class ProxmoxDiskAnsible(ProxmoxAnsible):
|
||||
params.update(dict((k, int(v)) for k, v in params.items() if isinstance(v, bool)))
|
||||
return params
|
||||
|
||||
def wait_till_complete_or_timeout(self, node_name, task_id):
|
||||
timeout = self.module.params['timeout']
|
||||
while timeout:
|
||||
if self.api_task_ok(node_name, task_id):
|
||||
return True
|
||||
timeout -= 1
|
||||
if timeout <= 0:
|
||||
return False
|
||||
sleep(1)
|
||||
|
||||
def create_disk(self, disk, vmid, vm, vm_config):
|
||||
create = self.module.params['create']
|
||||
if create == 'disabled' and disk not in vm_config:
|
||||
@@ -484,20 +496,23 @@ class ProxmoxDiskAnsible(ProxmoxAnsible):
|
||||
|
||||
if import_string:
|
||||
config_str = "%s:%s,import-from=%s" % (self.module.params["storage"], "0", import_string)
|
||||
timeout_str = "Reached timeout while importing VM disk. Last line in task before timeout: %s"
|
||||
ok_str = "Disk %s imported into VM %s"
|
||||
else:
|
||||
config_str = "%s:%s" % (self.module.params["storage"], self.module.params["size"])
|
||||
ok_str = "Disk %s created in VM %s"
|
||||
timeout_str = "Reached timeout while creating VM disk. Last line in task before timeout: %s"
|
||||
|
||||
for k, v in attributes.items():
|
||||
config_str += ',%s=%s' % (k, v)
|
||||
|
||||
create_disk = {self.module.params["disk"]: config_str}
|
||||
self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(**create_disk)
|
||||
return True, "Disk %s created in VM %s" % (disk, vmid)
|
||||
disk_config_to_apply = {self.module.params["disk"]: config_str}
|
||||
|
||||
if create in ['disabled', 'regular'] and disk in vm_config:
|
||||
# UPDATE
|
||||
disk_config = disk_conf_str_to_dict(vm_config[disk])
|
||||
config_str = disk_config["volume"]
|
||||
ok_str = "Disk %s updated in VM %s"
|
||||
attributes = self.get_create_attributes()
|
||||
# 'import_from' fails on disk updates
|
||||
attributes.pop('import_from', None)
|
||||
@@ -513,9 +528,16 @@ class ProxmoxDiskAnsible(ProxmoxAnsible):
|
||||
if disk_config == attributes:
|
||||
return False, "Disk %s is up to date in VM %s" % (disk, vmid)
|
||||
|
||||
update_disk = {self.module.params["disk"]: config_str}
|
||||
self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(**update_disk)
|
||||
return True, "Disk %s updated in VM %s" % (disk, vmid)
|
||||
disk_config_to_apply = {self.module.params["disk"]: config_str}
|
||||
|
||||
current_task_id = self.proxmox_api.nodes(vm['node']).qemu(vmid).config.post(**disk_config_to_apply)
|
||||
task_success = self.wait_till_complete_or_timeout(vm['node'], current_task_id)
|
||||
if task_success:
|
||||
return True, ok_str % (disk, vmid)
|
||||
else:
|
||||
self.module.fail_json(
|
||||
msg=timeout_str % self.proxmox_api.nodes(vm['node']).tasks(current_task_id).log.get()[:1]
|
||||
)
|
||||
|
||||
def move_disk(self, disk, vmid, vm, vm_config):
|
||||
params = dict()
|
||||
@@ -535,20 +557,15 @@ class ProxmoxDiskAnsible(ProxmoxAnsible):
|
||||
if params['storage'] == disk_config['storage_name']:
|
||||
return False
|
||||
|
||||
taskid = self.proxmox_api.nodes(vm['node']).qemu(vmid).move_disk.post(**params)
|
||||
timeout = self.module.params['timeout']
|
||||
while timeout:
|
||||
status_data = self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()
|
||||
if status_data['status'] == 'stopped' and status_data['exitstatus'] == 'OK':
|
||||
return True
|
||||
if timeout <= 0:
|
||||
self.module.fail_json(
|
||||
msg='Reached timeout while waiting for moving VM disk. Last line in task before timeout: %s' %
|
||||
self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
|
||||
|
||||
sleep(1)
|
||||
timeout -= 1
|
||||
return True
|
||||
task_id = self.proxmox_api.nodes(vm['node']).qemu(vmid).move_disk.post(**params)
|
||||
task_success = self.wait_till_complete_or_timeout(vm['node'], task_id)
|
||||
if task_success:
|
||||
return True
|
||||
else:
|
||||
self.module.fail_json(
|
||||
msg='Reached timeout while waiting for moving VM disk. Last line in task before timeout: %s' %
|
||||
self.proxmox_api.nodes(vm['node']).tasks(task_id).log.get()[:1]
|
||||
)
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
@@ -40,6 +40,11 @@ options:
|
||||
description:
|
||||
- access.redhat.com or Red Hat Satellite or Katello password
|
||||
type: str
|
||||
token:
|
||||
description:
|
||||
- sso.redhat.com API access token.
|
||||
type: str
|
||||
version_added: 6.3.0
|
||||
server_hostname:
|
||||
description:
|
||||
- Specify an alternative Red Hat Subscription Management or Red Hat Satellite or Katello server
|
||||
@@ -294,10 +299,11 @@ class RegistrationBase(object):
|
||||
|
||||
REDHAT_REPO = "/etc/yum.repos.d/redhat.repo"
|
||||
|
||||
def __init__(self, module, username=None, password=None):
|
||||
def __init__(self, module, username=None, password=None, token=None):
|
||||
self.module = module
|
||||
self.username = username
|
||||
self.password = password
|
||||
self.token = token
|
||||
|
||||
def configure(self):
|
||||
raise NotImplementedError("Must be implemented by a sub-class")
|
||||
@@ -340,8 +346,8 @@ class RegistrationBase(object):
|
||||
|
||||
|
||||
class Rhsm(RegistrationBase):
|
||||
def __init__(self, module, username=None, password=None):
|
||||
RegistrationBase.__init__(self, module, username, password)
|
||||
def __init__(self, module, username=None, password=None, token=None):
|
||||
RegistrationBase.__init__(self, module, username, password, token)
|
||||
self.module = module
|
||||
|
||||
def enable(self):
|
||||
@@ -397,7 +403,7 @@ class Rhsm(RegistrationBase):
|
||||
else:
|
||||
return False
|
||||
|
||||
def register(self, username, password, auto_attach, activationkey, org_id,
|
||||
def register(self, username, password, token, auto_attach, activationkey, org_id,
|
||||
consumer_type, consumer_name, consumer_id, force_register, environment,
|
||||
release):
|
||||
'''
|
||||
@@ -433,6 +439,8 @@ class Rhsm(RegistrationBase):
|
||||
|
||||
if activationkey:
|
||||
args.extend(['--activationkey', activationkey])
|
||||
elif token:
|
||||
args.extend(['--token', token])
|
||||
else:
|
||||
if username:
|
||||
args.extend(['--username', username])
|
||||
@@ -794,6 +802,7 @@ def main():
|
||||
'state': {'default': 'present', 'choices': ['present', 'absent']},
|
||||
'username': {},
|
||||
'password': {'no_log': True},
|
||||
'token': {'no_log': True},
|
||||
'server_hostname': {},
|
||||
'server_insecure': {},
|
||||
'server_prefix': {},
|
||||
@@ -831,17 +840,20 @@ def main():
|
||||
['server_proxy_hostname', 'server_proxy_port'],
|
||||
['server_proxy_user', 'server_proxy_password']],
|
||||
mutually_exclusive=[['activationkey', 'username'],
|
||||
['activationkey', 'token'],
|
||||
['token', 'username'],
|
||||
['activationkey', 'consumer_id'],
|
||||
['activationkey', 'environment'],
|
||||
['activationkey', 'auto_attach'],
|
||||
['pool', 'pool_ids']],
|
||||
required_if=[['state', 'present', ['username', 'activationkey'], True]],
|
||||
required_if=[['state', 'present', ['username', 'activationkey', 'token'], True]],
|
||||
)
|
||||
|
||||
rhsm.module = module
|
||||
state = module.params['state']
|
||||
username = module.params['username']
|
||||
password = module.params['password']
|
||||
token = module.params['token']
|
||||
server_hostname = module.params['server_hostname']
|
||||
server_insecure = module.params['server_insecure']
|
||||
server_prefix = module.params['server_prefix']
|
||||
@@ -914,7 +926,7 @@ def main():
|
||||
try:
|
||||
rhsm.enable()
|
||||
rhsm.configure(**module.params)
|
||||
rhsm.register(username, password, auto_attach, activationkey, org_id,
|
||||
rhsm.register(username, password, token, auto_attach, activationkey, org_id,
|
||||
consumer_type, consumer_name, consumer_id, force_register,
|
||||
environment, release)
|
||||
if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True:
|
||||
|
||||
@@ -92,7 +92,7 @@ EXAMPLES = '''
|
||||
RETURN = '''
|
||||
scaleway_compute_private_network:
|
||||
description: Information on the VPC.
|
||||
returned: success when C(state=present)
|
||||
returned: success when I(state=present)
|
||||
type: dict
|
||||
sample:
|
||||
{
|
||||
|
||||
@@ -19,7 +19,7 @@ short_description: Scaleway database backups management module
|
||||
version_added: 1.2.0
|
||||
author: Guillaume Rodriguez (@guillaume_ro_fr)
|
||||
description:
|
||||
- This module manages database backups on Scaleway account U(https://developer.scaleway.com).
|
||||
- "This module manages database backups on Scaleway account U(https://developer.scaleway.com)."
|
||||
extends_documentation_fragment:
|
||||
- community.general.scaleway
|
||||
options:
|
||||
@@ -58,7 +58,7 @@ options:
|
||||
description:
|
||||
- Name used to identify the database backup.
|
||||
- Required for C(present) state.
|
||||
- Ignored when C(state=absent), C(state=exported) or C(state=restored).
|
||||
- Ignored when I(state=absent), I(state=exported) or I(state=restored).
|
||||
type: str
|
||||
required: false
|
||||
|
||||
@@ -66,7 +66,7 @@ options:
|
||||
description:
|
||||
- Name used to identify the database.
|
||||
- Required for C(present) and C(restored) states.
|
||||
- Ignored when C(state=absent) or C(state=exported).
|
||||
- Ignored when I(state=absent) or I(state=exported).
|
||||
type: str
|
||||
required: false
|
||||
|
||||
@@ -74,14 +74,14 @@ options:
|
||||
description:
|
||||
- UUID of the instance associated to the database backup.
|
||||
- Required for C(present) and C(restored) states.
|
||||
- Ignored when C(state=absent) or C(state=exported).
|
||||
- Ignored when I(state=absent) or I(state=exported).
|
||||
type: str
|
||||
required: false
|
||||
|
||||
expires_at:
|
||||
description:
|
||||
- Expiration datetime of the database backup (ISO 8601 format).
|
||||
- Ignored when C(state=absent), C(state=exported) or C(state=restored).
|
||||
- Ignored when I(state=absent), I(state=exported) or I(state=restored).
|
||||
type: str
|
||||
required: false
|
||||
|
||||
@@ -139,7 +139,7 @@ EXAMPLES = '''
|
||||
RETURN = '''
|
||||
metadata:
|
||||
description: Backup metadata.
|
||||
returned: when C(state=present), C(state=exported) or C(state=restored)
|
||||
returned: when I(state=present), I(state=exported) or I(state=restored)
|
||||
type: dict
|
||||
sample: {
|
||||
"metadata": {
|
||||
|
||||
@@ -26,7 +26,7 @@ options:
|
||||
region:
|
||||
type: str
|
||||
description:
|
||||
- Scaleway compute zone
|
||||
- Scaleway compute zone.
|
||||
required: true
|
||||
choices:
|
||||
- ams1
|
||||
|
||||
@@ -88,8 +88,8 @@ EXAMPLES = '''
|
||||
|
||||
RETURN = '''
|
||||
data:
|
||||
description: This is only present when C(state=present)
|
||||
returned: when C(state=present)
|
||||
description: This is only present when I(state=present).
|
||||
returned: when I(state=present)
|
||||
type: dict
|
||||
sample: {
|
||||
"ips": [
|
||||
|
||||
@@ -29,19 +29,19 @@ options:
|
||||
name:
|
||||
type: str
|
||||
description:
|
||||
- Name of the load-balancer
|
||||
- Name of the load-balancer.
|
||||
required: true
|
||||
|
||||
description:
|
||||
type: str
|
||||
description:
|
||||
- Description of the load-balancer
|
||||
- Description of the load-balancer.
|
||||
required: true
|
||||
|
||||
organization_id:
|
||||
type: str
|
||||
description:
|
||||
- Organization identifier
|
||||
- Organization identifier.
|
||||
required: true
|
||||
|
||||
state:
|
||||
@@ -56,7 +56,7 @@ options:
|
||||
region:
|
||||
type: str
|
||||
description:
|
||||
- Scaleway zone
|
||||
- Scaleway zone.
|
||||
required: true
|
||||
choices:
|
||||
- nl-ams
|
||||
@@ -68,7 +68,7 @@ options:
|
||||
elements: str
|
||||
default: []
|
||||
description:
|
||||
- List of tags to apply to the load-balancer
|
||||
- List of tags to apply to the load-balancer.
|
||||
|
||||
wait:
|
||||
description:
|
||||
@@ -79,14 +79,14 @@ options:
|
||||
wait_timeout:
|
||||
type: int
|
||||
description:
|
||||
- Time to wait for the load-balancer to reach the expected state
|
||||
- Time to wait for the load-balancer to reach the expected state.
|
||||
required: false
|
||||
default: 300
|
||||
|
||||
wait_sleep_time:
|
||||
type: int
|
||||
description:
|
||||
- Time to wait before every attempt to check the state of the load-balancer
|
||||
- Time to wait before every attempt to check the state of the load-balancer.
|
||||
required: false
|
||||
default: 3
|
||||
'''
|
||||
|
||||
@@ -20,7 +20,7 @@ author:
|
||||
options:
|
||||
api_url:
|
||||
description:
|
||||
- Scaleway API URL
|
||||
- Scaleway API URL.
|
||||
default: 'https://account.scaleway.com'
|
||||
aliases: ['base_url']
|
||||
extends_documentation_fragment:
|
||||
@@ -42,7 +42,7 @@ EXAMPLES = r'''
|
||||
RETURN = r'''
|
||||
---
|
||||
scaleway_organization_info:
|
||||
description: Response from Scaleway API
|
||||
description: Response from Scaleway API.
|
||||
returned: success
|
||||
type: list
|
||||
elements: dict
|
||||
|
||||
@@ -18,8 +18,7 @@ short_description: Scaleway private network management
|
||||
version_added: 4.5.0
|
||||
author: Pascal MANGIN (@pastral)
|
||||
description:
|
||||
- This module manages private network on Scaleway account
|
||||
(U(https://developer.scaleway.com)).
|
||||
- "This module manages private network on Scaleway account (U(https://developer.scaleway.com))."
|
||||
extends_documentation_fragment:
|
||||
- community.general.scaleway
|
||||
|
||||
@@ -88,7 +87,7 @@ EXAMPLES = '''
|
||||
RETURN = '''
|
||||
scaleway_private_network:
|
||||
description: Information on the VPC.
|
||||
returned: success when C(state=present)
|
||||
returned: success when I(state=present)
|
||||
type: dict
|
||||
sample:
|
||||
{
|
||||
|
||||
@@ -18,8 +18,7 @@ module: scaleway_security_group
|
||||
short_description: Scaleway Security Group management module
|
||||
author: Antoine Barbare (@abarbare)
|
||||
description:
|
||||
- This module manages Security Group on Scaleway account
|
||||
U(https://developer.scaleway.com).
|
||||
- "This module manages Security Group on Scaleway account U(https://developer.scaleway.com)."
|
||||
extends_documentation_fragment:
|
||||
- community.general.scaleway
|
||||
|
||||
@@ -105,8 +104,8 @@ EXAMPLES = '''
|
||||
|
||||
RETURN = '''
|
||||
data:
|
||||
description: This is only present when C(state=present)
|
||||
returned: when C(state=present)
|
||||
description: This is only present when I(state=present).
|
||||
returned: when I(state=present)
|
||||
type: dict
|
||||
sample: {
|
||||
"scaleway_security_group": {
|
||||
|
||||
@@ -18,8 +18,7 @@ module: scaleway_security_group_rule
|
||||
short_description: Scaleway Security Group Rule management module
|
||||
author: Antoine Barbare (@abarbare)
|
||||
description:
|
||||
- This module manages Security Group Rule on Scaleway account
|
||||
U(https://developer.scaleway.com)
|
||||
- "This module manages Security Group Rule on Scaleway account U(https://developer.scaleway.com)."
|
||||
extends_documentation_fragment:
|
||||
- community.general.scaleway
|
||||
requirements:
|
||||
@@ -53,7 +52,7 @@ options:
|
||||
protocol:
|
||||
type: str
|
||||
description:
|
||||
- Network protocol to use
|
||||
- Network protocol to use.
|
||||
choices:
|
||||
- TCP
|
||||
- UDP
|
||||
@@ -62,20 +61,20 @@ options:
|
||||
|
||||
port:
|
||||
description:
|
||||
- Port related to the rule, null value for all the ports
|
||||
- Port related to the rule, null value for all the ports.
|
||||
required: true
|
||||
type: int
|
||||
|
||||
ip_range:
|
||||
type: str
|
||||
description:
|
||||
- IPV4 CIDR notation to apply to the rule
|
||||
- IPV4 CIDR notation to apply to the rule.
|
||||
default: 0.0.0.0/0
|
||||
|
||||
direction:
|
||||
type: str
|
||||
description:
|
||||
- Rule direction
|
||||
- Rule direction.
|
||||
choices:
|
||||
- inbound
|
||||
- outbound
|
||||
@@ -84,7 +83,7 @@ options:
|
||||
action:
|
||||
type: str
|
||||
description:
|
||||
- Rule action
|
||||
- Rule action.
|
||||
choices:
|
||||
- accept
|
||||
- drop
|
||||
@@ -93,7 +92,7 @@ options:
|
||||
security_group:
|
||||
type: str
|
||||
description:
|
||||
- Security Group unique identifier
|
||||
- Security Group unique identifier.
|
||||
required: true
|
||||
'''
|
||||
|
||||
@@ -113,8 +112,8 @@ EXAMPLES = '''
|
||||
|
||||
RETURN = '''
|
||||
data:
|
||||
description: This is only present when C(state=present)
|
||||
returned: when C(state=present)
|
||||
description: This is only present when I(state=present).
|
||||
returned: when I(state=present)
|
||||
type: dict
|
||||
sample: {
|
||||
"scaleway_security_group_rule": {
|
||||
|
||||
@@ -19,8 +19,7 @@ module: scaleway_sshkey
|
||||
short_description: Scaleway SSH keys management module
|
||||
author: Remy Leone (@remyleone)
|
||||
description:
|
||||
- This module manages SSH keys on Scaleway account
|
||||
U(https://developer.scaleway.com)
|
||||
- "This module manages SSH keys on Scaleway account U(https://developer.scaleway.com)."
|
||||
extends_documentation_fragment:
|
||||
- community.general.scaleway
|
||||
|
||||
@@ -42,7 +41,7 @@ options:
|
||||
api_url:
|
||||
type: str
|
||||
description:
|
||||
- Scaleway API URL
|
||||
- Scaleway API URL.
|
||||
default: 'https://account.scaleway.com'
|
||||
aliases: ['base_url']
|
||||
'''
|
||||
@@ -67,8 +66,8 @@ EXAMPLES = '''
|
||||
|
||||
RETURN = '''
|
||||
data:
|
||||
description: This is only present when C(state=present)
|
||||
returned: when C(state=present)
|
||||
description: This is only present when I(state=present).
|
||||
returned: when I(state=present)
|
||||
type: dict
|
||||
sample: {
|
||||
"ssh_public_keys": [
|
||||
|
||||
@@ -19,8 +19,8 @@ module: scaleway_user_data
|
||||
short_description: Scaleway user_data management module
|
||||
author: Remy Leone (@remyleone)
|
||||
description:
|
||||
- "This module manages user_data on compute instances on Scaleway."
|
||||
- "It can be used to configure cloud-init for instance"
|
||||
- This module manages user_data on compute instances on Scaleway.
|
||||
- It can be used to configure cloud-init for instance.
|
||||
extends_documentation_fragment:
|
||||
- community.general.scaleway
|
||||
|
||||
@@ -30,20 +30,20 @@ options:
|
||||
server_id:
|
||||
type: str
|
||||
description:
|
||||
- Scaleway Compute instance ID of the server
|
||||
- Scaleway Compute instance ID of the server.
|
||||
required: true
|
||||
|
||||
user_data:
|
||||
type: dict
|
||||
description:
|
||||
- User defined data. Typically used with C(cloud-init).
|
||||
- Pass your cloud-init script here as a string
|
||||
- Pass your C(cloud-init) script here as a string.
|
||||
required: false
|
||||
|
||||
region:
|
||||
type: str
|
||||
description:
|
||||
- Scaleway compute zone
|
||||
- Scaleway compute zone.
|
||||
required: true
|
||||
choices:
|
||||
- ams1
|
||||
|
||||
@@ -18,8 +18,7 @@ module: scaleway_volume
|
||||
short_description: Scaleway volumes management module
|
||||
author: Henryk Konsek (@hekonsek)
|
||||
description:
|
||||
- This module manages volumes on Scaleway account
|
||||
U(https://developer.scaleway.com)
|
||||
- "This module manages volumes on Scaleway account U(https://developer.scaleway.com)."
|
||||
extends_documentation_fragment:
|
||||
- community.general.scaleway
|
||||
|
||||
@@ -28,7 +27,7 @@ options:
|
||||
state:
|
||||
type: str
|
||||
description:
|
||||
- Indicate desired state of the volume.
|
||||
- Indicate desired state of the volume.
|
||||
default: present
|
||||
choices:
|
||||
- present
|
||||
@@ -36,7 +35,7 @@ options:
|
||||
region:
|
||||
type: str
|
||||
description:
|
||||
- Scaleway region to use (for example par1).
|
||||
- Scaleway region to use (for example par1).
|
||||
required: true
|
||||
choices:
|
||||
- ams1
|
||||
@@ -50,25 +49,25 @@ options:
|
||||
name:
|
||||
type: str
|
||||
description:
|
||||
- Name used to identify the volume.
|
||||
- Name used to identify the volume.
|
||||
required: true
|
||||
project:
|
||||
type: str
|
||||
description:
|
||||
- Scaleway project ID to which volume belongs.
|
||||
- Scaleway project ID to which volume belongs.
|
||||
version_added: 4.3.0
|
||||
organization:
|
||||
type: str
|
||||
description:
|
||||
- ScaleWay organization ID to which volume belongs.
|
||||
- ScaleWay organization ID to which volume belongs.
|
||||
size:
|
||||
type: int
|
||||
description:
|
||||
- Size of the volume in bytes.
|
||||
- Size of the volume in bytes.
|
||||
volume_type:
|
||||
type: str
|
||||
description:
|
||||
- Type of the volume (for example 'l_ssd').
|
||||
- Type of the volume (for example 'l_ssd').
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
@@ -91,8 +90,8 @@ EXAMPLES = '''
|
||||
|
||||
RETURN = '''
|
||||
data:
|
||||
description: This is only present when C(state=present)
|
||||
returned: when C(state=present)
|
||||
description: This is only present when I(state=present).
|
||||
returned: when I(state=present)
|
||||
type: dict
|
||||
sample: {
|
||||
"volume": {
|
||||
@@ -100,9 +99,9 @@ data:
|
||||
"id": "c675f420-cfeb-48ff-ba2a-9d2a4dbe3fcd",
|
||||
"name": "volume-0-3",
|
||||
"project": "000a115d-2852-4b0a-9ce8-47f1134ba95a",
|
||||
"server": null,
|
||||
"size": 10000000000,
|
||||
"volume_type": "l_ssd"
|
||||
"server": null,
|
||||
"size": 10000000000,
|
||||
"volume_type": "l_ssd"
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
@@ -399,8 +399,7 @@ class Snap(CmdStateModuleHelper):
|
||||
|
||||
|
||||
def main():
|
||||
snap = Snap()
|
||||
snap.run()
|
||||
Snap.execute()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
@@ -79,9 +79,8 @@ snap_aliases:
|
||||
|
||||
import re
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.module_helper import (
|
||||
CmdStateModuleHelper
|
||||
)
|
||||
from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
|
||||
from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
|
||||
|
||||
|
||||
_state_map = dict(
|
||||
@@ -91,7 +90,7 @@ _state_map = dict(
|
||||
)
|
||||
|
||||
|
||||
class SnapAlias(CmdStateModuleHelper):
|
||||
class SnapAlias(StateModuleHelper):
|
||||
_RE_ALIAS_LIST = re.compile(r"^(?P<snap>[\w-]+)\s+(?P<alias>[\w-]+)\s+.*$")
|
||||
|
||||
module = dict(
|
||||
@@ -106,25 +105,26 @@ class SnapAlias(CmdStateModuleHelper):
|
||||
],
|
||||
supports_check_mode=True,
|
||||
)
|
||||
command = "snap"
|
||||
command_args_formats = dict(
|
||||
_alias=dict(fmt=lambda v: [v]),
|
||||
state=dict(fmt=lambda v: [_state_map[v]]),
|
||||
)
|
||||
check_rc = False
|
||||
|
||||
command_args_formats = {
|
||||
"state": cmd_runner_fmt.as_map(_state_map),
|
||||
"name": cmd_runner_fmt.as_list(),
|
||||
"alias": cmd_runner_fmt.as_list(),
|
||||
}
|
||||
|
||||
def _aliases(self):
|
||||
n = self.vars.name
|
||||
return {n: self._get_aliases_for(n)} if n else self._get_aliases()
|
||||
|
||||
def __init_module__(self):
|
||||
self.runner = CmdRunner(self.module, "snap", self.command_args_formats, check_rc=False)
|
||||
self.vars.set("snap_aliases", self._aliases(), change=True, diff=True)
|
||||
|
||||
def __quit_module__(self):
|
||||
self.vars.snap_aliases = self._aliases()
|
||||
|
||||
def _get_aliases(self):
|
||||
def process_get_aliases(rc, out, err):
|
||||
def process(rc, out, err):
|
||||
if err:
|
||||
return {}
|
||||
aliases = [self._RE_ALIAS_LIST.match(a.strip()) for a in out.splitlines()[1:]]
|
||||
@@ -134,9 +134,8 @@ class SnapAlias(CmdStateModuleHelper):
|
||||
results[snap] = results.get(snap, []) + [alias]
|
||||
return results
|
||||
|
||||
return self.run_command(params=[{'state': 'info'}, 'name'], check_rc=True,
|
||||
publish_rc=False, publish_out=False, publish_err=False, publish_cmd=False,
|
||||
process_output=process_get_aliases)
|
||||
with self.runner("state name", check_rc=True, output_process=process) as ctx:
|
||||
return ctx.run(state="info")
|
||||
|
||||
def _get_aliases_for(self, name):
|
||||
return self._get_aliases().get(name, [])
|
||||
@@ -152,24 +151,30 @@ class SnapAlias(CmdStateModuleHelper):
|
||||
return any(alias in aliases for aliases in self.vars.snap_aliases.values())
|
||||
|
||||
def state_present(self):
|
||||
for alias in self.vars.alias:
|
||||
if not self._has_alias(self.vars.name, alias):
|
||||
for _alias in self.vars.alias:
|
||||
if not self._has_alias(self.vars.name, _alias):
|
||||
self.changed = True
|
||||
if not self.module.check_mode:
|
||||
self.run_command(params=['state', 'name', {'_alias': alias}])
|
||||
with self.runner("state name alias", check_mode_skip=True) as ctx:
|
||||
ctx.run(alias=_alias)
|
||||
if self.verbosity >= 4:
|
||||
self.vars.run_info = ctx.run_info
|
||||
|
||||
def state_absent(self):
|
||||
if not self.vars.alias:
|
||||
if self._has_alias(self.vars.name):
|
||||
self.changed = True
|
||||
if not self.module.check_mode:
|
||||
self.run_command(params=['state', 'name'])
|
||||
with self.runner("state name", check_mode_skip=True) as ctx:
|
||||
ctx.run()
|
||||
if self.verbosity >= 4:
|
||||
self.vars.run_info = ctx.run_info
|
||||
else:
|
||||
for alias in self.vars.alias:
|
||||
if self._has_alias(self.vars.name, alias):
|
||||
for _alias in self.vars.alias:
|
||||
if self._has_alias(self.vars.name, _alias):
|
||||
self.changed = True
|
||||
if not self.module.check_mode:
|
||||
self.run_command(params=['state', {'_alias': alias}])
|
||||
with self.runner("state alias", check_mode_skip=True) as ctx:
|
||||
ctx.run(alias=_alias)
|
||||
if self.verbosity >= 4:
|
||||
self.vars.run_info = ctx.run_info
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
@@ -43,6 +43,12 @@ options:
|
||||
- Whether a password will be required to run the sudo'd command.
|
||||
default: true
|
||||
type: bool
|
||||
setenv:
|
||||
description:
|
||||
- Whether to allow keeping the environment when command is run with sudo.
|
||||
default: false
|
||||
type: bool
|
||||
version_added: 6.3.0
|
||||
host:
|
||||
description:
|
||||
- Specify the host the rule is for.
|
||||
@@ -123,6 +129,13 @@ EXAMPLES = '''
|
||||
community.general.sudoers:
|
||||
name: alice-service
|
||||
state: absent
|
||||
|
||||
- name: Allow alice to sudo /usr/local/bin/upload and keep env variables
|
||||
community.general.sudoers:
|
||||
name: allow-alice-upload
|
||||
user: alice
|
||||
commands: /usr/local/bin/upload
|
||||
setenv: true
|
||||
'''
|
||||
|
||||
import os
|
||||
@@ -143,6 +156,7 @@ class Sudoers(object):
|
||||
self.group = module.params['group']
|
||||
self.state = module.params['state']
|
||||
self.nopassword = module.params['nopassword']
|
||||
self.setenv = module.params['setenv']
|
||||
self.host = module.params['host']
|
||||
self.runas = module.params['runas']
|
||||
self.sudoers_path = module.params['sudoers_path']
|
||||
@@ -185,12 +199,14 @@ class Sudoers(object):
|
||||
|
||||
commands_str = ', '.join(self.commands)
|
||||
nopasswd_str = 'NOPASSWD:' if self.nopassword else ''
|
||||
setenv_str = 'SETENV:' if self.setenv else ''
|
||||
runas_str = '({runas})'.format(runas=self.runas) if self.runas is not None else ''
|
||||
return "{owner} {host}={runas}{nopasswd} {commands}\n".format(
|
||||
return "{owner} {host}={runas}{nopasswd}{setenv} {commands}\n".format(
|
||||
owner=owner,
|
||||
host=self.host,
|
||||
runas=runas_str,
|
||||
nopasswd=nopasswd_str,
|
||||
setenv=setenv_str,
|
||||
commands=commands_str
|
||||
)
|
||||
|
||||
@@ -239,6 +255,10 @@ def main():
|
||||
'type': 'bool',
|
||||
'default': True,
|
||||
},
|
||||
'setenv': {
|
||||
'type': 'bool',
|
||||
'default': False,
|
||||
},
|
||||
'host': {
|
||||
'type': 'str',
|
||||
'default': 'ALL',
|
||||
|
||||
@@ -48,7 +48,9 @@ options:
|
||||
version_added: 3.0.0
|
||||
workspace:
|
||||
description:
|
||||
- The terraform workspace to work with.
|
||||
- The terraform workspace to work with. This sets the C(TF_WORKSPACE) environmental variable
|
||||
that is used to override workspace selection. For more information about workspaces
|
||||
have a look at U(https://developer.hashicorp.com/terraform/language/state/workspaces).
|
||||
type: str
|
||||
default: default
|
||||
purge_workspace:
|
||||
@@ -297,9 +299,9 @@ def preflight_validation(bin_path, project_path, version, variables_args=None, p
|
||||
if not os.path.isdir(project_path):
|
||||
module.fail_json(msg="Path for Terraform project '{0}' doesn't exist on this host - check the path and try again please.".format(project_path))
|
||||
if LooseVersion(version) < LooseVersion('0.15.0'):
|
||||
rc, out, err = module.run_command([bin_path, 'validate'] + variables_args, check_rc=True, cwd=project_path)
|
||||
module.run_command([bin_path, 'validate', '-no-color'] + variables_args, check_rc=True, cwd=project_path)
|
||||
else:
|
||||
rc, out, err = module.run_command([bin_path, 'validate'], check_rc=True, cwd=project_path)
|
||||
module.run_command([bin_path, 'validate', '-no-color'], check_rc=True, cwd=project_path)
|
||||
|
||||
|
||||
def _state_args(state_file):
|
||||
@@ -310,7 +312,7 @@ def _state_args(state_file):
|
||||
return []
|
||||
|
||||
|
||||
def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths):
|
||||
def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths, workspace):
|
||||
command = [bin_path, 'init', '-input=false', '-no-color']
|
||||
if backend_config:
|
||||
for key, val in backend_config.items():
|
||||
@@ -328,7 +330,7 @@ def init_plugins(bin_path, project_path, backend_config, backend_config_files, i
|
||||
if plugin_paths:
|
||||
for plugin_path in plugin_paths:
|
||||
command.extend(['-plugin-dir', plugin_path])
|
||||
rc, out, err = module.run_command(command, check_rc=True, cwd=project_path)
|
||||
rc, out, err = module.run_command(command, check_rc=True, cwd=project_path, environ_update={"TF_WORKSPACE": workspace})
|
||||
|
||||
|
||||
def get_workspace_context(bin_path, project_path):
|
||||
@@ -343,6 +345,7 @@ def get_workspace_context(bin_path, project_path):
|
||||
continue
|
||||
elif stripped_item.startswith('* '):
|
||||
workspace_ctx["current"] = stripped_item.replace('* ', '')
|
||||
workspace_ctx["all"].append(stripped_item.replace('* ', ''))
|
||||
else:
|
||||
workspace_ctx["all"].append(stripped_item)
|
||||
return workspace_ctx
|
||||
@@ -485,7 +488,7 @@ def main():
|
||||
|
||||
if force_init:
|
||||
if overwrite_init or not os.path.isfile(os.path.join(project_path, ".terraform", "terraform.tfstate")):
|
||||
init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths)
|
||||
init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths, workspace)
|
||||
|
||||
workspace_ctx = get_workspace_context(command[0], project_path)
|
||||
if workspace_ctx["current"] != workspace:
|
||||
|
||||
@@ -266,7 +266,7 @@ EXAMPLES = r'''
|
||||
community.general.xml:
|
||||
path: /foo/bar.xml
|
||||
xpath: /business/website
|
||||
children: []
|
||||
set_children: []
|
||||
|
||||
# In case of namespaces, like in below XML, they have to be explicitly stated.
|
||||
#
|
||||
@@ -961,7 +961,7 @@ def main():
|
||||
# add_children && set_children both set?: should have already aborted by now
|
||||
|
||||
# set_children set?
|
||||
if set_children:
|
||||
if set_children is not None:
|
||||
set_target_children(module, doc, xpath, namespaces, set_children, input_type)
|
||||
|
||||
# add_children set?
|
||||
|
||||
@@ -0,0 +1,47 @@
|
||||
---
|
||||
# Copyright (c) Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
# This test represent the misleading behavior of the following issue: https://github.com/ansible-collections/community.general/issues/635
|
||||
- name: Disable MPM event module
|
||||
apache2_module:
|
||||
name: "{{ item.module}}"
|
||||
state: "{{ item.state}}"
|
||||
ignore_configcheck: true
|
||||
register: disable_mpm_modules
|
||||
with_items:
|
||||
- { module: mpm_event, state: absent }
|
||||
- { module: mpm_prefork, state: present }
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "'warnings' in disable_mpm_modules"
|
||||
- disable_mpm_modules["warnings"] == [
|
||||
"No MPM module loaded! apache2 reload AND other module actions will fail if no MPM module is loaded immediately.",
|
||||
"No MPM module loaded! apache2 reload AND other module actions will fail if no MPM module is loaded immediately."
|
||||
]
|
||||
|
||||
- name: Enable MPM event module - Revert previous change
|
||||
apache2_module:
|
||||
name: "{{ item.module}}"
|
||||
state: "{{ item.state}}"
|
||||
ignore_configcheck: true
|
||||
register: disable_mpm_modules
|
||||
with_items:
|
||||
- { module: mpm_prefork, state: absent }
|
||||
- { module: mpm_event, state: present }
|
||||
|
||||
- name: Disable MPM event module
|
||||
apache2_module:
|
||||
name: "{{ item.module}}"
|
||||
state: "{{ item.state}}"
|
||||
ignore_configcheck: true
|
||||
warn_mpm_absent: false
|
||||
register: disable_mpm_modules
|
||||
with_items:
|
||||
- { module: mpm_event, state: absent }
|
||||
- { module: mpm_prefork, state: present }
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "'warnings' not in disable_mpm_modules"
|
||||
@@ -180,7 +180,7 @@
|
||||
- mpm_worker
|
||||
- mpm_event
|
||||
- mpm_prefork
|
||||
ignore_errors: yes
|
||||
ignore_errors: true
|
||||
register: remove_with_configcheck
|
||||
|
||||
- name: ensure configcheck fails task with when run without mpm modules
|
||||
|
||||
@@ -45,3 +45,8 @@
|
||||
that: modules_before.stdout == modules_after.stdout
|
||||
when: ansible_os_family in ['Debian', 'Suse']
|
||||
# centos/RHEL does not have a2enmod/a2dismod
|
||||
|
||||
- name: include misleading warning test
|
||||
include: 635-apache2-misleading-warning.yml
|
||||
when: ansible_os_family in ['Debian']
|
||||
# Suse has mpm_event module compiled within the base apache2
|
||||
@@ -29,6 +29,12 @@
|
||||
- cloud-init
|
||||
- udev
|
||||
|
||||
- name: Ensure systemd-network user exists
|
||||
user:
|
||||
name: systemd-network
|
||||
state: present
|
||||
when: ansible_distribution == 'Fedora' and ansible_distribution_major_version|int >= 37
|
||||
|
||||
- name: setup run cloud-init
|
||||
service:
|
||||
name: cloud-init-local
|
||||
|
||||
@@ -12,3 +12,4 @@ skip/rhel8.3
|
||||
skip/rhel8.4
|
||||
skip/rhel8.5
|
||||
skip/rhel9.0
|
||||
skip/rhel9.1
|
||||
|
||||
@@ -8,3 +8,4 @@ skip/freebsd
|
||||
skip/osx
|
||||
skip/macos
|
||||
skip/rhel9.0 # See https://www.reddit.com/r/Fedora/comments/si7nzk/homectl/
|
||||
skip/rhel9.1 # See https://www.reddit.com/r/Fedora/comments/si7nzk/homectl/
|
||||
|
||||
@@ -8,3 +8,5 @@ destructive
|
||||
skip/aix
|
||||
skip/osx # FIXME
|
||||
skip/rhel9.0 # FIXME
|
||||
skip/rhel9.1 # FIXME
|
||||
skip/freebsd12.4 # FIXME
|
||||
|
||||
@@ -66,6 +66,59 @@
|
||||
- result.existing == {}
|
||||
- result.end_state.name == "{{ federation }}"
|
||||
|
||||
- name: Create new user federation in admin realm
|
||||
community.general.keycloak_user_federation:
|
||||
auth_keycloak_url: "{{ url }}"
|
||||
auth_realm: "{{ admin_realm }}"
|
||||
auth_username: "{{ admin_user }}"
|
||||
auth_password: "{{ admin_password }}"
|
||||
realm: "{{ admin_realm }}"
|
||||
name: "{{ federation }}"
|
||||
state: present
|
||||
provider_id: ldap
|
||||
provider_type: org.keycloak.storage.UserStorageProvider
|
||||
config:
|
||||
enabled: true
|
||||
priority: 0
|
||||
fullSyncPeriod: -1
|
||||
changedSyncPeriod: -1
|
||||
cachePolicy: DEFAULT
|
||||
batchSizeForSync: 1000
|
||||
editMode: READ_ONLY
|
||||
importEnabled: true
|
||||
syncRegistrations: false
|
||||
vendor: other
|
||||
usernameLDAPAttribute: uid
|
||||
rdnLDAPAttribute: uid
|
||||
uuidLDAPAttribute: entryUUID
|
||||
userObjectClasses: "inetOrgPerson, organizationalPerson"
|
||||
connectionUrl: "ldaps://ldap.example.com:636"
|
||||
usersDn: "ou=Users,dc=example,dc=com"
|
||||
authType: simple
|
||||
bindDn: cn=directory reader
|
||||
bindCredential: secret
|
||||
searchScope: 1
|
||||
validatePasswordPolicy: false
|
||||
trustEmail: false
|
||||
useTruststoreSpi: "ldapsOnly"
|
||||
connectionPooling: true
|
||||
pagination: true
|
||||
allowKerberosAuthentication: false
|
||||
useKerberosForPasswordAuthentication: false
|
||||
debug: false
|
||||
register: result
|
||||
|
||||
- name: Debug
|
||||
debug:
|
||||
var: result
|
||||
|
||||
- name: Assert user federation created (admin realm)
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.existing == {}
|
||||
- result.end_state.name == "{{ federation }}"
|
||||
|
||||
- name: Update existing user federation (no change)
|
||||
community.general.keycloak_user_federation:
|
||||
auth_keycloak_url: "{{ url }}"
|
||||
@@ -121,6 +174,61 @@
|
||||
- result.end_state != {}
|
||||
- result.end_state.name == "{{ federation }}"
|
||||
|
||||
- name: Update existing user federation (no change, admin realm)
|
||||
community.general.keycloak_user_federation:
|
||||
auth_keycloak_url: "{{ url }}"
|
||||
auth_realm: "{{ admin_realm }}"
|
||||
auth_username: "{{ admin_user }}"
|
||||
auth_password: "{{ admin_password }}"
|
||||
realm: "{{ admin_realm }}"
|
||||
name: "{{ federation }}"
|
||||
state: present
|
||||
provider_id: ldap
|
||||
provider_type: org.keycloak.storage.UserStorageProvider
|
||||
config:
|
||||
enabled: true
|
||||
priority: 0
|
||||
fullSyncPeriod: -1
|
||||
changedSyncPeriod: -1
|
||||
cachePolicy: DEFAULT
|
||||
batchSizeForSync: 1000
|
||||
editMode: READ_ONLY
|
||||
importEnabled: true
|
||||
syncRegistrations: false
|
||||
vendor: other
|
||||
usernameLDAPAttribute: uid
|
||||
rdnLDAPAttribute: uid
|
||||
uuidLDAPAttribute: entryUUID
|
||||
userObjectClasses: "inetOrgPerson, organizationalPerson"
|
||||
connectionUrl: "ldaps://ldap.example.com:636"
|
||||
usersDn: "ou=Users,dc=example,dc=com"
|
||||
authType: simple
|
||||
bindDn: cn=directory reader
|
||||
bindCredential: "**********"
|
||||
searchScope: 1
|
||||
validatePasswordPolicy: false
|
||||
trustEmail: false
|
||||
useTruststoreSpi: "ldapsOnly"
|
||||
connectionPooling: true
|
||||
pagination: true
|
||||
allowKerberosAuthentication: false
|
||||
useKerberosForPasswordAuthentication: false
|
||||
debug: false
|
||||
register: result
|
||||
|
||||
- name: Debug
|
||||
debug:
|
||||
var: result
|
||||
|
||||
- name: Assert user federation unchanged (admin realm)
|
||||
assert:
|
||||
that:
|
||||
- result is not changed
|
||||
- result.existing != {}
|
||||
- result.existing.name == "{{ federation }}"
|
||||
- result.end_state != {}
|
||||
- result.end_state.name == "{{ federation }}"
|
||||
|
||||
- name: Update existing user federation (with change)
|
||||
community.general.keycloak_user_federation:
|
||||
auth_keycloak_url: "{{ url }}"
|
||||
@@ -162,6 +270,14 @@
|
||||
useKerberosForPasswordAuthentication: false
|
||||
debug: false
|
||||
mappers:
|
||||
# overwrite / update pre existing default mapper
|
||||
- name: "username"
|
||||
providerId: "user-attribute-ldap-mapper"
|
||||
config:
|
||||
ldap.attribute: ldap_user
|
||||
user.model.attribute: usr
|
||||
read.only: true
|
||||
# create new mapper
|
||||
- name: "full name"
|
||||
providerId: "full-name-ldap-mapper"
|
||||
providerType: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
|
||||
@@ -227,3 +343,83 @@
|
||||
- result is not changed
|
||||
- result.existing == {}
|
||||
- result.end_state == {}
|
||||
|
||||
- name: Create new user federation together with mappers
|
||||
community.general.keycloak_user_federation:
|
||||
auth_keycloak_url: "{{ url }}"
|
||||
auth_realm: "{{ admin_realm }}"
|
||||
auth_username: "{{ admin_user }}"
|
||||
auth_password: "{{ admin_password }}"
|
||||
realm: "{{ realm }}"
|
||||
name: "{{ federation }}"
|
||||
state: present
|
||||
provider_id: ldap
|
||||
provider_type: org.keycloak.storage.UserStorageProvider
|
||||
config:
|
||||
enabled: true
|
||||
priority: 0
|
||||
fullSyncPeriod: -1
|
||||
changedSyncPeriod: -1
|
||||
cachePolicy: DEFAULT
|
||||
batchSizeForSync: 1000
|
||||
editMode: READ_ONLY
|
||||
importEnabled: true
|
||||
syncRegistrations: false
|
||||
vendor: other
|
||||
usernameLDAPAttribute: uid
|
||||
rdnLDAPAttribute: uid
|
||||
uuidLDAPAttribute: entryUUID
|
||||
userObjectClasses: "inetOrgPerson, organizationalPerson"
|
||||
connectionUrl: "ldaps://ldap.example.com:636"
|
||||
usersDn: "ou=Users,dc=example,dc=com"
|
||||
authType: simple
|
||||
bindDn: cn=directory reader
|
||||
bindCredential: secret
|
||||
searchScope: 1
|
||||
validatePasswordPolicy: false
|
||||
trustEmail: false
|
||||
useTruststoreSpi: "ldapsOnly"
|
||||
connectionPooling: true
|
||||
pagination: true
|
||||
allowKerberosAuthentication: false
|
||||
useKerberosForPasswordAuthentication: false
|
||||
debug: false
|
||||
mappers:
|
||||
# overwrite / update pre existing default mapper
|
||||
- name: "username"
|
||||
providerId: "user-attribute-ldap-mapper"
|
||||
config:
|
||||
ldap.attribute: ldap_user
|
||||
user.model.attribute: usr
|
||||
read.only: true
|
||||
# create new mapper
|
||||
- name: "full name"
|
||||
providerId: "full-name-ldap-mapper"
|
||||
providerType: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
|
||||
config:
|
||||
ldap.full.name.attribute: cn
|
||||
read.only: true
|
||||
write.only: false
|
||||
register: result
|
||||
|
||||
- name: Debug
|
||||
debug:
|
||||
var: result
|
||||
|
||||
- name: Assert user federation created
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.existing == {}
|
||||
- result.end_state.name == "{{ federation }}"
|
||||
|
||||
## no point in retesting this, just doing it to clean up introduced server changes
|
||||
- name: Delete absent user federation
|
||||
community.general.keycloak_user_federation:
|
||||
auth_keycloak_url: "{{ url }}"
|
||||
auth_realm: "{{ admin_realm }}"
|
||||
auth_username: "{{ admin_user }}"
|
||||
auth_password: "{{ admin_password }}"
|
||||
realm: "{{ realm }}"
|
||||
name: "{{ federation }}"
|
||||
state: absent
|
||||
|
||||
@@ -35,12 +35,13 @@ from ansible_collections.community.general.plugins.module_utils.mh.deco import c
|
||||
|
||||
|
||||
class MSimple(ModuleHelper):
|
||||
output_params = ('a', 'b', 'c')
|
||||
output_params = ('a', 'b', 'c', 'm')
|
||||
module = dict(
|
||||
argument_spec=dict(
|
||||
a=dict(type='int', default=0),
|
||||
b=dict(type='str'),
|
||||
c=dict(type='str'),
|
||||
m=dict(type='str'),
|
||||
),
|
||||
supports_check_mode=True,
|
||||
)
|
||||
@@ -56,6 +57,8 @@ class MSimple(ModuleHelper):
|
||||
self.vars['c'] = str(self.vars.c) * 3
|
||||
|
||||
def __run__(self):
|
||||
if self.vars.m:
|
||||
self.vars.msg = self.vars.m
|
||||
if self.vars.a >= 100:
|
||||
raise Exception("a >= 100")
|
||||
if self.vars.c == "abc change":
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
- include_tasks: msimple.yml
|
||||
- include_tasks: msimple_output_conflict.yml
|
||||
- include_tasks: mdepfail.yml
|
||||
- include_tasks: mstate.yml
|
||||
- include_tasks: msimpleda.yml
|
||||
|
||||
@@ -0,0 +1,54 @@
|
||||
# Copyright (c) 2023, Alexei Znamensky
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
- name: test msimple conflict output (set a=80)
|
||||
msimple:
|
||||
a: 80
|
||||
register: simple1
|
||||
|
||||
- name: assert simple1
|
||||
assert:
|
||||
that:
|
||||
- simple1.a == 80
|
||||
- simple1.abc == "abc"
|
||||
- simple1 is not changed
|
||||
- simple1.value is none
|
||||
|
||||
- name: test msimple conflict output 2
|
||||
msimple:
|
||||
a: 80
|
||||
m: a message in a bottle
|
||||
register: simple2
|
||||
|
||||
- name: assert simple2
|
||||
assert:
|
||||
that:
|
||||
- simple1.a == 80
|
||||
- simple1.abc == "abc"
|
||||
- simple1 is not changed
|
||||
- simple1.value is none
|
||||
- >
|
||||
"_msg" not in simple2
|
||||
- >
|
||||
simple2.msg == "a message in a bottle"
|
||||
|
||||
- name: test msimple 3
|
||||
msimple:
|
||||
a: 101
|
||||
m: a message in a bottle
|
||||
ignore_errors: yes
|
||||
register: simple3
|
||||
|
||||
- name: assert simple3
|
||||
assert:
|
||||
that:
|
||||
- simple3.a == 101
|
||||
- >
|
||||
simple3.msg == "Module failed with exception: a >= 100"
|
||||
- >
|
||||
simple3._msg == "a message in a bottle"
|
||||
- simple3.abc == "abc"
|
||||
- simple3 is failed
|
||||
- simple3 is not changed
|
||||
- simple3.value is none
|
||||
@@ -8,4 +8,5 @@ skip/osx
|
||||
skip/macos
|
||||
skip/rhel8.0
|
||||
skip/rhel9.0
|
||||
skip/rhel9.1
|
||||
skip/freebsd
|
||||
|
||||
@@ -4,4 +4,4 @@
|
||||
|
||||
azp/generic/1
|
||||
cloud/opennebula
|
||||
disabled # FIXME
|
||||
disabled # FIXME - when this is fixed, also re-enable the generic tests in CI!
|
||||
|
||||
@@ -4,4 +4,4 @@
|
||||
|
||||
azp/generic/1
|
||||
cloud/opennebula
|
||||
disabled # FIXME
|
||||
disabled # FIXME - when this is fixed, also re-enable the generic tests in CI!
|
||||
|
||||
@@ -230,3 +230,30 @@
|
||||
that:
|
||||
- install_jupyter is changed
|
||||
- '"ipython" in install_jupyter.stdout'
|
||||
|
||||
##############################################################################
|
||||
- name: ensure /opt/pipx
|
||||
ansible.builtin.file:
|
||||
path: /opt/pipx
|
||||
state: directory
|
||||
mode: 0755
|
||||
|
||||
- name: install tox site-wide
|
||||
community.general.pipx:
|
||||
name: tox
|
||||
state: latest
|
||||
register: install_tox_sitewide
|
||||
environment:
|
||||
PIPX_HOME: /opt/pipx
|
||||
PIPX_BIN_DIR: /usr/local/bin
|
||||
|
||||
- name: stat /usr/local/bin/tox
|
||||
ansible.builtin.stat:
|
||||
path: /usr/local/bin/tox
|
||||
register: usrlocaltox
|
||||
|
||||
- name: check assertions
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- install_tox_sitewide is changed
|
||||
- usrlocaltox.stat.exists
|
||||
|
||||
@@ -501,14 +501,19 @@
|
||||
# NOTE: FreeBSD 12.0 test runner receives a "connection reset by peer" after ~20% downloaded so we are
|
||||
# only running this on 12.1 or higher
|
||||
#
|
||||
# NOTE: FreeBSD 12.4 fails to update repositories because it cannot load certificates from /usr/share/keys/pkg/trusted
|
||||
# knowledge has to take a look)
|
||||
#
|
||||
# NOTE: FreeBSD 13.0 fails to update the package catalogue for unknown reasons (someone with FreeBSD
|
||||
# knowledge has to take a look)
|
||||
#
|
||||
# NOTE: FreeBSD 13.1 fails to update the package catalogue for unknown reasons (someone with FreeBSD
|
||||
# knowledge has to take a look)
|
||||
#
|
||||
# See also
|
||||
# https://github.com/ansible-collections/community.general/issues/5795
|
||||
when: >-
|
||||
(ansible_distribution_version is version('12.01', '>=') and ansible_distribution_version is version('13.0', '<'))
|
||||
(ansible_distribution_version is version('12.01', '>=') and ansible_distribution_version is version('12.4', '<'))
|
||||
or ansible_distribution_version is version('13.2', '>=')
|
||||
block:
|
||||
- name: Setup testjail
|
||||
|
||||
@@ -0,0 +1,6 @@
|
||||
---
|
||||
# Copyright (c) Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
# Do nothing
|
||||
@@ -5,7 +5,9 @@
|
||||
|
||||
- name: Install required libs
|
||||
pip:
|
||||
name: stormssh
|
||||
name:
|
||||
- stormssh
|
||||
- 'paramiko<3.0.0'
|
||||
state: present
|
||||
extra_args: "-c {{ remote_constraints }}"
|
||||
|
||||
|
||||
@@ -145,6 +145,20 @@
|
||||
src: "{{ sudoers_path }}/my-sudo-rule-7"
|
||||
register: rule_7_contents
|
||||
|
||||
- name: Create rule with setenv parameters
|
||||
community.general.sudoers:
|
||||
name: my-sudo-rule-8
|
||||
state: present
|
||||
user: alice
|
||||
commands: /usr/local/bin/command
|
||||
setenv: true
|
||||
register: rule_8
|
||||
|
||||
- name: Grab contents of my-sudo-rule-8
|
||||
ansible.builtin.slurp:
|
||||
src: "{{ sudoers_path }}/my-sudo-rule-8"
|
||||
register: rule_8_contents
|
||||
|
||||
- name: Revoke rule 1
|
||||
community.general.sudoers:
|
||||
name: my-sudo-rule-1
|
||||
@@ -202,7 +216,6 @@
|
||||
when: ansible_os_family != 'Darwin'
|
||||
register: edge_case_3
|
||||
|
||||
|
||||
- name: Revoke non-existing rule
|
||||
community.general.sudoers:
|
||||
name: non-existing-rule
|
||||
@@ -243,6 +256,7 @@
|
||||
- "rule_5_contents['content'] | b64decode == 'alice ALL=NOPASSWD: /usr/local/bin/command\n'"
|
||||
- "rule_6_contents['content'] | b64decode == 'alice ALL=(bob)NOPASSWD: /usr/local/bin/command\n'"
|
||||
- "rule_7_contents['content'] | b64decode == 'alice host-1=NOPASSWD: /usr/local/bin/command\n'"
|
||||
- "rule_8_contents['content'] | b64decode == 'alice ALL=NOPASSWD:SETENV: /usr/local/bin/command\n'"
|
||||
|
||||
- name: Check revocation stat
|
||||
ansible.builtin.assert:
|
||||
|
||||
@@ -10,6 +10,7 @@ skip/macos
|
||||
skip/freebsd
|
||||
skip/rhel8.0 # FIXME
|
||||
skip/rhel9.0 # FIXME
|
||||
skip/rhel9.1 # FIXME
|
||||
skip/docker
|
||||
needs/root
|
||||
needs/target/setup_epel
|
||||
|
||||
@@ -0,0 +1,11 @@
|
||||
<?xml version='1.0' encoding='UTF-8'?>
|
||||
<business type="bar">
|
||||
<name>Tasty Beverage Co.</name>
|
||||
<beers>
|
||||
</beers>
|
||||
<rating subjective="true">10</rating>
|
||||
<website>
|
||||
<mobilefriendly/>
|
||||
<address>http://tastybeverageco.com</address>
|
||||
</website>
|
||||
</business>
|
||||
@@ -0,0 +1,3 @@
|
||||
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
SPDX-License-Identifier: GPL-3.0-or-later
|
||||
SPDX-FileCopyrightText: Ansible Project
|
||||
@@ -8,6 +8,32 @@
|
||||
src: fixtures/ansible-xml-beers.xml
|
||||
dest: /tmp/ansible-xml-beers.xml
|
||||
|
||||
- name: Set child elements - empty list
|
||||
xml:
|
||||
path: /tmp/ansible-xml-beers.xml
|
||||
xpath: /business/beers
|
||||
set_children: []
|
||||
register: set_children_elements
|
||||
|
||||
- name: Compare to expected result
|
||||
copy:
|
||||
src: results/test-set-children-elements-empty-list.xml
|
||||
dest: /tmp/ansible-xml-beers.xml
|
||||
check_mode: yes
|
||||
diff: yes
|
||||
register: comparison
|
||||
|
||||
- name: Test expected result
|
||||
assert:
|
||||
that:
|
||||
- set_children_elements is changed
|
||||
- comparison is not changed # identical
|
||||
#command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml
|
||||
|
||||
- name: Setup test fixture
|
||||
copy:
|
||||
src: fixtures/ansible-xml-beers.xml
|
||||
dest: /tmp/ansible-xml-beers.xml
|
||||
|
||||
- name: Set child elements
|
||||
xml:
|
||||
|
||||
@@ -111,15 +111,15 @@ MOCK_RECORDS = [
|
||||
|
||||
class MockBitwarden(Bitwarden):
|
||||
|
||||
logged_in = True
|
||||
unlocked = True
|
||||
|
||||
def _get_matches(self, search_value, search_field="name"):
|
||||
def _get_matches(self, search_value, search_field="name", collection_id=None):
|
||||
return list(filter(lambda record: record[search_field] == search_value, MOCK_RECORDS))
|
||||
|
||||
|
||||
class LoggedOutMockBitwarden(MockBitwarden):
|
||||
|
||||
logged_in = False
|
||||
unlocked = False
|
||||
|
||||
|
||||
class TestLookupModule(unittest.TestCase):
|
||||
@@ -155,7 +155,7 @@ class TestLookupModule(unittest.TestCase):
|
||||
self.lookup.run(['a_test'])[0])
|
||||
|
||||
@patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', LoggedOutMockBitwarden())
|
||||
def test_bitwarden_plugin_logged_out(self):
|
||||
def test_bitwarden_plugin_unlocked(self):
|
||||
record = MOCK_RECORDS[0]
|
||||
record_name = record['name']
|
||||
with self.assertRaises(AnsibleError):
|
||||
|
||||
54
tests/unit/plugins/module_utils/test_ocapi_utils.py
Normal file
54
tests/unit/plugins/module_utils/test_ocapi_utils.py
Normal file
@@ -0,0 +1,54 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) Ansible project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import tempfile
|
||||
|
||||
from ansible_collections.community.general.tests.unit.compat import unittest
|
||||
from ansible_collections.community.general.plugins.module_utils.ocapi_utils import OcapiUtils
|
||||
|
||||
|
||||
class TestOcapiUtils(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.tempdir = tempfile.mkdtemp()
|
||||
self.utils = OcapiUtils(creds={"user": "a_user", "pswd": "a_password"},
|
||||
base_uri="fakeUri",
|
||||
proxy_slot_number=None,
|
||||
timeout=30,
|
||||
module=None)
|
||||
|
||||
def tearDown(self):
|
||||
shutil.rmtree(self.tempdir)
|
||||
|
||||
def test_prepare_multipart_firmware_upload(self):
|
||||
# Generate a binary file and save it
|
||||
filename = "fake_firmware.bin"
|
||||
filepath = os.path.join(self.tempdir, filename)
|
||||
file_contents = b'\x00\x01\x02\x03\x04'
|
||||
with open(filepath, 'wb+') as f:
|
||||
f.write(file_contents)
|
||||
|
||||
# Call prepare_mutipart_firmware_upload
|
||||
content_type, b_form_data = self.utils.prepare_multipart_firmware_upload(filepath)
|
||||
|
||||
# Check the returned content-type
|
||||
content_type_pattern = r"multipart/form-data; boundary=(.*)"
|
||||
m = re.match(content_type_pattern, content_type)
|
||||
self.assertIsNotNone(m)
|
||||
|
||||
# Check the returned binary data
|
||||
boundary = m.group(1)
|
||||
expected_content_text = '--%s\r\n' % boundary
|
||||
expected_content_text += 'Content-Disposition: form-data; name="FirmwareFile"; filename="%s"\r\n' % filename
|
||||
expected_content_text += 'Content-Type: application/octet-stream\r\n\r\n'
|
||||
expected_content_bytes = bytearray(expected_content_text, 'utf-8')
|
||||
expected_content_bytes += file_contents
|
||||
expected_content_bytes += bytearray('\r\n--%s--' % boundary, 'utf-8')
|
||||
self.assertEqual(expected_content_bytes, b_form_data)
|
||||
91
tests/unit/plugins/module_utils/test_opennebula.py
Normal file
91
tests/unit/plugins/module_utils/test_opennebula.py
Normal file
@@ -0,0 +1,91 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2023, Michal Opala <mopala@opennebula.io>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
import textwrap
|
||||
|
||||
import pytest
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.opennebula import flatten, render
|
||||
|
||||
|
||||
FLATTEN_VALID = [
|
||||
(
|
||||
[[[1]], [2], 3],
|
||||
False,
|
||||
[1, 2, 3]
|
||||
),
|
||||
(
|
||||
[[[1]], [2], 3],
|
||||
True,
|
||||
[1, 2, 3]
|
||||
),
|
||||
(
|
||||
[[1]],
|
||||
False,
|
||||
[1]
|
||||
),
|
||||
(
|
||||
[[1]],
|
||||
True,
|
||||
1
|
||||
),
|
||||
(
|
||||
1,
|
||||
False,
|
||||
[1]
|
||||
),
|
||||
(
|
||||
1,
|
||||
True,
|
||||
1
|
||||
),
|
||||
]
|
||||
|
||||
RENDER_VALID = [
|
||||
(
|
||||
{
|
||||
"NIC": {"NAME": "NIC0", "NETWORK_ID": 0},
|
||||
"CPU": 1,
|
||||
"MEMORY": 1024,
|
||||
},
|
||||
textwrap.dedent('''
|
||||
CPU="1"
|
||||
MEMORY="1024"
|
||||
NIC=[NAME="NIC0",NETWORK_ID="0"]
|
||||
''').strip()
|
||||
),
|
||||
(
|
||||
{
|
||||
"NIC": [
|
||||
{"NAME": "NIC0", "NETWORK_ID": 0},
|
||||
{"NAME": "NIC1", "NETWORK_ID": 1},
|
||||
],
|
||||
"CPU": 1,
|
||||
"MEMORY": 1024,
|
||||
},
|
||||
textwrap.dedent('''
|
||||
CPU="1"
|
||||
MEMORY="1024"
|
||||
NIC=[NAME="NIC0",NETWORK_ID="0"]
|
||||
NIC=[NAME="NIC1",NETWORK_ID="1"]
|
||||
''').strip()
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.parametrize('to_flatten,extract,expected_result', FLATTEN_VALID)
|
||||
def test_flatten(to_flatten, extract, expected_result):
|
||||
result = flatten(to_flatten, extract)
|
||||
assert result == expected_result, repr(result)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('to_render,expected_result', RENDER_VALID)
|
||||
def test_render(to_render, expected_result):
|
||||
result = render(to_render)
|
||||
assert result == expected_result, repr(result)
|
||||
639
tests/unit/plugins/modules/test_ocapi_command.py
Normal file
639
tests/unit/plugins/modules/test_ocapi_command.py
Normal file
@@ -0,0 +1,639 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) Ansible project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
|
||||
from ansible_collections.community.general.tests.unit.compat.mock import patch
|
||||
from ansible_collections.community.general.tests.unit.compat import unittest
|
||||
from ansible.module_utils import basic
|
||||
import ansible_collections.community.general.plugins.modules.ocapi_command as module
|
||||
from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson
|
||||
from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json
|
||||
from ansible.module_utils.six.moves.urllib.parse import quote_plus, urljoin
|
||||
|
||||
|
||||
MOCK_BASE_URI = "mockBaseUri/"
|
||||
OPERATING_SYSTEM_URI = "OperatingSystem"
|
||||
MOCK_JOB_NAME = "MockJob"
|
||||
|
||||
ACTION_WAS_SUCCESSFUL = "Action was successful."
|
||||
UPDATE_NOT_PERFORMED_IN_CHECK_MODE = "Update not performed in check mode."
|
||||
NO_ACTION_PERFORMED_IN_CHECK_MODE = "No action performed in check mode."
|
||||
|
||||
MOCK_SUCCESSFUL_HTTP_RESPONSE_LED_INDICATOR_OFF_WITH_ETAG = {
|
||||
"ret": True,
|
||||
"data": {
|
||||
"IndicatorLED": {
|
||||
"ID": 4,
|
||||
"Name": "Off"
|
||||
},
|
||||
"PowerState": {
|
||||
"ID": 2,
|
||||
"Name": "On"
|
||||
}
|
||||
},
|
||||
"headers": {"etag": "MockETag"}
|
||||
}
|
||||
|
||||
MOCK_SUCCESSFUL_HTTP_RESPONSE = {
|
||||
"ret": True,
|
||||
"data": {}
|
||||
}
|
||||
|
||||
MOCK_404_RESPONSE = {
|
||||
"ret": False,
|
||||
"status": 404
|
||||
}
|
||||
|
||||
MOCK_SUCCESSFUL_HTTP_RESPONSE_WITH_LOCATION_HEADER = {
|
||||
"ret": True,
|
||||
"data": {},
|
||||
"headers": {"location": "mock_location"}
|
||||
}
|
||||
|
||||
MOCK_HTTP_RESPONSE_CONFLICT = {
|
||||
"ret": False,
|
||||
"msg": "Conflict",
|
||||
"status": 409
|
||||
}
|
||||
|
||||
MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS = {
|
||||
"ret": True,
|
||||
"data": {
|
||||
"PercentComplete": 99
|
||||
},
|
||||
"headers": {
|
||||
"etag": "12345"
|
||||
}
|
||||
}
|
||||
|
||||
MOCK_HTTP_RESPONSE_JOB_COMPLETE = {
|
||||
"ret": True,
|
||||
"data": {
|
||||
"PercentComplete": 100
|
||||
},
|
||||
"headers": {
|
||||
"etag": "12345"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def get_bin_path(self, arg, required=False):
|
||||
"""Mock AnsibleModule.get_bin_path"""
|
||||
return arg
|
||||
|
||||
|
||||
def get_exception_message(ansible_exit_json):
|
||||
"""From an AnsibleExitJson exception, get the message string."""
|
||||
return ansible_exit_json.exception.args[0]["msg"]
|
||||
|
||||
|
||||
def is_changed(ansible_exit_json):
|
||||
"""From an AnsibleExitJson exception, return the value of the changed flag"""
|
||||
return ansible_exit_json.exception.args[0]["changed"]
|
||||
|
||||
|
||||
def mock_get_request(*args, **kwargs):
|
||||
"""Mock for get_request."""
|
||||
url = args[1]
|
||||
if url == 'https://' + MOCK_BASE_URI:
|
||||
return MOCK_SUCCESSFUL_HTTP_RESPONSE_LED_INDICATOR_OFF_WITH_ETAG
|
||||
elif url == "mock_location":
|
||||
return MOCK_SUCCESSFUL_HTTP_RESPONSE
|
||||
raise RuntimeError("Illegal call to get_request in test: " + args[1])
|
||||
|
||||
|
||||
def mock_get_request_job_does_not_exist(*args, **kwargs):
|
||||
"""Mock for get_request."""
|
||||
url = args[1]
|
||||
if url == 'https://' + MOCK_BASE_URI:
|
||||
return MOCK_SUCCESSFUL_HTTP_RESPONSE_LED_INDICATOR_OFF_WITH_ETAG
|
||||
elif url == urljoin('https://' + MOCK_BASE_URI, "Jobs/" + MOCK_JOB_NAME):
|
||||
return MOCK_404_RESPONSE
|
||||
raise RuntimeError("Illegal call to get_request in test: " + args[1])
|
||||
|
||||
|
||||
def mock_get_request_job_in_progress(*args, **kwargs):
|
||||
url = args[1]
|
||||
if url == 'https://' + MOCK_BASE_URI:
|
||||
return MOCK_SUCCESSFUL_HTTP_RESPONSE_LED_INDICATOR_OFF_WITH_ETAG
|
||||
elif url == urljoin('https://' + MOCK_BASE_URI, "Jobs/" + MOCK_JOB_NAME):
|
||||
return MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS
|
||||
raise RuntimeError("Illegal call to get_request in test: " + args[1])
|
||||
|
||||
|
||||
def mock_get_request_job_complete(*args, **kwargs):
|
||||
url = args[1]
|
||||
if url == 'https://' + MOCK_BASE_URI:
|
||||
return MOCK_SUCCESSFUL_HTTP_RESPONSE_LED_INDICATOR_OFF_WITH_ETAG
|
||||
elif url == urljoin('https://' + MOCK_BASE_URI, "Jobs/" + MOCK_JOB_NAME):
|
||||
return MOCK_HTTP_RESPONSE_JOB_COMPLETE
|
||||
raise RuntimeError("Illegal call to get_request in test: " + args[1])
|
||||
|
||||
|
||||
def mock_put_request(*args, **kwargs):
|
||||
"""Mock put_request."""
|
||||
url = args[1]
|
||||
if url == 'https://' + MOCK_BASE_URI:
|
||||
return MOCK_SUCCESSFUL_HTTP_RESPONSE_WITH_LOCATION_HEADER
|
||||
raise RuntimeError("Illegal PUT call to: " + args[1])
|
||||
|
||||
|
||||
def mock_delete_request(*args, **kwargs):
|
||||
"""Mock delete request."""
|
||||
url = args[1]
|
||||
if url == urljoin('https://' + MOCK_BASE_URI, 'Jobs/' + MOCK_JOB_NAME):
|
||||
return MOCK_SUCCESSFUL_HTTP_RESPONSE
|
||||
raise RuntimeError("Illegal DELETE call to: " + args[1])
|
||||
|
||||
|
||||
def mock_post_request(*args, **kwargs):
|
||||
"""Mock post_request."""
|
||||
url = args[1]
|
||||
if url == urljoin('https://' + MOCK_BASE_URI, OPERATING_SYSTEM_URI):
|
||||
return MOCK_SUCCESSFUL_HTTP_RESPONSE
|
||||
raise RuntimeError("Illegal POST call to: " + args[1])
|
||||
|
||||
|
||||
def mock_http_request_conflict(*args, **kwargs):
|
||||
"""Mock to make an HTTP request return 409 Conflict"""
|
||||
return MOCK_HTTP_RESPONSE_CONFLICT
|
||||
|
||||
|
||||
def mock_invalid_http_request(*args, **kwargs):
|
||||
"""Mock to make an HTTP request invalid. Raises an exception."""
|
||||
raise RuntimeError("Illegal HTTP call to " + args[1])
|
||||
|
||||
|
||||
class TestOcapiCommand(unittest.TestCase):
    """Unit tests for the ocapi_command module.

    Each test patches the OcapiUtils HTTP methods.  Any verb a test must not
    reach is wired to mock_invalid_http_request, which raises immediately, so
    an unexpected request fails the test loudly.
    """

    # Import path of the HTTP helper class patched in every test.
    OCAPI_UTILS_CLASS = "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils"

    def setUp(self):
        # Replace AnsibleModule's exit/fail handlers so module.main() raises
        # AnsibleExitJson / AnsibleFailJson instead of terminating the process.
        self.mock_module_helper = patch.multiple(basic.AnsibleModule,
                                                 exit_json=exit_json,
                                                 fail_json=fail_json,
                                                 get_bin_path=get_bin_path)
        self.mock_module_helper.start()
        self.addCleanup(self.mock_module_helper.stop)
        self.tempdir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tempdir)

    def _module_args(self, category, command, **extra):
        """Return a baseline module-argument dict; *extra* overrides or extends it."""
        args = {
            'category': category,
            'command': command,
            'baseuri': MOCK_BASE_URI,
            'username': 'USERID',
            'password': 'PASSWORD=21',
        }
        args.update(extra)
        return args

    def _run_module(self, module_args, expected_exception, **request_mocks):
        """Run module.main() with the OcapiUtils request methods patched.

        *request_mocks* maps request-method names (get_request, put_request,
        post_request, delete_request) to mock callables; get_request defaults
        to mock_get_request.  Returns the assertRaises context object for
        *expected_exception*.
        """
        request_mocks.setdefault('get_request', mock_get_request)
        with patch.multiple(self.OCAPI_UTILS_CLASS, **request_mocks):
            with self.assertRaises(expected_exception) as context:
                set_module_args(module_args)
                module.main()
        return context

    def _create_firmware_file(self):
        """Write a small fake firmware image into the test temp dir and return its path."""
        filepath = os.path.join(self.tempdir, "fake_firmware.bin")
        with open(filepath, 'wb') as f:
            f.write(b'\x00\x01\x02\x03\x04')
        return filepath

    def test_module_fail_when_required_args_missing(self):
        with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
            set_module_args({})
            module.main()
        self.assertIn("missing required arguments:", get_exception_message(ansible_fail_json))

    def test_module_fail_when_unknown_category(self):
        with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
            set_module_args(self._module_args('unknown', 'IndicatorLedOn', password='PASSW0RD=21'))
            module.main()
        self.assertIn("Invalid Category 'unknown", get_exception_message(ansible_fail_json))

    def test_set_power_mode(self):
        """Setting low power mode issues a PUT and reports a change."""
        result = self._run_module(self._module_args('Chassis', 'PowerModeLow'),
                                  AnsibleExitJson, put_request=mock_put_request)
        self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(result))
        self.assertTrue(is_changed(result))

    def test_set_chassis_led_indicator(self):
        """Turning the chassis LED on issues a PUT and reports a change."""
        result = self._run_module(self._module_args('Chassis', 'IndicatorLedOn'),
                                  AnsibleExitJson, put_request=mock_put_request)
        self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(result))
        self.assertTrue(is_changed(result))

    def test_set_power_mode_already_set(self):
        """PowerModeNormal when already normal performs no PUT; changed=False."""
        result = self._run_module(self._module_args('Chassis', 'PowerModeNormal'),
                                  AnsibleExitJson, put_request=mock_invalid_http_request)
        self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(result))
        self.assertFalse(is_changed(result))

    def test_set_power_mode_check_mode(self):
        """Check mode skips the PUT but still reports a pending change.

        NOTE(review): despite the name, this test sends 'IndicatorLedOn' (same
        as test_set_chassis_led_indicator_check_mode).  Kept as-is to preserve
        original behavior — confirm whether 'PowerModeLow' was intended.
        """
        result = self._run_module(self._module_args('Chassis', 'IndicatorLedOn',
                                                    _ansible_check_mode=True),
                                  AnsibleExitJson, put_request=mock_invalid_http_request)
        self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(result))
        self.assertTrue(is_changed(result))

    def test_set_chassis_led_indicator_check_mode(self):
        """Check mode skips the LED PUT but still reports a pending change."""
        result = self._run_module(self._module_args('Chassis', 'IndicatorLedOn',
                                                    _ansible_check_mode=True),
                                  AnsibleExitJson, put_request=mock_invalid_http_request)
        self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(result))
        self.assertTrue(is_changed(result))

    def test_set_chassis_led_indicator_already_set(self):
        """IndicatorLedOff when already off performs no PUT; changed=False."""
        result = self._run_module(self._module_args('Chassis', 'IndicatorLedOff'),
                                  AnsibleExitJson, put_request=mock_invalid_http_request)
        self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(result))
        self.assertFalse(is_changed(result))

    def test_set_chassis_led_indicator_already_set_check_mode(self):
        """Already-off LED in check mode reports no action and changed=False."""
        result = self._run_module(self._module_args('Chassis', 'IndicatorLedOff',
                                                    _ansible_check_mode=True),
                                  AnsibleExitJson, put_request=mock_invalid_http_request)
        self.assertEqual(NO_ACTION_PERFORMED_IN_CHECK_MODE, get_exception_message(result))
        self.assertFalse(is_changed(result))

    def test_set_chassis_invalid_indicator_command(self):
        result = self._run_module(self._module_args('Chassis', 'IndicatorLedBright'),
                                  AnsibleFailJson, put_request=mock_put_request)
        self.assertIn("Invalid Command", get_exception_message(result))

    def test_reset_enclosure(self):
        result = self._run_module(self._module_args('Systems', 'PowerGracefulRestart'),
                                  AnsibleExitJson, put_request=mock_put_request)
        self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(result))
        self.assertTrue(is_changed(result))

    def test_reset_enclosure_check_mode(self):
        result = self._run_module(self._module_args('Systems', 'PowerGracefulRestart',
                                                    _ansible_check_mode=True),
                                  AnsibleExitJson, put_request=mock_invalid_http_request)
        self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(result))
        self.assertTrue(is_changed(result))

    def test_firmware_upload_missing_update_image_path(self):
        result = self._run_module(self._module_args('Update', 'FWUpload'),
                                  AnsibleFailJson, put_request=mock_put_request)
        self.assertEqual("Missing update_image_path.", get_exception_message(result))

    def test_firmware_upload_file_not_found(self):
        result = self._run_module(self._module_args('Update', 'FWUpload',
                                                    update_image_path='nonexistentfile.bin'),
                                  AnsibleFailJson, put_request=mock_invalid_http_request)
        self.assertEqual("File does not exist.", get_exception_message(result))

    def test_firmware_upload(self):
        filepath = self._create_firmware_file()
        result = self._run_module(self._module_args('Update', 'FWUpload',
                                                    update_image_path=filepath),
                                  AnsibleExitJson,
                                  put_request=mock_put_request,
                                  post_request=mock_post_request)
        self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(result))
        self.assertTrue(is_changed(result))

    def test_firmware_upload_check_mode(self):
        filepath = self._create_firmware_file()
        result = self._run_module(self._module_args('Update', 'FWUpload',
                                                    update_image_path=filepath,
                                                    _ansible_check_mode=True),
                                  AnsibleExitJson,
                                  put_request=mock_put_request,
                                  post_request=mock_invalid_http_request)
        self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(result))
        self.assertTrue(is_changed(result))

    def test_firmware_update(self):
        result = self._run_module(self._module_args('Update', 'FWUpdate'),
                                  AnsibleExitJson,
                                  put_request=mock_put_request,
                                  post_request=mock_invalid_http_request)
        self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(result))
        self.assertTrue(is_changed(result))

    def test_firmware_update_check_mode(self):
        result = self._run_module(self._module_args('Update', 'FWUpdate',
                                                    _ansible_check_mode=True),
                                  AnsibleExitJson,
                                  put_request=mock_invalid_http_request,
                                  post_request=mock_invalid_http_request)
        self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(result))
        self.assertTrue(is_changed(result))

    def test_firmware_activate(self):
        result = self._run_module(self._module_args('Update', 'FWActivate'),
                                  AnsibleExitJson,
                                  put_request=mock_put_request,
                                  post_request=mock_invalid_http_request)
        self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(result))
        self.assertTrue(is_changed(result))

    def test_firmware_activate_check_mode(self):
        result = self._run_module(self._module_args('Update', 'FWActivate',
                                                    _ansible_check_mode=True),
                                  AnsibleExitJson,
                                  put_request=mock_invalid_http_request,
                                  post_request=mock_invalid_http_request)
        self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(result))
        self.assertTrue(is_changed(result))

    def test_delete_job(self):
        result = self._run_module(self._module_args('Jobs', 'DeleteJob', job_name=MOCK_JOB_NAME),
                                  AnsibleExitJson,
                                  get_request=mock_get_request_job_complete,
                                  delete_request=mock_delete_request,
                                  put_request=mock_invalid_http_request,
                                  post_request=mock_invalid_http_request)
        self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(result))
        self.assertTrue(is_changed(result))

    def test_delete_job_in_progress(self):
        result = self._run_module(self._module_args('Jobs', 'DeleteJob', job_name=MOCK_JOB_NAME),
                                  AnsibleFailJson,
                                  get_request=mock_get_request_job_in_progress,
                                  delete_request=mock_invalid_http_request,
                                  put_request=mock_invalid_http_request,
                                  post_request=mock_invalid_http_request)
        self.assertEqual("Cannot delete job because it is in progress.", get_exception_message(result))

    def test_delete_job_in_progress_only_on_delete(self):
        """The job looks complete on GET, but the DELETE itself returns 409."""
        result = self._run_module(self._module_args('Jobs', 'DeleteJob', job_name=MOCK_JOB_NAME),
                                  AnsibleFailJson,
                                  get_request=mock_get_request_job_complete,
                                  delete_request=mock_http_request_conflict,
                                  put_request=mock_invalid_http_request,
                                  post_request=mock_invalid_http_request)
        self.assertEqual("Cannot delete job because it is in progress.", get_exception_message(result))

    def test_delete_job_check_mode(self):
        result = self._run_module(self._module_args('Jobs', 'DeleteJob', job_name=MOCK_JOB_NAME,
                                                    _ansible_check_mode=True),
                                  AnsibleExitJson,
                                  get_request=mock_get_request_job_complete,
                                  delete_request=mock_delete_request,
                                  put_request=mock_invalid_http_request,
                                  post_request=mock_invalid_http_request)
        self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(result))
        self.assertTrue(is_changed(result))

    def test_delete_job_check_mode_job_not_found(self):
        result = self._run_module(self._module_args('Jobs', 'DeleteJob', job_name=MOCK_JOB_NAME,
                                                    _ansible_check_mode=True),
                                  AnsibleExitJson,
                                  get_request=mock_get_request_job_does_not_exist,
                                  delete_request=mock_delete_request,
                                  put_request=mock_invalid_http_request,
                                  post_request=mock_invalid_http_request)
        self.assertEqual("Job already deleted.", get_exception_message(result))
        self.assertFalse(is_changed(result))

    def test_delete_job_check_mode_job_in_progress(self):
        result = self._run_module(self._module_args('Jobs', 'DeleteJob', job_name=MOCK_JOB_NAME,
                                                    _ansible_check_mode=True),
                                  AnsibleFailJson,
                                  get_request=mock_get_request_job_in_progress,
                                  delete_request=mock_delete_request,
                                  put_request=mock_invalid_http_request,
                                  post_request=mock_invalid_http_request)
        self.assertEqual("Cannot delete job because it is in progress.", get_exception_message(result))
|
||||
240
tests/unit/plugins/modules/test_ocapi_info.py
Normal file
240
tests/unit/plugins/modules/test_ocapi_info.py
Normal file
@@ -0,0 +1,240 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) Ansible project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible_collections.community.general.tests.unit.compat.mock import patch
|
||||
from ansible_collections.community.general.tests.unit.compat import unittest
|
||||
from ansible.module_utils import basic
|
||||
import ansible_collections.community.general.plugins.modules.ocapi_info as module
|
||||
from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson
|
||||
from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json
|
||||
|
||||
# Base URI of the mocked OCAPI appliance; the mocks prepend the scheme.
MOCK_BASE_URI = "mockBaseUri"
# Job names that mock_get_request routes to the canned responses below.
MOCK_JOB_NAME_IN_PROGRESS = "MockJobInProgress"
MOCK_JOB_NAME_COMPLETE = "MockJobComplete"
MOCK_JOB_NAME_DOES_NOT_EXIST = "MockJobDoesNotExist"

# Success message the module is expected to emit.
ACTION_WAS_SUCCESSFUL = "Action was successful."

# Minimal successful OCAPI response.
MOCK_SUCCESSFUL_HTTP_RESPONSE = {
    "ret": True,
    "data": {}
}

# Response returned for a GET of a job that does not exist.
MOCK_404_RESPONSE = {
    "ret": False,
    "status": 404
}

# Canned job-status payload: firmware update still running (10% complete).
MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS = {
    "ret": True,
    "data": {
        "Self": "https://openflex-data24-usalp02120qo0012-iomb:443/Storage/Devices/openflex-data24-usalp02120qo0012/Jobs/FirmwareUpdate/",
        "ID": MOCK_JOB_NAME_IN_PROGRESS,
        "PercentComplete": 10,
        "Status": {
            "State": {
                "ID": 16,
                "Name": "In service"
            },
            "Health": [
                {
                    "ID": 5,
                    "Name": "OK"
                }
            ]
        }
    }
}

# Canned job-status payload: firmware update finished; activation still needed.
MOCK_HTTP_RESPONSE_JOB_COMPLETE = {
    "ret": True,
    "data": {
        "Self": "https://openflex-data24-usalp02120qo0012-iomb:443/Storage/Devices/openflex-data24-usalp02120qo0012/Jobs/FirmwareUpdate/",
        "ID": MOCK_JOB_NAME_COMPLETE,
        "PercentComplete": 100,
        "Status": {
            "State": {
                "ID": 65540,
                "Name": "Activate needed"
            },
            "Health": [
                {
                    "ID": 5,
                    "Name": "OK"
                }
            ],
            "Details": [
                "Completed."
            ]
        }
    }
}
|
||||
|
||||
|
||||
def get_bin_path(self, arg, required=False):
    """Mock AnsibleModule.get_bin_path"""
    # Simply echoes the requested binary name back, as if it were on PATH.
    return arg
|
||||
|
||||
|
||||
def get_exception_message(ansible_exit_json):
    """Return the 'msg' string carried by a raised AnsibleExitJson/AnsibleFailJson."""
    payload = ansible_exit_json.exception.args[0]
    return payload["msg"]
|
||||
|
||||
|
||||
def mock_get_request(*args, **kwargs):
    """Mock for get_request, dispatching on the requested URL.

    Routes the base URI and the three known job URLs to their canned
    responses; any other URL raises RuntimeError to fail the test.
    """
    base = "https://" + MOCK_BASE_URI
    canned_responses = {
        base: MOCK_SUCCESSFUL_HTTP_RESPONSE,
        base + '/Jobs/' + MOCK_JOB_NAME_IN_PROGRESS: MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS,
        base + '/Jobs/' + MOCK_JOB_NAME_COMPLETE: MOCK_HTTP_RESPONSE_JOB_COMPLETE,
        base + '/Jobs/' + MOCK_JOB_NAME_DOES_NOT_EXIST: MOCK_404_RESPONSE,
    }
    try:
        return canned_responses[args[1]]
    except KeyError:
        raise RuntimeError("Illegal GET call to: " + args[1])
|
||||
|
||||
|
||||
def mock_put_request(*args, **kwargs):
    """Mock put_request. The info module must never PUT, so any call raises."""
    message = "Illegal PUT call to: " + args[1]
    raise RuntimeError(message)
|
||||
|
||||
|
||||
def mock_delete_request(*args, **kwargs):
    """Mock delete request. The info module must never DELETE, so any call raises."""
    message = "Illegal DELETE call to: " + args[1]
    raise RuntimeError(message)
|
||||
|
||||
|
||||
def mock_post_request(*args, **kwargs):
    """Mock post_request. The info module must never POST, so any call raises."""
    message = "Illegal POST call to: " + args[1]
    raise RuntimeError(message)
|
||||
|
||||
|
||||
class TestOcapiInfo(unittest.TestCase):
    """Unit tests for the ocapi_info module (Jobs / JobStatus).

    All OcapiUtils HTTP verbs are patched; only GET has canned responses —
    the other verbs raise immediately if the module unexpectedly calls them.
    """

    # Import path of the HTTP helper class patched in every test.
    OCAPI_UTILS_CLASS = "ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils"

    def setUp(self):
        # Replace AnsibleModule's exit/fail handlers so module.main() raises
        # AnsibleExitJson / AnsibleFailJson instead of terminating the process.
        self.mock_module_helper = patch.multiple(basic.AnsibleModule,
                                                 exit_json=exit_json,
                                                 fail_json=fail_json,
                                                 get_bin_path=get_bin_path)
        self.mock_module_helper.start()
        self.addCleanup(self.mock_module_helper.stop)

    def _job_status_result(self, job_name):
        """Run a Jobs/JobStatus query for *job_name*; return the exit payload dict."""
        with patch.multiple(self.OCAPI_UTILS_CLASS,
                            get_request=mock_get_request,
                            put_request=mock_put_request,
                            delete_request=mock_delete_request,
                            post_request=mock_post_request):
            with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
                set_module_args({
                    'category': 'Jobs',
                    'command': 'JobStatus',
                    'job_name': job_name,
                    'baseuri': MOCK_BASE_URI,
                    'username': 'USERID',
                    'password': 'PASSWORD=21'
                })
                module.main()
        return ansible_exit_json.exception.args[0]

    def test_module_fail_when_required_args_missing(self):
        with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
            set_module_args({})
            module.main()
        self.assertIn("missing required arguments:", get_exception_message(ansible_fail_json))

    def test_module_fail_when_unknown_category(self):
        with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
            set_module_args({
                'category': 'unknown',
                'command': 'JobStatus',
                'username': 'USERID',
                'password': 'PASSW0RD=21',
                'baseuri': MOCK_BASE_URI
            })
            module.main()
        self.assertIn("Invalid Category 'unknown", get_exception_message(ansible_fail_json))

    def test_module_fail_when_unknown_command(self):
        with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
            set_module_args({
                'category': 'Jobs',
                'command': 'unknown',
                'username': 'USERID',
                'password': 'PASSW0RD=21',
                'baseuri': MOCK_BASE_URI
            })
            module.main()
        self.assertIn("Invalid Command 'unknown", get_exception_message(ansible_fail_json))

    def test_job_status_in_progress(self):
        """An in-progress job reports its percent/status/health and no details."""
        response_data = self._job_status_result(MOCK_JOB_NAME_IN_PROGRESS)
        expected = MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS["data"]
        self.assertEqual(expected["PercentComplete"], response_data["percentComplete"])
        self.assertEqual(expected["Status"]["State"]["ID"], response_data["operationStatusId"])
        self.assertEqual(expected["Status"]["State"]["Name"], response_data["operationStatus"])
        self.assertEqual(expected["Status"]["Health"][0]["Name"], response_data["operationHealth"])
        self.assertEqual(expected["Status"]["Health"][0]["ID"], response_data["operationHealthId"])
        self.assertTrue(response_data["jobExists"])
        self.assertFalse(response_data["changed"])
        self.assertEqual(ACTION_WAS_SUCCESSFUL, response_data["msg"])
        self.assertIsNone(response_data["details"])

    def test_job_status_complete(self):
        """A completed job reports 100% and its 'Completed.' details."""
        response_data = self._job_status_result(MOCK_JOB_NAME_COMPLETE)
        expected = MOCK_HTTP_RESPONSE_JOB_COMPLETE["data"]
        self.assertEqual(expected["PercentComplete"], response_data["percentComplete"])
        self.assertEqual(expected["Status"]["State"]["ID"], response_data["operationStatusId"])
        self.assertEqual(expected["Status"]["State"]["Name"], response_data["operationStatus"])
        self.assertEqual(expected["Status"]["Health"][0]["Name"], response_data["operationHealth"])
        self.assertEqual(expected["Status"]["Health"][0]["ID"], response_data["operationHealthId"])
        self.assertTrue(response_data["jobExists"])
        self.assertFalse(response_data["changed"])
        self.assertEqual(ACTION_WAS_SUCCESSFUL, response_data["msg"])
        self.assertEqual(["Completed."], response_data["details"])

    def test_job_status_not_found(self):
        """A 404 on the job GET maps to jobExists=False with placeholder fields."""
        response_data = self._job_status_result(MOCK_JOB_NAME_DOES_NOT_EXIST)
        self.assertFalse(response_data["jobExists"])
        self.assertEqual(0, response_data["percentComplete"])
        self.assertEqual(1, response_data["operationStatusId"])
        self.assertEqual("Not Available", response_data["operationStatus"])
        self.assertIsNone(response_data["operationHealth"])
        self.assertIsNone(response_data["operationHealthId"])
        self.assertFalse(response_data["changed"])
        self.assertEqual(ACTION_WAS_SUCCESSFUL, response_data["msg"])
        self.assertEqual("Job does not exist.", response_data["details"])
|
||||
62
tests/unit/plugins/modules/test_one_vm.py
Normal file
62
tests/unit/plugins/modules/test_one_vm.py
Normal file
@@ -0,0 +1,62 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2023, Michal Opala <mopala@opennebula.io>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
|
||||
import pytest
|
||||
|
||||
from ansible_collections.community.general.plugins.modules.one_vm import parse_updateconf
|
||||
|
||||
|
||||
# (input template, expected parse_updateconf output) pairs.
# Attributes not recognized by updateconf are expected to be dropped.
PARSE_UPDATECONF_VALID = [
    (
        {
            "CPU": 1,
            "OS": {"ARCH": 2},
        },
        {
            "OS": {"ARCH": 2},
        }
    ),
    (
        {
            "OS": {"ARCH": 1, "ASD": 2},  # "ASD" is an invalid attribute, we ignore it
        },
        {
            "OS": {"ARCH": 1},
        }
    ),
    (
        {
            "OS": {"ASD": 1},  # "ASD" is an invalid attribute, we ignore it
        },
        {
        }
    ),
    (
        {
            "MEMORY": 1,
            "CONTEXT": {
                "PASSWORD": 2,
                "SSH_PUBLIC_KEY": 3,
            },
        },
        {
            "CONTEXT": {
                "PASSWORD": 2,
                "SSH_PUBLIC_KEY": 3,
            },
        }
    ),
]
|
||||
|
||||
|
||||
@pytest.mark.parametrize('vm_template,expected_result', PARSE_UPDATECONF_VALID)
def test_parse_updateconf(vm_template, expected_result):
    """parse_updateconf keeps only updateconf-allowed attributes of a VM template."""
    result = parse_updateconf(vm_template)
    assert result == expected_result, repr(result)
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user