mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-05-01 02:43:16 +00:00
Compare commits
19 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f17b10bfa2 | ||
|
|
258eb68022 | ||
|
|
264c98189c | ||
|
|
7aec01190a | ||
|
|
00fd2847e4 | ||
|
|
94ea18f1cb | ||
|
|
0b42aca72f | ||
|
|
2658bf31cd | ||
|
|
869e1a1eab | ||
|
|
d25b6e7681 | ||
|
|
8beb5d70c5 | ||
|
|
f9fecf12e7 | ||
|
|
b165337bbe | ||
|
|
6572f46998 | ||
|
|
b4ae2ce44d | ||
|
|
baec510c40 | ||
|
|
96cda3a48a | ||
|
|
9dc2e2d032 | ||
|
|
86c0af6cbb |
@@ -73,6 +73,19 @@ stages:
|
||||
- test: 3
|
||||
- test: 4
|
||||
- test: extra
|
||||
- stage: Sanity_2_14
|
||||
displayName: Sanity 2.14
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Test {0}
|
||||
testFormat: 2.14/sanity/{0}
|
||||
targets:
|
||||
- test: 1
|
||||
- test: 2
|
||||
- test: 3
|
||||
- test: 4
|
||||
- stage: Sanity_2_13
|
||||
displayName: Sanity 2.13
|
||||
dependsOn: []
|
||||
@@ -129,6 +142,18 @@ stages:
|
||||
- test: 3.8
|
||||
- test: 3.9
|
||||
- test: '3.10'
|
||||
- test: '3.11'
|
||||
- stage: Units_2_14
|
||||
displayName: Units 2.14
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.14/units/{0}/1
|
||||
targets:
|
||||
- test: 2.7
|
||||
- test: 3.9
|
||||
- stage: Units_2_13
|
||||
displayName: Units 2.13
|
||||
dependsOn: []
|
||||
@@ -139,9 +164,7 @@ stages:
|
||||
testFormat: 2.13/units/{0}/1
|
||||
targets:
|
||||
- test: 2.7
|
||||
- test: 3.6
|
||||
- test: 3.8
|
||||
- test: 3.9
|
||||
- stage: Units_2_12
|
||||
displayName: Units 2.12
|
||||
dependsOn: []
|
||||
@@ -152,7 +175,6 @@ stages:
|
||||
testFormat: 2.12/units/{0}/1
|
||||
targets:
|
||||
- test: 2.6
|
||||
- test: 3.5
|
||||
- test: 3.8
|
||||
- stage: Units_2_11
|
||||
displayName: Units 2.11
|
||||
@@ -163,11 +185,8 @@ stages:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.11/units/{0}/1
|
||||
targets:
|
||||
- test: 2.6
|
||||
- test: 2.7
|
||||
- test: 3.5
|
||||
- test: 3.6
|
||||
- test: 3.9
|
||||
|
||||
## Remote
|
||||
- stage: Remote_devel
|
||||
@@ -192,6 +211,22 @@ stages:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Remote_2_14
|
||||
displayName: Remote 2.14
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: 2.14/{0}
|
||||
targets:
|
||||
- name: RHEL 9.0
|
||||
test: rhel/9.0
|
||||
- name: FreeBSD 13.1
|
||||
test: freebsd/13.1
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Remote_2_13
|
||||
displayName: Remote 2.13
|
||||
dependsOn: []
|
||||
@@ -238,8 +273,6 @@ stages:
|
||||
test: rhel/7.9
|
||||
- name: RHEL 8.3
|
||||
test: rhel/8.3
|
||||
#- name: FreeBSD 12.2
|
||||
# test: freebsd/12.2
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
@@ -270,6 +303,20 @@ stages:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Docker_2_14
|
||||
displayName: Docker 2.14
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: 2.14/linux/{0}
|
||||
targets:
|
||||
- name: Ubuntu 20.04
|
||||
test: ubuntu2004
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- stage: Docker_2_13
|
||||
displayName: Docker 2.13
|
||||
dependsOn: []
|
||||
@@ -356,6 +403,16 @@ stages:
|
||||
testFormat: devel/cloud/{0}/1
|
||||
targets:
|
||||
- test: 2.7
|
||||
- test: '3.11'
|
||||
- stage: Cloud_2_14
|
||||
displayName: Cloud 2.14
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.14/cloud/{0}/1
|
||||
targets:
|
||||
- test: '3.10'
|
||||
- stage: Cloud_2_13
|
||||
displayName: Cloud 2.13
|
||||
@@ -396,22 +453,27 @@ stages:
|
||||
- Sanity_2_11
|
||||
- Sanity_2_12
|
||||
- Sanity_2_13
|
||||
- Sanity_2_14
|
||||
- Units_devel
|
||||
- Units_2_11
|
||||
- Units_2_12
|
||||
- Units_2_13
|
||||
- Units_2_14
|
||||
- Remote_devel
|
||||
- Remote_2_11
|
||||
- Remote_2_12
|
||||
- Remote_2_13
|
||||
- Remote_2_14
|
||||
- Docker_devel
|
||||
- Docker_2_11
|
||||
- Docker_2_12
|
||||
- Docker_2_13
|
||||
- Docker_2_14
|
||||
- Docker_community_devel
|
||||
- Cloud_devel
|
||||
- Cloud_2_11
|
||||
- Cloud_2_12
|
||||
- Cloud_2_13
|
||||
- Cloud_2_14
|
||||
jobs:
|
||||
- template: templates/coverage.yml
|
||||
|
||||
4
.github/BOTMETA.yml
vendored
4
.github/BOTMETA.yml
vendored
@@ -378,6 +378,8 @@ files:
|
||||
$modules/cloud/misc/proxmox_template.py:
|
||||
maintainers: UnderGreen
|
||||
ignore: skvidal
|
||||
$modules/cloud/misc/proxmox_disk.py:
|
||||
maintainers: castorsky
|
||||
$modules/cloud/misc/rhevm.py:
|
||||
maintainers: $team_virt TimothyVandenbrande
|
||||
labels: rhevm virt
|
||||
@@ -587,6 +589,8 @@ files:
|
||||
maintainers: Gaetan2907
|
||||
$modules/identity/keycloak/keycloak_client_rolemapping.py:
|
||||
maintainers: Gaetan2907
|
||||
$modules/identity/keycloak/keycloak_user_rolemapping.py:
|
||||
maintainers: bratwurzt
|
||||
$modules/identity/keycloak/keycloak_group.py:
|
||||
maintainers: adamgoossens
|
||||
$modules/identity/keycloak/keycloak_identity_provider.py:
|
||||
|
||||
@@ -6,6 +6,59 @@ Community General Release Notes
|
||||
|
||||
This changelog describes changes after version 4.0.0.
|
||||
|
||||
v5.7.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular feature and bugfix release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- bitwarden lookup plugin - add option ``search`` to search for other attributes than name (https://github.com/ansible-collections/community.general/pull/5297).
|
||||
- machinectl become plugin - combine the success command when building the become command to be consistent with other become plugins (https://github.com/ansible-collections/community.general/pull/5287).
|
||||
- netcup_dnsapi - add ``timeout`` parameter (https://github.com/ansible-collections/community.general/pull/5301).
|
||||
- proxmox module utils, the proxmox* modules - add ``api_task_ok`` helper to standardize API task status checks across all proxmox modules (https://github.com/ansible-collections/community.general/pull/5274).
|
||||
- proxmox_snap - add ``unbind`` param to support snapshotting containers with configured mountpoints (https://github.com/ansible-collections/community.general/pull/5274).
|
||||
- redfish_config - add ``SetSessionService`` to set default session timeout policy (https://github.com/ansible-collections/community.general/issues/5008).
|
||||
- terraform - adds capability to handle complex variable structures for ``variables`` parameter in the module. This must be enabled with the new ``complex_vars`` parameter (https://github.com/ansible-collections/community.general/pull/4797).
|
||||
- terraform - run ``terraform init`` with ``-no-color`` not to mess up the stdout of the task (https://github.com/ansible-collections/community.general/pull/5147).
|
||||
|
||||
Deprecated Features
|
||||
-------------------
|
||||
|
||||
- lxc_container - the module will no longer make any effort to support Python 2 (https://github.com/ansible-collections/community.general/pull/5304).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- ini_file - minor refactor fixing a python lint error (https://github.com/ansible-collections/community.general/pull/5307).
|
||||
- locale_gen - fix support for Ubuntu (https://github.com/ansible-collections/community.general/issues/5281).
|
||||
- lxc_container - the module has been updated to support Python 3 (https://github.com/ansible-collections/community.general/pull/5304).
|
||||
- nmcli - fix error when setting previously unset MAC address, ``gsm.apn`` or ``vpn.data``: current values were being normalized without checking if they might be ``None`` (https://github.com/ansible-collections/community.general/pull/5291).
|
||||
- redhat_subscription - make module idempotent when ``pool_ids`` are used (https://github.com/ansible-collections/community.general/issues/5313).
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
Cloud
|
||||
~~~~~
|
||||
|
||||
misc
|
||||
^^^^
|
||||
|
||||
- proxmox_disk - Management of a disk of a Qemu(KVM) VM in a Proxmox VE cluster.
|
||||
|
||||
Identity
|
||||
~~~~~~~~
|
||||
|
||||
keycloak
|
||||
^^^^^^^^
|
||||
|
||||
- keycloak_user_rolemapping - Allows administration of Keycloak user_rolemapping with the Keycloak API
|
||||
|
||||
v5.6.0
|
||||
======
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ If you encounter abusive behavior violating the [Ansible Code of Conduct](https:
|
||||
|
||||
## Tested with Ansible
|
||||
|
||||
Tested with the current ansible-core 2.11, ansible-core 2.12, ansible-core 2.13 releases and the current development version of ansible-core. Ansible-core versions before 2.11.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases.
|
||||
Tested with the current ansible-core 2.11, ansible-core 2.12, ansible-core 2.13, ansible-core 2.14 releases and the current development version of ansible-core. Ansible-core versions before 2.11.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases.
|
||||
|
||||
Parts of this collection will not work with ansible-core 2.11 on Python 3.12+.
|
||||
|
||||
|
||||
@@ -1160,3 +1160,57 @@ releases:
|
||||
name: pipx_info
|
||||
namespace: packaging.language
|
||||
release_date: '2022-09-13'
|
||||
5.7.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- ini_file - minor refactor fixing a python lint error (https://github.com/ansible-collections/community.general/pull/5307).
|
||||
- locale_gen - fix support for Ubuntu (https://github.com/ansible-collections/community.general/issues/5281).
|
||||
- lxc_container - the module has been updated to support Python 3 (https://github.com/ansible-collections/community.general/pull/5304).
|
||||
- 'nmcli - fix error when setting previously unset MAC address, ``gsm.apn``
|
||||
or ``vpn.data``: current values were being normalized without checking if
|
||||
they might be ``None`` (https://github.com/ansible-collections/community.general/pull/5291).'
|
||||
- redhat_subscription - make module idempotent when ``pool_ids`` are used (https://github.com/ansible-collections/community.general/issues/5313).
|
||||
deprecated_features:
|
||||
- lxc_container - the module will no longer make any effort to support Python
|
||||
2 (https://github.com/ansible-collections/community.general/pull/5304).
|
||||
minor_changes:
|
||||
- bitwarden lookup plugin - add option ``search`` to search for other attributes
|
||||
than name (https://github.com/ansible-collections/community.general/pull/5297).
|
||||
- machinectl become plugin - combine the success command when building the become
|
||||
command to be consistent with other become plugins (https://github.com/ansible-collections/community.general/pull/5287).
|
||||
- netcup_dnsapi - add ``timeout`` parameter (https://github.com/ansible-collections/community.general/pull/5301).
|
||||
- proxmox module utils, the proxmox* modules - add ``api_task_ok`` helper to
|
||||
standardize API task status checks across all proxmox modules (https://github.com/ansible-collections/community.general/pull/5274).
|
||||
- proxmox_snap - add ``unbind`` param to support snapshotting containers with
|
||||
configured mountpoints (https://github.com/ansible-collections/community.general/pull/5274).
|
||||
- redfish_config - add ``SetSessionService`` to set default session timeout
|
||||
policy (https://github.com/ansible-collections/community.general/issues/5008).
|
||||
- terraform - adds capability to handle complex variable structures for ``variables``
|
||||
parameter in the module. This must be enabled with the new ``complex_vars``
|
||||
parameter (https://github.com/ansible-collections/community.general/pull/4797).
|
||||
- terraform - run ``terraform init`` with ``-no-color`` not to mess up the stdout
|
||||
of the task (https://github.com/ansible-collections/community.general/pull/5147).
|
||||
release_summary: Regular feature and bugfix release.
|
||||
fragments:
|
||||
- 4797-terraform-complex-variables.yml
|
||||
- 5.7.0.yml
|
||||
- 5008-addSetSessionService.yml
|
||||
- 5147-terraform-init-no-color.yml
|
||||
- 5274-proxmox-snap-container-with-mountpoints.yml
|
||||
- 5280-lxc_container-py3.yaml
|
||||
- 5282-locale_gen.yaml
|
||||
- 5287-machinectl-become-success.yml
|
||||
- 5291-fix-nmcli-error-when-setting-unset-mac-address.yaml
|
||||
- 5297-bitwarden-add-search-field.yml
|
||||
- 5301-netcup_dnsapi-timeout.yml
|
||||
- 5307-ini_file-lint.yaml
|
||||
- 5313-fix-redhat_subscription-idempotency-pool_ids.yml
|
||||
modules:
|
||||
- description: Allows administration of Keycloak user_rolemapping with the Keycloak
|
||||
API
|
||||
name: keycloak_user_rolemapping
|
||||
namespace: identity.keycloak
|
||||
- description: Management of a disk of a Qemu(KVM) VM in a Proxmox VE cluster.
|
||||
name: proxmox_disk
|
||||
namespace: cloud.misc
|
||||
release_date: '2022-10-04'
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
|
||||
namespace: community
|
||||
name: general
|
||||
version: 5.6.0
|
||||
version: 5.7.0
|
||||
readme: README.md
|
||||
authors:
|
||||
- Ansible (https://github.com/ansible)
|
||||
|
||||
@@ -612,6 +612,8 @@ plugin_routing:
|
||||
redirect: community.general.identity.keycloak.keycloak_role
|
||||
keycloak_user_federation:
|
||||
redirect: community.general.identity.keycloak.keycloak_user_federation
|
||||
keycloak_user_rolemapping:
|
||||
redirect: community.general.identity.keycloak.keycloak_user_rolemapping
|
||||
keyring:
|
||||
redirect: community.general.system.keyring
|
||||
keyring_info:
|
||||
@@ -1213,6 +1215,8 @@ plugin_routing:
|
||||
redirect: community.general.cloud.profitbricks.profitbricks_volume_attachments
|
||||
proxmox:
|
||||
redirect: community.general.cloud.misc.proxmox
|
||||
proxmox_disk:
|
||||
redirect: community.general.cloud.misc.proxmox_disk
|
||||
proxmox_domain_info:
|
||||
redirect: community.general.cloud.misc.proxmox_domain_info
|
||||
proxmox_group_info:
|
||||
|
||||
@@ -117,7 +117,7 @@ class BecomeModule(BecomeBase):
|
||||
|
||||
flags = self.get_option('become_flags')
|
||||
user = self.get_option('become_user')
|
||||
return '%s -q shell %s %s@ %s' % (become, flags, user, cmd)
|
||||
return '%s -q shell %s %s@ %s' % (become, flags, user, self._build_success_command(cmd, shell))
|
||||
|
||||
def check_success(self, b_output):
|
||||
b_output = self.remove_ansi_codes(b_output)
|
||||
|
||||
@@ -22,6 +22,11 @@ DOCUMENTATION = """
|
||||
required: true
|
||||
type: list
|
||||
elements: str
|
||||
search:
|
||||
description: Field to retrieve, for example C(name) or C(id).
|
||||
type: str
|
||||
default: name
|
||||
version_added: 5.7.0
|
||||
field:
|
||||
description: Field to fetch; leave unset to fetch whole response.
|
||||
type: str
|
||||
@@ -33,6 +38,11 @@ EXAMPLES = """
|
||||
msg: >-
|
||||
{{ lookup('community.general.bitwarden', 'a_test', field='password') }}
|
||||
|
||||
- name: "Get 'password' from Bitwarden record with id 'bafba515-af11-47e6-abe3-af1200cd18b2'"
|
||||
ansible.builtin.debug:
|
||||
msg: >-
|
||||
{{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') }}
|
||||
|
||||
- name: "Get full Bitwarden record named 'a_test'"
|
||||
ansible.builtin.debug:
|
||||
msg: >-
|
||||
@@ -81,7 +91,7 @@ class Bitwarden(object):
|
||||
raise BitwardenException(err)
|
||||
return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')
|
||||
|
||||
def _get_matches(self, search_value, search_field="name"):
|
||||
def _get_matches(self, search_value, search_field):
|
||||
"""Return matching records whose search_field is equal to key.
|
||||
"""
|
||||
out, err = self._run(['list', 'items', '--search', search_value])
|
||||
@@ -97,7 +107,7 @@ class Bitwarden(object):
|
||||
|
||||
If field is None, return the whole record for each match.
|
||||
"""
|
||||
matches = self._get_matches(search_value)
|
||||
matches = self._get_matches(search_value, search_field)
|
||||
|
||||
if field:
|
||||
return [match['login'][field] for match in matches]
|
||||
@@ -110,10 +120,11 @@ class LookupModule(LookupBase):
|
||||
def run(self, terms, variables=None, **kwargs):
|
||||
self.set_options(var_options=variables, direct=kwargs)
|
||||
field = self.get_option('field')
|
||||
search_field = self.get_option('search')
|
||||
if not _bitwarden.logged_in:
|
||||
raise AnsibleError("Not logged into Bitwarden. Run 'bw login'.")
|
||||
|
||||
return [_bitwarden.get_field(field, term) for term in terms]
|
||||
return [_bitwarden.get_field(field, term, search_field) for term in terms]
|
||||
|
||||
|
||||
_bitwarden = Bitwarden()
|
||||
|
||||
@@ -29,8 +29,15 @@ URL_CLIENT_ROLE_COMPOSITES = "{url}/admin/realms/{realm}/clients/{id}/roles/{nam
|
||||
|
||||
URL_REALM_ROLES = "{url}/admin/realms/{realm}/roles"
|
||||
URL_REALM_ROLE = "{url}/admin/realms/{realm}/roles/{name}"
|
||||
URL_REALM_ROLEMAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/realm"
|
||||
URL_REALM_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/realm/available"
|
||||
URL_REALM_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/realm/composite"
|
||||
URL_REALM_ROLE_COMPOSITES = "{url}/admin/realms/{realm}/roles/{name}/composites"
|
||||
|
||||
URL_ROLES_BY_ID = "{url}/admin/realms/{realm}/roles-by-id/{id}"
|
||||
URL_ROLES_BY_ID_COMPOSITES_CLIENTS = "{url}/admin/realms/{realm}/roles-by-id/{id}/composites/clients/{cid}"
|
||||
URL_ROLES_BY_ID_COMPOSITES = "{url}/admin/realms/{realm}/roles-by-id/{id}/composites"
|
||||
|
||||
URL_CLIENTTEMPLATE = "{url}/admin/realms/{realm}/client-templates/{id}"
|
||||
URL_CLIENTTEMPLATES = "{url}/admin/realms/{realm}/client-templates"
|
||||
URL_GROUPS = "{url}/admin/realms/{realm}/groups"
|
||||
@@ -41,9 +48,15 @@ URL_CLIENTSCOPE = "{url}/admin/realms/{realm}/client-scopes/{id}"
|
||||
URL_CLIENTSCOPE_PROTOCOLMAPPERS = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models"
|
||||
URL_CLIENTSCOPE_PROTOCOLMAPPER = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models/{mapper_id}"
|
||||
|
||||
URL_CLIENT_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}"
|
||||
URL_CLIENT_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/available"
|
||||
URL_CLIENT_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/composite"
|
||||
URL_CLIENT_GROUP_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}"
|
||||
URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/available"
|
||||
URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/composite"
|
||||
|
||||
URL_USERS = "{url}/admin/realms/{realm}/users"
|
||||
URL_CLIENT_SERVICE_ACCOUNT_USER = "{url}/admin/realms/{realm}/clients/{id}/service-account-user"
|
||||
URL_CLIENT_USER_ROLEMAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}"
|
||||
URL_CLIENT_USER_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}/available"
|
||||
URL_CLIENT_USER_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}/composite"
|
||||
|
||||
URL_AUTHENTICATION_FLOWS = "{url}/admin/realms/{realm}/authentication/flows"
|
||||
URL_AUTHENTICATION_FLOW = "{url}/admin/realms/{realm}/authentication/flows/{id}"
|
||||
@@ -446,10 +459,9 @@ class KeycloakAPI(object):
|
||||
self.module.fail_json(msg="Could not fetch rolemappings for client %s in realm %s: %s"
|
||||
% (cid, realm, str(e)))
|
||||
|
||||
def get_client_role_by_name(self, gid, cid, name, realm="master"):
|
||||
def get_client_role_id_by_name(self, cid, name, realm="master"):
|
||||
""" Get the role ID of a client.
|
||||
|
||||
:param gid: ID of the group from which to obtain the rolemappings.
|
||||
:param cid: ID of the client from which to obtain the rolemappings.
|
||||
:param name: Name of the role.
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
@@ -461,7 +473,7 @@ class KeycloakAPI(object):
|
||||
return role['id']
|
||||
return None
|
||||
|
||||
def get_client_rolemapping_by_id(self, gid, cid, rid, realm='master'):
|
||||
def get_client_group_rolemapping_by_id(self, gid, cid, rid, realm='master'):
|
||||
""" Obtain client representation by id
|
||||
|
||||
:param gid: ID of the group from which to obtain the rolemappings.
|
||||
@@ -470,7 +482,7 @@ class KeycloakAPI(object):
|
||||
:param realm: client from this realm
|
||||
:return: dict of rolemapping representation or None if none matching exist
|
||||
"""
|
||||
rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
|
||||
rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
|
||||
try:
|
||||
rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
@@ -483,7 +495,7 @@ class KeycloakAPI(object):
|
||||
% (cid, gid, realm, str(e)))
|
||||
return None
|
||||
|
||||
def get_client_available_rolemappings(self, gid, cid, realm="master"):
|
||||
def get_client_group_available_rolemappings(self, gid, cid, realm="master"):
|
||||
""" Fetch the available role of a client in a specified goup on the Keycloak server.
|
||||
|
||||
:param gid: ID of the group from which to obtain the rolemappings.
|
||||
@@ -491,7 +503,7 @@ class KeycloakAPI(object):
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: The rollemappings of specified group and client of the realm (default "master").
|
||||
"""
|
||||
available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=gid, client=cid)
|
||||
available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=gid, client=cid)
|
||||
try:
|
||||
return json.loads(to_native(open_url(available_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
@@ -500,7 +512,7 @@ class KeycloakAPI(object):
|
||||
self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
|
||||
% (cid, gid, realm, str(e)))
|
||||
|
||||
def get_client_composite_rolemappings(self, gid, cid, realm="master"):
|
||||
def get_client_group_composite_rolemappings(self, gid, cid, realm="master"):
|
||||
""" Fetch the composite role of a client in a specified group on the Keycloak server.
|
||||
|
||||
:param gid: ID of the group from which to obtain the rolemappings.
|
||||
@@ -508,15 +520,64 @@ class KeycloakAPI(object):
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: The rollemappings of specified group and client of the realm (default "master").
|
||||
"""
|
||||
available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=gid, client=cid)
|
||||
composite_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=gid, client=cid)
|
||||
try:
|
||||
return json.loads(to_native(open_url(available_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
return json.loads(to_native(open_url(composite_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
|
||||
% (cid, gid, realm, str(e)))
|
||||
|
||||
def get_role_by_id(self, rid, realm="master"):
|
||||
""" Fetch a role by its id on the Keycloak server.
|
||||
|
||||
:param rid: ID of the role.
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: The role.
|
||||
"""
|
||||
client_roles_url = URL_ROLES_BY_ID.format(url=self.baseurl, realm=realm, id=rid)
|
||||
try:
|
||||
return json.loads(to_native(open_url(client_roles_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not fetch role for id %s in realm %s: %s"
|
||||
% (rid, realm, str(e)))
|
||||
|
||||
def get_client_roles_by_id_composite_rolemappings(self, rid, cid, realm="master"):
|
||||
""" Fetch a role by its id on the Keycloak server.
|
||||
|
||||
:param rid: ID of the composite role.
|
||||
:param cid: ID of the client from which to obtain the rolemappings.
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: The role.
|
||||
"""
|
||||
client_roles_url = URL_ROLES_BY_ID_COMPOSITES_CLIENTS.format(url=self.baseurl, realm=realm, id=rid, cid=cid)
|
||||
try:
|
||||
return json.loads(to_native(open_url(client_roles_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not fetch role for id %s and cid %s in realm %s: %s"
|
||||
% (rid, cid, realm, str(e)))
|
||||
|
||||
def add_client_roles_by_id_composite_rolemapping(self, rid, roles_rep, realm="master"):
|
||||
""" Assign roles to composite role
|
||||
|
||||
:param rid: ID of the composite role.
|
||||
:param roles_rep: Representation of the roles to assign.
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: None.
|
||||
"""
|
||||
available_rolemappings_url = URL_ROLES_BY_ID_COMPOSITES.format(url=self.baseurl, realm=realm, id=rid)
|
||||
try:
|
||||
open_url(available_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(roles_rep),
|
||||
validate_certs=self.validate_certs, timeout=self.connection_timeout)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not assign roles to composite role %s and realm %s: %s"
|
||||
% (rid, realm, str(e)))
|
||||
|
||||
def add_group_rolemapping(self, gid, cid, role_rep, realm="master"):
|
||||
""" Fetch the composite role of a client in a specified goup on the Keycloak server.
|
||||
|
||||
@@ -526,7 +587,7 @@ class KeycloakAPI(object):
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: None.
|
||||
"""
|
||||
available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
|
||||
available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
|
||||
try:
|
||||
open_url(available_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
|
||||
validate_certs=self.validate_certs, timeout=self.connection_timeout)
|
||||
@@ -543,7 +604,7 @@ class KeycloakAPI(object):
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: None.
|
||||
"""
|
||||
available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
|
||||
available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
|
||||
try:
|
||||
open_url(available_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders,
|
||||
validate_certs=self.validate_certs, timeout=self.connection_timeout)
|
||||
@@ -551,6 +612,206 @@ class KeycloakAPI(object):
|
||||
self.module.fail_json(msg="Could not delete available rolemappings for client %s in group %s, realm %s: %s"
|
||||
% (cid, gid, realm, str(e)))
|
||||
|
||||
def get_client_user_rolemapping_by_id(self, uid, cid, rid, realm='master'):
|
||||
""" Obtain client representation by id
|
||||
|
||||
:param uid: ID of the user from which to obtain the rolemappings.
|
||||
:param cid: ID of the client from which to obtain the rolemappings.
|
||||
:param rid: ID of the role.
|
||||
:param realm: client from this realm
|
||||
:return: dict of rolemapping representation or None if none matching exist
|
||||
"""
|
||||
rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid)
|
||||
try:
|
||||
rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
for role in rolemappings:
|
||||
if rid == role['id']:
|
||||
return role
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not fetch rolemappings for client %s and user %s, realm %s: %s"
|
||||
% (cid, uid, realm, str(e)))
|
||||
return None
|
||||
|
||||
def get_client_user_available_rolemappings(self, uid, cid, realm="master"):
|
||||
""" Fetch the available role of a client for a specified user on the Keycloak server.
|
||||
|
||||
:param uid: ID of the user from which to obtain the rolemappings.
|
||||
:param cid: ID of the client from which to obtain the rolemappings.
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: The effective rollemappings of specified client and user of the realm (default "master").
|
||||
"""
|
||||
available_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=uid, client=cid)
|
||||
try:
|
||||
return json.loads(to_native(open_url(available_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not fetch effective rolemappings for client %s and user %s, realm %s: %s"
|
||||
% (cid, uid, realm, str(e)))
|
||||
|
||||
def get_client_user_composite_rolemappings(self, uid, cid, realm="master"):
|
||||
""" Fetch the composite role of a client for a specified user on the Keycloak server.
|
||||
|
||||
:param uid: ID of the user from which to obtain the rolemappings.
|
||||
:param cid: ID of the client from which to obtain the rolemappings.
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: The rollemappings of specified group and client of the realm (default "master").
|
||||
"""
|
||||
composite_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=uid, client=cid)
|
||||
try:
|
||||
return json.loads(to_native(open_url(composite_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not fetch available rolemappings for user %s of realm %s: %s"
|
||||
% (uid, realm, str(e)))
|
||||
|
||||
def get_realm_user_rolemapping_by_id(self, uid, rid, realm='master'):
|
||||
""" Obtain role representation by id
|
||||
|
||||
:param uid: ID of the user from which to obtain the rolemappings.
|
||||
:param rid: ID of the role.
|
||||
:param realm: client from this realm
|
||||
:return: dict of rolemapping representation or None if none matching exist
|
||||
"""
|
||||
rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid)
|
||||
try:
|
||||
rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
for role in rolemappings:
|
||||
if rid == role['id']:
|
||||
return role
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not fetch rolemappings for user %s, realm %s: %s"
|
||||
% (uid, realm, str(e)))
|
||||
return None
|
||||
|
||||
def get_realm_user_available_rolemappings(self, uid, realm="master"):
|
||||
""" Fetch the available role of a realm for a specified user on the Keycloak server.
|
||||
|
||||
:param uid: ID of the user from which to obtain the rolemappings.
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: The rollemappings of specified group and client of the realm (default "master").
|
||||
"""
|
||||
available_rolemappings_url = URL_REALM_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=uid)
|
||||
try:
|
||||
return json.loads(to_native(open_url(available_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not fetch available rolemappings for user %s of realm %s: %s"
|
||||
% (uid, realm, str(e)))
|
||||
|
||||
def get_realm_user_composite_rolemappings(self, uid, realm="master"):
|
||||
""" Fetch the composite role of a realm for a specified user on the Keycloak server.
|
||||
|
||||
:param uid: ID of the user from which to obtain the rolemappings.
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: The effective rollemappings of specified client and user of the realm (default "master").
|
||||
"""
|
||||
composite_rolemappings_url = URL_REALM_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=uid)
|
||||
try:
|
||||
return json.loads(to_native(open_url(composite_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not fetch effective rolemappings for user %s, realm %s: %s"
|
||||
% (uid, realm, str(e)))
|
||||
|
||||
def get_user_by_username(self, username, realm="master"):
|
||||
""" Fetch a keycloak user within a realm based on its username.
|
||||
|
||||
If the user does not exist, None is returned.
|
||||
:param username: Username of the user to fetch.
|
||||
:param realm: Realm in which the user resides; default 'master'
|
||||
"""
|
||||
users_url = URL_USERS.format(url=self.baseurl, realm=realm)
|
||||
users_url += '?username=%s&exact=true' % username
|
||||
try:
|
||||
return json.loads(to_native(open_url(users_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except ValueError as e:
|
||||
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain the user for realm %s and username %s: %s'
|
||||
% (realm, username, str(e)))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Could not obtain the user for realm %s and username %s: %s'
|
||||
% (realm, username, str(e)))
|
||||
|
||||
def get_service_account_user_by_client_id(self, client_id, realm="master"):
|
||||
""" Fetch a keycloak service account user within a realm based on its client_id.
|
||||
|
||||
If the user does not exist, None is returned.
|
||||
:param client_id: clientId of the service account user to fetch.
|
||||
:param realm: Realm in which the user resides; default 'master'
|
||||
"""
|
||||
cid = self.get_client_id(client_id, realm=realm)
|
||||
|
||||
service_account_user_url = URL_CLIENT_SERVICE_ACCOUNT_USER.format(url=self.baseurl, realm=realm, id=cid)
|
||||
try:
|
||||
return json.loads(to_native(open_url(service_account_user_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
except ValueError as e:
|
||||
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain the service-account-user for realm %s and client_id %s: %s'
|
||||
% (realm, client_id, str(e)))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='Could not obtain the service-account-user for realm %s and client_id %s: %s'
|
||||
% (realm, client_id, str(e)))
|
||||
|
||||
def add_user_rolemapping(self, uid, cid, role_rep, realm="master"):
|
||||
""" Assign a realm or client role to a specified user on the Keycloak server.
|
||||
|
||||
:param uid: ID of the user roles are assigned to.
|
||||
:param cid: ID of the client from which to obtain the rolemappings. If empty, roles are from the realm
|
||||
:param role_rep: Representation of the role to assign.
|
||||
:param realm: Realm from which to obtain the rolemappings.
|
||||
:return: None.
|
||||
"""
|
||||
if cid is None:
|
||||
user_realm_rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid)
|
||||
try:
|
||||
open_url(user_realm_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
|
||||
validate_certs=self.validate_certs, timeout=self.connection_timeout)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not map roles to userId %s for realm %s and roles %s: %s"
|
||||
% (uid, realm, json.dumps(role_rep), str(e)))
|
||||
else:
|
||||
user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid)
|
||||
try:
|
||||
open_url(user_client_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
|
||||
validate_certs=self.validate_certs, timeout=self.connection_timeout)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not map roles to userId %s for client %s, realm %s and roles %s: %s"
|
||||
% (cid, uid, realm, json.dumps(role_rep), str(e)))
|
||||
|
||||
def delete_user_rolemapping(self, uid, cid, role_rep, realm="master"):
|
||||
""" Delete the rolemapping of a client in a specified user on the Keycloak server.
|
||||
|
||||
:param uid: ID of the user from which to remove the rolemappings.
|
||||
:param cid: ID of the client from which to remove the rolemappings.
|
||||
:param role_rep: Representation of the role to remove from rolemappings.
|
||||
:param realm: Realm from which to remove the rolemappings.
|
||||
:return: None.
|
||||
"""
|
||||
if cid is None:
|
||||
user_realm_rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid)
|
||||
try:
|
||||
open_url(user_realm_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
|
||||
validate_certs=self.validate_certs, timeout=self.connection_timeout)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not remove roles %s from userId %s, realm %s: %s"
|
||||
% (json.dumps(role_rep), uid, realm, str(e)))
|
||||
else:
|
||||
user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid)
|
||||
try:
|
||||
open_url(user_client_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
|
||||
validate_certs=self.validate_certs, timeout=self.connection_timeout)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Could not remove roles %s for client %s from userId %s, realm %s: %s"
|
||||
% (json.dumps(role_rep), cid, uid, realm, str(e)))
|
||||
|
||||
def get_client_templates(self, realm='master'):
|
||||
""" Obtains client template representations for client templates in a realm
|
||||
|
||||
@@ -930,7 +1191,6 @@ class KeycloakAPI(object):
|
||||
return json.loads(to_native(open_url(groups_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
|
||||
timeout=self.connection_timeout,
|
||||
validate_certs=self.validate_certs).read()))
|
||||
|
||||
except HTTPError as e:
|
||||
if e.code == 404:
|
||||
return None
|
||||
|
||||
@@ -6,7 +6,6 @@
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.module_utils.parsing.convert_bool import boolean
|
||||
from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt
|
||||
|
||||
|
||||
|
||||
@@ -137,3 +137,7 @@ class ProxmoxAnsible(object):
|
||||
return None
|
||||
|
||||
self.module.fail_json(msg='VM with vmid %s does not exist in cluster' % vmid)
|
||||
|
||||
def api_task_ok(self, node, taskid):
|
||||
status = self.proxmox_api.nodes(node).tasks(taskid).status.get()
|
||||
return status['status'] == 'stopped' and status['exitstatus'] == 'OK'
|
||||
|
||||
@@ -240,6 +240,7 @@ class RedfishUtils(object):
|
||||
return {'ret': False, 'msg': "SessionService resource not found"}
|
||||
else:
|
||||
session_service = data["SessionService"]["@odata.id"]
|
||||
self.session_service_uri = session_service
|
||||
response = self.get_request(self.root_uri + session_service)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
@@ -3081,3 +3082,60 @@ class RedfishUtils(object):
|
||||
|
||||
def get_multi_manager_inventory(self):
|
||||
return self.aggregate_managers(self.get_manager_inventory)
|
||||
|
||||
def set_session_service(self, sessions_config):
|
||||
result = {}
|
||||
response = self.get_request(self.root_uri + self.session_service_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
current_sessions_config = response['data']
|
||||
payload = {}
|
||||
for property, value in sessions_config.items():
|
||||
value = sessions_config[property]
|
||||
if property not in current_sessions_config:
|
||||
return {'ret': False, 'msg': "Property %s in sessions_config is invalid" % property}
|
||||
if isinstance(value, dict):
|
||||
if isinstance(current_sessions_config[property], dict):
|
||||
payload[property] = value
|
||||
elif isinstance(current_sessions_config[property], list):
|
||||
payload[property] = [value]
|
||||
else:
|
||||
return {'ret': False, 'msg': "Value of property %s in sessions_config is invalid" % property}
|
||||
else:
|
||||
payload[property] = value
|
||||
|
||||
need_change = False
|
||||
for property, set_value in payload.items():
|
||||
cur_value = current_sessions_config[property]
|
||||
if not isinstance(set_value, (dict, list)):
|
||||
if set_value != cur_value:
|
||||
need_change = True
|
||||
if isinstance(set_value, dict):
|
||||
for subprop in set_value.keys():
|
||||
if subprop not in current_sessions_config[property]:
|
||||
need_change = True
|
||||
break
|
||||
sub_set_value = set_value[subprop]
|
||||
sub_cur_value = current_sessions_config[property][subprop]
|
||||
if sub_set_value != sub_cur_value:
|
||||
need_change = True
|
||||
if isinstance(set_value, list):
|
||||
if len(set_value) != len(cur_value):
|
||||
need_change = True
|
||||
continue
|
||||
for i in range(len(set_value)):
|
||||
for subprop in set_value[i].keys():
|
||||
if subprop not in current_sessions_config[property][i]:
|
||||
need_change = True
|
||||
break
|
||||
sub_set_value = set_value[i][subprop]
|
||||
sub_cur_value = current_sessions_config[property][i][subprop]
|
||||
if sub_set_value != sub_cur_value:
|
||||
need_change = True
|
||||
if not need_change:
|
||||
return {'ret': True, 'changed': False, 'msg': "SessionService already configured"}
|
||||
|
||||
response = self.patch_request(self.root_uri + self.session_service_uri, payload)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
return {'ret': True, 'changed': True, 'msg': "Modified SessionService"}
|
||||
|
||||
@@ -164,9 +164,9 @@ options:
|
||||
type: list
|
||||
elements: str
|
||||
requirements:
|
||||
- 'lxc >= 1.0 # OS package'
|
||||
- 'python >= 2.6 # OS Package'
|
||||
- 'lxc-python2 >= 0.1 # PIP Package from https://github.com/lxc/python2-lxc'
|
||||
- 'lxc >= 2.0 # OS package'
|
||||
- 'python3 >= 3.5 # OS Package'
|
||||
- 'python3-lxc # OS Package'
|
||||
notes:
|
||||
- Containers must have a unique name. If you attempt to create a container
|
||||
with a name that already exists in the users namespace the module will
|
||||
@@ -184,10 +184,10 @@ notes:
|
||||
tarball of the running container. The "archive" option supports LVM backed
|
||||
containers and will create a snapshot of the running container when
|
||||
creating the archive.
|
||||
- If your distro does not have a package for "python2-lxc", which is a
|
||||
- If your distro does not have a package for C(python3-lxc), which is a
|
||||
requirement for this module, it can be installed from source at
|
||||
"https://github.com/lxc/python2-lxc" or installed via pip using the package
|
||||
name lxc-python2.
|
||||
U(https://github.com/lxc/python3-lxc) or installed via pip using the
|
||||
package name C(lxc).
|
||||
'''
|
||||
|
||||
EXAMPLES = r"""
|
||||
@@ -434,7 +434,6 @@ else:
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE
|
||||
from ansible.module_utils.six.moves import xrange
|
||||
from ansible.module_utils.common.text.converters import to_text, to_bytes
|
||||
|
||||
|
||||
@@ -559,7 +558,7 @@ popd
|
||||
def create_script(command):
|
||||
"""Write out a script onto a target.
|
||||
|
||||
This method should be backward compatible with Python 2.4+ when executing
|
||||
This method should be backward compatible with Python when executing
|
||||
from within the container.
|
||||
|
||||
:param command: command to run, this can be a script and can use spacing
|
||||
@@ -939,7 +938,7 @@ class LxcContainerManagement(object):
|
||||
"""
|
||||
|
||||
self.container = self.get_container_bind()
|
||||
for dummy in xrange(timeout):
|
||||
for dummy in range(timeout):
|
||||
if self._get_state() != 'running':
|
||||
self.container.start()
|
||||
self.state_change = True
|
||||
@@ -992,7 +991,7 @@ class LxcContainerManagement(object):
|
||||
:type timeout: ``int``
|
||||
"""
|
||||
|
||||
for dummy in xrange(timeout):
|
||||
for dummy in range(timeout):
|
||||
if not self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
|
||||
break
|
||||
|
||||
|
||||
@@ -482,8 +482,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
|
||||
taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
|
||||
|
||||
while timeout:
|
||||
if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
if self.api_task_ok(node, taskid):
|
||||
return True
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
@@ -496,8 +495,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
|
||||
def start_instance(self, vm, vmid, timeout):
|
||||
taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.start.post()
|
||||
while timeout:
|
||||
if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
if self.api_task_ok(vm['node'], taskid):
|
||||
return True
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
@@ -513,8 +511,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
|
||||
else:
|
||||
taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post()
|
||||
while timeout:
|
||||
if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
if self.api_task_ok(vm['node'], taskid):
|
||||
return True
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
@@ -527,8 +524,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
|
||||
def umount_instance(self, vm, vmid, timeout):
|
||||
taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.umount.post()
|
||||
while timeout:
|
||||
if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
if self.api_task_ok(vm['node'], taskid):
|
||||
return True
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
@@ -775,8 +771,7 @@ def main():
|
||||
taskid = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE).delete(vmid, **delete_params)
|
||||
|
||||
while timeout:
|
||||
task_status = proxmox.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()
|
||||
if (task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK'):
|
||||
if proxmox.api_task_ok(vm['node'], taskid):
|
||||
module.exit_json(changed=True, msg="VM %s removed" % vmid)
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
|
||||
744
plugins/modules/cloud/misc/proxmox_disk.py
Normal file
744
plugins/modules/cloud/misc/proxmox_disk.py
Normal file
@@ -0,0 +1,744 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (c) 2022, Castor Sky (@castorsky) <csky57@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: proxmox_disk
|
||||
short_description: Management of a disk of a Qemu(KVM) VM in a Proxmox VE cluster.
|
||||
version_added: 5.7.0
|
||||
description:
|
||||
- Allows you to perform some supported operations on a disk in Qemu(KVM) Virtual Machines in a Proxmox VE cluster.
|
||||
author: "Castor Sky (@castorsky) <csky57@gmail.com>"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The unique name of the VM.
|
||||
- You can specify either I(name) or I(vmid) or both of them.
|
||||
type: str
|
||||
vmid:
|
||||
description:
|
||||
- The unique ID of the VM.
|
||||
- You can specify either I(vmid) or I(name) or both of them.
|
||||
type: int
|
||||
disk:
|
||||
description:
|
||||
- The disk key (C(unused[n]), C(ide[n]), C(sata[n]), C(scsi[n]) or C(virtio[n])) you want to operate on.
|
||||
- Disk buses (IDE, SATA and so on) have fixed ranges of C(n) that accepted by Proxmox API.
|
||||
- >
|
||||
For IDE: 0-3;
|
||||
for SCSI: 0-30;
|
||||
for SATA: 0-5;
|
||||
for VirtIO: 0-15;
|
||||
for Unused: 0-255.
|
||||
type: str
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Indicates desired state of the disk.
|
||||
- >
|
||||
I(state=present) can be used to create, replace disk or update options in existing disk. It will create missing
|
||||
disk or update options in existing one by default. See the I(create) parameter description to control behavior
|
||||
of this option.
|
||||
- Some updates on options (like I(cache)) are not being applied instantly and require VM restart.
|
||||
- >
|
||||
Use I(state=detached) to detach existing disk from VM but do not remove it entirely.
|
||||
When I(state=detached) and disk is C(unused[n]) it will be left in same state (not removed).
|
||||
- >
|
||||
I(state=moved) may be used to change backing storage for the disk in bounds of the same VM
|
||||
or to send the disk to another VM (using the same backing storage).
|
||||
- >
|
||||
I(state=resized) intended to change the disk size. As of Proxmox 7.2 you can only increase the disk size
|
||||
because shrinking disks is not supported by the PVE API and has to be done manually.
|
||||
- To entirely remove the disk from backing storage use I(state=absent).
|
||||
type: str
|
||||
choices: ['present', 'resized', 'detached', 'moved', 'absent']
|
||||
default: present
|
||||
create:
|
||||
description:
|
||||
- With I(create) flag you can control behavior of I(state=present).
|
||||
- When I(create=disabled) it will not create new disk (if not exists) but will update options in existing disk.
|
||||
- When I(create=regular) it will either create new disk (if not exists) or update options in existing disk.
|
||||
- When I(create=forced) it will always create new disk (if disk exists it will be detached and left unused).
|
||||
type: str
|
||||
choices: ['disabled', 'regular', 'forced']
|
||||
default: regular
|
||||
storage:
|
||||
description:
|
||||
- The drive's backing storage.
|
||||
- Used only when I(state) is C(present).
|
||||
type: str
|
||||
size:
|
||||
description:
|
||||
- Desired volume size in GB to allocate when I(state=present) (specify I(size) without suffix).
|
||||
- >
|
||||
New (or additional) size of volume when I(state=resized). With the C(+) sign
|
||||
the value is added to the actual size of the volume
|
||||
and without it, the value is taken as an absolute one.
|
||||
type: str
|
||||
bwlimit:
|
||||
description:
|
||||
- Override I/O bandwidth limit (in KB/s).
|
||||
- Used only when I(state=moved).
|
||||
type: int
|
||||
delete_moved:
|
||||
description:
|
||||
- Delete the original disk after successful copy.
|
||||
- By default the original disk is kept as unused disk.
|
||||
- Used only when I(state=moved).
|
||||
type: bool
|
||||
target_disk:
|
||||
description:
|
||||
- The config key the disk will be moved to on the target VM (for example, C(ide0) or C(scsi1)).
|
||||
- Default is the source disk key.
|
||||
- Used only when I(state=moved).
|
||||
type: str
|
||||
target_storage:
|
||||
description:
|
||||
- Move the disk to this storage when I(state=moved).
|
||||
- You can move between storages only in scope of one VM.
|
||||
- Mutually exclusive with I(target_vmid).
|
||||
type: str
|
||||
target_vmid:
|
||||
description:
|
||||
- The (unique) ID of the VM where disk will be placed when I(state=moved).
|
||||
- You can move disk between VMs only when the same storage is used.
|
||||
- Mutually exclusive with I(target_vmid).
|
||||
type: int
|
||||
timeout:
|
||||
description:
|
||||
- Timeout in seconds to wait when moving disk.
|
||||
- Used only when I(state=moved).
|
||||
type: int
|
||||
default: 600
|
||||
aio:
|
||||
description:
|
||||
- AIO type to use.
|
||||
type: str
|
||||
choices: ['native', 'threads', 'io_uring']
|
||||
backup:
|
||||
description:
|
||||
- Whether the drive should be included when making backups.
|
||||
type: bool
|
||||
bps_max_length:
|
||||
description:
|
||||
- Maximum length of total r/w I/O bursts in seconds.
|
||||
type: int
|
||||
bps_rd_max_length:
|
||||
description:
|
||||
- Maximum length of read I/O bursts in seconds.
|
||||
type: int
|
||||
bps_wr_max_length:
|
||||
description:
|
||||
- Maximum length of write I/O bursts in seconds.
|
||||
type: int
|
||||
cache:
|
||||
description:
|
||||
- The drive's cache mode.
|
||||
type: str
|
||||
choices: ['none', 'writethrough', 'writeback', 'unsafe', 'directsync']
|
||||
cyls:
|
||||
description:
|
||||
- Force the drive's physical geometry to have a specific cylinder count.
|
||||
type: int
|
||||
detect_zeroes:
|
||||
description:
|
||||
- Control whether to detect and try to optimize writes of zeroes.
|
||||
type: bool
|
||||
discard:
|
||||
description:
|
||||
- Control whether to pass discard/trim requests to the underlying storage.
|
||||
type: str
|
||||
choices: ['ignore', 'on']
|
||||
format:
|
||||
description:
|
||||
- The drive's backing file's data format.
|
||||
type: str
|
||||
choices: ['raw', 'cow', 'qcow', 'qed', 'qcow2', 'vmdk', 'cloop']
|
||||
heads:
|
||||
description:
|
||||
- Force the drive's physical geometry to have a specific head count.
|
||||
type: int
|
||||
import_from:
|
||||
description:
|
||||
- Import volume from this existing one.
|
||||
- Volume string format
|
||||
- C(<STORAGE>:<VMID>/<FULL_NAME>) or C(<ABSOLUTE_PATH>/<FULL_NAME>)
|
||||
- Attention! Only root can use absolute paths.
|
||||
- This parameter is mutually exclusive with I(size).
|
||||
type: str
|
||||
iops:
|
||||
description:
|
||||
- Maximum total r/w I/O in operations per second.
|
||||
- You can specify either total limit or per operation (mutually exclusive with I(iops_rd) and I(iops_wr)).
|
||||
type: int
|
||||
iops_max:
|
||||
description:
|
||||
- Maximum unthrottled total r/w I/O pool in operations per second.
|
||||
type: int
|
||||
iops_max_length:
|
||||
description:
|
||||
- Maximum length of total r/w I/O bursts in seconds.
|
||||
type: int
|
||||
iops_rd:
|
||||
description:
|
||||
- Maximum read I/O in operations per second.
|
||||
- You can specify either read or total limit (mutually exclusive with I(iops)).
|
||||
type: int
|
||||
iops_rd_max:
|
||||
description:
|
||||
- Maximum unthrottled read I/O pool in operations per second.
|
||||
type: int
|
||||
iops_rd_max_length:
|
||||
description:
|
||||
- Maximum length of read I/O bursts in seconds.
|
||||
type: int
|
||||
iops_wr:
|
||||
description:
|
||||
- Maximum write I/O in operations per second.
|
||||
- You can specify either write or total limit (mutually exclusive with I(iops)).
|
||||
type: int
|
||||
iops_wr_max:
|
||||
description:
|
||||
- Maximum unthrottled write I/O pool in operations per second.
|
||||
type: int
|
||||
iops_wr_max_length:
|
||||
description:
|
||||
- Maximum length of write I/O bursts in seconds.
|
||||
type: int
|
||||
iothread:
|
||||
description:
|
||||
- Whether to use iothreads for this drive (only for SCSI and VirtIO)
|
||||
type: bool
|
||||
mbps:
|
||||
description:
|
||||
- Maximum total r/w speed in megabytes per second.
|
||||
- Can be fractional but use with caution - fractionals less than 1 are not supported officially.
|
||||
- You can specify either total limit or per operation (mutually exclusive with I(mbps_rd) and I(mbps_wr)).
|
||||
type: float
|
||||
mbps_max:
|
||||
description:
|
||||
- Maximum unthrottled total r/w pool in megabytes per second.
|
||||
type: float
|
||||
mbps_rd:
|
||||
description:
|
||||
- Maximum read speed in megabytes per second.
|
||||
- You can specify either read or total limit (mutually exclusive with I(mbps)).
|
||||
type: float
|
||||
mbps_rd_max:
|
||||
description:
|
||||
- Maximum unthrottled read pool in megabytes per second.
|
||||
type: float
|
||||
mbps_wr:
|
||||
description:
|
||||
- Maximum write speed in megabytes per second.
|
||||
- You can specify either write or total limit (mutually exclusive with I(mbps)).
|
||||
type: float
|
||||
mbps_wr_max:
|
||||
description:
|
||||
- Maximum unthrottled write pool in megabytes per second.
|
||||
type: float
|
||||
media:
|
||||
description:
|
||||
- The drive's media type.
|
||||
type: str
|
||||
choices: ['cdrom', 'disk']
|
||||
queues:
|
||||
description:
|
||||
- Number of queues (SCSI only).
|
||||
type: int
|
||||
replicate:
|
||||
description:
|
||||
- Whether the drive should considered for replication jobs.
|
||||
type: bool
|
||||
rerror:
|
||||
description:
|
||||
- Read error action.
|
||||
type: str
|
||||
choices: ['ignore', 'report', 'stop']
|
||||
ro:
|
||||
description:
|
||||
- Whether the drive is read-only.
|
||||
type: bool
|
||||
scsiblock:
|
||||
description:
|
||||
- Whether to use scsi-block for full passthrough of host block device.
|
||||
- Can lead to I/O errors in combination with low memory or high memory fragmentation on host.
|
||||
type: bool
|
||||
secs:
|
||||
description:
|
||||
- Force the drive's physical geometry to have a specific sector count.
|
||||
type: int
|
||||
serial:
|
||||
description:
|
||||
- The drive's reported serial number, url-encoded, up to 20 bytes long.
|
||||
type: str
|
||||
shared:
|
||||
description:
|
||||
- Mark this locally-managed volume as available on all nodes.
|
||||
- This option does not share the volume automatically, it assumes it is shared already!
|
||||
type: bool
|
||||
snapshot:
|
||||
description:
|
||||
- Control qemu's snapshot mode feature.
|
||||
- If activated, changes made to the disk are temporary and will be discarded when the VM is shutdown.
|
||||
type: bool
|
||||
ssd:
|
||||
description:
|
||||
- Whether to expose this drive as an SSD, rather than a rotational hard disk.
|
||||
type: bool
|
||||
trans:
|
||||
description:
|
||||
- Force disk geometry bios translation mode.
|
||||
type: str
|
||||
choices: ['auto', 'lba', 'none']
|
||||
werror:
|
||||
description:
|
||||
- Write error action.
|
||||
type: str
|
||||
choices: ['enospc', 'ignore', 'report', 'stop']
|
||||
wwn:
|
||||
description:
|
||||
- The drive's worldwide name, encoded as 16 bytes hex string, prefixed by C(0x).
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- community.general.proxmox.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create new disk in VM (do not rewrite in case it exists already)
|
||||
community.general.proxmox_disk:
|
||||
api_host: node1
|
||||
api_user: root@pam
|
||||
api_token_id: token1
|
||||
api_token_secret: some-token-data
|
||||
name: vm-name
|
||||
disk: scsi3
|
||||
backup: true
|
||||
cache: none
|
||||
storage: local-zfs
|
||||
size: 5
|
||||
state: present
|
||||
|
||||
- name: Create new disk in VM (force rewrite in case it exists already)
|
||||
community.general.proxmox_disk:
|
||||
api_host: node1
|
||||
api_user: root@pam
|
||||
api_token_id: token1
|
||||
api_token_secret: some-token-data
|
||||
vmid: 101
|
||||
disk: scsi3
|
||||
format: qcow2
|
||||
storage: local
|
||||
size: 16
|
||||
create: forced
|
||||
state: present
|
||||
|
||||
- name: Update existing disk
|
||||
community.general.proxmox_disk:
|
||||
api_host: node1
|
||||
api_user: root@pam
|
||||
api_token_id: token1
|
||||
api_token_secret: some-token-data
|
||||
vmid: 101
|
||||
disk: ide0
|
||||
backup: false
|
||||
ro: true
|
||||
aio: native
|
||||
state: present
|
||||
|
||||
- name: Grow existing disk
|
||||
community.general.proxmox_disk:
|
||||
api_host: node1
|
||||
api_user: root@pam
|
||||
api_token_id: token1
|
||||
api_token_secret: some-token-data
|
||||
vmid: 101
|
||||
disk: sata4
|
||||
size: +5G
|
||||
state: resized
|
||||
|
||||
- name: Detach disk (leave it unused)
|
||||
community.general.proxmox_disk:
|
||||
api_host: node1
|
||||
api_user: root@pam
|
||||
api_token_id: token1
|
||||
api_token_secret: some-token-data
|
||||
name: vm-name
|
||||
disk: virtio0
|
||||
state: detached
|
||||
|
||||
- name: Move disk to another storage
|
||||
community.general.proxmox_disk:
|
||||
api_host: node1
|
||||
api_user: root@pam
|
||||
api_password: secret
|
||||
vmid: 101
|
||||
disk: scsi7
|
||||
target_storage: local
|
||||
format: qcow2
|
||||
state: moved
|
||||
|
||||
- name: Move disk from one VM to another
|
||||
community.general.proxmox_disk:
|
||||
api_host: node1
|
||||
api_user: root@pam
|
||||
api_token_id: token1
|
||||
api_token_secret: some-token-data
|
||||
vmid: 101
|
||||
disk: scsi7
|
||||
target_vmid: 201
|
||||
state: moved
|
||||
|
||||
- name: Remove disk permanently
|
||||
community.general.proxmox_disk:
|
||||
api_host: node1
|
||||
api_user: root@pam
|
||||
api_password: secret
|
||||
vmid: 101
|
||||
disk: scsi4
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
vmid:
|
||||
description: The VM vmid.
|
||||
returned: success
|
||||
type: int
|
||||
sample: 101
|
||||
msg:
|
||||
description: A short message on what the module did.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "Disk scsi3 created in VM 101"
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec,
|
||||
ProxmoxAnsible)
|
||||
from re import compile, match, sub
|
||||
from time import sleep
|
||||
|
||||
|
||||
def disk_conf_str_to_dict(config_string):
    """Parse a Proxmox disk config string into a dict.

    A Proxmox disk config line has the shape
    ``<storage>:<volume>,opt1=val1,opt2=val2,...``.

    Returns a dict containing:
      - ``volume``: the ``storage:volume`` pair as one string
      - ``storage_name`` / ``volume_name``: the two halves of that pair
      - one string entry per ``key=value`` option
    """
    config = config_string.split(',')
    # The first comma-separated element is always "<storage>:<volume>";
    # split on the first ':' only, in case the volume part contains one.
    storage_volume = config.pop(0).split(':', 1)
    config.sort()
    storage_name = storage_volume[0]
    volume_name = storage_volume[1]
    config_current = dict(
        volume='%s:%s' % (storage_name, volume_name),
        storage_name=storage_name,
        volume_name=volume_name,
    )

    for option in config:
        # Split only on the first '=' so option values that themselves
        # contain '=' (e.g. serial strings) do not raise ValueError.
        k, v = option.split('=', 1)
        config_current[k] = v

    return config_current
|
||||
|
||||
|
||||
class ProxmoxDiskAnsible(ProxmoxAnsible):
    """Disk management operations (create/update/move) for Proxmox QEMU VMs."""

    # Module parameters that map 1:1 onto Proxmox disk create/update options.
    create_update_fields = [
        'aio', 'backup', 'bps_max_length', 'bps_rd_max_length', 'bps_wr_max_length',
        'cache', 'cyls', 'detect_zeroes', 'discard', 'format', 'heads', 'import_from', 'iops', 'iops_max',
        'iops_max_length', 'iops_rd', 'iops_rd_max', 'iops_rd_max_length', 'iops_wr', 'iops_wr_max',
        'iops_wr_max_length', 'iothread', 'mbps', 'mbps_max', 'mbps_rd', 'mbps_rd_max', 'mbps_wr', 'mbps_wr_max',
        'media', 'queues', 'replicate', 'rerror', 'ro', 'scsiblock', 'secs', 'serial', 'shared', 'snapshot',
        'ssd', 'trans', 'werror', 'wwn'
    ]
    # Valid disk index range per bus type (e.g. ide0..ide3, scsi0..scsi30).
    supported_bus_num_ranges = dict(
        ide=range(0, 4),
        scsi=range(0, 31),
        sata=range(0, 6),
        virtio=range(0, 16),
        unused=range(0, 256)
    )

    def get_create_attributes(self):
        """Return module params relevant for disk create/update, sanitized.

        - Drops parameters that were not provided (None).
        - Converts booleans to 0/1 ints as expected by the Proxmox API.
        - Keeps only keys listed in ``create_update_fields``.
        """
        params = dict((k, v) for k, v in self.module.params.items() if v is not None and k in self.create_update_fields)
        params.update(dict((k, int(v)) for k, v in params.items() if isinstance(v, bool)))
        return params

    def create_disk(self, disk, vmid, vm, vm_config):
        """Create or update a disk on the VM, depending on the ``create`` mode.

        Returns a ``(changed, message)`` tuple.
        """
        create = self.module.params['create']
        if create == 'disabled' and disk not in vm_config:
            # NOOP: disk missing and creation explicitly disabled.
            return False, "Disk %s not found in VM %s and creation was disabled in parameters." % (disk, vmid)

        if (create == 'regular' and disk not in vm_config) or (create == 'forced'):
            # CREATE: build the "<storage>:<size>[,opt=val...]" config string.
            attributes = self.get_create_attributes()
            # 'import_from' replaces the size with an import source.
            import_string = attributes.pop('import_from', None)

            if import_string:
                config_str = "%s:%s,import-from=%s" % (self.module.params["storage"], "0", import_string)
            else:
                config_str = "%s:%s" % (self.module.params["storage"], self.module.params["size"])

            for k, v in attributes.items():
                config_str += ',%s=%s' % (k, v)

            create_disk = {self.module.params["disk"]: config_str}
            self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(**create_disk)
            return True, "Disk %s created in VM %s" % (disk, vmid)

        if create in ['disabled', 'regular'] and disk in vm_config:
            # UPDATE: reuse the existing volume, apply the requested options.
            disk_config = disk_conf_str_to_dict(vm_config[disk])
            config_str = disk_config["volume"]
            attributes = self.get_create_attributes()
            # 'import_from' fails on disk updates
            attributes.pop('import_from', None)

            for k, v in attributes.items():
                config_str += ',%s=%s' % (k, v)

            # Now compare old and new config to detect if changes are needed
            for option in ['size', 'storage_name', 'volume', 'volume_name']:
                attributes.update({option: disk_config[option]})
            # Values in params are numbers, but strings are needed to compare with disk_config
            attributes = dict((k, str(v)) for k, v in attributes.items())
            if disk_config == attributes:
                return False, "Disk %s is up to date in VM %s" % (disk, vmid)

            update_disk = {self.module.params["disk"]: config_str}
            self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(**update_disk)
            return True, "Disk %s updated in VM %s" % (disk, vmid)

    def move_disk(self, disk, vmid, vm, vm_config):
        """Move a disk to another storage or to another VM.

        Returns True when the move task completed successfully, False when
        the disk is already on the requested target storage (no-op).
        Fails the module when the task does not finish within ``timeout``.
        """
        params = dict()
        params['disk'] = disk
        params['vmid'] = vmid
        params['bwlimit'] = self.module.params['bwlimit']
        params['storage'] = self.module.params['target_storage']
        params['target-disk'] = self.module.params['target_disk']
        params['target-vmid'] = self.module.params['target_vmid']
        params['format'] = self.module.params['format']
        params['delete'] = 1 if self.module.params.get('delete_moved', False) else 0
        # Remove not defined args
        params = dict((k, v) for k, v in params.items() if v is not None)

        if params.get('storage', False):
            # NOOP if the disk already lives on the requested storage.
            disk_config = disk_conf_str_to_dict(vm_config[disk])
            if params['storage'] == disk_config['storage_name']:
                return False

        taskid = self.proxmox_api.nodes(vm['node']).qemu(vmid).move_disk.post(**params)
        timeout = self.module.params['timeout']
        # Poll the task until it finishes or the timeout budget runs out.
        # BUGFIX: the previous loop had an unreachable `if timeout <= 0`
        # check inside `while timeout:` and returned True after exhausting
        # the budget, silently reporting success for an unfinished move.
        while timeout > 0:
            status_data = self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()
            if status_data['status'] == 'stopped' and status_data['exitstatus'] == 'OK':
                return True
            sleep(1)
            timeout -= 1
        self.module.fail_json(
            msg='Reached timeout while waiting for moving VM disk. Last line in task before timeout: %s' %
                self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
|
||||
|
||||
|
||||
def main():
    """Entry point: build the argument spec, validate the disk name, and
    dispatch on the ``state`` parameter (present/detached/moved/resized/absent).
    """
    # Start from the shared Proxmox auth options and add disk-specific ones.
    module_args = proxmox_auth_argument_spec()
    disk_args = dict(
        # Proxmox native parameters (passed through to the API as-is)
        aio=dict(type='str', choices=['native', 'threads', 'io_uring']),
        backup=dict(type='bool'),
        bps_max_length=dict(type='int'),
        bps_rd_max_length=dict(type='int'),
        bps_wr_max_length=dict(type='int'),
        cache=dict(type='str', choices=['none', 'writethrough', 'writeback', 'unsafe', 'directsync']),
        cyls=dict(type='int'),
        detect_zeroes=dict(type='bool'),
        discard=dict(type='str', choices=['ignore', 'on']),
        format=dict(type='str', choices=['raw', 'cow', 'qcow', 'qed', 'qcow2', 'vmdk', 'cloop']),
        heads=dict(type='int'),
        import_from=dict(type='str'),
        iops=dict(type='int'),
        iops_max=dict(type='int'),
        iops_max_length=dict(type='int'),
        iops_rd=dict(type='int'),
        iops_rd_max=dict(type='int'),
        iops_rd_max_length=dict(type='int'),
        iops_wr=dict(type='int'),
        iops_wr_max=dict(type='int'),
        iops_wr_max_length=dict(type='int'),
        iothread=dict(type='bool'),
        mbps=dict(type='float'),
        mbps_max=dict(type='float'),
        mbps_rd=dict(type='float'),
        mbps_rd_max=dict(type='float'),
        mbps_wr=dict(type='float'),
        mbps_wr_max=dict(type='float'),
        media=dict(type='str', choices=['cdrom', 'disk']),
        queues=dict(type='int'),
        replicate=dict(type='bool'),
        rerror=dict(type='str', choices=['ignore', 'report', 'stop']),
        ro=dict(type='bool'),
        scsiblock=dict(type='bool'),
        secs=dict(type='int'),
        serial=dict(type='str'),
        shared=dict(type='bool'),
        snapshot=dict(type='bool'),
        ssd=dict(type='bool'),
        trans=dict(type='str', choices=['auto', 'lba', 'none']),
        werror=dict(type='str', choices=['enospc', 'ignore', 'report', 'stop']),
        wwn=dict(type='str'),

        # Disk moving relates parameters
        bwlimit=dict(type='int'),
        target_storage=dict(type='str'),
        target_disk=dict(type='str'),
        target_vmid=dict(type='int'),
        delete_moved=dict(type='bool'),
        timeout=dict(type='int', default='600'),

        # Module related parameters
        name=dict(type='str'),
        vmid=dict(type='int'),
        disk=dict(type='str', required=True),
        storage=dict(type='str'),
        size=dict(type='str'),
        state=dict(type='str', choices=['present', 'resized', 'detached', 'moved', 'absent'],
                   default='present'),
        create=dict(type='str', choices=['disabled', 'regular', 'forced'], default='regular'),
    )

    module_args.update(disk_args)

    module = AnsibleModule(
        argument_spec=module_args,
        required_together=[('api_token_id', 'api_token_secret')],
        required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')],
        required_if=[
            ('create', 'forced', ['storage']),
            ('state', 'resized', ['size']),
        ],
        # Dependent throttle options: a *_max or *_length option only makes
        # sense when its base option is also given.
        required_by={
            'target_disk': 'target_vmid',
            'mbps_max': 'mbps',
            'mbps_rd_max': 'mbps_rd',
            'mbps_wr_max': 'mbps_wr',
            'bps_max_length': 'mbps_max',
            'bps_rd_max_length': 'mbps_rd_max',
            'bps_wr_max_length': 'mbps_wr_max',
            'iops_max': 'iops',
            'iops_rd_max': 'iops_rd',
            'iops_wr_max': 'iops_wr',
            'iops_max_length': 'iops_max',
            'iops_rd_max_length': 'iops_rd_max',
            'iops_wr_max_length': 'iops_wr_max',
        },
        supports_check_mode=False,
        mutually_exclusive=[
            ('target_vmid', 'target_storage'),
            ('mbps', 'mbps_rd'),
            ('mbps', 'mbps_wr'),
            ('iops', 'iops_rd'),
            ('iops', 'iops_wr'),
            ('import_from', 'size'),
        ]
    )

    proxmox = ProxmoxDiskAnsible(module)

    disk = module.params['disk']
    # Verify disk name has appropriate name: "<bus><number>", e.g. "scsi3".
    disk_regex = compile(r'^([a-z]+)([0-9]+)$')
    disk_bus = sub(disk_regex, r'\1', disk)
    disk_number = int(sub(disk_regex, r'\2', disk))
    if disk_bus not in proxmox.supported_bus_num_ranges:
        proxmox.module.fail_json(msg='Unsupported disk bus: %s' % disk_bus)
    elif disk_number not in proxmox.supported_bus_num_ranges[disk_bus]:
        bus_range = proxmox.supported_bus_num_ranges[disk_bus]
        proxmox.module.fail_json(msg='Disk %s number not in range %s..%s ' % (disk, bus_range[0], bus_range[-1]))

    name = module.params['name']
    state = module.params['state']
    # Resolve vmid by VM name if it was not given explicitly.
    vmid = module.params['vmid'] or proxmox.get_vmid(name)

    # Ensure VM id exists and retrieve its config
    vm = None
    vm_config = None
    try:
        vm = proxmox.get_vm(vmid)
        vm_config = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).config.get()
    except Exception as e:
        proxmox.module.fail_json(msg='Getting information for VM %s failed with exception: %s' % (vmid, str(e)))

    # Do not try to perform actions on missing disk
    if disk not in vm_config and state in ['resized', 'moved']:
        module.fail_json(vmid=vmid, msg='Unable to process missing disk %s in VM %s' % (disk, vmid))

    if state == 'present':
        # Create or update the disk; create_disk returns (changed, message).
        try:
            success, message = proxmox.create_disk(disk, vmid, vm, vm_config)
            if success:
                module.exit_json(changed=True, vmid=vmid, msg=message)
            else:
                module.exit_json(changed=False, vmid=vmid, msg=message)
        except Exception as e:
            module.fail_json(vmid=vmid, msg='Unable to create/update disk %s in VM %s: %s' % (disk, vmid, str(e)))

    elif state == 'detached':
        # Unlink the disk without deleting it (force=0 keeps it as "unusedN").
        try:
            if disk_bus == 'unused':
                module.exit_json(changed=False, vmid=vmid, msg='Disk %s already detached in VM %s' % (disk, vmid))
            if disk not in vm_config:
                module.exit_json(changed=False, vmid=vmid, msg="Disk %s not present in VM %s config" % (disk, vmid))
            proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).unlink.put(vmid=vmid, idlist=disk, force=0)
            module.exit_json(changed=True, vmid=vmid, msg="Disk %s detached from VM %s" % (disk, vmid))
        except Exception as e:
            module.fail_json(msg="Failed to detach disk %s from VM %s with exception: %s" % (disk, vmid, str(e)))

    elif state == 'moved':
        # Move the disk to another storage or VM; move_disk returns False
        # when the disk is already on the requested storage.
        try:
            disk_config = disk_conf_str_to_dict(vm_config[disk])
            disk_storage = disk_config["storage_name"]
            if proxmox.move_disk(disk, vmid, vm, vm_config):
                module.exit_json(changed=True, vmid=vmid,
                                 msg="Disk %s moved from VM %s storage %s" % (disk, vmid, disk_storage))
            else:
                module.exit_json(changed=False, vmid=vmid, msg="Disk %s already at %s storage" % (disk, disk_storage))
        except Exception as e:
            module.fail_json(msg="Failed to move disk %s in VM %s with exception: %s" % (disk, vmid, str(e)))

    elif state == 'resized':
        # Resize accepts absolute sizes or "+<delta>" growth, with an
        # optional K/M/G/T unit suffix.
        try:
            size = module.params['size']
            if not match(r'^\+?\d+(\.\d+)?[KMGT]?$', size):
                module.fail_json(msg="Unrecognized size pattern for disk %s: %s" % (disk, size))
            disk_config = disk_conf_str_to_dict(vm_config[disk])
            actual_size = disk_config['size']
            if size == actual_size:
                module.exit_json(changed=False, vmid=vmid, msg="Disk %s is already %s size" % (disk, size))
            proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).resize.set(vmid=vmid, disk=disk, size=size)
            module.exit_json(changed=True, vmid=vmid, msg="Disk %s resized in VM %s" % (disk, vmid))
        except Exception as e:
            module.fail_json(msg="Failed to resize disk %s in VM %s with exception: %s" % (disk, vmid, str(e)))

    elif state == 'absent':
        # Unlink with force=1 removes the disk image permanently.
        try:
            if disk not in vm_config:
                module.exit_json(changed=False, vmid=vmid, msg="Disk %s is already absent in VM %s" % (disk, vmid))
            proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).unlink.put(vmid=vmid, idlist=disk, force=1)
            module.exit_json(changed=True, vmid=vmid, msg="Disk %s removed from VM %s" % (disk, vmid))
        except Exception as e:
            module.fail_json(vmid=vmid, msg='Unable to remove disk %s from VM %s: %s' % (disk, vmid, str(e)))
|
||||
|
||||
|
||||
# Run the module only when executed directly (Ansible invokes it this way).
if __name__ == '__main__':
    main()
|
||||
@@ -866,8 +866,7 @@ class ProxmoxKvmAnsible(ProxmoxAnsible):
|
||||
timeout = self.module.params['timeout']
|
||||
|
||||
while timeout:
|
||||
task = self.proxmox_api.nodes(node).tasks(taskid).status.get()
|
||||
if task['status'] == 'stopped' and task['exitstatus'] == 'OK':
|
||||
if self.api_task_ok(node, taskid):
|
||||
# Wait an extra second as the API can be a ahead of the hypervisor
|
||||
time.sleep(1)
|
||||
return True
|
||||
|
||||
@@ -38,6 +38,17 @@ options:
|
||||
- For removal from config file, even if removing disk snapshot fails.
|
||||
default: false
|
||||
type: bool
|
||||
unbind:
|
||||
description:
|
||||
- This option only applies to LXC containers.
|
||||
- Allows to snapshot a container even if it has configured mountpoints.
|
||||
- Temporarily disables all configured mountpoints, takes snapshot, and finally restores original configuration.
|
||||
- If running, the container will be stopped and restarted to apply config changes.
|
||||
- Due to restrictions in the Proxmox API this option can only be used authenticating as C(root@pam) with I(api_password), API tokens do not work either.
|
||||
- See U(https://pve.proxmox.com/pve-docs/api-viewer/#/nodes/{node}/lxc/{vmid}/config) (PUT tab) for more details.
|
||||
default: false
|
||||
type: bool
|
||||
version_added: 5.7.0
|
||||
vmstate:
|
||||
description:
|
||||
- Snapshot includes RAM.
|
||||
@@ -78,6 +89,16 @@ EXAMPLES = r'''
|
||||
state: present
|
||||
snapname: pre-updates
|
||||
|
||||
- name: Create new snapshot for a container with configured mountpoints
|
||||
community.general.proxmox_snap:
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
vmid: 100
|
||||
state: present
|
||||
unbind: true # requires root@pam+password auth, API tokens are not supported
|
||||
snapname: pre-updates
|
||||
|
||||
- name: Remove container snapshot
|
||||
community.general.proxmox_snap:
|
||||
api_user: root@pam
|
||||
@@ -110,17 +131,89 @@ class ProxmoxSnapAnsible(ProxmoxAnsible):
|
||||
def snapshot(self, vm, vmid):
|
||||
return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).snapshot
|
||||
|
||||
def snapshot_create(self, vm, vmid, timeout, snapname, description, vmstate):
|
||||
def vmconfig(self, vm, vmid):
|
||||
return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).config
|
||||
|
||||
def vmstatus(self, vm, vmid):
|
||||
return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).status
|
||||
|
||||
def _container_mp_get(self, vm, vmid):
|
||||
cfg = self.vmconfig(vm, vmid).get()
|
||||
mountpoints = {}
|
||||
for key, value in cfg.items():
|
||||
if key.startswith('mp'):
|
||||
mountpoints[key] = value
|
||||
return mountpoints
|
||||
|
||||
def _container_mp_disable(self, vm, vmid, timeout, unbind, mountpoints, vmstatus):
|
||||
# shutdown container if running
|
||||
if vmstatus == 'running':
|
||||
self.shutdown_instance(vm, vmid, timeout)
|
||||
# delete all mountpoints configs
|
||||
self.vmconfig(vm, vmid).put(delete=' '.join(mountpoints))
|
||||
|
||||
def _container_mp_restore(self, vm, vmid, timeout, unbind, mountpoints, vmstatus):
|
||||
# NOTE: requires auth as `root@pam`, API tokens are not supported
|
||||
# see https://pve.proxmox.com/pve-docs/api-viewer/#/nodes/{node}/lxc/{vmid}/config
|
||||
# restore original config
|
||||
self.vmconfig(vm, vmid).put(**mountpoints)
|
||||
# start container (if was running before snap)
|
||||
if vmstatus == 'running':
|
||||
self.start_instance(vm, vmid, timeout)
|
||||
|
||||
def start_instance(self, vm, vmid, timeout):
|
||||
taskid = self.vmstatus(vm, vmid).start.post()
|
||||
while timeout:
|
||||
if self.api_task_ok(vm['node'], taskid):
|
||||
return True
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
self.module.fail_json(msg='Reached timeout while waiting for VM to start. Last line in task before timeout: %s' %
|
||||
self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
def shutdown_instance(self, vm, vmid, timeout):
|
||||
taskid = self.vmstatus(vm, vmid).shutdown.post()
|
||||
while timeout:
|
||||
if self.api_task_ok(vm['node'], taskid):
|
||||
return True
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
self.module.fail_json(msg='Reached timeout while waiting for VM to stop. Last line in task before timeout: %s' %
|
||||
self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
def snapshot_create(self, vm, vmid, timeout, snapname, description, vmstate, unbind):
|
||||
if self.module.check_mode:
|
||||
return True
|
||||
|
||||
if vm['type'] == 'lxc':
|
||||
if unbind is True:
|
||||
# check if credentials will work
|
||||
# WARN: it is crucial this check runs here!
|
||||
# The correct permissions are required only to reconfig mounts.
|
||||
# Not checking now would allow to remove the configuration BUT
|
||||
# fail later, leaving the container in a misconfigured state.
|
||||
if (
|
||||
self.module.params['api_user'] != 'root@pam'
|
||||
or not self.module.params['api_password']
|
||||
):
|
||||
self.module.fail_json(msg='`unbind=True` requires authentication as `root@pam` with `api_password`, API tokens are not supported.')
|
||||
return False
|
||||
mountpoints = self._container_mp_get(vm, vmid)
|
||||
vmstatus = self.vmstatus(vm, vmid).current().get()['status']
|
||||
if mountpoints:
|
||||
self._container_mp_disable(vm, vmid, timeout, unbind, mountpoints, vmstatus)
|
||||
taskid = self.snapshot(vm, vmid).post(snapname=snapname, description=description)
|
||||
else:
|
||||
taskid = self.snapshot(vm, vmid).post(snapname=snapname, description=description, vmstate=int(vmstate))
|
||||
|
||||
while timeout:
|
||||
status_data = self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()
|
||||
if status_data['status'] == 'stopped' and status_data['exitstatus'] == 'OK':
|
||||
if self.api_task_ok(vm['node'], taskid):
|
||||
if vm['type'] == 'lxc' and unbind is True and mountpoints:
|
||||
self._container_mp_restore(vm, vmid, timeout, unbind, mountpoints, vmstatus)
|
||||
return True
|
||||
if timeout == 0:
|
||||
self.module.fail_json(msg='Reached timeout while waiting for creating VM snapshot. Last line in task before timeout: %s' %
|
||||
@@ -128,6 +221,8 @@ class ProxmoxSnapAnsible(ProxmoxAnsible):
|
||||
|
||||
time.sleep(1)
|
||||
timeout -= 1
|
||||
if vm['type'] == 'lxc' and unbind is True and mountpoints:
|
||||
self._container_mp_restore(vm, vmid, timeout, unbind, mountpoints, vmstatus)
|
||||
return False
|
||||
|
||||
def snapshot_remove(self, vm, vmid, timeout, snapname, force):
|
||||
@@ -136,8 +231,7 @@ class ProxmoxSnapAnsible(ProxmoxAnsible):
|
||||
|
||||
taskid = self.snapshot(vm, vmid).delete(snapname, force=int(force))
|
||||
while timeout:
|
||||
status_data = self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()
|
||||
if status_data['status'] == 'stopped' and status_data['exitstatus'] == 'OK':
|
||||
if self.api_task_ok(vm['node'], taskid):
|
||||
return True
|
||||
if timeout == 0:
|
||||
self.module.fail_json(msg='Reached timeout while waiting for removing VM snapshot. Last line in task before timeout: %s' %
|
||||
@@ -153,8 +247,7 @@ class ProxmoxSnapAnsible(ProxmoxAnsible):
|
||||
|
||||
taskid = self.snapshot(vm, vmid)(snapname).post("rollback")
|
||||
while timeout:
|
||||
status_data = self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()
|
||||
if status_data['status'] == 'stopped' and status_data['exitstatus'] == 'OK':
|
||||
if self.api_task_ok(vm['node'], taskid):
|
||||
return True
|
||||
if timeout == 0:
|
||||
self.module.fail_json(msg='Reached timeout while waiting for rolling back VM snapshot. Last line in task before timeout: %s' %
|
||||
@@ -175,6 +268,7 @@ def main():
|
||||
description=dict(type='str'),
|
||||
snapname=dict(type='str', default='ansible_snap'),
|
||||
force=dict(type='bool', default=False),
|
||||
unbind=dict(type='bool', default=False),
|
||||
vmstate=dict(type='bool', default=False),
|
||||
)
|
||||
module_args.update(snap_args)
|
||||
@@ -193,6 +287,7 @@ def main():
|
||||
snapname = module.params['snapname']
|
||||
timeout = module.params['timeout']
|
||||
force = module.params['force']
|
||||
unbind = module.params['unbind']
|
||||
vmstate = module.params['vmstate']
|
||||
|
||||
# If hostname is set get the VM id from ProxmoxAPI
|
||||
@@ -209,7 +304,7 @@ def main():
|
||||
if i['name'] == snapname:
|
||||
module.exit_json(changed=False, msg="Snapshot %s is already present" % snapname)
|
||||
|
||||
if proxmox.snapshot_create(vm, vmid, timeout, snapname, description, vmstate):
|
||||
if proxmox.snapshot_create(vm, vmid, timeout, snapname, description, vmstate, unbind):
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=False, msg="Snapshot %s would be created" % snapname)
|
||||
else:
|
||||
|
||||
@@ -131,8 +131,7 @@ class ProxmoxTemplateAnsible(ProxmoxAnsible):
|
||||
Check the task status and wait until the task is completed or the timeout is reached.
|
||||
"""
|
||||
while timeout:
|
||||
task_status = self.proxmox_api.nodes(node).tasks(taskid).status.get()
|
||||
if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
|
||||
if self.api_task_ok(node, taskid):
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
|
||||
@@ -80,9 +80,25 @@ options:
|
||||
aliases: [ 'variables_file' ]
|
||||
variables:
|
||||
description:
|
||||
- A group of key-values to override template variables or those in
|
||||
variables files.
|
||||
- A group of key-values pairs to override template variables or those in variables files.
|
||||
By default, only string and number values are allowed, which are passed on unquoted.
|
||||
- Support complex variable structures (lists, dictionaries, numbers, and booleans) to reflect terraform variable syntax when I(complex_vars=true).
|
||||
- Ansible integers or floats are mapped to terraform numbers.
|
||||
- Ansible strings are mapped to terraform strings.
|
||||
- Ansible dictionaries are mapped to terraform objects.
|
||||
- Ansible lists are mapped to terraform lists.
|
||||
- Ansible booleans are mapped to terraform booleans.
|
||||
- "B(Note) passwords passed as variables will be visible in the log output. Make sure to use I(no_log=true) in production!"
|
||||
type: dict
|
||||
complex_vars:
|
||||
description:
|
||||
- Enable/disable capability to handle complex variable structures for C(terraform).
|
||||
- If C(true) the I(variables) also accepts dictionaries, lists, and booleans to be passed to C(terraform).
|
||||
Strings that are passed are correctly quoted.
|
||||
- When disabled, supports only simple variables (strings, integers, and floats), and passes them on unquoted.
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 5.7.0
|
||||
targets:
|
||||
description:
|
||||
- A list of specific resources to target in this plan/application. The
|
||||
@@ -188,6 +204,26 @@ EXAMPLES = """
|
||||
- /path/to/plugins_dir_1
|
||||
- /path/to/plugins_dir_2
|
||||
|
||||
- name: Complex variables example
|
||||
community.general.terraform:
|
||||
project_path: '{{ project_dir }}'
|
||||
state: present
|
||||
complex_vars: true
|
||||
variables:
|
||||
vm_name: "{{ inventory_hostname }}"
|
||||
vm_vcpus: 2
|
||||
vm_mem: 2048
|
||||
vm_additional_disks:
|
||||
- label: "Third Disk"
|
||||
size: 40
|
||||
thin_provisioned: true
|
||||
unit_number: 2
|
||||
- label: "Fourth Disk"
|
||||
size: 22
|
||||
thin_provisioned: true
|
||||
unit_number: 3
|
||||
force_init: true
|
||||
|
||||
### Example directory structure for plugin_paths example
|
||||
# $ tree /path/to/plugins_dir_1
|
||||
# /path/to/plugins_dir_1/
|
||||
@@ -237,6 +273,7 @@ import os
|
||||
import json
|
||||
import tempfile
|
||||
from ansible.module_utils.six.moves import shlex_quote
|
||||
from ansible.module_utils.six import integer_types
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
@@ -273,7 +310,7 @@ def _state_args(state_file):
|
||||
|
||||
|
||||
def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths):
|
||||
command = [bin_path, 'init', '-input=false']
|
||||
command = [bin_path, 'init', '-input=false', '-no-color']
|
||||
if backend_config:
|
||||
for key, val in backend_config.items():
|
||||
command.extend([
|
||||
@@ -298,7 +335,7 @@ def get_workspace_context(bin_path, project_path):
|
||||
command = [bin_path, 'workspace', 'list', '-no-color']
|
||||
rc, out, err = module.run_command(command, cwd=project_path)
|
||||
if rc != 0:
|
||||
module.warn("Failed to list Terraform workspaces:\r\n{0}".format(err))
|
||||
module.warn("Failed to list Terraform workspaces:\n{0}".format(err))
|
||||
for item in out.split('\n'):
|
||||
stripped_item = item.strip()
|
||||
if not stripped_item:
|
||||
@@ -360,12 +397,25 @@ def build_plan(command, project_path, variables_args, state_file, targets, state
|
||||
return plan_path, False, out, err, plan_command if state == 'planned' else command
|
||||
elif rc == 1:
|
||||
# failure to plan
|
||||
module.fail_json(msg='Terraform plan could not be created\r\nSTDOUT: {0}\r\n\r\nSTDERR: {1}'.format(out, err))
|
||||
module.fail_json(
|
||||
msg='Terraform plan could not be created\nSTDOUT: {out}\nSTDERR: {err}\nCOMMAND: {cmd} {args}'.format(
|
||||
out=out,
|
||||
err=err,
|
||||
cmd=' '.join(plan_command),
|
||||
args=' '.join([shlex_quote(arg) for arg in variables_args])
|
||||
)
|
||||
)
|
||||
elif rc == 2:
|
||||
# changes, but successful
|
||||
return plan_path, True, out, err, plan_command if state == 'planned' else command
|
||||
|
||||
module.fail_json(msg='Terraform plan failed with unexpected exit code {0}. \r\nSTDOUT: {1}\r\n\r\nSTDERR: {2}'.format(rc, out, err))
|
||||
module.fail_json(msg='Terraform plan failed with unexpected exit code {rc}.\nSTDOUT: {out}\nSTDERR: {err}\nCOMMAND: {cmd} {args}'.format(
|
||||
rc=rc,
|
||||
out=out,
|
||||
err=err,
|
||||
cmd=' '.join(plan_command),
|
||||
args=' '.join([shlex_quote(arg) for arg in variables_args])
|
||||
))
|
||||
|
||||
|
||||
def main():
|
||||
@@ -379,6 +429,7 @@ def main():
|
||||
purge_workspace=dict(type='bool', default=False),
|
||||
state=dict(default='present', choices=['present', 'absent', 'planned']),
|
||||
variables=dict(type='dict'),
|
||||
complex_vars=dict(type='bool', default=False),
|
||||
variables_files=dict(aliases=['variables_file'], type='list', elements='path'),
|
||||
plan_file=dict(type='path'),
|
||||
state_file=dict(type='path'),
|
||||
@@ -405,6 +456,7 @@ def main():
|
||||
purge_workspace = module.params.get('purge_workspace')
|
||||
state = module.params.get('state')
|
||||
variables = module.params.get('variables') or {}
|
||||
complex_vars = module.params.get('complex_vars')
|
||||
variables_files = module.params.get('variables_files')
|
||||
plan_file = module.params.get('plan_file')
|
||||
state_file = module.params.get('state_file')
|
||||
@@ -449,12 +501,77 @@ def main():
|
||||
if state == 'present' and module.params.get('parallelism') is not None:
|
||||
command.append('-parallelism=%d' % module.params.get('parallelism'))
|
||||
|
||||
def format_args(vars):
|
||||
if isinstance(vars, str):
|
||||
return '"{string}"'.format(string=vars.replace('\\', '\\\\').replace('"', '\\"'))
|
||||
elif isinstance(vars, bool):
|
||||
if vars:
|
||||
return 'true'
|
||||
else:
|
||||
return 'false'
|
||||
return str(vars)
|
||||
|
||||
def process_complex_args(vars):
|
||||
ret_out = []
|
||||
if isinstance(vars, dict):
|
||||
for k, v in vars.items():
|
||||
if isinstance(v, dict):
|
||||
ret_out.append('{0}={{{1}}}'.format(k, process_complex_args(v)))
|
||||
elif isinstance(v, list):
|
||||
ret_out.append("{0}={1}".format(k, process_complex_args(v)))
|
||||
elif isinstance(v, (integer_types, float, str, bool)):
|
||||
ret_out.append('{0}={1}'.format(k, format_args(v)))
|
||||
else:
|
||||
# only to handle anything unforeseen
|
||||
module.fail_json(msg="Supported types are, dictionaries, lists, strings, integer_types, boolean and float.")
|
||||
if isinstance(vars, list):
|
||||
l_out = []
|
||||
for item in vars:
|
||||
if isinstance(item, dict):
|
||||
l_out.append("{{{0}}}".format(process_complex_args(item)))
|
||||
elif isinstance(item, list):
|
||||
l_out.append("{0}".format(process_complex_args(item)))
|
||||
elif isinstance(item, (str, integer_types, float, bool)):
|
||||
l_out.append(format_args(item))
|
||||
else:
|
||||
# only to handle anything unforeseen
|
||||
module.fail_json(msg="Supported types are, dictionaries, lists, strings, integer_types, boolean and float.")
|
||||
|
||||
ret_out.append("[{0}]".format(",".join(l_out)))
|
||||
return ",".join(ret_out)
|
||||
|
||||
variables_args = []
|
||||
for k, v in variables.items():
|
||||
variables_args.extend([
|
||||
'-var',
|
||||
'{0}={1}'.format(k, v)
|
||||
])
|
||||
if complex_vars:
|
||||
for k, v in variables.items():
|
||||
if isinstance(v, dict):
|
||||
variables_args.extend([
|
||||
'-var',
|
||||
'{0}={{{1}}}'.format(k, process_complex_args(v))
|
||||
])
|
||||
elif isinstance(v, list):
|
||||
variables_args.extend([
|
||||
'-var',
|
||||
'{0}={1}'.format(k, process_complex_args(v))
|
||||
])
|
||||
# on the top-level we need to pass just the python string with necessary
|
||||
# terraform string escape sequences
|
||||
elif isinstance(v, str):
|
||||
variables_args.extend([
|
||||
'-var',
|
||||
"{0}={1}".format(k, v)
|
||||
])
|
||||
else:
|
||||
variables_args.extend([
|
||||
'-var',
|
||||
'{0}={1}'.format(k, format_args(v))
|
||||
])
|
||||
else:
|
||||
for k, v in variables.items():
|
||||
variables_args.extend([
|
||||
'-var',
|
||||
'{0}={1}'.format(k, v)
|
||||
])
|
||||
|
||||
if variables_files:
|
||||
for f in variables_files:
|
||||
variables_args.extend(['-var-file', f])
|
||||
|
||||
@@ -310,7 +310,7 @@ def do_ini(module, filename, section=None, option=None, values=None,
|
||||
# override option with no value to option with value if not allow_no_value
|
||||
if len(values) > 0:
|
||||
for index, line in enumerate(section_lines):
|
||||
if not changed_lines[index] and match_active_opt(option, section_lines[index]): # pylint: disable=unnecessary-list-index-lookup
|
||||
if not changed_lines[index] and match_active_opt(option, line):
|
||||
newline = assignment_format % (option, values.pop(0))
|
||||
(changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg)
|
||||
if len(values) == 0:
|
||||
|
||||
@@ -279,20 +279,20 @@ def main():
|
||||
module.fail_json(msg='Either the `name` or `id` has to be specified on each role.')
|
||||
# Fetch missing role_id
|
||||
if role['id'] is None:
|
||||
role_id = kc.get_client_role_by_name(gid, cid, role['name'], realm=realm)
|
||||
role_id = kc.get_client_role_id_by_name(cid, role['name'], realm=realm)
|
||||
if role_id is not None:
|
||||
role['id'] = role_id
|
||||
else:
|
||||
module.fail_json(msg='Could not fetch role %s:' % (role['name']))
|
||||
# Fetch missing role_name
|
||||
else:
|
||||
role['name'] = kc.get_client_rolemapping_by_id(gid, cid, role['id'], realm=realm)['name']
|
||||
role['name'] = kc.get_client_group_rolemapping_by_id(gid, cid, role['id'], realm=realm)['name']
|
||||
if role['name'] is None:
|
||||
module.fail_json(msg='Could not fetch role %s' % (role['id']))
|
||||
|
||||
# Get effective client-level role mappings
|
||||
available_roles_before = kc.get_client_available_rolemappings(gid, cid, realm=realm)
|
||||
assigned_roles_before = kc.get_client_composite_rolemappings(gid, cid, realm=realm)
|
||||
available_roles_before = kc.get_client_group_available_rolemappings(gid, cid, realm=realm)
|
||||
assigned_roles_before = kc.get_client_group_composite_rolemappings(gid, cid, realm=realm)
|
||||
|
||||
result['existing'] = assigned_roles_before
|
||||
result['proposed'] = roles
|
||||
@@ -326,7 +326,7 @@ def main():
|
||||
module.exit_json(**result)
|
||||
kc.add_group_rolemapping(gid, cid, update_roles, realm=realm)
|
||||
result['msg'] = 'Roles %s assigned to group %s.' % (update_roles, group_name)
|
||||
assigned_roles_after = kc.get_client_composite_rolemappings(gid, cid, realm=realm)
|
||||
assigned_roles_after = kc.get_client_group_composite_rolemappings(gid, cid, realm=realm)
|
||||
result['end_state'] = assigned_roles_after
|
||||
module.exit_json(**result)
|
||||
else:
|
||||
@@ -338,7 +338,7 @@ def main():
|
||||
module.exit_json(**result)
|
||||
kc.delete_group_rolemapping(gid, cid, update_roles, realm=realm)
|
||||
result['msg'] = 'Roles %s removed from group %s.' % (update_roles, group_name)
|
||||
assigned_roles_after = kc.get_client_composite_rolemappings(gid, cid, realm=realm)
|
||||
assigned_roles_after = kc.get_client_group_composite_rolemappings(gid, cid, realm=realm)
|
||||
result['end_state'] = assigned_roles_after
|
||||
module.exit_json(**result)
|
||||
# Do nothing
|
||||
|
||||
401
plugins/modules/identity/keycloak/keycloak_user_rolemapping.py
Normal file
401
plugins/modules/identity/keycloak/keycloak_user_rolemapping.py
Normal file
@@ -0,0 +1,401 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2022, Dušan Marković (@bratwurzt)
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: keycloak_user_rolemapping
|
||||
|
||||
short_description: Allows administration of Keycloak user_rolemapping with the Keycloak API
|
||||
|
||||
version_added: 5.7.0
|
||||
|
||||
description:
|
||||
- This module allows you to add, remove or modify Keycloak user_rolemapping with the Keycloak REST API.
|
||||
It requires access to the REST API via OpenID Connect; the user connecting and the client being
|
||||
used must have the requisite access rights. In a default Keycloak installation, admin-cli
|
||||
and an admin user would work, as would a separate client definition with the scope tailored
|
||||
to your needs and a user having the expected roles.
|
||||
|
||||
- The names of module options are snake_cased versions of the camelCase ones found in the
|
||||
Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
|
||||
|
||||
- Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
|
||||
be returned that way by this module. You may pass single values for attributes when calling the module,
|
||||
and this will be translated into a list suitable for the API.
|
||||
|
||||
- When updating a user_rolemapping, where possible provide the role ID to the module. This removes a lookup
|
||||
to the API to translate the name into the role ID.
|
||||
|
||||
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- State of the user_rolemapping.
|
||||
- On C(present), the user_rolemapping will be created if it does not yet exist, or updated with the parameters you provide.
|
||||
- On C(absent), the user_rolemapping will be removed if it exists.
|
||||
default: 'present'
|
||||
type: str
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
|
||||
realm:
|
||||
type: str
|
||||
description:
|
||||
- They Keycloak realm under which this role_representation resides.
|
||||
default: 'master'
|
||||
|
||||
target_username:
|
||||
type: str
|
||||
description:
|
||||
- Username of the user roles are mapped to.
|
||||
- This parameter is not required (can be replaced by uid for less API call).
|
||||
|
||||
uid:
|
||||
type: str
|
||||
description:
|
||||
- ID of the user to be mapped.
|
||||
- This parameter is not required for updating or deleting the rolemapping but
|
||||
providing it will reduce the number of API calls required.
|
||||
|
||||
service_account_user_client_id:
|
||||
type: str
|
||||
description:
|
||||
- Client ID of the service-account-user to be mapped.
|
||||
- This parameter is not required for updating or deleting the rolemapping but
|
||||
providing it will reduce the number of API calls required.
|
||||
|
||||
client_id:
|
||||
type: str
|
||||
description:
|
||||
- Name of the client to be mapped (different than I(cid)).
|
||||
- This parameter is required if I(cid) is not provided (can be replaced by I(cid)
|
||||
to reduce the number of API calls that must be made).
|
||||
|
||||
cid:
|
||||
type: str
|
||||
description:
|
||||
- ID of the client to be mapped.
|
||||
- This parameter is not required for updating or deleting the rolemapping but
|
||||
providing it will reduce the number of API calls required.
|
||||
|
||||
roles:
|
||||
description:
|
||||
- Roles to be mapped to the user.
|
||||
type: list
|
||||
elements: dict
|
||||
suboptions:
|
||||
name:
|
||||
type: str
|
||||
description:
|
||||
- Name of the role representation.
|
||||
- This parameter is required only when creating or updating the role_representation.
|
||||
id:
|
||||
type: str
|
||||
description:
|
||||
- The unique identifier for this role_representation.
|
||||
- This parameter is not required for updating or deleting a role_representation but
|
||||
providing it will reduce the number of API calls required.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.keycloak
|
||||
|
||||
|
||||
author:
|
||||
- Dušan Marković (@bratwurzt)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Map a client role to a user, authentication with credentials
|
||||
community.general.keycloak_user_rolemapping:
|
||||
realm: MyCustomRealm
|
||||
auth_client_id: admin-cli
|
||||
auth_keycloak_url: https://auth.example.com/auth
|
||||
auth_realm: master
|
||||
auth_username: USERNAME
|
||||
auth_password: PASSWORD
|
||||
state: present
|
||||
client_id: client1
|
||||
user_id: user1Id
|
||||
roles:
|
||||
- name: role_name1
|
||||
id: role_id1
|
||||
- name: role_name2
|
||||
id: role_id2
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Map a client role to a service account user for a client, authentication with credentials
|
||||
community.general.keycloak_user_rolemapping:
|
||||
realm: MyCustomRealm
|
||||
auth_client_id: admin-cli
|
||||
auth_keycloak_url: https://auth.example.com/auth
|
||||
auth_realm: master
|
||||
auth_username: USERNAME
|
||||
auth_password: PASSWORD
|
||||
state: present
|
||||
client_id: client1
|
||||
service_account_user_client_id: clientIdOfServiceAccount
|
||||
roles:
|
||||
- name: role_name1
|
||||
id: role_id1
|
||||
- name: role_name2
|
||||
id: role_id2
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Map a client role to a user, authentication with token
|
||||
community.general.keycloak_user_rolemapping:
|
||||
realm: MyCustomRealm
|
||||
auth_client_id: admin-cli
|
||||
auth_keycloak_url: https://auth.example.com/auth
|
||||
token: TOKEN
|
||||
state: present
|
||||
client_id: client1
|
||||
target_username: user1
|
||||
roles:
|
||||
- name: role_name1
|
||||
id: role_id1
|
||||
- name: role_name2
|
||||
id: role_id2
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Unmap client role from a user
|
||||
community.general.keycloak_user_rolemapping:
|
||||
realm: MyCustomRealm
|
||||
auth_client_id: admin-cli
|
||||
auth_keycloak_url: https://auth.example.com/auth
|
||||
auth_realm: master
|
||||
auth_username: USERNAME
|
||||
auth_password: PASSWORD
|
||||
state: absent
|
||||
client_id: client1
|
||||
uid: 70e3ae72-96b6-11e6-9056-9737fd4d0764
|
||||
roles:
|
||||
- name: role_name1
|
||||
id: role_id1
|
||||
- name: role_name2
|
||||
id: role_id2
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
msg:
|
||||
description: Message as to what action was taken.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "Role role1 assigned to user user1."
|
||||
|
||||
proposed:
|
||||
description: Representation of proposed client role mapping.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
clientId: "test"
|
||||
}
|
||||
|
||||
existing:
|
||||
description:
|
||||
- Representation of existing client role mapping.
|
||||
- The sample is truncated.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"adminUrl": "http://www.example.com/admin_url",
|
||||
"attributes": {
|
||||
"request.object.signature.alg": "RS256",
|
||||
}
|
||||
}
|
||||
|
||||
end_state:
|
||||
description:
|
||||
- Representation of client role mapping after module execution.
|
||||
- The sample is truncated.
|
||||
returned: on success
|
||||
type: dict
|
||||
sample: {
|
||||
"adminUrl": "http://www.example.com/admin_url",
|
||||
"attributes": {
|
||||
"request.object.signature.alg": "RS256",
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
|
||||
keycloak_argument_spec, get_token, KeycloakError, is_struct_included
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
Module execution
|
||||
|
||||
:return:
|
||||
"""
|
||||
argument_spec = keycloak_argument_spec()
|
||||
|
||||
roles_spec = dict(
|
||||
name=dict(type='str'),
|
||||
id=dict(type='str'),
|
||||
)
|
||||
|
||||
meta_args = dict(
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
realm=dict(default='master'),
|
||||
uid=dict(type='str'),
|
||||
target_username=dict(type='str'),
|
||||
service_account_user_client_id=dict(type='str'),
|
||||
cid=dict(type='str'),
|
||||
client_id=dict(type='str'),
|
||||
roles=dict(type='list', elements='dict', options=roles_spec),
|
||||
)
|
||||
|
||||
argument_spec.update(meta_args)
|
||||
|
||||
module = AnsibleModule(argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password'],
|
||||
['uid', 'target_username', 'service_account_user_client_id']]),
|
||||
required_together=([['auth_realm', 'auth_username', 'auth_password']]))
|
||||
|
||||
result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
|
||||
|
||||
# Obtain access token, initialize API
|
||||
try:
|
||||
connection_header = get_token(module.params)
|
||||
except KeycloakError as e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
kc = KeycloakAPI(module, connection_header)
|
||||
|
||||
realm = module.params.get('realm')
|
||||
state = module.params.get('state')
|
||||
cid = module.params.get('cid')
|
||||
client_id = module.params.get('client_id')
|
||||
uid = module.params.get('uid')
|
||||
target_username = module.params.get('target_username')
|
||||
service_account_user_client_id = module.params.get('service_account_user_client_id')
|
||||
roles = module.params.get('roles')
|
||||
|
||||
# Check the parameters
|
||||
if uid is None and target_username is None and service_account_user_client_id is None:
|
||||
module.fail_json(msg='Either the `target_username`, `uid` or `service_account_user_client_id` has to be specified.')
|
||||
|
||||
# Get the potential missing parameters
|
||||
if uid is None and service_account_user_client_id is None:
|
||||
user_rep = kc.get_user_by_username(username=target_username, realm=realm)
|
||||
if user_rep is not None:
|
||||
uid = user_rep.get('id')
|
||||
else:
|
||||
module.fail_json(msg='Could not fetch user for username %s:' % target_username)
|
||||
else:
|
||||
if uid is None and target_username is None:
|
||||
user_rep = kc.get_service_account_user_by_client_id(client_id=service_account_user_client_id, realm=realm)
|
||||
if user_rep is not None:
|
||||
uid = user_rep['id']
|
||||
else:
|
||||
module.fail_json(msg='Could not fetch service-account-user for client_id %s:' % target_username)
|
||||
|
||||
if cid is None and client_id is not None:
|
||||
cid = kc.get_client_id(client_id=client_id, realm=realm)
|
||||
if cid is None:
|
||||
module.fail_json(msg='Could not fetch client %s:' % client_id)
|
||||
if roles is None:
|
||||
module.exit_json(msg="Nothing to do (no roles specified).")
|
||||
else:
|
||||
for role_index, role in enumerate(roles, start=0):
|
||||
if role.get('name') is None and role.get('id') is None:
|
||||
module.fail_json(msg='Either the `name` or `id` has to be specified on each role.')
|
||||
# Fetch missing role_id
|
||||
if role.get('id') is None:
|
||||
if cid is None:
|
||||
role_id = kc.get_realm_role(name=role.get('name'), realm=realm)['id']
|
||||
else:
|
||||
role_id = kc.get_client_role_id_by_name(cid=cid, name=role.get('name'), realm=realm)
|
||||
if role_id is not None:
|
||||
role['id'] = role_id
|
||||
else:
|
||||
module.fail_json(msg='Could not fetch role %s for client_id %s or realm %s' % (role.get('name'), client_id, realm))
|
||||
# Fetch missing role_name
|
||||
else:
|
||||
if cid is None:
|
||||
role['name'] = kc.get_realm_user_rolemapping_by_id(uid=uid, rid=role.get('id'), realm=realm)['name']
|
||||
else:
|
||||
role['name'] = kc.get_client_user_rolemapping_by_id(uid=uid, cid=cid, rid=role.get('id'), realm=realm)['name']
|
||||
if role.get('name') is None:
|
||||
module.fail_json(msg='Could not fetch role %s for client_id %s or realm %s' % (role.get('id'), client_id, realm))
|
||||
|
||||
# Get effective role mappings
|
||||
if cid is None:
|
||||
available_roles_before = kc.get_realm_user_available_rolemappings(uid=uid, realm=realm)
|
||||
assigned_roles_before = kc.get_realm_user_composite_rolemappings(uid=uid, realm=realm)
|
||||
else:
|
||||
available_roles_before = kc.get_client_user_available_rolemappings(uid=uid, cid=cid, realm=realm)
|
||||
assigned_roles_before = kc.get_client_user_composite_rolemappings(uid=uid, cid=cid, realm=realm)
|
||||
|
||||
result['existing'] = assigned_roles_before
|
||||
result['proposed'] = roles
|
||||
|
||||
update_roles = []
|
||||
for role_index, role in enumerate(roles, start=0):
|
||||
# Fetch roles to assign if state present
|
||||
if state == 'present':
|
||||
for available_role in available_roles_before:
|
||||
if role.get('name') == available_role.get('name'):
|
||||
update_roles.append({
|
||||
'id': role.get('id'),
|
||||
'name': role.get('name'),
|
||||
})
|
||||
# Fetch roles to remove if state absent
|
||||
else:
|
||||
for assigned_role in assigned_roles_before:
|
||||
if role.get('name') == assigned_role.get('name'):
|
||||
update_roles.append({
|
||||
'id': role.get('id'),
|
||||
'name': role.get('name'),
|
||||
})
|
||||
|
||||
if len(update_roles):
|
||||
if state == 'present':
|
||||
# Assign roles
|
||||
result['changed'] = True
|
||||
if module._diff:
|
||||
result['diff'] = dict(before=assigned_roles_before, after=update_roles)
|
||||
if module.check_mode:
|
||||
module.exit_json(**result)
|
||||
kc.add_user_rolemapping(uid=uid, cid=cid, role_rep=update_roles, realm=realm)
|
||||
result['msg'] = 'Roles %s assigned to userId %s.' % (update_roles, uid)
|
||||
if cid is None:
|
||||
assigned_roles_after = kc.get_realm_user_composite_rolemappings(uid=uid, realm=realm)
|
||||
else:
|
||||
assigned_roles_after = kc.get_client_user_composite_rolemappings(uid=uid, cid=cid, realm=realm)
|
||||
result['end_state'] = assigned_roles_after
|
||||
module.exit_json(**result)
|
||||
else:
|
||||
# Remove mapping of role
|
||||
result['changed'] = True
|
||||
if module._diff:
|
||||
result['diff'] = dict(before=assigned_roles_before, after=update_roles)
|
||||
if module.check_mode:
|
||||
module.exit_json(**result)
|
||||
kc.delete_user_rolemapping(uid=uid, cid=cid, role_rep=update_roles, realm=realm)
|
||||
result['msg'] = 'Roles %s removed from userId %s.' % (update_roles, uid)
|
||||
if cid is None:
|
||||
assigned_roles_after = kc.get_realm_user_composite_rolemappings(uid=uid, realm=realm)
|
||||
else:
|
||||
assigned_roles_after = kc.get_client_user_composite_rolemappings(uid=uid, cid=cid, realm=realm)
|
||||
result['end_state'] = assigned_roles_after
|
||||
module.exit_json(**result)
|
||||
# Do nothing
|
||||
else:
|
||||
result['changed'] = False
|
||||
result['msg'] = 'Nothing to do, roles %s are correctly mapped to user for username %s.' % (roles, target_username)
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -72,6 +72,12 @@ options:
|
||||
default: present
|
||||
choices: [ 'present', 'absent' ]
|
||||
type: str
|
||||
timeout:
|
||||
description:
|
||||
- HTTP(S) connection timeout in seconds.
|
||||
default: 5
|
||||
type: int
|
||||
version_added: 5.7.0
|
||||
requirements:
|
||||
- "nc-dnsapi >= 0.1.3"
|
||||
author: "Nicolai Buchwitz (@nbuchwitz)"
|
||||
@@ -129,6 +135,18 @@ EXAMPLES = '''
|
||||
type: "AAAA"
|
||||
value: "::1"
|
||||
solo: true
|
||||
|
||||
- name: Increase the connection timeout to avoid problems with an unstable connection
|
||||
community.general.netcup_dns:
|
||||
api_key: "..."
|
||||
api_password: "..."
|
||||
customer_id: "..."
|
||||
domain: "example.com"
|
||||
name: "mail"
|
||||
type: "A"
|
||||
value: "127.0.0.1"
|
||||
timeout: 30
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
@@ -193,6 +211,7 @@ def main():
|
||||
priority=dict(required=False, type='int'),
|
||||
solo=dict(required=False, type='bool', default=False),
|
||||
state=dict(required=False, choices=['present', 'absent'], default='present'),
|
||||
timeout=dict(required=False, type='int', default=5),
|
||||
|
||||
),
|
||||
supports_check_mode=True
|
||||
@@ -211,6 +230,7 @@ def main():
|
||||
priority = module.params.get('priority')
|
||||
solo = module.params.get('solo')
|
||||
state = module.params.get('state')
|
||||
timeout = module.params.get('timeout')
|
||||
|
||||
if record_type == 'MX' and not priority:
|
||||
module.fail_json(msg="record type MX required the 'priority' argument")
|
||||
@@ -218,7 +238,7 @@ def main():
|
||||
has_changed = False
|
||||
all_records = []
|
||||
try:
|
||||
with nc_dnsapi.Client(customer_id, api_key, api_password) as api:
|
||||
with nc_dnsapi.Client(customer_id, api_key, api_password, timeout) as api:
|
||||
all_records = api.dns_records(domain)
|
||||
record = DNSRecord(record, record_type, value, priority=priority)
|
||||
|
||||
|
||||
@@ -2099,15 +2099,18 @@ class Nmcli(object):
|
||||
# MAC addresses are case insensitive, nmcli always reports them in uppercase
|
||||
value = value.upper()
|
||||
# ensure current_value is also converted to uppercase in case nmcli changes behaviour
|
||||
current_value = current_value.upper()
|
||||
if current_value:
|
||||
current_value = current_value.upper()
|
||||
if key == 'gsm.apn':
|
||||
# Depending on version nmcli adds double-qoutes to gsm.apn
|
||||
# Need to strip them in order to compare both
|
||||
current_value = current_value.strip('"')
|
||||
if current_value:
|
||||
current_value = current_value.strip('"')
|
||||
if key == self.mtu_setting and self.mtu is None:
|
||||
self.mtu = 0
|
||||
if key == 'vpn.data':
|
||||
current_value = sorted(re.sub(r'\s*=\s*', '=', part.strip(), count=1) for part in current_value.split(','))
|
||||
if current_value:
|
||||
current_value = sorted(re.sub(r'\s*=\s*', '=', part.strip(), count=1) for part in current_value.split(','))
|
||||
value = sorted(part.strip() for part in value.split(','))
|
||||
else:
|
||||
# parameter does not exist
|
||||
|
||||
@@ -593,15 +593,22 @@ class Rhsm(RegistrationBase):
|
||||
consumed_pools = RhsmPools(self.module, consumed=True)
|
||||
|
||||
existing_pools = {}
|
||||
serials_to_remove = []
|
||||
for p in consumed_pools:
|
||||
existing_pools[p.get_pool_id()] = p.QuantityUsed
|
||||
pool_id = p.get_pool_id()
|
||||
quantity_used = p.get_quantity_used()
|
||||
existing_pools[pool_id] = quantity_used
|
||||
|
||||
quantity = pool_ids.get(pool_id, 0)
|
||||
if quantity is not None and quantity != quantity_used:
|
||||
serials_to_remove.append(p.Serial)
|
||||
|
||||
serials_to_remove = [p.Serial for p in consumed_pools if pool_ids.get(p.get_pool_id(), 0) != p.QuantityUsed]
|
||||
serials = self.unsubscribe(serials=serials_to_remove)
|
||||
|
||||
missing_pools = {}
|
||||
for pool_id, quantity in sorted(pool_ids.items()):
|
||||
if existing_pools.get(pool_id, 0) != quantity:
|
||||
quantity_used = existing_pools.get(pool_id, 0)
|
||||
if quantity is None and quantity_used == 0 or quantity not in (None, 0, quantity_used):
|
||||
missing_pools[pool_id] = quantity
|
||||
|
||||
self.subscribe_by_pool_ids(missing_pools)
|
||||
@@ -635,6 +642,9 @@ class RhsmPool(object):
|
||||
def get_pool_id(self):
|
||||
return getattr(self, 'PoolId', getattr(self, 'PoolID'))
|
||||
|
||||
def get_quantity_used(self):
|
||||
return int(getattr(self, 'QuantityUsed'))
|
||||
|
||||
def subscribe(self):
|
||||
args = "subscription-manager attach --pool %s" % self.get_pool_id()
|
||||
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
|
||||
|
||||
@@ -113,6 +113,12 @@ options:
|
||||
- Redfish HostInterface instance ID if multiple HostInterfaces are present.
|
||||
type: str
|
||||
version_added: '4.1.0'
|
||||
sessions_config:
|
||||
required: false
|
||||
description:
|
||||
- Setting dict of Sessions.
|
||||
type: dict
|
||||
version_added: '5.7.0'
|
||||
|
||||
author: "Jose Delarosa (@jose-delarosa)"
|
||||
'''
|
||||
@@ -235,6 +241,16 @@ EXAMPLES = '''
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Set SessionService Session Timeout to 30 minutes
|
||||
community.general.redfish_config:
|
||||
category: Sessions
|
||||
command: SetSessionService
|
||||
sessions_config:
|
||||
SessionTimeout: 1800
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
@@ -254,7 +270,8 @@ from ansible.module_utils.common.text.converters import to_native
|
||||
CATEGORY_COMMANDS_ALL = {
|
||||
"Systems": ["SetBiosDefaultSettings", "SetBiosAttributes", "SetBootOrder",
|
||||
"SetDefaultBootOrder"],
|
||||
"Manager": ["SetNetworkProtocols", "SetManagerNic", "SetHostInterface"]
|
||||
"Manager": ["SetNetworkProtocols", "SetManagerNic", "SetHostInterface"],
|
||||
"Sessions": ["SetSessionService"],
|
||||
}
|
||||
|
||||
|
||||
@@ -284,6 +301,7 @@ def main():
|
||||
strip_etag_quotes=dict(type='bool', default=False),
|
||||
hostinterface_config=dict(type='dict', default={}),
|
||||
hostinterface_id=dict(),
|
||||
sessions_config=dict(type='dict', default={}),
|
||||
),
|
||||
required_together=[
|
||||
('username', 'password'),
|
||||
@@ -330,6 +348,9 @@ def main():
|
||||
# HostInterface instance ID
|
||||
hostinterface_id = module.params['hostinterface_id']
|
||||
|
||||
# Sessions config options
|
||||
sessions_config = module.params['sessions_config']
|
||||
|
||||
# Build root URI
|
||||
root_uri = "https://" + module.params['baseuri']
|
||||
rf_utils = RedfishUtils(creds, root_uri, timeout, module,
|
||||
@@ -376,6 +397,16 @@ def main():
|
||||
elif command == "SetHostInterface":
|
||||
result = rf_utils.set_hostinterface_attributes(hostinterface_config, hostinterface_id)
|
||||
|
||||
elif category == "Sessions":
|
||||
# execute only if we find a Sessions resource
|
||||
result = rf_utils._find_sessionservice_resource()
|
||||
if result['ret'] is False:
|
||||
module.fail_json(msg=to_native(result['msg']))
|
||||
|
||||
for command in command_list:
|
||||
if command == "SetSessionService":
|
||||
result = rf_utils.set_session_service(sessions_config)
|
||||
|
||||
# Return data back or fail with proper message
|
||||
if result['ret'] is True:
|
||||
if result.get('warning'):
|
||||
|
||||
@@ -197,15 +197,15 @@ def main():
|
||||
name = module.params['name']
|
||||
state = module.params['state']
|
||||
|
||||
if not os.path.exists("/etc/locale.gen"):
|
||||
if os.path.exists("/var/lib/locales/supported.d/"):
|
||||
# Ubuntu created its own system to manage locales.
|
||||
ubuntuMode = True
|
||||
if not os.path.exists("/var/lib/locales/supported.d/"):
|
||||
if os.path.exists("/etc/locale.gen"):
|
||||
# We found the common way to manage locales.
|
||||
ubuntuMode = False
|
||||
else:
|
||||
module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?")
|
||||
else:
|
||||
# We found the common way to manage locales.
|
||||
ubuntuMode = False
|
||||
# Ubuntu created its own system to manage locales.
|
||||
ubuntuMode = True
|
||||
|
||||
if not is_available(name, ubuntuMode):
|
||||
module.fail_json(msg="The locale you've entered is not available "
|
||||
|
||||
@@ -0,0 +1,4 @@
|
||||
# Copyright (c) 2022, Dušan Marković (@bratwurzt)
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
unsupported
|
||||
@@ -0,0 +1,143 @@
|
||||
# Copyright (c) 2022, Dušan Marković (@bratwurzt)
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
- name: Create realm
|
||||
community.general.keycloak_realm:
|
||||
auth_keycloak_url: "{{ url }}"
|
||||
auth_realm: "{{ admin_realm }}"
|
||||
auth_username: "{{ admin_user }}"
|
||||
auth_password: "{{ admin_password }}"
|
||||
id: "{{ realm }}"
|
||||
realm: "{{ realm }}"
|
||||
state: present
|
||||
|
||||
- name: Create client
|
||||
community.general.keycloak_client:
|
||||
auth_keycloak_url: "{{ url }}"
|
||||
auth_realm: "{{ admin_realm }}"
|
||||
auth_username: "{{ admin_user }}"
|
||||
auth_password: "{{ admin_password }}"
|
||||
realm: "{{ realm }}"
|
||||
client_id: "{{ client_id }}"
|
||||
service_accounts_enabled: True
|
||||
state: present
|
||||
register: client
|
||||
|
||||
- name: Create new realm role
|
||||
community.general.keycloak_role:
|
||||
auth_keycloak_url: "{{ url }}"
|
||||
auth_realm: "{{ admin_realm }}"
|
||||
auth_username: "{{ admin_user }}"
|
||||
auth_password: "{{ admin_password }}"
|
||||
realm: "{{ realm }}"
|
||||
name: "{{ role }}"
|
||||
description: "{{ description_1 }}"
|
||||
state: present
|
||||
|
||||
- name: Map a realm role to client service account
|
||||
vars:
|
||||
- roles: [ {'name': '{{ role }}'} ]
|
||||
community.general.keycloak_user_rolemapping:
|
||||
auth_keycloak_url: "{{ url }}"
|
||||
auth_realm: "{{ admin_realm }}"
|
||||
auth_username: "{{ admin_user }}"
|
||||
auth_password: "{{ admin_password }}"
|
||||
realm: "{{ realm }}"
|
||||
service_account_user_client_id: "{{ client_id }}"
|
||||
roles: "{{ roles }}"
|
||||
state: present
|
||||
register: result
|
||||
|
||||
- name: Assert realm role is assigned
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.end_state | selectattr("clientRole", "eq", false) | selectattr("name", "eq", "{{role}}") | list | count > 0
|
||||
|
||||
- name: Unmap a realm role from client service account
|
||||
vars:
|
||||
- roles: [ {'name': '{{ role }}'} ]
|
||||
community.general.keycloak_user_rolemapping:
|
||||
auth_keycloak_url: "{{ url }}"
|
||||
auth_realm: "{{ admin_realm }}"
|
||||
auth_username: "{{ admin_user }}"
|
||||
auth_password: "{{ admin_password }}"
|
||||
realm: "{{ realm }}"
|
||||
service_account_user_client_id: "{{ client_id }}"
|
||||
roles: "{{ roles }}"
|
||||
state: absent
|
||||
register: result
|
||||
|
||||
- name: Assert realm role is unassigned
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
- (result.end_state | length) == (result.existing | length) - 1
|
||||
- result.existing | selectattr("clientRole", "eq", false) | selectattr("name", "eq", "{{role}}") | list | count > 0
|
||||
- result.end_state | selectattr("clientRole", "eq", false) | selectattr("name", "eq", "{{role}}") | list | count == 0
|
||||
|
||||
- name: Delete existing realm role
|
||||
community.general.keycloak_role:
|
||||
auth_keycloak_url: "{{ url }}"
|
||||
auth_realm: "{{ admin_realm }}"
|
||||
auth_username: "{{ admin_user }}"
|
||||
auth_password: "{{ admin_password }}"
|
||||
realm: "{{ realm }}"
|
||||
name: "{{ role }}"
|
||||
state: absent
|
||||
|
||||
- name: Create new client role
|
||||
community.general.keycloak_role:
|
||||
auth_keycloak_url: "{{ url }}"
|
||||
auth_realm: "{{ admin_realm }}"
|
||||
auth_username: "{{ admin_user }}"
|
||||
auth_password: "{{ admin_password }}"
|
||||
realm: "{{ realm }}"
|
||||
client_id: "{{ client_id }}"
|
||||
name: "{{ role }}"
|
||||
description: "{{ description_1 }}"
|
||||
state: present
|
||||
|
||||
- name: Map a client role to client service account
|
||||
vars:
|
||||
- roles: [ {'name': '{{ role }}'} ]
|
||||
community.general.keycloak_user_rolemapping:
|
||||
auth_keycloak_url: "{{ url }}"
|
||||
auth_realm: "{{ admin_realm }}"
|
||||
auth_username: "{{ admin_user }}"
|
||||
auth_password: "{{ admin_password }}"
|
||||
realm: "{{ realm }}"
|
||||
client_id: "{{ client_id }}"
|
||||
service_account_user_client_id: "{{ client_id }}"
|
||||
roles: "{{ roles }}"
|
||||
state: present
|
||||
register: result
|
||||
|
||||
- name: Assert client role is assigned
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.end_state | selectattr("clientRole", "eq", true) | selectattr("name", "eq", "{{role}}") | list | count > 0
|
||||
|
||||
- name: Unmap a client role from client service account
|
||||
vars:
|
||||
- roles: [ {'name': '{{ role }}'} ]
|
||||
community.general.keycloak_user_rolemapping:
|
||||
auth_keycloak_url: "{{ url }}"
|
||||
auth_realm: "{{ admin_realm }}"
|
||||
auth_username: "{{ admin_user }}"
|
||||
auth_password: "{{ admin_password }}"
|
||||
realm: "{{ realm }}"
|
||||
client_id: "{{ client_id }}"
|
||||
service_account_user_client_id: "{{ client_id }}"
|
||||
roles: "{{ roles }}"
|
||||
state: absent
|
||||
register: result
|
||||
|
||||
- name: Assert client role is unassigned
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.end_state == []
|
||||
- result.existing | selectattr("clientRole", "eq", true) | selectattr("name", "eq", "{{role}}") | list | count > 0
|
||||
@@ -0,0 +1,14 @@
|
||||
---
|
||||
# Copyright (c) 2022, Dušan Marković (@bratwurzt)
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
url: http://localhost:8080/auth
|
||||
admin_realm: master
|
||||
admin_user: admin
|
||||
admin_password: password
|
||||
realm: myrealm
|
||||
client_id: myclient
|
||||
role: myrole
|
||||
description_1: desc 1
|
||||
description_2: desc 2
|
||||
@@ -313,6 +313,202 @@
|
||||
- results.vmid == {{ vmid }}
|
||||
- results.msg == "Nic net5 deleted on VM with vmid {{ vmid }}"
|
||||
|
||||
- name: Create new disk in VM
|
||||
tags: ['create_disk']
|
||||
block:
|
||||
- name: Add new disk (without force) to VM
|
||||
proxmox_disk:
|
||||
api_host: "{{ api_host }}"
|
||||
api_user: "{{ user }}@{{ domain }}"
|
||||
api_password: "{{ api_password | default(omit) }}"
|
||||
api_token_id: "{{ api_token_id | default(omit) }}"
|
||||
api_token_secret: "{{ api_token_secret | default(omit) }}"
|
||||
vmid: "{{ vmid }}"
|
||||
disk: "{{ disk }}"
|
||||
storage: "{{ storage }}"
|
||||
size: 1
|
||||
state: present
|
||||
register: results
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- results is changed
|
||||
- results.vmid == {{ vmid }}
|
||||
- results.msg == "Disk {{ disk }} created in VM {{ vmid }}"
|
||||
|
||||
- name: Try add disk again with same options (expect no-op)
|
||||
proxmox_disk:
|
||||
api_host: "{{ api_host }}"
|
||||
api_user: "{{ user }}@{{ domain }}"
|
||||
api_password: "{{ api_password | default(omit) }}"
|
||||
api_token_id: "{{ api_token_id | default(omit) }}"
|
||||
api_token_secret: "{{ api_token_secret | default(omit) }}"
|
||||
vmid: "{{ vmid }}"
|
||||
disk: "{{ disk }}"
|
||||
storage: "{{ storage }}"
|
||||
size: 1
|
||||
state: present
|
||||
register: results
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- results is not changed
|
||||
- results.vmid == {{ vmid }}
|
||||
- results.msg == "Disk {{ disk }} is up to date in VM {{ vmid }}"
|
||||
|
||||
- name: Add new disk replacing existing disk (detach old and leave unused)
|
||||
proxmox_disk:
|
||||
api_host: "{{ api_host }}"
|
||||
api_user: "{{ user }}@{{ domain }}"
|
||||
api_password: "{{ api_password | default(omit) }}"
|
||||
api_token_id: "{{ api_token_id | default(omit) }}"
|
||||
api_token_secret: "{{ api_token_secret | default(omit) }}"
|
||||
vmid: "{{ vmid }}"
|
||||
disk: "{{ disk }}"
|
||||
storage: "{{ storage }}"
|
||||
size: 2
|
||||
create: forced
|
||||
state: present
|
||||
register: results
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- results is changed
|
||||
- results.vmid == {{ vmid }}
|
||||
- results.msg == "Disk {{ disk }} created in VM {{ vmid }}"
|
||||
|
||||
- name: Update existing disk in VM
|
||||
tags: ['update_disk']
|
||||
block:
|
||||
- name: Update disk configuration
|
||||
proxmox_disk:
|
||||
api_host: "{{ api_host }}"
|
||||
api_user: "{{ user }}@{{ domain }}"
|
||||
api_password: "{{ api_password | default(omit) }}"
|
||||
api_token_id: "{{ api_token_id | default(omit) }}"
|
||||
api_token_secret: "{{ api_token_secret | default(omit) }}"
|
||||
vmid: "{{ vmid }}"
|
||||
disk: "{{ disk }}"
|
||||
backup: false
|
||||
ro: true
|
||||
aio: native
|
||||
state: present
|
||||
register: results
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- results is changed
|
||||
- results.vmid == {{ vmid }}
|
||||
- results.msg == "Disk {{ disk }} updated in VM {{ vmid }}"
|
||||
|
||||
- name: Grow existing disk in VM
|
||||
tags: ['grow_disk']
|
||||
block:
|
||||
- name: Increase disk size
|
||||
proxmox_disk:
|
||||
api_host: "{{ api_host }}"
|
||||
api_user: "{{ user }}@{{ domain }}"
|
||||
api_password: "{{ api_password | default(omit) }}"
|
||||
api_token_id: "{{ api_token_id | default(omit) }}"
|
||||
api_token_secret: "{{ api_token_secret | default(omit) }}"
|
||||
vmid: "{{ vmid }}"
|
||||
disk: "{{ disk }}"
|
||||
size: +1G
|
||||
state: resized
|
||||
register: results
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- results is changed
|
||||
- results.vmid == {{ vmid }}
|
||||
- results.msg == "Disk {{ disk }} resized in VM {{ vmid }}"
|
||||
|
||||
- name: Detach disk and leave it unused
|
||||
tags: ['detach_disk']
|
||||
block:
|
||||
- name: Detach disk
|
||||
proxmox_disk:
|
||||
api_host: "{{ api_host }}"
|
||||
api_user: "{{ user }}@{{ domain }}"
|
||||
api_password: "{{ api_password | default(omit) }}"
|
||||
api_token_id: "{{ api_token_id | default(omit) }}"
|
||||
api_token_secret: "{{ api_token_secret | default(omit) }}"
|
||||
vmid: "{{ vmid }}"
|
||||
disk: "{{ disk }}"
|
||||
state: detached
|
||||
register: results
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- results is changed
|
||||
- results.vmid == {{ vmid }}
|
||||
- results.msg == "Disk {{ disk }} detached from VM {{ vmid }}"
|
||||
|
||||
- name: Move disk to another storage or another VM
|
||||
tags: ['move_disk']
|
||||
block:
|
||||
- name: Move disk to another storage inside same VM
|
||||
proxmox_disk:
|
||||
api_host: "{{ api_host }}"
|
||||
api_user: "{{ user }}@{{ domain }}"
|
||||
api_password: "{{ api_password | default(omit) }}"
|
||||
api_token_id: "{{ api_token_id | default(omit) }}"
|
||||
api_token_secret: "{{ api_token_secret | default(omit) }}"
|
||||
vmid: "{{ vmid }}"
|
||||
disk: "{{ disk }}"
|
||||
target_storage: "{{ target_storage }}"
|
||||
format: "{{ target_format }}"
|
||||
state: moved
|
||||
register: results
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- results is changed
|
||||
- results.vmid == {{ vmid }}
|
||||
- results.msg == "Disk {{ disk }} moved from VM {{ vmid }} storage {{ results.storage }}"
|
||||
|
||||
- name: Move disk to another VM (same storage)
|
||||
proxmox_disk:
|
||||
api_host: "{{ api_host }}"
|
||||
api_user: "{{ user }}@{{ domain }}"
|
||||
api_password: "{{ api_password | default(omit) }}"
|
||||
api_token_id: "{{ api_token_id | default(omit) }}"
|
||||
api_token_secret: "{{ api_token_secret | default(omit) }}"
|
||||
vmid: "{{ vmid }}"
|
||||
disk: "{{ disk }}"
|
||||
target_vmid: "{{ target_vm }}"
|
||||
target_disk: "{{ target_disk }}"
|
||||
state: moved
|
||||
register: results
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- results is changed
|
||||
- results.vmid == {{ vmid }}
|
||||
- results.msg == "Disk {{ disk }} moved from VM {{ vmid }} storage {{ results.storage }}"
|
||||
|
||||
|
||||
- name: Remove disk permanently
|
||||
tags: ['remove_disk']
|
||||
block:
|
||||
- name: Remove disk
|
||||
proxmox_disk:
|
||||
api_host: "{{ api_host }}"
|
||||
api_user: "{{ user }}@{{ domain }}"
|
||||
api_password: "{{ api_password | default(omit) }}"
|
||||
api_token_id: "{{ api_token_id | default(omit) }}"
|
||||
api_token_secret: "{{ api_token_secret | default(omit) }}"
|
||||
vmid: "{{ target_vm }}"
|
||||
disk: "{{ target_disk }}"
|
||||
state: absent
|
||||
register: results
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- results is changed
|
||||
- results.vmid == {{ target_vm }}
|
||||
- results.msg == "Disk {{ target_disk }} removed from VM {{ target_vm }}"
|
||||
|
||||
- name: VM stop
|
||||
tags: [ 'stop' ]
|
||||
block:
|
||||
|
||||
@@ -0,0 +1,35 @@
|
||||
# Copyright (c) Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
resource "null_resource" "mynullresource" {
|
||||
triggers = {
|
||||
# plain dictionaries
|
||||
dict_name = var.dictionaries.name
|
||||
dict_age = var.dictionaries.age
|
||||
|
||||
# list of dicrs
|
||||
join_dic_name = join(",", var.list_of_objects.*.name)
|
||||
|
||||
# list-of-strings
|
||||
join_list = join(",", var.list_of_strings.*)
|
||||
|
||||
# testing boolean
|
||||
name = var.boolean ? var.dictionaries.name : var.list_of_objects[0].name
|
||||
|
||||
# top level string
|
||||
sample_string_1 = var.string_type
|
||||
|
||||
# nested lists
|
||||
num_from_matrix = var.list_of_lists[1][2]
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
output "string_type" {
|
||||
value = var.string_type
|
||||
}
|
||||
|
||||
output "multiline_string" {
|
||||
value = var.multiline_string
|
||||
}
|
||||
@@ -0,0 +1,62 @@
|
||||
# Copyright (c) Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
variable "dictionaries" {
|
||||
type = object({
|
||||
name = string
|
||||
age = number
|
||||
})
|
||||
description = "Same as ansible Dict"
|
||||
default = {
|
||||
age = 1
|
||||
name = "value"
|
||||
}
|
||||
}
|
||||
|
||||
variable "list_of_strings" {
|
||||
type = list(string)
|
||||
description = "list of strings"
|
||||
validation {
|
||||
condition = (var.list_of_strings[1] == "cli specials\"&$%@#*!(){}[]:\"\" \\\\")
|
||||
error_message = "Strings do not match."
|
||||
}
|
||||
}
|
||||
|
||||
variable "list_of_objects" {
|
||||
type = list(object({
|
||||
name = string
|
||||
age = number
|
||||
}))
|
||||
validation {
|
||||
condition = (var.list_of_objects[1].name == "cli specials\"&$%@#*!(){}[]:\"\" \\\\")
|
||||
error_message = "Strings do not match."
|
||||
}
|
||||
}
|
||||
|
||||
variable "boolean" {
|
||||
type = bool
|
||||
description = "boolean"
|
||||
|
||||
}
|
||||
|
||||
variable "string_type" {
|
||||
type = string
|
||||
validation {
|
||||
condition = (var.string_type == "cli specials\"&$%@#*!(){}[]:\"\" \\\\")
|
||||
error_message = "Strings do not match."
|
||||
}
|
||||
}
|
||||
|
||||
variable "multiline_string" {
|
||||
type = string
|
||||
validation {
|
||||
condition = (var.multiline_string == "one\ntwo\n")
|
||||
error_message = "Strings do not match."
|
||||
}
|
||||
}
|
||||
|
||||
variable "list_of_lists" {
|
||||
type = list(list(any))
|
||||
default = [ [ 1 ], [1, 2, 3], [3] ]
|
||||
}
|
||||
@@ -0,0 +1,60 @@
|
||||
---
|
||||
# Copyright (c) Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
- name: Create terraform project directory (complex variables)
|
||||
ansible.builtin.file:
|
||||
path: "{{ terraform_project_dir }}/complex_vars"
|
||||
state: directory
|
||||
mode: 0755
|
||||
|
||||
- name: copy terraform files to work space
|
||||
ansible.builtin.copy:
|
||||
src: "complex_variables/{{ item }}"
|
||||
dest: "{{ terraform_project_dir }}/complex_vars/{{ item }}"
|
||||
with_items:
|
||||
- main.tf
|
||||
- variables.tf
|
||||
|
||||
# This task would test the various complex variable structures of the with the
|
||||
# terraform null_resource
|
||||
- name: test complex variables
|
||||
community.general.terraform:
|
||||
project_path: "{{ terraform_project_dir }}/complex_vars"
|
||||
binary_path: "{{ terraform_binary_path }}"
|
||||
force_init: yes
|
||||
complex_vars: true
|
||||
variables:
|
||||
dictionaries:
|
||||
name: "kosala"
|
||||
age: 99
|
||||
list_of_strings:
|
||||
- "kosala"
|
||||
- 'cli specials"&$%@#*!(){}[]:"" \\'
|
||||
- "xxx"
|
||||
- "zzz"
|
||||
list_of_objects:
|
||||
- name: "kosala"
|
||||
age: 99
|
||||
- name: 'cli specials"&$%@#*!(){}[]:"" \\'
|
||||
age: 0.1
|
||||
- name: "zzz"
|
||||
age: 9.789
|
||||
- name: "lll"
|
||||
age: 1000
|
||||
boolean: true
|
||||
string_type: 'cli specials"&$%@#*!(){}[]:"" \\'
|
||||
multiline_string: |
|
||||
one
|
||||
two
|
||||
list_of_lists:
|
||||
- [ 1 ]
|
||||
- [ 11, 12, 13 ]
|
||||
- [ 2 ]
|
||||
- [ 3 ]
|
||||
state: present
|
||||
register: terraform_init_result
|
||||
|
||||
- assert:
|
||||
that: terraform_init_result is not failed
|
||||
@@ -9,17 +9,17 @@
|
||||
- name: Check for existing Terraform in path
|
||||
block:
|
||||
- name: Check if terraform is present in path
|
||||
command: "command -v terraform"
|
||||
ansible.builtin.command: "command -v terraform"
|
||||
register: terraform_binary_path
|
||||
ignore_errors: true
|
||||
|
||||
- name: Check Terraform version
|
||||
command: terraform version
|
||||
ansible.builtin.command: terraform version
|
||||
register: terraform_version_output
|
||||
when: terraform_binary_path.rc == 0
|
||||
|
||||
- name: Set terraform version
|
||||
set_fact:
|
||||
ansible.builtin.set_fact:
|
||||
terraform_version_installed: "{{ terraform_version_output.stdout | regex_search('(?!Terraform.*v)([0-9]+\\.[0-9]+\\.[0-9]+)') }}"
|
||||
when: terraform_version_output.changed
|
||||
|
||||
@@ -30,7 +30,7 @@
|
||||
block:
|
||||
|
||||
- name: Install Terraform
|
||||
debug:
|
||||
ansible.builtin.debug:
|
||||
msg: "Installing terraform {{ terraform_version }}, found: {{ terraform_version_installed | default('no terraform binary found') }}."
|
||||
|
||||
- name: Ensure unzip is present
|
||||
@@ -39,7 +39,7 @@
|
||||
state: present
|
||||
|
||||
- name: Install Terraform binary
|
||||
unarchive:
|
||||
ansible.builtin.unarchive:
|
||||
src: "{{ terraform_url }}"
|
||||
dest: "{{ remote_tmp_dir }}"
|
||||
mode: 0755
|
||||
@@ -52,22 +52,16 @@
|
||||
# path from the 'Check if terraform is present in path' task, and lastly, the fallback path.
|
||||
|
||||
- name: Set path to terraform binary
|
||||
set_fact:
|
||||
ansible.builtin.set_fact:
|
||||
terraform_binary_path: "{{ terraform_binary_path.stdout or remote_tmp_dir ~ '/terraform' }}"
|
||||
|
||||
- name: Create terraform project directory
|
||||
file:
|
||||
path: "{{ terraform_project_dir }}/{{ item['name'] }}"
|
||||
state: directory
|
||||
mode: 0755
|
||||
loop: "{{ terraform_provider_versions }}"
|
||||
loop_control:
|
||||
index_var: provider_index
|
||||
|
||||
- name: Loop over provider upgrade test tasks
|
||||
include_tasks: test_provider_upgrade.yml
|
||||
ansible.builtin.include_tasks: test_provider_upgrade.yml
|
||||
vars:
|
||||
tf_provider: "{{ terraform_provider_versions[provider_index] }}"
|
||||
loop: "{{ terraform_provider_versions }}"
|
||||
loop_control:
|
||||
index_var: provider_index
|
||||
|
||||
- name: Test Complex Varibles
|
||||
ansible.builtin.include_tasks: complex_variables.yml
|
||||
|
||||
@@ -3,6 +3,15 @@
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
- name: Create terraform project directory (provider upgrade)
|
||||
file:
|
||||
path: "{{ terraform_project_dir }}/{{ item['name'] }}"
|
||||
state: directory
|
||||
mode: 0755
|
||||
loop: "{{ terraform_provider_versions }}"
|
||||
loop_control:
|
||||
index_var: provider_index
|
||||
|
||||
- name: Output terraform provider test project
|
||||
ansible.builtin.template:
|
||||
src: templates/provider_test/main.tf.j2
|
||||
|
||||
@@ -22,7 +22,6 @@ plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no-
|
||||
plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type
|
||||
plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter
|
||||
plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice
|
||||
plugins/modules/files/ini_file.py pylint:bad-option-value
|
||||
plugins/modules/packaging/language/yarn.py use-argspec-type-path
|
||||
plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-syntax-error
|
||||
plugins/modules/remote_management/manageiq/manageiq_policies.py validate-modules:parameter-state-invalid-choice
|
||||
|
||||
@@ -17,7 +17,6 @@ plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no-
|
||||
plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type
|
||||
plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter
|
||||
plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice
|
||||
plugins/modules/files/ini_file.py pylint:bad-option-value
|
||||
plugins/modules/packaging/language/yarn.py use-argspec-type-path
|
||||
plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-syntax-error
|
||||
plugins/modules/remote_management/manageiq/manageiq_policies.py validate-modules:parameter-state-invalid-choice
|
||||
|
||||
@@ -17,7 +17,6 @@ plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no-
|
||||
plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type
|
||||
plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter
|
||||
plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice
|
||||
plugins/modules/files/ini_file.py pylint:bad-option-value
|
||||
plugins/modules/packaging/language/yarn.py use-argspec-type-path
|
||||
plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-syntax-error
|
||||
plugins/modules/remote_management/manageiq/manageiq_policies.py validate-modules:parameter-state-invalid-choice
|
||||
|
||||
38
tests/sanity/ignore-2.15.txt
Normal file
38
tests/sanity/ignore-2.15.txt
Normal file
@@ -0,0 +1,38 @@
|
||||
.azure-pipelines/scripts/publish-codecov.py replace-urlopen
|
||||
plugins/modules/cloud/univention/udm_user.py import-3.11 # Uses deprecated stdlib library 'crypt'
|
||||
plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path
|
||||
plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen
|
||||
plugins/modules/cloud/lxd/lxd_project.py use-argspec-type-path # expanduser() applied to constants
|
||||
plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice
|
||||
plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed
|
||||
plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice
|
||||
plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path
|
||||
plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values
|
||||
plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error
|
||||
plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc # unused param - removed in 6.0.0
|
||||
plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-list-no-elements
|
||||
plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-type-not-in-doc
|
||||
plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:undocumented-parameter
|
||||
plugins/modules/cloud/univention/udm_share.py validate-modules:parameter-list-no-elements
|
||||
plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no-elements
|
||||
plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type
|
||||
plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter
|
||||
plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice
|
||||
plugins/modules/packaging/language/yarn.py use-argspec-type-path
|
||||
plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-syntax-error
|
||||
plugins/modules/remote_management/manageiq/manageiq_policies.py validate-modules:parameter-state-invalid-choice
|
||||
plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-choices-do-not-match-spec # missing docs on suboptions
|
||||
plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-missing-type # missing docs on suboptions
|
||||
plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions
|
||||
plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions
|
||||
plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:parameter-state-invalid-choice
|
||||
plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice
|
||||
plugins/modules/system/homectl.py import-3.11 # Uses deprecated stdlib library 'crypt'
|
||||
plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter
|
||||
plugins/modules/system/osx_defaults.py validate-modules:parameter-state-invalid-choice
|
||||
plugins/modules/system/parted.py validate-modules:parameter-state-invalid-choice
|
||||
plugins/modules/system/puppet.py use-argspec-type-path
|
||||
plugins/modules/system/puppet.py validate-modules:parameter-invalid # invalid alias - removed in 7.0.0
|
||||
plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path
|
||||
plugins/modules/system/xfconf.py validate-modules:return-syntax-error
|
||||
plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path
|
||||
3
tests/sanity/ignore-2.15.txt.license
Normal file
3
tests/sanity/ignore-2.15.txt.license
Normal file
@@ -0,0 +1,3 @@
|
||||
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
SPDX-License-Identifier: GPL-3.0-or-later
|
||||
SPDX-FileCopyrightText: Ansible Project
|
||||
@@ -21,9 +21,9 @@ from ansible.module_utils.six import StringIO
|
||||
|
||||
|
||||
@contextmanager
|
||||
def patch_keycloak_api(get_group_by_name=None, get_client_id=None, get_client_role_by_name=None,
|
||||
get_client_rolemapping_by_id=None, get_client_available_rolemappings=None,
|
||||
get_client_composite_rolemappings=None, add_group_rolemapping=None,
|
||||
def patch_keycloak_api(get_group_by_name=None, get_client_id=None, get_client_role_id_by_name=None,
|
||||
get_client_group_rolemapping_by_id=None, get_client_group_available_rolemappings=None,
|
||||
get_client_group_composite_rolemappings=None, add_group_rolemapping=None,
|
||||
delete_group_rolemapping=None):
|
||||
"""Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server
|
||||
|
||||
@@ -44,21 +44,21 @@ def patch_keycloak_api(get_group_by_name=None, get_client_id=None, get_client_ro
|
||||
side_effect=get_group_by_name) as mock_get_group_by_name:
|
||||
with patch.object(obj, 'get_client_id',
|
||||
side_effect=get_client_id) as mock_get_client_id:
|
||||
with patch.object(obj, 'get_client_role_by_name',
|
||||
side_effect=get_client_role_by_name) as mock_get_client_role_by_name:
|
||||
with patch.object(obj, 'get_client_rolemapping_by_id',
|
||||
side_effect=get_client_rolemapping_by_id) as mock_get_client_rolemapping_by_id:
|
||||
with patch.object(obj, 'get_client_available_rolemappings',
|
||||
side_effect=get_client_available_rolemappings) as mock_get_client_available_rolemappings:
|
||||
with patch.object(obj, 'get_client_composite_rolemappings',
|
||||
side_effect=get_client_composite_rolemappings) as mock_get_client_composite_rolemappings:
|
||||
with patch.object(obj, 'get_client_role_id_by_name',
|
||||
side_effect=get_client_role_id_by_name) as mock_get_client_role_id_by_name:
|
||||
with patch.object(obj, 'get_client_group_rolemapping_by_id',
|
||||
side_effect=get_client_group_rolemapping_by_id) as mock_get_client_group_rolemapping_by_id:
|
||||
with patch.object(obj, 'get_client_group_available_rolemappings',
|
||||
side_effect=get_client_group_available_rolemappings) as mock_get_client_group_available_rolemappings:
|
||||
with patch.object(obj, 'get_client_group_composite_rolemappings',
|
||||
side_effect=get_client_group_composite_rolemappings) as mock_get_client_group_composite_rolemappings:
|
||||
with patch.object(obj, 'add_group_rolemapping',
|
||||
side_effect=add_group_rolemapping) as mock_add_group_rolemapping:
|
||||
with patch.object(obj, 'delete_group_rolemapping',
|
||||
side_effect=delete_group_rolemapping) as mock_delete_group_rolemapping:
|
||||
yield mock_get_group_by_name, mock_get_client_id, mock_get_client_role_by_name, mock_add_group_rolemapping, \
|
||||
mock_get_client_rolemapping_by_id, mock_get_client_available_rolemappings, mock_get_client_composite_rolemappings, \
|
||||
mock_delete_group_rolemapping
|
||||
yield mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping, \
|
||||
mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, \
|
||||
mock_get_client_group_composite_rolemappings, mock_delete_group_rolemapping
|
||||
|
||||
|
||||
def get_response(object_with_future_response, method, get_id_call_count):
|
||||
@@ -144,8 +144,8 @@ class TestKeycloakRealm(ModuleTestCase):
|
||||
"subGroups": "[]"
|
||||
}]
|
||||
return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727"
|
||||
return_value_get_client_role_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe"
|
||||
return_value_get_client_available_rolemappings = [[
|
||||
return_value_get_client_role_id_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe"
|
||||
return_value_get_client_group_available_rolemappings = [[
|
||||
{
|
||||
"clientRole": "true",
|
||||
"composite": "false",
|
||||
@@ -161,7 +161,7 @@ class TestKeycloakRealm(ModuleTestCase):
|
||||
"name": "test_role1"
|
||||
}
|
||||
]]
|
||||
return_value_get_client_composite_rolemappings = [
|
||||
return_value_get_client_group_composite_rolemappings = [
|
||||
None,
|
||||
[
|
||||
{
|
||||
@@ -189,11 +189,11 @@ class TestKeycloakRealm(ModuleTestCase):
|
||||
|
||||
with mock_good_connection():
|
||||
with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id,
|
||||
get_client_role_by_name=return_value_get_client_role_by_name,
|
||||
get_client_available_rolemappings=return_value_get_client_available_rolemappings,
|
||||
get_client_composite_rolemappings=return_value_get_client_composite_rolemappings) \
|
||||
as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_by_name, mock_add_group_rolemapping,
|
||||
mock_get_client_rolemapping_by_id, mock_get_client_available_rolemappings, mock_get_client_composite_rolemappings,
|
||||
get_client_role_id_by_name=return_value_get_client_role_id_by_name,
|
||||
get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings,
|
||||
get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \
|
||||
as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping,
|
||||
mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings,
|
||||
mock_delete_group_rolemapping):
|
||||
with self.assertRaises(AnsibleExitJson) as exec_info:
|
||||
self.module.main()
|
||||
@@ -201,9 +201,9 @@ class TestKeycloakRealm(ModuleTestCase):
|
||||
self.assertEqual(mock_get_group_by_name.call_count, 1)
|
||||
self.assertEqual(mock_get_client_id.call_count, 1)
|
||||
self.assertEqual(mock_add_group_rolemapping.call_count, 1)
|
||||
self.assertEqual(mock_get_client_rolemapping_by_id.call_count, 0)
|
||||
self.assertEqual(mock_get_client_available_rolemappings.call_count, 1)
|
||||
self.assertEqual(mock_get_client_composite_rolemappings.call_count, 2)
|
||||
self.assertEqual(mock_get_client_group_rolemapping_by_id.call_count, 0)
|
||||
self.assertEqual(mock_get_client_group_available_rolemappings.call_count, 1)
|
||||
self.assertEqual(mock_get_client_group_composite_rolemappings.call_count, 2)
|
||||
self.assertEqual(mock_delete_group_rolemapping.call_count, 0)
|
||||
|
||||
# Verify that the module's changed status matches what is expected
|
||||
@@ -246,9 +246,9 @@ class TestKeycloakRealm(ModuleTestCase):
|
||||
"subGroups": "[]"
|
||||
}]
|
||||
return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727"
|
||||
return_value_get_client_role_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe"
|
||||
return_value_get_client_available_rolemappings = [[]]
|
||||
return_value_get_client_composite_rolemappings = [[
|
||||
return_value_get_client_role_id_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe"
|
||||
return_value_get_client_group_available_rolemappings = [[]]
|
||||
return_value_get_client_group_composite_rolemappings = [[
|
||||
{
|
||||
"clientRole": "true",
|
||||
"composite": "false",
|
||||
@@ -273,11 +273,11 @@ class TestKeycloakRealm(ModuleTestCase):
|
||||
|
||||
with mock_good_connection():
|
||||
with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id,
|
||||
get_client_role_by_name=return_value_get_client_role_by_name,
|
||||
get_client_available_rolemappings=return_value_get_client_available_rolemappings,
|
||||
get_client_composite_rolemappings=return_value_get_client_composite_rolemappings) \
|
||||
as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_by_name, mock_add_group_rolemapping,
|
||||
mock_get_client_rolemapping_by_id, mock_get_client_available_rolemappings, mock_get_client_composite_rolemappings,
|
||||
get_client_role_id_by_name=return_value_get_client_role_id_by_name,
|
||||
get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings,
|
||||
get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \
|
||||
as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping,
|
||||
mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings,
|
||||
mock_delete_group_rolemapping):
|
||||
with self.assertRaises(AnsibleExitJson) as exec_info:
|
||||
self.module.main()
|
||||
@@ -285,9 +285,9 @@ class TestKeycloakRealm(ModuleTestCase):
|
||||
self.assertEqual(mock_get_group_by_name.call_count, 1)
|
||||
self.assertEqual(mock_get_client_id.call_count, 1)
|
||||
self.assertEqual(mock_add_group_rolemapping.call_count, 0)
|
||||
self.assertEqual(mock_get_client_rolemapping_by_id.call_count, 0)
|
||||
self.assertEqual(mock_get_client_available_rolemappings.call_count, 1)
|
||||
self.assertEqual(mock_get_client_composite_rolemappings.call_count, 1)
|
||||
self.assertEqual(mock_get_client_group_rolemapping_by_id.call_count, 0)
|
||||
self.assertEqual(mock_get_client_group_available_rolemappings.call_count, 1)
|
||||
self.assertEqual(mock_get_client_group_composite_rolemappings.call_count, 1)
|
||||
self.assertEqual(mock_delete_group_rolemapping.call_count, 0)
|
||||
|
||||
# Verify that the module's changed status matches what is expected
|
||||
@@ -330,8 +330,8 @@ class TestKeycloakRealm(ModuleTestCase):
|
||||
"subGroups": "[]"
|
||||
}]
|
||||
return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727"
|
||||
return_value_get_client_role_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe"
|
||||
return_value_get_client_available_rolemappings = [[
|
||||
return_value_get_client_role_id_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe"
|
||||
return_value_get_client_group_available_rolemappings = [[
|
||||
{
|
||||
"clientRole": "true",
|
||||
"composite": "false",
|
||||
@@ -347,7 +347,7 @@ class TestKeycloakRealm(ModuleTestCase):
|
||||
"name": "test_role1"
|
||||
}
|
||||
]]
|
||||
return_value_get_client_composite_rolemappings = [
|
||||
return_value_get_client_group_composite_rolemappings = [
|
||||
None,
|
||||
[
|
||||
{
|
||||
@@ -375,11 +375,11 @@ class TestKeycloakRealm(ModuleTestCase):
|
||||
|
||||
with mock_good_connection():
|
||||
with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id,
|
||||
get_client_role_by_name=return_value_get_client_role_by_name,
|
||||
get_client_available_rolemappings=return_value_get_client_available_rolemappings,
|
||||
get_client_composite_rolemappings=return_value_get_client_composite_rolemappings) \
|
||||
as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_by_name, mock_add_group_rolemapping,
|
||||
mock_get_client_rolemapping_by_id, mock_get_client_available_rolemappings, mock_get_client_composite_rolemappings,
|
||||
get_client_role_id_by_name=return_value_get_client_role_id_by_name,
|
||||
get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings,
|
||||
get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \
|
||||
as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping,
|
||||
mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings,
|
||||
mock_delete_group_rolemapping):
|
||||
with self.assertRaises(AnsibleExitJson) as exec_info:
|
||||
self.module.main()
|
||||
@@ -387,9 +387,9 @@ class TestKeycloakRealm(ModuleTestCase):
|
||||
self.assertEqual(mock_get_group_by_name.call_count, 0)
|
||||
self.assertEqual(mock_get_client_id.call_count, 0)
|
||||
self.assertEqual(mock_add_group_rolemapping.call_count, 1)
|
||||
self.assertEqual(mock_get_client_rolemapping_by_id.call_count, 0)
|
||||
self.assertEqual(mock_get_client_available_rolemappings.call_count, 1)
|
||||
self.assertEqual(mock_get_client_composite_rolemappings.call_count, 2)
|
||||
self.assertEqual(mock_get_client_group_rolemapping_by_id.call_count, 0)
|
||||
self.assertEqual(mock_get_client_group_available_rolemappings.call_count, 1)
|
||||
self.assertEqual(mock_get_client_group_composite_rolemappings.call_count, 2)
|
||||
self.assertEqual(mock_delete_group_rolemapping.call_count, 0)
|
||||
|
||||
# Verify that the module's changed status matches what is expected
|
||||
@@ -432,9 +432,9 @@ class TestKeycloakRealm(ModuleTestCase):
|
||||
"subGroups": "[]"
|
||||
}]
|
||||
return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727"
|
||||
return_value_get_client_role_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe"
|
||||
return_value_get_client_available_rolemappings = [[]]
|
||||
return_value_get_client_composite_rolemappings = [
|
||||
return_value_get_client_role_id_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe"
|
||||
return_value_get_client_group_available_rolemappings = [[]]
|
||||
return_value_get_client_group_composite_rolemappings = [
|
||||
[
|
||||
{
|
||||
"clientRole": "true",
|
||||
@@ -462,11 +462,11 @@ class TestKeycloakRealm(ModuleTestCase):
|
||||
|
||||
with mock_good_connection():
|
||||
with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id,
|
||||
get_client_role_by_name=return_value_get_client_role_by_name,
|
||||
get_client_available_rolemappings=return_value_get_client_available_rolemappings,
|
||||
get_client_composite_rolemappings=return_value_get_client_composite_rolemappings) \
|
||||
as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_by_name, mock_add_group_rolemapping,
|
||||
mock_get_client_rolemapping_by_id, mock_get_client_available_rolemappings, mock_get_client_composite_rolemappings,
|
||||
get_client_role_id_by_name=return_value_get_client_role_id_by_name,
|
||||
get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings,
|
||||
get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \
|
||||
as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping,
|
||||
mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings,
|
||||
mock_delete_group_rolemapping):
|
||||
with self.assertRaises(AnsibleExitJson) as exec_info:
|
||||
self.module.main()
|
||||
@@ -474,9 +474,9 @@ class TestKeycloakRealm(ModuleTestCase):
|
||||
self.assertEqual(mock_get_group_by_name.call_count, 1)
|
||||
self.assertEqual(mock_get_client_id.call_count, 1)
|
||||
self.assertEqual(mock_add_group_rolemapping.call_count, 0)
|
||||
self.assertEqual(mock_get_client_rolemapping_by_id.call_count, 0)
|
||||
self.assertEqual(mock_get_client_available_rolemappings.call_count, 1)
|
||||
self.assertEqual(mock_get_client_composite_rolemappings.call_count, 2)
|
||||
self.assertEqual(mock_get_client_group_rolemapping_by_id.call_count, 0)
|
||||
self.assertEqual(mock_get_client_group_available_rolemappings.call_count, 1)
|
||||
self.assertEqual(mock_get_client_group_composite_rolemappings.call_count, 2)
|
||||
self.assertEqual(mock_delete_group_rolemapping.call_count, 1)
|
||||
|
||||
# Verify that the module's changed status matches what is expected
|
||||
@@ -519,8 +519,8 @@ class TestKeycloakRealm(ModuleTestCase):
|
||||
"subGroups": "[]"
|
||||
}]
|
||||
return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727"
|
||||
return_value_get_client_role_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe"
|
||||
return_value_get_client_available_rolemappings = [
|
||||
return_value_get_client_role_id_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe"
|
||||
return_value_get_client_group_available_rolemappings = [
|
||||
[
|
||||
{
|
||||
"clientRole": "true",
|
||||
@@ -538,7 +538,7 @@ class TestKeycloakRealm(ModuleTestCase):
|
||||
}
|
||||
]
|
||||
]
|
||||
return_value_get_client_composite_rolemappings = [[]]
|
||||
return_value_get_client_group_composite_rolemappings = [[]]
|
||||
|
||||
changed = False
|
||||
|
||||
@@ -548,11 +548,11 @@ class TestKeycloakRealm(ModuleTestCase):
|
||||
|
||||
with mock_good_connection():
|
||||
with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id,
|
||||
get_client_role_by_name=return_value_get_client_role_by_name,
|
||||
get_client_available_rolemappings=return_value_get_client_available_rolemappings,
|
||||
get_client_composite_rolemappings=return_value_get_client_composite_rolemappings) \
|
||||
as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_by_name, mock_add_group_rolemapping,
|
||||
mock_get_client_rolemapping_by_id, mock_get_client_available_rolemappings, mock_get_client_composite_rolemappings,
|
||||
get_client_role_id_by_name=return_value_get_client_role_id_by_name,
|
||||
get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings,
|
||||
get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \
|
||||
as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping,
|
||||
mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings,
|
||||
mock_delete_group_rolemapping):
|
||||
with self.assertRaises(AnsibleExitJson) as exec_info:
|
||||
self.module.main()
|
||||
@@ -560,9 +560,9 @@ class TestKeycloakRealm(ModuleTestCase):
|
||||
self.assertEqual(mock_get_group_by_name.call_count, 1)
|
||||
self.assertEqual(mock_get_client_id.call_count, 1)
|
||||
self.assertEqual(mock_add_group_rolemapping.call_count, 0)
|
||||
self.assertEqual(mock_get_client_rolemapping_by_id.call_count, 0)
|
||||
self.assertEqual(mock_get_client_available_rolemappings.call_count, 1)
|
||||
self.assertEqual(mock_get_client_composite_rolemappings.call_count, 1)
|
||||
self.assertEqual(mock_get_client_group_rolemapping_by_id.call_count, 0)
|
||||
self.assertEqual(mock_get_client_group_available_rolemappings.call_count, 1)
|
||||
self.assertEqual(mock_get_client_group_composite_rolemappings.call_count, 1)
|
||||
self.assertEqual(mock_delete_group_rolemapping.call_count, 0)
|
||||
|
||||
# Verify that the module's changed status matches what is expected
|
||||
|
||||
Reference in New Issue
Block a user