mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-04-30 18:36:28 +00:00
Compare commits
37 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
24f6493cd4 | ||
|
|
68364df409 | ||
|
|
fb61da5246 | ||
|
|
cf9b01ec6b | ||
|
|
89663a0688 | ||
|
|
7fcb21e044 | ||
|
|
1bf9caa90f | ||
|
|
c6ecc0f3f8 | ||
|
|
4d74aa05a8 | ||
|
|
7fb44b0643 | ||
|
|
7ddb2eb438 | ||
|
|
3158495572 | ||
|
|
58f110ae9c | ||
|
|
5695c919f1 | ||
|
|
6e1a1c028e | ||
|
|
d02b8507d1 | ||
|
|
14d43b10c1 | ||
|
|
92c41a5f55 | ||
|
|
012f684133 | ||
|
|
77b7a65002 | ||
|
|
7f4cd86fe5 | ||
|
|
06980d8239 | ||
|
|
d4740ff387 | ||
|
|
a0b22e4402 | ||
|
|
a56879c1b0 | ||
|
|
d7b31655c4 | ||
|
|
70a7f66d4c | ||
|
|
391c3aa850 | ||
|
|
deb95ea6bf | ||
|
|
806ca0a9e0 | ||
|
|
a171d9bb90 | ||
|
|
dd70c8b031 | ||
|
|
30e707aa79 | ||
|
|
7be95c8bbe | ||
|
|
8e9a348e92 | ||
|
|
2622513d65 | ||
|
|
37d37b20cb |
5
.github/BOTMETA.yml
vendored
5
.github/BOTMETA.yml
vendored
@@ -1168,7 +1168,8 @@ files:
|
||||
$modules/web_infrastructure/jenkins_script.py:
|
||||
maintainers: hogarthj
|
||||
$modules/web_infrastructure/jira.py:
|
||||
maintainers: Slezhuk tarka pertoft DWSR
|
||||
maintainers: Slezhuk tarka pertoft
|
||||
ignore: DWSR
|
||||
labels: jira
|
||||
$modules/web_infrastructure/nginx_status_info.py:
|
||||
maintainers: resmo
|
||||
@@ -1237,7 +1238,7 @@ macros:
|
||||
team_cyberark_conjur: jvanderhoof ryanprior
|
||||
team_e_spirit: MatrixCrawler getjack
|
||||
team_flatpak: JayKayy oolongbrothers
|
||||
team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman metanovii sh0shin nejch lgatellier
|
||||
team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman metanovii sh0shin nejch lgatellier suukit
|
||||
team_hpux: bcoca davx8342
|
||||
team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2
|
||||
team_ipa: Akasurde Nosmoht fxfitz justchris1
|
||||
|
||||
@@ -6,6 +6,59 @@ Community General Release Notes
|
||||
|
||||
This changelog describes changes after version 3.0.0.
|
||||
|
||||
v4.6.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular feature and bugfix release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- jira - when creating a comment, ``fields`` is now used for additional data (https://github.com/ansible-collections/community.general/pull/4304).
|
||||
- ldap_entry - add support for recursive deletion (https://github.com/ansible-collections/community.general/issues/3613).
|
||||
- mksysb - revamped the module using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/3295).
|
||||
- nmcli - add missing connection aliases ``802-3-ethernet`` and ``802-11-wireless`` (https://github.com/ansible-collections/community.general/pull/4108).
|
||||
- nmcli - remove nmcli modify dependency on ``type`` parameter (https://github.com/ansible-collections/community.general/issues/2858).
|
||||
- npm - add ability to use ``production`` flag when ``ci`` is set (https://github.com/ansible-collections/community.general/pull/4299).
|
||||
- pacman - add ``remove_nosave`` parameter to avoid saving modified configuration files as ``.pacsave`` files. (https://github.com/ansible-collections/community.general/pull/4316, https://github.com/ansible-collections/community.general/issues/4315).
|
||||
- pacman - now implements proper change detection for ``update_cache=true``. Adds ``cache_updated`` return value to when ``update_cache=true`` to report this result independently of the module's overall changed return value (https://github.com/ansible-collections/community.general/pull/4337).
|
||||
- pipx - added options ``editable`` and ``pip_args`` (https://github.com/ansible-collections/community.general/issues/4300).
|
||||
- proxmox inventory plugin - add support for client-side jinja filters (https://github.com/ansible-collections/community.general/issues/3553).
|
||||
- redis - add authentication parameters ``login_user``, ``tls``, ``validate_certs``, and ``ca_certs`` (https://github.com/ansible-collections/community.general/pull/4207).
|
||||
- syslog_json - add option to skip logging of ``gather_facts`` playbook tasks; use v2 callback API (https://github.com/ansible-collections/community.general/pull/4223).
|
||||
- zypper - add support for ``--clean-deps`` option to remove packages that depend on a package being removed (https://github.com/ansible-collections/community.general/pull/4195).
|
||||
|
||||
Deprecated Features
|
||||
-------------------
|
||||
|
||||
- pacman - from community.general 5.0.0 on, the ``changed`` status of ``update_cache`` will no longer be ignored if ``name`` or ``upgrade`` is specified. To keep the old behavior, add something like ``register: result`` and ``changed_when: result.packages | length > 0`` to your task (https://github.com/ansible-collections/community.general/pull/4329).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- filesize - add support for busybox dd implementation, that is used by default on Alpine linux (https://github.com/ansible-collections/community.general/pull/4288, https://github.com/ansible-collections/community.general/issues/4259).
|
||||
- linode inventory plugin - fix configuration handling relating to inventory filtering (https://github.com/ansible-collections/community.general/pull/4336).
|
||||
- mksysb - fixed bug where parameter ``backup_dmapi_fs`` was passing the wrong CLI argument (https://github.com/ansible-collections/community.general/pull/3295).
|
||||
- pacman - Use ``--groups`` instead of ``--group`` (https://github.com/ansible-collections/community.general/pull/4312).
|
||||
- pacman - fix URL based package installation (https://github.com/ansible-collections/community.general/pull/4286, https://github.com/ansible-collections/community.general/issues/4285).
|
||||
- pacman - fix ``upgrade=yes`` (https://github.com/ansible-collections/community.general/pull/4275, https://github.com/ansible-collections/community.general/issues/4274).
|
||||
- pacman - make sure that ``packages`` is always returned when ``name`` or ``upgrade`` is specified, also if nothing is done (https://github.com/ansible-collections/community.general/pull/4329).
|
||||
- pacman - when the ``update_cache`` option is combined with another option such as ``upgrade``, report ``changed`` based on the actions performed by the latter option. This was the behavior in community.general 4.4.0 and before. In community.general 4.5.0, a task combining these options would always report ``changed`` (https://github.com/ansible-collections/community.general/pull/4318).
|
||||
- proxmox inventory plugin - always convert strings that follow the ``key=value[,key=value[...]]`` form into dictionaries (https://github.com/ansible-collections/community.general/pull/4349).
|
||||
- proxmox inventory plugin - fixed the ``description`` field being ignored if it contained a comma (https://github.com/ansible-collections/community.general/issues/4348).
|
||||
- proxmox_kvm - fix error in check when creating or cloning (https://github.com/ansible-collections/community.general/pull/4306).
|
||||
- proxmox_kvm - fix error when checking whether Proxmox VM exists (https://github.com/ansible-collections/community.general/pull/4287).
|
||||
- terraform - fix ``variable`` handling to allow complex values (https://github.com/ansible-collections/community.general/pull/4281).
|
||||
|
||||
Known Issues
|
||||
------------
|
||||
|
||||
- pacman - ``update_cache`` cannot differentiate between up to date and outdated package lists and will report ``changed`` in both situations (https://github.com/ansible-collections/community.general/pull/4318).
|
||||
- pacman - binaries specified in the ``executable`` parameter must support ``--print-format`` in order to be used by this module. In particular, AUR helper ``yay`` is known not to currently support it (https://github.com/ansible-collections/community.general/pull/4312).
|
||||
|
||||
v4.5.0
|
||||
======
|
||||
|
||||
|
||||
@@ -1495,3 +1495,93 @@ releases:
|
||||
name: scaleway_private_network
|
||||
namespace: cloud.scaleway
|
||||
release_date: '2022-02-22'
|
||||
4.6.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- filesize - add support for busybox dd implementation, that is used by default
|
||||
on Alpine linux (https://github.com/ansible-collections/community.general/pull/4288,
|
||||
https://github.com/ansible-collections/community.general/issues/4259).
|
||||
- linode inventory plugin - fix configuration handling relating to inventory
|
||||
filtering (https://github.com/ansible-collections/community.general/pull/4336).
|
||||
      - mksysb - fixed bug where parameter ``backup_dmapi_fs`` was passing the wrong
|
||||
CLI argument (https://github.com/ansible-collections/community.general/pull/3295).
|
||||
- pacman - Use ``--groups`` instead of ``--group`` (https://github.com/ansible-collections/community.general/pull/4312).
|
||||
- pacman - fix URL based package installation (https://github.com/ansible-collections/community.general/pull/4286,
|
||||
https://github.com/ansible-collections/community.general/issues/4285).
|
||||
- pacman - fix ``upgrade=yes`` (https://github.com/ansible-collections/community.general/pull/4275,
|
||||
https://github.com/ansible-collections/community.general/issues/4274).
|
||||
- pacman - make sure that ``packages`` is always returned when ``name`` or ``upgrade``
|
||||
is specified, also if nothing is done (https://github.com/ansible-collections/community.general/pull/4329).
|
||||
- pacman - when the ``update_cache`` option is combined with another option
|
||||
such as ``upgrade``, report ``changed`` based on the actions performed by
|
||||
the latter option. This was the behavior in community.general 4.4.0 and before.
|
||||
In community.general 4.5.0, a task combining these options would always report
|
||||
``changed`` (https://github.com/ansible-collections/community.general/pull/4318).
|
||||
- proxmox inventory plugin - always convert strings that follow the ``key=value[,key=value[...]]``
|
||||
form into dictionaries (https://github.com/ansible-collections/community.general/pull/4349).
|
||||
- proxmox inventory plugin - fixed the ``description`` field being ignored if
|
||||
it contained a comma (https://github.com/ansible-collections/community.general/issues/4348).
|
||||
- proxmox_kvm - fix error in check when creating or cloning (https://github.com/ansible-collections/community.general/pull/4306).
|
||||
- proxmox_kvm - fix error when checking whether Proxmox VM exists (https://github.com/ansible-collections/community.general/pull/4287).
|
||||
- terraform - fix ``variable`` handling to allow complex values (https://github.com/ansible-collections/community.general/pull/4281).
|
||||
deprecated_features:
|
||||
- 'pacman - from community.general 5.0.0 on, the ``changed`` status of ``update_cache``
|
||||
will no longer be ignored if ``name`` or ``upgrade`` is specified. To keep
|
||||
the old behavior, add something like ``register: result`` and ``changed_when:
|
||||
result.packages | length > 0`` to your task (https://github.com/ansible-collections/community.general/pull/4329).'
|
||||
known_issues:
|
||||
- pacman - ``update_cache`` cannot differentiate between up to date and outdated
|
||||
package lists and will report ``changed`` in both situations (https://github.com/ansible-collections/community.general/pull/4318).
|
||||
- pacman - binaries specified in the ``executable`` parameter must support ``--print-format``
|
||||
in order to be used by this module. In particular, AUR helper ``yay`` is known
|
||||
not to currently support it (https://github.com/ansible-collections/community.general/pull/4312).
|
||||
minor_changes:
|
||||
      - jira - when creating a comment, ``fields`` is now used for additional data
|
||||
(https://github.com/ansible-collections/community.general/pull/4304).
|
||||
- ldap_entry - add support for recursive deletion (https://github.com/ansible-collections/community.general/issues/3613).
|
||||
- mksysb - revamped the module using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/3295).
|
||||
- nmcli - add missing connection aliases ``802-3-ethernet`` and ``802-11-wireless``
|
||||
(https://github.com/ansible-collections/community.general/pull/4108).
|
||||
- nmcli - remove nmcli modify dependency on ``type`` parameter (https://github.com/ansible-collections/community.general/issues/2858).
|
||||
- npm - add ability to use ``production`` flag when ``ci`` is set (https://github.com/ansible-collections/community.general/pull/4299).
|
||||
- pacman - add ``remove_nosave`` parameter to avoid saving modified configuration
|
||||
files as ``.pacsave`` files. (https://github.com/ansible-collections/community.general/pull/4316,
|
||||
https://github.com/ansible-collections/community.general/issues/4315).
|
||||
- pacman - now implements proper change detection for ``update_cache=true``.
|
||||
Adds ``cache_updated`` return value to when ``update_cache=true`` to report
|
||||
this result independently of the module's overall changed return value (https://github.com/ansible-collections/community.general/pull/4337).
|
||||
- pipx - added options ``editable`` and ``pip_args`` (https://github.com/ansible-collections/community.general/issues/4300).
|
||||
- proxmox inventory plugin - add support for client-side jinja filters (https://github.com/ansible-collections/community.general/issues/3553).
|
||||
- redis - add authentication parameters ``login_user``, ``tls``, ``validate_certs``,
|
||||
and ``ca_certs`` (https://github.com/ansible-collections/community.general/pull/4207).
|
||||
- syslog_json - add option to skip logging of ``gather_facts`` playbook tasks;
|
||||
use v2 callback API (https://github.com/ansible-collections/community.general/pull/4223).
|
||||
- zypper - add support for ``--clean-deps`` option to remove packages that depend
|
||||
on a package being removed (https://github.com/ansible-collections/community.general/pull/4195).
|
||||
release_summary: Regular feature and bugfix release.
|
||||
fragments:
|
||||
- 3295-mksysb-revamp.yaml
|
||||
- 4.6.0.yml
|
||||
- 4108-nmcli-support-modifcation-without-type-param.yml
|
||||
- 4192-zypper-add-clean-deps.yml
|
||||
- 4207-add-redis-tls-support.yml
|
||||
- 4223-syslog-json-skip-syslog-option.yml
|
||||
- 4275-pacman-sysupgrade.yml
|
||||
- 4281-terraform-complex-variables.yml
|
||||
- 4286-pacman-url-pkgs.yml
|
||||
- 4287-fix-proxmox-vm-chek.yml
|
||||
- 4288-fix-4259-support-busybox-dd.yml
|
||||
- 4299-npm-add-production-with-ci-flag.yml
|
||||
- 4303-pipx-editable.yml
|
||||
- 4304-jira-fields-in-comment.yml
|
||||
- 4306-proxmox-fix-error-on-vm-clone.yml
|
||||
- 4312-pacman-groups.yml
|
||||
- 4316-pacman-remove-nosave.yml
|
||||
- 4318-pacman-restore-old-changed-behavior.yml
|
||||
- 4330-pacman-packages-update_cache.yml
|
||||
- 4336-linode-inventory-filtering.yaml
|
||||
- 4337-pacman-update_cache.yml
|
||||
- 4349-proxmox-inventory-dict-facts.yml
|
||||
- 4352-proxmox-inventory-filters.yml
|
||||
- 4355-ldap-recursive-delete.yml
|
||||
release_date: '2022-03-15'
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
namespace: community
|
||||
name: general
|
||||
version: 4.5.0
|
||||
version: 4.6.0
|
||||
readme: README.md
|
||||
authors:
|
||||
- Ansible (https://github.com/ansible)
|
||||
|
||||
1
plugins/cache/memcached.py
vendored
1
plugins/cache/memcached.py
vendored
@@ -20,6 +20,7 @@ DOCUMENTATION = '''
|
||||
- List of connection information for the memcached DBs
|
||||
default: ['127.0.0.1:11211']
|
||||
type: list
|
||||
elements: string
|
||||
env:
|
||||
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
|
||||
ini:
|
||||
|
||||
@@ -41,6 +41,16 @@ DOCUMENTATION = '''
|
||||
ini:
|
||||
- section: callback_syslog_json
|
||||
key: syslog_facility
|
||||
setup:
|
||||
description: Log setup tasks.
|
||||
env:
|
||||
- name: ANSIBLE_SYSLOG_SETUP
|
||||
type: bool
|
||||
default: true
|
||||
ini:
|
||||
- section: callback_syslog_json
|
||||
key: syslog_setup
|
||||
version_added: 4.5.0
|
||||
'''
|
||||
|
||||
import os
|
||||
@@ -86,23 +96,36 @@ class CallbackModule(CallbackBase):
|
||||
self.logger.addHandler(self.handler)
|
||||
self.hostname = socket.gethostname()
|
||||
|
||||
def runner_on_failed(self, host, res, ignore_errors=False):
|
||||
def v2_runner_on_failed(self, result, ignore_errors=False):
|
||||
res = result._result
|
||||
host = result._host.get_name()
|
||||
self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res))
|
||||
|
||||
def runner_on_ok(self, host, res):
|
||||
self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s', self.hostname, host, self._dump_results(res))
|
||||
def v2_runner_on_ok(self, result):
|
||||
res = result._result
|
||||
host = result._host.get_name()
|
||||
if result._task.action != "gather_facts" or self.get_option("setup"):
|
||||
self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s', self.hostname, host, self._dump_results(res))
|
||||
|
||||
def runner_on_skipped(self, host, item=None):
|
||||
def v2_runner_on_skipped(self, result):
|
||||
host = result._host.get_name()
|
||||
self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s', self.hostname, host, 'skipped')
|
||||
|
||||
def runner_on_unreachable(self, host, res):
|
||||
def v2_runner_on_unreachable(self, result):
|
||||
res = result._result
|
||||
host = result._host.get_name()
|
||||
self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s', self.hostname, host, self._dump_results(res))
|
||||
|
||||
def runner_on_async_failed(self, host, res, jid):
|
||||
def v2_runner_on_async_failed(self, result):
|
||||
res = result._result
|
||||
host = result._host.get_name()
|
||||
jid = result._result.get('ansible_job_id')
|
||||
self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res))
|
||||
|
||||
def playbook_on_import_for_host(self, host, imported_file):
|
||||
def v2_playbook_on_import_for_host(self, result, imported_file):
|
||||
host = result._host.get_name()
|
||||
self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: imported file %s', self.hostname, host, imported_file)
|
||||
|
||||
def playbook_on_not_import_for_host(self, host, missing_file):
|
||||
def v2_playbook_on_not_import_for_host(self, result, missing_file):
|
||||
host = result._host.get_name()
|
||||
self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: missing file %s', self.hostname, host, missing_file)
|
||||
|
||||
@@ -58,6 +58,7 @@ DOCUMENTATION = '''
|
||||
group_by:
|
||||
description: Keys to group hosts by
|
||||
type: list
|
||||
elements: string
|
||||
default: [ 'mgmt_classes', 'owners', 'status' ]
|
||||
group:
|
||||
description: Group to place all hosts into
|
||||
|
||||
@@ -35,7 +35,6 @@ DOCUMENTATION = '''
|
||||
version_added: 1.0.0
|
||||
type: str
|
||||
required: true
|
||||
default: https://gitlab.com
|
||||
api_token:
|
||||
description: GitLab token for logging in.
|
||||
env:
|
||||
|
||||
@@ -54,15 +54,18 @@ DOCUMENTATION = r'''
|
||||
description: Populate inventory with instances in this region.
|
||||
default: []
|
||||
type: list
|
||||
elements: string
|
||||
tags:
|
||||
description: Populate inventory only with instances which have at least one of the tags listed here.
|
||||
default: []
|
||||
type: list
|
||||
elements: string
|
||||
version_added: 2.0.0
|
||||
types:
|
||||
description: Populate inventory with instances with this type.
|
||||
default: []
|
||||
type: list
|
||||
elements: string
|
||||
strict:
|
||||
version_added: 2.0.0
|
||||
compose:
|
||||
@@ -181,20 +184,23 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
for linode_group in self.linode_groups:
|
||||
self.inventory.add_group(linode_group)
|
||||
|
||||
def _filter_by_config(self, regions, types, tags):
|
||||
def _filter_by_config(self):
|
||||
"""Filter instances by user specified configuration."""
|
||||
regions = self.get_option('regions')
|
||||
if regions:
|
||||
self.instances = [
|
||||
instance for instance in self.instances
|
||||
if instance.region.id in regions
|
||||
]
|
||||
|
||||
types = self.get_option('types')
|
||||
if types:
|
||||
self.instances = [
|
||||
instance for instance in self.instances
|
||||
if instance.type.id in types
|
||||
]
|
||||
|
||||
tags = self.get_option('tags')
|
||||
if tags:
|
||||
self.instances = [
|
||||
instance for instance in self.instances
|
||||
@@ -247,60 +253,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
)
|
||||
return data
|
||||
|
||||
def _validate_option(self, name, desired_type, option_value):
|
||||
"""Validate user specified configuration data against types."""
|
||||
if isinstance(option_value, string_types) and desired_type == list:
|
||||
option_value = [option_value]
|
||||
|
||||
if option_value is None:
|
||||
option_value = desired_type()
|
||||
|
||||
if not isinstance(option_value, desired_type):
|
||||
raise AnsibleParserError(
|
||||
'The option %s (%s) must be a %s' % (
|
||||
name, option_value, desired_type
|
||||
)
|
||||
)
|
||||
|
||||
return option_value
|
||||
|
||||
def _get_query_options(self, config_data):
|
||||
"""Get user specified query options from the configuration."""
|
||||
options = {
|
||||
'regions': {
|
||||
'type_to_be': list,
|
||||
'value': config_data.get('regions', [])
|
||||
},
|
||||
'types': {
|
||||
'type_to_be': list,
|
||||
'value': config_data.get('types', [])
|
||||
},
|
||||
'tags': {
|
||||
'type_to_be': list,
|
||||
'value': config_data.get('tags', [])
|
||||
},
|
||||
}
|
||||
|
||||
for name in options:
|
||||
options[name]['value'] = self._validate_option(
|
||||
name,
|
||||
options[name]['type_to_be'],
|
||||
options[name]['value']
|
||||
)
|
||||
|
||||
regions = options['regions']['value']
|
||||
types = options['types']['value']
|
||||
tags = options['tags']['value']
|
||||
|
||||
return regions, types, tags
|
||||
|
||||
def _cacheable_inventory(self):
|
||||
return [i._raw_json for i in self.instances]
|
||||
|
||||
def populate(self, config_data):
|
||||
def populate(self):
|
||||
strict = self.get_option('strict')
|
||||
regions, types, tags = self._get_query_options(config_data)
|
||||
self._filter_by_config(regions, types, tags)
|
||||
|
||||
self._filter_by_config()
|
||||
|
||||
self._add_groups()
|
||||
self._add_instances_to_groups()
|
||||
@@ -339,8 +298,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
if not HAS_LINODE:
|
||||
raise AnsibleError('the Linode dynamic inventory plugin requires linode_api4.')
|
||||
|
||||
config_data = self._read_config_data(path)
|
||||
self._consume_options(config_data)
|
||||
self._read_config_data(path)
|
||||
|
||||
cache_key = self.get_cache_key(path)
|
||||
|
||||
@@ -363,4 +321,4 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
if update_cache:
|
||||
self._cache[cache_key] = self._cacheable_inventory()
|
||||
|
||||
self.populate(config_data)
|
||||
self.populate()
|
||||
|
||||
@@ -27,6 +27,7 @@ DOCUMENTATION = '''
|
||||
exclude:
|
||||
description: list of addresses to exclude
|
||||
type: list
|
||||
elements: string
|
||||
ports:
|
||||
description: Enable/disable scanning for open ports
|
||||
type: boolean
|
||||
|
||||
@@ -28,6 +28,7 @@ DOCUMENTATION = r'''
|
||||
hostnames:
|
||||
        description: List of preferences about what to use as a hostname.
|
||||
type: list
|
||||
elements: string
|
||||
default:
|
||||
- public_ipv4
|
||||
choices:
|
||||
@@ -37,6 +38,7 @@ DOCUMENTATION = r'''
|
||||
groups:
|
||||
description: List of groups.
|
||||
type: list
|
||||
elements: string
|
||||
choices:
|
||||
- location
|
||||
- offer
|
||||
|
||||
@@ -77,6 +77,12 @@ DOCUMENTATION = '''
|
||||
- When set to C(true) (default), will use the first available interface. This can be different from what you expect.
|
||||
default: true
|
||||
type: bool
|
||||
filters:
|
||||
version_added: 4.6.0
|
||||
description: A list of Jinja templates that allow filtering hosts.
|
||||
type: list
|
||||
elements: str
|
||||
default: []
|
||||
strict:
|
||||
version_added: 2.5.0
|
||||
compose:
|
||||
@@ -132,13 +138,16 @@ compose:
|
||||
"my_var_2_value"
|
||||
'''
|
||||
|
||||
import itertools
|
||||
import re
|
||||
|
||||
from ansible.module_utils.common._collections_compat import MutableMapping
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlencode
|
||||
from ansible.utils.display import Display
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
@@ -151,6 +160,8 @@ try:
|
||||
except ImportError:
|
||||
HAS_REQUESTS = False
|
||||
|
||||
display = Display()
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
''' Host inventory parser for ansible using Proxmox as source. '''
|
||||
@@ -291,28 +302,19 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
|
||||
return result
|
||||
|
||||
def _get_vm_config(self, node, vmid, vmtype, name):
|
||||
def _get_vm_config(self, properties, node, vmid, vmtype, name):
|
||||
ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/config" % (self.proxmox_url, node, vmtype, vmid))
|
||||
|
||||
node_key = 'node'
|
||||
node_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), node_key.lower()))
|
||||
self.inventory.set_variable(name, node_key, node)
|
||||
|
||||
vmid_key = 'vmid'
|
||||
vmid_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), vmid_key.lower()))
|
||||
self.inventory.set_variable(name, vmid_key, vmid)
|
||||
|
||||
vmtype_key = 'vmtype'
|
||||
vmtype_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), vmtype_key.lower()))
|
||||
self.inventory.set_variable(name, vmtype_key, vmtype)
|
||||
properties[self._fact('node')] = node
|
||||
properties[self._fact('vmid')] = vmid
|
||||
properties[self._fact('vmtype')] = vmtype
|
||||
|
||||
plaintext_configs = [
|
||||
'tags',
|
||||
'description',
|
||||
]
|
||||
|
||||
for config in ret:
|
||||
key = config
|
||||
key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), key.lower()))
|
||||
key = self._fact(config)
|
||||
value = ret[config]
|
||||
try:
|
||||
# fixup disk images as they have no key
|
||||
@@ -322,45 +324,36 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
# Additional field containing parsed tags as list
|
||||
if config == 'tags':
|
||||
parsed_key = self.to_safe('%s%s' % (key, "_parsed"))
|
||||
parsed_value = [tag.strip() for tag in value.split(",")]
|
||||
self.inventory.set_variable(name, parsed_key, parsed_value)
|
||||
properties[parsed_key] = [tag.strip() for tag in value.split(",")]
|
||||
|
||||
# The first field in the agent string tells you whether the agent is enabled
|
||||
# the rest of the comma separated string is extra config for the agent
|
||||
if config == 'agent' and int(value.split(',')[0]):
|
||||
agent_iface_key = self.to_safe('%s%s' % (key, "_interfaces"))
|
||||
agent_iface_value = self._get_agent_network_interfaces(node, vmid, vmtype)
|
||||
if agent_iface_value:
|
||||
self.inventory.set_variable(name, agent_iface_key, agent_iface_value)
|
||||
agent_iface_key = self.to_safe('%s%s' % (key, "_interfaces"))
|
||||
properties[agent_iface_key] = agent_iface_value
|
||||
|
||||
if not (isinstance(value, int) or ',' not in value):
|
||||
if config not in plaintext_configs and not isinstance(value, int) and all("=" in v for v in value.split(",")):
|
||||
# split off strings with commas to a dict
|
||||
# skip over any keys that cannot be processed
|
||||
try:
|
||||
value = dict(key.split("=") for key in value.split(","))
|
||||
value = dict(key.split("=", 1) for key in value.split(","))
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
self.inventory.set_variable(name, key, value)
|
||||
properties[key] = value
|
||||
except NameError:
|
||||
return None
|
||||
|
||||
def _get_vm_status(self, node, vmid, vmtype, name):
|
||||
def _get_vm_status(self, properties, node, vmid, vmtype, name):
|
||||
ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/status/current" % (self.proxmox_url, node, vmtype, vmid))
|
||||
properties[self._fact('status')] = ret['status']
|
||||
|
||||
status = ret['status']
|
||||
status_key = 'status'
|
||||
status_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), status_key.lower()))
|
||||
self.inventory.set_variable(name, status_key, status)
|
||||
|
||||
def _get_vm_snapshots(self, node, vmid, vmtype, name):
|
||||
def _get_vm_snapshots(self, properties, node, vmid, vmtype, name):
|
||||
ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/snapshot" % (self.proxmox_url, node, vmtype, vmid))
|
||||
|
||||
snapshots_key = 'snapshots'
|
||||
snapshots_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), snapshots_key.lower()))
|
||||
|
||||
snapshots = [snapshot['name'] for snapshot in ret if snapshot['name'] != 'current']
|
||||
self.inventory.set_variable(name, snapshots_key, snapshots)
|
||||
properties[self._fact('snapshots')] = snapshots
|
||||
|
||||
def to_safe(self, word):
|
||||
'''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
|
||||
@@ -370,109 +363,130 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
regex = r"[^A-Za-z0-9\_]"
|
||||
return re.sub(regex, "_", word.replace(" ", ""))
|
||||
|
||||
def _apply_constructable(self, name, variables):
|
||||
strict = self.get_option('strict')
|
||||
self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=strict)
|
||||
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=strict)
|
||||
self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict)
|
||||
def _fact(self, name):
|
||||
'''Generate a fact's full name from the common prefix and a name.'''
|
||||
return self.to_safe('%s%s' % (self.facts_prefix, name.lower()))
|
||||
|
||||
def _group(self, name):
|
||||
'''Generate a group's full name from the common prefix and a name.'''
|
||||
return self.to_safe('%s%s' % (self.group_prefix, name.lower()))
|
||||
|
||||
def _can_add_host(self, name, properties):
|
||||
'''Ensure that a host satisfies all defined hosts filters. If strict mode is
|
||||
enabled, any error during host filter compositing will lead to an AnsibleError
|
||||
being raised, otherwise the filter will be ignored.
|
||||
'''
|
||||
for host_filter in self.host_filters:
|
||||
try:
|
||||
if not self._compose(host_filter, properties):
|
||||
return False
|
||||
except Exception as e: # pylint: disable=broad-except
|
||||
message = "Could not evaluate host filter %s for host %s - %s" % (host_filter, name, to_native(e))
|
||||
if self.strict:
|
||||
raise AnsibleError(message)
|
||||
display.warning(message)
|
||||
return True
|
||||
|
||||
def _add_host(self, name, variables):
|
||||
self.inventory.add_host(name)
|
||||
for k, v in variables.items():
|
||||
self.inventory.set_variable(name, k, v)
|
||||
variables = self.inventory.get_host(name).get_vars()
|
||||
self._set_composite_vars(self.get_option('compose'), variables, name, strict=self.strict)
|
||||
self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=self.strict)
|
||||
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=self.strict)
|
||||
|
||||
def _handle_item(self, node, ittype, item):
|
||||
'''Handle an item from the list of LXC containers and Qemu VM. The
|
||||
return value will be either None if the item was skipped or the name of
|
||||
the item if it was added to the inventory.'''
|
||||
if item.get('template'):
|
||||
return None
|
||||
|
||||
properties = dict()
|
||||
name, vmid = item['name'], item['vmid']
|
||||
|
||||
# get status, config and snapshots if want_facts == True
|
||||
if self.get_option('want_facts'):
|
||||
self._get_vm_status(properties, node, vmid, ittype, name)
|
||||
self._get_vm_config(properties, node, vmid, ittype, name)
|
||||
self._get_vm_snapshots(properties, node, vmid, ittype, name)
|
||||
|
||||
# ensure the host satisfies filters
|
||||
if not self._can_add_host(name, properties):
|
||||
return None
|
||||
|
||||
# add the host to the inventory
|
||||
self._add_host(name, properties)
|
||||
node_type_group = self._group('%s_%s' % (node, ittype))
|
||||
self.inventory.add_child(self._group('all_' + ittype), name)
|
||||
self.inventory.add_child(node_type_group, name)
|
||||
if item['status'] == 'stopped':
|
||||
self.inventory.add_child(self._group('all_stopped'), name)
|
||||
elif item['status'] == 'running':
|
||||
self.inventory.add_child(self._group('all_running'), name)
|
||||
|
||||
return name
|
||||
|
||||
def _populate_pool_groups(self, added_hosts):
|
||||
'''Generate groups from Proxmox resource pools, ignoring VMs and
|
||||
containers that were skipped.'''
|
||||
for pool in self._get_pools():
|
||||
poolid = pool.get('poolid')
|
||||
if not poolid:
|
||||
continue
|
||||
pool_group = self._group('pool_' + poolid)
|
||||
self.inventory.add_group(pool_group)
|
||||
|
||||
for member in self._get_members_per_pool(poolid):
|
||||
name = member.get('name')
|
||||
if name and name in added_hosts:
|
||||
self.inventory.add_child(pool_group, name)
|
||||
|
||||
def _populate(self):
|
||||
|
||||
self._get_auth()
|
||||
# create common groups
|
||||
self.inventory.add_group(self._group('all_lxc'))
|
||||
self.inventory.add_group(self._group('all_qemu'))
|
||||
self.inventory.add_group(self._group('all_running'))
|
||||
self.inventory.add_group(self._group('all_stopped'))
|
||||
nodes_group = self._group('nodes')
|
||||
self.inventory.add_group(nodes_group)
|
||||
|
||||
# gather vm's on nodes
|
||||
self._get_auth()
|
||||
hosts = []
|
||||
for node in self._get_nodes():
|
||||
# FIXME: this can probably be cleaner
|
||||
# create groups
|
||||
lxc_group = 'all_lxc'
|
||||
lxc_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), lxc_group.lower()))
|
||||
self.inventory.add_group(lxc_group)
|
||||
qemu_group = 'all_qemu'
|
||||
qemu_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), qemu_group.lower()))
|
||||
self.inventory.add_group(qemu_group)
|
||||
nodes_group = 'nodes'
|
||||
nodes_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), nodes_group.lower()))
|
||||
self.inventory.add_group(nodes_group)
|
||||
running_group = 'all_running'
|
||||
running_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), running_group.lower()))
|
||||
self.inventory.add_group(running_group)
|
||||
stopped_group = 'all_stopped'
|
||||
stopped_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), stopped_group.lower()))
|
||||
self.inventory.add_group(stopped_group)
|
||||
if not node.get('node'):
|
||||
continue
|
||||
|
||||
if node.get('node'):
|
||||
self.inventory.add_host(node['node'])
|
||||
self.inventory.add_host(node['node'])
|
||||
if node['type'] == 'node':
|
||||
self.inventory.add_child(nodes_group, node['node'])
|
||||
|
||||
if node['type'] == 'node':
|
||||
self.inventory.add_child(nodes_group, node['node'])
|
||||
if node['status'] == 'offline':
|
||||
continue
|
||||
|
||||
if node['status'] == 'offline':
|
||||
continue
|
||||
# get node IP address
|
||||
if self.get_option("want_proxmox_nodes_ansible_host"):
|
||||
ip = self._get_node_ip(node['node'])
|
||||
self.inventory.set_variable(node['node'], 'ansible_host', ip)
|
||||
|
||||
# get node IP address
|
||||
if self.get_option("want_proxmox_nodes_ansible_host"):
|
||||
ip = self._get_node_ip(node['node'])
|
||||
self.inventory.set_variable(node['node'], 'ansible_host', ip)
|
||||
# add LXC/Qemu groups for the node
|
||||
for ittype in ('lxc', 'qemu'):
|
||||
node_type_group = self._group('%s_%s' % (node['node'], ittype))
|
||||
self.inventory.add_group(node_type_group)
|
||||
|
||||
# get LXC containers for this node
|
||||
node_lxc_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_lxc' % node['node']).lower()))
|
||||
self.inventory.add_group(node_lxc_group)
|
||||
for lxc in self._get_lxc_per_node(node['node']):
|
||||
self.inventory.add_host(lxc['name'])
|
||||
self.inventory.add_child(lxc_group, lxc['name'])
|
||||
self.inventory.add_child(node_lxc_group, lxc['name'])
|
||||
|
||||
# get LXC status when want_facts == True
|
||||
if self.get_option('want_facts'):
|
||||
self._get_vm_status(node['node'], lxc['vmid'], 'lxc', lxc['name'])
|
||||
if lxc['status'] == 'stopped':
|
||||
self.inventory.add_child(stopped_group, lxc['name'])
|
||||
elif lxc['status'] == 'running':
|
||||
self.inventory.add_child(running_group, lxc['name'])
|
||||
|
||||
# get LXC config and snapshots for facts
|
||||
if self.get_option('want_facts'):
|
||||
self._get_vm_config(node['node'], lxc['vmid'], 'lxc', lxc['name'])
|
||||
self._get_vm_snapshots(node['node'], lxc['vmid'], 'lxc', lxc['name'])
|
||||
|
||||
self._apply_constructable(lxc["name"], self.inventory.get_host(lxc['name']).get_vars())
|
||||
|
||||
# get QEMU vm's for this node
|
||||
node_qemu_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_qemu' % node['node']).lower()))
|
||||
self.inventory.add_group(node_qemu_group)
|
||||
for qemu in self._get_qemu_per_node(node['node']):
|
||||
if qemu.get('template'):
|
||||
continue
|
||||
|
||||
self.inventory.add_host(qemu['name'])
|
||||
self.inventory.add_child(qemu_group, qemu['name'])
|
||||
self.inventory.add_child(node_qemu_group, qemu['name'])
|
||||
|
||||
# get QEMU status
|
||||
self._get_vm_status(node['node'], qemu['vmid'], 'qemu', qemu['name'])
|
||||
if qemu['status'] == 'stopped':
|
||||
self.inventory.add_child(stopped_group, qemu['name'])
|
||||
elif qemu['status'] == 'running':
|
||||
self.inventory.add_child(running_group, qemu['name'])
|
||||
|
||||
# get QEMU config and snapshots for facts
|
||||
if self.get_option('want_facts'):
|
||||
self._get_vm_config(node['node'], qemu['vmid'], 'qemu', qemu['name'])
|
||||
self._get_vm_snapshots(node['node'], qemu['vmid'], 'qemu', qemu['name'])
|
||||
|
||||
self._apply_constructable(qemu["name"], self.inventory.get_host(qemu['name']).get_vars())
|
||||
# get LXC containers and Qemu VMs for this node
|
||||
lxc_objects = zip(itertools.repeat('lxc'), self._get_lxc_per_node(node['node']))
|
||||
qemu_objects = zip(itertools.repeat('qemu'), self._get_qemu_per_node(node['node']))
|
||||
for ittype, item in itertools.chain(lxc_objects, qemu_objects):
|
||||
name = self._handle_item(node['node'], ittype, item)
|
||||
if name is not None:
|
||||
hosts.append(name)
|
||||
|
||||
# gather vm's in pools
|
||||
for pool in self._get_pools():
|
||||
if pool.get('poolid'):
|
||||
pool_group = 'pool_' + pool['poolid']
|
||||
pool_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), pool_group.lower()))
|
||||
self.inventory.add_group(pool_group)
|
||||
|
||||
for member in self._get_members_per_pool(pool['poolid']):
|
||||
if member.get('name'):
|
||||
if not member.get('template'):
|
||||
self.inventory.add_child(pool_group, member['name'])
|
||||
self._populate_pool_groups(hosts)
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
|
||||
if not HAS_REQUESTS:
|
||||
@@ -484,12 +498,16 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
# read config from file, this sets 'options'
|
||||
self._read_config_data(path)
|
||||
|
||||
# get connection host
|
||||
# read options
|
||||
self.proxmox_url = self.get_option('url').rstrip('/')
|
||||
self.proxmox_user = self.get_option('user')
|
||||
self.proxmox_password = self.get_option('password')
|
||||
self.cache_key = self.get_cache_key(path)
|
||||
self.use_cache = cache and self.get_option('cache')
|
||||
self.host_filters = self.get_option('filters')
|
||||
self.group_prefix = self.get_option('group_prefix')
|
||||
self.facts_prefix = self.get_option('facts_prefix')
|
||||
self.strict = self.get_option('strict')
|
||||
|
||||
# actually populate inventory
|
||||
self._populate()
|
||||
|
||||
@@ -23,6 +23,7 @@ DOCUMENTATION = r'''
|
||||
regions:
|
||||
description: Filter results on a specific Scaleway region.
|
||||
type: list
|
||||
elements: string
|
||||
default:
|
||||
- ams1
|
||||
- par1
|
||||
@@ -31,6 +32,7 @@ DOCUMENTATION = r'''
|
||||
tags:
|
||||
description: Filter results on a specific tag.
|
||||
type: list
|
||||
elements: string
|
||||
scw_profile:
|
||||
description:
|
||||
- The config profile to use in config file.
|
||||
@@ -51,6 +53,7 @@ DOCUMENTATION = r'''
|
||||
hostnames:
|
||||
description: List of preference about what to use as an hostname.
|
||||
type: list
|
||||
elements: string
|
||||
default:
|
||||
- public_ipv4
|
||||
choices:
|
||||
|
||||
@@ -20,6 +20,7 @@ DOCUMENTATION = '''
|
||||
_raw:
|
||||
description: List of key(s) to retrieve.
|
||||
type: list
|
||||
elements: string
|
||||
recurse:
|
||||
type: boolean
|
||||
description: If true, will retrieve all the values that have the given key as prefix.
|
||||
|
||||
@@ -17,11 +17,11 @@ DOCUMENTATION = '''
|
||||
_terms:
|
||||
description: term or list of terms to lookup in the credit store
|
||||
type: list
|
||||
required: True
|
||||
elements: string
|
||||
required: true
|
||||
table:
|
||||
description: name of the credstash table to query
|
||||
default: 'credential-store'
|
||||
required: True
|
||||
version:
|
||||
description: Credstash version
|
||||
region:
|
||||
|
||||
@@ -18,6 +18,7 @@ DOCUMENTATION = '''
|
||||
description: domain or list of domains to query TXT records from
|
||||
required: True
|
||||
type: list
|
||||
elements: string
|
||||
'''
|
||||
|
||||
EXAMPLES = """
|
||||
|
||||
@@ -14,6 +14,10 @@ description:
|
||||
- Uses the Thycotic Secret Server Python SDK to get Secrets from Secret
|
||||
Server using token authentication with I(username) and I(password) on
|
||||
the REST API at I(base_url).
|
||||
- When using self-signed certificates the environment variable
|
||||
C(REQUESTS_CA_BUNDLE) can be set to a file containing the trusted certificates
|
||||
(in C(.pem) format).
|
||||
- For example, C(export REQUESTS_CA_BUNDLE='/etc/ssl/certs/ca-bundle.trust.crt').
|
||||
requirements:
|
||||
- python-tss-sdk - https://pypi.org/project/python-tss-sdk/
|
||||
options:
|
||||
|
||||
@@ -27,20 +27,20 @@ except ImportError:
|
||||
HAS_CERTIFI_PACKAGE = False
|
||||
|
||||
|
||||
def fail_imports(module):
|
||||
def fail_imports(module, needs_certifi=True):
|
||||
errors = []
|
||||
traceback = []
|
||||
if not HAS_REDIS_PACKAGE:
|
||||
errors.append(missing_required_lib('redis'))
|
||||
traceback.append(REDIS_IMP_ERR)
|
||||
if not HAS_CERTIFI_PACKAGE:
|
||||
if not HAS_CERTIFI_PACKAGE and needs_certifi:
|
||||
errors.append(missing_required_lib('certifi'))
|
||||
traceback.append(CERTIFI_IMPORT_ERROR)
|
||||
if errors:
|
||||
module.fail_json(errors=errors, traceback='\n'.join(traceback))
|
||||
|
||||
|
||||
def redis_auth_argument_spec():
|
||||
def redis_auth_argument_spec(tls_default=True):
|
||||
return dict(
|
||||
login_host=dict(type='str',
|
||||
default='localhost',),
|
||||
@@ -50,7 +50,7 @@ def redis_auth_argument_spec():
|
||||
),
|
||||
login_port=dict(type='int', default=6379),
|
||||
tls=dict(type='bool',
|
||||
default=True),
|
||||
default=tls_default),
|
||||
validate_certs=dict(type='bool',
|
||||
default=True
|
||||
),
|
||||
@@ -58,6 +58,30 @@ def redis_auth_argument_spec():
|
||||
)
|
||||
|
||||
|
||||
def redis_auth_params(module):
|
||||
login_host = module.params['login_host']
|
||||
login_user = module.params['login_user']
|
||||
login_password = module.params['login_password']
|
||||
login_port = module.params['login_port']
|
||||
tls = module.params['tls']
|
||||
validate_certs = 'required' if module.params['validate_certs'] else None
|
||||
ca_certs = module.params['ca_certs']
|
||||
if tls and ca_certs is None:
|
||||
ca_certs = str(certifi.where())
|
||||
if tuple(map(int, redis_version.split('.'))) < (3, 4, 0) and login_user is not None:
|
||||
module.fail_json(
|
||||
msg='The option `username` in only supported with redis >= 3.4.0.')
|
||||
params = {'host': login_host,
|
||||
'port': login_port,
|
||||
'password': login_password,
|
||||
'ssl_ca_certs': ca_certs,
|
||||
'ssl_cert_reqs': validate_certs,
|
||||
'ssl': tls}
|
||||
if login_user is not None:
|
||||
params['username'] = login_user
|
||||
return params
|
||||
|
||||
|
||||
class RedisAnsible(object):
|
||||
'''Base class for Redis module'''
|
||||
|
||||
@@ -66,28 +90,8 @@ class RedisAnsible(object):
|
||||
self.connection = self._connect()
|
||||
|
||||
def _connect(self):
|
||||
login_host = self.module.params['login_host']
|
||||
login_user = self.module.params['login_user']
|
||||
login_password = self.module.params['login_password']
|
||||
login_port = self.module.params['login_port']
|
||||
tls = self.module.params['tls']
|
||||
validate_certs = 'required' if self.module.params['validate_certs'] else None
|
||||
ca_certs = self.module.params['ca_certs']
|
||||
if tls and ca_certs is None:
|
||||
ca_certs = str(certifi.where())
|
||||
if tuple(map(int, redis_version.split('.'))) < (3, 4, 0) and login_user is not None:
|
||||
self.module.fail_json(
|
||||
msg='The option `username` in only supported with redis >= 3.4.0.')
|
||||
params = {'host': login_host,
|
||||
'port': login_port,
|
||||
'password': login_password,
|
||||
'ssl_ca_certs': ca_certs,
|
||||
'ssl_cert_reqs': validate_certs,
|
||||
'ssl': tls}
|
||||
if login_user is not None:
|
||||
params['username'] = login_user
|
||||
try:
|
||||
return Redis(**params)
|
||||
return Redis(**redis_auth_params(self.module))
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg='{0}'.format(str(e)))
|
||||
return None
|
||||
|
||||
@@ -42,7 +42,7 @@ options:
|
||||
description:
|
||||
- Whether to wait for the tasks to finish before returning.
|
||||
type: str
|
||||
default: True
|
||||
default: 'True'
|
||||
required: False
|
||||
requirements:
|
||||
- python = 2.7
|
||||
|
||||
@@ -66,8 +66,8 @@ options:
|
||||
description:
|
||||
- Whether the firewall policy is enabled or disabled
|
||||
type: str
|
||||
choices: [True, False]
|
||||
default: True
|
||||
choices: ['True', 'False']
|
||||
default: 'True'
|
||||
requirements:
|
||||
- python = 2.7
|
||||
- requests >= 2.5.0
|
||||
|
||||
@@ -48,7 +48,7 @@ options:
|
||||
description:
|
||||
- Port to configure on the public-facing side of the load balancer pool
|
||||
type: str
|
||||
choices: [80, 443]
|
||||
choices: ['80', '443']
|
||||
nodes:
|
||||
description:
|
||||
- A list of nodes that needs to be added to the load balancer pool
|
||||
|
||||
@@ -36,7 +36,7 @@ options:
|
||||
wait:
|
||||
description:
|
||||
- Whether to wait for the provisioning tasks to finish before returning.
|
||||
default: True
|
||||
default: 'True'
|
||||
required: False
|
||||
type: str
|
||||
requirements:
|
||||
|
||||
@@ -51,7 +51,6 @@ options:
|
||||
group.s
|
||||
type: str
|
||||
required: false
|
||||
default: 0
|
||||
vpc_id:
|
||||
description:
|
||||
- Specifies the resource ID of the VPC to which the security group
|
||||
|
||||
@@ -1205,12 +1205,12 @@ def main():
|
||||
proxmox.get_vm(vmid)
|
||||
|
||||
# Ensure the choosen VM name doesn't already exist when cloning
|
||||
existing_vmid = proxmox.get_vmid(name, choose_first_if_multiple=True)
|
||||
existing_vmid = proxmox.get_vmid(name, ignore_missing=True, choose_first_if_multiple=True)
|
||||
if existing_vmid:
|
||||
module.exit_json(changed=False, vmid=existing_vmid, msg="VM with name <%s> already exists" % name)
|
||||
|
||||
# Ensure the choosen VM id doesn't already exist when cloning
|
||||
if proxmox.get_vm(newid, ignore_errors=True):
|
||||
if proxmox.get_vm(newid, ignore_missing=True):
|
||||
module.exit_json(changed=False, vmid=vmid, msg="vmid %s with VM name %s already exists" % (newid, name))
|
||||
|
||||
if delete is not None:
|
||||
|
||||
@@ -443,7 +443,7 @@ def main():
|
||||
for k, v in variables.items():
|
||||
variables_args.extend([
|
||||
'-var',
|
||||
'{0}={1}'.format(k, v)
|
||||
'{0}={1}'.format(k, json.dumps(v))
|
||||
])
|
||||
if variables_files:
|
||||
for f in variables_files:
|
||||
|
||||
@@ -42,7 +42,7 @@ options:
|
||||
description:
|
||||
- version of database (MySQL supports 5.1 and 5.6, MariaDB supports 10, Percona supports 5.6)
|
||||
- "The available choices are: C(5.1), C(5.6) and C(10)."
|
||||
default: 5.6
|
||||
default: '5.6'
|
||||
aliases: ['version']
|
||||
state:
|
||||
type: str
|
||||
|
||||
@@ -13,6 +13,8 @@ module: redis
|
||||
short_description: Various redis commands, replica and flush
|
||||
description:
|
||||
- Unified utility to interact with redis instances.
|
||||
extends_documentation_fragment:
|
||||
- community.general.redis
|
||||
options:
|
||||
command:
|
||||
description:
|
||||
@@ -22,20 +24,15 @@ options:
|
||||
- C(replica) sets a redis instance in replica or master mode. (C(slave) is an alias for C(replica).)
|
||||
choices: [ config, flush, replica, slave ]
|
||||
type: str
|
||||
login_password:
|
||||
description:
|
||||
- The password used to authenticate with (usually not used)
|
||||
type: str
|
||||
login_host:
|
||||
description:
|
||||
- The host running the database
|
||||
default: localhost
|
||||
type: str
|
||||
login_port:
|
||||
description:
|
||||
- The port to connect to
|
||||
default: 6379
|
||||
type: int
|
||||
tls:
|
||||
default: false
|
||||
version_added: 4.6.0
|
||||
login_user:
|
||||
version_added: 4.6.0
|
||||
validate_certs:
|
||||
version_added: 4.6.0
|
||||
ca_certs:
|
||||
version_added: 4.6.0
|
||||
master_host:
|
||||
description:
|
||||
- The host of the master instance [replica command]
|
||||
@@ -144,6 +141,8 @@ else:
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible.module_utils.common.text.formatters import human_to_bytes
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
from ansible_collections.community.general.plugins.module_utils.redis import (
|
||||
fail_imports, redis_auth_argument_spec, redis_auth_params)
|
||||
import re
|
||||
|
||||
|
||||
@@ -175,29 +174,28 @@ def flush(client, db=None):
|
||||
|
||||
# Module execution.
|
||||
def main():
|
||||
redis_auth_args = redis_auth_argument_spec(tls_default=False)
|
||||
module_args = dict(
|
||||
command=dict(type='str', choices=['config', 'flush', 'replica', 'slave']),
|
||||
master_host=dict(type='str'),
|
||||
master_port=dict(type='int'),
|
||||
replica_mode=dict(type='str', default='replica', choices=['master', 'replica', 'slave'],
|
||||
aliases=["slave_mode"]),
|
||||
db=dict(type='int'),
|
||||
flush_mode=dict(type='str', default='all', choices=['all', 'db']),
|
||||
name=dict(type='str'),
|
||||
value=dict(type='str'),
|
||||
)
|
||||
module_args.update(redis_auth_args)
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
command=dict(type='str', choices=['config', 'flush', 'replica', 'slave']),
|
||||
login_password=dict(type='str', no_log=True),
|
||||
login_host=dict(type='str', default='localhost'),
|
||||
login_port=dict(type='int', default=6379),
|
||||
master_host=dict(type='str'),
|
||||
master_port=dict(type='int'),
|
||||
replica_mode=dict(type='str', default='replica', choices=['master', 'replica', 'slave'], aliases=["slave_mode"]),
|
||||
db=dict(type='int'),
|
||||
flush_mode=dict(type='str', default='all', choices=['all', 'db']),
|
||||
name=dict(type='str'),
|
||||
value=dict(type='str')
|
||||
),
|
||||
argument_spec=module_args,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
if not redis_found:
|
||||
module.fail_json(msg=missing_required_lib('redis'), exception=REDIS_IMP_ERR)
|
||||
fail_imports(module, module.params['tls'])
|
||||
|
||||
redis_params = redis_auth_params(module)
|
||||
|
||||
login_password = module.params['login_password']
|
||||
login_host = module.params['login_host']
|
||||
login_port = module.params['login_port']
|
||||
command = module.params['command']
|
||||
if command == "slave":
|
||||
command = "replica"
|
||||
@@ -219,7 +217,7 @@ def main():
|
||||
module.fail_json(msg='In replica mode master port must be provided')
|
||||
|
||||
# Connect and check
|
||||
r = redis.StrictRedis(host=login_host, port=login_port, password=login_password)
|
||||
r = redis.StrictRedis(**redis_params)
|
||||
try:
|
||||
r.ping()
|
||||
except Exception as e:
|
||||
@@ -270,7 +268,7 @@ def main():
|
||||
module.fail_json(msg="In db mode the db number must be provided")
|
||||
|
||||
# Connect and check
|
||||
r = redis.StrictRedis(host=login_host, port=login_port, password=login_password, db=db)
|
||||
r = redis.StrictRedis(db=db, **redis_params)
|
||||
try:
|
||||
r.ping()
|
||||
except Exception as e:
|
||||
@@ -301,7 +299,7 @@ def main():
|
||||
except ValueError:
|
||||
value = module.params['value']
|
||||
|
||||
r = redis.StrictRedis(host=login_host, port=login_port, password=login_password)
|
||||
r = redis.StrictRedis(**redis_params)
|
||||
|
||||
try:
|
||||
r.ping()
|
||||
|
||||
@@ -25,7 +25,7 @@ options:
|
||||
port:
|
||||
description:
|
||||
Database port to connect to.
|
||||
default: 5433
|
||||
default: '5433'
|
||||
type: str
|
||||
db:
|
||||
description:
|
||||
|
||||
@@ -44,7 +44,7 @@ options:
|
||||
port:
|
||||
description:
|
||||
- Vertica cluster port to connect to.
|
||||
default: 5433
|
||||
default: '5433'
|
||||
type: str
|
||||
login_user:
|
||||
description:
|
||||
|
||||
@@ -58,7 +58,7 @@ options:
|
||||
port:
|
||||
description:
|
||||
- Vertica cluster port to connect to.
|
||||
default: 5433
|
||||
default: '5433'
|
||||
type: str
|
||||
login_user:
|
||||
description:
|
||||
|
||||
@@ -70,7 +70,7 @@ options:
|
||||
port:
|
||||
description:
|
||||
- Vertica cluster port to connect to.
|
||||
default: 5433
|
||||
default: '5433'
|
||||
type: str
|
||||
login_user:
|
||||
description:
|
||||
|
||||
@@ -83,7 +83,7 @@ options:
|
||||
- Whether or not the file to create should be a sparse file.
|
||||
- This option is effective only on newly created files, or when growing a
|
||||
file, only for the bytes to append.
|
||||
- This option is not supported on OpenBSD, Solaris and AIX.
|
||||
- This option is not supported on OSes or filesystems not supporting sparse files.
|
||||
- I(force=true) and I(sparse=true) are mutually exclusive.
|
||||
type: bool
|
||||
default: false
|
||||
@@ -129,6 +129,10 @@ seealso:
|
||||
- name: dd(1) manpage for NetBSD
|
||||
description: Manual page of the NetBSD's dd implementation.
|
||||
link: https://man.netbsd.org/dd.1
|
||||
|
||||
- name: busybox(1) manpage for Linux
|
||||
description: Manual page of the GNU/Linux's busybox, that provides its own dd implementation.
|
||||
link: https://www.unix.com/man-page/linux/1/busybox
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
@@ -377,12 +381,10 @@ def complete_dd_cmdline(args, dd_cmd):
|
||||
return list()
|
||||
|
||||
bs = args['size_spec']['blocksize']
|
||||
conv = list()
|
||||
|
||||
# For sparse files (create, truncate, grow): write count=0 block.
|
||||
if args['sparse']:
|
||||
seek = args['size_spec']['blocks']
|
||||
conv += ['sparse']
|
||||
elif args['force'] or not os.path.exists(args['path']): # Create file
|
||||
seek = 0
|
||||
elif args['size_diff'] < 0: # Truncate file
|
||||
@@ -394,8 +396,6 @@ def complete_dd_cmdline(args, dd_cmd):
|
||||
|
||||
count = args['size_spec']['blocks'] - seek
|
||||
dd_cmd += ['bs=%s' % str(bs), 'seek=%s' % str(seek), 'count=%s' % str(count)]
|
||||
if conv:
|
||||
dd_cmd += ['conv=%s' % ','.join(conv)]
|
||||
|
||||
return dd_cmd
|
||||
|
||||
|
||||
@@ -84,7 +84,6 @@ options:
|
||||
description:
|
||||
- Dictionary of scopes to silence, with timestamps or None.
|
||||
- Each scope will be muted until the given POSIX timestamp or forever if the value is None.
|
||||
default: ""
|
||||
notify_no_data:
|
||||
description:
|
||||
- Whether this monitor will notify when data stops reporting.
|
||||
@@ -153,6 +152,11 @@ options:
|
||||
type: bool
|
||||
default: yes
|
||||
version_added: 1.3.0
|
||||
priority:
|
||||
description:
|
||||
- Integer from 1 (high) to 5 (low) indicating alert severity.
|
||||
type: int
|
||||
version_added: 4.6.0
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
@@ -239,6 +243,7 @@ def main():
|
||||
evaluation_delay=dict(),
|
||||
id=dict(),
|
||||
include_tags=dict(required=False, default=True, type='bool'),
|
||||
priority=dict(type='int'),
|
||||
)
|
||||
)
|
||||
|
||||
@@ -298,6 +303,7 @@ def _post_monitor(module, options):
|
||||
name=_fix_template_vars(module.params['name']),
|
||||
message=_fix_template_vars(module.params['notification_message']),
|
||||
escalation_message=_fix_template_vars(module.params['escalation_message']),
|
||||
priority=module.params['priority'],
|
||||
options=options)
|
||||
if module.params['tags'] is not None:
|
||||
kwargs['tags'] = module.params['tags']
|
||||
@@ -322,6 +328,7 @@ def _update_monitor(module, monitor, options):
|
||||
name=_fix_template_vars(module.params['name']),
|
||||
message=_fix_template_vars(module.params['notification_message']),
|
||||
escalation_message=_fix_template_vars(module.params['escalation_message']),
|
||||
priority=module.params['priority'],
|
||||
options=options)
|
||||
if module.params['tags'] is not None:
|
||||
kwargs['tags'] = module.params['tags']
|
||||
|
||||
@@ -77,8 +77,8 @@ options:
|
||||
description:
|
||||
- Network family defined by Infinity, e.g. IPv4, IPv6 and Dual stack
|
||||
type: str
|
||||
choices: [ 4, 6, dual ]
|
||||
default: 4
|
||||
choices: [ '4', '6', dual ]
|
||||
default: '4'
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
|
||||
@@ -49,6 +49,13 @@ options:
|
||||
choices: [present, absent]
|
||||
default: present
|
||||
type: str
|
||||
recursive:
|
||||
description:
|
||||
- If I(state=delete), a flag indicating whether a single entry or the
|
||||
whole branch must be deleted.
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 4.6.0
|
||||
extends_documentation_fragment:
|
||||
- community.general.ldap.documentation
|
||||
|
||||
@@ -110,6 +117,7 @@ from ansible_collections.community.general.plugins.module_utils.ldap import Ldap
|
||||
LDAP_IMP_ERR = None
|
||||
try:
|
||||
import ldap.modlist
|
||||
import ldap.controls
|
||||
|
||||
HAS_LDAP = True
|
||||
except ImportError:
|
||||
@@ -123,6 +131,7 @@ class LdapEntry(LdapGeneric):
|
||||
|
||||
# Shortcuts
|
||||
self.state = self.module.params['state']
|
||||
self.recursive = self.module.params['recursive']
|
||||
|
||||
# Add the objectClass into the list of attributes
|
||||
self.module.params['attributes']['objectClass'] = (
|
||||
@@ -158,12 +167,29 @@ class LdapEntry(LdapGeneric):
|
||||
return action
|
||||
|
||||
def delete(self):
|
||||
""" If self.dn exists, returns a callable that will delete it. """
|
||||
""" If self.dn exists, returns a callable that will delete either
|
||||
the item itself if the recursive option is not set or the whole branch
|
||||
if it is. """
|
||||
def _delete():
|
||||
self.connection.delete_s(self.dn)
|
||||
|
||||
def _delete_recursive():
|
||||
""" Attempt recurive deletion using the subtree-delete control.
|
||||
If that fails, do it manually. """
|
||||
try:
|
||||
subtree_delete = ldap.controls.ValueLessRequestControl('1.2.840.113556.1.4.805')
|
||||
self.connection.delete_ext_s(self.dn, serverctrls=[subtree_delete])
|
||||
except ldap.NOT_ALLOWED_ON_NONLEAF:
|
||||
search = self.connection.search_s(self.dn, ldap.SCOPE_SUBTREE, attrlist=('dn',))
|
||||
search.reverse()
|
||||
for entry in search:
|
||||
self.connection.delete_s(entry[0])
|
||||
|
||||
if self._is_entry_present():
|
||||
action = _delete
|
||||
if self.recursive:
|
||||
action = _delete_recursive
|
||||
else:
|
||||
action = _delete
|
||||
else:
|
||||
action = None
|
||||
|
||||
@@ -186,6 +212,7 @@ def main():
|
||||
attributes=dict(default={}, type='dict'),
|
||||
objectClass=dict(type='list', elements='str'),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
recursive=dict(default=False, type='bool'),
|
||||
),
|
||||
required_if=[('state', 'present', ['objectClass'])],
|
||||
supports_check_mode=True,
|
||||
|
||||
@@ -1528,6 +1528,7 @@ class Nmcli(object):
|
||||
'bridge',
|
||||
'dummy',
|
||||
'ethernet',
|
||||
'802-3-ethernet',
|
||||
'generic',
|
||||
'gre',
|
||||
'infiniband',
|
||||
@@ -1536,6 +1537,7 @@ class Nmcli(object):
|
||||
'team',
|
||||
'vlan',
|
||||
'wifi',
|
||||
'802-11-wireless',
|
||||
'gsm',
|
||||
'wireguard',
|
||||
)
|
||||
@@ -1895,6 +1897,12 @@ class Nmcli(object):
|
||||
options = {
|
||||
'connection.interface-name': self.ifname,
|
||||
}
|
||||
|
||||
if not self.type:
|
||||
current_con_type = self.show_connection().get('connection.type')
|
||||
if current_con_type:
|
||||
self.type = current_con_type
|
||||
|
||||
options.update(self.connection_options(detect_change=True))
|
||||
return self._compare_conn_params(self.show_connection(), options)
|
||||
|
||||
|
||||
@@ -175,7 +175,7 @@ class Npm(object):
|
||||
|
||||
if self.glbl:
|
||||
cmd.append('--global')
|
||||
if self.production and ('install' in cmd or 'update' in cmd):
|
||||
if self.production and ('install' in cmd or 'update' in cmd or 'ci' in cmd):
|
||||
cmd.append('--production')
|
||||
if self.ignore_scripts:
|
||||
cmd.append('--ignore-scripts')
|
||||
|
||||
@@ -78,6 +78,17 @@ options:
|
||||
If not specified, the module will use C(python -m pipx) to run the tool,
|
||||
using the same Python interpreter as ansible itself.
|
||||
type: path
|
||||
editable:
|
||||
description:
|
||||
- Install the project in editable mode.
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 4.6.0
|
||||
pip_args:
|
||||
description:
|
||||
- Arbitrary arguments to pass directly to C(pip).
|
||||
type: str
|
||||
version_added: 4.6.0
|
||||
notes:
|
||||
- This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip).
|
||||
- This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module.
|
||||
@@ -153,7 +164,9 @@ class PipX(CmdStateModuleHelper):
|
||||
include_injected=dict(type='bool', default=False),
|
||||
index_url=dict(type='str'),
|
||||
python=dict(type='str'),
|
||||
executable=dict(type='path')
|
||||
executable=dict(type='path'),
|
||||
editable=dict(type='bool', default=False),
|
||||
pip_args=dict(type='str'),
|
||||
),
|
||||
required_if=[
|
||||
('state', 'present', ['name']),
|
||||
@@ -174,6 +187,8 @@ class PipX(CmdStateModuleHelper):
|
||||
index_url=dict(fmt=('--index-url', '{0}'),),
|
||||
python=dict(fmt=('--python', '{0}'),),
|
||||
_list=dict(fmt=('list', '--include-injected', '--json'), style=ArgFormat.BOOLEAN),
|
||||
editable=dict(fmt="--editable", style=ArgFormat.BOOLEAN),
|
||||
pip_args=dict(fmt=('--pip-args', '{0}'),),
|
||||
)
|
||||
check_rc = True
|
||||
run_command_fixed_options = dict(
|
||||
@@ -224,8 +239,9 @@ class PipX(CmdStateModuleHelper):
|
||||
if not self.vars.application or self.vars.force:
|
||||
self.changed = True
|
||||
if not self.module.check_mode:
|
||||
self.run_command(params=['state', 'index_url', 'install_deps', 'force', 'python',
|
||||
{'name_source': [self.vars.name, self.vars.source]}])
|
||||
self.run_command(params=[
|
||||
'state', 'index_url', 'install_deps', 'force', 'python', 'editable', 'pip_args',
|
||||
{'name_source': [self.vars.name, self.vars.source]}])
|
||||
|
||||
state_present = state_install
|
||||
|
||||
@@ -236,7 +252,7 @@ class PipX(CmdStateModuleHelper):
|
||||
if self.vars.force:
|
||||
self.changed = True
|
||||
if not self.module.check_mode:
|
||||
self.run_command(params=['state', 'index_url', 'install_deps', 'force', 'name'])
|
||||
self.run_command(params=['state', 'index_url', 'install_deps', 'force', 'editable', 'pip_args', 'name'])
|
||||
|
||||
def state_uninstall(self):
|
||||
if self.vars.application and not self.module.check_mode:
|
||||
@@ -259,7 +275,7 @@ class PipX(CmdStateModuleHelper):
|
||||
if self.vars.force:
|
||||
self.changed = True
|
||||
if not self.module.check_mode:
|
||||
self.run_command(params=['state', 'index_url', 'force', 'name', 'inject_packages'])
|
||||
self.run_command(params=['state', 'index_url', 'force', 'editable', 'pip_args', 'name', 'inject_packages'])
|
||||
|
||||
def state_uninstall_all(self):
|
||||
if not self.module.check_mode:
|
||||
|
||||
@@ -43,16 +43,27 @@ options:
|
||||
|
||||
force:
|
||||
description:
|
||||
- When removing package, force remove package, without any checks.
|
||||
Same as `extra_args="--nodeps --nodeps"`.
|
||||
When update_cache, force redownload repo databases.
|
||||
Same as `update_cache_extra_args="--refresh --refresh"`.
|
||||
- When removing packages, forcefully remove them, without any checks.
|
||||
Same as C(extra_args="--nodeps --nodeps").
|
||||
When combined with I(update_cache), force a refresh of all package databases.
|
||||
Same as C(update_cache_extra_args="--refresh --refresh").
|
||||
default: no
|
||||
type: bool
|
||||
|
||||
remove_nosave:
|
||||
description:
|
||||
- When removing packages, do not save modified configuration files as C(.pacsave) files.
|
||||
(passes C(--nosave) to pacman)
|
||||
version_added: 4.6.0
|
||||
default: no
|
||||
type: bool
|
||||
|
||||
executable:
|
||||
description:
|
||||
- Name of binary to use. This can either be C(pacman) or a pacman compatible AUR helper.
|
||||
- Path of the binary to use. This can either be C(pacman) or a pacman compatible AUR helper.
|
||||
- Pacman compatibility is unfortunately ill defined, in particular, this modules makes
|
||||
extensive use of the C(--print-format) directive which is known not to be implemented by
|
||||
some AUR helpers (notably, C(yay)).
|
||||
- Beware that AUR helpers might behave unexpectedly and are therefore not recommended.
|
||||
default: pacman
|
||||
type: str
|
||||
@@ -70,6 +81,10 @@ options:
|
||||
- This can be run as part of a package installation or as a separate step.
|
||||
- Alias C(update-cache) has been deprecated and will be removed in community.general 5.0.0.
|
||||
- If not specified, it defaults to C(false).
|
||||
- Please note that this option will only have an influence on the module's C(changed) state
|
||||
if I(name) and I(upgrade) are not specified. This will change in community.general 5.0.0.
|
||||
See the examples for how to make the module behave as it will in 5.0.0 right now, or how
|
||||
to keep the current behavior with 5.0.0 and later.
|
||||
type: bool
|
||||
aliases: [ update-cache ]
|
||||
|
||||
@@ -101,20 +116,37 @@ notes:
|
||||
|
||||
RETURN = """
|
||||
packages:
|
||||
description: a list of packages that have been changed
|
||||
returned: when upgrade is set to yes
|
||||
description:
|
||||
- A list of packages that have been changed.
|
||||
- Before community.general 4.5.0 this was only returned when I(upgrade=true).
|
||||
In community.general 4.5.0, it was sometimes omitted when the package list is empty,
|
||||
but since community.general 4.6.0 it is always returned when I(name) is specified or
|
||||
I(upgrade=true).
|
||||
returned: success and I(name) is specified or I(upgrade=true)
|
||||
type: list
|
||||
elements: str
|
||||
sample: [ package, other-package ]
|
||||
|
||||
cache_updated:
|
||||
description:
|
||||
- The changed status of C(pacman -Sy).
|
||||
- Useful when I(name) or I(upgrade=true) are specified next to I(update_cache=true).
|
||||
returned: success, when I(update_cache=true)
|
||||
type: bool
|
||||
sample: false
|
||||
version_added: 4.6.0
|
||||
|
||||
stdout:
|
||||
description: Output from pacman.
|
||||
description:
|
||||
- Output from pacman.
|
||||
returned: success, when needed
|
||||
type: str
|
||||
sample: ":: Synchronizing package databases... core is up to date :: Starting full system upgrade..."
|
||||
version_added: 4.1.0
|
||||
|
||||
stderr:
|
||||
description: Error output from pacman.
|
||||
description:
|
||||
- Error output from pacman.
|
||||
returned: success, when needed
|
||||
type: str
|
||||
sample: "warning: libtool: local (2.4.6+44+gb9b44533-14) is newer than core (2.4.6+42+gb88cebd5-15)\nwarning ..."
|
||||
@@ -147,6 +179,9 @@ EXAMPLES = """
|
||||
extra_args: --builddir /var/cache/yay
|
||||
|
||||
- name: Upgrade package foo
|
||||
# The 'changed' state of this call will only indicate whether foo was
|
||||
# installed/upgraded, but not on whether the cache was updated. This
|
||||
# will change in community.general 5.0.0!
|
||||
community.general.pacman:
|
||||
name: foo
|
||||
state: latest
|
||||
@@ -174,6 +209,29 @@ EXAMPLES = """
|
||||
upgrade: yes
|
||||
|
||||
- name: Run the equivalent of "pacman -Syu" as a separate step
|
||||
# The 'changed' state of this call will only indicate whether
|
||||
# something was upgraded, but not on whether the cache was
|
||||
# updated. This will change in community.general 5.0.0!
|
||||
#
|
||||
# To keep the old behavior, add the following to the task:
|
||||
#
|
||||
# register: result
|
||||
# changed_when: result.packages | length > 0
|
||||
#
|
||||
# To already switch to the new behavior now, add:
|
||||
#
|
||||
# register: result
|
||||
# changed_when: result is changed or result.cache_updated
|
||||
#
|
||||
# Note that both constructs only work with community.general 4.6.0+.
|
||||
# For compatibility with older versions of community.general, you
|
||||
# have to use
|
||||
#
|
||||
# changed_when: result.packages | default([]) | length > 0
|
||||
#
|
||||
# respectively
|
||||
#
|
||||
# changed_when: result is changed or (result.cache_updated | default(false))
|
||||
community.general.pacman:
|
||||
update_cache: yes
|
||||
upgrade: yes
|
||||
@@ -190,7 +248,22 @@ from ansible.module_utils.basic import AnsibleModule
|
||||
from collections import defaultdict, namedtuple
|
||||
|
||||
|
||||
Package = namedtuple("Package", ["name", "source"])
|
||||
class Package(object):
|
||||
def __init__(self, name, source, source_is_URL=False):
|
||||
self.name = name
|
||||
self.source = source
|
||||
self.source_is_URL = source_is_URL
|
||||
|
||||
def __eq__(self, o):
|
||||
return self.name == o.name and self.source == o.source and self.source_is_URL == o.source_is_URL
|
||||
|
||||
def __lt__(self, o):
|
||||
return self.name < o.name
|
||||
|
||||
def __repr__(self):
|
||||
return 'Package("%s", "%s", %s)' % (self.name, self.source, self.source_is_URL)
|
||||
|
||||
|
||||
VersionTuple = namedtuple("VersionTuple", ["current", "latest"])
|
||||
|
||||
|
||||
@@ -209,6 +282,8 @@ class Pacman(object):
|
||||
|
||||
self.pacman_path = self.m.get_bin_path(p["executable"], True)
|
||||
|
||||
self._cached_database = None
|
||||
|
||||
# Normalize for old configs
|
||||
if p["state"] == "installed":
|
||||
self.target_state = "present"
|
||||
@@ -253,6 +328,11 @@ class Pacman(object):
|
||||
if not (self.m.params["name"] or self.m.params["upgrade"]):
|
||||
self.success()
|
||||
|
||||
# Avoid shadowing lack of changes in the following stages
|
||||
# so that `update_cache: true` doesn't always return changed
|
||||
# TODO: remove this in community.general 5.0.0
|
||||
self.changed = False
|
||||
|
||||
self.inventory = self._build_inventory()
|
||||
if self.m.params["upgrade"]:
|
||||
self.upgrade()
|
||||
@@ -273,7 +353,13 @@ class Pacman(object):
|
||||
|
||||
def install_packages(self, pkgs):
|
||||
pkgs_to_install = []
|
||||
pkgs_to_install_from_url = []
|
||||
for p in pkgs:
|
||||
if p.source_is_URL:
|
||||
# URL packages bypass the latest / upgradable_pkgs test
|
||||
# They go through the dry-run to let pacman decide if they will be installed
|
||||
pkgs_to_install_from_url.append(p)
|
||||
continue
|
||||
if (
|
||||
p.name not in self.inventory["installed_pkgs"]
|
||||
or self.target_state == "latest"
|
||||
@@ -281,14 +367,13 @@ class Pacman(object):
|
||||
):
|
||||
pkgs_to_install.append(p)
|
||||
|
||||
if len(pkgs_to_install) == 0:
|
||||
if len(pkgs_to_install) == 0 and len(pkgs_to_install_from_url) == 0:
|
||||
self.exit_params["packages"] = []
|
||||
self.add_exit_infos("package(s) already installed")
|
||||
return
|
||||
|
||||
self.changed = True
|
||||
cmd_base = [
|
||||
self.pacman_path,
|
||||
"--sync",
|
||||
"--noconfirm",
|
||||
"--noprogressbar",
|
||||
"--needed",
|
||||
@@ -296,53 +381,87 @@ class Pacman(object):
|
||||
if self.m.params["extra_args"]:
|
||||
cmd_base.extend(self.m.params["extra_args"])
|
||||
|
||||
# Dry run first to gather what will be done
|
||||
cmd = cmd_base + ["--print-format", "%n %v"] + [p.source for p in pkgs_to_install]
|
||||
rc, stdout, stderr = self.m.run_command(cmd, check_rc=False)
|
||||
if rc != 0:
|
||||
self.fail("Failed to list package(s) to install", stdout=stdout, stderr=stderr)
|
||||
def _build_install_diff(pacman_verb, pkglist):
|
||||
# Dry run to build the installation diff
|
||||
|
||||
cmd = cmd_base + [pacman_verb, "--print-format", "%n %v"] + [p.source for p in pkglist]
|
||||
rc, stdout, stderr = self.m.run_command(cmd, check_rc=False)
|
||||
if rc != 0:
|
||||
self.fail("Failed to list package(s) to install", cmd=cmd, stdout=stdout, stderr=stderr)
|
||||
|
||||
name_ver = [l.strip() for l in stdout.splitlines()]
|
||||
before = []
|
||||
after = []
|
||||
to_be_installed = []
|
||||
for p in name_ver:
|
||||
# With Pacman v6.0.1 - libalpm v13.0.1, --upgrade outputs "loading packages..." on stdout. strip that.
|
||||
# When installing from URLs, pacman can also output a 'nothing to do' message. strip that too.
|
||||
if "loading packages" in p or "there is nothing to do" in p:
|
||||
continue
|
||||
name, version = p.split()
|
||||
if name in self.inventory["installed_pkgs"]:
|
||||
before.append("%s-%s" % (name, self.inventory["installed_pkgs"][name]))
|
||||
after.append("%s-%s" % (name, version))
|
||||
to_be_installed.append(name)
|
||||
|
||||
return (to_be_installed, before, after)
|
||||
|
||||
name_ver = [l.strip() for l in stdout.splitlines()]
|
||||
before = []
|
||||
after = []
|
||||
installed_pkgs = []
|
||||
self.exit_params["packages"] = []
|
||||
for p in name_ver:
|
||||
name, version = p.split()
|
||||
if name in self.inventory["installed_pkgs"]:
|
||||
before.append("%s-%s" % (name, self.inventory["installed_pkgs"][name]))
|
||||
after.append("%s-%s" % (name, version))
|
||||
installed_pkgs.append(name)
|
||||
|
||||
if pkgs_to_install:
|
||||
p, b, a = _build_install_diff("--sync", pkgs_to_install)
|
||||
installed_pkgs.extend(p)
|
||||
before.extend(b)
|
||||
after.extend(a)
|
||||
if pkgs_to_install_from_url:
|
||||
p, b, a = _build_install_diff("--upgrade", pkgs_to_install_from_url)
|
||||
installed_pkgs.extend(p)
|
||||
before.extend(b)
|
||||
after.extend(a)
|
||||
|
||||
if len(installed_pkgs) == 0:
|
||||
# This can happen with URL packages if pacman decides there's nothing to do
|
||||
self.exit_params["packages"] = []
|
||||
self.add_exit_infos("package(s) already installed")
|
||||
return
|
||||
|
||||
self.changed = True
|
||||
|
||||
self.exit_params["diff"] = {
|
||||
"before": "\n".join(before) + "\n" if before else "",
|
||||
"after": "\n".join(after) + "\n" if after else "",
|
||||
"before": "\n".join(sorted(before)) + "\n" if before else "",
|
||||
"after": "\n".join(sorted(after)) + "\n" if after else "",
|
||||
}
|
||||
|
||||
if self.m.check_mode:
|
||||
self.add_exit_infos("Would have installed %d packages" % len(installed_pkgs))
|
||||
self.exit_params["packages"] = installed_pkgs
|
||||
self.exit_params["packages"] = sorted(installed_pkgs)
|
||||
return
|
||||
|
||||
# actually do it
|
||||
cmd = cmd_base + [p.source for p in pkgs_to_install]
|
||||
def _install_packages_for_real(pacman_verb, pkglist):
|
||||
cmd = cmd_base + [pacman_verb] + [p.source for p in pkglist]
|
||||
rc, stdout, stderr = self.m.run_command(cmd, check_rc=False)
|
||||
if rc != 0:
|
||||
self.fail("Failed to install package(s)", cmd=cmd, stdout=stdout, stderr=stderr)
|
||||
self.add_exit_infos(stdout=stdout, stderr=stderr)
|
||||
self._invalidate_database()
|
||||
|
||||
rc, stdout, stderr = self.m.run_command(cmd, check_rc=False)
|
||||
if rc != 0:
|
||||
self.fail("Failed to install package(s)", stdout=stdout, stderr=stderr)
|
||||
if pkgs_to_install:
|
||||
_install_packages_for_real("--sync", pkgs_to_install)
|
||||
if pkgs_to_install_from_url:
|
||||
_install_packages_for_real("--upgrade", pkgs_to_install_from_url)
|
||||
|
||||
self.exit_params["packages"] = installed_pkgs
|
||||
self.add_exit_infos(
|
||||
"Installed %d package(s)" % len(installed_pkgs), stdout=stdout, stderr=stderr
|
||||
)
|
||||
self.add_exit_infos("Installed %d package(s)" % len(installed_pkgs))
|
||||
|
||||
def remove_packages(self, pkgs):
|
||||
force_args = ["--nodeps", "--nodeps"] if self.m.params["force"] else []
|
||||
|
||||
# filter out pkgs that are already absent
|
||||
pkg_names_to_remove = [p.name for p in pkgs if p.name in self.inventory["installed_pkgs"]]
|
||||
|
||||
if len(pkg_names_to_remove) == 0:
|
||||
self.exit_params["packages"] = []
|
||||
self.add_exit_infos("package(s) already absent")
|
||||
return
|
||||
|
||||
@@ -350,10 +469,10 @@ class Pacman(object):
|
||||
self.changed = True
|
||||
|
||||
cmd_base = [self.pacman_path, "--remove", "--noconfirm", "--noprogressbar"]
|
||||
if self.m.params["extra_args"]:
|
||||
cmd_base.extend(self.m.params["extra_args"])
|
||||
if force_args:
|
||||
cmd_base.extend(force_args)
|
||||
cmd_base += self.m.params["extra_args"]
|
||||
cmd_base += ["--nodeps", "--nodeps"] if self.m.params["force"] else []
|
||||
# nosave_args conflicts with --print-format. Added later.
|
||||
# https://github.com/ansible-collections/community.general/issues/4315
|
||||
|
||||
# This is a bit of a TOCTOU but it is better than parsing the output of
|
||||
# pacman -R, which is different depending on the user config (VerbosePkgLists)
|
||||
@@ -362,7 +481,7 @@ class Pacman(object):
|
||||
|
||||
rc, stdout, stderr = self.m.run_command(cmd, check_rc=False)
|
||||
if rc != 0:
|
||||
self.fail("failed to list package(s) to remove", stdout=stdout, stderr=stderr)
|
||||
self.fail("failed to list package(s) to remove", cmd=cmd, stdout=stdout, stderr=stderr)
|
||||
|
||||
removed_pkgs = stdout.split()
|
||||
self.exit_params["packages"] = removed_pkgs
|
||||
@@ -376,12 +495,13 @@ class Pacman(object):
|
||||
self.add_exit_infos("Would have removed %d packages" % len(removed_pkgs))
|
||||
return
|
||||
|
||||
# actually do it
|
||||
cmd = cmd_base + pkg_names_to_remove
|
||||
nosave_args = ["--nosave"] if self.m.params["remove_nosave"] else []
|
||||
cmd = cmd_base + nosave_args + pkg_names_to_remove
|
||||
|
||||
rc, stdout, stderr = self.m.run_command(cmd, check_rc=False)
|
||||
if rc != 0:
|
||||
self.fail("failed to remove package(s)", stdout=stdout, stderr=stderr)
|
||||
self.fail("failed to remove package(s)", cmd=cmd, stdout=stdout, stderr=stderr)
|
||||
self._invalidate_database()
|
||||
self.exit_params["packages"] = removed_pkgs
|
||||
self.add_exit_infos("Removed %d package(s)" % len(removed_pkgs), stdout=stdout, stderr=stderr)
|
||||
|
||||
@@ -410,23 +530,36 @@ class Pacman(object):
|
||||
cmd = [
|
||||
self.pacman_path,
|
||||
"--sync",
|
||||
"--sys-upgrade",
|
||||
"--sysupgrade",
|
||||
"--quiet",
|
||||
"--noconfirm",
|
||||
]
|
||||
if self.m.params["upgrade_extra_args"]:
|
||||
cmd += self.m.params["upgrade_extra_args"]
|
||||
rc, stdout, stderr = self.m.run_command(cmd, check_rc=False)
|
||||
self._invalidate_database()
|
||||
if rc == 0:
|
||||
self.add_exit_infos("System upgraded", stdout=stdout, stderr=stderr)
|
||||
else:
|
||||
self.fail("Could not upgrade", stdout=stdout, stderr=stderr)
|
||||
self.fail("Could not upgrade", cmd=cmd, stdout=stdout, stderr=stderr)
|
||||
|
||||
def _list_database(self):
|
||||
"""runs pacman --sync --list with some caching"""
|
||||
if self._cached_database is None:
|
||||
dummy, packages, dummy = self.m.run_command([self.pacman_path, '--sync', '--list'], check_rc=True)
|
||||
self._cached_database = packages.splitlines()
|
||||
return self._cached_database
|
||||
|
||||
def _invalidate_database(self):
|
||||
"""invalidates the pacman --sync --list cache"""
|
||||
self._cached_database = None
|
||||
|
||||
def update_package_db(self):
|
||||
"""runs pacman --sync --refresh"""
|
||||
if self.m.check_mode:
|
||||
self.add_exit_infos("Would have updated the package db")
|
||||
self.changed = True
|
||||
self.exit_params["cache_updated"] = True
|
||||
return
|
||||
|
||||
cmd = [
|
||||
@@ -438,15 +571,28 @@ class Pacman(object):
|
||||
cmd += self.m.params["update_cache_extra_args"]
|
||||
if self.m.params["force"]:
|
||||
cmd += ["--refresh"]
|
||||
else:
|
||||
# Dump package database to get contents before update
|
||||
pre_state = sorted(self._list_database())
|
||||
|
||||
rc, stdout, stderr = self.m.run_command(cmd, check_rc=False)
|
||||
self._invalidate_database()
|
||||
|
||||
self.changed = True
|
||||
if self.m.params["force"]:
|
||||
# Always changed when force=true
|
||||
self.exit_params["cache_updated"] = True
|
||||
else:
|
||||
# Dump package database to get contents after update
|
||||
post_state = sorted(self._list_database())
|
||||
# If contents changed, set changed=true
|
||||
self.exit_params["cache_updated"] = pre_state != post_state
|
||||
if self.exit_params["cache_updated"]:
|
||||
self.changed = True
|
||||
|
||||
if rc == 0:
|
||||
self.add_exit_infos("Updated package db", stdout=stdout, stderr=stderr)
|
||||
else:
|
||||
self.fail("could not update package db", stdout=stdout, stderr=stderr)
|
||||
self.fail("could not update package db", cmd=cmd, stdout=stdout, stderr=stderr)
|
||||
|
||||
def package_list(self):
|
||||
"""Takes the input package list and resolves packages groups to their package list using the inventory,
|
||||
@@ -459,6 +605,7 @@ class Pacman(object):
|
||||
if not pkg:
|
||||
continue
|
||||
|
||||
is_URL = False
|
||||
if pkg in self.inventory["available_groups"]:
|
||||
# Expand group members
|
||||
for group_member in self.inventory["available_groups"][pkg]:
|
||||
@@ -488,8 +635,11 @@ class Pacman(object):
|
||||
stderr=stderr,
|
||||
rc=rc,
|
||||
)
|
||||
# With Pacman v6.0.1 - libalpm v13.0.1, --upgrade outputs "loading packages..." on stdout. strip that
|
||||
stdout = stdout.replace("loading packages...\n", "")
|
||||
is_URL = True
|
||||
pkg_name = stdout.strip()
|
||||
pkg_list.append(Package(name=pkg_name, source=pkg))
|
||||
pkg_list.append(Package(name=pkg_name, source=pkg, source_is_URL=is_URL))
|
||||
|
||||
return pkg_list
|
||||
|
||||
@@ -519,7 +669,7 @@ class Pacman(object):
|
||||
|
||||
installed_groups = defaultdict(set)
|
||||
dummy, stdout, dummy = self.m.run_command(
|
||||
[self.pacman_path, "--query", "--group"], check_rc=True
|
||||
[self.pacman_path, "--query", "--groups"], check_rc=True
|
||||
)
|
||||
# Format of lines:
|
||||
# base-devel file
|
||||
@@ -533,9 +683,9 @@ class Pacman(object):
|
||||
installed_groups[group].add(pkgname)
|
||||
|
||||
available_pkgs = {}
|
||||
dummy, stdout, dummy = self.m.run_command([self.pacman_path, "--sync", "--list"], check_rc=True)
|
||||
database = self._list_database()
|
||||
# Format of a line: "core pacman 6.0.1-2"
|
||||
for l in stdout.splitlines():
|
||||
for l in database:
|
||||
l = l.strip()
|
||||
if not l:
|
||||
continue
|
||||
@@ -544,7 +694,7 @@ class Pacman(object):
|
||||
|
||||
available_groups = defaultdict(set)
|
||||
dummy, stdout, dummy = self.m.run_command(
|
||||
[self.pacman_path, "--sync", "--group", "--group"], check_rc=True
|
||||
[self.pacman_path, "--sync", "--groups", "--groups"], check_rc=True
|
||||
)
|
||||
# Format of lines:
|
||||
# vim-plugins vim-airline
|
||||
@@ -613,6 +763,7 @@ def setup_module():
|
||||
choices=["present", "installed", "latest", "absent", "removed"],
|
||||
),
|
||||
force=dict(type="bool", default=False),
|
||||
remove_nosave=dict(type="bool", default=False),
|
||||
executable=dict(type="str", default="pacman"),
|
||||
extra_args=dict(type="str", default=""),
|
||||
upgrade=dict(type="bool"),
|
||||
|
||||
@@ -128,6 +128,13 @@ options:
|
||||
description:
|
||||
- Adds C(--replacefiles) option to I(zypper) install/update command.
|
||||
version_added: '0.2.0'
|
||||
clean_deps:
|
||||
type: bool
|
||||
required: false
|
||||
default: false
|
||||
description:
|
||||
- Adds C(--clean-deps) option to I(zypper) remove command.
|
||||
version_added: '4.6.0'
|
||||
notes:
|
||||
- When used with a `loop:` each package will be processed individually,
|
||||
it is much more efficient to pass the list directly to the `name` option.
|
||||
@@ -368,6 +375,9 @@ def get_cmd(m, subcommand):
|
||||
cmd.append('--oldpackage')
|
||||
if m.params['replacefiles']:
|
||||
cmd.append('--replacefiles')
|
||||
if subcommand == 'remove':
|
||||
if m.params['clean_deps']:
|
||||
cmd.append('--clean-deps')
|
||||
if subcommand == 'dist-upgrade' and m.params['allow_vendor_change']:
|
||||
cmd.append('--allow-vendor-change')
|
||||
if m.params['extra_args']:
|
||||
@@ -518,7 +528,8 @@ def main():
|
||||
oldpackage=dict(required=False, default=False, type='bool'),
|
||||
extra_args=dict(required=False, default=None),
|
||||
allow_vendor_change=dict(required=False, default=False, type='bool'),
|
||||
replacefiles=dict(required=False, default=False, type='bool')
|
||||
replacefiles=dict(required=False, default=False, type='bool'),
|
||||
clean_deps=dict(required=False, default=False, type='bool'),
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2021, Alexei Znamensky (@russoz) <russoz@gmail.com>
|
||||
# (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
|
||||
# GNU General Public License v3.0+ (see COPYING or
|
||||
# https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
@@ -95,13 +96,15 @@ msg:
|
||||
type: str
|
||||
'''
|
||||
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
import os
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.module_helper import (
|
||||
CmdModuleHelper, ArgFormat, ModuleHelperException
|
||||
)
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
|
||||
class MkSysB(CmdModuleHelper):
|
||||
module = dict(
|
||||
argument_spec=dict(
|
||||
backup_crypt_files=dict(type='bool', default=True),
|
||||
backup_dmapi_fs=dict(type='bool', default=True),
|
||||
@@ -117,85 +120,41 @@ def main():
|
||||
),
|
||||
supports_check_mode=True,
|
||||
)
|
||||
command = ['mksysb', '-X']
|
||||
command_args_formats = dict(
|
||||
create_map_files=dict(fmt="-m", style=ArgFormat.BOOLEAN),
|
||||
use_snapshot=dict(fmt="-T", style=ArgFormat.BOOLEAN),
|
||||
exclude_files=dict(fmt="-e", style=ArgFormat.BOOLEAN),
|
||||
exclude_wpar_files=dict(fmt="-G", style=ArgFormat.BOOLEAN),
|
||||
new_image_data=dict(fmt="-i", style=ArgFormat.BOOLEAN),
|
||||
software_packing=dict(fmt="-p", style=ArgFormat.BOOLEAN_NOT),
|
||||
extended_attrs=dict(fmt="-a", style=ArgFormat.BOOLEAN),
|
||||
backup_crypt_files=dict(fmt="-Z", style=ArgFormat.BOOLEAN_NOT),
|
||||
backup_dmapi_fs=dict(fmt="-A", style=ArgFormat.BOOLEAN),
|
||||
combined_path=dict(fmt=lambda p, n: ["%s/%s" % (p, n)], stars=1)
|
||||
)
|
||||
|
||||
# Command options.
|
||||
map_file_opt = {
|
||||
True: '-m',
|
||||
False: ''
|
||||
}
|
||||
def __init_module__(self):
|
||||
if not os.path.isdir(self.vars.storage_path):
|
||||
raise ModuleHelperException("Storage path %s is not valid." % self.vars.storage_path)
|
||||
|
||||
use_snapshot_opt = {
|
||||
True: '-T',
|
||||
False: ''
|
||||
}
|
||||
def __run__(self):
|
||||
if not self.module.check_mode:
|
||||
self.run_command(params=[
|
||||
'create_map_files', 'use_snapshot', 'exclude_files', 'exclude_wpar_files', 'software_packing',
|
||||
'extended_attrs', 'backup_crypt_files', 'backup_dmapi_fs', 'new_image_data',
|
||||
{'combined_path': [self.vars.storage_path, self.vars.name]},
|
||||
])
|
||||
self._changed = True
|
||||
|
||||
exclude_files_opt = {
|
||||
True: '-e',
|
||||
False: ''
|
||||
}
|
||||
def process_command_output(self, rc, out, err):
|
||||
if rc != 0:
|
||||
raise ModuleHelperException("mksysb failed.")
|
||||
self.vars.msg = out
|
||||
|
||||
exclude_wpar_opt = {
|
||||
True: '-G',
|
||||
False: ''
|
||||
}
|
||||
|
||||
new_image_data_opt = {
|
||||
True: '-i',
|
||||
False: ''
|
||||
}
|
||||
|
||||
soft_packing_opt = {
|
||||
True: '',
|
||||
False: '-p'
|
||||
}
|
||||
|
||||
extend_attr_opt = {
|
||||
True: '',
|
||||
False: '-a'
|
||||
}
|
||||
|
||||
crypt_files_opt = {
|
||||
True: '',
|
||||
False: '-Z'
|
||||
}
|
||||
|
||||
dmapi_fs_opt = {
|
||||
True: '-a',
|
||||
False: ''
|
||||
}
|
||||
|
||||
backup_crypt_files = crypt_files_opt[module.params['backup_crypt_files']]
|
||||
backup_dmapi_fs = dmapi_fs_opt[module.params['backup_dmapi_fs']]
|
||||
create_map_files = map_file_opt[module.params['create_map_files']]
|
||||
exclude_files = exclude_files_opt[module.params['exclude_files']]
|
||||
exclude_wpar_files = exclude_wpar_opt[module.params['exclude_wpar_files']]
|
||||
extended_attrs = extend_attr_opt[module.params['extended_attrs']]
|
||||
name = module.params['name']
|
||||
new_image_data = new_image_data_opt[module.params['new_image_data']]
|
||||
software_packing = soft_packing_opt[module.params['software_packing']]
|
||||
storage_path = module.params['storage_path']
|
||||
use_snapshot = use_snapshot_opt[module.params['use_snapshot']]
|
||||
|
||||
# Validate if storage_path is a valid directory.
|
||||
if os.path.isdir(storage_path):
|
||||
if not module.check_mode:
|
||||
# Generates the mksysb image backup.
|
||||
mksysb_cmd = module.get_bin_path('mksysb', True)
|
||||
rc, mksysb_output, err = module.run_command(
|
||||
"%s -X %s %s %s %s %s %s %s %s %s %s/%s" % (
|
||||
mksysb_cmd, create_map_files, use_snapshot, exclude_files,
|
||||
exclude_wpar_files, software_packing, extended_attrs,
|
||||
backup_crypt_files, backup_dmapi_fs, new_image_data,
|
||||
storage_path, name))
|
||||
if rc == 0:
|
||||
module.exit_json(changed=True, msg=mksysb_output)
|
||||
else:
|
||||
module.fail_json(msg="mksysb failed.", rc=rc, err=err)
|
||||
|
||||
module.exit_json(changed=True)
|
||||
|
||||
else:
|
||||
module.fail_json(msg="Storage path %s is not valid." % storage_path)
|
||||
def main():
|
||||
MkSysB.execute()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
@@ -28,7 +28,7 @@ options:
|
||||
description:
|
||||
- The port on which the iSCSI target process listens.
|
||||
type: str
|
||||
default: 3260
|
||||
default: '3260'
|
||||
target:
|
||||
description:
|
||||
- The iSCSI target name.
|
||||
|
||||
@@ -20,7 +20,6 @@ options:
|
||||
- The domain that will be added or removed from the list of permissive domains.
|
||||
type: str
|
||||
required: true
|
||||
default: ''
|
||||
aliases: [ name ]
|
||||
permissive:
|
||||
description:
|
||||
|
||||
@@ -158,6 +158,7 @@ options:
|
||||
- This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API
|
||||
(possibly after merging with other required data, as when passed to create). See examples for more information,
|
||||
and the JIRA REST API for the structure required for various fields.
|
||||
- When passed to comment, the data structure is merged at the first level since community.general 4.6.0. Useful to add JIRA properties for example.
|
||||
- Note that JIRA may not allow changing field values on specific transitions or states.
|
||||
|
||||
jql:
|
||||
@@ -261,6 +262,20 @@ EXAMPLES = r"""
|
||||
type: role
|
||||
value: Developers
|
||||
|
||||
- name: Comment on issue with property to mark it internal
|
||||
community.general.jira:
|
||||
uri: '{{ server }}'
|
||||
username: '{{ user }}'
|
||||
password: '{{ pass }}'
|
||||
issue: '{{ issue.meta.key }}'
|
||||
operation: comment
|
||||
comment: A comment added by Ansible
|
||||
fields:
|
||||
properties:
|
||||
- key: 'sd.public.comment'
|
||||
value:
|
||||
internal: true
|
||||
|
||||
# Assign an existing issue using edit
|
||||
- name: Assign an issue using free-form fields
|
||||
community.general.jira:
|
||||
@@ -502,6 +517,10 @@ class JIRA(StateModuleHelper):
|
||||
if self.vars.comment_visibility is not None:
|
||||
data['visibility'] = self.vars.comment_visibility
|
||||
|
||||
# Use 'fields' to merge in any additional data
|
||||
if self.vars.fields:
|
||||
data.update(self.vars.fields)
|
||||
|
||||
url = self.vars.restbase + '/issue/' + self.vars.issue + '/comment'
|
||||
self.vars.meta = self.post(url, data)
|
||||
|
||||
|
||||
@@ -2,4 +2,3 @@ destructive
|
||||
shippable/posix/group3
|
||||
skip/python2.6
|
||||
context/controller # While this is not really true, this module mainly is run on the controller, *and* needs access to the ansible-galaxy CLI tool
|
||||
disabled # FIXME
|
||||
|
||||
3
tests/integration/targets/cargo/meta/main.yml
Normal file
3
tests/integration/targets/cargo/meta/main.yml
Normal file
@@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- setup_pkg_mgr
|
||||
3
tests/integration/targets/django_manage/meta/main.yml
Normal file
3
tests/integration/targets/django_manage/meta/main.yml
Normal file
@@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- setup_pkg_mgr
|
||||
@@ -29,7 +29,6 @@
|
||||
include_tasks: sparse.yml
|
||||
when:
|
||||
- not (ansible_os_family == 'Darwin' and ansible_distribution_version is version('11', '<'))
|
||||
- not (ansible_os_family == 'Alpine') # TODO figure out why it fails
|
||||
|
||||
- name: Include tasks to test playing with symlinks
|
||||
include_tasks: symlinks.yml
|
||||
|
||||
@@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- setup_pkg_mgr
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
dependencies:
|
||||
- setup_pkg_mgr
|
||||
- setup_postgresql_db
|
||||
|
||||
7
tests/integration/targets/pacman/aliases
Normal file
7
tests/integration/targets/pacman/aliases
Normal file
@@ -0,0 +1,7 @@
|
||||
destructive
|
||||
shippable/posix/group1
|
||||
skip/aix
|
||||
skip/freebsd
|
||||
skip/osx
|
||||
skip/macos
|
||||
skip/rhel
|
||||
3
tests/integration/targets/pacman/meta/main.yml
Normal file
3
tests/integration/targets/pacman/meta/main.yml
Normal file
@@ -0,0 +1,3 @@
|
||||
dependencies:
|
||||
- setup_remote_tmp_dir
|
||||
- setup_pkg_mgr
|
||||
82
tests/integration/targets/pacman/tasks/basic.yml
Normal file
82
tests/integration/targets/pacman/tasks/basic.yml
Normal file
@@ -0,0 +1,82 @@
|
||||
---
|
||||
- vars:
|
||||
package_name: unarj
|
||||
block:
|
||||
- name: Make sure that {{ package_name }} is not installed
|
||||
pacman:
|
||||
name: '{{ package_name }}'
|
||||
state: absent
|
||||
|
||||
- name: Install {{ package_name }} (check mode)
|
||||
pacman:
|
||||
name: '{{ package_name }}'
|
||||
state: present
|
||||
check_mode: true
|
||||
register: install_1
|
||||
|
||||
- name: Install {{ package_name }}
|
||||
pacman:
|
||||
name: '{{ package_name }}'
|
||||
state: present
|
||||
register: install_2
|
||||
|
||||
- name: Install {{ package_name }} (check mode, idempotent)
|
||||
pacman:
|
||||
name: '{{ package_name }}'
|
||||
state: present
|
||||
check_mode: true
|
||||
register: install_3
|
||||
|
||||
- name: Install {{ package_name }} (idempotent)
|
||||
pacman:
|
||||
name: '{{ package_name }}'
|
||||
state: present
|
||||
register: install_4
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- install_1 is changed
|
||||
- install_1.msg == 'Would have installed 1 packages'
|
||||
- install_2 is changed
|
||||
- install_2.msg == 'Installed 1 package(s)'
|
||||
- install_3 is not changed
|
||||
- install_3.msg == 'package(s) already installed'
|
||||
- install_4 is not changed
|
||||
- install_4.msg == 'package(s) already installed'
|
||||
|
||||
- name: Uninstall {{ package_name }} (check mode)
|
||||
pacman:
|
||||
name: '{{ package_name }}'
|
||||
state: absent
|
||||
check_mode: true
|
||||
register: uninstall_1
|
||||
|
||||
- name: Uninstall {{ package_name }}
|
||||
pacman:
|
||||
name: '{{ package_name }}'
|
||||
state: absent
|
||||
register: uninstall_2
|
||||
|
||||
- name: Uninstall {{ package_name }} (check mode, idempotent)
|
||||
pacman:
|
||||
name: '{{ package_name }}'
|
||||
state: absent
|
||||
check_mode: true
|
||||
register: uninstall_3
|
||||
|
||||
- name: Uninstall {{ package_name }} (idempotent)
|
||||
pacman:
|
||||
name: '{{ package_name }}'
|
||||
state: absent
|
||||
register: uninstall_4
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- uninstall_1 is changed
|
||||
- uninstall_1.msg == 'Would have removed 1 packages'
|
||||
- uninstall_2 is changed
|
||||
- uninstall_2.msg == 'Removed 1 package(s)'
|
||||
- uninstall_3 is not changed
|
||||
- uninstall_3.msg == 'package(s) already absent'
|
||||
- uninstall_4 is not changed
|
||||
- uninstall_4.msg == 'package(s) already absent'
|
||||
13
tests/integration/targets/pacman/tasks/main.yml
Normal file
13
tests/integration/targets/pacman/tasks/main.yml
Normal file
@@ -0,0 +1,13 @@
|
||||
---
|
||||
####################################################################
|
||||
# WARNING: These are designed specifically for Ansible tests #
|
||||
# and should not be used as examples of how to write Ansible roles #
|
||||
####################################################################
|
||||
|
||||
- when: ansible_os_family == 'Archlinux'
|
||||
block:
|
||||
# Add more tests here by including more task files:
|
||||
- include: 'basic.yml'
|
||||
- include: 'package_urls.yml'
|
||||
- include: 'remove_nosave.yml'
|
||||
- include: 'update_cache.yml'
|
||||
125
tests/integration/targets/pacman/tasks/package_urls.yml
Normal file
125
tests/integration/targets/pacman/tasks/package_urls.yml
Normal file
@@ -0,0 +1,125 @@
|
||||
---
|
||||
- vars:
|
||||
reg_pkg: ed
|
||||
url_pkg: lemon
|
||||
file_pkg: hdparm
|
||||
file_pkg_path: /tmp/pkg.zst
|
||||
extra_pkg: core/sdparm
|
||||
extra_pkg_outfmt: sdparm
|
||||
block:
|
||||
- name: Make sure that test packages are not installed
|
||||
pacman:
|
||||
name:
|
||||
- '{{reg_pkg}}'
|
||||
- '{{url_pkg}}'
|
||||
- '{{file_pkg}}'
|
||||
- '{{extra_pkg}}'
|
||||
state: absent
|
||||
|
||||
- name: Get URL for {{url_pkg}}
|
||||
command:
|
||||
cmd: pacman --sync --print-format "%l" {{url_pkg}}
|
||||
register: url_pkg_url
|
||||
|
||||
- name: Get URL for {{file_pkg}}
|
||||
command:
|
||||
cmd: pacman --sync --print-format "%l" {{file_pkg}}
|
||||
register: file_pkg_url
|
||||
- name: Download {{file_pkg}} pkg
|
||||
get_url:
|
||||
url: '{{file_pkg_url.stdout}}'
|
||||
dest: '{{file_pkg_path}}'
|
||||
|
||||
- name: Install packages from mixed sources (check mode)
|
||||
pacman:
|
||||
name:
|
||||
- '{{reg_pkg}}'
|
||||
- '{{url_pkg_url.stdout}}'
|
||||
- '{{file_pkg_path}}'
|
||||
check_mode: True
|
||||
register: install_1
|
||||
|
||||
- name: Install packages from mixed sources
|
||||
pacman:
|
||||
name:
|
||||
- '{{reg_pkg}}'
|
||||
- '{{url_pkg_url.stdout}}'
|
||||
- '{{file_pkg_path}}'
|
||||
register: install_2
|
||||
|
||||
- name: Install packages from mixed sources - (idempotency)
|
||||
pacman:
|
||||
name:
|
||||
- '{{reg_pkg}}'
|
||||
- '{{url_pkg_url.stdout}}'
|
||||
- '{{file_pkg_path}}'
|
||||
register: install_3
|
||||
|
||||
- name: Install packages with their regular names (idempotency)
|
||||
pacman:
|
||||
name:
|
||||
- '{{reg_pkg}}'
|
||||
- '{{url_pkg}}'
|
||||
- '{{file_pkg}}'
|
||||
register: install_4
|
||||
|
||||
- name: Install new package with already installed packages from mixed sources
|
||||
pacman:
|
||||
name:
|
||||
- '{{reg_pkg}}'
|
||||
- '{{url_pkg_url.stdout}}'
|
||||
- '{{file_pkg_path}}'
|
||||
- '{{extra_pkg}}'
|
||||
register: install_5
|
||||
|
||||
- name: Uninstall packages - mixed sources (check mode)
|
||||
pacman:
|
||||
state: absent
|
||||
name:
|
||||
- '{{reg_pkg}}'
|
||||
- '{{url_pkg_url.stdout}}'
|
||||
- '{{file_pkg_path}}'
|
||||
check_mode: True
|
||||
register: uninstall_1
|
||||
|
||||
- name: Uninstall packages - mixed sources
|
||||
pacman:
|
||||
state: absent
|
||||
name:
|
||||
- '{{reg_pkg}}'
|
||||
- '{{url_pkg_url.stdout}}'
|
||||
- '{{file_pkg_path}}'
|
||||
register: uninstall_2
|
||||
|
||||
- name: Uninstall packages - mixed sources (idempotency)
|
||||
pacman:
|
||||
state: absent
|
||||
name:
|
||||
- '{{reg_pkg}}'
|
||||
- '{{url_pkg_url.stdout}}'
|
||||
- '{{file_pkg_path}}'
|
||||
register: uninstall_3
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- install_1 is changed
|
||||
- install_1.msg == 'Would have installed 3 packages'
|
||||
- install_1.packages|sort() == [reg_pkg, url_pkg, file_pkg]|sort()
|
||||
- install_2 is changed
|
||||
- install_2.msg == 'Installed 3 package(s)'
|
||||
- install_1.packages|sort() == [reg_pkg, url_pkg, file_pkg]|sort()
|
||||
- install_3 is not changed
|
||||
- install_3.msg == 'package(s) already installed'
|
||||
- install_4 is not changed
|
||||
- install_4.msg == 'package(s) already installed'
|
||||
- install_5 is changed
|
||||
- install_5.msg == 'Installed 1 package(s)'
|
||||
- install_5.packages == [extra_pkg_outfmt]
|
||||
- uninstall_1 is changed
|
||||
- uninstall_1.msg == 'Would have removed 3 packages'
|
||||
- uninstall_1.packages | length() == 3 # pkgs have versions here
|
||||
- uninstall_2 is changed
|
||||
- uninstall_2.msg == 'Removed 3 package(s)'
|
||||
- uninstall_2.packages | length() == 3
|
||||
- uninstall_3 is not changed
|
||||
- uninstall_3.msg == 'package(s) already absent'
|
||||
70
tests/integration/targets/pacman/tasks/remove_nosave.yml
Normal file
70
tests/integration/targets/pacman/tasks/remove_nosave.yml
Normal file
@@ -0,0 +1,70 @@
|
||||
---
|
||||
- vars:
|
||||
package_name: xinetd
|
||||
config_file: /etc/xinetd.conf
|
||||
block:
|
||||
- name: Make sure that {{ package_name }} is not installed
|
||||
pacman:
|
||||
name: '{{ package_name }}'
|
||||
state: absent
|
||||
- name: Make sure {{config_file}}.pacsave file doesn't exist
|
||||
file:
|
||||
path: '{{config_file}}.pacsave'
|
||||
state: absent
|
||||
|
||||
- name: Install {{ package_name }}
|
||||
pacman:
|
||||
name: '{{ package_name }}'
|
||||
state: present
|
||||
|
||||
- name: Modify {{config_file}}
|
||||
blockinfile:
|
||||
path: '{{config_file}}'
|
||||
block: |
|
||||
# something something
|
||||
# on 2 lines
|
||||
|
||||
- name: Remove {{ package_name }} - generate pacsave
|
||||
pacman:
|
||||
name: '{{ package_name }}'
|
||||
state: absent
|
||||
- name: Make sure {{config_file}}.pacsave exists
|
||||
stat:
|
||||
path: '{{config_file}}.pacsave'
|
||||
register: pacsave_st_1
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- pacsave_st_1.stat.exists
|
||||
|
||||
- name: Delete {{config_file}}.pacsave
|
||||
file:
|
||||
path: '{{config_file}}.pacsave'
|
||||
state: absent
|
||||
|
||||
- name: Install {{ package_name }}
|
||||
pacman:
|
||||
name: '{{ package_name }}'
|
||||
state: present
|
||||
|
||||
- name: Modify {{config_file}}
|
||||
blockinfile:
|
||||
path: '{{config_file}}'
|
||||
block: |
|
||||
# something something
|
||||
# on 2 lines
|
||||
|
||||
- name: Remove {{ package_name }} - nosave
|
||||
pacman:
|
||||
name: '{{ package_name }}'
|
||||
remove_nosave: yes
|
||||
state: absent
|
||||
|
||||
- name: Make sure {{config_file}}.pacsave does not exist
|
||||
stat:
|
||||
path: '{{config_file}}.pacsave'
|
||||
register: pacsave_st_2
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- not pacsave_st_2.stat.exists
|
||||
23
tests/integration/targets/pacman/tasks/update_cache.yml
Normal file
23
tests/integration/targets/pacman/tasks/update_cache.yml
Normal file
@@ -0,0 +1,23 @@
|
||||
---
|
||||
- name: Make sure package cache is updated
|
||||
pacman:
|
||||
update_cache: true
|
||||
|
||||
- name: Update package cache again (should not be changed)
|
||||
pacman:
|
||||
update_cache: true
|
||||
register: update_cache_idem
|
||||
|
||||
- name: Update package cache again with force=true (should be changed)
|
||||
pacman:
|
||||
update_cache: true
|
||||
force: true
|
||||
register: update_cache_force
|
||||
|
||||
- name: Check conditions
|
||||
assert:
|
||||
that:
|
||||
- update_cache_idem is not changed
|
||||
- update_cache_idem.cache_updated == false
|
||||
- update_cache_force is changed
|
||||
- update_cache_force.cache_updated == true
|
||||
@@ -1 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- setup_pkg_mgr
|
||||
|
||||
19
tests/integration/targets/setup_pkg_mgr/tasks/archlinux.yml
Normal file
19
tests/integration/targets/setup_pkg_mgr/tasks/archlinux.yml
Normal file
@@ -0,0 +1,19 @@
|
||||
---
|
||||
# Since Arch Linux is a rolling distribution, it regularly needs its packages upgraded, otherwise some tests might
|
||||
# stop working due to conflicts during package installation. Since there is no good way to do this on container
|
||||
# startup time, we use the setup_pkg_mgr setup role to do this once per CI run (hopefully). In case the Arch Linux
|
||||
# tests are run outside of a container, we're using a date-based tag (see below) to avoid this running more than
|
||||
# once per day.
|
||||
|
||||
- name: Create tag
|
||||
copy:
|
||||
dest: /tmp/.ansible_archlinux_sysupgrade_tag
|
||||
content: |
|
||||
Last ArchLinux system upgrade by integration tests was done on {{ ansible_facts.date_time.date }}.
|
||||
register: archlinux_upgrade_tag
|
||||
|
||||
- name: Upgrade all packages
|
||||
pacman:
|
||||
update_cache: true
|
||||
upgrade: true
|
||||
when: archlinux_upgrade_tag is changed
|
||||
@@ -16,9 +16,20 @@
|
||||
cacheable: yes
|
||||
when: ansible_os_family == "Suse"
|
||||
|
||||
- set_fact:
|
||||
pkg_mgr: community.general.pacman
|
||||
ansible_pkg_mgr: community.general.pacman
|
||||
cacheable: yes
|
||||
when: ansible_os_family == "Archlinux"
|
||||
|
||||
- shell:
|
||||
cmd: |
|
||||
sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*.repo
|
||||
sed -i 's%#baseurl=http://mirror.centos.org/$contentdir/$releasever/%baseurl=https://vault.centos.org/8.4.2105/%g' /etc/yum.repos.d/CentOS-Linux-*.repo
|
||||
ignore_errors: true # This fails for CentOS Stream 8
|
||||
when: ansible_distribution in 'CentOS' and ansible_distribution_major_version == '8'
|
||||
|
||||
- when: ansible_os_family == "Archlinux"
|
||||
block:
|
||||
- name: ArchLinux specific setup
|
||||
include: archlinux.yml
|
||||
|
||||
@@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- setup_pkg_mgr
|
||||
|
||||
3
tests/integration/targets/timezone/meta/main.yml
Normal file
3
tests/integration/targets/timezone/meta/main.yml
Normal file
@@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- setup_pkg_mgr
|
||||
@@ -49,6 +49,12 @@
|
||||
state: present
|
||||
when: ansible_distribution == 'Archlinux'
|
||||
|
||||
- name: make sure tzdata is installed on Alpine
|
||||
package:
|
||||
name: tzdata
|
||||
state: present
|
||||
when: ansible_distribution == 'Alpine'
|
||||
|
||||
- name: make sure the dbus service is started under systemd
|
||||
systemd:
|
||||
name: dbus
|
||||
|
||||
@@ -48,35 +48,6 @@ def test_missing_access_token_lookup(inventory):
|
||||
assert 'Could not retrieve Linode access token' in error_message
|
||||
|
||||
|
||||
def test_validate_option(inventory):
|
||||
assert ['eu-west'] == inventory._validate_option('regions', list, 'eu-west')
|
||||
assert ['eu-west'] == inventory._validate_option('regions', list, ['eu-west'])
|
||||
assert 'api' == inventory._validate_option('ip_style', str, 'api')
|
||||
|
||||
|
||||
def test_validation_option_bad_option(inventory):
|
||||
with pytest.raises(AnsibleParserError) as error_message:
|
||||
inventory._validate_option('regions', dict, [])
|
||||
assert "The option filters ([]) must be a <class 'dict'>" == error_message
|
||||
|
||||
|
||||
def test_empty_config_query_options(inventory):
|
||||
regions, types, tags = inventory._get_query_options({})
|
||||
assert regions == types == tags == []
|
||||
|
||||
|
||||
def test_config_query_options(inventory):
|
||||
regions, types, tags = inventory._get_query_options({
|
||||
'regions': ['eu-west', 'us-east'],
|
||||
'types': ['g5-standard-2', 'g6-standard-2'],
|
||||
'tags': ['web-server'],
|
||||
})
|
||||
|
||||
assert regions == ['eu-west', 'us-east']
|
||||
assert types == ['g5-standard-2', 'g6-standard-2']
|
||||
assert tags == ['web-server']
|
||||
|
||||
|
||||
def test_verify_file(tmp_path, inventory):
|
||||
file = tmp_path / "foobar.linode.yml"
|
||||
file.touch()
|
||||
|
||||
@@ -522,7 +522,7 @@ def get_json(url):
|
||||
}
|
||||
|
||||
|
||||
def get_vm_snapshots(node, vmtype, vmid, name):
|
||||
def get_vm_snapshots(node, properties, vmtype, vmid, name):
|
||||
return [
|
||||
{"description": "",
|
||||
"name": "clean",
|
||||
@@ -537,7 +537,7 @@ def get_vm_snapshots(node, vmtype, vmid, name):
|
||||
}]
|
||||
|
||||
|
||||
def get_vm_status(node, vmtype, vmid, name):
|
||||
def get_vm_status(properties, node, vmtype, vmid, name):
|
||||
return True
|
||||
|
||||
|
||||
@@ -559,6 +559,9 @@ def test_populate(inventory, mocker):
|
||||
inventory.proxmox_user = 'root@pam'
|
||||
inventory.proxmox_password = 'password'
|
||||
inventory.proxmox_url = 'https://localhost:8006'
|
||||
inventory.group_prefix = 'proxmox_'
|
||||
inventory.facts_prefix = 'proxmox_'
|
||||
inventory.strict = False
|
||||
|
||||
# bypass authentication and API fetch calls
|
||||
inventory._get_auth = mocker.MagicMock(side_effect=get_auth)
|
||||
@@ -566,6 +569,7 @@ def test_populate(inventory, mocker):
|
||||
inventory._get_vm_status = mocker.MagicMock(side_effect=get_vm_status)
|
||||
inventory._get_vm_snapshots = mocker.MagicMock(side_effect=get_vm_snapshots)
|
||||
inventory.get_option = mocker.MagicMock(side_effect=get_option)
|
||||
inventory._can_add_host = mocker.MagicMock(return_value=True)
|
||||
inventory._populate()
|
||||
|
||||
# get different hosts
|
||||
|
||||
@@ -188,6 +188,48 @@ ipv6.ignore-auto-dns: no
|
||||
ipv6.ignore-auto-routes: no
|
||||
"""
|
||||
|
||||
TESTCASE_ETHERNET_MOD_IPV4_INT_WITH_ROUTE_AND_METRIC = [
|
||||
{
|
||||
'type': 'ethernet',
|
||||
'conn_name': 'non_existent_nw_device',
|
||||
'routes4': ['192.168.200.0/24 192.168.1.1'],
|
||||
'route_metric4': 10,
|
||||
'state': 'present',
|
||||
'_ansible_check_mode': False,
|
||||
},
|
||||
]
|
||||
|
||||
TESTCASE_ETHERNET_MOD_IPV4_INT_WITH_ROUTE_AND_METRIC_SHOW_OUTPUT = """\
|
||||
connection.id: non_existent_nw_device
|
||||
connection.interface-name: ethernet_non_existant
|
||||
connection.autoconnect: yes
|
||||
ipv4.method: manual
|
||||
ipv4.addresses: 192.168.1.10
|
||||
ipv4.routes: { ip = 192.168.200.0/24, nh = 192.168.1.1 }
|
||||
ipv4.route-metric: 10
|
||||
"""
|
||||
|
||||
TESTCASE_ETHERNET_MOD_IPV6_INT_WITH_ROUTE_AND_METRIC = [
|
||||
{
|
||||
'type': 'ethernet',
|
||||
'conn_name': 'non_existent_nw_device',
|
||||
'routes6': ['fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2'],
|
||||
'route_metric6': 10,
|
||||
'state': 'present',
|
||||
'_ansible_check_mode': False,
|
||||
},
|
||||
]
|
||||
|
||||
TESTCASE_ETHERNET_MOD_IPV6_INT_WITH_ROUTE_AND_METRIC_SHOW_OUTPUT = """\
|
||||
connection.id: non_existent_nw_device
|
||||
connection.interface-name: ethernet_non_existant
|
||||
connection.autoconnect: yes
|
||||
ipv6.method: manual
|
||||
ipv6.addresses: 2001:beef:cafe:10::1/64
|
||||
ipv6.routes: { ip = fd2e:446f:d85d:5::/64, nh = 2001:beef:cafe:10::2 }
|
||||
ipv6.route-metric 10
|
||||
"""
|
||||
|
||||
TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_MULTIPLE_ROUTES = [
|
||||
{
|
||||
'type': 'ethernet',
|
||||
@@ -1273,6 +1315,28 @@ def mocked_ethernet_connection_with_ipv6_static_address_static_route_create(mock
|
||||
))
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mocked_ethernet_connection_with_ipv4_static_address_static_route_metric_modify(mocker):
|
||||
mocker_set(mocker,
|
||||
connection_exists=True,
|
||||
execute_return=None,
|
||||
execute_side_effect=(
|
||||
(0, TESTCASE_ETHERNET_MOD_IPV4_INT_WITH_ROUTE_AND_METRIC_SHOW_OUTPUT, ""),
|
||||
(0, "", ""),
|
||||
))
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mocked_ethernet_connection_with_ipv6_static_address_static_route_metric_modify(mocker):
|
||||
mocker_set(mocker,
|
||||
connection_exists=True,
|
||||
execute_return=None,
|
||||
execute_side_effect=(
|
||||
(0, TESTCASE_ETHERNET_MOD_IPV6_INT_WITH_ROUTE_AND_METRIC_SHOW_OUTPUT, ""),
|
||||
(0, "", ""),
|
||||
))
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mocked_ethernet_connection_with_ipv6_static_address_multiple_static_routes_create(mocker):
|
||||
mocker_set(mocker,
|
||||
@@ -2424,6 +2488,36 @@ def test_ethernet_connection_static_unchanged(mocked_ethernet_connection_static_
|
||||
assert not results['changed']
|
||||
|
||||
|
||||
@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_MOD_IPV4_INT_WITH_ROUTE_AND_METRIC, indirect=['patch_ansible_module'])
|
||||
def test_ethernet_connection_static_ipv4_address_static_route_with_metric_modify(
|
||||
mocked_ethernet_connection_with_ipv4_static_address_static_route_metric_modify, capfd):
|
||||
"""
|
||||
Test : Modify ethernet connection with static IPv4 address and static route
|
||||
"""
|
||||
with pytest.raises(SystemExit):
|
||||
nmcli.main()
|
||||
|
||||
arg_list = nmcli.Nmcli.execute_command.call_args_list
|
||||
add_args, add_kw = arg_list[1]
|
||||
|
||||
assert add_args[0][0] == '/usr/bin/nmcli'
|
||||
assert add_args[0][1] == 'con'
|
||||
assert add_args[0][2] == 'modify'
|
||||
assert add_args[0][3] == 'non_existent_nw_device'
|
||||
|
||||
add_args_text = list(map(to_text, add_args[0]))
|
||||
|
||||
for param in ['ipv4.routes', '192.168.200.0/24 192.168.1.1',
|
||||
'ipv4.route-metric', '10']:
|
||||
assert param in add_args_text
|
||||
|
||||
out, err = capfd.readouterr()
|
||||
results = json.loads(out)
|
||||
|
||||
assert results.get('changed') is True
|
||||
assert not results.get('failed')
|
||||
|
||||
|
||||
@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_ROUTE, indirect=['patch_ansible_module'])
|
||||
def test_ethernet_connection_static_ipv6_address_static_route_create(mocked_ethernet_connection_with_ipv6_static_address_static_route_create, capfd):
|
||||
"""
|
||||
@@ -2459,6 +2553,36 @@ def test_ethernet_connection_static_ipv6_address_static_route_create(mocked_ethe
|
||||
assert results['changed']
|
||||
|
||||
|
||||
@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_MOD_IPV6_INT_WITH_ROUTE_AND_METRIC, indirect=['patch_ansible_module'])
|
||||
def test_ethernet_connection_static_ipv6_address_static_route_metric_modify(
|
||||
mocked_ethernet_connection_with_ipv6_static_address_static_route_metric_modify, capfd):
|
||||
"""
|
||||
Test : Modify ethernet connection with static IPv6 address and static route
|
||||
"""
|
||||
with pytest.raises(SystemExit):
|
||||
nmcli.main()
|
||||
|
||||
arg_list = nmcli.Nmcli.execute_command.call_args_list
|
||||
add_args, add_kw = arg_list[1]
|
||||
|
||||
assert add_args[0][0] == '/usr/bin/nmcli'
|
||||
assert add_args[0][1] == 'con'
|
||||
assert add_args[0][2] == 'modify'
|
||||
assert add_args[0][3] == 'non_existent_nw_device'
|
||||
|
||||
add_args_text = list(map(to_text, add_args[0]))
|
||||
|
||||
for param in ['ipv6.routes', 'fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2',
|
||||
'ipv6.route-metric', '10']:
|
||||
assert param in add_args_text
|
||||
|
||||
out, err = capfd.readouterr()
|
||||
results = json.loads(out)
|
||||
|
||||
assert results.get('changed') is True
|
||||
assert not results.get('failed')
|
||||
|
||||
|
||||
@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_MULTIPLE_ROUTES, indirect=['patch_ansible_module'])
|
||||
def test_ethernet_connection_static_ipv6_address_multiple_static_routes_with_metric_create(
|
||||
mocked_ethernet_connection_with_ipv6_static_address_multiple_static_routes_with_metric_create, capfd):
|
||||
|
||||
@@ -188,3 +188,75 @@ class NPMModuleTestCase(ModuleTestCase):
|
||||
call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None),
|
||||
call(['/testbin/npm', 'uninstall', '--global', 'coffee-script'], check_rc=True, cwd=None),
|
||||
])
|
||||
|
||||
def test_present_package_json(self):
|
||||
set_module_args({
|
||||
'global': 'true',
|
||||
'state': 'present'
|
||||
})
|
||||
self.module_main_command.side_effect = [
|
||||
(0, '{}', ''),
|
||||
(0, '{}', ''),
|
||||
]
|
||||
|
||||
result = self.module_main(AnsibleExitJson)
|
||||
|
||||
self.assertTrue(result['changed'])
|
||||
self.module_main_command.assert_has_calls([
|
||||
call(['/testbin/npm', 'install', '--global'], check_rc=True, cwd=None),
|
||||
])
|
||||
|
||||
def test_present_package_json_production(self):
|
||||
set_module_args({
|
||||
'production': 'true',
|
||||
'global': 'true',
|
||||
'state': 'present',
|
||||
})
|
||||
self.module_main_command.side_effect = [
|
||||
(0, '{}', ''),
|
||||
(0, '{}', ''),
|
||||
]
|
||||
|
||||
result = self.module_main(AnsibleExitJson)
|
||||
|
||||
self.assertTrue(result['changed'])
|
||||
self.module_main_command.assert_has_calls([
|
||||
call(['/testbin/npm', 'install', '--global', '--production'], check_rc=True, cwd=None),
|
||||
])
|
||||
|
||||
def test_present_package_json_ci(self):
|
||||
set_module_args({
|
||||
'ci': 'true',
|
||||
'global': 'true',
|
||||
'state': 'present'
|
||||
})
|
||||
self.module_main_command.side_effect = [
|
||||
(0, '{}', ''),
|
||||
(0, '{}', ''),
|
||||
]
|
||||
|
||||
result = self.module_main(AnsibleExitJson)
|
||||
|
||||
self.assertTrue(result['changed'])
|
||||
self.module_main_command.assert_has_calls([
|
||||
call(['/testbin/npm', 'ci', '--global'], check_rc=True, cwd=None),
|
||||
])
|
||||
|
||||
def test_present_package_json_ci_production(self):
|
||||
set_module_args({
|
||||
'ci': 'true',
|
||||
'production': 'true',
|
||||
'global': 'true',
|
||||
'state': 'present'
|
||||
})
|
||||
self.module_main_command.side_effect = [
|
||||
(0, '{}', ''),
|
||||
(0, '{}', ''),
|
||||
]
|
||||
|
||||
result = self.module_main(AnsibleExitJson)
|
||||
|
||||
self.assertTrue(result['changed'])
|
||||
self.module_main_command.assert_has_calls([
|
||||
call(['/testbin/npm', 'ci', '--global', '--production'], check_rc=True, cwd=None),
|
||||
])
|
||||
|
||||
@@ -325,36 +325,79 @@ class TestPacman:
|
||||
P.run()
|
||||
self.mock_run_command.call_count == 0
|
||||
out = e.value.args[0]
|
||||
assert "packages" not in out
|
||||
assert out["changed"]
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"module_args,expected_call",
|
||||
"module_args,expected_calls,changed",
|
||||
[
|
||||
({}, ["pacman", "--sync", "--refresh"]),
|
||||
({"force": True}, ["pacman", "--sync", "--refresh", "--refresh"]),
|
||||
(
|
||||
{"update_cache_extra_args": "--some-extra args"},
|
||||
["pacman", "--sync", "--refresh", "--some-extra", "args"], # shlex test
|
||||
{},
|
||||
[
|
||||
(["pacman", "--sync", "--list"], {'check_rc': True}, 0, 'a\nb\nc', ''),
|
||||
(["pacman", "--sync", "--refresh"], {'check_rc': False}, 0, 'stdout', 'stderr'),
|
||||
(["pacman", "--sync", "--list"], {'check_rc': True}, 0, 'b\na\nc', ''),
|
||||
],
|
||||
False,
|
||||
),
|
||||
(
|
||||
{"force": True},
|
||||
[
|
||||
(["pacman", "--sync", "--refresh", "--refresh"], {'check_rc': False}, 0, 'stdout', 'stderr'),
|
||||
],
|
||||
True,
|
||||
),
|
||||
(
|
||||
{"update_cache_extra_args": "--some-extra args"}, # shlex test
|
||||
[
|
||||
(["pacman", "--sync", "--list"], {'check_rc': True}, 0, 'a\nb\nc', ''),
|
||||
(["pacman", "--sync", "--refresh", "--some-extra", "args"], {'check_rc': False}, 0, 'stdout', 'stderr'),
|
||||
(["pacman", "--sync", "--list"], {'check_rc': True}, 0, 'a changed\nb\nc', ''),
|
||||
],
|
||||
True,
|
||||
),
|
||||
(
|
||||
{"force": True, "update_cache_extra_args": "--some-extra args"},
|
||||
["pacman", "--sync", "--refresh", "--some-extra", "args", "--refresh"],
|
||||
[
|
||||
(["pacman", "--sync", "--refresh", "--some-extra", "args", "--refresh"], {'check_rc': False}, 0, 'stdout', 'stderr'),
|
||||
],
|
||||
True,
|
||||
),
|
||||
(
|
||||
# Test whether pacman --sync --list is not called more than twice
|
||||
{"upgrade": True},
|
||||
[
|
||||
(["pacman", "--sync", "--list"], {'check_rc': True}, 0, 'core foo 1.0.0-1 [installed]', ''),
|
||||
(["pacman", "--sync", "--refresh"], {'check_rc': False}, 0, 'stdout', 'stderr'),
|
||||
(["pacman", "--sync", "--list"], {'check_rc': True}, 0, 'core foo 1.0.0-1 [installed]', ''),
|
||||
# The following is _build_inventory:
|
||||
(["pacman", "--query"], {'check_rc': True}, 0, 'foo 1.0.0-1', ''),
|
||||
(["pacman", "--query", "--groups"], {'check_rc': True}, 0, '', ''),
|
||||
(["pacman", "--sync", "--groups", "--groups"], {'check_rc': True}, 0, '', ''),
|
||||
(["pacman", "--query", "--upgrades"], {'check_rc': False}, 0, '', ''),
|
||||
],
|
||||
False,
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_update_db(self, mock_empty_inventory, module_args, expected_call):
|
||||
def test_update_db(self, module_args, expected_calls, changed):
|
||||
args = {"update_cache": True}
|
||||
args.update(module_args)
|
||||
set_module_args(args)
|
||||
|
||||
self.mock_run_command.return_value = [0, "stdout", "stderr"]
|
||||
self.mock_run_command.side_effect = [
|
||||
(rc, stdout, stderr) for expected_call, kwargs, rc, stdout, stderr in expected_calls
|
||||
]
|
||||
with pytest.raises(AnsibleExitJson) as e:
|
||||
P = pacman.Pacman(pacman.setup_module())
|
||||
P.run()
|
||||
|
||||
self.mock_run_command.assert_called_with(mock.ANY, expected_call, check_rc=False)
|
||||
self.mock_run_command.assert_has_calls([
|
||||
mock.call(mock.ANY, expected_call, **kwargs) for expected_call, kwargs, rc, stdout, stderr in expected_calls
|
||||
])
|
||||
out = e.value.args[0]
|
||||
assert out["changed"]
|
||||
assert out["cache_updated"] == changed
|
||||
assert out["changed"] == changed
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"check_mode_value, run_command_data, upgrade_extra_args",
|
||||
@@ -365,7 +408,7 @@ class TestPacman:
|
||||
# for real
|
||||
False,
|
||||
{
|
||||
"args": ["pacman", "--sync", "--sys-upgrade", "--quiet", "--noconfirm"],
|
||||
"args": ["pacman", "--sync", "--sysupgrade", "--quiet", "--noconfirm"],
|
||||
"return_value": [0, "stdout", "stderr"],
|
||||
},
|
||||
None,
|
||||
@@ -377,7 +420,7 @@ class TestPacman:
|
||||
"args": [
|
||||
"pacman",
|
||||
"--sync",
|
||||
"--sys-upgrade",
|
||||
"--sysupgrade",
|
||||
"--quiet",
|
||||
"--noconfirm",
|
||||
"--some",
|
||||
@@ -475,7 +518,13 @@ class TestPacman:
|
||||
# catch all -> call to pacman to resolve (--sync and --upgrade)
|
||||
"present",
|
||||
["somepackage-12.3-x86_64.pkg.tar.zst"],
|
||||
[Package(name="somepackage", source="somepackage-12.3-x86_64.pkg.tar.zst")],
|
||||
[
|
||||
Package(
|
||||
name="somepackage",
|
||||
source="somepackage-12.3-x86_64.pkg.tar.zst",
|
||||
source_is_URL=True,
|
||||
)
|
||||
],
|
||||
{
|
||||
"calls": [
|
||||
mock.call(
|
||||
@@ -576,18 +625,19 @@ class TestPacman:
|
||||
with pytest.raises(AnsibleExitJson) as e:
|
||||
P.run()
|
||||
out = e.value.args[0]
|
||||
assert "packages" not in out
|
||||
assert not out["changed"]
|
||||
assert "packages" in out
|
||||
assert "diff" not in out
|
||||
self.mock_run_command.call_count == 0
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"module_args, expected_packages, run_command_data, raises",
|
||||
"module_args, expected_packages, package_list_out, run_command_data, raises",
|
||||
[
|
||||
(
|
||||
# remove pkg: Check mode -- call to print format but that's it
|
||||
{"_ansible_check_mode": True, "name": ["grep"], "state": "absent"},
|
||||
["grep-version"],
|
||||
[Package("grep", "grep")],
|
||||
{
|
||||
"calls": [
|
||||
mock.call(
|
||||
@@ -612,6 +662,7 @@ class TestPacman:
|
||||
# remove pkg for real now -- with 2 packages
|
||||
{"name": ["grep", "gawk"], "state": "absent"},
|
||||
["grep-version", "gawk-anotherversion"],
|
||||
[Package("grep", "grep"), Package("gawk", "gawk")],
|
||||
{
|
||||
"calls": [
|
||||
mock.call(
|
||||
@@ -650,6 +701,7 @@ class TestPacman:
|
||||
"extra_args": "--some --extra arg",
|
||||
},
|
||||
["grep-version"],
|
||||
[Package("grep", "grep")],
|
||||
{
|
||||
"calls": [
|
||||
mock.call(
|
||||
@@ -698,6 +750,7 @@ class TestPacman:
|
||||
# remove pkg -- Failure to list
|
||||
{"name": ["grep"], "state": "absent"},
|
||||
["grep-3.7-1"],
|
||||
[Package("grep", "grep")],
|
||||
{
|
||||
"calls": [
|
||||
mock.call(
|
||||
@@ -724,6 +777,7 @@ class TestPacman:
|
||||
# remove pkg -- Failure to remove
|
||||
{"name": ["grep"], "state": "absent"},
|
||||
["grep-3.7-1"],
|
||||
[Package("grep", "grep")],
|
||||
{
|
||||
"calls": [
|
||||
mock.call(
|
||||
@@ -756,16 +810,17 @@ class TestPacman:
|
||||
# install pkg: Check mode
|
||||
{"_ansible_check_mode": True, "name": ["sudo"], "state": "present"},
|
||||
["sudo"],
|
||||
[Package("sudo", "sudo")],
|
||||
{
|
||||
"calls": [
|
||||
mock.call(
|
||||
mock.ANY,
|
||||
[
|
||||
"pacman",
|
||||
"--sync",
|
||||
"--noconfirm",
|
||||
"--noprogressbar",
|
||||
"--needed",
|
||||
"--sync",
|
||||
"--print-format",
|
||||
"%n %v",
|
||||
"sudo",
|
||||
@@ -778,19 +833,37 @@ class TestPacman:
|
||||
AnsibleExitJson,
|
||||
),
|
||||
(
|
||||
# install 2 pkgs, one already present
|
||||
{"name": ["sudo", "grep"], "state": "present"},
|
||||
["sudo"],
|
||||
# Install pkgs: one regular, one already installed, one file URL and one https URL
|
||||
{
|
||||
"name": [
|
||||
"sudo",
|
||||
"grep",
|
||||
"./somepackage-12.3-x86_64.pkg.tar.zst",
|
||||
"http://example.com/otherpkg-1.2-x86_64.pkg.tar.zst",
|
||||
],
|
||||
"state": "present",
|
||||
},
|
||||
["sudo", "somepackage", "otherpkg"],
|
||||
[
|
||||
Package("sudo", "sudo"),
|
||||
Package("grep", "grep"),
|
||||
Package("somepackage", "./somepackage-12.3-x86_64.pkg.tar.zst", source_is_URL=True),
|
||||
Package(
|
||||
"otherpkg",
|
||||
"http://example.com/otherpkg-1.2-x86_64.pkg.tar.zst",
|
||||
source_is_URL=True,
|
||||
),
|
||||
],
|
||||
{
|
||||
"calls": [
|
||||
mock.call(
|
||||
mock.ANY,
|
||||
[
|
||||
"pacman",
|
||||
"--sync",
|
||||
"--noconfirm",
|
||||
"--noprogressbar",
|
||||
"--needed",
|
||||
"--sync",
|
||||
"--print-format",
|
||||
"%n %v",
|
||||
"sudo",
|
||||
@@ -801,16 +874,49 @@ class TestPacman:
|
||||
mock.ANY,
|
||||
[
|
||||
"pacman",
|
||||
"--sync",
|
||||
"--noconfirm",
|
||||
"--noprogressbar",
|
||||
"--needed",
|
||||
"--upgrade",
|
||||
"--print-format",
|
||||
"%n %v",
|
||||
"./somepackage-12.3-x86_64.pkg.tar.zst",
|
||||
"http://example.com/otherpkg-1.2-x86_64.pkg.tar.zst",
|
||||
],
|
||||
check_rc=False,
|
||||
),
|
||||
mock.call(
|
||||
mock.ANY,
|
||||
[
|
||||
"pacman",
|
||||
"--noconfirm",
|
||||
"--noprogressbar",
|
||||
"--needed",
|
||||
"--sync",
|
||||
"sudo",
|
||||
],
|
||||
check_rc=False,
|
||||
),
|
||||
mock.call(
|
||||
mock.ANY,
|
||||
[
|
||||
"pacman",
|
||||
"--noconfirm",
|
||||
"--noprogressbar",
|
||||
"--needed",
|
||||
"--upgrade",
|
||||
"./somepackage-12.3-x86_64.pkg.tar.zst",
|
||||
"http://example.com/otherpkg-1.2-x86_64.pkg.tar.zst",
|
||||
],
|
||||
check_rc=False,
|
||||
),
|
||||
],
|
||||
"side_effect": [
|
||||
(0, "sudo version", ""),
|
||||
(0, "somepackage 12.3\notherpkg 1.2", ""),
|
||||
(0, "", ""),
|
||||
(0, "", ""),
|
||||
],
|
||||
"side_effect": [(0, "sudo version", ""), (0, "", "")],
|
||||
},
|
||||
AnsibleExitJson,
|
||||
),
|
||||
@@ -818,19 +924,20 @@ class TestPacman:
|
||||
# install pkg, extra_args
|
||||
{"name": ["sudo"], "state": "present", "extra_args": "--some --thing else"},
|
||||
["sudo"],
|
||||
[Package("sudo", "sudo")],
|
||||
{
|
||||
"calls": [
|
||||
mock.call(
|
||||
mock.ANY,
|
||||
[
|
||||
"pacman",
|
||||
"--sync",
|
||||
"--noconfirm",
|
||||
"--noprogressbar",
|
||||
"--needed",
|
||||
"--some",
|
||||
"--thing",
|
||||
"else",
|
||||
"--sync",
|
||||
"--print-format",
|
||||
"%n %v",
|
||||
"sudo",
|
||||
@@ -841,13 +948,13 @@ class TestPacman:
|
||||
mock.ANY,
|
||||
[
|
||||
"pacman",
|
||||
"--sync",
|
||||
"--noconfirm",
|
||||
"--noprogressbar",
|
||||
"--needed",
|
||||
"--some",
|
||||
"--thing",
|
||||
"else",
|
||||
"--sync",
|
||||
"sudo",
|
||||
],
|
||||
check_rc=False,
|
||||
@@ -861,16 +968,17 @@ class TestPacman:
|
||||
# latest pkg: Check mode
|
||||
{"_ansible_check_mode": True, "name": ["sqlite"], "state": "latest"},
|
||||
["sqlite"],
|
||||
[Package("sqlite", "sqlite")],
|
||||
{
|
||||
"calls": [
|
||||
mock.call(
|
||||
mock.ANY,
|
||||
[
|
||||
"pacman",
|
||||
"--sync",
|
||||
"--noconfirm",
|
||||
"--noprogressbar",
|
||||
"--needed",
|
||||
"--sync",
|
||||
"--print-format",
|
||||
"%n %v",
|
||||
"sqlite",
|
||||
@@ -886,16 +994,17 @@ class TestPacman:
|
||||
# latest pkg -- one already latest
|
||||
{"name": ["sqlite", "grep"], "state": "latest"},
|
||||
["sqlite"],
|
||||
[Package("sqlite", "sqlite")],
|
||||
{
|
||||
"calls": [
|
||||
mock.call(
|
||||
mock.ANY,
|
||||
[
|
||||
"pacman",
|
||||
"--sync",
|
||||
"--noconfirm",
|
||||
"--noprogressbar",
|
||||
"--needed",
|
||||
"--sync",
|
||||
"--print-format",
|
||||
"%n %v",
|
||||
"sqlite",
|
||||
@@ -906,10 +1015,10 @@ class TestPacman:
|
||||
mock.ANY,
|
||||
[
|
||||
"pacman",
|
||||
"--sync",
|
||||
"--noconfirm",
|
||||
"--noprogressbar",
|
||||
"--needed",
|
||||
"--sync",
|
||||
"sqlite",
|
||||
],
|
||||
check_rc=False,
|
||||
@@ -924,13 +1033,16 @@ class TestPacman:
|
||||
def test_op_packages(
|
||||
self,
|
||||
mock_valid_inventory,
|
||||
mock_package_list,
|
||||
module_args,
|
||||
expected_packages,
|
||||
package_list_out,
|
||||
run_command_data,
|
||||
raises,
|
||||
):
|
||||
set_module_args(module_args)
|
||||
self.mock_run_command.side_effect = run_command_data["side_effect"]
|
||||
mock_package_list.return_value = package_list_out
|
||||
|
||||
P = pacman.Pacman(pacman.setup_module())
|
||||
with pytest.raises(raises) as e:
|
||||
@@ -941,6 +1053,7 @@ class TestPacman:
|
||||
if raises == AnsibleExitJson:
|
||||
assert out["packages"] == expected_packages
|
||||
assert out["changed"]
|
||||
assert "packages" in out
|
||||
assert "diff" in out
|
||||
else:
|
||||
assert out["stdout"] == "stdout"
|
||||
|
||||
Reference in New Issue
Block a user