mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-04-28 09:26:44 +00:00
Compare commits
99 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c85bb8713e | ||
|
|
5cdc8f4b07 | ||
|
|
50131f5dfa | ||
|
|
c734e7c2e5 | ||
|
|
7e6e8f7749 | ||
|
|
687acdc961 | ||
|
|
16092feaab | ||
|
|
6676fb8fb4 | ||
|
|
a860f537dd | ||
|
|
f1a9c2f00a | ||
|
|
f8de068e32 | ||
|
|
70b4bacf0f | ||
|
|
41f5d1741c | ||
|
|
54ede7dd7f | ||
|
|
7f0702b786 | ||
|
|
89a3abe64a | ||
|
|
59eff2e3e0 | ||
|
|
1115b463fe | ||
|
|
77bf1fedf5 | ||
|
|
89560ea2e7 | ||
|
|
f9919d28d4 | ||
|
|
7b4660d28a | ||
|
|
29496be80e | ||
|
|
991c96615c | ||
|
|
fe5ad997c1 | ||
|
|
468b28bbb8 | ||
|
|
9b57221d9a | ||
|
|
cd1a92d417 | ||
|
|
7486e3a074 | ||
|
|
6661917370 | ||
|
|
ec0bd3143a | ||
|
|
cce68def8b | ||
|
|
6f5ad22d28 | ||
|
|
6c53a09eef | ||
|
|
9b6e75f7f4 | ||
|
|
e09650140d | ||
|
|
67388be1a9 | ||
|
|
130d07948a | ||
|
|
5d6fcaef53 | ||
|
|
f044a83c49 | ||
|
|
e3f7e8dadf | ||
|
|
8d1a028dbd | ||
|
|
8823e5c061 | ||
|
|
102456d033 | ||
|
|
aad4c55d3d | ||
|
|
e31c98f17f | ||
|
|
6a5dfc5579 | ||
|
|
ab7efef9df | ||
|
|
ca9c763b57 | ||
|
|
cfeb40ed23 | ||
|
|
c495d136fa | ||
|
|
d9e2d6682b | ||
|
|
d7fe288ffd | ||
|
|
7de89699f7 | ||
|
|
b0a9cceeb5 | ||
|
|
b08f0b2f82 | ||
|
|
f23f409bd6 | ||
|
|
cfea62793f | ||
|
|
62bda91466 | ||
|
|
473d5fa2af | ||
|
|
cc76d684d5 | ||
|
|
7a6770c731 | ||
|
|
d2214af6e8 | ||
|
|
fad1220869 | ||
|
|
fe09516235 | ||
|
|
78cd8886f4 | ||
|
|
6b99d48f06 | ||
|
|
6e0e17a7e3 | ||
|
|
90de95c7b2 | ||
|
|
07c6b8b24e | ||
|
|
d106de6d51 | ||
|
|
e96101fb3f | ||
|
|
a60d55f03c | ||
|
|
d6a09ada98 | ||
|
|
9ddb75a3a2 | ||
|
|
b85ff2a997 | ||
|
|
3d1ca5638b | ||
|
|
35fd4700bf | ||
|
|
9add9df7d6 | ||
|
|
cdb747b41d | ||
|
|
7a70fda784 | ||
|
|
13d1b9569e | ||
|
|
c8c5021773 | ||
|
|
206ac72bd8 | ||
|
|
1ee123bb1e | ||
|
|
aa0bcad9df | ||
|
|
6e51690a95 | ||
|
|
3eab4faf0b | ||
|
|
c6589a772b | ||
|
|
cb06c4ff77 | ||
|
|
78316fbb75 | ||
|
|
c7df82652f | ||
|
|
342a1a7faa | ||
|
|
17431bd42c | ||
|
|
eacf663999 | ||
|
|
84a806be08 | ||
|
|
7db5c86dc8 | ||
|
|
1e82b5c580 | ||
|
|
7f6be665f9 |
@@ -24,14 +24,15 @@ schedules:
|
||||
always: true
|
||||
branches:
|
||||
include:
|
||||
- stable-2
|
||||
- stable-3
|
||||
- stable-4
|
||||
- cron: 0 11 * * 0
|
||||
displayName: Weekly (old stable branches)
|
||||
always: true
|
||||
branches:
|
||||
include:
|
||||
- stable-1
|
||||
- stable-2
|
||||
|
||||
variables:
|
||||
- name: checkoutPath
|
||||
@@ -209,8 +210,8 @@ stages:
|
||||
test: macos/11.1
|
||||
- name: RHEL 7.9
|
||||
test: rhel/7.9
|
||||
- name: RHEL 8.4
|
||||
test: rhel/8.4
|
||||
- name: RHEL 8.5
|
||||
test: rhel/8.5
|
||||
- name: FreeBSD 12.2
|
||||
test: freebsd/12.2
|
||||
- name: FreeBSD 13.0
|
||||
|
||||
38
.github/BOTMETA.yml
vendored
38
.github/BOTMETA.yml
vendored
@@ -118,6 +118,8 @@ files:
|
||||
$doc_fragments/xenserver.py:
|
||||
maintainers: bvitnik
|
||||
labels: xenserver
|
||||
$filters/counter.py:
|
||||
maintainers: keilr
|
||||
$filters/dict.py:
|
||||
maintainers: felixfontein
|
||||
$filters/dict_kv.py:
|
||||
@@ -156,15 +158,17 @@ files:
|
||||
maintainers: conloos
|
||||
$inventories/nmap.py: {}
|
||||
$inventories/online.py:
|
||||
maintainers: sieben
|
||||
maintainers: remyleone
|
||||
$inventories/opennebula.py:
|
||||
maintainers: feldsam
|
||||
labels: cloud opennebula
|
||||
keywords: opennebula dynamic inventory script
|
||||
$inventories/proxmox.py:
|
||||
maintainers: $team_virt ilijamt
|
||||
$inventories/xen_orchestra.py:
|
||||
maintainers: ddelnano shinuza
|
||||
$inventories/icinga2.py:
|
||||
maintainers: bongoeadgc6
|
||||
maintainers: BongoEADGC6
|
||||
$inventories/scaleway.py:
|
||||
maintainers: $team_scaleway
|
||||
labels: cloud scaleway
|
||||
@@ -223,6 +227,8 @@ files:
|
||||
maintainers: konstruktoid
|
||||
$lookups/redis.py:
|
||||
maintainers: $team_ansible_core jpmens
|
||||
$lookups/revbitspss.py:
|
||||
maintainers: RevBits
|
||||
$lookups/shelvefile.py: {}
|
||||
$lookups/tss.py:
|
||||
maintainers: amigus endlesstrax
|
||||
@@ -318,6 +324,10 @@ files:
|
||||
$modules/cloud/misc/proxmox_kvm.py:
|
||||
maintainers: helldorado
|
||||
ignore: skvidal
|
||||
$modules/cloud/misc/proxmox_nic.py:
|
||||
maintainers: Kogelvis
|
||||
$modules/cloud/misc/proxmox_tasks_info:
|
||||
maintainers: paginabianca
|
||||
$modules/cloud/misc/proxmox_template.py:
|
||||
maintainers: UnderGreen
|
||||
ignore: skvidal
|
||||
@@ -337,7 +347,7 @@ files:
|
||||
$modules/cloud/oneandone/:
|
||||
maintainers: aajdinov edevenport
|
||||
$modules/cloud/online/:
|
||||
maintainers: sieben
|
||||
maintainers: remyleone
|
||||
$modules/cloud/opennebula/:
|
||||
maintainers: $team_opennebula
|
||||
$modules/cloud/opennebula/one_host.py:
|
||||
@@ -407,11 +417,11 @@ files:
|
||||
$modules/cloud/scaleway/scaleway_ip_info.py:
|
||||
maintainers: Spredzy
|
||||
$modules/cloud/scaleway/scaleway_organization_info.py:
|
||||
maintainers: sieben Spredzy
|
||||
maintainers: Spredzy
|
||||
$modules/cloud/scaleway/scaleway_security_group.py:
|
||||
maintainers: DenBeke
|
||||
$modules/cloud/scaleway/scaleway_security_group_info.py:
|
||||
maintainers: sieben Spredzy
|
||||
maintainers: Spredzy
|
||||
$modules/cloud/scaleway/scaleway_security_group_rule.py:
|
||||
maintainers: DenBeke
|
||||
$modules/cloud/scaleway/scaleway_server_info.py:
|
||||
@@ -530,6 +540,8 @@ files:
|
||||
maintainers: adamgoossens
|
||||
$modules/identity/keycloak/keycloak_identity_provider.py:
|
||||
maintainers: laurpaum
|
||||
$modules/identity/keycloak/keycloak_realm_info.py:
|
||||
maintainers: fynncfchen
|
||||
$modules/identity/keycloak/keycloak_realm.py:
|
||||
maintainers: kris2kris
|
||||
$modules/identity/keycloak/keycloak_role.py:
|
||||
@@ -615,6 +627,8 @@ files:
|
||||
labels: cloudflare_dns
|
||||
$modules/net_tools/dnsimple.py:
|
||||
maintainers: drcapulet
|
||||
$modules/net_tools/dnsimple_info.py:
|
||||
maintainers: edhilgendorf
|
||||
$modules/net_tools/dnsmadeeasy.py:
|
||||
maintainers: briceburg
|
||||
$modules/net_tools/gandi_livedns.py:
|
||||
@@ -716,6 +730,8 @@ files:
|
||||
maintainers: mwarkentin
|
||||
$modules/packaging/language/bundler.py:
|
||||
maintainers: thoiberg
|
||||
$modules/packaging/language/cargo.py:
|
||||
maintainers: radek-sprta
|
||||
$modules/packaging/language/composer.py:
|
||||
maintainers: dmtrs
|
||||
ignore: resmo
|
||||
@@ -897,6 +913,10 @@ files:
|
||||
$modules/remote_management/manageiq/:
|
||||
labels: manageiq
|
||||
maintainers: $team_manageiq
|
||||
$modules/remote_management/manageiq/manageiq_alert_profiles.py:
|
||||
maintainers: elad661
|
||||
$modules/remote_management/manageiq/manageiq_alerts.py:
|
||||
maintainers: elad661
|
||||
$modules/remote_management/manageiq/manageiq_group.py:
|
||||
maintainers: evertmulder
|
||||
$modules/remote_management/manageiq/manageiq_tenant.py:
|
||||
@@ -947,6 +967,8 @@ files:
|
||||
maintainers: SamyCoenen
|
||||
$modules/source_control/gitlab/gitlab_user.py:
|
||||
maintainers: LennertMertens stgrace
|
||||
$modules/source_control/gitlab/gitlab_branch.py:
|
||||
maintainers: paytroff
|
||||
$modules/source_control/hg.py:
|
||||
maintainers: yeukhon
|
||||
$modules/storage/emc/emc_vnx_sg_member.py:
|
||||
@@ -1082,6 +1104,8 @@ files:
|
||||
keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
|
||||
$modules/system/ssh_config.py:
|
||||
maintainers: gaqzi Akasurde
|
||||
$modules/system/sudoers.py:
|
||||
maintainers: JonEllis
|
||||
$modules/system/svc.py:
|
||||
maintainers: bcoca
|
||||
$modules/system/syspatch.py:
|
||||
@@ -1220,9 +1244,9 @@ macros:
|
||||
team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding
|
||||
team_oracle: manojmeda mross22 nalsaber
|
||||
team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
|
||||
team_redfish: mraineri tomasg2012 xmadsen renxulei
|
||||
team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06
|
||||
team_rhn: FlossWare alikins barnabycourt vritant
|
||||
team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone sieben
|
||||
team_scaleway: remyleone abarbare
|
||||
team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
|
||||
team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom sealor
|
||||
team_virt: joshainglis karmab tleguern Thulium-Drake Ajpantuso
|
||||
|
||||
189
CHANGELOG.rst
189
CHANGELOG.rst
@@ -6,6 +6,195 @@ Community General Release Notes
|
||||
|
||||
This changelog describes changes after version 3.0.0.
|
||||
|
||||
v4.3.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular feature and bugfix release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- ipa_dnszone - ``dynamicupdate`` is now a boolean parameter, instead of a string parameter accepting ``"true"`` and ``"false"``. Also the module is now idempotent with respect to ``dynamicupdate`` (https://github.com/ansible-collections/community.general/pull/3374).
|
||||
- ipa_dnszone - add DNS zone synchronization support (https://github.com/ansible-collections/community.general/pull/3374).
|
||||
- ipmi_power - add ``machine`` option to ensure the power state via the remote target address (https://github.com/ansible-collections/community.general/pull/3968).
|
||||
- mattermost - add the possibility to send attachments instead of text messages (https://github.com/ansible-collections/community.general/pull/3946).
|
||||
- nmcli - add ``wireguard`` connection type (https://github.com/ansible-collections/community.general/pull/3985).
|
||||
- proxmox - add ``clone`` parameter (https://github.com/ansible-collections/community.general/pull/3930).
|
||||
- puppet - remove deprecation for ``show_diff`` parameter. Its alias ``show-diff`` is still deprecated and will be removed in community.general 7.0.0 (https://github.com/ansible-collections/community.general/pull/3980).
|
||||
- scaleway_compute - add possibility to use project identifier (new ``project`` option) instead of deprecated organization identifier (https://github.com/ansible-collections/community.general/pull/3951).
|
||||
- scaleway_volume - all volumes are systematically created on par1 (https://github.com/ansible-collections/community.general/pull/3964).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Various modules and plugins - use vendored version of ``distutils.version`` instead of the deprecated Python standard library ``distutils`` (https://github.com/ansible-collections/community.general/pull/3936).
|
||||
- alternatives - fix output parsing for alternatives groups (https://github.com/ansible-collections/community.general/pull/3976).
|
||||
- jail connection plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the executable (https://github.com/ansible-collections/community.general/pull/3934).
|
||||
- lxd connection plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the ``lxc`` executable (https://github.com/ansible-collections/community.general/pull/3934).
|
||||
- passwordstore lookup plugin - replace deprecated ``distutils.util.strtobool`` with Ansible's ``convert_bool.boolean`` to interpret values for the ``create``, ``returnall``, ``overwrite``, ``backup``, and ``nosymbols`` options (https://github.com/ansible-collections/community.general/pull/3934).
|
||||
- say callback plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the ``say`` resp. ``espeak`` executables (https://github.com/ansible-collections/community.general/pull/3934).
|
||||
- scaleway_user_data - fix double-quote added where no double-quote is needed to user data in scaleway's server (``Content-type`` -> ``Content-Type``) (https://github.com/ansible-collections/community.general/pull/3940).
|
||||
- slack - add ``charset`` to HTTP headers to avoid Slack API warning (https://github.com/ansible-collections/community.general/issues/3932).
|
||||
- zone connection plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the executable (https://github.com/ansible-collections/community.general/pull/3934).
|
||||
|
||||
New Plugins
|
||||
-----------
|
||||
|
||||
Filter
|
||||
~~~~~~
|
||||
|
||||
- counter - Counts hashable elements in a sequence
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
Identity
|
||||
~~~~~~~~
|
||||
|
||||
keycloak
|
||||
^^^^^^^^
|
||||
|
||||
- keycloak_realm_info - Allows obtaining Keycloak realm public information via Keycloak API
|
||||
|
||||
Packaging
|
||||
~~~~~~~~~
|
||||
|
||||
language
|
||||
^^^^^^^^
|
||||
|
||||
- cargo - Manage Rust packages with cargo
|
||||
|
||||
System
|
||||
~~~~~~
|
||||
|
||||
- sudoers - Manage sudoers files
|
||||
|
||||
v4.2.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular bugfix and feature release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- aix_filesystem - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3833).
|
||||
- aix_lvg - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3834).
|
||||
- gitlab - add more token authentication support with the new options ``api_oauth_token`` and ``api_job_token`` (https://github.com/ansible-collections/community.general/issues/705).
|
||||
- gitlab_group, gitlab_project - add new option ``avatar_path`` (https://github.com/ansible-collections/community.general/pull/3792).
|
||||
- gitlab_project - add new option ``default_branch`` to gitlab_project (if ``readme = true``) (https://github.com/ansible-collections/community.general/pull/3792).
|
||||
- hponcfg - revamped module using ModuleHelper (https://github.com/ansible-collections/community.general/pull/3840).
|
||||
- icinga2 inventory plugin - added the ``display_name`` field to variables (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906).
|
||||
- icinga2 inventory plugin - inventory object names are changeable using ``inventory_attr`` in your config file to the host object name, address, or display_name fields (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906).
|
||||
- ip_netns - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3822).
|
||||
- iso_extract - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3805).
|
||||
- java_cert - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3835).
|
||||
- jira - add support for Bearer token auth (https://github.com/ansible-collections/community.general/pull/3838).
|
||||
- keycloak_user_federation - add sssd user federation support (https://github.com/ansible-collections/community.general/issues/3767).
|
||||
- logentries - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3807).
|
||||
- logstash_plugin - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3808).
|
||||
- lxc_container - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3851).
|
||||
- lxd connection plugin - make sure that ``ansible_lxd_host``, ``ansible_executable``, and ``ansible_lxd_executable`` work (https://github.com/ansible-collections/community.general/pull/3798).
|
||||
- lxd inventory plugin - support virtual machines (https://github.com/ansible-collections/community.general/pull/3519).
|
||||
- module_helper module utils - added decorators ``check_mode_skip`` and ``check_mode_skip_returns`` for skipping methods when ``check_mode=True`` (https://github.com/ansible-collections/community.general/pull/3849).
|
||||
- monit - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3821).
|
||||
- nmcli - add multiple addresses support for ``ip6`` parameter (https://github.com/ansible-collections/community.general/issues/1088).
|
||||
- nmcli - add support for ``eui64`` and ``ipv6privacy`` parameters (https://github.com/ansible-collections/community.general/issues/3357).
|
||||
- python_requirements_info - returns python version broken down into its components, and some minor refactoring (https://github.com/ansible-collections/community.general/pull/3797).
|
||||
- svc - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3829).
|
||||
- xattr - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3806).
|
||||
- xfconf - minor refactor on the base class for the module (https://github.com/ansible-collections/community.general/pull/3919).
|
||||
|
||||
Deprecated Features
|
||||
-------------------
|
||||
|
||||
- module_helper module utils - deprecated the attribute ``ModuleHelper.VarDict`` (https://github.com/ansible-collections/community.general/pull/3801).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- icinga2 inventory plugin - handle 404 error when filter produces no results (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906).
|
||||
- interfaces_file - fixed the check for existing option in interface (https://github.com/ansible-collections/community.general/issues/3841).
|
||||
- jira - fixed bug where module returns error related to dictionary key ``body`` (https://github.com/ansible-collections/community.general/issues/3419).
|
||||
- nmcli - fix returning "changed" when no mask set for IPv4 or IPv6 addresses on task rerun (https://github.com/ansible-collections/community.general/issues/3768).
|
||||
- nmcli - pass ``flags``, ``ingress``, ``egress`` params to ``nmcli`` (https://github.com/ansible-collections/community.general/issues/1086).
|
||||
- nrdp callback plugin - fix error ``string arguments without an encoding`` (https://github.com/ansible-collections/community.general/issues/3903).
|
||||
- opentelemetry_plugin - honour ``ignore_errors`` when a task has failed instead of reporting an error (https://github.com/ansible-collections/community.general/pull/3837).
|
||||
- pipx - passes the correct command line option ``--include-apps`` (https://github.com/ansible-collections/community.general/issues/3791).
|
||||
- proxmox - fixed ``onboot`` parameter causing module failures when undefined (https://github.com/ansible-collections/community.general/issues/3844).
|
||||
- python_requirements_info - fails if version operator used without version (https://github.com/ansible-collections/community.general/pull/3785).
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
Net Tools
|
||||
~~~~~~~~~
|
||||
|
||||
- dnsimple_info - Pull basic info from DNSimple API
|
||||
|
||||
Remote Management
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
redfish
|
||||
^^^^^^^
|
||||
|
||||
- ilo_redfish_config - Sets or updates configuration attributes on HPE iLO with Redfish OEM extensions
|
||||
- ilo_redfish_info - Gathers server information through iLO using Redfish APIs
|
||||
|
||||
Source Control
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
gitlab
|
||||
^^^^^^
|
||||
|
||||
- gitlab_branch - Create or delete a branch
|
||||
|
||||
v4.1.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular bugfix and feature release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- gitlab - clean up modules and utils (https://github.com/ansible-collections/community.general/pull/3694).
|
||||
- ipmi_boot - add support for user-specified IPMI encryption key (https://github.com/ansible-collections/community.general/issues/3698).
|
||||
- ipmi_power - add support for user-specified IPMI encryption key (https://github.com/ansible-collections/community.general/issues/3698).
|
||||
- listen_ports_facts - add support for ``ss`` command besides ``netstat`` (https://github.com/ansible-collections/community.general/pull/3708).
|
||||
- lxd_container - adds ``type`` option which also allows to operate on virtual machines and not just containers (https://github.com/ansible-collections/community.general/pull/3661).
|
||||
- nmcli - add multiple addresses support for ``ip4`` parameter (https://github.com/ansible-collections/community.general/issues/1088, https://github.com/ansible-collections/community.general/pull/3738).
|
||||
- open_iscsi - extended module to allow rescanning of established session for one or all targets (https://github.com/ansible-collections/community.general/issues/3763).
|
||||
- pacman - add ``stdout`` and ``stderr`` as return values (https://github.com/ansible-collections/community.general/pull/3758).
|
||||
- redfish_command - add ``GetHostInterfaces`` command to enable reporting Redfish Host Interface information (https://github.com/ansible-collections/community.general/issues/3693).
|
||||
- redfish_command - add ``SetHostInterface`` command to enable configuring the Redfish Host Interface (https://github.com/ansible-collections/community.general/issues/3632).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- github_repo - ``private`` and ``description`` attributes should not be set to default values when the repo already exists (https://github.com/ansible-collections/community.general/pull/2386).
|
||||
- terraform - fix command options being ignored during planned/plan in function ``build_plan`` such as ``lock`` or ``lock_timeout`` (https://github.com/ansible-collections/community.general/issues/3707, https://github.com/ansible-collections/community.general/pull/3726).
|
||||
|
||||
New Plugins
|
||||
-----------
|
||||
|
||||
Inventory
|
||||
~~~~~~~~~
|
||||
|
||||
- xen_orchestra - Xen Orchestra inventory source
|
||||
|
||||
Lookup
|
||||
~~~~~~
|
||||
|
||||
- revbitspss - Get secrets from RevBits PAM server
|
||||
|
||||
v4.0.2
|
||||
======
|
||||
|
||||
|
||||
@@ -1069,3 +1069,243 @@ releases:
|
||||
- 4.0.2.yml
|
||||
- deprecate-ansible-2.9-2.10.yml
|
||||
release_date: '2021-11-16'
|
||||
4.1.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- github_repo - ``private`` and ``description`` attributes should not be set
|
||||
to default values when the repo already exists (https://github.com/ansible-collections/community.general/pull/2386).
|
||||
- terraform - fix command options being ignored during planned/plan in function
|
||||
``build_plan`` such as ``lock`` or ``lock_timeout`` (https://github.com/ansible-collections/community.general/issues/3707,
|
||||
https://github.com/ansible-collections/community.general/pull/3726).
|
||||
minor_changes:
|
||||
- gitlab - clean up modules and utils (https://github.com/ansible-collections/community.general/pull/3694).
|
||||
- ipmi_boot - add support for user-specified IPMI encryption key (https://github.com/ansible-collections/community.general/issues/3698).
|
||||
- ipmi_power - add support for user-specified IPMI encryption key (https://github.com/ansible-collections/community.general/issues/3698).
|
||||
- listen_ports_facts - add support for ``ss`` command besides ``netstat`` (https://github.com/ansible-collections/community.general/pull/3708).
|
||||
- lxd_container - adds ``type`` option which also allows to operate on virtual
|
||||
machines and not just containers (https://github.com/ansible-collections/community.general/pull/3661).
|
||||
- nmcli - add multiple addresses support for ``ip4`` parameter (https://github.com/ansible-collections/community.general/issues/1088,
|
||||
https://github.com/ansible-collections/community.general/pull/3738).
|
||||
- open_iscsi - extended module to allow rescanning of established session for
|
||||
one or all targets (https://github.com/ansible-collections/community.general/issues/3763).
|
||||
- pacman - add ``stdout`` and ``stderr`` as return values (https://github.com/ansible-collections/community.general/pull/3758).
|
||||
- redfish_command - add ``GetHostInterfaces`` command to enable reporting Redfish
|
||||
Host Interface information (https://github.com/ansible-collections/community.general/issues/3693).
|
||||
- redfish_command - add ``SetHostInterface`` command to enable configuring the
|
||||
Redfish Host Interface (https://github.com/ansible-collections/community.general/issues/3632).
|
||||
release_summary: Regular bugfix and feature release.
|
||||
fragments:
|
||||
- 1088-nmcli_add_multiple_addresses_support.yml
|
||||
- 2386-github_repo-fix-idempotency-issues.yml
|
||||
- 3632-add-redfish-host-interface-config-support.yml
|
||||
- 3661-lxd_container-add-vm-support.yml
|
||||
- 3693-add-redfish-host-interface-info-support.yml
|
||||
- 3694-gitlab-cleanup.yml
|
||||
- 3702-ipmi-encryption-key.yml
|
||||
- 3708-listen_ports_facts-add-ss-support.yml
|
||||
- 3726-terraform-missing-parameters-planned-fix.yml
|
||||
- 3758-pacman-add-stdout-stderr.yml
|
||||
- 3765-extend-open_iscsi-with-rescan.yml
|
||||
- 4.1.0.yml
|
||||
plugins:
|
||||
inventory:
|
||||
- description: Xen Orchestra inventory source
|
||||
name: xen_orchestra
|
||||
namespace: null
|
||||
lookup:
|
||||
- description: Get secrets from RevBits PAM server
|
||||
name: revbitspss
|
||||
namespace: null
|
||||
release_date: '2021-11-23'
|
||||
4.2.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- icinga2 inventory plugin - handle 404 error when filter produces no results
|
||||
(https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906).
|
||||
- interfaces_file - fixed the check for existing option in interface (https://github.com/ansible-collections/community.general/issues/3841).
|
||||
- jira - fixed bug where module returns error related to dictionary key ``body``
|
||||
(https://github.com/ansible-collections/community.general/issues/3419).
|
||||
- nmcli - fix returning "changed" when no mask set for IPv4 or IPv6 addresses
|
||||
on task rerun (https://github.com/ansible-collections/community.general/issues/3768).
|
||||
- nmcli - pass ``flags``, ``ingress``, ``egress`` params to ``nmcli`` (https://github.com/ansible-collections/community.general/issues/1086).
|
||||
- nrdp callback plugin - fix error ``string arguments without an encoding``
|
||||
(https://github.com/ansible-collections/community.general/issues/3903).
|
||||
- opentelemetry_plugin - honour ``ignore_errors`` when a task has failed instead
|
||||
of reporting an error (https://github.com/ansible-collections/community.general/pull/3837).
|
||||
- pipx - passes the correct command line option ``--include-apps`` (https://github.com/ansible-collections/community.general/issues/3791).
|
||||
- proxmox - fixed ``onboot`` parameter causing module failures when undefined
|
||||
(https://github.com/ansible-collections/community.general/issues/3844).
|
||||
- python_requirements_info - fails if version operator used without version
|
||||
(https://github.com/ansible-collections/community.general/pull/3785).
|
||||
deprecated_features:
|
||||
- module_helper module utils - deprecated the attribute ``ModuleHelper.VarDict``
|
||||
(https://github.com/ansible-collections/community.general/pull/3801).
|
||||
minor_changes:
|
||||
- aix_filesystem - calling ``run_command`` with arguments as ``list`` instead
|
||||
of ``str`` (https://github.com/ansible-collections/community.general/pull/3833).
|
||||
- aix_lvg - calling ``run_command`` with arguments as ``list`` instead of ``str``
|
||||
(https://github.com/ansible-collections/community.general/pull/3834).
|
||||
- gitlab - add more token authentication support with the new options ``api_oauth_token``
|
||||
and ``api_job_token`` (https://github.com/ansible-collections/community.general/issues/705).
|
||||
- gitlab_group, gitlab_project - add new option ``avatar_path`` (https://github.com/ansible-collections/community.general/pull/3792).
|
||||
- gitlab_project - add new option ``default_branch`` to gitlab_project (if ``readme
|
||||
= true``) (https://github.com/ansible-collections/community.general/pull/3792).
|
||||
- hponcfg - revamped module using ModuleHelper (https://github.com/ansible-collections/community.general/pull/3840).
|
||||
- icinga2 inventory plugin - added the ``display_name`` field to variables (https://github.com/ansible-collections/community.general/issues/3875,
|
||||
https://github.com/ansible-collections/community.general/pull/3906).
|
||||
- icinga2 inventory plugin - inventory object names are changeable using ``inventory_attr``
|
||||
in your config file to the host object name, address, or display_name fields
|
||||
(https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906).
|
||||
- ip_netns - calling ``run_command`` with arguments as ``list`` instead of ``str``
|
||||
(https://github.com/ansible-collections/community.general/pull/3822).
|
||||
- iso_extract - calling ``run_command`` with arguments as ``list`` instead of
|
||||
``str`` (https://github.com/ansible-collections/community.general/pull/3805).
|
||||
- java_cert - calling ``run_command`` with arguments as ``list`` instead of
|
||||
``str`` (https://github.com/ansible-collections/community.general/pull/3835).
|
||||
- jira - add support for Bearer token auth (https://github.com/ansible-collections/community.general/pull/3838).
|
||||
- keycloak_user_federation - add sssd user federation support (https://github.com/ansible-collections/community.general/issues/3767).
|
||||
- logentries - calling ``run_command`` with arguments as ``list`` instead of
|
||||
``str`` (https://github.com/ansible-collections/community.general/pull/3807).
|
||||
- logstash_plugin - calling ``run_command`` with arguments as ``list`` instead
|
||||
of ``str`` (https://github.com/ansible-collections/community.general/pull/3808).
|
||||
- lxc_container - calling ``run_command`` with arguments as ``list`` instead
|
||||
of ``str`` (https://github.com/ansible-collections/community.general/pull/3851).
|
||||
- lxd connection plugin - make sure that ``ansible_lxd_host``, ``ansible_executable``,
|
||||
and ``ansible_lxd_executable`` work (https://github.com/ansible-collections/community.general/pull/3798).
|
||||
- lxd inventory plugin - support virtual machines (https://github.com/ansible-collections/community.general/pull/3519).
|
||||
- module_helper module utils - added decorators ``check_mode_skip`` and ``check_mode_skip_returns``
|
||||
for skipping methods when ``check_mode=True`` (https://github.com/ansible-collections/community.general/pull/3849).
|
||||
- monit - calling ``run_command`` with arguments as ``list`` instead of ``str``
|
||||
(https://github.com/ansible-collections/community.general/pull/3821).
|
||||
- nmcli - add multiple addresses support for ``ip6`` parameter (https://github.com/ansible-collections/community.general/issues/1088).
|
||||
- nmcli - add support for ``eui64`` and ``ipv6privacy`` parameters (https://github.com/ansible-collections/community.general/issues/3357).
|
||||
- python_requirements_info - returns python version broken down into its components,
|
||||
and some minor refactoring (https://github.com/ansible-collections/community.general/pull/3797).
|
||||
- svc - calling ``run_command`` with arguments as ``list`` instead of ``str``
|
||||
(https://github.com/ansible-collections/community.general/pull/3829).
|
||||
- xattr - calling ``run_command`` with arguments as ``list`` instead of ``str``
|
||||
(https://github.com/ansible-collections/community.general/pull/3806).
|
||||
- xfconf - minor refactor on the base class for the module (https://github.com/ansible-collections/community.general/pull/3919).
|
||||
release_summary: Regular bugfix and feature release.
|
||||
fragments:
|
||||
- 1088-add_multiple_ipv6_address_support.yml
|
||||
- 3357-nmcli-eui64-and-ipv6privacy.yml
|
||||
- 3519-inventory-support-lxd-4.yml
|
||||
- 3768-nmcli_fix_changed_when_no_mask_set.yml
|
||||
- 3780-add-keycloak-sssd-user-federation.yml
|
||||
- 3785-python_requirements_info-versionless-op.yaml
|
||||
- 3792-improve_gitlab_group_and_project.yml
|
||||
- 3797-python_requirements_info-improvements.yaml
|
||||
- 3798-fix-lxd-connection-option-vars-support.yml
|
||||
- 3800-pipx-include-apps.yaml
|
||||
- 3801-mh-deprecate-vardict-attr.yaml
|
||||
- 3805-iso_extract-run_command-list.yaml
|
||||
- 3806-xattr-run_command-list.yaml
|
||||
- 3807-logentries-run_command-list.yaml
|
||||
- 3808-logstash_plugin-run_command-list.yaml
|
||||
- 3821-monit-run-list.yaml
|
||||
- 3822-ip_netns-run-list.yaml
|
||||
- 3829-svc-run-list.yaml
|
||||
- 3833-aix_filesystem-run-list.yaml
|
||||
- 3834-aix-lvg-run-list.yaml
|
||||
- 3835-java-cert-run-list.yaml
|
||||
- 3837-opentelemetry_plugin-honour_ignore_errors.yaml
|
||||
- 3838-jira-token.yaml
|
||||
- 3840-hponcfg-mh-revamp.yaml
|
||||
- 3849-mh-check-mode-decos.yaml
|
||||
- 3851-lxc-container-run-list.yaml
|
||||
- 3862-interfaces-file-fix-dup-option.yaml
|
||||
- 3867-jira-fix-body.yaml
|
||||
- 3874-proxmox-fix-onboot-param.yml
|
||||
- 3875-icinga2-inv-fix.yml
|
||||
- 3896-nmcli_vlan_missing_options.yaml
|
||||
- 3909-nrdp_fix_string_args_without_encoding.yaml
|
||||
- 3919-xfconf-baseclass.yaml
|
||||
- 4.2.0.yml
|
||||
- 705-gitlab-auth-support.yml
|
||||
modules:
|
||||
- description: Pull basic info from DNSimple API
|
||||
name: dnsimple_info
|
||||
namespace: net_tools
|
||||
- description: Create or delete a branch
|
||||
name: gitlab_branch
|
||||
namespace: source_control.gitlab
|
||||
- description: Sets or updates configuration attributes on HPE iLO with Redfish
|
||||
OEM extensions
|
||||
name: ilo_redfish_config
|
||||
namespace: remote_management.redfish
|
||||
- description: Gathers server information through iLO using Redfish APIs
|
||||
name: ilo_redfish_info
|
||||
namespace: remote_management.redfish
|
||||
release_date: '2021-12-21'
|
||||
4.3.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- Various modules and plugins - use vendored version of ``distutils.version``
|
||||
instead of the deprecated Python standard library ``distutils`` (https://github.com/ansible-collections/community.general/pull/3936).
|
||||
- alternatives - fix output parsing for alternatives groups (https://github.com/ansible-collections/community.general/pull/3976).
|
||||
- jail connection plugin - replace deprecated ``distutils.spawn.find_executable``
|
||||
with Ansible's ``get_bin_path`` to find the executable (https://github.com/ansible-collections/community.general/pull/3934).
|
||||
- lxd connection plugin - replace deprecated ``distutils.spawn.find_executable``
|
||||
with Ansible's ``get_bin_path`` to find the ``lxc`` executable (https://github.com/ansible-collections/community.general/pull/3934).
|
||||
- passwordstore lookup plugin - replace deprecated ``distutils.util.strtobool``
|
||||
with Ansible's ``convert_bool.boolean`` to interpret values for the ``create``,
|
||||
``returnall``, ``overwrite``, ``backup``, and ``nosymbols`` options (https://github.com/ansible-collections/community.general/pull/3934).
|
||||
- say callback plugin - replace deprecated ``distutils.spawn.find_executable``
|
||||
with Ansible's ``get_bin_path`` to find the ``say`` resp. ``espeak`` executables
|
||||
(https://github.com/ansible-collections/community.general/pull/3934).
|
||||
- scaleway_user_data - fix double-quote added where no double-quote is needed
|
||||
to user data in scaleway's server (``Content-type`` -> ``Content-Type``) (https://github.com/ansible-collections/community.general/pull/3940).
|
||||
- slack - add ``charset`` to HTTP headers to avoid Slack API warning (https://github.com/ansible-collections/community.general/issues/3932).
|
||||
- zone connection plugin - replace deprecated ``distutils.spawn.find_executable``
|
||||
with Ansible's ``get_bin_path`` to find the executable (https://github.com/ansible-collections/community.general/pull/3934).
|
||||
minor_changes:
|
||||
- ipa_dnszone - ``dynamicupdate`` is now a boolean parameter, instead of a string
|
||||
parameter accepting ``"true"`` and ``"false"``. Also the module is now idempotent
|
||||
with respect to ``dynamicupdate`` (https://github.com/ansible-collections/community.general/pull/3374).
|
||||
- ipa_dnszone - add DNS zone synchronization support (https://github.com/ansible-collections/community.general/pull/3374).
|
||||
- ipmi_power - add ``machine`` option to ensure the power state via the remote
|
||||
target address (https://github.com/ansible-collections/community.general/pull/3968).
|
||||
- mattermost - add the possibility to send attachments instead of text messages
|
||||
(https://github.com/ansible-collections/community.general/pull/3946).
|
||||
- nmcli - add ``wireguard`` connection type (https://github.com/ansible-collections/community.general/pull/3985).
|
||||
- proxmox - add ``clone`` parameter (https://github.com/ansible-collections/community.general/pull/3930).
|
||||
- puppet - remove deprecation for ``show_diff`` parameter. Its alias ``show-diff``
|
||||
is still deprecated and will be removed in community.general 7.0.0 (https://github.com/ansible-collections/community.general/pull/3980).
|
||||
- scaleway_compute - add possibility to use project identifier (new ``project``
|
||||
option) instead of deprecated organization identifier (https://github.com/ansible-collections/community.general/pull/3951).
|
||||
- scaleway_volume - all volumes are systematically created on par1 (https://github.com/ansible-collections/community.general/pull/3964).
|
||||
release_summary: Regular feature and bugfix release.
|
||||
fragments:
|
||||
- 3374-add-ipa-ptr-sync-support.yml
|
||||
- 3921-add-counter-filter-plugin.yml
|
||||
- 3930-proxmox-add-clone.yaml
|
||||
- 3933-slack-charset-header.yaml
|
||||
- 3934-distutils.yml
|
||||
- 3936-distutils.version.yml
|
||||
- 3940_fix_contenttype_scaleway_user_data.yml
|
||||
- 3946-mattermost_attachments.yml
|
||||
- 3951-scaleway_compute_add_project_id.yml
|
||||
- 3964-scaleway_volume_add_region.yml
|
||||
- 3968-ipmi_power-add-machine-option.yaml
|
||||
- 3976-fix-alternatives-parsing.yml
|
||||
- 3980-puppet-show_diff.yml
|
||||
- 3985-nmcli-add-wireguard-connection-type.yml
|
||||
- 4.3.0.yml
|
||||
modules:
|
||||
- description: Manage Rust packages with cargo
|
||||
name: cargo
|
||||
namespace: packaging.language
|
||||
- description: Allows obtaining Keycloak realm public information via Keycloak
|
||||
API
|
||||
name: keycloak_realm_info
|
||||
namespace: identity.keycloak
|
||||
- description: Manage sudoers files
|
||||
name: sudoers
|
||||
namespace: system
|
||||
plugins:
|
||||
filter:
|
||||
- description: Counts hashable elements in a sequence
|
||||
name: counter
|
||||
namespace: null
|
||||
release_date: '2022-01-11'
|
||||
|
||||
@@ -297,6 +297,84 @@ This produces:
|
||||
|
||||
.. versionadded:: 2.0.0
|
||||
|
||||
Counting elements in a sequence
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The ``community.general.counter`` filter plugin allows you to count (hashable) elements in a sequence. Elements are returned as dictionary keys and their counts are stored as dictionary values.
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Count character occurrences in a string
|
||||
debug:
|
||||
msg: "{{ 'abccbaabca' | community.general.counter }}"
|
||||
|
||||
- name: Count items in a list
|
||||
debug:
|
||||
msg: "{{ ['car', 'car', 'bike', 'plane', 'bike'] | community.general.counter }}"
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Count character occurrences in a string] ********************************************
|
||||
ok: [localhost] => {
|
||||
"msg": {
|
||||
"a": 4,
|
||||
"b": 3,
|
||||
"c": 3
|
||||
}
|
||||
}
|
||||
|
||||
TASK [Count items in a list] **************************************************************
|
||||
ok: [localhost] => {
|
||||
"msg": {
|
||||
"bike": 2,
|
||||
"car": 2,
|
||||
"plane": 1
|
||||
}
|
||||
}
|
||||
|
||||
This plugin is useful for selecting resources based on current allocation:
|
||||
|
||||
.. code-block:: yaml+jinja
|
||||
|
||||
- name: Get ID of SCSI controller(s) with less than 4 disks attached and choose the one with the least disks
|
||||
debug:
|
||||
msg: >-
|
||||
{{
|
||||
( disks | dict2items | map(attribute='value.adapter') | list
|
||||
| community.general.counter | dict2items
|
||||
| rejectattr('value', '>=', 4) | sort(attribute='value') | first
|
||||
).key
|
||||
}}
|
||||
vars:
|
||||
disks:
|
||||
sda:
|
||||
adapter: scsi_1
|
||||
sdb:
|
||||
adapter: scsi_1
|
||||
sdc:
|
||||
adapter: scsi_1
|
||||
sdd:
|
||||
adapter: scsi_1
|
||||
sde:
|
||||
adapter: scsi_2
|
||||
sdf:
|
||||
adapter: scsi_3
|
||||
sdg:
|
||||
adapter: scsi_3
|
||||
|
||||
This produces:
|
||||
|
||||
.. code-block:: ansible-output
|
||||
|
||||
TASK [Get ID of SCSI controller(s) with less than 4 disks attached and choose the one with the least disks]
|
||||
ok: [localhost] => {
|
||||
"msg": "scsi_2"
|
||||
}
|
||||
|
||||
.. versionadded:: 4.3.0
|
||||
|
||||
Working with times
|
||||
------------------
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
namespace: community
|
||||
name: general
|
||||
version: 4.0.2
|
||||
version: 4.3.0
|
||||
readme: README.md
|
||||
authors:
|
||||
- Ansible (https://github.com/ansible)
|
||||
|
||||
@@ -70,6 +70,7 @@ import os
|
||||
import json
|
||||
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlencode
|
||||
from ansible.module_utils.common.text.converters import to_bytes
|
||||
from ansible.module_utils.urls import open_url
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
|
||||
@@ -143,7 +144,7 @@ class CallbackModule(CallbackBase):
|
||||
body = {
|
||||
'cmd': 'submitcheck',
|
||||
'token': self.token,
|
||||
'XMLDATA': bytes(xmldata)
|
||||
'XMLDATA': to_bytes(xmldata)
|
||||
}
|
||||
|
||||
try:
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# (C) 2021, Victor Martinez <VictorMartinezRubio@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
@@ -267,6 +268,8 @@ class OpenTelemetrySource(object):
|
||||
elif host_data.status == 'skipped':
|
||||
message = res['skip_reason'] if 'skip_reason' in res else 'skipped'
|
||||
status = Status(status_code=StatusCode.UNSET)
|
||||
elif host_data.status == 'ignored':
|
||||
status = Status(status_code=StatusCode.UNSET)
|
||||
|
||||
span.set_status(status)
|
||||
if isinstance(task_data.args, dict) and "gather_facts" not in task_data.action:
|
||||
@@ -462,10 +465,15 @@ class CallbackModule(CallbackBase):
|
||||
)
|
||||
|
||||
def v2_runner_on_failed(self, result, ignore_errors=False):
|
||||
self.errors += 1
|
||||
if ignore_errors:
|
||||
status = 'ignored'
|
||||
else:
|
||||
status = 'failed'
|
||||
self.errors += 1
|
||||
|
||||
self.opentelemetry.finish_task(
|
||||
self.tasks_data,
|
||||
'failed',
|
||||
status,
|
||||
result
|
||||
)
|
||||
|
||||
|
||||
@@ -21,11 +21,11 @@ DOCUMENTATION = '''
|
||||
- In 2.8, this callback has been renamed from C(osx_say) into M(community.general.say).
|
||||
'''
|
||||
|
||||
import distutils.spawn
|
||||
import platform
|
||||
import subprocess
|
||||
import os
|
||||
|
||||
from ansible.module_utils.common.process import get_bin_path
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
|
||||
|
||||
@@ -47,21 +47,24 @@ class CallbackModule(CallbackBase):
|
||||
self.HAPPY_VOICE = None
|
||||
self.LASER_VOICE = None
|
||||
|
||||
self.synthesizer = distutils.spawn.find_executable('say')
|
||||
if not self.synthesizer:
|
||||
self.synthesizer = distutils.spawn.find_executable('espeak')
|
||||
if self.synthesizer:
|
||||
try:
|
||||
self.synthesizer = get_bin_path('say')
|
||||
if platform.system() != 'Darwin':
|
||||
# 'say' binary available, it might be GNUstep tool which doesn't support 'voice' parameter
|
||||
self._display.warning("'say' executable found but system is '%s': ignoring voice parameter" % platform.system())
|
||||
else:
|
||||
self.FAILED_VOICE = 'Zarvox'
|
||||
self.REGULAR_VOICE = 'Trinoids'
|
||||
self.HAPPY_VOICE = 'Cellos'
|
||||
self.LASER_VOICE = 'Princess'
|
||||
except ValueError:
|
||||
try:
|
||||
self.synthesizer = get_bin_path('espeak')
|
||||
self.FAILED_VOICE = 'klatt'
|
||||
self.HAPPY_VOICE = 'f5'
|
||||
self.LASER_VOICE = 'whisper'
|
||||
elif platform.system() != 'Darwin':
|
||||
# 'say' binary available, it might be GNUstep tool which doesn't support 'voice' parameter
|
||||
self._display.warning("'say' executable found but system is '%s': ignoring voice parameter" % platform.system())
|
||||
else:
|
||||
self.FAILED_VOICE = 'Zarvox'
|
||||
self.REGULAR_VOICE = 'Trinoids'
|
||||
self.HAPPY_VOICE = 'Cellos'
|
||||
self.LASER_VOICE = 'Princess'
|
||||
except ValueError:
|
||||
self.synthesizer = None
|
||||
|
||||
# plugin disable itself if say is not present
|
||||
# ansible will not call any callback if disabled is set to True
|
||||
|
||||
@@ -31,7 +31,6 @@ DOCUMENTATION = '''
|
||||
- name: ansible_jail_user
|
||||
'''
|
||||
|
||||
import distutils.spawn
|
||||
import os
|
||||
import os.path
|
||||
import subprocess
|
||||
@@ -39,6 +38,7 @@ import traceback
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils.six.moves import shlex_quote
|
||||
from ansible.module_utils.common.process import get_bin_path
|
||||
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
|
||||
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
||||
from ansible.utils.display import Display
|
||||
@@ -75,10 +75,10 @@ class Connection(ConnectionBase):
|
||||
|
||||
@staticmethod
|
||||
def _search_executable(executable):
|
||||
cmd = distutils.spawn.find_executable(executable)
|
||||
if not cmd:
|
||||
try:
|
||||
return get_bin_path(executable)
|
||||
except ValueError:
|
||||
raise AnsibleError("%s command not found in PATH" % executable)
|
||||
return cmd
|
||||
|
||||
def list_jails(self):
|
||||
p = subprocess.Popen([self.jls_cmd, '-q', 'name'],
|
||||
|
||||
@@ -43,10 +43,10 @@ DOCUMENTATION = '''
|
||||
'''
|
||||
|
||||
import os
|
||||
from distutils.spawn import find_executable
|
||||
from subprocess import Popen, PIPE
|
||||
|
||||
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
|
||||
from ansible.module_utils.common.process import get_bin_path
|
||||
from ansible.module_utils.common.text.converters import to_bytes, to_text
|
||||
from ansible.plugins.connection import ConnectionBase
|
||||
|
||||
@@ -62,9 +62,9 @@ class Connection(ConnectionBase):
|
||||
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
|
||||
|
||||
self._host = self._play_context.remote_addr
|
||||
self._lxc_cmd = find_executable("lxc")
|
||||
|
||||
if not self._lxc_cmd:
|
||||
try:
|
||||
self._lxc_cmd = get_bin_path("lxc")
|
||||
except ValueError:
|
||||
raise AnsibleError("lxc command not found in PATH")
|
||||
|
||||
if self._play_context.remote_user is not None and self._play_context.remote_user != 'root':
|
||||
@@ -89,9 +89,9 @@ class Connection(ConnectionBase):
|
||||
local_cmd.extend(["--project", self.get_option("project")])
|
||||
local_cmd.extend([
|
||||
"exec",
|
||||
"%s:%s" % (self.get_option("remote"), self._host),
|
||||
"%s:%s" % (self.get_option("remote"), self.get_option("remote_addr")),
|
||||
"--",
|
||||
self._play_context.executable, "-c", cmd
|
||||
self.get_option("executable"), "-c", cmd
|
||||
])
|
||||
|
||||
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
||||
@@ -126,7 +126,7 @@ class Connection(ConnectionBase):
|
||||
local_cmd.extend([
|
||||
"file", "push",
|
||||
in_path,
|
||||
"%s:%s/%s" % (self.get_option("remote"), self._host, out_path)
|
||||
"%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), out_path)
|
||||
])
|
||||
|
||||
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
||||
@@ -145,7 +145,7 @@ class Connection(ConnectionBase):
|
||||
local_cmd.extend(["--project", self.get_option("project")])
|
||||
local_cmd.extend([
|
||||
"file", "pull",
|
||||
"%s:%s/%s" % (self.get_option("remote"), self._host, in_path),
|
||||
"%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), in_path),
|
||||
out_path
|
||||
])
|
||||
|
||||
|
||||
@@ -26,7 +26,6 @@ DOCUMENTATION = '''
|
||||
- name: ansible_zone_host
|
||||
'''
|
||||
|
||||
import distutils.spawn
|
||||
import os
|
||||
import os.path
|
||||
import subprocess
|
||||
@@ -34,6 +33,7 @@ import traceback
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils.six.moves import shlex_quote
|
||||
from ansible.module_utils.common.process import get_bin_path
|
||||
from ansible.module_utils.common.text.converters import to_bytes
|
||||
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
||||
from ansible.utils.display import Display
|
||||
@@ -64,10 +64,10 @@ class Connection(ConnectionBase):
|
||||
|
||||
@staticmethod
|
||||
def _search_executable(executable):
|
||||
cmd = distutils.spawn.find_executable(executable)
|
||||
if not cmd:
|
||||
try:
|
||||
return get_bin_path(executable)
|
||||
except ValueError:
|
||||
raise AnsibleError("%s command not found in PATH" % executable)
|
||||
return cmd
|
||||
|
||||
def list_zones(self):
|
||||
process = subprocess.Popen([self.zoneadm_cmd, 'list', '-ip'],
|
||||
|
||||
31
plugins/doc_fragments/gitlab.py
Normal file
31
plugins/doc_fragments/gitlab.py
Normal file
@@ -0,0 +1,31 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
class ModuleDocFragment(object):

    # Shared documentation fragment for the gitlab_* modules, pulled in via
    # ``extends_documentation_fragment``.  Declares the common authentication
    # token options (``api_token``, ``api_oauth_token``, ``api_job_token``)
    # and the ``requests`` library requirement.
    # NOTE(review): whether the three token options are mutually exclusive is
    # enforced by the individual modules, not here — confirm against callers.
    DOCUMENTATION = r'''
requirements:
  - requests (Python library U(https://pypi.org/project/requests/))

options:
  api_token:
    description:
      - GitLab access token with API permissions.
    type: str
  api_oauth_token:
    description:
      - GitLab OAuth token for logging in.
    type: str
    version_added: 4.2.0
  api_job_token:
    description:
      - GitLab CI job token for logging in.
    type: str
    version_added: 4.2.0
'''
|
||||
36
plugins/filter/counter.py
Normal file
36
plugins/filter/counter.py
Normal file
@@ -0,0 +1,36 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2021, Remy Keil <remy.keil@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.errors import AnsibleFilterError
|
||||
from ansible.module_utils.common._collections_compat import Sequence
|
||||
from collections import Counter
|
||||
|
||||
|
||||
def counter(sequence):
    '''Tally the elements of *sequence*.

    Returns a dict mapping each distinct element to its number of
    occurrences.  Raises AnsibleFilterError when the argument is not a
    sequence (string or list) or contains unhashable elements.
    '''
    # Guard clause: reject non-sequence input up front.
    if not isinstance(sequence, Sequence):
        raise AnsibleFilterError('Argument for community.general.counter must be a sequence (string or list). %s is %s' %
                                 (sequence, type(sequence)))

    try:
        # collections.Counter does the tallying; convert to a plain dict
        # so the filter returns an ordinary mapping.
        return dict(Counter(sequence))
    except TypeError as e:
        # Unhashable elements (e.g. nested lists) cannot be counted.
        raise AnsibleFilterError(
            "community.general.counter needs a sequence with hashable elements (int, float or str) - %s" % (e)
        )
|
||||
|
||||
|
||||
class FilterModule(object):
    '''Expose the counter jinja2 filter to Ansible.'''

    def filters(self):
        # Map the public filter name to its implementation.
        return {
            'counter': counter,
        }
|
||||
@@ -5,7 +5,7 @@
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from distutils.version import LooseVersion
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
|
||||
def version_sort(value, reverse=False):
|
||||
|
||||
@@ -68,7 +68,6 @@ user: ansible-tester
|
||||
password: secure
|
||||
'''
|
||||
|
||||
from distutils.version import LooseVersion
|
||||
import socket
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
|
||||
@@ -35,13 +35,23 @@ DOCUMENTATION = '''
|
||||
type: string
|
||||
required: true
|
||||
host_filter:
|
||||
description: An Icinga2 API valid host filter.
|
||||
description:
|
||||
- An Icinga2 API valid host filter. Leave blank for no filtering.
|
||||
type: string
|
||||
required: false
|
||||
validate_certs:
|
||||
description: Enables or disables SSL certificate verification.
|
||||
type: boolean
|
||||
default: true
|
||||
inventory_attr:
|
||||
description:
|
||||
- Allows the override of the inventory name based on different attributes.
|
||||
- This allows for changing the way limits are used.
|
||||
- The current default, C(address), is sometimes not unique or present. We recommend to use C(name) instead.
|
||||
type: string
|
||||
default: address
|
||||
choices: ['name', 'display_name', 'address']
|
||||
version_added: 4.2.0
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
@@ -52,6 +62,7 @@ user: ansible
|
||||
password: secure
|
||||
host_filter: \"linux-servers\" in host.groups
|
||||
validate_certs: false
|
||||
inventory_attr: name
|
||||
'''
|
||||
|
||||
import json
|
||||
@@ -59,6 +70,7 @@ import json
|
||||
from ansible.errors import AnsibleParserError
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||
from ansible.module_utils.urls import open_url
|
||||
from ansible.module_utils.six.moves.urllib.error import HTTPError
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
@@ -76,6 +88,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
self.icinga2_password = None
|
||||
self.ssl_verify = None
|
||||
self.host_filter = None
|
||||
self.inventory_attr = None
|
||||
|
||||
self.cache_key = None
|
||||
self.use_cache = None
|
||||
@@ -114,9 +127,21 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
if data is not None:
|
||||
request_args['data'] = json.dumps(data)
|
||||
self.display.vvv("Request Args: %s" % request_args)
|
||||
response = open_url(request_url, **request_args)
|
||||
try:
|
||||
response = open_url(request_url, **request_args)
|
||||
except HTTPError as e:
|
||||
try:
|
||||
error_body = json.loads(e.read().decode())
|
||||
self.display.vvv("Error returned: {0}".format(error_body))
|
||||
except Exception:
|
||||
error_body = {"status": None}
|
||||
if e.code == 404 and error_body.get('status') == "No objects found.":
|
||||
raise AnsibleParserError("Host filter returned no data. Please confirm your host_filter value is valid")
|
||||
raise AnsibleParserError("Unexpected data returned: {0} -- {1}".format(e, error_body))
|
||||
|
||||
response_body = response.read()
|
||||
json_data = json.loads(response_body.decode('utf-8'))
|
||||
self.display.vvv("Returned Data: %s" % json.dumps(json_data, indent=4, sort_keys=True))
|
||||
if 200 <= response.status <= 299:
|
||||
return json_data
|
||||
if response.status == 404 and json_data['status'] == "No objects found.":
|
||||
@@ -155,7 +180,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
"""Query for all hosts """
|
||||
self.display.vvv("Querying Icinga2 for inventory")
|
||||
query_args = {
|
||||
"attrs": ["address", "state_type", "state", "groups"],
|
||||
"attrs": ["address", "display_name", "state_type", "state", "groups"],
|
||||
}
|
||||
if self.host_filter is not None:
|
||||
query_args['host_filter'] = self.host_filter
|
||||
@@ -177,24 +202,35 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
"""Convert Icinga2 API data to JSON format for Ansible"""
|
||||
groups_dict = {"_meta": {"hostvars": {}}}
|
||||
for entry in json_data:
|
||||
host_name = entry['name']
|
||||
host_attrs = entry['attrs']
|
||||
if self.inventory_attr == "name":
|
||||
host_name = entry.get('name')
|
||||
if self.inventory_attr == "address":
|
||||
# When looking for address for inventory, if missing fallback to object name
|
||||
if host_attrs.get('address', '') != '':
|
||||
host_name = host_attrs.get('address')
|
||||
else:
|
||||
host_name = entry.get('name')
|
||||
if self.inventory_attr == "display_name":
|
||||
host_name = host_attrs.get('display_name')
|
||||
if host_attrs['state'] == 0:
|
||||
host_attrs['state'] = 'on'
|
||||
else:
|
||||
host_attrs['state'] = 'off'
|
||||
host_groups = host_attrs['groups']
|
||||
host_addr = host_attrs['address']
|
||||
self.inventory.add_host(host_addr)
|
||||
host_groups = host_attrs.get('groups')
|
||||
self.inventory.add_host(host_name)
|
||||
for group in host_groups:
|
||||
if group not in self.inventory.groups.keys():
|
||||
self.inventory.add_group(group)
|
||||
self.inventory.add_child(group, host_addr)
|
||||
self.inventory.set_variable(host_addr, 'address', host_addr)
|
||||
self.inventory.set_variable(host_addr, 'hostname', host_name)
|
||||
self.inventory.set_variable(host_addr, 'state',
|
||||
self.inventory.add_child(group, host_name)
|
||||
# If the address attribute is populated, override ansible_host with the value
|
||||
if host_attrs.get('address') != '':
|
||||
self.inventory.set_variable(host_name, 'ansible_host', host_attrs.get('address'))
|
||||
self.inventory.set_variable(host_name, 'hostname', entry.get('name'))
|
||||
self.inventory.set_variable(host_name, 'display_name', host_attrs.get('display_name'))
|
||||
self.inventory.set_variable(host_name, 'state',
|
||||
host_attrs['state'])
|
||||
self.inventory.set_variable(host_addr, 'state_type',
|
||||
self.inventory.set_variable(host_name, 'state_type',
|
||||
host_attrs['state_type'])
|
||||
return groups_dict
|
||||
|
||||
@@ -211,6 +247,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
self.icinga2_password = self.get_option('password')
|
||||
self.ssl_verify = self.get_option('validate_certs')
|
||||
self.host_filter = self.get_option('host_filter')
|
||||
self.inventory_attr = self.get_option('inventory_attr')
|
||||
# Not currently enabled
|
||||
# self.cache_key = self.get_cache_key(path)
|
||||
# self.use_cache = cache and self.get_option('cache')
|
||||
|
||||
@@ -15,6 +15,7 @@ DOCUMENTATION = r'''
|
||||
author: "Frank Dornheim (@conloos)"
|
||||
requirements:
|
||||
- ipaddress
|
||||
- lxd >= 4.0
|
||||
options:
|
||||
plugin:
|
||||
description: Token that ensures this is a source file for the 'lxd' plugin.
|
||||
@@ -49,26 +50,38 @@ DOCUMENTATION = r'''
|
||||
- If I(trust_password) is set, this module send a request for authentication before sending any requests.
|
||||
type: str
|
||||
state:
|
||||
description: Filter the container according to the current status.
|
||||
description: Filter the instance according to the current status.
|
||||
type: str
|
||||
default: none
|
||||
choices: [ 'STOPPED', 'STARTING', 'RUNNING', 'none' ]
|
||||
prefered_container_network_interface:
|
||||
type_filter:
|
||||
description:
|
||||
- If a container has multiple network interfaces, select which one is the prefered as pattern.
|
||||
- Filter the instances by type C(virtual-machine), C(container) or C(both).
|
||||
- The first version of the inventory only supported containers.
|
||||
type: str
|
||||
default: container
|
||||
choices: [ 'virtual-machine', 'container', 'both' ]
|
||||
version_added: 4.2.0
|
||||
prefered_instance_network_interface:
|
||||
description:
|
||||
- If an instance has multiple network interfaces, select which one is the prefered as pattern.
|
||||
- Combined with the first number that can be found e.g. 'eth' + 0.
|
||||
- The option has been renamed from I(prefered_container_network_interface) to I(prefered_instance_network_interface) in community.general 3.8.0.
|
||||
The old name still works as an alias.
|
||||
type: str
|
||||
default: eth
|
||||
prefered_container_network_family:
|
||||
aliases:
|
||||
- prefered_container_network_interface
|
||||
prefered_instance_network_family:
|
||||
description:
|
||||
- If a container has multiple network interfaces, which one is the prefered by family.
|
||||
- If an instance has multiple network interfaces, which one is the prefered by family.
|
||||
- Specify C(inet) for IPv4 and C(inet6) for IPv6.
|
||||
type: str
|
||||
default: inet
|
||||
choices: [ 'inet', 'inet6' ]
|
||||
groupby:
|
||||
description:
|
||||
- Create groups by the following keywords C(location), C(pattern), C(network_range), C(os), C(release), C(profile), C(vlanid).
|
||||
- Create groups by the following keywords C(location), C(network_range), C(os), C(pattern), C(profile), C(release), C(type), C(vlanid).
|
||||
- See example for syntax.
|
||||
type: dict
|
||||
'''
|
||||
@@ -83,38 +96,49 @@ plugin: community.general.lxd
|
||||
url: unix:/var/snap/lxd/common/lxd/unix.socket
|
||||
state: RUNNING
|
||||
|
||||
# simple lxd.yml including virtual machines and containers
|
||||
plugin: community.general.lxd
|
||||
url: unix:/var/snap/lxd/common/lxd/unix.socket
|
||||
type_filter: both
|
||||
|
||||
# grouping lxd.yml
|
||||
groupby:
|
||||
testpattern:
|
||||
type: pattern
|
||||
attribute: test
|
||||
vlan666:
|
||||
type: vlanid
|
||||
attribute: 666
|
||||
locationBerlin:
|
||||
type: location
|
||||
attribute: Berlin
|
||||
osUbuntu:
|
||||
type: os
|
||||
attribute: ubuntu
|
||||
releaseFocal:
|
||||
type: release
|
||||
attribute: focal
|
||||
releaseBionic:
|
||||
type: release
|
||||
attribute: bionic
|
||||
profileDefault:
|
||||
type: profile
|
||||
attribute: default
|
||||
profileX11:
|
||||
type: profile
|
||||
attribute: x11
|
||||
netRangeIPv4:
|
||||
type: network_range
|
||||
attribute: 10.98.143.0/24
|
||||
netRangeIPv6:
|
||||
type: network_range
|
||||
attribute: fd42:bd00:7b11:2167:216:3eff::/24
|
||||
osUbuntu:
|
||||
type: os
|
||||
attribute: ubuntu
|
||||
testpattern:
|
||||
type: pattern
|
||||
attribute: test
|
||||
profileDefault:
|
||||
type: profile
|
||||
attribute: default
|
||||
profileX11:
|
||||
type: profile
|
||||
attribute: x11
|
||||
releaseFocal:
|
||||
type: release
|
||||
attribute: focal
|
||||
releaseBionic:
|
||||
type: release
|
||||
attribute: bionic
|
||||
typeVM:
|
||||
type: type
|
||||
attribute: virtual-machine
|
||||
typeContainer:
|
||||
type: type
|
||||
attribute: container
|
||||
vlan666:
|
||||
type: vlanid
|
||||
attribute: 666
|
||||
'''
|
||||
|
||||
import binascii
|
||||
@@ -283,10 +307,10 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
network_configs = self.socket.do('GET', '/1.0/networks')
|
||||
return [m.split('/')[3] for m in network_configs['metadata']]
|
||||
|
||||
def _get_containers(self):
|
||||
"""Get Containernames
|
||||
def _get_instances(self):
|
||||
"""Get instancenames
|
||||
|
||||
Returns all containernames
|
||||
Returns all instancenames
|
||||
|
||||
Args:
|
||||
None
|
||||
@@ -295,25 +319,27 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
list(names): names of all containers"""
|
||||
# e.g. {'type': 'sync',
|
||||
# 'status': 'Success',
|
||||
# 'status_code': 200,
|
||||
# 'operation': '',
|
||||
# 'error_code': 0,
|
||||
# 'error': '',
|
||||
# 'metadata': ['/1.0/containers/udemy-ansible-ubuntu-2004']}
|
||||
containers = self.socket.do('GET', '/1.0/containers')
|
||||
return [m.split('/')[3] for m in containers['metadata']]
|
||||
list(names): names of all instances"""
|
||||
# e.g. {
|
||||
# "metadata": [
|
||||
# "/1.0/instances/foo",
|
||||
# "/1.0/instances/bar"
|
||||
# ],
|
||||
# "status": "Success",
|
||||
# "status_code": 200,
|
||||
# "type": "sync"
|
||||
# }
|
||||
instances = self.socket.do('GET', '/1.0/instances')
|
||||
return [m.split('/')[3] for m in instances['metadata']]
|
||||
|
||||
def _get_config(self, branch, name):
|
||||
"""Get inventory of container
|
||||
"""Get inventory of instance
|
||||
|
||||
Get config of container
|
||||
Get config of instance
|
||||
|
||||
Args:
|
||||
str(branch): Name oft the API-Branch
|
||||
str(name): Name of Container
|
||||
str(name): Name of instance
|
||||
Kwargs:
|
||||
None
|
||||
Source:
|
||||
@@ -321,7 +347,7 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
dict(config): Config of the container"""
|
||||
dict(config): Config of the instance"""
|
||||
config = {}
|
||||
if isinstance(branch, (tuple, list)):
|
||||
config[name] = {branch[1]: self.socket.do('GET', '/1.0/{0}/{1}/{2}'.format(to_native(branch[0]), to_native(name), to_native(branch[1])))}
|
||||
@@ -329,13 +355,13 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
config[name] = {branch: self.socket.do('GET', '/1.0/{0}/{1}'.format(to_native(branch), to_native(name)))}
|
||||
return config
|
||||
|
||||
def get_container_data(self, names):
|
||||
"""Create Inventory of the container
|
||||
def get_instance_data(self, names):
|
||||
"""Create Inventory of the instance
|
||||
|
||||
Iterate through the different branches of the containers and collect Informations.
|
||||
Iterate through the different branches of the instances and collect Informations.
|
||||
|
||||
Args:
|
||||
list(names): List of container names
|
||||
list(names): List of instance names
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
@@ -344,20 +370,20 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
None"""
|
||||
# tuple(('instances','metadata/templates')) to get section in branch
|
||||
# e.g. /1.0/instances/<name>/metadata/templates
|
||||
branches = ['containers', ('instances', 'state')]
|
||||
container_config = {}
|
||||
branches = ['instances', ('instances', 'state')]
|
||||
instance_config = {}
|
||||
for branch in branches:
|
||||
for name in names:
|
||||
container_config['containers'] = self._get_config(branch, name)
|
||||
self.data = dict_merge(container_config, self.data)
|
||||
instance_config['instances'] = self._get_config(branch, name)
|
||||
self.data = dict_merge(instance_config, self.data)
|
||||
|
||||
def get_network_data(self, names):
|
||||
"""Create Inventory of the container
|
||||
"""Create Inventory of the instance
|
||||
|
||||
Iterate through the different branches of the containers and collect Informations.
|
||||
Iterate through the different branches of the instances and collect Informations.
|
||||
|
||||
Args:
|
||||
list(names): List of container names
|
||||
list(names): List of instance names
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
@@ -376,26 +402,26 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
network_config['networks'] = {name: None}
|
||||
self.data = dict_merge(network_config, self.data)
|
||||
|
||||
def extract_network_information_from_container_config(self, container_name):
|
||||
def extract_network_information_from_instance_config(self, instance_name):
|
||||
"""Returns the network interface configuration
|
||||
|
||||
Returns the network ipv4 and ipv6 config of the container without local-link
|
||||
Returns the network ipv4 and ipv6 config of the instance without local-link
|
||||
|
||||
Args:
|
||||
str(container_name): Name oft he container
|
||||
str(instance_name): Name oft he instance
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
dict(network_configuration): network config"""
|
||||
container_network_interfaces = self._get_data_entry('containers/{0}/state/metadata/network'.format(container_name))
|
||||
instance_network_interfaces = self._get_data_entry('instances/{0}/state/metadata/network'.format(instance_name))
|
||||
network_configuration = None
|
||||
if container_network_interfaces:
|
||||
if instance_network_interfaces:
|
||||
network_configuration = {}
|
||||
gen_interface_names = [interface_name for interface_name in container_network_interfaces if interface_name != 'lo']
|
||||
gen_interface_names = [interface_name for interface_name in instance_network_interfaces if interface_name != 'lo']
|
||||
for interface_name in gen_interface_names:
|
||||
gen_address = [address for address in container_network_interfaces[interface_name]['addresses'] if address.get('scope') != 'link']
|
||||
gen_address = [address for address in instance_network_interfaces[interface_name]['addresses'] if address.get('scope') != 'link']
|
||||
network_configuration[interface_name] = []
|
||||
for address in gen_address:
|
||||
address_set = {}
|
||||
@@ -406,24 +432,24 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
network_configuration[interface_name].append(address_set)
|
||||
return network_configuration
|
||||
|
||||
def get_prefered_container_network_interface(self, container_name):
|
||||
"""Helper to get the prefered interface of thr container
|
||||
def get_prefered_instance_network_interface(self, instance_name):
|
||||
"""Helper to get the prefered interface of thr instance
|
||||
|
||||
Helper to get the prefered interface provide by neme pattern from 'prefered_container_network_interface'.
|
||||
Helper to get the prefered interface provide by neme pattern from 'prefered_instance_network_interface'.
|
||||
|
||||
Args:
|
||||
str(containe_name): name of container
|
||||
str(containe_name): name of instance
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
str(prefered_interface): None or interface name"""
|
||||
container_network_interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name))
|
||||
instance_network_interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name))
|
||||
prefered_interface = None # init
|
||||
if container_network_interfaces: # container have network interfaces
|
||||
if instance_network_interfaces: # instance have network interfaces
|
||||
# generator if interfaces which start with the desired pattern
|
||||
net_generator = [interface for interface in container_network_interfaces if interface.startswith(self.prefered_container_network_interface)]
|
||||
net_generator = [interface for interface in instance_network_interfaces if interface.startswith(self.prefered_instance_network_interface)]
|
||||
selected_interfaces = [] # init
|
||||
for interface in net_generator:
|
||||
selected_interfaces.append(interface)
|
||||
@@ -431,13 +457,13 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
prefered_interface = sorted(selected_interfaces)[0]
|
||||
return prefered_interface
|
||||
|
||||
def get_container_vlans(self, container_name):
|
||||
"""Get VLAN(s) from container
|
||||
def get_instance_vlans(self, instance_name):
|
||||
"""Get VLAN(s) from instance
|
||||
|
||||
Helper to get the VLAN_ID from the container
|
||||
Helper to get the VLAN_ID from the instance
|
||||
|
||||
Args:
|
||||
str(containe_name): name of container
|
||||
str(containe_name): name of instance
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
@@ -450,13 +476,13 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
if self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network)):
|
||||
network_vlans[network] = self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network))
|
||||
|
||||
# get networkdevices of container and return
|
||||
# get networkdevices of instance and return
|
||||
# e.g.
|
||||
# "eth0":{ "name":"eth0",
|
||||
# "network":"lxdbr0",
|
||||
# "type":"nic"},
|
||||
vlan_ids = {}
|
||||
devices = self._get_data_entry('containers/{0}/containers/metadata/expanded_devices'.format(to_native(container_name)))
|
||||
devices = self._get_data_entry('instances/{0}/instances/metadata/expanded_devices'.format(to_native(instance_name)))
|
||||
for device in devices:
|
||||
if 'network' in devices[device]:
|
||||
if devices[device]['network'] in network_vlans:
|
||||
@@ -492,14 +518,14 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
except KeyError:
|
||||
return None
|
||||
|
||||
def _set_data_entry(self, container_name, key, value, path=None):
|
||||
def _set_data_entry(self, instance_name, key, value, path=None):
|
||||
"""Helper to save data
|
||||
|
||||
Helper to save the data in self.data
|
||||
Detect if data is allready in branch and use dict_merge() to prevent that branch is overwritten.
|
||||
|
||||
Args:
|
||||
str(container_name): name of container
|
||||
str(instance_name): name of instance
|
||||
str(key): same as dict
|
||||
*(value): same as dict
|
||||
Kwargs:
|
||||
@@ -510,24 +536,24 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
None"""
|
||||
if not path:
|
||||
path = self.data['inventory']
|
||||
if container_name not in path:
|
||||
path[container_name] = {}
|
||||
if instance_name not in path:
|
||||
path[instance_name] = {}
|
||||
|
||||
try:
|
||||
if isinstance(value, dict) and key in path[container_name]:
|
||||
path[container_name] = dict_merge(value, path[container_name][key])
|
||||
if isinstance(value, dict) and key in path[instance_name]:
|
||||
path[instance_name] = dict_merge(value, path[instance_name][key])
|
||||
else:
|
||||
path[container_name][key] = value
|
||||
path[instance_name][key] = value
|
||||
except KeyError as err:
|
||||
raise AnsibleParserError("Unable to store Informations: {0}".format(to_native(err)))
|
||||
|
||||
def extract_information_from_container_configs(self):
|
||||
def extract_information_from_instance_configs(self):
|
||||
"""Process configuration information
|
||||
|
||||
Preparation of the data
|
||||
|
||||
Args:
|
||||
dict(configs): Container configurations
|
||||
dict(configs): instance configurations
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
@@ -538,33 +564,35 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
if 'inventory' not in self.data:
|
||||
self.data['inventory'] = {}
|
||||
|
||||
for container_name in self.data['containers']:
|
||||
self._set_data_entry(container_name, 'os', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/config/image.os'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'release', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/config/image.release'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'version', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/config/image.version'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'profile', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/profiles'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'location', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/location'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'state', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/config/volatile.last_state.power'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'network_interfaces', self.extract_network_information_from_container_config(container_name))
|
||||
self._set_data_entry(container_name, 'preferred_interface', self.get_prefered_container_network_interface(container_name))
|
||||
self._set_data_entry(container_name, 'vlan_ids', self.get_container_vlans(container_name))
|
||||
for instance_name in self.data['instances']:
|
||||
self._set_data_entry(instance_name, 'os', self._get_data_entry(
|
||||
'instances/{0}/instances/metadata/config/image.os'.format(instance_name)))
|
||||
self._set_data_entry(instance_name, 'release', self._get_data_entry(
|
||||
'instances/{0}/instances/metadata/config/image.release'.format(instance_name)))
|
||||
self._set_data_entry(instance_name, 'version', self._get_data_entry(
|
||||
'instances/{0}/instances/metadata/config/image.version'.format(instance_name)))
|
||||
self._set_data_entry(instance_name, 'profile', self._get_data_entry(
|
||||
'instances/{0}/instances/metadata/profiles'.format(instance_name)))
|
||||
self._set_data_entry(instance_name, 'location', self._get_data_entry(
|
||||
'instances/{0}/instances/metadata/location'.format(instance_name)))
|
||||
self._set_data_entry(instance_name, 'state', self._get_data_entry(
|
||||
'instances/{0}/instances/metadata/config/volatile.last_state.power'.format(instance_name)))
|
||||
self._set_data_entry(instance_name, 'type', self._get_data_entry(
|
||||
'instances/{0}/instances/metadata/type'.format(instance_name)))
|
||||
self._set_data_entry(instance_name, 'network_interfaces', self.extract_network_information_from_instance_config(instance_name))
|
||||
self._set_data_entry(instance_name, 'preferred_interface', self.get_prefered_instance_network_interface(instance_name))
|
||||
self._set_data_entry(instance_name, 'vlan_ids', self.get_instance_vlans(instance_name))
|
||||
|
||||
def build_inventory_network(self, container_name):
|
||||
"""Add the network interfaces of the container to the inventory
|
||||
def build_inventory_network(self, instance_name):
|
||||
"""Add the network interfaces of the instance to the inventory
|
||||
|
||||
Logic:
|
||||
- if the container have no interface -> 'ansible_connection: local'
|
||||
- get preferred_interface & prefered_container_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
|
||||
- first Interface from: network_interfaces prefered_container_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
|
||||
- if the instance have no interface -> 'ansible_connection: local'
|
||||
- get preferred_interface & prefered_instance_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
|
||||
- first Interface from: network_interfaces prefered_instance_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
|
||||
|
||||
Args:
|
||||
str(container_name): name of container
|
||||
str(instance_name): name of instance
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
@@ -572,45 +600,45 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
Returns:
|
||||
None"""
|
||||
|
||||
def interface_selection(container_name):
|
||||
"""Select container Interface for inventory
|
||||
def interface_selection(instance_name):
|
||||
"""Select instance Interface for inventory
|
||||
|
||||
Logic:
|
||||
- get preferred_interface & prefered_container_network_family -> str(IP)
|
||||
- first Interface from: network_interfaces prefered_container_network_family -> str(IP)
|
||||
- get preferred_interface & prefered_instance_network_family -> str(IP)
|
||||
- first Interface from: network_interfaces prefered_instance_network_family -> str(IP)
|
||||
|
||||
Args:
|
||||
str(container_name): name of container
|
||||
str(instance_name): name of instance
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
dict(interface_name: ip)"""
|
||||
prefered_interface = self._get_data_entry('inventory/{0}/preferred_interface'.format(container_name)) # name or None
|
||||
prefered_container_network_family = self.prefered_container_network_family
|
||||
prefered_interface = self._get_data_entry('inventory/{0}/preferred_interface'.format(instance_name)) # name or None
|
||||
prefered_instance_network_family = self.prefered_instance_network_family
|
||||
|
||||
ip_address = ''
|
||||
if prefered_interface:
|
||||
interface = self._get_data_entry('inventory/{0}/network_interfaces/{1}'.format(container_name, prefered_interface))
|
||||
interface = self._get_data_entry('inventory/{0}/network_interfaces/{1}'.format(instance_name, prefered_interface))
|
||||
for config in interface:
|
||||
if config['family'] == prefered_container_network_family:
|
||||
if config['family'] == prefered_instance_network_family:
|
||||
ip_address = config['address']
|
||||
break
|
||||
else:
|
||||
interface = self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name))
|
||||
for config in interface:
|
||||
if config['family'] == prefered_container_network_family:
|
||||
ip_address = config['address']
|
||||
break
|
||||
interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name))
|
||||
for interface in interfaces.values():
|
||||
for config in interface:
|
||||
if config['family'] == prefered_instance_network_family:
|
||||
ip_address = config['address']
|
||||
break
|
||||
return ip_address
|
||||
|
||||
if self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name)): # container have network interfaces
|
||||
if self._get_data_entry('inventory/{0}/preferred_interface'.format(container_name)): # container have a preferred interface
|
||||
self.inventory.set_variable(container_name, 'ansible_connection', 'ssh')
|
||||
self.inventory.set_variable(container_name, 'ansible_host', interface_selection(container_name))
|
||||
if self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)): # instance have network interfaces
|
||||
self.inventory.set_variable(instance_name, 'ansible_connection', 'ssh')
|
||||
self.inventory.set_variable(instance_name, 'ansible_host', interface_selection(instance_name))
|
||||
else:
|
||||
self.inventory.set_variable(container_name, 'ansible_connection', 'local')
|
||||
self.inventory.set_variable(instance_name, 'ansible_connection', 'local')
|
||||
|
||||
def build_inventory_hosts(self):
|
||||
"""Build host-part dynamic inventory
|
||||
@@ -626,29 +654,33 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
None
|
||||
Returns:
|
||||
None"""
|
||||
for container_name in self.data['inventory']:
|
||||
# Only consider containers that match the "state" filter, if self.state is not None
|
||||
for instance_name in self.data['inventory']:
|
||||
instance_state = str(self._get_data_entry('inventory/{0}/state'.format(instance_name)) or "STOPPED").lower()
|
||||
|
||||
# Only consider instances that match the "state" filter, if self.state is not None
|
||||
if self.filter:
|
||||
if self.filter.lower() != self._get_data_entry('inventory/{0}/state'.format(container_name)).lower():
|
||||
if self.filter.lower() != instance_state:
|
||||
continue
|
||||
# add container
|
||||
self.inventory.add_host(container_name)
|
||||
# add instance
|
||||
self.inventory.add_host(instance_name)
|
||||
# add network informations
|
||||
self.build_inventory_network(container_name)
|
||||
self.build_inventory_network(instance_name)
|
||||
# add os
|
||||
self.inventory.set_variable(container_name, 'ansible_lxd_os', self._get_data_entry('inventory/{0}/os'.format(container_name)).lower())
|
||||
self.inventory.set_variable(instance_name, 'ansible_lxd_os', self._get_data_entry('inventory/{0}/os'.format(instance_name)).lower())
|
||||
# add release
|
||||
self.inventory.set_variable(container_name, 'ansible_lxd_release', self._get_data_entry('inventory/{0}/release'.format(container_name)).lower())
|
||||
self.inventory.set_variable(instance_name, 'ansible_lxd_release', self._get_data_entry('inventory/{0}/release'.format(instance_name)).lower())
|
||||
# add profile
|
||||
self.inventory.set_variable(container_name, 'ansible_lxd_profile', self._get_data_entry('inventory/{0}/profile'.format(container_name)))
|
||||
self.inventory.set_variable(instance_name, 'ansible_lxd_profile', self._get_data_entry('inventory/{0}/profile'.format(instance_name)))
|
||||
# add state
|
||||
self.inventory.set_variable(container_name, 'ansible_lxd_state', self._get_data_entry('inventory/{0}/state'.format(container_name)).lower())
|
||||
self.inventory.set_variable(instance_name, 'ansible_lxd_state', instance_state)
|
||||
# add type
|
||||
self.inventory.set_variable(instance_name, 'ansible_lxd_type', self._get_data_entry('inventory/{0}/type'.format(instance_name)))
|
||||
# add location information
|
||||
if self._get_data_entry('inventory/{0}/location'.format(container_name)) != "none": # wrong type by lxd 'none' != 'None'
|
||||
self.inventory.set_variable(container_name, 'ansible_lxd_location', self._get_data_entry('inventory/{0}/location'.format(container_name)))
|
||||
if self._get_data_entry('inventory/{0}/location'.format(instance_name)) != "none": # wrong type by lxd 'none' != 'None'
|
||||
self.inventory.set_variable(instance_name, 'ansible_lxd_location', self._get_data_entry('inventory/{0}/location'.format(instance_name)))
|
||||
# add VLAN_ID information
|
||||
if self._get_data_entry('inventory/{0}/vlan_ids'.format(container_name)):
|
||||
self.inventory.set_variable(container_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(container_name)))
|
||||
if self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)):
|
||||
self.inventory.set_variable(instance_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)))
|
||||
|
||||
def build_inventory_groups_location(self, group_name):
|
||||
"""create group by attribute: location
|
||||
@@ -665,9 +697,9 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
if group_name not in self.inventory.groups:
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
for container_name in self.inventory.hosts:
|
||||
if 'ansible_lxd_location' in self.inventory.get_host(container_name).get_vars():
|
||||
self.inventory.add_child(group_name, container_name)
|
||||
for instance_name in self.inventory.hosts:
|
||||
if 'ansible_lxd_location' in self.inventory.get_host(instance_name).get_vars():
|
||||
self.inventory.add_child(group_name, instance_name)
|
||||
|
||||
def build_inventory_groups_pattern(self, group_name):
|
||||
"""create group by name pattern
|
||||
@@ -686,10 +718,10 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
|
||||
regex_pattern = self.groupby[group_name].get('attribute')
|
||||
|
||||
for container_name in self.inventory.hosts:
|
||||
result = re.search(regex_pattern, container_name)
|
||||
for instance_name in self.inventory.hosts:
|
||||
result = re.search(regex_pattern, instance_name)
|
||||
if result:
|
||||
self.inventory.add_child(group_name, container_name)
|
||||
self.inventory.add_child(group_name, instance_name)
|
||||
|
||||
def build_inventory_groups_network_range(self, group_name):
|
||||
"""check if IP is in network-class
|
||||
@@ -712,14 +744,14 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
raise AnsibleParserError(
|
||||
'Error while parsing network range {0}: {1}'.format(self.groupby[group_name].get('attribute'), to_native(err)))
|
||||
|
||||
for container_name in self.inventory.hosts:
|
||||
if self.data['inventory'][container_name].get('network_interfaces') is not None:
|
||||
for interface in self.data['inventory'][container_name].get('network_interfaces'):
|
||||
for interface_family in self.data['inventory'][container_name].get('network_interfaces')[interface]:
|
||||
for instance_name in self.inventory.hosts:
|
||||
if self.data['inventory'][instance_name].get('network_interfaces') is not None:
|
||||
for interface in self.data['inventory'][instance_name].get('network_interfaces'):
|
||||
for interface_family in self.data['inventory'][instance_name].get('network_interfaces')[interface]:
|
||||
try:
|
||||
address = ipaddress.ip_address(to_text(interface_family['address']))
|
||||
if address.version == network.version and address in network:
|
||||
self.inventory.add_child(group_name, container_name)
|
||||
self.inventory.add_child(group_name, instance_name)
|
||||
except ValueError:
|
||||
# Ignore invalid IP addresses returned by lxd
|
||||
pass
|
||||
@@ -730,7 +762,7 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
Args:
|
||||
str(group_name): Group name
|
||||
Kwargs:
|
||||
Noneself.data['inventory'][container_name][interface]
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
@@ -739,12 +771,12 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
if group_name not in self.inventory.groups:
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
gen_containers = [
|
||||
container_name for container_name in self.inventory.hosts
|
||||
if 'ansible_lxd_os' in self.inventory.get_host(container_name).get_vars()]
|
||||
for container_name in gen_containers:
|
||||
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(container_name).get_vars().get('ansible_lxd_os'):
|
||||
self.inventory.add_child(group_name, container_name)
|
||||
gen_instances = [
|
||||
instance_name for instance_name in self.inventory.hosts
|
||||
if 'ansible_lxd_os' in self.inventory.get_host(instance_name).get_vars()]
|
||||
for instance_name in gen_instances:
|
||||
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_os'):
|
||||
self.inventory.add_child(group_name, instance_name)
|
||||
|
||||
def build_inventory_groups_release(self, group_name):
|
||||
"""create group by attribute: release
|
||||
@@ -761,12 +793,12 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
if group_name not in self.inventory.groups:
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
gen_containers = [
|
||||
container_name for container_name in self.inventory.hosts
|
||||
if 'ansible_lxd_release' in self.inventory.get_host(container_name).get_vars()]
|
||||
for container_name in gen_containers:
|
||||
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(container_name).get_vars().get('ansible_lxd_release'):
|
||||
self.inventory.add_child(group_name, container_name)
|
||||
gen_instances = [
|
||||
instance_name for instance_name in self.inventory.hosts
|
||||
if 'ansible_lxd_release' in self.inventory.get_host(instance_name).get_vars()]
|
||||
for instance_name in gen_instances:
|
||||
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_release'):
|
||||
self.inventory.add_child(group_name, instance_name)
|
||||
|
||||
def build_inventory_groups_profile(self, group_name):
|
||||
"""create group by attribute: profile
|
||||
@@ -783,12 +815,12 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
if group_name not in self.inventory.groups:
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
gen_containers = [
|
||||
container_name for container_name in self.inventory.hosts.keys()
|
||||
if 'ansible_lxd_profile' in self.inventory.get_host(container_name).get_vars().keys()]
|
||||
for container_name in gen_containers:
|
||||
if self.groupby[group_name].get('attribute').lower() in self.inventory.get_host(container_name).get_vars().get('ansible_lxd_profile'):
|
||||
self.inventory.add_child(group_name, container_name)
|
||||
gen_instances = [
|
||||
instance_name for instance_name in self.inventory.hosts.keys()
|
||||
if 'ansible_lxd_profile' in self.inventory.get_host(instance_name).get_vars().keys()]
|
||||
for instance_name in gen_instances:
|
||||
if self.groupby[group_name].get('attribute').lower() in self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_profile'):
|
||||
self.inventory.add_child(group_name, instance_name)
|
||||
|
||||
def build_inventory_groups_vlanid(self, group_name):
|
||||
"""create group by attribute: vlanid
|
||||
@@ -805,12 +837,34 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
if group_name not in self.inventory.groups:
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
gen_containers = [
|
||||
container_name for container_name in self.inventory.hosts.keys()
|
||||
if 'ansible_lxd_vlan_ids' in self.inventory.get_host(container_name).get_vars().keys()]
|
||||
for container_name in gen_containers:
|
||||
if self.groupby[group_name].get('attribute') in self.inventory.get_host(container_name).get_vars().get('ansible_lxd_vlan_ids').values():
|
||||
self.inventory.add_child(group_name, container_name)
|
||||
gen_instances = [
|
||||
instance_name for instance_name in self.inventory.hosts.keys()
|
||||
if 'ansible_lxd_vlan_ids' in self.inventory.get_host(instance_name).get_vars().keys()]
|
||||
for instance_name in gen_instances:
|
||||
if self.groupby[group_name].get('attribute') in self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_vlan_ids').values():
|
||||
self.inventory.add_child(group_name, instance_name)
|
||||
|
||||
def build_inventory_groups_type(self, group_name):
|
||||
"""create group by attribute: type
|
||||
|
||||
Args:
|
||||
str(group_name): Group name
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
None"""
|
||||
# maybe we just want to expand one group
|
||||
if group_name not in self.inventory.groups:
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
gen_instances = [
|
||||
instance_name for instance_name in self.inventory.hosts
|
||||
if 'ansible_lxd_type' in self.inventory.get_host(instance_name).get_vars()]
|
||||
for instance_name in gen_instances:
|
||||
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_type'):
|
||||
self.inventory.add_child(group_name, instance_name)
|
||||
|
||||
def build_inventory_groups(self):
|
||||
"""Build group-part dynamic inventory
|
||||
@@ -839,6 +893,7 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
* 'release'
|
||||
* 'profile'
|
||||
* 'vlanid'
|
||||
* 'type'
|
||||
|
||||
Args:
|
||||
str(group_name): Group name
|
||||
@@ -864,6 +919,8 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
self.build_inventory_groups_profile(group_name)
|
||||
elif self.groupby[group_name].get('type') == 'vlanid':
|
||||
self.build_inventory_groups_vlanid(group_name)
|
||||
elif self.groupby[group_name].get('type') == 'type':
|
||||
self.build_inventory_groups_type(group_name)
|
||||
else:
|
||||
raise AnsibleParserError('Unknown group type: {0}'.format(to_native(group_name)))
|
||||
|
||||
@@ -890,10 +947,30 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
self.build_inventory_hosts()
|
||||
self.build_inventory_groups()
|
||||
|
||||
def cleandata(self):
|
||||
"""Clean the dynamic inventory
|
||||
|
||||
The first version of the inventory only supported container.
|
||||
This will change in the future.
|
||||
The following function cleans up the data and remove the all items with the wrong type.
|
||||
|
||||
Args:
|
||||
None
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
None"""
|
||||
iter_keys = list(self.data['instances'].keys())
|
||||
for instance_name in iter_keys:
|
||||
if self._get_data_entry('instances/{0}/instances/metadata/type'.format(instance_name)) != self.type_filter:
|
||||
del self.data['instances'][instance_name]
|
||||
|
||||
def _populate(self):
|
||||
"""Return the hosts and groups
|
||||
|
||||
Returns the processed container configurations from the lxd import
|
||||
Returns the processed instance configurations from the lxd import
|
||||
|
||||
Args:
|
||||
None
|
||||
@@ -906,10 +983,16 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
|
||||
if len(self.data) == 0: # If no data is injected by unittests open socket
|
||||
self.socket = self._connect_to_socket()
|
||||
self.get_container_data(self._get_containers())
|
||||
self.get_instance_data(self._get_instances())
|
||||
self.get_network_data(self._get_networks())
|
||||
|
||||
self.extract_information_from_container_configs()
|
||||
# The first version of the inventory only supported containers.
|
||||
# This will change in the future.
|
||||
# The following function cleans up the data.
|
||||
if self.type_filter != 'both':
|
||||
self.cleandata()
|
||||
|
||||
self.extract_information_from_instance_configs()
|
||||
|
||||
# self.display.vvv(self.save_json_data([os.path.abspath(__file__)]))
|
||||
|
||||
@@ -948,8 +1031,9 @@ class InventoryModule(BaseInventoryPlugin):
|
||||
self.data = {} # store for inventory-data
|
||||
self.groupby = self.get_option('groupby')
|
||||
self.plugin = self.get_option('plugin')
|
||||
self.prefered_container_network_family = self.get_option('prefered_container_network_family')
|
||||
self.prefered_container_network_interface = self.get_option('prefered_container_network_interface')
|
||||
self.prefered_instance_network_family = self.get_option('prefered_instance_network_family')
|
||||
self.prefered_instance_network_interface = self.get_option('prefered_instance_network_interface')
|
||||
self.type_filter = self.get_option('type_filter')
|
||||
if self.get_option('state').lower() == 'none': # none in config is str()
|
||||
self.filter = None
|
||||
else:
|
||||
|
||||
@@ -8,7 +8,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = r'''
|
||||
name: online
|
||||
author:
|
||||
- Remy Leone (@sieben)
|
||||
- Remy Leone (@remyleone)
|
||||
short_description: Scaleway (previously Online SAS or Online.net) inventory source
|
||||
description:
|
||||
- Get inventory hosts from Scaleway (previously Online SAS or Online.net).
|
||||
|
||||
@@ -119,12 +119,13 @@ compose:
|
||||
import re
|
||||
|
||||
from ansible.module_utils.common._collections_compat import MutableMapping
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlencode
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
# 3rd party imports
|
||||
try:
|
||||
import requests
|
||||
|
||||
@@ -9,7 +9,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = r'''
|
||||
name: scaleway
|
||||
author:
|
||||
- Remy Leone (@sieben)
|
||||
- Remy Leone (@remyleone)
|
||||
short_description: Scaleway inventory source
|
||||
description:
|
||||
- Get inventory hosts from Scaleway.
|
||||
|
||||
328
plugins/inventory/xen_orchestra.py
Normal file
328
plugins/inventory/xen_orchestra.py
Normal file
@@ -0,0 +1,328 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2021 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: xen_orchestra
|
||||
short_description: Xen Orchestra inventory source
|
||||
version_added: 4.1.0
|
||||
author:
|
||||
- Dom Del Nano (@ddelnano) <ddelnano@gmail.com>
|
||||
- Samori Gorse (@shinuza) <samorigorse@gmail.com>
|
||||
requirements:
|
||||
- websocket-client >= 1.0.0
|
||||
description:
|
||||
- Get inventory hosts from a Xen Orchestra deployment.
|
||||
- 'Uses a configuration file as an inventory source, it must end in C(.xen_orchestra.yml) or C(.xen_orchestra.yaml).'
|
||||
extends_documentation_fragment:
|
||||
- constructed
|
||||
- inventory_cache
|
||||
options:
|
||||
plugin:
|
||||
description: The name of this plugin, it should always be set to C(community.general.xen_orchestra) for this plugin to recognize it as its own.
|
||||
required: yes
|
||||
choices: ['community.general.xen_orchestra']
|
||||
type: str
|
||||
api_host:
|
||||
description:
|
||||
- API host to XOA API.
|
||||
- If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_HOST) will be used instead.
|
||||
type: str
|
||||
env:
|
||||
- name: ANSIBLE_XO_HOST
|
||||
user:
|
||||
description:
|
||||
- Xen Orchestra user.
|
||||
- If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_USER) will be used instead.
|
||||
required: yes
|
||||
type: str
|
||||
env:
|
||||
- name: ANSIBLE_XO_USER
|
||||
password:
|
||||
description:
|
||||
- Xen Orchestra password.
|
||||
- If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_PASSWORD) will be used instead.
|
||||
required: yes
|
||||
type: str
|
||||
env:
|
||||
- name: ANSIBLE_XO_PASSWORD
|
||||
validate_certs:
|
||||
description: Verify TLS certificate if using HTTPS.
|
||||
type: boolean
|
||||
default: true
|
||||
use_ssl:
|
||||
description: Use wss when connecting to the Xen Orchestra API
|
||||
type: boolean
|
||||
default: true
|
||||
'''
|
||||
|
||||
|
||||
EXAMPLES = '''
|
||||
# file must be named xen_orchestra.yaml or xen_orchestra.yml
|
||||
simple_config_file:
|
||||
plugin: community.general.xen_orchestra
|
||||
api_host: 192.168.1.255
|
||||
user: xo
|
||||
password: xo_pwd
|
||||
validate_certs: true
|
||||
use_ssl: true
|
||||
groups:
|
||||
kube_nodes: "'kube_node' in tags"
|
||||
compose:
|
||||
ansible_port: 2222
|
||||
|
||||
'''
|
||||
|
||||
import json
|
||||
import ssl
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
# 3rd party imports
|
||||
try:
|
||||
HAS_WEBSOCKET = True
|
||||
import websocket
|
||||
from websocket import create_connection
|
||||
|
||||
if LooseVersion(websocket.__version__) <= LooseVersion('1.0.0'):
|
||||
raise ImportError
|
||||
except ImportError as e:
|
||||
HAS_WEBSOCKET = False
|
||||
|
||||
|
||||
HALTED = 'Halted'
|
||||
PAUSED = 'Paused'
|
||||
RUNNING = 'Running'
|
||||
SUSPENDED = 'Suspended'
|
||||
POWER_STATES = [RUNNING, HALTED, SUSPENDED, PAUSED]
|
||||
HOST_GROUP = 'xo_hosts'
|
||||
POOL_GROUP = 'xo_pools'
|
||||
|
||||
|
||||
def clean_group_name(label):
|
||||
return label.lower().replace(' ', '-').replace('-', '_')
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
''' Host inventory parser for ansible using XenOrchestra as source. '''
|
||||
|
||||
NAME = 'community.general.xen_orchestra'
|
||||
|
||||
def __init__(self):
|
||||
|
||||
super(InventoryModule, self).__init__()
|
||||
|
||||
# from config
|
||||
self.counter = -1
|
||||
self.session = None
|
||||
self.cache_key = None
|
||||
self.use_cache = None
|
||||
|
||||
@property
|
||||
def pointer(self):
|
||||
self.counter += 1
|
||||
return self.counter
|
||||
|
||||
def create_connection(self, xoa_api_host):
|
||||
validate_certs = self.get_option('validate_certs')
|
||||
use_ssl = self.get_option('use_ssl')
|
||||
proto = 'wss' if use_ssl else 'ws'
|
||||
|
||||
sslopt = None if validate_certs else {'cert_reqs': ssl.CERT_NONE}
|
||||
self.conn = create_connection(
|
||||
'{0}://{1}/api/'.format(proto, xoa_api_host), sslopt=sslopt)
|
||||
|
||||
def login(self, user, password):
|
||||
payload = {'id': self.pointer, 'jsonrpc': '2.0', 'method': 'session.signIn', 'params': {
|
||||
'username': user, 'password': password}}
|
||||
self.conn.send(json.dumps(payload))
|
||||
result = json.loads(self.conn.recv())
|
||||
|
||||
if 'error' in result:
|
||||
raise AnsibleError(
|
||||
'Could not connect: {0}'.format(result['error']))
|
||||
|
||||
def get_object(self, name):
|
||||
payload = {'id': self.pointer, 'jsonrpc': '2.0',
|
||||
'method': 'xo.getAllObjects', 'params': {'filter': {'type': name}}}
|
||||
self.conn.send(json.dumps(payload))
|
||||
answer = json.loads(self.conn.recv())
|
||||
|
||||
if 'error' in answer:
|
||||
raise AnsibleError(
|
||||
'Could not request: {0}'.format(answer['error']))
|
||||
|
||||
return answer['result']
|
||||
|
||||
def _get_objects(self):
|
||||
self.create_connection(self.xoa_api_host)
|
||||
self.login(self.xoa_user, self.xoa_password)
|
||||
|
||||
return {
|
||||
'vms': self.get_object('VM'),
|
||||
'pools': self.get_object('pool'),
|
||||
'hosts': self.get_object('host'),
|
||||
}
|
||||
|
||||
def _apply_constructable(self, name, variables):
|
||||
strict = self.get_option('strict')
|
||||
self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=strict)
|
||||
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=strict)
|
||||
self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict)
|
||||
|
||||
def _add_vms(self, vms, hosts, pools):
|
||||
for uuid, vm in vms.items():
|
||||
group = 'with_ip'
|
||||
ip = vm.get('mainIpAddress')
|
||||
entry_name = uuid
|
||||
power_state = vm['power_state'].lower()
|
||||
pool_name = self._pool_group_name_for_uuid(pools, vm['$poolId'])
|
||||
host_name = self._host_group_name_for_uuid(hosts, vm['$container'])
|
||||
|
||||
self.inventory.add_host(entry_name)
|
||||
|
||||
# Grouping by power state
|
||||
self.inventory.add_child(power_state, entry_name)
|
||||
|
||||
# Grouping by host
|
||||
if host_name:
|
||||
self.inventory.add_child(host_name, entry_name)
|
||||
|
||||
# Grouping by pool
|
||||
if pool_name:
|
||||
self.inventory.add_child(pool_name, entry_name)
|
||||
|
||||
# Grouping VMs with an IP together
|
||||
if ip is None:
|
||||
group = 'without_ip'
|
||||
self.inventory.add_group(group)
|
||||
self.inventory.add_child(group, entry_name)
|
||||
|
||||
# Adding meta
|
||||
self.inventory.set_variable(entry_name, 'uuid', uuid)
|
||||
self.inventory.set_variable(entry_name, 'ip', ip)
|
||||
self.inventory.set_variable(entry_name, 'ansible_host', ip)
|
||||
self.inventory.set_variable(entry_name, 'power_state', power_state)
|
||||
self.inventory.set_variable(
|
||||
entry_name, 'name_label', vm['name_label'])
|
||||
self.inventory.set_variable(entry_name, 'type', vm['type'])
|
||||
self.inventory.set_variable(
|
||||
entry_name, 'cpus', vm['CPUs']['number'])
|
||||
self.inventory.set_variable(entry_name, 'tags', vm['tags'])
|
||||
self.inventory.set_variable(
|
||||
entry_name, 'memory', vm['memory']['size'])
|
||||
self.inventory.set_variable(
|
||||
entry_name, 'has_ip', group == 'with_ip')
|
||||
self.inventory.set_variable(
|
||||
entry_name, 'is_managed', vm.get('managementAgentDetected', False))
|
||||
self.inventory.set_variable(
|
||||
entry_name, 'os_version', vm['os_version'])
|
||||
|
||||
self._apply_constructable(entry_name, self.inventory.get_host(entry_name).get_vars())
|
||||
|
||||
def _add_hosts(self, hosts, pools):
|
||||
for host in hosts.values():
|
||||
entry_name = host['uuid']
|
||||
group_name = 'xo_host_{0}'.format(
|
||||
clean_group_name(host['name_label']))
|
||||
pool_name = self._pool_group_name_for_uuid(pools, host['$poolId'])
|
||||
|
||||
self.inventory.add_group(group_name)
|
||||
self.inventory.add_host(entry_name)
|
||||
self.inventory.add_child(HOST_GROUP, entry_name)
|
||||
self.inventory.add_child(pool_name, entry_name)
|
||||
|
||||
self.inventory.set_variable(entry_name, 'enabled', host['enabled'])
|
||||
self.inventory.set_variable(
|
||||
entry_name, 'hostname', host['hostname'])
|
||||
self.inventory.set_variable(entry_name, 'memory', host['memory'])
|
||||
self.inventory.set_variable(entry_name, 'address', host['address'])
|
||||
self.inventory.set_variable(entry_name, 'cpus', host['cpus'])
|
||||
self.inventory.set_variable(entry_name, 'type', 'host')
|
||||
self.inventory.set_variable(entry_name, 'tags', host['tags'])
|
||||
self.inventory.set_variable(entry_name, 'version', host['version'])
|
||||
self.inventory.set_variable(
|
||||
entry_name, 'power_state', host['power_state'].lower())
|
||||
self.inventory.set_variable(
|
||||
entry_name, 'product_brand', host['productBrand'])
|
||||
|
||||
for pool in pools.values():
|
||||
group_name = 'xo_pool_{0}'.format(
|
||||
clean_group_name(pool['name_label']))
|
||||
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
def _add_pools(self, pools):
|
||||
for pool in pools.values():
|
||||
group_name = 'xo_pool_{0}'.format(
|
||||
clean_group_name(pool['name_label']))
|
||||
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
# TODO: Refactor
|
||||
def _pool_group_name_for_uuid(self, pools, pool_uuid):
|
||||
for pool in pools:
|
||||
if pool == pool_uuid:
|
||||
return 'xo_pool_{0}'.format(
|
||||
clean_group_name(pools[pool_uuid]['name_label']))
|
||||
|
||||
# TODO: Refactor
|
||||
def _host_group_name_for_uuid(self, hosts, host_uuid):
|
||||
for host in hosts:
|
||||
if host == host_uuid:
|
||||
return 'xo_host_{0}'.format(
|
||||
clean_group_name(hosts[host_uuid]['name_label']
|
||||
))
|
||||
|
||||
def _populate(self, objects):
|
||||
# Prepare general groups
|
||||
self.inventory.add_group(HOST_GROUP)
|
||||
self.inventory.add_group(POOL_GROUP)
|
||||
for group in POWER_STATES:
|
||||
self.inventory.add_group(group.lower())
|
||||
|
||||
self._add_pools(objects['pools'])
|
||||
self._add_hosts(objects['hosts'], objects['pools'])
|
||||
self._add_vms(objects['vms'], objects['hosts'], objects['pools'])
|
||||
|
||||
def verify_file(self, path):
|
||||
|
||||
valid = False
|
||||
if super(InventoryModule, self).verify_file(path):
|
||||
if path.endswith(('xen_orchestra.yaml', 'xen_orchestra.yml')):
|
||||
valid = True
|
||||
else:
|
||||
self.display.vvv(
|
||||
'Skipping due to inventory source not ending in "xen_orchestra.yaml" nor "xen_orchestra.yml"')
|
||||
return valid
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
|
||||
if not HAS_WEBSOCKET:
|
||||
raise AnsibleError('This plugin requires websocket-client 1.0.0 or higher: '
|
||||
'https://github.com/websocket-client/websocket-client.')
|
||||
|
||||
super(InventoryModule, self).parse(inventory, loader, path)
|
||||
|
||||
# read config from file, this sets 'options'
|
||||
self._read_config_data(path)
|
||||
self.inventory = inventory
|
||||
|
||||
self.protocol = 'wss'
|
||||
self.xoa_api_host = self.get_option('api_host')
|
||||
self.xoa_user = self.get_option('user')
|
||||
self.xoa_password = self.get_option('password')
|
||||
self.cache_key = self.get_cache_key(path)
|
||||
self.use_cache = cache and self.get_option('cache')
|
||||
|
||||
self.validate_certs = self.get_option('validate_certs')
|
||||
if not self.get_option('use_ssl'):
|
||||
self.protocol = 'ws'
|
||||
|
||||
objects = self._get_objects()
|
||||
self._populate(objects)
|
||||
@@ -93,7 +93,7 @@ DOCUMENTATION = '''
|
||||
environment variable and keep I(endpoints), I(host), and I(port) unused.
|
||||
seealso:
|
||||
- module: community.general.etcd3
|
||||
- ref: etcd_lookup
|
||||
- ref: ansible_collections.community.general.etcd_lookup
|
||||
description: The etcd v2 lookup.
|
||||
|
||||
requirements:
|
||||
|
||||
@@ -23,7 +23,7 @@ DOCUMENTATION = '''
|
||||
EXAMPLES = """
|
||||
- name: "'unnest' all elements into single list"
|
||||
ansible.builtin.debug:
|
||||
msg: "all in one list {{lookup('community.general.flattened', [1,2,3,[5,6]], [a,b,c], [[5,6,1,3], [34,a,b,c]])}}"
|
||||
msg: "all in one list {{lookup('community.general.flattened', [1,2,3,[5,6]], ['a','b','c'], [[5,6,1,3], [34,'a','b','c']])}}"
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
|
||||
@@ -141,9 +141,9 @@ import time
|
||||
import yaml
|
||||
|
||||
|
||||
from distutils import util
|
||||
from ansible.errors import AnsibleError, AnsibleAssertionError
|
||||
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
|
||||
from ansible.module_utils.parsing.convert_bool import boolean
|
||||
from ansible.utils.display import Display
|
||||
from ansible.utils.encrypt import random_password
|
||||
from ansible.plugins.lookup import LookupBase
|
||||
@@ -211,7 +211,7 @@ class LookupModule(LookupBase):
|
||||
try:
|
||||
for key in ['create', 'returnall', 'overwrite', 'backup', 'nosymbols']:
|
||||
if not isinstance(self.paramvals[key], bool):
|
||||
self.paramvals[key] = util.strtobool(self.paramvals[key])
|
||||
self.paramvals[key] = boolean(self.paramvals[key])
|
||||
except (ValueError, AssertionError) as e:
|
||||
raise AnsibleError(e)
|
||||
if self.paramvals['missing'] not in ['error', 'warn', 'create', 'empty']:
|
||||
|
||||
107
plugins/lookup/revbitspss.py
Normal file
107
plugins/lookup/revbitspss.py
Normal file
@@ -0,0 +1,107 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2021, RevBits <info@revbits.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or
|
||||
# https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
name: revbitspss
|
||||
author: RevBits (@RevBits) <info@revbits.com>
|
||||
short_description: Get secrets from RevBits PAM server
|
||||
version_added: 4.1.0
|
||||
description:
|
||||
- Uses the revbits_ansible Python SDK to get Secrets from RevBits PAM
|
||||
Server using API key authentication with the REST API.
|
||||
requirements:
|
||||
- revbits_ansible - U(https://pypi.org/project/revbits_ansible/)
|
||||
options:
|
||||
_terms:
|
||||
description:
|
||||
- This will be an array of keys for secrets which you want to fetch from RevBits PAM.
|
||||
required: true
|
||||
type: list
|
||||
elements: string
|
||||
base_url:
|
||||
description:
|
||||
- This will be the base URL of the server, for example C(https://server-url-here).
|
||||
required: true
|
||||
type: string
|
||||
api_key:
|
||||
description:
|
||||
- This will be the API key for authentication. You can get it from the RevBits PAM secret manager module.
|
||||
required: true
|
||||
type: string
|
||||
"""
|
||||
|
||||
RETURN = r"""
|
||||
_list:
|
||||
description:
|
||||
- The JSON responses which you can access with defined keys.
|
||||
- If you are fetching secrets named as UUID, PASSWORD it will gives you the dict of all secrets.
|
||||
type: list
|
||||
elements: dict
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
- hosts: localhost
|
||||
vars:
|
||||
secret: >-
|
||||
{{
|
||||
lookup(
|
||||
'community.general.revbitspss',
|
||||
'UUIDPAM', 'DB_PASS',
|
||||
base_url='https://server-url-here',
|
||||
api_key='API_KEY_GOES_HERE'
|
||||
)
|
||||
}}
|
||||
tasks:
|
||||
- ansible.builtin.debug:
|
||||
msg: >
|
||||
UUIDPAM is {{ (secret['UUIDPAM']) }} and DB_PASS is {{ (secret['DB_PASS']) }}
|
||||
"""
|
||||
|
||||
from ansible.plugins.lookup import LookupBase
|
||||
from ansible.utils.display import Display
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils.six import raise_from
|
||||
|
||||
try:
|
||||
from pam.revbits_ansible.server import SecretServer
|
||||
except ImportError as imp_exc:
|
||||
ANOTHER_LIBRARY_IMPORT_ERROR = imp_exc
|
||||
else:
|
||||
ANOTHER_LIBRARY_IMPORT_ERROR = None
|
||||
|
||||
|
||||
display = Display()
|
||||
|
||||
|
||||
class LookupModule(LookupBase):
|
||||
|
||||
@staticmethod
|
||||
def Client(server_parameters):
|
||||
return SecretServer(**server_parameters)
|
||||
|
||||
def run(self, terms, variables, **kwargs):
|
||||
if ANOTHER_LIBRARY_IMPORT_ERROR:
|
||||
raise_from(
|
||||
AnsibleError('revbits_ansible must be installed to use this plugin'),
|
||||
ANOTHER_LIBRARY_IMPORT_ERROR
|
||||
)
|
||||
self.set_options(var_options=variables, direct=kwargs)
|
||||
secret_server = LookupModule.Client(
|
||||
{
|
||||
"base_url": self.get_option('base_url'),
|
||||
"api_key": self.get_option('api_key'),
|
||||
}
|
||||
)
|
||||
result = []
|
||||
for term in terms:
|
||||
try:
|
||||
display.vvv(u"Secret Server lookup of Secret with ID %s" % term)
|
||||
result.append({term: secret_server.get_pam_secret(term)})
|
||||
except Exception as error:
|
||||
raise AnsibleError("Secret Server lookup failure: %s" % error.message)
|
||||
return result
|
||||
343
plugins/module_utils/_version.py
Normal file
343
plugins/module_utils/_version.py
Normal file
@@ -0,0 +1,343 @@
|
||||
# Vendored copy of distutils/version.py from CPython 3.9.5
|
||||
#
|
||||
# Implements multiple version numbering conventions for the
|
||||
# Python Module Distribution Utilities.
|
||||
#
|
||||
# PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0)
|
||||
#
|
||||
|
||||
"""Provides classes to represent module version numbers (one class for
|
||||
each style of version numbering). There are currently two such classes
|
||||
implemented: StrictVersion and LooseVersion.
|
||||
|
||||
Every version number class implements the following interface:
|
||||
* the 'parse' method takes a string and parses it to some internal
|
||||
representation; if the string is an invalid version number,
|
||||
'parse' raises a ValueError exception
|
||||
* the class constructor takes an optional string argument which,
|
||||
if supplied, is passed to 'parse'
|
||||
* __str__ reconstructs the string that was passed to 'parse' (or
|
||||
an equivalent string -- ie. one that will generate an equivalent
|
||||
version number instance)
|
||||
* __repr__ generates Python code to recreate the version number instance
|
||||
* _cmp compares the current instance with either another instance
|
||||
of the same class or a string (which will be parsed to an instance
|
||||
of the same class, thus must follow the same rules)
|
||||
"""
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import re
|
||||
|
||||
try:
|
||||
RE_FLAGS = re.VERBOSE | re.ASCII
|
||||
except AttributeError:
|
||||
RE_FLAGS = re.VERBOSE
|
||||
|
||||
|
||||
class Version:
    """Abstract base class for version numbering classes.

    Supplies the constructor (which delegates to ``parse``), a ``__repr__``,
    and the six rich comparison operators, all of which are routed through
    ``_cmp``.  Concrete subclasses are expected to provide ``parse``,
    ``__str__`` and ``_cmp``.
    """

    def __init__(self, vstring=None):
        # A falsy vstring (None or '') leaves the instance unparsed.
        if vstring:
            self.parse(vstring)

    def __repr__(self):
        return "%s ('%s')" % (self.__class__.__name__, str(self))

    def __eq__(self, other):
        outcome = self._cmp(other)
        return outcome if outcome is NotImplemented else outcome == 0

    def __lt__(self, other):
        outcome = self._cmp(other)
        return outcome if outcome is NotImplemented else outcome < 0

    def __le__(self, other):
        outcome = self._cmp(other)
        return outcome if outcome is NotImplemented else outcome <= 0

    def __gt__(self, other):
        outcome = self._cmp(other)
        return outcome if outcome is NotImplemented else outcome > 0

    def __ge__(self, other):
        outcome = self._cmp(other)
        return outcome if outcome is NotImplemented else outcome >= 0
|
||||
|
||||
|
||||
# Interface for version-number classes -- must be implemented
|
||||
# by the following classes (the concrete ones -- Version should
|
||||
# be treated as an abstract class).
|
||||
# __init__ (string) - create and take same action as 'parse'
|
||||
# (string parameter is optional)
|
||||
# parse (string) - convert a string representation to whatever
|
||||
# internal representation is appropriate for
|
||||
# this style of version numbering
|
||||
# __str__ (self) - convert back to a string; should be very similar
|
||||
# (if not identical to) the string supplied to parse
|
||||
# __repr__ (self) - generate Python code to recreate
|
||||
# the instance
|
||||
# _cmp (self, other) - compare two version numbers ('other' may
|
||||
# be an unparsed version string, or another
|
||||
# instance of your version class)
|
||||
|
||||
|
||||
class StrictVersion(Version):
    """Strict two/three-component version numbering with pre-release tags.

    A version number is two or three dot-separated numeric components,
    optionally followed by a pre-release tag: the letter 'a' or 'b' plus a
    number (e.g. ``0.4``, ``0.4.1``, ``1.0.4b1``).  When the numeric parts of
    two versions are equal, the one carrying a pre-release tag sorts before
    (less than) the one without.  Strings that do not match this grammar make
    ``parse`` raise ValueError (e.g. ``1``, ``2.7.2.2``, ``1.3pl1``).
    """

    version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
                            RE_FLAGS)

    def parse(self, vstring):
        """Parse *vstring* into self.version / self.prerelease or raise ValueError."""
        match = self.version_re.match(vstring)
        if not match:
            raise ValueError("invalid version number '%s'" % vstring)

        major, minor, patch, prerelease, prerelease_num = match.group(1, 2, 4, 5, 6)

        # A missing patch component is normalised to 0 so every instance
        # carries a three-element numeric tuple.
        self.version = (int(major), int(minor), int(patch) if patch else 0)

        # The pre-release tag is kept as (letter, number) for easy comparison.
        self.prerelease = (prerelease[0], int(prerelease_num)) if prerelease else None

    def __str__(self):
        """Reconstruct a canonical version string (trailing .0 is dropped)."""
        shown = self.version[0:2] if self.version[2] == 0 else self.version
        vstring = '.'.join(str(part) for part in shown)
        if self.prerelease:
            vstring += self.prerelease[0] + str(self.prerelease[1])
        return vstring

    def _cmp(self, other):
        """Return -1/0/1 comparing against another StrictVersion or a string."""
        if isinstance(other, str):
            other = StrictVersion(other)
        elif not isinstance(other, StrictVersion):
            return NotImplemented

        # Different numeric parts decide the order outright; pre-release
        # tags only matter as a tie-breaker.
        if self.version != other.version:
            return -1 if self.version < other.version else 1

        if self.prerelease:
            if not other.prerelease:
                # A pre-release sorts before the matching plain release.
                return -1
            if self.prerelease == other.prerelease:
                return 0
            return -1 if self.prerelease < other.prerelease else 1
        if other.prerelease:
            return 1
        return 0


# end class StrictVersion
|
||||
|
||||
# end class StrictVersion
|
||||
|
||||
# The rules according to Greg Stein:
|
||||
# 1) a version number has 1 or more numbers separated by a period or by
|
||||
# sequences of letters. If only periods, then these are compared
|
||||
# left-to-right to determine an ordering.
|
||||
# 2) sequences of letters are part of the tuple for comparison and are
|
||||
# compared lexicographically
|
||||
# 3) recognize the numeric components may have leading zeroes
|
||||
#
|
||||
# The LooseVersion class below implements these rules: a version number
|
||||
# string is split up into a tuple of integer and string components, and
|
||||
# comparison is a simple tuple comparison. This means that version
|
||||
# numbers behave in a predictable and obvious way, but a way that might
|
||||
# not necessarily be how people *want* version numbers to behave. There
|
||||
# wouldn't be a problem if people could stick to purely numeric version
|
||||
# numbers: just split on period and compare the numbers as tuples.
|
||||
# However, people insist on putting letters into their version numbers;
|
||||
# the most common purpose seems to be:
|
||||
# - indicating a "pre-release" version
|
||||
# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
|
||||
# - indicating a post-release patch ('p', 'pl', 'patch')
|
||||
# but of course this can't cover all version number schemes, and there's
|
||||
# no way to know what a programmer means without asking him.
|
||||
#
|
||||
# The problem is what to do with letters (and other non-numeric
|
||||
# characters) in a version number. The current implementation does the
|
||||
# obvious and predictable thing: keep them as strings and compare
|
||||
# lexically within a tuple comparison. This has the desired effect if
|
||||
# an appended letter sequence implies something "post-release":
|
||||
# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
|
||||
#
|
||||
# However, if letters in a version number imply a pre-release version,
|
||||
# the "obvious" thing isn't correct. Eg. you would expect that
|
||||
# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
|
||||
# implemented here, this just isn't so.
|
||||
#
|
||||
# Two possible solutions come to mind. The first is to tie the
|
||||
# comparison algorithm to a particular set of semantic rules, as has
|
||||
# been done in the StrictVersion class above. This works great as long
|
||||
# as everyone can go along with bondage and discipline. Hopefully a
|
||||
# (large) subset of Python module programmers will agree that the
|
||||
# particular flavour of bondage and discipline provided by StrictVersion
|
||||
# provides enough benefit to be worth using, and will submit their
|
||||
# version numbering scheme to its domination. The free-thinking
|
||||
# anarchists in the lot will never give in, though, and something needs
|
||||
# to be done to accommodate them.
|
||||
#
|
||||
# Perhaps a "moderately strict" version class could be implemented that
|
||||
# lets almost anything slide (syntactically), and makes some heuristic
|
||||
# assumptions about non-digits in version number strings. This could
|
||||
# sink into special-case-hell, though; if I was as talented and
|
||||
# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
|
||||
# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
|
||||
# just as happy dealing with things like "2g6" and "1.13++". I don't
|
||||
# think I'm smart enough to do it right though.
|
||||
#
|
||||
# In any case, I've coded the test suite for this module (see
|
||||
# ../test/test_version.py) specifically to fail on things like comparing
|
||||
# "1.2a2" and "1.2". That's not because the *code* is doing anything
|
||||
# wrong, it's because the simple, obvious design doesn't match my
|
||||
# complicated, hairy expectations for real-world version numbers. It
|
||||
# would be a snap to fix the test suite to say, "Yep, LooseVersion does
|
||||
# the Right Thing" (ie. the code matches the conception). But I'd rather
|
||||
# have a conception that matches common notions about version numbers.
|
||||
|
||||
|
||||
class LooseVersion(Version):
    """Permissive version numbering: anything parses, comparison is best-effort.

    A version string is split into runs of digits (converted to int) and runs
    of letters (kept as strings); periods are discarded.  Comparison is a
    plain sequence comparison of those components, so numeric parts compare
    numerically and alphabetic parts lexically.  There is no such thing as an
    invalid version number under this scheme -- examples include ``1.5.1``,
    ``1.5.2b2``, ``3.10a``, ``1996.07.12``, ``2g6``, ``1.13++`` -- but the
    ordering may not always match human expectations.
    """

    component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)

    def __init__(self, vstring=None):
        if vstring:
            self.parse(vstring)

    def parse(self, vstring):
        """Split *vstring* into int/str components stored in self.version."""
        # The original string cannot be reconstructed from the parsed
        # components, so keep it verbatim for __str__.
        self.vstring = vstring
        parsed = []
        for piece in self.component_re.split(vstring):
            # Separator matches produce empty strings and bare dots; skip them.
            if not piece or piece == '.':
                continue
            try:
                parsed.append(int(piece))
            except ValueError:
                parsed.append(piece)

        self.version = parsed

    def __str__(self):
        return self.vstring

    def __repr__(self):
        return "LooseVersion ('%s')" % str(self)

    def _cmp(self, other):
        """Return -1/0/1 comparing against another LooseVersion or a string."""
        if isinstance(other, str):
            other = LooseVersion(other)
        elif not isinstance(other, LooseVersion):
            return NotImplemented

        if self.version == other.version:
            return 0
        return -1 if self.version < other.version else 1


# end class LooseVersion
|
||||
|
||||
# end class LooseVersion
|
||||
@@ -7,54 +7,41 @@
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
from distutils.version import StrictVersion
|
||||
|
||||
from ansible.module_utils.basic import missing_required_lib
|
||||
from ansible.module_utils.urls import fetch_url
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
try:
|
||||
from urllib import quote_plus # Python 2.X
|
||||
from urlparse import urljoin
|
||||
except ImportError:
|
||||
from urllib.parse import quote_plus # Python 3+
|
||||
from urllib.parse import quote_plus, urljoin # Python 3+
|
||||
|
||||
import traceback
|
||||
|
||||
GITLAB_IMP_ERR = None
|
||||
try:
|
||||
import gitlab
|
||||
import requests
|
||||
HAS_GITLAB_PACKAGE = True
|
||||
except Exception:
|
||||
GITLAB_IMP_ERR = traceback.format_exc()
|
||||
HAS_GITLAB_PACKAGE = False
|
||||
|
||||
|
||||
def request(module, api_url, project, path, access_token, private_token, rawdata='', method='GET'):
    """Issue a GitLab v4 API request scoped to *project* and normalise the result.

    :param module: AnsibleModule instance, passed through to fetch_url
    :param api_url: base GitLab API URL (the '/v4/...' part is appended here)
    :param project: project identifier; URL-quoted before being embedded in the path
    :param path: endpoint path appended after the project id (should start with '/')
    :param access_token: OAuth/personal access token; takes precedence when set
    :param private_token: private token, used only when access_token is falsy
    :param rawdata: request body to send as-is
    :param method: HTTP method, default GET
    :return: (True, payload) on success -- payload is the decoded JSON body for
        200/201, or the raw body for 204 No Content; (False, "<status>: <body>")
        for any other status.
    """
    url = "%s/v4/projects/%s%s" % (api_url, quote_plus(project), path)
    headers = {}
    if access_token:
        # Bearer token wins over a private token when both are supplied.
        headers['Authorization'] = "Bearer %s" % access_token
    else:
        headers['Private-Token'] = private_token

    headers['Accept'] = "application/json"
    headers['Content-Type'] = "application/json"

    response, info = fetch_url(module=module, url=url, headers=headers, data=rawdata, method=method)
    status = info['status']
    content = ""
    if response:
        content = response.read()
    if status == 204:
        # 204 carries no body worth decoding; report success with the raw content.
        return True, content
    elif status == 200 or status == 201:
        return True, json.loads(content)
    else:
        # NOTE(review): content may be bytes under Python 3, which would make
        # this str concatenation raise -- confirm against fetch_url's contract.
        return False, str(status) + ": " + content
|
||||
def auth_argument_spec(spec=None):
    """Return the shared GitLab authentication argument spec.

    Builds the three token arguments common to the GitLab modules and,
    when *spec* is supplied, merges those extra argument definitions on
    top (entries in *spec* override the defaults on key collision).
    """
    token_arg = {'type': 'str', 'no_log': True}
    arg_spec = {
        'api_token': dict(token_arg),
        'api_oauth_token': dict(token_arg),
        'api_job_token': dict(token_arg),
    }
    if spec:
        arg_spec.update(spec)
    return arg_spec
|
||||
|
||||
|
||||
def findProject(gitlab_instance, identifier):
|
||||
def find_project(gitlab_instance, identifier):
|
||||
try:
|
||||
project = gitlab_instance.projects.get(identifier)
|
||||
except Exception as e:
|
||||
@@ -67,7 +54,7 @@ def findProject(gitlab_instance, identifier):
|
||||
return project
|
||||
|
||||
|
||||
def findGroup(gitlab_instance, identifier):
|
||||
def find_group(gitlab_instance, identifier):
|
||||
try:
|
||||
project = gitlab_instance.groups.get(identifier)
|
||||
except Exception as e:
|
||||
@@ -76,12 +63,14 @@ def findGroup(gitlab_instance, identifier):
|
||||
return project
|
||||
|
||||
|
||||
def gitlabAuthentication(module):
|
||||
def gitlab_authentication(module):
|
||||
gitlab_url = module.params['api_url']
|
||||
validate_certs = module.params['validate_certs']
|
||||
gitlab_user = module.params['api_username']
|
||||
gitlab_password = module.params['api_password']
|
||||
gitlab_token = module.params['api_token']
|
||||
gitlab_oauth_token = module.params['api_oauth_token']
|
||||
gitlab_job_token = module.params['api_job_token']
|
||||
|
||||
if not HAS_GITLAB_PACKAGE:
|
||||
module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
|
||||
@@ -90,11 +79,20 @@ def gitlabAuthentication(module):
|
||||
# python-gitlab library remove support for username/password authentication since 1.13.0
|
||||
# Changelog : https://github.com/python-gitlab/python-gitlab/releases/tag/v1.13.0
|
||||
# This condition allow to still support older version of the python-gitlab library
|
||||
if StrictVersion(gitlab.__version__) < StrictVersion("1.13.0"):
|
||||
if LooseVersion(gitlab.__version__) < LooseVersion("1.13.0"):
|
||||
gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password,
|
||||
private_token=gitlab_token, api_version=4)
|
||||
else:
|
||||
gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, private_token=gitlab_token, api_version=4)
|
||||
# We can create an oauth_token using a username and password
|
||||
# https://docs.gitlab.com/ee/api/oauth2.html#authorization-code-flow
|
||||
if gitlab_user:
|
||||
data = {'grant_type': 'password', 'username': gitlab_user, 'password': gitlab_password}
|
||||
resp = requests.post(urljoin(gitlab_url, "oauth/token"), data=data, verify=validate_certs)
|
||||
resp_data = resp.json()
|
||||
gitlab_oauth_token = resp_data["access_token"]
|
||||
|
||||
gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, private_token=gitlab_token,
|
||||
oauth_token=gitlab_oauth_token, job_token=gitlab_job_token, api_version=4)
|
||||
|
||||
gitlab_instance.auth()
|
||||
except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
|
||||
|
||||
@@ -38,6 +38,7 @@ from ansible.module_utils.six.moves.urllib.parse import urlencode, quote
|
||||
from ansible.module_utils.six.moves.urllib.error import HTTPError
|
||||
from ansible.module_utils.common.text.converters import to_native, to_text
|
||||
|
||||
URL_REALM_INFO = "{url}/realms/{realm}"
|
||||
URL_REALMS = "{url}/admin/realms"
|
||||
URL_REALM = "{url}/admin/realms/{realm}"
|
||||
|
||||
@@ -230,6 +231,31 @@ class KeycloakAPI(object):
|
||||
self.validate_certs = self.module.params.get('validate_certs')
|
||||
self.restheaders = connection_header
|
||||
|
||||
def get_realm_info_by_id(self, realm='master'):
    """ Obtain realm public info by id

    Queries the public realm info endpoint and returns the decoded JSON
    payload.  Any failure other than a 404 aborts the module via fail_json.

    :param realm: realm id
    :return: dict of realm public info, or None if no realm with that id exists
    """
    realm_info_url = URL_REALM_INFO.format(url=self.baseurl, realm=realm)

    try:
        return json.loads(to_native(open_url(realm_info_url, method='GET', headers=self.restheaders,
                                             validate_certs=self.validate_certs).read()))

    except HTTPError as e:
        # A 404 simply means the realm does not exist; anything else is fatal.
        if e.code == 404:
            return None
        else:
            self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
                                  exception=traceback.format_exc())
    except ValueError as e:
        # The server answered, but not with valid JSON.
        self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)),
                              exception=traceback.format_exc())
    except Exception as e:
        # Catch-all for connection errors and the like.
        self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
                              exception=traceback.format_exc())
|
||||
def get_realm_by_id(self, realm='master'):
|
||||
""" Obtain realm representation by id
|
||||
|
||||
|
||||
232
plugins/module_utils/ilo_redfish_utils.py
Normal file
232
plugins/module_utils/ilo_redfish_utils.py
Normal file
@@ -0,0 +1,232 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved.
|
||||
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
|
||||
|
||||
|
||||
class iLORedfishUtils(RedfishUtils):
    """HPE iLO specific Redfish helpers built on top of RedfishUtils.

    Every method returns the usual RedfishUtils-style result dict carrying
    at least a 'ret' boolean; mutating methods add 'changed' and 'msg'.
    """

    def get_ilo_sessions(self):
        """List all iLO sessions except the caller's own session.

        :return: dict with 'ret' and, on success, 'msg' holding a list of
            per-session dicts limited to a few descriptive properties
        """
        result = {}
        # listing all users has always been slower than other operations, why?
        session_list = []
        sessions_results = []
        # Get these entries, but does not fail if not found
        properties = ['Description', 'Id', 'Name', 'UserName']

        # Default to None so the exclusion test below is safe even when the
        # OEM block identifying the current session is absent.  (Previously
        # this name could be referenced before assignment in that case.)
        current_session = None

        # Changed self.sessions_uri to hardcoded string.
        response = self.get_request(
            self.root_uri + self.service_root + "SessionService/Sessions/")
        if not response['ret']:
            return response
        result['ret'] = True
        data = response['data']

        if 'Oem' in data:
            if data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]:
                current_session = data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]

        for sessions in data[u'Members']:
            # session_list[] are URIs
            session_list.append(sessions[u'@odata.id'])
        # for each session, get details
        for uri in session_list:
            session = {}
            if uri != current_session:
                response = self.get_request(self.root_uri + uri)
                if not response['ret']:
                    return response
                data = response['data']
                for prop_name in properties:
                    if prop_name in data:
                        session[prop_name] = data[prop_name]
                sessions_results.append(session)
        result["msg"] = sessions_results
        result["ret"] = True
        return result

    def _disable_dhcp_option(self, ethuri, data, option):
        """Switch off a DHCP-supplied *option* (e.g. 'UseNTPServers',
        'UseDomainName') for both DHCPv4 and DHCPv6 on the interface at
        *ethuri*; *data* is that interface's current resource data.  A PATCH
        is only sent for protocols where the option is currently enabled.

        :return: None on success, otherwise the failing patch response
        """
        for proto in ("DHCPv4", "DHCPv6"):
            if data[proto][option]:
                res = self.patch_request(self.root_uri + ethuri, {proto: {option: False}})
                if not res['ret']:
                    return res
        return None

    def set_ntp_server(self, mgr_attributes):
        """Append an NTP server to the manager's static NTP server list.

        :param mgr_attributes: dict with 'mgr_attr_name' (the DateTime
            property holding the NTP list) and 'mgr_attr_value' (the server)
        """
        result = {}
        setkey = mgr_attributes['mgr_attr_name']

        nic_info = self.get_manager_ethernet_uri()
        ethuri = nic_info["nic_addr"]

        response = self.get_request(self.root_uri + ethuri)
        if not response['ret']:
            return response
        result['ret'] = True
        data = response['data']

        # Static NTP entries are ignored while DHCP supplies them, so stop
        # sourcing NTP servers from DHCP first.
        failed = self._disable_dhcp_option(ethuri, data, "UseNTPServers")
        if failed:
            return failed

        datetime_uri = self.manager_uri + "DateTime"

        response = self.get_request(self.root_uri + datetime_uri)
        if not response['ret']:
            return response

        data = response['data']

        ntp_list = data[setkey]
        # iLO keeps at most two entries; drop the oldest to make room.
        if len(ntp_list) == 2:
            ntp_list.pop(0)

        ntp_list.append(mgr_attributes['mgr_attr_value'])

        payload = {setkey: ntp_list}

        response1 = self.patch_request(self.root_uri + datetime_uri, payload)
        if not response1['ret']:
            return response1

        return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgr_attributes['mgr_attr_name']}

    def set_time_zone(self, attr):
        """Set the manager time zone by (partial) time-zone name.

        :param attr: dict with 'mgr_attr_name' (DateTime property to patch)
            and 'mgr_attr_value' (substring matched against TimeZoneList names)
        """
        key = attr['mgr_attr_name']

        uri = self.manager_uri + "DateTime/"
        response = self.get_request(self.root_uri + uri)
        if not response['ret']:
            return response

        data = response["data"]

        if key not in data:
            return {'ret': False, 'changed': False, 'msg': "Key %s not found" % key}

        timezones = data["TimeZoneList"]
        index = ""
        for tz in timezones:
            if attr['mgr_attr_value'] in tz["Name"]:
                index = tz["Index"]
                break
        # NOTE(review): when no time zone name matches, index stays "" and is
        # still sent in the PATCH -- confirm whether that should be an error.

        payload = {key: {"Index": index}}
        response = self.patch_request(self.root_uri + uri, payload)
        if not response['ret']:
            return response

        return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']}

    def set_dns_server(self, attr):
        """Append a DNS server to the interface's OEM IPv4 DNS server list.

        :param attr: dict with 'mgr_attr_name' (list property under
            Oem/Hpe/IPv4) and 'mgr_attr_value' (the DNS server address)
        """
        key = attr['mgr_attr_name']
        nic_info = self.get_manager_ethernet_uri()
        uri = nic_info["nic_addr"]

        response = self.get_request(self.root_uri + uri)
        if not response['ret']:
            return response

        data = response['data']

        dns_list = data["Oem"]["Hpe"]["IPv4"][key]

        # iLO keeps at most three entries; drop the oldest to make room.
        if len(dns_list) == 3:
            dns_list.pop(0)

        dns_list.append(attr['mgr_attr_value'])

        payload = {
            "Oem": {
                "Hpe": {
                    "IPv4": {
                        key: dns_list
                    }
                }
            }
        }

        response = self.patch_request(self.root_uri + uri, payload)
        if not response['ret']:
            return response

        return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']}

    def set_domain_name(self, attr):
        """Set the static domain name on the manager NIC.

        :param attr: dict with 'mgr_attr_name' (OEM property name under
            Oem/Hpe) and 'mgr_attr_value' (the domain name)
        """
        key = attr['mgr_attr_name']

        nic_info = self.get_manager_ethernet_uri()
        ethuri = nic_info["nic_addr"]

        response = self.get_request(self.root_uri + ethuri)
        if not response['ret']:
            return response

        data = response['data']

        # A static domain name is ignored while DHCP provides one, so stop
        # sourcing the domain name from DHCP first.
        failed = self._disable_dhcp_option(ethuri, data, "UseDomainName")
        if failed:
            return failed

        domain_name = attr['mgr_attr_value']

        payload = {"Oem": {
            "Hpe": {
                key: domain_name
            }
        }}

        response = self.patch_request(self.root_uri + ethuri, payload)
        if not response['ret']:
            return response
        return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']}

    def set_wins_registration(self, mgrattr):
        """Disable WINS registration on the manager NIC.

        :param mgrattr: dict with 'mgr_attr_name' naming the OEM IPv4
            property that is forced to False
        """
        key = mgrattr['mgr_attr_name']

        nic_info = self.get_manager_ethernet_uri()
        ethuri = nic_info["nic_addr"]

        payload = {
            "Oem": {
                "Hpe": {
                    "IPv4": {
                        key: False
                    }
                }
            }
        }

        response = self.patch_request(self.root_uri + ethuri, payload)
        if not response['ret']:
            return response
        return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgrattr['mgr_attr_name']}
|
||||
@@ -9,7 +9,8 @@ __metaclass__ = type
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.basic import missing_required_lib
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
|
||||
@@ -52,3 +52,36 @@ def module_fails_on_exception(func):
|
||||
self.module.fail_json(msg=msg, exception=traceback.format_exc(),
|
||||
output=self.output, vars=self.vars.output(), **self.output)
|
||||
return wrapper
|
||||
|
||||
|
||||
def check_mode_skip(func):
    """Decorator: run *func* only when the module is not in check mode.

    In check mode the wrapped method is skipped entirely and None is
    returned instead of its result.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        if self.module.check_mode:
            return None
        return func(self, *args, **kwargs)
    return wrapper
|
||||
|
||||
|
||||
def check_mode_skip_returns(callable=None, value=None):
    """Decorator factory for methods that must not run in check mode.

    When the module is in check mode the wrapped method is not executed;
    instead *callable* is invoked with the same arguments, or, failing
    that, *value* is returned.  If *callable* is given it takes precedence
    over *value*.  With neither supplied this degrades to the plain
    ``check_mode_skip`` decorator.
    """

    def deco(func):
        if callable is not None:
            @wraps(func)
            def wrapper_callable(self, *args, **kwargs):
                target = callable if self.module.check_mode else func
                return target(self, *args, **kwargs)
            return wrapper_callable

        if value is not None:
            @wraps(func)
            def wrapper_value(self, *args, **kwargs):
                if self.module.check_mode:
                    return value
                return func(self, *args, **kwargs)
            return wrapper_value

    if callable is None and value is None:
        return check_mode_skip

    return deco
|
||||
|
||||
61
plugins/module_utils/mh/mixins/deprecate_attrs.py
Normal file
61
plugins/module_utils/mh/mixins/deprecate_attrs.py
Normal file
@@ -0,0 +1,61 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# (c) 2020, Alexei Znamensky <russoz@gmail.com>
|
||||
# Copyright: (c) 2020, Ansible Project
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
class DeprecateAttrsMixin(object):
    """Mixin that lets an object mark one of its attributes as deprecated.

    Reading or writing a deprecated attribute emits a one-time Ansible
    deprecation warning via the associated AnsibleModule, while the value
    itself keeps working through a generated property.
    """

    def _deprecate_setup(self, attr, target, module):
        """Validate *attr*/*target*, discover the AnsibleModule, and return
        the bookkeeping dicts.

        :param attr: name of the attribute being deprecated
        :param target: object (or class) carrying the attribute; defaults to self
        :param module: AnsibleModule used for deprecation warnings; if None it
            is discovered from *target* or *target.module*
        :return: (target, module, value_dict, trigger_dict)
        :raises ValueError: if *target* lacks *attr*, or no AnsibleModule can
            be discovered
        """
        if target is None:
            target = self
        if not hasattr(target, attr):
            raise ValueError("Target {0} has no attribute {1}".format(target, attr))
        if module is None:
            if isinstance(target, AnsibleModule):
                module = target
            elif hasattr(target, "module") and isinstance(target.module, AnsibleModule):
                module = target.module
            else:
                raise ValueError("Failed to automatically discover the AnsibleModule instance. Pass 'module' parameter explicitly.")

        # setup internal state dicts
        # value_dict holds the live value per deprecated attribute;
        # trigger_dict remembers whether the warning was already emitted.
        value_attr = "__deprecated_attr_value"
        trigger_attr = "__deprecated_attr_trigger"
        if not hasattr(target, value_attr):
            setattr(target, value_attr, {})
        if not hasattr(target, trigger_attr):
            setattr(target, trigger_attr, {})
        value_dict = getattr(target, value_attr)
        trigger_dict = getattr(target, trigger_attr)
        return target, module, value_dict, trigger_dict

    def _deprecate_attr(self, attr, msg, version=None, date=None, collection_name=None, target=None, value=None, module=None):
        """Replace *attr* on *target* with a property that warns on first use.

        :param attr: attribute name to deprecate
        :param msg: deprecation message passed to module.deprecate
        :param version: collection version the attribute will be removed in
        :param date: alternative removal date
        :param collection_name: collection emitting the deprecation
        :param target: object or class carrying the attribute (defaults to self)
        :param value: fallback initial value if *target* has no current value
        :param module: AnsibleModule used to emit the warning
        """
        target, module, value_dict, trigger_dict = self._deprecate_setup(attr, target, module)

        # Preserve the current value (or the supplied fallback) so the
        # generated property keeps returning it.
        value_dict[attr] = getattr(target, attr, value)
        trigger_dict[attr] = False

        def _trigger():
            # Emit the deprecation warning at most once per target/attr.
            if not trigger_dict[attr]:
                module.deprecate(msg, version=version, date=date, collection_name=collection_name)
                trigger_dict[attr] = True

        def _getter(_self):
            _trigger()
            return value_dict[attr]

        def _setter(_self, new_value):
            _trigger()
            value_dict[attr] = new_value

        # override attribute
        # NOTE(review): property objects only take effect when set on a
        # class; if *target* is an instance, the property object itself is
        # stored and reads return it verbatim -- confirm intended targets
        # are classes (as in ModuleHelper's usage).
        prop = property(_getter)
        setattr(target, attr, prop)
        setattr(target, "_{0}_setter".format(attr), prop.setter(_setter))
|
||||
@@ -13,9 +13,10 @@ from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd im
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyMixin
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin, VarDict as _VD
|
||||
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deprecate_attrs import DeprecateAttrsMixin
|
||||
|
||||
|
||||
class ModuleHelper(VarsMixin, DependencyMixin, ModuleHelperBase):
|
||||
class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelperBase):
|
||||
_output_conflict_list = ('msg', 'exception', 'output', 'vars', 'changed')
|
||||
facts_name = None
|
||||
output_params = ()
|
||||
@@ -36,6 +37,15 @@ class ModuleHelper(VarsMixin, DependencyMixin, ModuleHelperBase):
|
||||
fact=name in self.facts_params,
|
||||
)
|
||||
|
||||
self._deprecate_attr(
|
||||
attr="VarDict",
|
||||
msg="ModuleHelper.VarDict attribute is deprecated, use VarDict from "
|
||||
"the ansible_collections.community.general.plugins.module_utils.mh.mixins.vars module instead",
|
||||
version="6.0.0",
|
||||
collection_name="community.general",
|
||||
target=ModuleHelper,
|
||||
module=self.module)
|
||||
|
||||
def update_output(self, **kwargs):
|
||||
self.update_vars(meta={"output": True}, **kwargs)
|
||||
|
||||
|
||||
@@ -54,6 +54,17 @@ def proxmox_to_ansible_bool(value):
|
||||
return True if value == 1 else False
|
||||
|
||||
|
||||
def ansible_to_proxmox_bool(value):
    """Map an Ansible boolean onto the 0/1 integers Proxmox expects.

    None passes through unchanged; any non-bool, non-None value raises
    ValueError.
    """
    if value is None:
        return None
    if isinstance(value, bool):
        return int(value)
    raise ValueError("%s must be of type bool not %s" % (value, type(value)))
|
||||
|
||||
|
||||
class ProxmoxAnsible(object):
|
||||
"""Base class for Proxmox modules"""
|
||||
def __init__(self, module):
|
||||
|
||||
@@ -1834,12 +1834,16 @@ class RedfishUtils(object):
|
||||
result['ret'] = True
|
||||
data = response['data']
|
||||
|
||||
for device in data[u'Fans']:
|
||||
fan = {}
|
||||
for property in properties:
|
||||
if property in device:
|
||||
fan[property] = device[property]
|
||||
fan_results.append(fan)
|
||||
# Checking if fans are present
|
||||
if u'Fans' in data:
|
||||
for device in data[u'Fans']:
|
||||
fan = {}
|
||||
for property in properties:
|
||||
if property in device:
|
||||
fan[property] = device[property]
|
||||
fan_results.append(fan)
|
||||
else:
|
||||
return {'ret': False, 'msg': "No Fans present"}
|
||||
result["entries"] = fan_results
|
||||
return result
|
||||
|
||||
@@ -2029,15 +2033,28 @@ class RedfishUtils(object):
|
||||
def get_multi_memory_inventory(self):
|
||||
return self.aggregate_systems(self.get_memory_inventory)
|
||||
|
||||
def get_nic(self, resource_uri):
|
||||
result = {}
|
||||
properties = ['Name', 'Id', 'Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses',
|
||||
'NameServers', 'MACAddress', 'PermanentMACAddress',
|
||||
'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status']
|
||||
response = self.get_request(self.root_uri + resource_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
result['ret'] = True
|
||||
data = response['data']
|
||||
nic = {}
|
||||
for property in properties:
|
||||
if property in data:
|
||||
nic[property] = data[property]
|
||||
result['entries'] = nic
|
||||
return(result)
|
||||
|
||||
def get_nic_inventory(self, resource_uri):
|
||||
result = {}
|
||||
nic_list = []
|
||||
nic_results = []
|
||||
key = "EthernetInterfaces"
|
||||
# Get these entries, but does not fail if not found
|
||||
properties = ['Name', 'Id', 'Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses',
|
||||
'NameServers', 'MACAddress', 'PermanentMACAddress',
|
||||
'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status']
|
||||
|
||||
response = self.get_request(self.root_uri + resource_uri)
|
||||
if response['ret'] is False:
|
||||
@@ -2061,18 +2078,9 @@ class RedfishUtils(object):
|
||||
nic_list.append(nic[u'@odata.id'])
|
||||
|
||||
for n in nic_list:
|
||||
nic = {}
|
||||
uri = self.root_uri + n
|
||||
response = self.get_request(uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
|
||||
for property in properties:
|
||||
if property in data:
|
||||
nic[property] = data[property]
|
||||
|
||||
nic_results.append(nic)
|
||||
nic = self.get_nic(n)
|
||||
if nic['ret']:
|
||||
nic_results.append(nic['entries'])
|
||||
result["entries"] = nic_results
|
||||
return result
|
||||
|
||||
@@ -2697,39 +2705,14 @@ class RedfishUtils(object):
|
||||
return self.aggregate_managers(self.get_manager_health_report)
|
||||
|
||||
def set_manager_nic(self, nic_addr, nic_config):
|
||||
# Get EthernetInterface collection
|
||||
response = self.get_request(self.root_uri + self.manager_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
if 'EthernetInterfaces' not in data:
|
||||
return {'ret': False, 'msg': "EthernetInterfaces resource not found"}
|
||||
ethernetinterfaces_uri = data["EthernetInterfaces"]["@odata.id"]
|
||||
response = self.get_request(self.root_uri + ethernetinterfaces_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
uris = [a.get('@odata.id') for a in data.get('Members', []) if
|
||||
a.get('@odata.id')]
|
||||
# Get the manager ethernet interface uri
|
||||
nic_info = self.get_manager_ethernet_uri(nic_addr)
|
||||
|
||||
# Find target EthernetInterface
|
||||
target_ethernet_uri = None
|
||||
target_ethernet_current_setting = None
|
||||
if nic_addr == 'null':
|
||||
# Find root_uri matched EthernetInterface when nic_addr is not specified
|
||||
nic_addr = (self.root_uri).split('/')[-1]
|
||||
nic_addr = nic_addr.split(':')[0] # split port if existing
|
||||
for uri in uris:
|
||||
response = self.get_request(self.root_uri + uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
if '"' + nic_addr.lower() + '"' in str(data).lower() or "'" + nic_addr.lower() + "'" in str(data).lower():
|
||||
target_ethernet_uri = uri
|
||||
target_ethernet_current_setting = data
|
||||
break
|
||||
if target_ethernet_uri is None:
|
||||
return {'ret': False, 'msg': "No matched EthernetInterface found under Manager"}
|
||||
if nic_info.get('nic_addr') is None:
|
||||
return nic_info
|
||||
else:
|
||||
target_ethernet_uri = nic_info['nic_addr']
|
||||
target_ethernet_current_setting = nic_info['ethernet_setting']
|
||||
|
||||
# Convert input to payload and check validity
|
||||
payload = {}
|
||||
@@ -2792,3 +2775,208 @@ class RedfishUtils(object):
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
return {'ret': True, 'changed': True, 'msg': "Modified Manager NIC"}
|
||||
|
||||
# A helper function to get the EthernetInterface URI
|
||||
def get_manager_ethernet_uri(self, nic_addr='null'):
|
||||
# Get EthernetInterface collection
|
||||
response = self.get_request(self.root_uri + self.manager_uri)
|
||||
if not response['ret']:
|
||||
return response
|
||||
data = response['data']
|
||||
if 'EthernetInterfaces' not in data:
|
||||
return {'ret': False, 'msg': "EthernetInterfaces resource not found"}
|
||||
ethernetinterfaces_uri = data["EthernetInterfaces"]["@odata.id"]
|
||||
response = self.get_request(self.root_uri + ethernetinterfaces_uri)
|
||||
if not response['ret']:
|
||||
return response
|
||||
data = response['data']
|
||||
uris = [a.get('@odata.id') for a in data.get('Members', []) if
|
||||
a.get('@odata.id')]
|
||||
|
||||
# Find target EthernetInterface
|
||||
target_ethernet_uri = None
|
||||
target_ethernet_current_setting = None
|
||||
if nic_addr == 'null':
|
||||
# Find root_uri matched EthernetInterface when nic_addr is not specified
|
||||
nic_addr = (self.root_uri).split('/')[-1]
|
||||
nic_addr = nic_addr.split(':')[0] # split port if existing
|
||||
for uri in uris:
|
||||
response = self.get_request(self.root_uri + uri)
|
||||
if not response['ret']:
|
||||
return response
|
||||
data = response['data']
|
||||
data_string = json.dumps(data)
|
||||
if nic_addr.lower() in data_string.lower():
|
||||
target_ethernet_uri = uri
|
||||
target_ethernet_current_setting = data
|
||||
break
|
||||
|
||||
nic_info = {}
|
||||
nic_info['nic_addr'] = target_ethernet_uri
|
||||
nic_info['ethernet_setting'] = target_ethernet_current_setting
|
||||
|
||||
if target_ethernet_uri is None:
|
||||
return {}
|
||||
else:
|
||||
return nic_info
|
||||
|
||||
def set_hostinterface_attributes(self, hostinterface_config, hostinterface_id=None):
|
||||
response = self.get_request(self.root_uri + self.manager_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
if 'HostInterfaces' not in data:
|
||||
return {'ret': False, 'msg': "HostInterfaces resource not found"}
|
||||
|
||||
hostinterfaces_uri = data["HostInterfaces"]["@odata.id"]
|
||||
response = self.get_request(self.root_uri + hostinterfaces_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
uris = [a.get('@odata.id') for a in data.get('Members', []) if a.get('@odata.id')]
|
||||
# Capture list of URIs that match a specified HostInterface resource ID
|
||||
if hostinterface_id:
|
||||
matching_hostinterface_uris = [uri for uri in uris if hostinterface_id in uri.split('/')[-1]]
|
||||
|
||||
if hostinterface_id and matching_hostinterface_uris:
|
||||
hostinterface_uri = list.pop(matching_hostinterface_uris)
|
||||
elif hostinterface_id and not matching_hostinterface_uris:
|
||||
return {'ret': False, 'msg': "HostInterface ID %s not present." % hostinterface_id}
|
||||
elif len(uris) == 1:
|
||||
hostinterface_uri = list.pop(uris)
|
||||
else:
|
||||
return {'ret': False, 'msg': "HostInterface ID not defined and multiple interfaces detected."}
|
||||
|
||||
response = self.get_request(self.root_uri + hostinterface_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
current_hostinterface_config = response['data']
|
||||
payload = {}
|
||||
for property in hostinterface_config.keys():
|
||||
value = hostinterface_config[property]
|
||||
if property not in current_hostinterface_config:
|
||||
return {'ret': False, 'msg': "Property %s in hostinterface_config is invalid" % property}
|
||||
if isinstance(value, dict):
|
||||
if isinstance(current_hostinterface_config[property], dict):
|
||||
payload[property] = value
|
||||
elif isinstance(current_hostinterface_config[property], list):
|
||||
payload[property] = list()
|
||||
payload[property].append(value)
|
||||
else:
|
||||
return {'ret': False, 'msg': "Value of property %s in hostinterface_config is invalid" % property}
|
||||
else:
|
||||
payload[property] = value
|
||||
|
||||
need_change = False
|
||||
for property in payload.keys():
|
||||
set_value = payload[property]
|
||||
cur_value = current_hostinterface_config[property]
|
||||
if not isinstance(set_value, dict) and not isinstance(set_value, list):
|
||||
if set_value != cur_value:
|
||||
need_change = True
|
||||
if isinstance(set_value, dict):
|
||||
for subprop in payload[property].keys():
|
||||
if subprop not in current_hostinterface_config[property]:
|
||||
need_change = True
|
||||
break
|
||||
sub_set_value = payload[property][subprop]
|
||||
sub_cur_value = current_hostinterface_config[property][subprop]
|
||||
if sub_set_value != sub_cur_value:
|
||||
need_change = True
|
||||
if isinstance(set_value, list):
|
||||
if len(set_value) != len(cur_value):
|
||||
need_change = True
|
||||
continue
|
||||
for i in range(len(set_value)):
|
||||
for subprop in payload[property][i].keys():
|
||||
if subprop not in current_hostinterface_config[property][i]:
|
||||
need_change = True
|
||||
break
|
||||
sub_set_value = payload[property][i][subprop]
|
||||
sub_cur_value = current_hostinterface_config[property][i][subprop]
|
||||
if sub_set_value != sub_cur_value:
|
||||
need_change = True
|
||||
if not need_change:
|
||||
return {'ret': True, 'changed': False, 'msg': "Host Interface already configured"}
|
||||
|
||||
response = self.patch_request(self.root_uri + hostinterface_uri, payload)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
return {'ret': True, 'changed': True, 'msg': "Modified Host Interface"}
|
||||
|
||||
def get_hostinterfaces(self):
|
||||
result = {}
|
||||
hostinterface_results = []
|
||||
properties = ['Id', 'Name', 'Description', 'HostInterfaceType', 'Status',
|
||||
'InterfaceEnabled', 'ExternallyAccessible', 'AuthenticationModes',
|
||||
'AuthNoneRoleId', 'CredentialBootstrapping']
|
||||
manager_uri_list = self.manager_uris
|
||||
for manager_uri in manager_uri_list:
|
||||
response = self.get_request(self.root_uri + manager_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
|
||||
result['ret'] = True
|
||||
data = response['data']
|
||||
|
||||
if 'HostInterfaces' in data:
|
||||
hostinterfaces_uri = data[u'HostInterfaces'][u'@odata.id']
|
||||
else:
|
||||
continue
|
||||
|
||||
response = self.get_request(self.root_uri + hostinterfaces_uri)
|
||||
data = response['data']
|
||||
|
||||
if 'Members' in data:
|
||||
for hostinterface in data['Members']:
|
||||
hostinterface_uri = hostinterface['@odata.id']
|
||||
hostinterface_response = self.get_request(self.root_uri + hostinterface_uri)
|
||||
# dictionary for capturing individual HostInterface properties
|
||||
hostinterface_data_temp = {}
|
||||
if hostinterface_response['ret'] is False:
|
||||
return hostinterface_response
|
||||
hostinterface_data = hostinterface_response['data']
|
||||
for property in properties:
|
||||
if property in hostinterface_data:
|
||||
if hostinterface_data[property] is not None:
|
||||
hostinterface_data_temp[property] = hostinterface_data[property]
|
||||
# Check for the presence of a ManagerEthernetInterface
|
||||
# object, a link to a _single_ EthernetInterface that the
|
||||
# BMC uses to communicate with the host.
|
||||
if 'ManagerEthernetInterface' in hostinterface_data:
|
||||
interface_uri = hostinterface_data['ManagerEthernetInterface']['@odata.id']
|
||||
interface_response = self.get_nic(interface_uri)
|
||||
if interface_response['ret'] is False:
|
||||
return interface_response
|
||||
hostinterface_data_temp['ManagerEthernetInterface'] = interface_response['entries']
|
||||
|
||||
# Check for the presence of a HostEthernetInterfaces
|
||||
# object, a link to a _collection_ of EthernetInterfaces
|
||||
# that the host uses to communicate with the BMC.
|
||||
if 'HostEthernetInterfaces' in hostinterface_data:
|
||||
interfaces_uri = hostinterface_data['HostEthernetInterfaces']['@odata.id']
|
||||
interfaces_response = self.get_request(self.root_uri + interfaces_uri)
|
||||
if interfaces_response['ret'] is False:
|
||||
return interfaces_response
|
||||
interfaces_data = interfaces_response['data']
|
||||
if 'Members' in interfaces_data:
|
||||
for interface in interfaces_data['Members']:
|
||||
interface_uri = interface['@odata.id']
|
||||
interface_response = self.get_nic(interface_uri)
|
||||
if interface_response['ret'] is False:
|
||||
return interface_response
|
||||
# Check if this is the first
|
||||
# HostEthernetInterfaces item and create empty
|
||||
# list if so.
|
||||
if 'HostEthernetInterfaces' not in hostinterface_data_temp:
|
||||
hostinterface_data_temp['HostEthernetInterfaces'] = []
|
||||
|
||||
hostinterface_data_temp['HostEthernetInterfaces'].append(interface_response['entries'])
|
||||
|
||||
hostinterface_results.append(hostinterface_data_temp)
|
||||
else:
|
||||
continue
|
||||
result["entries"] = hostinterface_results
|
||||
if not result["entries"]:
|
||||
return {'ret': False, 'msg': "No HostInterface objects found"}
|
||||
return result
|
||||
|
||||
17
plugins/module_utils/version.py
Normal file
17
plugins/module_utils/version.py
Normal file
@@ -0,0 +1,17 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
"""Provide version object to compare version numbers."""
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
# Once we drop support for Ansible 2.9, ansible-base 2.10, and ansible-core 2.11, we can
|
||||
# remove the _version.py file, and replace the following import by
|
||||
#
|
||||
# from ansible.module_utils.compat.version import LooseVersion
|
||||
|
||||
from ._version import LooseVersion
|
||||
1
plugins/modules/cargo.py
Symbolic link
1
plugins/modules/cargo.py
Symbolic link
@@ -0,0 +1 @@
|
||||
packaging/language/cargo.py
|
||||
@@ -120,7 +120,7 @@ __version__ = '${version}'
|
||||
import os
|
||||
import traceback
|
||||
|
||||
from distutils.version import LooseVersion
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
|
||||
@@ -161,7 +161,8 @@ __version__ = '${version}'
|
||||
import json
|
||||
import os
|
||||
import traceback
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
|
||||
@@ -89,7 +89,8 @@ __version__ = '${version}'
|
||||
|
||||
import os
|
||||
import traceback
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
@@ -132,8 +133,7 @@ class ClcBlueprintPackage:
|
||||
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
||||
if not REQUESTS_FOUND:
|
||||
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
||||
if requests.__version__ and LooseVersion(
|
||||
requests.__version__) < LooseVersion('2.5.0'):
|
||||
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
|
||||
self.module.fail_json(
|
||||
msg='requests library version should be >= 2.5.0')
|
||||
|
||||
|
||||
@@ -162,7 +162,8 @@ import os
|
||||
import traceback
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlparse
|
||||
from time import sleep
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
@@ -203,8 +204,7 @@ class ClcFirewallPolicy:
|
||||
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
||||
if not REQUESTS_FOUND:
|
||||
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
||||
if requests.__version__ and LooseVersion(
|
||||
requests.__version__) < LooseVersion('2.5.0'):
|
||||
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
|
||||
self.module.fail_json(
|
||||
msg='requests library version should be >= 2.5.0')
|
||||
|
||||
|
||||
@@ -207,7 +207,8 @@ __version__ = '${version}'
|
||||
|
||||
import os
|
||||
import traceback
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
|
||||
@@ -210,7 +210,8 @@ import json
|
||||
import os
|
||||
import traceback
|
||||
from time import sleep
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
@@ -255,8 +256,7 @@ class ClcLoadBalancer:
|
||||
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
||||
if not REQUESTS_FOUND:
|
||||
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
||||
if requests.__version__ and LooseVersion(
|
||||
requests.__version__) < LooseVersion('2.5.0'):
|
||||
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
|
||||
self.module.fail_json(
|
||||
msg='requests library version should be >= 2.5.0')
|
||||
|
||||
|
||||
@@ -311,7 +311,8 @@ __version__ = '${version}'
|
||||
import json
|
||||
import os
|
||||
import traceback
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
@@ -355,8 +356,7 @@ class ClcModifyServer:
|
||||
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
||||
if not REQUESTS_FOUND:
|
||||
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
||||
if requests.__version__ and LooseVersion(
|
||||
requests.__version__) < LooseVersion('2.5.0'):
|
||||
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
|
||||
self.module.fail_json(
|
||||
msg='requests library version should be >= 2.5.0')
|
||||
|
||||
|
||||
@@ -117,7 +117,8 @@ __version__ = '${version}'
|
||||
|
||||
import os
|
||||
import traceback
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
|
||||
@@ -433,7 +433,8 @@ import json
|
||||
import os
|
||||
import time
|
||||
import traceback
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
@@ -478,8 +479,7 @@ class ClcServer:
|
||||
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
||||
if not REQUESTS_FOUND:
|
||||
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
||||
if requests.__version__ and LooseVersion(
|
||||
requests.__version__) < LooseVersion('2.5.0'):
|
||||
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
|
||||
self.module.fail_json(
|
||||
msg='requests library version should be >= 2.5.0')
|
||||
|
||||
|
||||
@@ -101,7 +101,8 @@ __version__ = '${version}'
|
||||
|
||||
import os
|
||||
import traceback
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
@@ -145,8 +146,7 @@ class ClcSnapshot:
|
||||
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
||||
if not REQUESTS_FOUND:
|
||||
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
||||
if requests.__version__ and LooseVersion(
|
||||
requests.__version__) < LooseVersion('2.5.0'):
|
||||
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
|
||||
self.module.fail_json(
|
||||
msg='requests library version should be >= 2.5.0')
|
||||
|
||||
|
||||
@@ -422,6 +422,7 @@ import shutil
|
||||
import subprocess
|
||||
import tempfile
|
||||
import time
|
||||
import shlex
|
||||
|
||||
try:
|
||||
import lxc
|
||||
@@ -661,9 +662,8 @@ class LxcContainerManagement(object):
|
||||
"""
|
||||
|
||||
for key, value in variables_dict.items():
|
||||
build_command.append(
|
||||
'%s %s' % (key, value)
|
||||
)
|
||||
build_command.append(str(key))
|
||||
build_command.append(str(value))
|
||||
return build_command
|
||||
|
||||
def _get_vars(self, variables):
|
||||
@@ -686,24 +686,6 @@ class LxcContainerManagement(object):
|
||||
return_dict[v] = _var
|
||||
return return_dict
|
||||
|
||||
def _run_command(self, build_command, unsafe_shell=False):
|
||||
"""Return information from running an Ansible Command.
|
||||
|
||||
This will squash the build command list into a string and then
|
||||
execute the command via Ansible. The output is returned to the method.
|
||||
This output is returned as `return_code`, `stdout`, `stderr`.
|
||||
|
||||
:param build_command: Used for the command and all options.
|
||||
:type build_command: ``list``
|
||||
:param unsafe_shell: Enable or Disable unsafe sell commands.
|
||||
:type unsafe_shell: ``bol``
|
||||
"""
|
||||
|
||||
return self.module.run_command(
|
||||
' '.join(build_command),
|
||||
use_unsafe_shell=unsafe_shell
|
||||
)
|
||||
|
||||
def _config(self):
|
||||
"""Configure an LXC container.
|
||||
|
||||
@@ -810,7 +792,7 @@ class LxcContainerManagement(object):
|
||||
elif self.module.params.get('backing_store') == 'overlayfs':
|
||||
build_command.append('--snapshot')
|
||||
|
||||
rc, return_data, err = self._run_command(build_command)
|
||||
rc, return_data, err = self.module.run_command(build_command)
|
||||
if rc != 0:
|
||||
message = "Failed executing %s." % os.path.basename(clone_cmd)
|
||||
self.failure(
|
||||
@@ -843,7 +825,7 @@ class LxcContainerManagement(object):
|
||||
|
||||
build_command = [
|
||||
self.module.get_bin_path('lxc-create', True),
|
||||
'--name %s' % self.container_name,
|
||||
'--name', self.container_name,
|
||||
'--quiet'
|
||||
]
|
||||
|
||||
@@ -869,10 +851,12 @@ class LxcContainerManagement(object):
|
||||
log_path = os.getenv('HOME')
|
||||
|
||||
build_command.extend([
|
||||
'--logfile %s' % os.path.join(
|
||||
'--logfile',
|
||||
os.path.join(
|
||||
log_path, 'lxc-%s.log' % self.container_name
|
||||
),
|
||||
'--logpriority %s' % self.module.params.get(
|
||||
'--logpriority',
|
||||
self.module.params.get(
|
||||
'container_log_level'
|
||||
).upper()
|
||||
])
|
||||
@@ -880,9 +864,10 @@ class LxcContainerManagement(object):
|
||||
# Add the template commands to the end of the command if there are any
|
||||
template_options = self.module.params.get('template_options', None)
|
||||
if template_options:
|
||||
build_command.append('-- %s' % template_options)
|
||||
build_command.append('--')
|
||||
build_command += shlex.split(template_options)
|
||||
|
||||
rc, return_data, err = self._run_command(build_command)
|
||||
rc, return_data, err = self.module.run_command(build_command)
|
||||
if rc != 0:
|
||||
message = "Failed executing lxc-create."
|
||||
self.failure(
|
||||
@@ -1186,7 +1171,7 @@ class LxcContainerManagement(object):
|
||||
self.module.get_bin_path('lxc-config', True),
|
||||
"lxc.bdev.lvm.vg"
|
||||
]
|
||||
rc, vg, err = self._run_command(build_command)
|
||||
rc, vg, err = self.module.run_command(build_command)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
@@ -1204,7 +1189,7 @@ class LxcContainerManagement(object):
|
||||
build_command = [
|
||||
self.module.get_bin_path('lvs', True)
|
||||
]
|
||||
rc, stdout, err = self._run_command(build_command)
|
||||
rc, stdout, err = self.module.run_command(build_command)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
@@ -1231,7 +1216,7 @@ class LxcContainerManagement(object):
|
||||
'--units',
|
||||
'g'
|
||||
]
|
||||
rc, stdout, err = self._run_command(build_command)
|
||||
rc, stdout, err = self.module.run_command(build_command)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
@@ -1262,7 +1247,7 @@ class LxcContainerManagement(object):
|
||||
'--units',
|
||||
'g'
|
||||
]
|
||||
rc, stdout, err = self._run_command(build_command)
|
||||
rc, stdout, err = self.module.run_command(build_command)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
@@ -1311,7 +1296,7 @@ class LxcContainerManagement(object):
|
||||
os.path.join(vg, source_lv),
|
||||
"-L%sg" % snapshot_size_gb
|
||||
]
|
||||
rc, stdout, err = self._run_command(build_command)
|
||||
rc, stdout, err = self.module.run_command(build_command)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
@@ -1336,7 +1321,7 @@ class LxcContainerManagement(object):
|
||||
"/dev/%s/%s" % (vg, lv_name),
|
||||
mount_point,
|
||||
]
|
||||
rc, stdout, err = self._run_command(build_command)
|
||||
rc, stdout, err = self.module.run_command(build_command)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
@@ -1380,9 +1365,8 @@ class LxcContainerManagement(object):
|
||||
'.'
|
||||
]
|
||||
|
||||
rc, stdout, err = self._run_command(
|
||||
build_command=build_command,
|
||||
unsafe_shell=True
|
||||
rc, stdout, err = self.module.run_command(
|
||||
build_command
|
||||
)
|
||||
|
||||
os.umask(old_umask)
|
||||
@@ -1410,7 +1394,7 @@ class LxcContainerManagement(object):
|
||||
"-f",
|
||||
"%s/%s" % (vg, lv_name),
|
||||
]
|
||||
rc, stdout, err = self._run_command(build_command)
|
||||
rc, stdout, err = self.module.run_command(build_command)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
@@ -1442,11 +1426,10 @@ class LxcContainerManagement(object):
|
||||
self.module.get_bin_path('rsync', True),
|
||||
'-aHAX',
|
||||
fs_path,
|
||||
temp_dir
|
||||
temp_dir,
|
||||
]
|
||||
rc, stdout, err = self._run_command(
|
||||
rc, stdout, err = self.module.run_command(
|
||||
build_command,
|
||||
unsafe_shell=True
|
||||
)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
@@ -1467,7 +1450,7 @@ class LxcContainerManagement(object):
|
||||
self.module.get_bin_path('umount', True),
|
||||
mount_point,
|
||||
]
|
||||
rc, stdout, err = self._run_command(build_command)
|
||||
rc, stdout, err = self.module.run_command(build_command)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
@@ -1489,12 +1472,12 @@ class LxcContainerManagement(object):
|
||||
|
||||
build_command = [
|
||||
self.module.get_bin_path('mount', True),
|
||||
'-t overlayfs',
|
||||
'-o lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
|
||||
'-t', 'overlayfs',
|
||||
'-o', 'lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
|
||||
'overlayfs',
|
||||
mount_point,
|
||||
]
|
||||
rc, stdout, err = self._run_command(build_command)
|
||||
rc, stdout, err = self.module.run_command(build_command)
|
||||
if rc != 0:
|
||||
self.failure(
|
||||
err=err,
|
||||
|
||||
@@ -11,29 +11,28 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: lxd_container
|
||||
short_description: Manage LXD Containers
|
||||
short_description: Manage LXD instances
|
||||
description:
|
||||
- Management of LXD containers
|
||||
- Management of LXD containers and virtual machines.
|
||||
author: "Hiroaki Nakamura (@hnakamur)"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of a container.
|
||||
- Name of an instance.
|
||||
type: str
|
||||
required: true
|
||||
architecture:
|
||||
description:
|
||||
- 'The architecture for the container (for example C(x86_64) or C(i686)).
|
||||
- 'The architecture for the instance (for example C(x86_64) or C(i686)).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
|
||||
type: str
|
||||
required: false
|
||||
config:
|
||||
description:
|
||||
- 'The config for the container (for example C({"limits.cpu": "2"})).
|
||||
- 'The config for the instance (for example C({"limits.cpu": "2"})).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
|
||||
- If the container already exists and its "config" values in metadata
|
||||
obtained from GET /1.0/containers/<name>
|
||||
U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersname)
|
||||
- If the instance already exists and its "config" values in metadata
|
||||
obtained from the LXD API U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#instances-containers-and-virtual-machines)
|
||||
are different, this module tries to apply the configurations.
|
||||
- The keys starting with C(volatile.) are ignored for this comparison when I(ignore_volatile_options=true).
|
||||
type: dict
|
||||
@@ -50,25 +49,25 @@ options:
|
||||
version_added: 3.7.0
|
||||
profiles:
|
||||
description:
|
||||
- Profile to be used by the container.
|
||||
- Profile to be used by the instance.
|
||||
type: list
|
||||
elements: str
|
||||
devices:
|
||||
description:
|
||||
- 'The devices for the container
|
||||
- 'The devices for the instance
|
||||
(for example C({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
|
||||
type: dict
|
||||
required: false
|
||||
ephemeral:
|
||||
description:
|
||||
- Whether or not the container is ephemeral (for example C(true) or C(false)).
|
||||
- Whether or not the instance is ephemeral (for example C(true) or C(false)).
|
||||
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).
|
||||
required: false
|
||||
type: bool
|
||||
source:
|
||||
description:
|
||||
- 'The source for the container
|
||||
- 'The source for the instance
|
||||
(e.g. { "type": "image",
|
||||
"mode": "pull",
|
||||
"server": "https://images.linuxcontainers.org",
|
||||
@@ -86,39 +85,49 @@ options:
|
||||
- absent
|
||||
- frozen
|
||||
description:
|
||||
- Define the state of a container.
|
||||
- Define the state of an instance.
|
||||
required: false
|
||||
default: started
|
||||
type: str
|
||||
target:
|
||||
description:
|
||||
- For cluster deployments. Will attempt to create a container on a target node.
|
||||
If container exists elsewhere in a cluster, then container will not be replaced or moved.
|
||||
- For cluster deployments. Will attempt to create an instance on a target node.
|
||||
If the instance exists elsewhere in a cluster, then it will not be replaced or moved.
|
||||
The name should respond to same name of the node you see in C(lxc cluster list).
|
||||
type: str
|
||||
required: false
|
||||
version_added: 1.0.0
|
||||
timeout:
|
||||
description:
|
||||
- A timeout for changing the state of the container.
|
||||
- A timeout for changing the state of the instance.
|
||||
- This is also used as a timeout for waiting until IPv4 addresses
|
||||
are set to the all network interfaces in the container after
|
||||
are set to the all network interfaces in the instance after
|
||||
starting or restarting.
|
||||
required: false
|
||||
default: 30
|
||||
type: int
|
||||
type:
|
||||
description:
|
||||
- Instance type can be either C(virtual-machine) or C(container).
|
||||
required: false
|
||||
default: container
|
||||
choices:
|
||||
- container
|
||||
- virtual-machine
|
||||
type: str
|
||||
version_added: 4.1.0
|
||||
wait_for_ipv4_addresses:
|
||||
description:
|
||||
- If this is true, the C(lxd_container) waits until IPv4 addresses
|
||||
are set to the all network interfaces in the container after
|
||||
are set to the all network interfaces in the instance after
|
||||
starting or restarting.
|
||||
required: false
|
||||
default: false
|
||||
type: bool
|
||||
force_stop:
|
||||
description:
|
||||
- If this is true, the C(lxd_container) forces to stop the container
|
||||
when it stops or restarts the container.
|
||||
- If this is true, the C(lxd_container) forces to stop the instance
|
||||
when it stops or restarts the instance.
|
||||
required: false
|
||||
default: false
|
||||
type: bool
|
||||
@@ -160,18 +169,18 @@ options:
|
||||
required: false
|
||||
type: str
|
||||
notes:
|
||||
- Containers must have a unique name. If you attempt to create a container
|
||||
- Instances can be a container or a virtual machine, both of them must have unique name. If you attempt to create an instance
|
||||
with a name that already existed in the users namespace the module will
|
||||
simply return as "unchanged".
|
||||
- There are two ways to run commands in containers, using the command
|
||||
- There are two ways to run commands inside a container or virtual machine, using the command
|
||||
module or using the ansible lxd connection plugin bundled in Ansible >=
|
||||
2.1, the later requires python to be installed in the container which can
|
||||
2.1, the later requires python to be installed in the instance which can
|
||||
be done with the command module.
|
||||
- You can copy a file from the host to the container
|
||||
- You can copy a file from the host to the instance
|
||||
with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) module and the `lxd` connection plugin.
|
||||
See the example below.
|
||||
- You can copy a file in the created container to the localhost
|
||||
with `command=lxc file pull container_name/dir/filename filename`.
|
||||
- You can copy a file in the created instance to the localhost
|
||||
with `command=lxc file pull instance_name/dir/filename filename`.
|
||||
See the first example below.
|
||||
'''
|
||||
|
||||
@@ -240,6 +249,7 @@ EXAMPLES = '''
|
||||
community.general.lxd_container:
|
||||
name: mycontainer
|
||||
state: absent
|
||||
type: container
|
||||
|
||||
# An example for restarting a container
|
||||
- hosts: localhost
|
||||
@@ -249,6 +259,7 @@ EXAMPLES = '''
|
||||
community.general.lxd_container:
|
||||
name: mycontainer
|
||||
state: restarted
|
||||
type: container
|
||||
|
||||
# An example for restarting a container using https to connect to the LXD server
|
||||
- hosts: localhost
|
||||
@@ -306,16 +317,36 @@ EXAMPLES = '''
|
||||
mode: pull
|
||||
alias: ubuntu/xenial/amd64
|
||||
target: node02
|
||||
|
||||
# An example for creating a virtual machine
|
||||
- hosts: localhost
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Create container on another node
|
||||
community.general.lxd_container:
|
||||
name: new-vm-1
|
||||
type: virtual-machine
|
||||
state: started
|
||||
ignore_volatile_options: true
|
||||
wait_for_ipv4_addresses: true
|
||||
profiles: ["default"]
|
||||
source:
|
||||
protocol: simplestreams
|
||||
type: image
|
||||
mode: pull
|
||||
server: https://images.linuxcontainers.org
|
||||
alias: debian/11
|
||||
timeout: 600
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
addresses:
|
||||
description: Mapping from the network device name to a list of IPv4 addresses in the container
|
||||
description: Mapping from the network device name to a list of IPv4 addresses in the instance.
|
||||
returned: when state is started or restarted
|
||||
type: dict
|
||||
sample: {"eth0": ["10.155.92.191"]}
|
||||
old_state:
|
||||
description: The old state of the container
|
||||
description: The old state of the instance.
|
||||
returned: when state is started or restarted
|
||||
type: str
|
||||
sample: "stopped"
|
||||
@@ -325,7 +356,7 @@ logs:
|
||||
type: list
|
||||
sample: "(too long to be placed here)"
|
||||
actions:
|
||||
description: List of actions performed for the container.
|
||||
description: List of actions performed for the instance.
|
||||
returned: success
|
||||
type: list
|
||||
sample: '["create", "start"]'
|
||||
@@ -384,6 +415,15 @@ class LXDContainerManagement(object):
|
||||
self.addresses = None
|
||||
self.target = self.module.params['target']
|
||||
|
||||
self.type = self.module.params['type']
|
||||
|
||||
# LXD Rest API provides additional endpoints for creating containers and virtual-machines.
|
||||
self.api_endpoint = None
|
||||
if self.type == 'container':
|
||||
self.api_endpoint = '/1.0/containers'
|
||||
elif self.type == 'virtual-machine':
|
||||
self.api_endpoint = '/1.0/virtual-machines'
|
||||
|
||||
self.key_file = self.module.params.get('client_key')
|
||||
if self.key_file is None:
|
||||
self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME'])
|
||||
@@ -419,20 +459,20 @@ class LXDContainerManagement(object):
|
||||
if param_val is not None:
|
||||
self.config[attr] = param_val
|
||||
|
||||
def _get_container_json(self):
|
||||
def _get_instance_json(self):
|
||||
return self.client.do(
|
||||
'GET', '/1.0/containers/{0}'.format(self.name),
|
||||
'GET', '{0}/{1}'.format(self.api_endpoint, self.name),
|
||||
ok_error_codes=[404]
|
||||
)
|
||||
|
||||
def _get_container_state_json(self):
|
||||
def _get_instance_state_json(self):
|
||||
return self.client.do(
|
||||
'GET', '/1.0/containers/{0}/state'.format(self.name),
|
||||
'GET', '{0}/{1}/state'.format(self.api_endpoint, self.name),
|
||||
ok_error_codes=[404]
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _container_json_to_module_state(resp_json):
|
||||
def _instance_json_to_module_state(resp_json):
|
||||
if resp_json['type'] == 'error':
|
||||
return 'absent'
|
||||
return ANSIBLE_LXD_STATES[resp_json['metadata']['status']]
|
||||
@@ -441,45 +481,45 @@ class LXDContainerManagement(object):
|
||||
body_json = {'action': action, 'timeout': self.timeout}
|
||||
if force_stop:
|
||||
body_json['force'] = True
|
||||
return self.client.do('PUT', '/1.0/containers/{0}/state'.format(self.name), body_json=body_json)
|
||||
return self.client.do('PUT', '{0}/{1}/state'.format(self.api_endpoint, self.name), body_json=body_json)
|
||||
|
||||
def _create_container(self):
|
||||
def _create_instance(self):
|
||||
config = self.config.copy()
|
||||
config['name'] = self.name
|
||||
if self.target:
|
||||
self.client.do('POST', '/1.0/containers?' + urlencode(dict(target=self.target)), config)
|
||||
self.client.do('POST', '{0}?{1}'.format(self.api_endpoint, urlencode(dict(target=self.target))), config)
|
||||
else:
|
||||
self.client.do('POST', '/1.0/containers', config)
|
||||
self.client.do('POST', self.api_endpoint, config)
|
||||
self.actions.append('create')
|
||||
|
||||
def _start_container(self):
|
||||
def _start_instance(self):
|
||||
self._change_state('start')
|
||||
self.actions.append('start')
|
||||
|
||||
def _stop_container(self):
|
||||
def _stop_instance(self):
|
||||
self._change_state('stop', self.force_stop)
|
||||
self.actions.append('stop')
|
||||
|
||||
def _restart_container(self):
|
||||
def _restart_instance(self):
|
||||
self._change_state('restart', self.force_stop)
|
||||
self.actions.append('restart')
|
||||
|
||||
def _delete_container(self):
|
||||
self.client.do('DELETE', '/1.0/containers/{0}'.format(self.name))
|
||||
def _delete_instance(self):
|
||||
self.client.do('DELETE', '{0}/{1}'.format(self.api_endpoint, self.name))
|
||||
self.actions.append('delete')
|
||||
|
||||
def _freeze_container(self):
|
||||
def _freeze_instance(self):
|
||||
self._change_state('freeze')
|
||||
self.actions.append('freeze')
|
||||
|
||||
def _unfreeze_container(self):
|
||||
def _unfreeze_instance(self):
|
||||
self._change_state('unfreeze')
|
||||
self.actions.append('unfreez')
|
||||
|
||||
def _container_ipv4_addresses(self, ignore_devices=None):
|
||||
def _instance_ipv4_addresses(self, ignore_devices=None):
|
||||
ignore_devices = ['lo'] if ignore_devices is None else ignore_devices
|
||||
|
||||
resp_json = self._get_container_state_json()
|
||||
resp_json = self._get_instance_state_json()
|
||||
network = resp_json['metadata']['network'] or {}
|
||||
network = dict((k, v) for k, v in network.items() if k not in ignore_devices) or {}
|
||||
addresses = dict((k, [a['address'] for a in v['addresses'] if a['family'] == 'inet']) for k, v in network.items()) or {}
|
||||
@@ -494,7 +534,7 @@ class LXDContainerManagement(object):
|
||||
due = datetime.datetime.now() + datetime.timedelta(seconds=self.timeout)
|
||||
while datetime.datetime.now() < due:
|
||||
time.sleep(1)
|
||||
addresses = self._container_ipv4_addresses()
|
||||
addresses = self._instance_ipv4_addresses()
|
||||
if self._has_all_ipv4_addresses(addresses):
|
||||
self.addresses = addresses
|
||||
return
|
||||
@@ -504,72 +544,72 @@ class LXDContainerManagement(object):
|
||||
|
||||
def _started(self):
|
||||
if self.old_state == 'absent':
|
||||
self._create_container()
|
||||
self._start_container()
|
||||
self._create_instance()
|
||||
self._start_instance()
|
||||
else:
|
||||
if self.old_state == 'frozen':
|
||||
self._unfreeze_container()
|
||||
self._unfreeze_instance()
|
||||
elif self.old_state == 'stopped':
|
||||
self._start_container()
|
||||
if self._needs_to_apply_container_configs():
|
||||
self._apply_container_configs()
|
||||
self._start_instance()
|
||||
if self._needs_to_apply_instance_configs():
|
||||
self._apply_instance_configs()
|
||||
if self.wait_for_ipv4_addresses:
|
||||
self._get_addresses()
|
||||
|
||||
def _stopped(self):
|
||||
if self.old_state == 'absent':
|
||||
self._create_container()
|
||||
self._create_instance()
|
||||
else:
|
||||
if self.old_state == 'stopped':
|
||||
if self._needs_to_apply_container_configs():
|
||||
self._start_container()
|
||||
self._apply_container_configs()
|
||||
self._stop_container()
|
||||
if self._needs_to_apply_instance_configs():
|
||||
self._start_instance()
|
||||
self._apply_instance_configs()
|
||||
self._stop_instance()
|
||||
else:
|
||||
if self.old_state == 'frozen':
|
||||
self._unfreeze_container()
|
||||
if self._needs_to_apply_container_configs():
|
||||
self._apply_container_configs()
|
||||
self._stop_container()
|
||||
self._unfreeze_instance()
|
||||
if self._needs_to_apply_instance_configs():
|
||||
self._apply_instance_configs()
|
||||
self._stop_instance()
|
||||
|
||||
def _restarted(self):
|
||||
if self.old_state == 'absent':
|
||||
self._create_container()
|
||||
self._start_container()
|
||||
self._create_instance()
|
||||
self._start_instance()
|
||||
else:
|
||||
if self.old_state == 'frozen':
|
||||
self._unfreeze_container()
|
||||
if self._needs_to_apply_container_configs():
|
||||
self._apply_container_configs()
|
||||
self._restart_container()
|
||||
self._unfreeze_instance()
|
||||
if self._needs_to_apply_instance_configs():
|
||||
self._apply_instance_configs()
|
||||
self._restart_instance()
|
||||
if self.wait_for_ipv4_addresses:
|
||||
self._get_addresses()
|
||||
|
||||
def _destroyed(self):
|
||||
if self.old_state != 'absent':
|
||||
if self.old_state == 'frozen':
|
||||
self._unfreeze_container()
|
||||
self._unfreeze_instance()
|
||||
if self.old_state != 'stopped':
|
||||
self._stop_container()
|
||||
self._delete_container()
|
||||
self._stop_instance()
|
||||
self._delete_instance()
|
||||
|
||||
def _frozen(self):
|
||||
if self.old_state == 'absent':
|
||||
self._create_container()
|
||||
self._start_container()
|
||||
self._freeze_container()
|
||||
self._create_instance()
|
||||
self._start_instance()
|
||||
self._freeze_instance()
|
||||
else:
|
||||
if self.old_state == 'stopped':
|
||||
self._start_container()
|
||||
if self._needs_to_apply_container_configs():
|
||||
self._apply_container_configs()
|
||||
self._freeze_container()
|
||||
self._start_instance()
|
||||
if self._needs_to_apply_instance_configs():
|
||||
self._apply_instance_configs()
|
||||
self._freeze_instance()
|
||||
|
||||
def _needs_to_change_container_config(self, key):
|
||||
def _needs_to_change_instance_config(self, key):
|
||||
if key not in self.config:
|
||||
return False
|
||||
if key == 'config' and self.ignore_volatile_options: # the old behavior is to ignore configurations by keyword "volatile"
|
||||
old_configs = dict((k, v) for k, v in self.old_container_json['metadata'][key].items() if not k.startswith('volatile.'))
|
||||
old_configs = dict((k, v) for k, v in self.old_instance_json['metadata'][key].items() if not k.startswith('volatile.'))
|
||||
for k, v in self.config['config'].items():
|
||||
if k not in old_configs:
|
||||
return True
|
||||
@@ -577,7 +617,7 @@ class LXDContainerManagement(object):
|
||||
return True
|
||||
return False
|
||||
elif key == 'config': # next default behavior
|
||||
old_configs = dict((k, v) for k, v in self.old_container_json['metadata'][key].items())
|
||||
old_configs = dict((k, v) for k, v in self.old_instance_json['metadata'][key].items())
|
||||
for k, v in self.config['config'].items():
|
||||
if k not in old_configs:
|
||||
return True
|
||||
@@ -585,39 +625,41 @@ class LXDContainerManagement(object):
|
||||
return True
|
||||
return False
|
||||
else:
|
||||
old_configs = self.old_container_json['metadata'][key]
|
||||
old_configs = self.old_instance_json['metadata'][key]
|
||||
return self.config[key] != old_configs
|
||||
|
||||
def _needs_to_apply_container_configs(self):
|
||||
def _needs_to_apply_instance_configs(self):
|
||||
return (
|
||||
self._needs_to_change_container_config('architecture') or
|
||||
self._needs_to_change_container_config('config') or
|
||||
self._needs_to_change_container_config('ephemeral') or
|
||||
self._needs_to_change_container_config('devices') or
|
||||
self._needs_to_change_container_config('profiles')
|
||||
self._needs_to_change_instance_config('architecture') or
|
||||
self._needs_to_change_instance_config('config') or
|
||||
self._needs_to_change_instance_config('ephemeral') or
|
||||
self._needs_to_change_instance_config('devices') or
|
||||
self._needs_to_change_instance_config('profiles')
|
||||
)
|
||||
|
||||
def _apply_container_configs(self):
|
||||
old_metadata = self.old_container_json['metadata']
|
||||
def _apply_instance_configs(self):
|
||||
old_metadata = self.old_instance_json['metadata']
|
||||
body_json = {
|
||||
'architecture': old_metadata['architecture'],
|
||||
'config': old_metadata['config'],
|
||||
'devices': old_metadata['devices'],
|
||||
'profiles': old_metadata['profiles']
|
||||
}
|
||||
if self._needs_to_change_container_config('architecture'):
|
||||
|
||||
if self._needs_to_change_instance_config('architecture'):
|
||||
body_json['architecture'] = self.config['architecture']
|
||||
if self._needs_to_change_container_config('config'):
|
||||
if self._needs_to_change_instance_config('config'):
|
||||
for k, v in self.config['config'].items():
|
||||
body_json['config'][k] = v
|
||||
if self._needs_to_change_container_config('ephemeral'):
|
||||
if self._needs_to_change_instance_config('ephemeral'):
|
||||
body_json['ephemeral'] = self.config['ephemeral']
|
||||
if self._needs_to_change_container_config('devices'):
|
||||
if self._needs_to_change_instance_config('devices'):
|
||||
body_json['devices'] = self.config['devices']
|
||||
if self._needs_to_change_container_config('profiles'):
|
||||
if self._needs_to_change_instance_config('profiles'):
|
||||
body_json['profiles'] = self.config['profiles']
|
||||
self.client.do('PUT', '/1.0/containers/{0}'.format(self.name), body_json=body_json)
|
||||
self.actions.append('apply_container_configs')
|
||||
|
||||
self.client.do('PUT', '{0}/{1}'.format(self.api_endpoint, self.name), body_json=body_json)
|
||||
self.actions.append('apply_instance_configs')
|
||||
|
||||
def run(self):
|
||||
"""Run the main method."""
|
||||
@@ -627,8 +669,8 @@ class LXDContainerManagement(object):
|
||||
self.client.authenticate(self.trust_password)
|
||||
self.ignore_volatile_options = self.module.params.get('ignore_volatile_options')
|
||||
|
||||
self.old_container_json = self._get_container_json()
|
||||
self.old_state = self._container_json_to_module_state(self.old_container_json)
|
||||
self.old_instance_json = self._get_instance_json()
|
||||
self.old_state = self._instance_json_to_module_state(self.old_instance_json)
|
||||
action = getattr(self, LXD_ANSIBLE_STATES[self.state])
|
||||
action()
|
||||
|
||||
@@ -698,6 +740,11 @@ def main():
|
||||
type='int',
|
||||
default=30
|
||||
),
|
||||
type=dict(
|
||||
type='str',
|
||||
default='container',
|
||||
choices=['container', 'virtual-machine'],
|
||||
),
|
||||
wait_for_ipv4_addresses=dict(
|
||||
type='bool',
|
||||
default=False
|
||||
@@ -736,6 +783,7 @@ def main():
|
||||
'This will change in the future. Please test your scripts'
|
||||
'by "ignore_volatile_options: false". To keep the old behavior, set that option explicitly to "true"',
|
||||
version='6.0.0', collection_name='community.general')
|
||||
|
||||
lxd_manage = LXDContainerManagement(module=module)
|
||||
lxd_manage.run()
|
||||
|
||||
|
||||
@@ -167,6 +167,25 @@ options:
|
||||
- compatibility
|
||||
- no_defaults
|
||||
version_added: "1.3.0"
|
||||
clone:
|
||||
description:
|
||||
- ID of the container to be cloned.
|
||||
- I(description), I(hostname), and I(pool) will be copied from the cloned container if not specified.
|
||||
- The type of clone created is defined by the I(clone_type) parameter.
|
||||
- This operator is only supported for Proxmox clusters that use LXC containerization (PVE version >= 4).
|
||||
type: int
|
||||
version_added: 4.3.0
|
||||
clone_type:
|
||||
description:
|
||||
- Type of the clone created.
|
||||
- C(full) creates a full clone, and I(storage) must be specified.
|
||||
- C(linked) creates a linked clone, and the cloned container must be a template container.
|
||||
- C(opportunistic) creates a linked clone if the cloned container is a template container, and a full clone if not.
|
||||
I(storage) may be specified, if not it will fall back to the default.
|
||||
type: str
|
||||
choices: ['full', 'linked', 'opportunistic']
|
||||
default: opportunistic
|
||||
version_added: 4.3.0
|
||||
author: Sergei Antipov (@UnderGreen)
|
||||
extends_documentation_fragment:
|
||||
- community.general.proxmox.documentation
|
||||
@@ -292,6 +311,28 @@ EXAMPLES = r'''
|
||||
- nesting=1
|
||||
- mount=cifs,nfs
|
||||
|
||||
- name: >
|
||||
Create a linked clone of the template container with id 100. The newly created container with be a
|
||||
linked clone, because no storage parameter is defined
|
||||
community.general.proxmox:
|
||||
vmid: 201
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
clone: 100
|
||||
hostname: clone.example.org
|
||||
|
||||
- name: Create a full clone of the container with id 100
|
||||
community.general.proxmox:
|
||||
vmid: 201
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
clone: 100
|
||||
hostname: clone.example.org
|
||||
storage: local
|
||||
|
||||
- name: Start container
|
||||
community.general.proxmox:
|
||||
@@ -348,7 +389,8 @@ EXAMPLES = r'''
|
||||
|
||||
import time
|
||||
import traceback
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
try:
|
||||
from proxmoxer import ProxmoxAPI
|
||||
@@ -359,6 +401,10 @@ except ImportError:
|
||||
from ansible.module_utils.basic import AnsibleModule, env_fallback
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.proxmox import (
|
||||
ansible_to_proxmox_bool
|
||||
)
|
||||
|
||||
|
||||
VZ_TYPE = None
|
||||
|
||||
@@ -384,6 +430,13 @@ def content_check(proxmox, node, ostemplate, template_store):
|
||||
return [True for cnt in proxmox.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate]
|
||||
|
||||
|
||||
def is_template_container(proxmox, node, vmid):
|
||||
"""Check if the specified container is a template."""
|
||||
proxmox_node = proxmox.nodes(node)
|
||||
config = getattr(proxmox_node, VZ_TYPE)(vmid).config.get()
|
||||
return config['template']
|
||||
|
||||
|
||||
def node_check(proxmox, node):
|
||||
return [True for nd in proxmox.nodes.get() if nd['node'] == node]
|
||||
|
||||
@@ -393,8 +446,10 @@ def proxmox_version(proxmox):
|
||||
return LooseVersion(apireturn['version'])
|
||||
|
||||
|
||||
def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs):
|
||||
def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, clone, **kwargs):
|
||||
proxmox_node = proxmox.nodes(node)
|
||||
|
||||
# Remove all empty kwarg entries
|
||||
kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
|
||||
|
||||
if VZ_TYPE == 'lxc':
|
||||
@@ -414,7 +469,49 @@ def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, sw
|
||||
kwargs['cpus'] = cpus
|
||||
kwargs['disk'] = disk
|
||||
|
||||
taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
|
||||
if clone is not None:
|
||||
if VZ_TYPE != 'lxc':
|
||||
module.fail_json(changed=False, msg="Clone operator is only supported for LXC enabled proxmox clusters.")
|
||||
|
||||
clone_is_template = is_template_container(proxmox, node, clone)
|
||||
|
||||
# By default, create a full copy only when the cloned container is not a template.
|
||||
create_full_copy = not clone_is_template
|
||||
|
||||
# Only accept parameters that are compatible with the clone endpoint.
|
||||
valid_clone_parameters = ['hostname', 'pool', 'description']
|
||||
if module.params['storage'] is not None and clone_is_template:
|
||||
# Cloning a template, so create a full copy instead of a linked copy
|
||||
create_full_copy = True
|
||||
elif module.params['storage'] is None and not clone_is_template:
|
||||
# Not cloning a template, but also no defined storage. This isn't possible.
|
||||
module.fail_json(changed=False, msg="Cloned container is not a template, storage needs to be specified.")
|
||||
|
||||
if module.params['clone_type'] == 'linked':
|
||||
if not clone_is_template:
|
||||
module.fail_json(changed=False, msg="'linked' clone type is specified, but cloned container is not a template container.")
|
||||
# Don't need to do more, by default create_full_copy is set to false already
|
||||
elif module.params['clone_type'] == 'opportunistic':
|
||||
if not clone_is_template:
|
||||
# Cloned container is not a template, so we need our 'storage' parameter
|
||||
valid_clone_parameters.append('storage')
|
||||
elif module.params['clone_type'] == 'full':
|
||||
create_full_copy = True
|
||||
valid_clone_parameters.append('storage')
|
||||
|
||||
clone_parameters = {}
|
||||
|
||||
if create_full_copy:
|
||||
clone_parameters['full'] = '1'
|
||||
else:
|
||||
clone_parameters['full'] = '0'
|
||||
for param in valid_clone_parameters:
|
||||
if module.params[param] is not None:
|
||||
clone_parameters[param] = module.params[param]
|
||||
|
||||
taskid = getattr(proxmox_node, VZ_TYPE)(clone).clone.post(newid=vmid, **clone_parameters)
|
||||
else:
|
||||
taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
|
||||
|
||||
while timeout:
|
||||
if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
@@ -515,10 +612,19 @@ def main():
|
||||
description=dict(type='str'),
|
||||
hookscript=dict(type='str'),
|
||||
proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']),
|
||||
clone=dict(type='int'),
|
||||
clone_type=dict(default='opportunistic', choices=['full', 'linked', 'opportunistic']),
|
||||
),
|
||||
required_if=[('state', 'present', ['node', 'hostname', 'ostemplate'])],
|
||||
required_together=[('api_token_id', 'api_token_secret')],
|
||||
required_if=[
|
||||
('state', 'present', ['node', 'hostname']),
|
||||
('state', 'present', ('clone', 'ostemplate'), True), # Require one of clone and ostemplate. Together with mutually_exclusive this ensures that we
|
||||
# either clone a container or create a new one from a template file.
|
||||
],
|
||||
required_together=[
|
||||
('api_token_id', 'api_token_secret')
|
||||
],
|
||||
required_one_of=[('api_password', 'api_token_id')],
|
||||
mutually_exclusive=[('clone', 'ostemplate')], # Creating a new container is done either by cloning an existing one, or based on a template.
|
||||
)
|
||||
|
||||
if not HAS_PROXMOXER:
|
||||
@@ -542,6 +648,7 @@ def main():
|
||||
if module.params['ostemplate'] is not None:
|
||||
template_store = module.params['ostemplate'].split(":")[0]
|
||||
timeout = module.params['timeout']
|
||||
clone = module.params['clone']
|
||||
|
||||
if module.params['proxmox_default_behavior'] == 'compatibility':
|
||||
old_default_values = dict(
|
||||
@@ -583,7 +690,8 @@ def main():
|
||||
elif not vmid:
|
||||
module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
|
||||
|
||||
if state == 'present':
|
||||
# Create a new container
|
||||
if state == 'present' and clone is None:
|
||||
try:
|
||||
if get_instance(proxmox, vmid) and not module.params['force']:
|
||||
module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
|
||||
@@ -595,8 +703,11 @@ def main():
|
||||
elif not content_check(proxmox, node, module.params['ostemplate'], template_store):
|
||||
module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s"
|
||||
% (module.params['ostemplate'], node, template_store))
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Pre-creation checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
|
||||
|
||||
create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout,
|
||||
try:
|
||||
create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, clone,
|
||||
cores=module.params['cores'],
|
||||
pool=module.params['pool'],
|
||||
password=module.params['password'],
|
||||
@@ -605,20 +716,40 @@ def main():
|
||||
netif=module.params['netif'],
|
||||
mounts=module.params['mounts'],
|
||||
ip_address=module.params['ip_address'],
|
||||
onboot=int(module.params['onboot']),
|
||||
onboot=ansible_to_proxmox_bool(module.params['onboot']),
|
||||
cpuunits=module.params['cpuunits'],
|
||||
nameserver=module.params['nameserver'],
|
||||
searchdomain=module.params['searchdomain'],
|
||||
force=int(module.params['force']),
|
||||
force=ansible_to_proxmox_bool(module.params['force']),
|
||||
pubkey=module.params['pubkey'],
|
||||
features=",".join(module.params['features']) if module.params['features'] is not None else None,
|
||||
unprivileged=int(module.params['unprivileged']),
|
||||
unprivileged=ansible_to_proxmox_bool(module.params['unprivileged']),
|
||||
description=module.params['description'],
|
||||
hookscript=module.params['hookscript'])
|
||||
|
||||
module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
|
||||
module.exit_json(changed=True, msg="Deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
|
||||
except Exception as e:
|
||||
module.fail_json(msg="creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
|
||||
module.fail_json(msg="Creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
|
||||
|
||||
# Clone a container
|
||||
elif state == 'present' and clone is not None:
|
||||
try:
|
||||
if get_instance(proxmox, vmid) and not module.params['force']:
|
||||
module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
|
||||
# If no vmid was passed, there cannot be another VM named 'hostname'
|
||||
if not module.params['vmid'] and get_vmid(proxmox, hostname) and not module.params['force']:
|
||||
module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, get_vmid(proxmox, hostname)[0]))
|
||||
if not get_instance(proxmox, clone):
|
||||
module.exit_json(changed=False, msg="Container to be cloned does not exist")
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Pre-clone checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
|
||||
|
||||
try:
|
||||
create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, clone)
|
||||
|
||||
module.exit_json(changed=True, msg="Cloned VM %s from %s" % (vmid, clone))
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Cloning %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
|
||||
|
||||
elif state == 'started':
|
||||
try:
|
||||
|
||||
@@ -725,9 +725,10 @@ msg:
|
||||
import re
|
||||
import time
|
||||
import traceback
|
||||
from distutils.version import LooseVersion
|
||||
from ansible.module_utils.six.moves.urllib.parse import quote
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
try:
|
||||
from proxmoxer import ProxmoxAPI
|
||||
HAS_PROXMOXER = True
|
||||
|
||||
@@ -230,11 +230,12 @@ command:
|
||||
import os
|
||||
import json
|
||||
import tempfile
|
||||
from distutils.version import LooseVersion
|
||||
from ansible.module_utils.six.moves import shlex_quote
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
module = None
|
||||
|
||||
|
||||
@@ -319,11 +320,25 @@ def remove_workspace(bin_path, project_path, workspace):
|
||||
_workspace_cmd(bin_path, project_path, 'delete', workspace)
|
||||
|
||||
|
||||
def build_plan(command, project_path, variables_args, state_file, targets, state, plan_path=None):
|
||||
def build_plan(command, project_path, variables_args, state_file, targets, state, apply_args, plan_path=None):
|
||||
if plan_path is None:
|
||||
f, plan_path = tempfile.mkstemp(suffix='.tfplan')
|
||||
|
||||
plan_command = [command[0], 'plan', '-input=false', '-no-color', '-detailed-exitcode', '-out', plan_path]
|
||||
local_command = command.copy()
|
||||
|
||||
plan_command = [command[0], 'plan']
|
||||
|
||||
if state == "planned":
|
||||
for c in local_command[1:]:
|
||||
plan_command.append(c)
|
||||
|
||||
if state == "present":
|
||||
for a in apply_args:
|
||||
local_command.remove(a)
|
||||
for c in local_command[1:]:
|
||||
plan_command.append(c)
|
||||
|
||||
plan_command.extend(['-input=false', '-no-color', '-detailed-exitcode', '-out', plan_path])
|
||||
|
||||
for t in targets:
|
||||
plan_command.extend(['-target', t])
|
||||
@@ -461,7 +476,7 @@ def main():
|
||||
module.fail_json(msg='Could not find plan_file "{0}", check the path and try again.'.format(plan_file))
|
||||
else:
|
||||
plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file,
|
||||
module.params.get('targets'), state, plan_file)
|
||||
module.params.get('targets'), state, APPLY_ARGS, plan_file)
|
||||
if state == 'present' and check_destroy and '- destroy' in out:
|
||||
module.fail_json(msg="Aborting command because it would destroy some resources. "
|
||||
"Consider switching the 'check_destroy' to false to suppress this error")
|
||||
|
||||
@@ -15,7 +15,7 @@ description:
|
||||
- Gather information about the servers.
|
||||
- U(https://www.online.net/en/dedicated-server)
|
||||
author:
|
||||
- "Remy Leone (@sieben)"
|
||||
- "Remy Leone (@remyleone)"
|
||||
extends_documentation_fragment:
|
||||
- community.general.online
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ short_description: Gather information about Online user.
|
||||
description:
|
||||
- Gather information about the user.
|
||||
author:
|
||||
- "Remy Leone (@sieben)"
|
||||
- "Remy Leone (@remyleone)"
|
||||
extends_documentation_fragment:
|
||||
- community.general.online
|
||||
'''
|
||||
|
||||
@@ -97,7 +97,7 @@ EXAMPLES = '''
|
||||
register: my_volume
|
||||
'''
|
||||
|
||||
from distutils.version import LooseVersion
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
try:
|
||||
import pyrax
|
||||
|
||||
@@ -16,7 +16,7 @@ DOCUMENTATION = '''
|
||||
---
|
||||
module: scaleway_compute
|
||||
short_description: Scaleway compute management module
|
||||
author: Remy Leone (@sieben)
|
||||
author: Remy Leone (@remyleone)
|
||||
description:
|
||||
- "This module manages compute instances on Scaleway."
|
||||
extends_documentation_fragment:
|
||||
@@ -54,8 +54,15 @@ options:
|
||||
organization:
|
||||
type: str
|
||||
description:
|
||||
- Organization identifier
|
||||
required: true
|
||||
- Organization identifier.
|
||||
- Exactly one of I(project) and I(organization) must be specified.
|
||||
|
||||
project:
|
||||
type: str
|
||||
description:
|
||||
- Project identifier.
|
||||
- Exactly one of I(project) and I(organization) must be specified.
|
||||
version_added: 4.3.0
|
||||
|
||||
state:
|
||||
type: str
|
||||
@@ -132,7 +139,7 @@ EXAMPLES = '''
|
||||
name: foobar
|
||||
state: present
|
||||
image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
|
||||
organization: 951df375-e094-4d26-97c1-ba548eeb9c42
|
||||
project: 951df375-e094-4d26-97c1-ba548eeb9c42
|
||||
region: ams1
|
||||
commercial_type: VC1S
|
||||
tags:
|
||||
@@ -144,7 +151,7 @@ EXAMPLES = '''
|
||||
name: foobar
|
||||
state: present
|
||||
image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
|
||||
organization: 951df375-e094-4d26-97c1-ba548eeb9c42
|
||||
project: 951df375-e094-4d26-97c1-ba548eeb9c42
|
||||
region: ams1
|
||||
commercial_type: VC1S
|
||||
security_group: 4a31b633-118e-4900-bd52-facf1085fc8d
|
||||
@@ -157,7 +164,7 @@ EXAMPLES = '''
|
||||
name: foobar
|
||||
state: absent
|
||||
image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
|
||||
organization: 951df375-e094-4d26-97c1-ba548eeb9c42
|
||||
project: 951df375-e094-4d26-97c1-ba548eeb9c42
|
||||
region: ams1
|
||||
commercial_type: VC1S
|
||||
'''
|
||||
@@ -269,10 +276,15 @@ def create_server(compute_api, server):
|
||||
"commercial_type": server["commercial_type"],
|
||||
"image": server["image"],
|
||||
"dynamic_ip_required": server["dynamic_ip_required"],
|
||||
"name": server["name"],
|
||||
"organization": server["organization"]
|
||||
"name": server["name"]
|
||||
}
|
||||
|
||||
if server["project"]:
|
||||
data["project"] = server["project"]
|
||||
|
||||
if server["organization"]:
|
||||
data["organization"] = server["organization"]
|
||||
|
||||
if server["security_group"]:
|
||||
data["security_group"] = server["security_group"]
|
||||
|
||||
@@ -628,6 +640,7 @@ def core(module):
|
||||
"enable_ipv6": module.params["enable_ipv6"],
|
||||
"tags": module.params["tags"],
|
||||
"organization": module.params["organization"],
|
||||
"project": module.params["project"],
|
||||
"security_group": module.params["security_group"]
|
||||
}
|
||||
module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
|
||||
@@ -655,7 +668,8 @@ def main():
|
||||
public_ip=dict(default="absent"),
|
||||
state=dict(choices=list(state_strategy.keys()), default='present'),
|
||||
tags=dict(type="list", elements="str", default=[]),
|
||||
organization=dict(required=True),
|
||||
organization=dict(),
|
||||
project=dict(),
|
||||
wait=dict(type="bool", default=False),
|
||||
wait_timeout=dict(type="int", default=300),
|
||||
wait_sleep_time=dict(type="int", default=3),
|
||||
@@ -664,6 +678,12 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
mutually_exclusive=[
|
||||
('organization', 'project'),
|
||||
],
|
||||
required_one_of=[
|
||||
('organization', 'project'),
|
||||
],
|
||||
)
|
||||
|
||||
core(module)
|
||||
|
||||
@@ -15,7 +15,7 @@ description:
|
||||
- Gather information about the Scaleway images available.
|
||||
author:
|
||||
- "Yanis Guenane (@Spredzy)"
|
||||
- "Remy Leone (@sieben)"
|
||||
- "Remy Leone (@remyleone)"
|
||||
extends_documentation_fragment:
|
||||
- community.general.scaleway
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@ DOCUMENTATION = '''
|
||||
---
|
||||
module: scaleway_ip
|
||||
short_description: Scaleway IP management module
|
||||
author: Remy Leone (@sieben)
|
||||
author: Remy Leone (@remyleone)
|
||||
description:
|
||||
- This module manages IP on Scaleway account
|
||||
U(https://developer.scaleway.com)
|
||||
|
||||
@@ -15,7 +15,7 @@ description:
|
||||
- Gather information about the Scaleway ips available.
|
||||
author:
|
||||
- "Yanis Guenane (@Spredzy)"
|
||||
- "Remy Leone (@sieben)"
|
||||
- "Remy Leone (@remyleone)"
|
||||
extends_documentation_fragment:
|
||||
- community.general.scaleway
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@ DOCUMENTATION = '''
|
||||
---
|
||||
module: scaleway_lb
|
||||
short_description: Scaleway load-balancer management module
|
||||
author: Remy Leone (@sieben)
|
||||
author: Remy Leone (@remyleone)
|
||||
description:
|
||||
- "This module manages load-balancers on Scaleway."
|
||||
extends_documentation_fragment:
|
||||
|
||||
@@ -15,7 +15,7 @@ description:
|
||||
- Gather information about the Scaleway organizations available.
|
||||
author:
|
||||
- "Yanis Guenane (@Spredzy)"
|
||||
- "Remy Leone (@sieben)"
|
||||
- "Remy Leone (@remyleone)"
|
||||
options:
|
||||
api_url:
|
||||
description:
|
||||
|
||||
@@ -15,7 +15,7 @@ description:
|
||||
- Gather information about the Scaleway security groups available.
|
||||
author:
|
||||
- "Yanis Guenane (@Spredzy)"
|
||||
- "Remy Leone (@sieben)"
|
||||
- "Remy Leone (@remyleone)"
|
||||
options:
|
||||
region:
|
||||
type: str
|
||||
|
||||
@@ -15,7 +15,7 @@ description:
|
||||
- Gather information about the Scaleway servers available.
|
||||
author:
|
||||
- "Yanis Guenane (@Spredzy)"
|
||||
- "Remy Leone (@sieben)"
|
||||
- "Remy Leone (@remyleone)"
|
||||
extends_documentation_fragment:
|
||||
- community.general.scaleway
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ description:
|
||||
- Gather information about the Scaleway snapshot available.
|
||||
author:
|
||||
- "Yanis Guenane (@Spredzy)"
|
||||
- "Remy Leone (@sieben)"
|
||||
- "Remy Leone (@remyleone)"
|
||||
extends_documentation_fragment:
|
||||
- community.general.scaleway
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@ DOCUMENTATION = '''
|
||||
---
|
||||
module: scaleway_sshkey
|
||||
short_description: Scaleway SSH keys management module
|
||||
author: Remy Leone (@sieben)
|
||||
author: Remy Leone (@remyleone)
|
||||
description:
|
||||
- This module manages SSH keys on Scaleway account
|
||||
U(https://developer.scaleway.com)
|
||||
|
||||
@@ -16,7 +16,7 @@ DOCUMENTATION = '''
|
||||
---
|
||||
module: scaleway_user_data
|
||||
short_description: Scaleway user_data management module
|
||||
author: Remy Leone (@sieben)
|
||||
author: Remy Leone (@remyleone)
|
||||
description:
|
||||
- "This module manages user_data on compute instances on Scaleway."
|
||||
- "It can be used to configure cloud-init for instance"
|
||||
@@ -75,7 +75,7 @@ def patch_user_data(compute_api, server_id, key, value):
|
||||
compute_api.module.debug("Starting patching user_data attributes")
|
||||
|
||||
path = "servers/%s/user_data/%s" % (server_id, key)
|
||||
response = compute_api.patch(path=path, data=value, headers={"Content-type": "text/plain"})
|
||||
response = compute_api.patch(path=path, data=value, headers={"Content-Type": "text/plain"})
|
||||
if not response.ok:
|
||||
msg = 'Error during user_data patching: %s %s' % (response.status_code, response.body)
|
||||
compute_api.module.fail_json(msg=msg)
|
||||
|
||||
@@ -51,6 +51,11 @@ options:
|
||||
description:
|
||||
- Name used to identify the volume.
|
||||
required: true
|
||||
project:
|
||||
type: str
|
||||
description:
|
||||
- Scaleway project ID to which volume belongs.
|
||||
version_added: 4.3.0
|
||||
organization:
|
||||
type: str
|
||||
description:
|
||||
@@ -71,7 +76,7 @@ EXAMPLES = '''
|
||||
name: my-volume
|
||||
state: present
|
||||
region: par1
|
||||
organization: "{{ scw_org }}"
|
||||
project: "{{ scw_org }}"
|
||||
"size": 10000000000
|
||||
volume_type: l_ssd
|
||||
register: server_creation_check_task
|
||||
@@ -93,7 +98,7 @@ data:
|
||||
"export_uri": null,
|
||||
"id": "c675f420-cfeb-48ff-ba2a-9d2a4dbe3fcd",
|
||||
"name": "volume-0-3",
|
||||
"organization": "000a115d-2852-4b0a-9ce8-47f1134ba95a",
|
||||
"project": "000a115d-2852-4b0a-9ce8-47f1134ba95a",
|
||||
"server": null,
|
||||
"size": 10000000000,
|
||||
"volume_type": "l_ssd"
|
||||
@@ -106,31 +111,37 @@ from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
def core(module):
|
||||
region = module.params["region"]
|
||||
state = module.params['state']
|
||||
name = module.params['name']
|
||||
organization = module.params['organization']
|
||||
project = module.params['project']
|
||||
size = module.params['size']
|
||||
volume_type = module.params['volume_type']
|
||||
module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
|
||||
|
||||
account_api = Scaleway(module)
|
||||
response = account_api.get('volumes')
|
||||
status_code = response.status_code
|
||||
volumes_json = response.json
|
||||
|
||||
if project is None:
|
||||
project = organization
|
||||
|
||||
if not response.ok:
|
||||
module.fail_json(msg='Error getting volume [{0}: {1}]'.format(
|
||||
status_code, response.json['message']))
|
||||
|
||||
volumeByName = None
|
||||
for volume in volumes_json['volumes']:
|
||||
if volume['organization'] == organization and volume['name'] == name:
|
||||
if volume['project'] == project and volume['name'] == name:
|
||||
volumeByName = volume
|
||||
|
||||
if state in ('present',):
|
||||
if volumeByName is not None:
|
||||
module.exit_json(changed=False)
|
||||
|
||||
payload = {'name': name, 'organization': organization, 'size': size, 'volume_type': volume_type}
|
||||
payload = {'name': name, 'project': project, 'size': size, 'volume_type': volume_type}
|
||||
|
||||
response = account_api.post('/volumes', payload)
|
||||
|
||||
@@ -161,6 +172,7 @@ def main():
|
||||
state=dict(default='present', choices=['absent', 'present']),
|
||||
name=dict(required=True),
|
||||
size=dict(type='int'),
|
||||
project=dict(),
|
||||
organization=dict(),
|
||||
volume_type=dict(),
|
||||
region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
|
||||
@@ -168,6 +180,12 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
mutually_exclusive=[
|
||||
('organization', 'project'),
|
||||
],
|
||||
required_one_of=[
|
||||
('organization', 'project'),
|
||||
],
|
||||
)
|
||||
|
||||
core(module)
|
||||
|
||||
@@ -15,7 +15,7 @@ description:
|
||||
- Gather information about the Scaleway volumes available.
|
||||
author:
|
||||
- "Yanis Guenane (@Spredzy)"
|
||||
- "Remy Leone (@sieben)"
|
||||
- "Remy Leone (@remyleone)"
|
||||
extends_documentation_fragment:
|
||||
- community.general.scaleway
|
||||
|
||||
|
||||
@@ -117,9 +117,10 @@ state:
|
||||
'''
|
||||
|
||||
import os
|
||||
from distutils.version import LooseVersion
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
|
||||
PACKAGE_STATE_MAP = dict(
|
||||
present="--install",
|
||||
|
||||
@@ -60,6 +60,7 @@ extends_documentation_fragment:
|
||||
- community.general.redis.documentation
|
||||
|
||||
seealso:
|
||||
- module: community.general.redis_data_incr
|
||||
- module: community.general.redis_data_info
|
||||
- module: community.general.redis
|
||||
'''
|
||||
|
||||
@@ -47,7 +47,7 @@ notes:
|
||||
run the C(GET) command on the key, otherwise the module will fail.
|
||||
|
||||
seealso:
|
||||
- module: community.general.redis_set
|
||||
- module: community.general.redis_data
|
||||
- module: community.general.redis_data_info
|
||||
- module: community.general.redis
|
||||
'''
|
||||
|
||||
@@ -26,6 +26,8 @@ extends_documentation_fragment:
|
||||
- community.general.redis
|
||||
|
||||
seealso:
|
||||
- module: community.general.redis_data
|
||||
- module: community.general.redis_data_incr
|
||||
- module: community.general.redis_info
|
||||
- module: community.general.redis
|
||||
'''
|
||||
|
||||
1
plugins/modules/dnsimple_info.py
Symbolic link
1
plugins/modules/dnsimple_info.py
Symbolic link
@@ -0,0 +1 @@
|
||||
./net_tools/dnsimple_info.py
|
||||
@@ -85,11 +85,6 @@ import os.path
|
||||
import shutil
|
||||
import tempfile
|
||||
|
||||
try: # python 3.3+
|
||||
from shlex import quote
|
||||
except ImportError: # older python
|
||||
from pipes import quote
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
@@ -154,9 +149,9 @@ def main():
|
||||
|
||||
# Use 7zip when we have a binary, otherwise try to mount
|
||||
if binary:
|
||||
cmd = '%s x "%s" -o"%s" %s' % (binary, image, tmp_dir, ' '.join([quote(f) for f in extract_files]))
|
||||
cmd = [binary, 'x', image, '-o%s' % tmp_dir] + extract_files
|
||||
else:
|
||||
cmd = 'mount -o loop,ro "%s" "%s"' % (image, tmp_dir)
|
||||
cmd = [module.get_bin_path('mount'), '-o', 'loop,ro', image, tmp_dir]
|
||||
|
||||
rc, out, err = module.run_command(cmd)
|
||||
if rc != 0:
|
||||
@@ -201,7 +196,7 @@ def main():
|
||||
result['changed'] = True
|
||||
finally:
|
||||
if not binary:
|
||||
module.run_command('umount "%s"' % tmp_dir)
|
||||
module.run_command([module.get_bin_path('umount'), tmp_dir])
|
||||
|
||||
shutil.rmtree(tmp_dir)
|
||||
|
||||
|
||||
@@ -12,9 +12,9 @@ DOCUMENTATION = '''
|
||||
module: xattr
|
||||
short_description: Manage user defined extended attributes
|
||||
description:
|
||||
- Manages filesystem user defined extended attributes.
|
||||
- Requires that extended attributes are enabled on the target filesystem
|
||||
and that the setfattr/getfattr utilities are present.
|
||||
- Manages filesystem user defined extended attributes.
|
||||
- Requires that extended attributes are enabled on the target filesystem
|
||||
and that the setfattr/getfattr utilities are present.
|
||||
options:
|
||||
path:
|
||||
description:
|
||||
@@ -34,13 +34,13 @@ options:
|
||||
type: str
|
||||
value:
|
||||
description:
|
||||
- The value to set the named name/key to, it automatically sets the C(state) to 'set'.
|
||||
- The value to set the named name/key to, it automatically sets the I(state) to C(present).
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- defines which state you want to do.
|
||||
C(read) retrieves the current value for a C(key) (default)
|
||||
C(present) sets C(name) to C(value), default if value is set
|
||||
C(read) retrieves the current value for a I(key) (default)
|
||||
C(present) sets I(path) to C(value), default if value is set
|
||||
C(all) dumps all data
|
||||
C(keys) retrieves all keys
|
||||
C(absent) deletes the key
|
||||
@@ -49,14 +49,14 @@ options:
|
||||
default: read
|
||||
follow:
|
||||
description:
|
||||
- If C(yes), dereferences symlinks and sets/gets attributes on symlink target,
|
||||
- If C(true), dereferences symlinks and sets/gets attributes on symlink target,
|
||||
otherwise acts on symlink itself.
|
||||
type: bool
|
||||
default: yes
|
||||
default: true
|
||||
notes:
|
||||
- As of Ansible 2.3, the I(name) option has been changed to I(path) as default, but I(name) still works as well.
|
||||
author:
|
||||
- Brian Coca (@bcoca)
|
||||
- Brian Coca (@bcoca)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
@@ -116,7 +116,8 @@ def get_xattr(module, path, key, follow):
|
||||
if key is None:
|
||||
cmd.append('-d')
|
||||
else:
|
||||
cmd.append('-n %s' % key)
|
||||
cmd.append('-n')
|
||||
cmd.append(key)
|
||||
cmd.append(path)
|
||||
|
||||
return _run_xattr(module, cmd, False)
|
||||
@@ -127,8 +128,10 @@ def set_xattr(module, path, key, value, follow):
|
||||
cmd = [module.get_bin_path('setfattr', True)]
|
||||
if not follow:
|
||||
cmd.append('-h')
|
||||
cmd.append('-n %s' % key)
|
||||
cmd.append('-v %s' % value)
|
||||
cmd.append('-n')
|
||||
cmd.append(key)
|
||||
cmd.append('-v')
|
||||
cmd.append(value)
|
||||
cmd.append(path)
|
||||
|
||||
return _run_xattr(module, cmd)
|
||||
@@ -139,7 +142,8 @@ def rm_xattr(module, path, key, follow):
|
||||
cmd = [module.get_bin_path('setfattr', True)]
|
||||
if not follow:
|
||||
cmd.append('-h')
|
||||
cmd.append('-x %s' % key)
|
||||
cmd.append('-x')
|
||||
cmd.append(key)
|
||||
cmd.append(path)
|
||||
|
||||
return _run_xattr(module, cmd, False)
|
||||
@@ -148,7 +152,7 @@ def rm_xattr(module, path, key, follow):
|
||||
def _run_xattr(module, cmd, check_rc=True):
|
||||
|
||||
try:
|
||||
(rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc)
|
||||
(rc, out, err) = module.run_command(cmd, check_rc=check_rc)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="%s!" % to_native(e))
|
||||
|
||||
|
||||
@@ -356,9 +356,10 @@ import os
|
||||
import re
|
||||
import traceback
|
||||
|
||||
from distutils.version import LooseVersion
|
||||
from io import BytesIO
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
LXML_IMP_ERR = None
|
||||
try:
|
||||
from lxml import etree, objectify
|
||||
|
||||
1
plugins/modules/gitlab_branch.py
Symbolic link
1
plugins/modules/gitlab_branch.py
Symbolic link
@@ -0,0 +1 @@
|
||||
source_control/gitlab/gitlab_branch.py
|
||||
@@ -27,11 +27,14 @@ options:
|
||||
choices: ["absent", "present"]
|
||||
type: str
|
||||
dynamicupdate:
|
||||
description: Apply dynamic update to zone
|
||||
required: false
|
||||
default: "false"
|
||||
choices: ["false", "true"]
|
||||
type: str
|
||||
description: Apply dynamic update to zone.
|
||||
default: false
|
||||
type: bool
|
||||
allowsyncptr:
|
||||
description: Allow synchronization of forward and reverse records in the zone.
|
||||
default: false
|
||||
type: bool
|
||||
version_added: 4.3.0
|
||||
extends_documentation_fragment:
|
||||
- community.general.ipa.documentation
|
||||
|
||||
@@ -60,6 +63,14 @@ EXAMPLES = r'''
|
||||
ipa_user: admin
|
||||
ipa_pass: topsecret
|
||||
state: absent
|
||||
|
||||
- name: Ensure dns zone is present and is allowing sync
|
||||
community.general.ipa_dnszone:
|
||||
ipa_host: spider.example.com
|
||||
ipa_pass: Passw0rd!
|
||||
state: present
|
||||
zone_name: example.com
|
||||
allowsyncptr: true
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
@@ -79,25 +90,37 @@ class DNSZoneIPAClient(IPAClient):
|
||||
super(DNSZoneIPAClient, self).__init__(module, host, port, protocol)
|
||||
|
||||
def dnszone_find(self, zone_name, details=None):
|
||||
itens = {'idnsname': zone_name}
|
||||
items = {'all': 'true',
|
||||
'idnsname': zone_name, }
|
||||
if details is not None:
|
||||
itens.update(details)
|
||||
items.update(details)
|
||||
|
||||
return self._post_json(
|
||||
method='dnszone_find',
|
||||
name=zone_name,
|
||||
item=itens
|
||||
item=items
|
||||
)
|
||||
|
||||
def dnszone_add(self, zone_name=None, details=None):
|
||||
itens = {}
|
||||
items = {}
|
||||
if details is not None:
|
||||
itens.update(details)
|
||||
items.update(details)
|
||||
|
||||
return self._post_json(
|
||||
method='dnszone_add',
|
||||
name=zone_name,
|
||||
item=itens
|
||||
item=items
|
||||
)
|
||||
|
||||
def dnszone_mod(self, zone_name=None, details=None):
|
||||
items = {}
|
||||
if details is not None:
|
||||
items.update(details)
|
||||
|
||||
return self._post_json(
|
||||
method='dnszone_mod',
|
||||
name=zone_name,
|
||||
item=items
|
||||
)
|
||||
|
||||
def dnszone_del(self, zone_name=None, record_name=None, details=None):
|
||||
@@ -109,18 +132,29 @@ def ensure(module, client):
|
||||
zone_name = module.params['zone_name']
|
||||
state = module.params['state']
|
||||
dynamicupdate = module.params['dynamicupdate']
|
||||
|
||||
ipa_dnszone = client.dnszone_find(zone_name)
|
||||
allowsyncptr = module.params['allowsyncptr']
|
||||
|
||||
changed = False
|
||||
|
||||
# does zone exist
|
||||
ipa_dnszone = client.dnszone_find(zone_name)
|
||||
|
||||
if state == 'present':
|
||||
if not ipa_dnszone:
|
||||
|
||||
changed = True
|
||||
if not module.check_mode:
|
||||
client.dnszone_add(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate})
|
||||
client.dnszone_add(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate, 'idnsallowsyncptr': allowsyncptr})
|
||||
elif ipa_dnszone['idnsallowdynupdate'][0] != str(dynamicupdate).upper() or ipa_dnszone['idnsallowsyncptr'][0] != str(allowsyncptr).upper():
|
||||
changed = True
|
||||
if not module.check_mode:
|
||||
client.dnszone_mod(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate, 'idnsallowsyncptr': allowsyncptr})
|
||||
else:
|
||||
changed = False
|
||||
|
||||
# state is absent
|
||||
else:
|
||||
# check for generic zone existence
|
||||
if ipa_dnszone:
|
||||
changed = True
|
||||
if not module.check_mode:
|
||||
@@ -133,7 +167,8 @@ def main():
|
||||
argument_spec = ipa_argument_spec()
|
||||
argument_spec.update(zone_name=dict(type='str', required=True),
|
||||
state=dict(type='str', default='present', choices=['present', 'absent']),
|
||||
dynamicupdate=dict(type='str', required=False, default='false', choices=['true', 'false']),
|
||||
dynamicupdate=dict(type='bool', required=False, default=False),
|
||||
allowsyncptr=dict(type='bool', required=False, default=False),
|
||||
)
|
||||
|
||||
module = AnsibleModule(argument_spec=argument_spec,
|
||||
|
||||
@@ -74,11 +74,12 @@ subca:
|
||||
type: dict
|
||||
'''
|
||||
|
||||
from distutils.version import LooseVersion
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
|
||||
class SubCAIPAClient(IPAClient):
|
||||
def __init__(self, module, host, port, protocol):
|
||||
|
||||
132
plugins/modules/identity/keycloak/keycloak_realm_info.py
Normal file
132
plugins/modules/identity/keycloak/keycloak_realm_info.py
Normal file
@@ -0,0 +1,132 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: keycloak_realm_info
|
||||
|
||||
short_description: Allows obtaining Keycloak realm public information via Keycloak API
|
||||
|
||||
version_added: 4.3.0
|
||||
|
||||
description:
|
||||
- This module allows you to get Keycloak realm public information via the Keycloak REST API.
|
||||
|
||||
- The names of module options are snake_cased versions of the camelCase ones found in the
|
||||
Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
|
||||
|
||||
- Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
|
||||
be returned that way by this module. You may pass single values for attributes when calling the module,
|
||||
and this will be translated into a list suitable for the API.
|
||||
|
||||
options:
|
||||
auth_keycloak_url:
|
||||
description:
|
||||
- URL to the Keycloak instance.
|
||||
type: str
|
||||
required: true
|
||||
aliases:
|
||||
- url
|
||||
validate_certs:
|
||||
description:
|
||||
- Verify TLS certificates (do not disable this in production).
|
||||
type: bool
|
||||
default: yes
|
||||
|
||||
realm:
|
||||
type: str
|
||||
description:
|
||||
- They Keycloak realm ID.
|
||||
default: 'master'
|
||||
|
||||
author:
|
||||
- Fynn Chen (@fynncfchen)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get a Keycloak public key
|
||||
community.general.keycloak_realm_info:
|
||||
realm: MyCustomRealm
|
||||
auth_keycloak_url: https://auth.example.com/auth
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
msg:
|
||||
description: Message as to what action was taken.
|
||||
returned: always
|
||||
type: str
|
||||
|
||||
realm_info:
|
||||
description:
|
||||
- Representation of the realm public infomation.
|
||||
returned: always
|
||||
type: dict
|
||||
contains:
|
||||
realm:
|
||||
description: Realm ID.
|
||||
type: str
|
||||
returned: always
|
||||
sample: MyRealm
|
||||
public_key:
|
||||
description: Public key of the realm.
|
||||
type: str
|
||||
returned: always
|
||||
sample: MIIBIjANBgkqhkiG9w0BAQEFAAO...
|
||||
token-service:
|
||||
description: Token endpoint URL.
|
||||
type: str
|
||||
returned: always
|
||||
sample: https://auth.example.com/auth/realms/MyRealm/protocol/openid-connect
|
||||
account-service:
|
||||
description: Account console URL.
|
||||
type: str
|
||||
returned: always
|
||||
sample: https://auth.example.com/auth/realms/MyRealm/account
|
||||
tokens-not-before:
|
||||
description: The token not before.
|
||||
type: int
|
||||
returned: always
|
||||
sample: 0
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
Module execution
|
||||
|
||||
:return:
|
||||
"""
|
||||
argument_spec = dict(
|
||||
auth_keycloak_url=dict(type='str', aliases=['url'], required=True, no_log=False),
|
||||
validate_certs=dict(type='bool', default=True),
|
||||
|
||||
realm=dict(default='master'),
|
||||
)
|
||||
|
||||
module = AnsibleModule(argument_spec=argument_spec,
|
||||
supports_check_mode=True)
|
||||
|
||||
result = dict(changed=False, msg='', realm_info='')
|
||||
|
||||
kc = KeycloakAPI(module, {})
|
||||
|
||||
realm = module.params.get('realm')
|
||||
|
||||
realm_info = kc.get_realm_info_by_id(realm=realm)
|
||||
|
||||
result['realm_info'] = realm_info
|
||||
result['msg'] = 'Get realm public info successful for ID {realm}'.format(realm=realm)
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -64,6 +64,7 @@ options:
|
||||
choices:
|
||||
- ldap
|
||||
- kerberos
|
||||
- sssd
|
||||
|
||||
provider_type:
|
||||
description:
|
||||
@@ -83,9 +84,10 @@ options:
|
||||
config:
|
||||
description:
|
||||
- Dict specifying the configuration options for the provider; the contents differ depending on
|
||||
the value of I(provider_id). Examples are given below for C(ldap) and C(kerberos). It is easiest
|
||||
to obtain valid config values by dumping an already-existing user federation configuration
|
||||
through check-mode in the I(existing) field.
|
||||
the value of I(provider_id). Examples are given below for C(ldap), C(kerberos) and C(sssd).
|
||||
It is easiest to obtain valid config values by dumping an already-existing user federation
|
||||
configuration through check-mode in the I(existing) field.
|
||||
- The value C(sssd) has been supported since community.general 4.2.0.
|
||||
type: dict
|
||||
suboptions:
|
||||
enabled:
|
||||
@@ -531,6 +533,22 @@ EXAMPLES = '''
|
||||
allowPasswordAuthentication: false
|
||||
updateProfileFirstLogin: false
|
||||
|
||||
- name: Create sssd user federation
|
||||
community.general.keycloak_user_federation:
|
||||
auth_keycloak_url: https://keycloak.example.com/auth
|
||||
auth_realm: master
|
||||
auth_username: admin
|
||||
auth_password: password
|
||||
realm: my-realm
|
||||
name: my-sssd
|
||||
state: present
|
||||
provider_id: sssd
|
||||
provider_type: org.keycloak.storage.UserStorageProvider
|
||||
config:
|
||||
priority: 0
|
||||
enabled: true
|
||||
cachePolicy: DEFAULT
|
||||
|
||||
- name: Delete user federation
|
||||
community.general.keycloak_user_federation:
|
||||
auth_keycloak_url: https://keycloak.example.com/auth
|
||||
@@ -765,7 +783,7 @@ def main():
|
||||
realm=dict(type='str', default='master'),
|
||||
id=dict(type='str'),
|
||||
name=dict(type='str'),
|
||||
provider_id=dict(type='str', aliases=['providerId'], choices=['ldap', 'kerberos']),
|
||||
provider_id=dict(type='str', aliases=['providerId'], choices=['ldap', 'kerberos', 'sssd']),
|
||||
provider_type=dict(type='str', aliases=['providerType'], default='org.keycloak.storage.UserStorageProvider'),
|
||||
parent_id=dict(type='str', aliases=['parentId']),
|
||||
mappers=dict(type='list', elements='dict', options=mapper_spec),
|
||||
@@ -843,8 +861,8 @@ def main():
|
||||
|
||||
# special handling of mappers list to allow change detection
|
||||
if module.params.get('mappers') is not None:
|
||||
if module.params['provider_id'] == 'kerberos':
|
||||
module.fail_json(msg='Cannot configure mappers for Kerberos federations.')
|
||||
if module.params['provider_id'] in ['kerberos', 'sssd']:
|
||||
module.fail_json(msg='Cannot configure mappers for {type} provider.'.format(type=module.params['provider_id']))
|
||||
for change in module.params['mappers']:
|
||||
change = dict((k, v) for k, v in change.items() if change[k] is not None)
|
||||
if change.get('id') is None and change.get('name') is None:
|
||||
|
||||
1
plugins/modules/ilo_redfish_config.py
Symbolic link
1
plugins/modules/ilo_redfish_config.py
Symbolic link
@@ -0,0 +1 @@
|
||||
remote_management/redfish/ilo_redfish_config.py
|
||||
1
plugins/modules/ilo_redfish_info.py
Symbolic link
1
plugins/modules/ilo_redfish_info.py
Symbolic link
@@ -0,0 +1 @@
|
||||
remote_management/redfish/ilo_redfish_info.py
|
||||
1
plugins/modules/keycloak_realm_info.py
Symbolic link
1
plugins/modules/keycloak_realm_info.py
Symbolic link
@@ -0,0 +1 @@
|
||||
./identity/keycloak/keycloak_realm_info.py
|
||||
@@ -143,7 +143,8 @@ annotation:
|
||||
import json
|
||||
import time
|
||||
import traceback
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
|
||||
@@ -63,7 +63,7 @@ def query_log_status(module, le_path, path, state="present"):
|
||||
""" Returns whether a log is followed or not. """
|
||||
|
||||
if state == "present":
|
||||
rc, out, err = module.run_command("%s followed %s" % (le_path, path))
|
||||
rc, out, err = module.run_command([le_path, "followed", path])
|
||||
if rc == 0:
|
||||
return True
|
||||
|
||||
@@ -87,7 +87,7 @@ def follow_log(module, le_path, logs, name=None, logtype=None):
|
||||
cmd.extend(['--name', name])
|
||||
if logtype:
|
||||
cmd.extend(['--type', logtype])
|
||||
rc, out, err = module.run_command(' '.join(cmd))
|
||||
rc, out, err = module.run_command(cmd)
|
||||
|
||||
if not query_log_status(module, le_path, log):
|
||||
module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip()))
|
||||
|
||||
@@ -82,7 +82,7 @@ PACKAGE_STATE_MAP = dict(
|
||||
|
||||
def is_plugin_present(module, plugin_bin, plugin_name):
|
||||
cmd_args = [plugin_bin, "list", plugin_name]
|
||||
rc, out, err = module.run_command(" ".join(cmd_args))
|
||||
rc, out, err = module.run_command(cmd_args)
|
||||
return rc == 0
|
||||
|
||||
|
||||
|
||||
@@ -122,7 +122,7 @@ class Monit(object):
|
||||
return self._monit_version
|
||||
|
||||
def _get_monit_version(self):
|
||||
rc, out, err = self.module.run_command('%s -V' % self.monit_bin_path, check_rc=True)
|
||||
rc, out, err = self.module.run_command([self.monit_bin_path, '-V'], check_rc=True)
|
||||
version_line = out.split('\n')[0]
|
||||
raw_version = re.search(r"([0-9]+\.){1,2}([0-9]+)?", version_line).group()
|
||||
return raw_version, tuple(map(int, raw_version.split('.')))
|
||||
@@ -140,7 +140,7 @@ class Monit(object):
|
||||
|
||||
@property
|
||||
def command_args(self):
|
||||
return "-B" if self.monit_version() > (5, 18) else ""
|
||||
return ["-B"] if self.monit_version() > (5, 18) else []
|
||||
|
||||
def get_status(self, validate=False):
|
||||
"""Return the status of the process in monit.
|
||||
@@ -149,7 +149,7 @@ class Monit(object):
|
||||
"""
|
||||
monit_command = "validate" if validate else "status"
|
||||
check_rc = False if validate else True # 'validate' always has rc = 1
|
||||
command = ' '.join([self.monit_bin_path, monit_command, self.command_args, self.process_name])
|
||||
command = [self.monit_bin_path, monit_command] + self.command_args + [self.process_name]
|
||||
rc, out, err = self.module.run_command(command, check_rc=check_rc)
|
||||
return self._parse_status(out, err)
|
||||
|
||||
@@ -182,7 +182,8 @@ class Monit(object):
|
||||
return status
|
||||
|
||||
def is_process_present(self):
|
||||
rc, out, err = self.module.run_command('%s summary %s' % (self.monit_bin_path, self.command_args), check_rc=True)
|
||||
command = [self.monit_bin_path, 'summary'] + self.command_args
|
||||
rc, out, err = self.module.run_command(command, check_rc=True)
|
||||
return bool(re.findall(r'\b%s\b' % self.process_name, out))
|
||||
|
||||
def is_process_running(self):
|
||||
@@ -190,7 +191,7 @@ class Monit(object):
|
||||
|
||||
def run_command(self, command):
|
||||
"""Runs a monit command, and returns the new status."""
|
||||
return self.module.run_command('%s %s %s' % (self.monit_bin_path, command, self.process_name), check_rc=True)
|
||||
return self.module.run_command([self.monit_bin_path, command, self.process_name], check_rc=True)
|
||||
|
||||
def wait_for_status_change(self, current_status):
|
||||
running_status = self.get_status()
|
||||
@@ -228,7 +229,7 @@ class Monit(object):
|
||||
return current_status
|
||||
|
||||
def reload(self):
|
||||
rc, out, err = self.module.run_command('%s reload' % self.monit_bin_path)
|
||||
rc, out, err = self.module.run_command([self.monit_bin_path, 'reload'])
|
||||
if rc != 0:
|
||||
self.exit_fail('monit reload failed', stdout=out, stderr=err)
|
||||
self.exit_success(state='reloaded')
|
||||
|
||||
@@ -148,9 +148,10 @@ EXAMPLES = '''
|
||||
RETURN = r"""# """
|
||||
|
||||
import traceback
|
||||
from distutils.version import LooseVersion
|
||||
import re
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
|
||||
class DNSimpleV1():
|
||||
"""class which uses dnsimple-python < 2"""
|
||||
|
||||
335
plugins/modules/net_tools/dnsimple_info.py
Normal file
335
plugins/modules/net_tools/dnsimple_info.py
Normal file
@@ -0,0 +1,335 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: Edward Hilgendorf, <edward@hilgendorf.me>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: dnsimple_info
|
||||
|
||||
short_description: Pull basic info from DNSimple API
|
||||
|
||||
version_added: "4.2.0"
|
||||
|
||||
description: Retrieve existing records and domains from DNSimple API.
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The domain name to retrieve info from.
|
||||
- Will return all associated records for this domain if specified.
|
||||
- If not specified, will return all domains associated with the account ID.
|
||||
type: str
|
||||
|
||||
account_id:
|
||||
description: The account ID to query.
|
||||
required: true
|
||||
type: str
|
||||
|
||||
api_key:
|
||||
description: The API key to use.
|
||||
required: true
|
||||
type: str
|
||||
|
||||
record:
|
||||
description:
|
||||
- The record to find.
|
||||
- If specified, only this record will be returned instead of all records.
|
||||
required: false
|
||||
type: str
|
||||
|
||||
sandbox:
|
||||
description: Whether or not to use sandbox environment.
|
||||
required: false
|
||||
default: false
|
||||
type: bool
|
||||
|
||||
author:
|
||||
- Edward Hilgendorf (@edhilgendorf)
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Get all domains from an account
|
||||
community.general.dnsimple_info:
|
||||
account_id: "1234"
|
||||
api_key: "1234"
|
||||
|
||||
- name: Get all records from a domain
|
||||
community.general.dnsimple_info:
|
||||
name: "example.com"
|
||||
account_id: "1234"
|
||||
api_key: "1234"
|
||||
|
||||
- name: Get all info from a matching record
|
||||
community.general.dnsimple_info:
|
||||
name: "example.com"
|
||||
record: "subdomain"
|
||||
account_id: "1234"
|
||||
api_key: "1234"
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
dnsimple_domain_info:
|
||||
description: Returns a list of dictionaries of all domains associated with the supplied account ID.
|
||||
type: list
|
||||
elements: dict
|
||||
returned: success when I(name) is not specified
|
||||
sample:
|
||||
- account_id: 1234
|
||||
created_at: '2021-10-16T21:25:42Z'
|
||||
id: 123456
|
||||
last_transferred_at:
|
||||
name: example.com
|
||||
reverse: false
|
||||
secondary: false
|
||||
updated_at: '2021-11-10T20:22:50Z'
|
||||
contains:
|
||||
account_id:
|
||||
description: The account ID.
|
||||
type: int
|
||||
created_at:
|
||||
description: When the domain entry was created.
|
||||
type: str
|
||||
id:
|
||||
description: ID of the entry.
|
||||
type: int
|
||||
last_transferred_at:
|
||||
description: Date the domain was transferred, or empty if not.
|
||||
type: str
|
||||
name:
|
||||
description: Name of the record.
|
||||
type: str
|
||||
reverse:
|
||||
description: Whether or not it is a reverse zone record.
|
||||
type: bool
|
||||
updated_at:
|
||||
description: When the domain entry was updated.
|
||||
type: str
|
||||
|
||||
dnsimple_records_info:
|
||||
description: Returns a list of dictionaries with all records for the domain supplied.
|
||||
type: list
|
||||
elements: dict
|
||||
returned: success when I(name) is specified, but I(record) is not
|
||||
sample:
|
||||
- content: ns1.dnsimple.com admin.dnsimple.com
|
||||
created_at: '2021-10-16T19:07:34Z'
|
||||
id: 12345
|
||||
name: 'catheadbiscuit'
|
||||
parent_id: null
|
||||
priority: null
|
||||
regions:
|
||||
- global
|
||||
system_record: true
|
||||
ttl: 3600
|
||||
type: SOA
|
||||
updated_at: '2021-11-15T23:55:51Z'
|
||||
zone_id: example.com
|
||||
contains:
|
||||
content:
|
||||
description: Content of the returned record.
|
||||
type: str
|
||||
created_at:
|
||||
description: When the domain entry was created.
|
||||
type: str
|
||||
id:
|
||||
description: ID of the entry.
|
||||
type: int
|
||||
name:
|
||||
description: Name of the record.
|
||||
type: str
|
||||
parent_id:
|
||||
description: Parent record or null.
|
||||
type: int
|
||||
priority:
|
||||
description: Priority setting of the record.
|
||||
type: str
|
||||
regions:
|
||||
description: List of regions where the record is available.
|
||||
type: list
|
||||
system_record:
|
||||
description: Whether or not it is a system record.
|
||||
type: bool
|
||||
ttl:
|
||||
description: Record TTL.
|
||||
type: int
|
||||
type:
|
||||
description: Record type.
|
||||
type: str
|
||||
updated_at:
|
||||
description: When the domain entry was updated.
|
||||
type: str
|
||||
zone_id:
|
||||
description: ID of the zone that the record is associated with.
|
||||
type: str
|
||||
dnsimple_record_info:
|
||||
description: Returns a list of dictionaries that match the record supplied.
|
||||
returned: success when I(name) and I(record) are specified
|
||||
type: list
|
||||
elements: dict
|
||||
sample:
|
||||
- content: 1.2.3.4
|
||||
created_at: '2021-11-15T23:55:51Z'
|
||||
id: 123456
|
||||
name: catheadbiscuit
|
||||
parent_id: null
|
||||
priority: null
|
||||
regions:
|
||||
- global
|
||||
system_record: false
|
||||
ttl: 3600
|
||||
type: A
|
||||
updated_at: '2021-11-15T23:55:51Z'
|
||||
zone_id: example.com
|
||||
contains:
|
||||
content:
|
||||
description: Content of the returned record.
|
||||
type: str
|
||||
created_at:
|
||||
description: When the domain entry was created.
|
||||
type: str
|
||||
id:
|
||||
description: ID of the entry.
|
||||
type: int
|
||||
name:
|
||||
description: Name of the record.
|
||||
type: str
|
||||
parent_id:
|
||||
description: Parent record or null.
|
||||
type: int
|
||||
priority:
|
||||
description: Priority setting of the record.
|
||||
type: str
|
||||
regions:
|
||||
description: List of regions where the record is available.
|
||||
type: list
|
||||
system_record:
|
||||
description: Whether or not it is a system record.
|
||||
type: bool
|
||||
ttl:
|
||||
description: Record TTL.
|
||||
type: int
|
||||
type:
|
||||
description: Record type.
|
||||
type: str
|
||||
updated_at:
|
||||
description: When the domain entry was updated.
|
||||
type: str
|
||||
zone_id:
|
||||
description: ID of the zone that the record is associated with.
|
||||
type: str
|
||||
'''
|
||||
|
||||
import traceback
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.basic import missing_required_lib
|
||||
import json
|
||||
|
||||
try:
|
||||
from requests import Request, Session
|
||||
except ImportError:
|
||||
HAS_ANOTHER_LIBRARY = False
|
||||
ANOTHER_LIBRARY_IMPORT_ERROR = traceback.format_exc()
|
||||
else:
|
||||
HAS_ANOTHER_LIBRARY = True
|
||||
|
||||
|
||||
def build_url(account, key, is_sandbox):
    """Build a prepared request targeting the account root of the DNSimple API.

    :param account: account ID, appended to the v2 API base URL
    :param key: API key, sent as a bearer token
    :param is_sandbox: when true, target the sandbox endpoint instead of production
    :return: a prepared ``requests`` request object (URL + auth headers only)
    """
    # Sandbox and production differ only in the host name.
    base = 'https://api{sandbox}.dnsimple.com/'.format(sandbox=".sandbox" if is_sandbox else "")
    request = Request(
        url=base + 'v2/' + account,
        headers={
            'Accept': 'application/json',
            'Authorization': 'Bearer ' + key,
        },
    )
    return request.prepare()
|
||||
|
||||
|
||||
def iterate_data(module, request_object):
    """Send the prepared request and gather the data across all result pages.

    :param module: AnsibleModule instance, used only to report API failures
    :param request_object: prepared request whose URL already ends in a query
        string (so ``&page=N`` can be appended for pagination)
    :return: list of all ``data`` entries from every page of the response
    """
    base_url = request_object.url
    response = Session().send(request_object)
    payload = response.json()  # parse the body once instead of per access

    # A response without pagination metadata is an API error (bad ID/key/env).
    if 'pagination' not in payload:
        module.fail_json(msg='API Call failed, check ID, key and sandbox values')

    data = payload["data"]
    total_pages = int(payload["pagination"]["total_pages"])
    # Page 1 is what we already have; fetch any remaining pages.
    for page in range(2, total_pages + 1):
        request_object.url = base_url + '&page=' + str(page)
        data += Session().send(request_object).json()["data"]
    return data
|
||||
|
||||
|
||||
def record_info(dnsimple_mod, req_obj):
    """Return the records matching the requested record name within the zone."""
    zone = dnsimple_mod.params["name"]
    record = dnsimple_mod.params["record"]
    req_obj.method = 'GET'
    req_obj.url = req_obj.url + '/zones/' + zone + '/records?name=' + record
    return iterate_data(dnsimple_mod, req_obj)
|
||||
|
||||
|
||||
def domain_info(dnsimple_mod, req_obj):
    """Return every record of the zone named by the ``name`` parameter."""
    zone = dnsimple_mod.params["name"]
    req_obj.method = 'GET'
    req_obj.url = req_obj.url + '/zones/' + zone + '/records?per_page=100'
    return iterate_data(dnsimple_mod, req_obj)
|
||||
|
||||
|
||||
def account_info(dnsimple_mod, req_obj):
    """Return all zones (domains) that belong to the configured account."""
    req_obj.method = 'GET'
    req_obj.url = req_obj.url + '/zones/?per_page=100'
    return iterate_data(dnsimple_mod, req_obj)
|
||||
|
||||
|
||||
def main():
    """Entry point: query DNSimple and exit with domain, records, or record info."""
    # Arguments/parameters a user can pass to the module.
    fields = {
        "account_id": {"required": True, "type": "str"},
        "api_key": {"required": True, "type": "str", "no_log": True},
        "name": {"required": False, "type": "str"},
        "record": {"required": False, "type": "str"},
        "sandbox": {"required": False, "type": "bool", "default": False}
    }

    # Info module: never changes remote state.
    result = {
        'changed': False
    }

    module = AnsibleModule(
        argument_spec=fields,
        supports_check_mode=True
    )

    # Check for the library BEFORE calling build_url: build_url needs
    # requests.Request, so a missing library would otherwise crash with a
    # NameError. Also report it as a failure (fail_json, not exit_json) and
    # name the library that is actually missing.
    if not HAS_ANOTHER_LIBRARY:
        module.fail_json(
            msg=missing_required_lib('requests'),
            exception=ANOTHER_LIBRARY_IMPORT_ERROR)

    params = module.params
    req = build_url(params['account_id'],
                    params['api_key'],
                    params['sandbox'])

    # At minimum we need account and key (argument_spec enforces presence,
    # this additionally rejects empty strings).
    if params['account_id'] and params['api_key']:
        if params['name'] and params['record']:
            # A specific record within a domain.
            result['dnsimple_record_info'] = record_info(module, req)
        elif params['name']:
            # All records of one domain.
            result['dnsimple_records_info'] = domain_info(module, req)
        else:
            # All domains of the account.
            result['dnsimple_domain_info'] = account_info(module, req)
        module.exit_json(**result)
    else:
        module.fail_json(msg="Need at least account_id and api_key")


if __name__ == '__main__':
    main()
|
||||
@@ -76,7 +76,7 @@ class Namespace(object):
|
||||
|
||||
def exists(self):
|
||||
'''Check if the namespace already exists'''
|
||||
rc, out, err = self.module.run_command('ip netns list')
|
||||
rc, out, err = self.module.run_command(['ip', 'netns', 'list'])
|
||||
if rc != 0:
|
||||
self.module.fail_json(msg=to_text(err))
|
||||
return self.name in out
|
||||
|
||||
@@ -55,8 +55,10 @@ options:
|
||||
- Type C(generic) is added in Ansible 2.5.
|
||||
- Type C(infiniband) is added in community.general 2.0.0.
|
||||
- Type C(gsm) is added in community.general 3.7.0.
|
||||
- Type C(wireguard) is added in community.general 4.3.0.
|
||||
type: str
|
||||
choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi, gsm ]
|
||||
choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi, gsm,
|
||||
wireguard ]
|
||||
mode:
|
||||
description:
|
||||
- This is the type of device or network connection that you wish to create for a bond or bridge.
|
||||
@@ -69,10 +71,11 @@ options:
|
||||
type: str
|
||||
ip4:
|
||||
description:
|
||||
- The IPv4 address to this interface.
|
||||
- Use the format C(192.0.2.24/24).
|
||||
- List of IPv4 addresses to this interface.
|
||||
- Use the format C(192.0.2.24/24) or C(192.0.2.24).
|
||||
- If defined and I(method4) is not specified, automatically set C(ipv4.method) to C(manual).
|
||||
type: str
|
||||
type: list
|
||||
elements: str
|
||||
gw4:
|
||||
description:
|
||||
- The IPv4 gateway for this interface.
|
||||
@@ -142,10 +145,11 @@ options:
|
||||
version_added: 3.3.0
|
||||
ip6:
|
||||
description:
|
||||
- The IPv6 address to this interface.
|
||||
- Use the format C(abbe::cafe).
|
||||
- List of IPv6 addresses to this interface.
|
||||
- Use the format C(abbe::cafe/128) or C(abbe::cafe).
|
||||
- If defined and I(method6) is not specified, automatically set C(ipv6.method) to C(manual).
|
||||
type: str
|
||||
type: list
|
||||
elements: str
|
||||
gw6:
|
||||
description:
|
||||
- The IPv6 gateway for this interface.
|
||||
@@ -182,6 +186,18 @@ options:
|
||||
type: str
|
||||
choices: [ignore, auto, dhcp, link-local, manual, shared, disabled]
|
||||
version_added: 2.2.0
|
||||
ip_privacy6:
|
||||
description:
|
||||
- If enabled, it makes the kernel generate a temporary IPv6 address in addition to the public one.
|
||||
type: str
|
||||
choices: [disabled, prefer-public-addr, prefer-temp-addr, unknown]
|
||||
version_added: 4.2.0
|
||||
addr_gen_mode6:
|
||||
description:
|
||||
- Configure method for creating the address for use with IPv6 Stateless Address Autoconfiguration.
|
||||
type: str
|
||||
choices: [eui64, stable-privacy]
|
||||
version_added: 4.2.0
|
||||
mtu:
|
||||
description:
|
||||
- The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created.
|
||||
@@ -740,6 +756,62 @@ options:
|
||||
- The username used to authenticate with the network, if required.
|
||||
- Many providers do not require a username, or accept any username.
|
||||
- But if a username is required, it is specified here.
|
||||
wireguard:
|
||||
description:
|
||||
- The configuration of the Wireguard connection.
|
||||
- Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host.
|
||||
- 'An up-to-date list of supported attributes can be found here:
|
||||
U(https://networkmanager.dev/docs/api/latest/settings-wireguard.html).'
|
||||
- 'For instance to configure a listen port:
|
||||
C({listen-port: 12345}).'
|
||||
type: dict
|
||||
version_added: 4.3.0
|
||||
suboptions:
|
||||
fwmark:
|
||||
description:
|
||||
- The 32-bit fwmark for outgoing packets.
|
||||
- The use of fwmark is optional and is by default off. Setting it to 0 disables it.
|
||||
- Note that I(wireguard.ip4-auto-default-route) or I(wireguard.ip6-auto-default-route) enabled, implies to automatically choose a fwmark.
|
||||
type: int
|
||||
ip4-auto-default-route:
|
||||
description:
|
||||
- Whether to enable special handling of the IPv4 default route.
|
||||
- If enabled, the IPv4 default route from I(wireguard.peer-routes) will be placed to a dedicated routing-table and two policy
|
||||
routing rules will be added.
|
||||
- The fwmark number is also used as routing-table for the default-route, and if fwmark is zero, an unused fwmark/table is chosen
|
||||
automatically. This corresponds to what wg-quick does with Table=auto and what WireGuard calls "Improved Rule-based Routing".
|
||||
type: bool
|
||||
ip6-auto-default-route:
|
||||
description:
|
||||
- Like I(wireguard.ip4-auto-default-route), but for the IPv6 default route.
|
||||
type: bool
|
||||
listen-port:
|
||||
description: The WireGuard connection listen-port. If not specified, the port will be chosen randomly when the
|
||||
interface comes up.
|
||||
type: int
|
||||
mtu:
|
||||
description:
|
||||
- If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple fragments.
|
||||
- If zero a default MTU is used. Note that contrary to wg-quick's MTU setting, this does not take into account the current routes
|
||||
at the time of activation.
|
||||
type: int
|
||||
peer-routes:
|
||||
description:
|
||||
- Whether to automatically add routes for the AllowedIPs ranges of the peers.
|
||||
- If C(true) (the default), NetworkManager will automatically add routes in the routing tables according to C(ipv4.route-table) and
|
||||
C(ipv6.route-table). Usually you want this automatism enabled.
|
||||
- If C(false), no such routes are added automatically. In this case, the user may want to configure static routes in C(ipv4.routes)
|
||||
and C(ipv6.routes), respectively.
|
||||
- Note that if the peer's AllowedIPs is C(0.0.0.0/0) or C(::/0) and the profile's C(ipv4.never-default) or C(ipv6.never-default)
|
||||
setting is enabled, the peer route for this peer won't be added automatically.
|
||||
type: bool
|
||||
private-key:
|
||||
description: The 256 bit private-key in base64 encoding.
|
||||
type: str
|
||||
private-key-flags:
|
||||
description: C(NMSettingSecretFlags) indicating how to handle the I(wireguard.private-key) property.
|
||||
type: int
|
||||
choices: [ 0, 1, 2 ]
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
@@ -822,7 +894,9 @@ EXAMPLES = r'''
|
||||
# nmcli_ethernet:
|
||||
# - conn_name: em1
|
||||
# ifname: em1
|
||||
# ip4: '{{ tenant_ip }}'
|
||||
# ip4:
|
||||
# - '{{ tenant_ip }}'
|
||||
# - '{{ second_tenant_ip }}'
|
||||
# gw4: '{{ tenant_gw }}'
|
||||
# - conn_name: em2
|
||||
# ifname: em2
|
||||
@@ -844,6 +918,7 @@ EXAMPLES = r'''
|
||||
# storage_ip: "192.0.2.91/23"
|
||||
# external_ip: "198.51.100.23/21"
|
||||
# tenant_ip: "203.0.113.77/23"
|
||||
# second_tenant_ip: "204.0.113.77/23"
|
||||
# ```
|
||||
|
||||
|
||||
@@ -997,6 +1072,26 @@ EXAMPLES = r'''
|
||||
type: ethernet
|
||||
state: present
|
||||
|
||||
- name: Add second ip4 address
|
||||
community.general.nmcli:
|
||||
conn_name: my-eth1
|
||||
ifname: eth1
|
||||
type: ethernet
|
||||
ip4:
|
||||
- 192.0.2.100/24
|
||||
- 192.0.3.100/24
|
||||
state: present
|
||||
|
||||
- name: Add second ip6 address
|
||||
community.general.nmcli:
|
||||
conn_name: my-eth1
|
||||
ifname: eth1
|
||||
type: ethernet
|
||||
ip6:
|
||||
- 2001:db8::cafe
|
||||
- 2002:db8::cafe
|
||||
state: present
|
||||
|
||||
- name: Add VxLan
|
||||
community.general.nmcli:
|
||||
type: vxlan
|
||||
@@ -1089,6 +1184,17 @@ EXAMPLES = r'''
|
||||
autoconnect: true
|
||||
state: present
|
||||
|
||||
- name: Create a wireguard connection
|
||||
community.general.nmcli:
|
||||
type: wireguard
|
||||
conn_name: my-wg-provider
|
||||
ifname: mywg0
|
||||
wireguard:
|
||||
listen-port: 51820
|
||||
private-key: my-private-key
|
||||
autoconnect: true
|
||||
state: present
|
||||
|
||||
'''
|
||||
|
||||
RETURN = r"""#
|
||||
@@ -1157,6 +1263,8 @@ class Nmcli(object):
|
||||
self.dns6_search = module.params['dns6_search']
|
||||
self.dns6_ignore_auto = module.params['dns6_ignore_auto']
|
||||
self.method6 = module.params['method6']
|
||||
self.ip_privacy6 = module.params['ip_privacy6']
|
||||
self.addr_gen_mode6 = module.params['addr_gen_mode6']
|
||||
self.mtu = module.params['mtu']
|
||||
self.stp = module.params['stp']
|
||||
self.priority = module.params['priority']
|
||||
@@ -1197,10 +1305,11 @@ class Nmcli(object):
|
||||
self.wifi = module.params['wifi']
|
||||
self.wifi_sec = module.params['wifi_sec']
|
||||
self.gsm = module.params['gsm']
|
||||
self.wireguard = module.params['wireguard']
|
||||
|
||||
if self.method4:
|
||||
self.ipv4_method = self.method4
|
||||
elif self.type == 'dummy' and not self.ip4:
|
||||
elif self.type in ('dummy', 'wireguard') and not self.ip4:
|
||||
self.ipv4_method = 'disabled'
|
||||
elif self.ip4:
|
||||
self.ipv4_method = 'manual'
|
||||
@@ -1209,7 +1318,7 @@ class Nmcli(object):
|
||||
|
||||
if self.method6:
|
||||
self.ipv6_method = self.method6
|
||||
elif self.type == 'dummy' and not self.ip6:
|
||||
elif self.type in ('dummy', 'wireguard') and not self.ip6:
|
||||
self.ipv6_method = 'disabled'
|
||||
elif self.ip6:
|
||||
self.ipv6_method = 'manual'
|
||||
@@ -1241,7 +1350,7 @@ class Nmcli(object):
|
||||
# IP address options.
|
||||
if self.ip_conn_type and not self.master:
|
||||
options.update({
|
||||
'ipv4.addresses': self.ip4,
|
||||
'ipv4.addresses': self.enforce_ipv4_cidr_notation(self.ip4),
|
||||
'ipv4.dhcp-client-id': self.dhcp_client_id,
|
||||
'ipv4.dns': self.dns4,
|
||||
'ipv4.dns-search': self.dns4_search,
|
||||
@@ -1254,13 +1363,15 @@ class Nmcli(object):
|
||||
'ipv4.never-default': self.never_default4,
|
||||
'ipv4.method': self.ipv4_method,
|
||||
'ipv4.may-fail': self.may_fail4,
|
||||
'ipv6.addresses': self.ip6,
|
||||
'ipv6.addresses': self.enforce_ipv6_cidr_notation(self.ip6),
|
||||
'ipv6.dns': self.dns6,
|
||||
'ipv6.dns-search': self.dns6_search,
|
||||
'ipv6.ignore-auto-dns': self.dns6_ignore_auto,
|
||||
'ipv6.gateway': self.gw6,
|
||||
'ipv6.ignore-auto-routes': self.gw6_ignore_auto,
|
||||
'ipv6.method': self.ipv6_method,
|
||||
'ipv6.ip6-privacy': self.ip_privacy6,
|
||||
'ipv6.addr-gen-mode': self.addr_gen_mode6
|
||||
})
|
||||
|
||||
# Layer 2 options.
|
||||
@@ -1332,6 +1443,9 @@ class Nmcli(object):
|
||||
options.update({
|
||||
'vlan.id': self.vlanid,
|
||||
'vlan.parent': self.vlandev,
|
||||
'vlan.flags': self.flags,
|
||||
'vlan.ingress': self.ingress,
|
||||
'vlan.egress': self.egress,
|
||||
})
|
||||
elif self.type == 'vxlan':
|
||||
options.update({
|
||||
@@ -1360,6 +1474,12 @@ class Nmcli(object):
|
||||
options.update({
|
||||
'gsm.%s' % name: value,
|
||||
})
|
||||
elif self.type == 'wireguard':
|
||||
if self.wireguard:
|
||||
for name, value in self.wireguard.items():
|
||||
options.update({
|
||||
'wireguard.%s' % name: value,
|
||||
})
|
||||
# Convert settings values based on the situation.
|
||||
for setting, value in options.items():
|
||||
setting_type = self.settings_type(setting)
|
||||
@@ -1374,6 +1494,8 @@ class Nmcli(object):
|
||||
elif setting == self.mtu_setting:
|
||||
# MTU is 'auto' by default when detecting changes.
|
||||
convert_func = self.mtu_to_string
|
||||
elif setting == 'ipv6.ip6-privacy':
|
||||
convert_func = self.ip6_privacy_to_num
|
||||
elif setting_type is list:
|
||||
# Convert lists to strings for nmcli create/modify commands.
|
||||
convert_func = self.list_to_string
|
||||
@@ -1399,6 +1521,7 @@ class Nmcli(object):
|
||||
'vlan',
|
||||
'wifi',
|
||||
'gsm',
|
||||
'wireguard',
|
||||
)
|
||||
|
||||
@property
|
||||
@@ -1427,6 +1550,23 @@ class Nmcli(object):
|
||||
else:
|
||||
return to_text(mtu)
|
||||
|
||||
@staticmethod
|
||||
def ip6_privacy_to_num(privacy):
|
||||
ip6_privacy_values = {
|
||||
'disabled': '0',
|
||||
'prefer-public-addr': '1 (enabled, prefer public IP)',
|
||||
'prefer-temp-addr': '2 (enabled, prefer temporary IP)',
|
||||
'unknown': '-1',
|
||||
}
|
||||
|
||||
if privacy is None:
|
||||
return None
|
||||
|
||||
if privacy not in ip6_privacy_values:
|
||||
raise AssertionError('{privacy} is invalid ip_privacy6 option'.format(privacy=privacy))
|
||||
|
||||
return ip6_privacy_values[privacy]
|
||||
|
||||
@property
|
||||
def slave_conn_type(self):
|
||||
return self.type in (
|
||||
@@ -1444,6 +1584,18 @@ class Nmcli(object):
|
||||
'sit',
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def enforce_ipv4_cidr_notation(ip4_addresses):
|
||||
if ip4_addresses is None:
|
||||
return None
|
||||
return [address if '/' in address else address + '/32' for address in ip4_addresses]
|
||||
|
||||
@staticmethod
|
||||
def enforce_ipv6_cidr_notation(ip6_addresses):
|
||||
if ip6_addresses is None:
|
||||
return None
|
||||
return [address if '/' in address else address + '/128' for address in ip6_addresses]
|
||||
|
||||
@staticmethod
|
||||
def bool_to_string(boolean):
|
||||
if boolean:
|
||||
@@ -1468,7 +1620,9 @@ class Nmcli(object):
|
||||
'ipv6.ignore-auto-routes',
|
||||
'802-11-wireless.hidden'):
|
||||
return bool
|
||||
elif setting in ('ipv4.dns',
|
||||
elif setting in ('ipv4.addresses',
|
||||
'ipv6.addresses',
|
||||
'ipv4.dns',
|
||||
'ipv4.dns-search',
|
||||
'ipv4.routes',
|
||||
'ipv4.routing-rules',
|
||||
@@ -1757,8 +1911,9 @@ def main():
|
||||
'vxlan',
|
||||
'wifi',
|
||||
'gsm',
|
||||
'wireguard',
|
||||
]),
|
||||
ip4=dict(type='str'),
|
||||
ip4=dict(type='list', elements='str'),
|
||||
gw4=dict(type='str'),
|
||||
gw4_ignore_auto=dict(type='bool', default=False),
|
||||
routes4=dict(type='list', elements='str'),
|
||||
@@ -1771,13 +1926,15 @@ def main():
|
||||
method4=dict(type='str', choices=['auto', 'link-local', 'manual', 'shared', 'disabled']),
|
||||
may_fail4=dict(type='bool', default=True),
|
||||
dhcp_client_id=dict(type='str'),
|
||||
ip6=dict(type='str'),
|
||||
ip6=dict(type='list', elements='str'),
|
||||
gw6=dict(type='str'),
|
||||
gw6_ignore_auto=dict(type='bool', default=False),
|
||||
dns6=dict(type='list', elements='str'),
|
||||
dns6_search=dict(type='list', elements='str'),
|
||||
dns6_ignore_auto=dict(type='bool', default=False),
|
||||
method6=dict(type='str', choices=['ignore', 'auto', 'dhcp', 'link-local', 'manual', 'shared', 'disabled']),
|
||||
ip_privacy6=dict(type='str', choices=['disabled', 'prefer-public-addr', 'prefer-temp-addr', 'unknown']),
|
||||
addr_gen_mode6=dict(type='str', choices=['eui64', 'stable-privacy']),
|
||||
# Bond Specific vars
|
||||
mode=dict(type='str', default='balance-rr',
|
||||
choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']),
|
||||
@@ -1828,6 +1985,7 @@ def main():
|
||||
wifi=dict(type='dict'),
|
||||
wifi_sec=dict(type='dict', no_log=True),
|
||||
gsm=dict(type='dict'),
|
||||
wireguard=dict(type='dict'),
|
||||
),
|
||||
mutually_exclusive=[['never_default4', 'gw4']],
|
||||
required_if=[("type", "wifi", [("ssid")])],
|
||||
|
||||
@@ -127,7 +127,7 @@ ansible_sysname:
|
||||
type: str
|
||||
sample: ubuntu-user
|
||||
ansible_syslocation:
|
||||
description: The physical location of this node (e.g., `telephone closet, 3rd floor').
|
||||
description: The physical location of this node (e.g., C(telephone closet, 3rd floor)).
|
||||
returned: success
|
||||
type: str
|
||||
sample: Sitting on the Dock of the Bay
|
||||
|
||||
@@ -38,7 +38,15 @@ options:
|
||||
type: str
|
||||
description:
|
||||
- Text to send. Note that the module does not handle escaping characters.
|
||||
required: true
|
||||
- Required when I(attachments) is not set.
|
||||
attachments:
|
||||
type: list
|
||||
elements: dict
|
||||
description:
|
||||
- Define a list of attachments.
|
||||
- For more information, see U(https://developers.mattermost.com/integrate/admin-guide/admin-message-attachments/).
|
||||
- Required when I(text) is not set.
|
||||
version_added: 4.3.0
|
||||
channel:
|
||||
type: str
|
||||
description:
|
||||
@@ -76,6 +84,22 @@ EXAMPLES = """
|
||||
channel: notifications
|
||||
username: 'Ansible on {{ inventory_hostname }}'
|
||||
icon_url: http://www.example.com/some-image-file.png
|
||||
|
||||
- name: Send attachments message via Mattermost
|
||||
community.general.mattermost:
|
||||
url: http://mattermost.example.com
|
||||
api_key: my_api_key
|
||||
attachments:
|
||||
- text: Display my system load on host A and B
|
||||
color: '#ff00dd'
|
||||
title: System load
|
||||
fields:
|
||||
- title: System A
|
||||
value: "load average: 0,74, 0,66, 0,63"
|
||||
short: True
|
||||
- title: System B
|
||||
value: 'load average: 5,16, 4,64, 2,43'
|
||||
short: True
|
||||
"""
|
||||
|
||||
RETURN = '''
|
||||
@@ -99,12 +123,16 @@ def main():
|
||||
argument_spec=dict(
|
||||
url=dict(type='str', required=True),
|
||||
api_key=dict(type='str', required=True, no_log=True),
|
||||
text=dict(type='str', required=True),
|
||||
text=dict(type='str'),
|
||||
channel=dict(type='str', default=None),
|
||||
username=dict(type='str', default='Ansible'),
|
||||
icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'),
|
||||
validate_certs=dict(default=True, type='bool'),
|
||||
)
|
||||
attachments=dict(type='list', elements='dict'),
|
||||
),
|
||||
required_one_of=[
|
||||
('text', 'attachments'),
|
||||
],
|
||||
)
|
||||
# init return dict
|
||||
result = dict(changed=False, msg="OK")
|
||||
@@ -115,7 +143,7 @@ def main():
|
||||
|
||||
# define payload
|
||||
payload = {}
|
||||
for param in ['text', 'channel', 'username', 'icon_url']:
|
||||
for param in ['text', 'channel', 'username', 'icon_url', 'attachments']:
|
||||
if module.params[param] is not None:
|
||||
payload[param] = module.params[param]
|
||||
|
||||
|
||||
@@ -124,7 +124,8 @@ import os
|
||||
import ssl
|
||||
import traceback
|
||||
import platform
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
HAS_PAHOMQTT = True
|
||||
PAHOMQTT_IMP_ERR = None
|
||||
@@ -207,7 +208,7 @@ def main():
|
||||
if tls_version:
|
||||
tls_version = tls_map.get(tls_version, ssl.PROTOCOL_SSLv23)
|
||||
else:
|
||||
if LooseVersion(platform.python_version()) <= "3.5.2":
|
||||
if LooseVersion(platform.python_version()) <= LooseVersion("3.5.2"):
|
||||
# Specifying `None` on later versions of python seems sufficient to
|
||||
# instruct python to autonegotiate the SSL/TLS connection. On versions
|
||||
# 3.5.2 and lower though we need to specify the version.
|
||||
|
||||
@@ -124,7 +124,7 @@ EXAMPLES = r'''
|
||||
import os
|
||||
import traceback
|
||||
|
||||
from distutils.version import LooseVersion
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
SENDGRID_IMP_ERR = None
|
||||
try:
|
||||
|
||||
@@ -346,7 +346,7 @@ def build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_e
|
||||
|
||||
def get_slack_message(module, token, channel, ts):
|
||||
headers = {
|
||||
'Content-Type': 'application/json',
|
||||
'Content-Type': 'application/json; charset=UTF-8',
|
||||
'Accept': 'application/json',
|
||||
'Authorization': 'Bearer ' + token
|
||||
}
|
||||
@@ -383,7 +383,7 @@ def do_notify_slack(module, domain, token, payload):
|
||||
slack_uri = OLD_SLACK_INCOMING_WEBHOOK % (domain, token)
|
||||
|
||||
headers = {
|
||||
'Content-Type': 'application/json',
|
||||
'Content-Type': 'application/json; charset=UTF-8',
|
||||
'Accept': 'application/json',
|
||||
}
|
||||
if use_webapi:
|
||||
|
||||
@@ -10,7 +10,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = """
|
||||
module: ansible_galaxy_install
|
||||
author:
|
||||
- "Alexei Znamensky (@russoz)"
|
||||
- "Alexei Znamensky (@russoz)"
|
||||
short_description: Install Ansible roles or collections using ansible-galaxy
|
||||
version_added: 3.5.0
|
||||
description:
|
||||
@@ -24,44 +24,46 @@ requirements:
|
||||
options:
|
||||
type:
|
||||
description:
|
||||
- The type of installation performed by C(ansible-galaxy).
|
||||
- If I(type) is C(both), then I(requirements_file) must be passed and it may contain both roles and collections.
|
||||
- "Note however that the opposite is not true: if using a I(requirements_file), then I(type) can be any of the three choices."
|
||||
- "B(Ansible 2.9): The option C(both) will have the same effect as C(role)."
|
||||
- The type of installation performed by C(ansible-galaxy).
|
||||
- If I(type) is C(both), then I(requirements_file) must be passed and it may contain both roles and collections.
|
||||
- "Note however that the opposite is not true: if using a I(requirements_file), then I(type) can be any of the three choices."
|
||||
- "B(Ansible 2.9): The option C(both) will have the same effect as C(role)."
|
||||
type: str
|
||||
choices: [collection, role, both]
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- Name of the collection or role being installed.
|
||||
- Versions can be specified with C(ansible-galaxy) usual formats. For example, C(community.docker:1.6.1) or C(ansistrano.deploy,3.8.0).
|
||||
- I(name) and I(requirements_file) are mutually exclusive.
|
||||
- Name of the collection or role being installed.
|
||||
- >
|
||||
Versions can be specified with C(ansible-galaxy) usual formats.
|
||||
For example, the collection C(community.docker:1.6.1) or the role C(ansistrano.deploy,3.8.0).
|
||||
- I(name) and I(requirements_file) are mutually exclusive.
|
||||
type: str
|
||||
requirements_file:
|
||||
description:
|
||||
- Path to a file containing a list of requirements to be installed.
|
||||
- It works for I(type) equals to C(collection) and C(role).
|
||||
- I(name) and I(requirements_file) are mutually exclusive.
|
||||
- "B(Ansible 2.9): It can only be used to install either I(type=role) or I(type=collection), but not both at the same run."
|
||||
- Path to a file containing a list of requirements to be installed.
|
||||
- It works for I(type) equals to C(collection) and C(role).
|
||||
- I(name) and I(requirements_file) are mutually exclusive.
|
||||
- "B(Ansible 2.9): It can only be used to install either I(type=role) or I(type=collection), but not both at the same run."
|
||||
type: path
|
||||
dest:
|
||||
description:
|
||||
- The path to the directory containing your collections or roles, according to the value of I(type).
|
||||
- >
|
||||
Please notice that C(ansible-galaxy) will not install collections with I(type=both), when I(requirements_file)
|
||||
contains both roles and collections and I(dest) is specified.
|
||||
- The path to the directory containing your collections or roles, according to the value of I(type).
|
||||
- >
|
||||
Please notice that C(ansible-galaxy) will not install collections with I(type=both), when I(requirements_file)
|
||||
contains both roles and collections and I(dest) is specified.
|
||||
type: path
|
||||
force:
|
||||
description:
|
||||
- Force overwriting an existing role or collection.
|
||||
- Using I(force=true) is mandatory when downgrading.
|
||||
- "B(Ansible 2.9 and 2.10): Must be C(true) to upgrade roles and collections."
|
||||
- Force overwriting an existing role or collection.
|
||||
- Using I(force=true) is mandatory when downgrading.
|
||||
- "B(Ansible 2.9 and 2.10): Must be C(true) to upgrade roles and collections."
|
||||
type: bool
|
||||
default: false
|
||||
ack_ansible29:
|
||||
description:
|
||||
- Acknowledge using Ansible 2.9 with its limitations, and prevents the module from generating warnings about them.
|
||||
- This option is completely ignored if using a version Ansible greater than C(2.9.x).
|
||||
- Acknowledge using Ansible 2.9 with its limitations, and prevents the module from generating warnings about them.
|
||||
- This option is completely ignored if using a version of Ansible greater than C(2.9.x).
|
||||
type: bool
|
||||
default: false
|
||||
"""
|
||||
@@ -114,9 +116,9 @@ RETURN = """
|
||||
returned: always
|
||||
installed_roles:
|
||||
description:
|
||||
- If I(requirements_file) is specified instead, returns dictionary with all the roles installed per path.
|
||||
- If I(name) is specified, returns that role name and the version installed per path.
|
||||
- "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand."
|
||||
- If I(requirements_file) is specified instead, returns dictionary with all the roles installed per path.
|
||||
- If I(name) is specified, returns that role name and the version installed per path.
|
||||
- "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand."
|
||||
type: dict
|
||||
returned: always when installing roles
|
||||
contains:
|
||||
@@ -131,9 +133,9 @@ RETURN = """
|
||||
ansistrano.deploy: 3.8.0
|
||||
installed_collections:
|
||||
description:
|
||||
- If I(requirements_file) is specified instead, returns dictionary with all the collections installed per path.
|
||||
- If I(name) is specified, returns that collection name and the version installed per path.
|
||||
- "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand."
|
||||
- If I(requirements_file) is specified instead, returns dictionary with all the collections installed per path.
|
||||
- If I(name) is specified, returns that collection name and the version installed per path.
|
||||
- "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand."
|
||||
type: dict
|
||||
returned: always when installing collections
|
||||
contains:
|
||||
|
||||
205
plugins/modules/packaging/language/cargo.py
Normal file
205
plugins/modules/packaging/language/cargo.py
Normal file
@@ -0,0 +1,205 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2021 Radek Sprta <mail@radeksprta.eu>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
---
|
||||
module: cargo
|
||||
short_description: Manage Rust packages with cargo
|
||||
version_added: 4.3.0
|
||||
description:
|
||||
- Manage Rust packages with cargo.
|
||||
author: "Radek Sprta (@radek-sprta)"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name of a Rust package to install.
|
||||
type: list
|
||||
elements: str
|
||||
required: true
|
||||
path:
|
||||
description:
|
||||
- >
|
||||
The base path where to install the Rust packages. Cargo automatically appends
|
||||
C(/bin). In other words, C(/usr/local) will become C(/usr/local/bin).
|
||||
type: path
|
||||
version:
|
||||
description:
|
||||
- >
|
||||
The version to install. If I(name) contains multiple values, the module will
|
||||
try to install all of them in this version.
|
||||
type: str
|
||||
required: false
|
||||
state:
|
||||
description:
|
||||
- The state of the Rust package.
|
||||
required: false
|
||||
type: str
|
||||
default: present
|
||||
choices: [ "present", "absent", "latest" ]
|
||||
requirements:
|
||||
- cargo installed in bin path (recommended /usr/local/bin)
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
- name: Install "ludusavi" Rust package
|
||||
community.general.cargo:
|
||||
name: ludusavi
|
||||
|
||||
- name: Install "ludusavi" Rust package in version 0.10.0
|
||||
community.general.cargo:
|
||||
name: ludusavi
|
||||
version: '0.10.0'
|
||||
|
||||
- name: Install "ludusavi" Rust package to global location
|
||||
community.general.cargo:
|
||||
name: ludusavi
|
||||
path: /usr/local
|
||||
|
||||
- name: Remove "ludusavi" Rust package
|
||||
community.general.cargo:
|
||||
name: ludusavi
|
||||
state: absent
|
||||
|
||||
- name: Update "ludusavi" Rust package to its latest version
|
||||
community.general.cargo:
|
||||
name: ludusavi
|
||||
state: latest
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
class Cargo(object):
    """Thin wrapper around the ``cargo`` command-line tool.

    Holds the module parameters and exposes the cargo subcommands
    (install / uninstall / list / search) needed to manage Rust packages.
    """

    def __init__(self, module, **kwargs):
        self.module = module
        self.name = kwargs["name"]
        self.path = kwargs["path"]
        self.state = kwargs["state"]
        self.version = kwargs["version"]

        # Fail early if the cargo binary is not available.
        self.executable = [module.get_bin_path("cargo", True)]

    @property
    def path(self):
        return self._path

    @path.setter
    def path(self, path):
        # A non-existing install root is a user error; fail instead of
        # letting cargo create arbitrary directories.
        if path is not None and not os.path.isdir(path):
            self.module.fail_json(msg="Path %s is not a directory" % path)
        self._path = path

    def _exec(
        self, args, run_in_check_mode=False, check_rc=True, add_package_name=True
    ):
        """Run ``cargo <args>`` and return an ``(stdout, stderr)`` tuple.

        In check mode nothing is executed unless ``run_in_check_mode`` is
        true; in that case a pair of empty strings is returned.
        """
        if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
            cmd = self.executable + args
            rc, out, err = self.module.run_command(cmd, check_rc=check_rc)
            return out, err
        return "", ""

    def get_installed(self):
        """Return a dict mapping installed package names to their versions."""
        cmd = ["install", "--list"]
        data, dummy = self._exec(cmd, True, False, False)

        # `cargo install --list` prints one "<name> v<version>:" header per
        # package, followed by indented binary names we do not care about.
        package_regex = re.compile(r"^(\w+) v(.+):$")
        installed = {}
        for line in data.splitlines():
            package_info = package_regex.match(line)
            if package_info:
                installed[package_info.group(1)] = package_info.group(2)

        return installed

    def install(self, packages=None):
        """Install *packages* (defaults to ``self.name``), honoring path/version."""
        cmd = ["install"]
        cmd.extend(packages or self.name)
        if self.path:
            cmd.append("--root")
            cmd.append(self.path)
        if self.version:
            cmd.append("--version")
            cmd.append(self.version)
        return self._exec(cmd)

    def is_outdated(self, name):
        """Return True if the installed version of *name* differs from the registry's latest."""
        installed_version = self.get_installed().get(name)

        cmd = ["search", name, "--limit", "1"]
        # BUG FIX: _exec() returns an (out, err) tuple; the previous code
        # passed the whole tuple to re.search(), raising a TypeError.
        data, dummy = self._exec(cmd, True, False, False)

        # The first search hit looks like: name = "1.2.3"  # description
        match = re.search(r'"(.+)"', data)
        # Guard against no match (previously an unbound-name NameError);
        # an unparsable search result is treated as "outdated unknown".
        latest_version = match.group(1) if match else None

        return installed_version != latest_version

    def uninstall(self, packages=None):
        """Uninstall *packages* (defaults to ``self.name``)."""
        cmd = ["uninstall"]
        cmd.extend(packages or self.name)
        return self._exec(cmd)
|
||||
|
||||
|
||||
def main():
    """Entry point: parse module parameters and converge the packages to the requested state."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, type="list", elements="str"),
            path=dict(default=None, type="path"),
            state=dict(default="present", choices=["present", "absent", "latest"]),
            version=dict(default=None, type="str"),
        ),
        supports_check_mode=True,
    )

    params = module.params
    names = params["name"]
    wanted_state = params["state"]
    wanted_version = params["version"]

    if not names:
        module.fail_json(msg="Package name must be specified")

    # Force the C locale so cargo's stdout can be parsed reliably.
    module.run_command_environ_update = dict(
        LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C"
    )

    cargo = Cargo(
        module,
        name=names,
        path=params["path"],
        state=wanted_state,
        version=wanted_version,
    )
    present = cargo.get_installed()

    changed = False
    out = None
    err = None

    if wanted_state == "present":
        # Install anything missing, or anything pinned to a different version.
        targets = [
            pkg
            for pkg in names
            if pkg not in present
            or (wanted_version and wanted_version != present[pkg])
        ]
        if targets:
            changed = True
            out, err = cargo.install(targets)
    elif wanted_state == "latest":
        # Reinstall packages that are missing or behind the registry.
        targets = [
            pkg for pkg in names if pkg not in present or cargo.is_outdated(pkg)
        ]
        if targets:
            changed = True
            out, err = cargo.install(targets)
    else:
        # wanted_state == "absent": remove only what is actually installed.
        targets = [pkg for pkg in names if pkg in present]
        if targets:
            changed = True
            out, err = cargo.uninstall(targets)

    module.exit_json(changed=changed, stdout=out, stderr=err)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user