mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-04-28 17:36:49 +00:00
Compare commits
73 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b44f6b8114 | ||
|
|
53a145ecb0 | ||
|
|
b22b44088f | ||
|
|
e0a1aa2f46 | ||
|
|
53e7e48834 | ||
|
|
62e3a2ed2f | ||
|
|
ecede6ca99 | ||
|
|
e1ac1fa6db | ||
|
|
81cef0bd05 | ||
|
|
a2bb118e95 | ||
|
|
bf9bcd9bb4 | ||
|
|
9bfd61e117 | ||
|
|
ca81a5cf2f | ||
|
|
853dd21eab | ||
|
|
6f267d8f35 | ||
|
|
1f975eff56 | ||
|
|
0ca922248f | ||
|
|
ef7ade6a56 | ||
|
|
d721283846 | ||
|
|
af410f5572 | ||
|
|
442dabbcc6 | ||
|
|
bbb155409e | ||
|
|
a83556af80 | ||
|
|
13a5e5a1ba | ||
|
|
466bd89bd4 | ||
|
|
bd4d5fe9db | ||
|
|
cf889faf42 | ||
|
|
ea313503dd | ||
|
|
57fa6526c4 | ||
|
|
ae4bee2627 | ||
|
|
87000ae491 | ||
|
|
46e221cbc6 | ||
|
|
3f2111582d | ||
|
|
bd8634e04e | ||
|
|
1ae57fc5dd | ||
|
|
1e5e0824d2 | ||
|
|
7eaf795774 | ||
|
|
3dc25edeac | ||
|
|
a67ee6cead | ||
|
|
9c5461dc12 | ||
|
|
0b59a71ae7 | ||
|
|
720de141b5 | ||
|
|
7ec6025690 | ||
|
|
53a5cdaed7 | ||
|
|
693efb35b3 | ||
|
|
07cd51a33b | ||
|
|
c80416164b | ||
|
|
a61bc5ab34 | ||
|
|
8ac8fa0aa9 | ||
|
|
b76994ee6e | ||
|
|
746bd3ea5d | ||
|
|
68baf56ea6 | ||
|
|
87377dd23f | ||
|
|
29f028e33b | ||
|
|
196e8fe4e3 | ||
|
|
83c6d18bc0 | ||
|
|
1314b0d7b2 | ||
|
|
be94a014c8 | ||
|
|
039c3da7dc | ||
|
|
2480250f1b | ||
|
|
860f0e12c0 | ||
|
|
2f56fd7b2a | ||
|
|
084879632a | ||
|
|
4eef56b7b3 | ||
|
|
13929acf02 | ||
|
|
070bcf80c4 | ||
|
|
0cf2a5ad05 | ||
|
|
76a64ea733 | ||
|
|
115eab2cfa | ||
|
|
dbba813e23 | ||
|
|
7daf78962b | ||
|
|
cf9fff5238 | ||
|
|
d8d68babe4 |
7
.github/BOTMETA.yml
vendored
7
.github/BOTMETA.yml
vendored
@@ -295,7 +295,8 @@ files:
|
||||
$modules/clustering/consul/:
|
||||
maintainers: $team_consul
|
||||
$modules/clustering/etcd3.py:
|
||||
maintainers: evrardjp vfauth
|
||||
maintainers: evrardjp
|
||||
ignore: vfauth
|
||||
$modules/clustering/nomad/:
|
||||
maintainers: chris93111
|
||||
$modules/clustering/pacemaker_cluster.py:
|
||||
@@ -1013,7 +1014,7 @@ macros:
|
||||
team_ipa: Akasurde Nosmoht fxfitz
|
||||
team_jboss: Wolfant jairojunior wbrefvem
|
||||
team_keycloak: eikef ndclt
|
||||
team_linode: InTheCloudDan decentral1se displague rmcintosh
|
||||
team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber
|
||||
team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr
|
||||
team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder
|
||||
team_netapp: amit0701 carchi8py hulquest lmprice lonico ndswartz schmots1
|
||||
@@ -1021,7 +1022,7 @@ macros:
|
||||
team_opennebula: ilicmilan meerkampdvv rsmontero xorel
|
||||
team_oracle: manojmeda mross22 nalsaber
|
||||
team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
|
||||
team_redfish: billdodd mraineri tomasg2012
|
||||
team_redfish: billdodd mraineri tomasg2012 xmadsen renxulei
|
||||
team_rhn: FlossWare alikins barnabycourt vritant
|
||||
team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone sieben
|
||||
team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
|
||||
|
||||
248
CHANGELOG.rst
248
CHANGELOG.rst
@@ -6,6 +6,213 @@ Community General Release Notes
|
||||
|
||||
This changelog describes changes after version 1.0.0.
|
||||
|
||||
v2.3.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Fixes compatibility issues with the latest ansible-core 2.11 beta, some more bugs, and contains several new features, modules and plugins.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- archive - refactored some reused code out into a couple of functions (https://github.com/ansible-collections/community.general/pull/2061).
|
||||
- csv module utils - new module_utils for shared functions between ``from_csv`` filter and ``read_csv`` module (https://github.com/ansible-collections/community.general/pull/2037).
|
||||
- ipa_sudorule - add support for setting sudo runasuser (https://github.com/ansible-collections/community.general/pull/2031).
|
||||
- jenkins_job - add a ``validate_certs`` parameter that allows disabling TLS/SSL certificate validation (https://github.com/ansible-collections/community.general/issues/255).
|
||||
- kibana_plugin - add parameter for passing ``--allow-root`` flag to kibana and kibana-plugin commands (https://github.com/ansible-collections/community.general/pull/2014).
|
||||
- proxmox - added ``purge`` module parameter for use when deleting lxc's with HA options (https://github.com/ansible-collections/community.general/pull/2013).
|
||||
- proxmox inventory plugin - added ``tags_parsed`` fact containing tags parsed as a list (https://github.com/ansible-collections/community.general/pull/1949).
|
||||
- proxmox_kvm - added new module parameter ``tags`` for use with PVE 6+ (https://github.com/ansible-collections/community.general/pull/2000).
|
||||
- rax - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2006).
|
||||
- rax_cdb_user - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2006).
|
||||
- rax_scaling_group - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2006).
|
||||
- read_csv - refactored read_csv module to use shared csv functions from csv module_utils (https://github.com/ansible-collections/community.general/pull/2037).
|
||||
- redfish_* modules, redfish_utils module utils - add support for Redfish session create, delete, and authenticate (https://github.com/ansible-collections/community.general/issues/1975).
|
||||
- snmp_facts - added parameters ``timeout`` and ``retries`` to module (https://github.com/ansible-collections/community.general/issues/980).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Mark various module options with ``no_log=False`` which have a name that potentially could leak secrets, but which do not (https://github.com/ansible-collections/community.general/pull/2001).
|
||||
- module_helper module utils - actually ignoring formatting of parameters with value ``None`` (https://github.com/ansible-collections/community.general/pull/2024).
|
||||
- module_helper module utils - handling ``ModuleHelperException`` now properly calls ``fail_json()`` (https://github.com/ansible-collections/community.general/pull/2024).
|
||||
- module_helper module utils - use the command name as-is in ``CmdMixin`` if it fails ``get_bin_path()`` - allowing full path names to be passed (https://github.com/ansible-collections/community.general/pull/2024).
|
||||
- nios* modules - fix modules to work with ansible-core 2.11 (https://github.com/ansible-collections/community.general/pull/2057).
|
||||
- proxmox - removed requirement that root password is provided when container state is ``present`` (https://github.com/ansible-collections/community.general/pull/1999).
|
||||
- proxmox inventory - exclude qemu templates from inclusion to the inventory via pools (https://github.com/ansible-collections/community.general/issues/1986, https://github.com/ansible-collections/community.general/pull/1991).
|
||||
- proxmox inventory plugin - allowed proxmox tag string to contain commas when returned as fact (https://github.com/ansible-collections/community.general/pull/1949).
|
||||
- redfish_config module, redfish_utils module utils - fix IndexError in ``SetManagerNic`` command (https://github.com/ansible-collections/community.general/issues/1692).
|
||||
- scaleway inventory plugin - fix pagination on scaleway inventory plugin (https://github.com/ansible-collections/community.general/pull/2036).
|
||||
- stacki_host - replaced ``default`` to environment variables with ``fallback`` to them (https://github.com/ansible-collections/community.general/pull/2072).
|
||||
|
||||
New Plugins
|
||||
-----------
|
||||
|
||||
Filter
|
||||
~~~~~~
|
||||
|
||||
- from_csv - Converts CSV text input into list of dicts
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
Net Tools
|
||||
~~~~~~~~~
|
||||
|
||||
- gandi_livedns - Manage Gandi LiveDNS records
|
||||
|
||||
pritunl
|
||||
^^^^^^^
|
||||
|
||||
- pritunl_user - Manage Pritunl Users using the Pritunl API
|
||||
- pritunl_user_info - List Pritunl Users using the Pritunl API
|
||||
|
||||
v2.2.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular feature and bugfix release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- bundler - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- consul - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- consul_acl - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- consul_io inventory script - conf options - allow custom configuration options via env variables (https://github.com/ansible-collections/community.general/pull/620).
|
||||
- consul_session - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- datadog_monitor - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- grove - the option ``message`` has been renamed to ``message_content``. The old name ``message`` is kept as an alias and will be removed for community.general 4.0.0. This was done because ``message`` is used internally by Ansible (https://github.com/ansible-collections/community.general/pull/1929).
|
||||
- heroku_collaborator - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- linode_v4 - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- manageiq_alert_profiles - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- manageiq_policies - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- manageiq_tags - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- manageiq_tags and manageiq_policies - added new parameter ``resource_id``. This parameter can be used instead of parameter ``resource_name`` (https://github.com/ansible-collections/community.general/pull/719).
|
||||
- module_helper module utils - ``CmdMixin.run_command()`` now accepts ``dict`` command arguments, providing the parameter and its value (https://github.com/ansible-collections/community.general/pull/1867).
|
||||
- one_host - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- one_image_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- one_vm - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- oneandone_firewall_policy - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- oneandone_load_balancer - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- oneandone_monitoring_policy - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- oneandone_private_network - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- oneandone_server - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- oneview_datacenter_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- oneview_enclosure_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- oneview_ethernet_network_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- oneview_network_set_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- profitbricks - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- profitbricks_volume - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- scaleway_compute - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- scaleway_lb - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- sensu_check - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- sensu_client - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- sensu_handler - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- webfaction_domain - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- webfaction_site - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- yum_versionlock - Do the lock/unlock concurrently to speed up (https://github.com/ansible-collections/community.general/pull/1912).
|
||||
|
||||
Deprecated Features
|
||||
-------------------
|
||||
|
||||
- apt_rpm - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- composer - deprecated invalid parameter aliases ``working-dir``, ``global-command``, ``prefer-source``, ``prefer-dist``, ``no-dev``, ``no-scripts``, ``no-plugins``, ``optimize-autoloader``, ``classmap-authoritative``, ``apcu-autoloader``, ``ignore-platform-reqs``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- github_deploy_key - deprecated invalid parameter alias ``2fa_token``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- grove - the option ``message`` will be removed in community.general 4.0.0. Use the new option ``message_content`` instead (https://github.com/ansible-collections/community.general/pull/1929).
|
||||
- homebrew - deprecated invalid parameter alias ``update-brew``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- homebrew_cask - deprecated invalid parameter alias ``update-brew``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- opkg - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- pacman - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- puppet - deprecated undocumented parameter ``show_diff``, will be removed in 7.0.0. (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- runit - unused parameter ``dist`` marked for deprecation (https://github.com/ansible-collections/community.general/pull/1830).
|
||||
- slackpkg - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- urpmi - deprecated invalid parameter aliases ``update-cache`` and ``no-recommends``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- xbps - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- bigpanda - actually use the ``deployment_message`` option (https://github.com/ansible-collections/community.general/pull/1928).
|
||||
- chef_databag lookup plugin - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- cloudforms inventory - fixed issue that non-existing (archived) VMs were synced (https://github.com/ansible-collections/community.general/pull/720).
|
||||
- cobbler_sync, cobbler_system - fix SSL/TLS certificate check when ``validate_certs`` set to ``false`` (https://github.com/ansible-collections/community.general/pull/1880).
|
||||
- consul_io inventory script - kv_groups - fix byte chain decoding for Python 3 (https://github.com/ansible-collections/community.general/pull/620).
|
||||
- deploy_helper - allow ``state=clean`` to be used without defining a ``release`` (https://github.com/ansible-collections/community.general/issues/1852).
|
||||
- diy callback plugin - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- idrac_redfish_command - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- idrac_redfish_config - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- idrac_redfish_info - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- imc_rest - explicitly logging out instead of registering the call in ```atexit``` (https://github.com/ansible-collections/community.general/issues/1735).
|
||||
- infoblox inventory script - make sure that the script also works with Ansible 2.9, and returns a more helpful error when community.general is not installed as part of Ansible 2.10/3 (https://github.com/ansible-collections/community.general/pull/1871).
|
||||
- ini_file - allows an empty string as a value for an option (https://github.com/ansible-collections/community.general/pull/1972).
|
||||
- lxc_container - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- lxd_container - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- manageiq_provider - wrapped ``dict.keys()`` with ``list`` for use in ``choices`` setting (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- memcached cache plugin - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- meta/runtime.yml - improve deprecation messages (https://github.com/ansible-collections/community.general/pull/1918).
|
||||
- net_tools.nios.api module_utils - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- nmcli - add ``method4`` and ``method6`` options (https://github.com/ansible-collections/community.general/pull/1894).
|
||||
- nmcli - ensure the ``slave-type`` option is passed to ``nmcli`` for type ``bond-slave`` (https://github.com/ansible-collections/community.general/pull/1882).
|
||||
- nsot inventory script - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- oci_vcn - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- oneandone_monitoring_policy - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- packet_volume_attachment - removed extraneous ``print`` call - old debug? (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- proxmox inventory - added handling of extra trailing slashes in the URL (https://github.com/ansible-collections/community.general/pull/1914).
|
||||
- proxmox_kvm - fix parameter ``vmid`` passed twice to ``exit_json`` while creating a virtual machine without cloning (https://github.com/ansible-collections/community.general/issues/1875, https://github.com/ansible-collections/community.general/pull/1895).
|
||||
- redfish_command - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- redfish_config - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- redhat_subscription - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- redis cache plugin - wrapped usages of ``keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- selective callback plugin - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- sensu_check - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- spotinst_aws_elastigroup - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- utm_utils module_utils - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- vdo - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- xfs_quota - the feedback for initializing project quota using xfs_quota binary from ``xfsprogs`` has changed since the version it was written for (https://github.com/ansible-collections/community.general/pull/1596).
|
||||
- zfs - some ZFS properties could be passed when the dataset/volume did not exist, but would fail if the dataset already existed, even if the property matched what was specified in the ansible task (https://github.com/ansible-collections/community.general/issues/868, https://github.com/ansible-collections/community.general/pull/1833).
|
||||
|
||||
New Plugins
|
||||
-----------
|
||||
|
||||
Filter
|
||||
~~~~~~
|
||||
|
||||
- version_sort - Sort a list according to version order instead of pure alphabetical one
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
Cloud
|
||||
~~~~~
|
||||
|
||||
misc
|
||||
^^^^
|
||||
|
||||
- proxmox_storage_info - Retrieve information about one or more Proxmox VE storages
|
||||
|
||||
Source Control
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
github
|
||||
^^^^^^
|
||||
|
||||
- github_repo - Manage your repositories on Github
|
||||
|
||||
gitlab
|
||||
^^^^^^
|
||||
|
||||
- gitlab_project_members - Manage project members on GitLab Server
|
||||
|
||||
Web Infrastructure
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
- jenkins_build - Manage jenkins builds
|
||||
|
||||
v2.1.1
|
||||
======
|
||||
|
||||
@@ -611,6 +818,15 @@ Bugfixes
|
||||
- zfs - fixed ``invalid character '@' in pool name"`` error when working with snapshots on a root zvol (https://github.com/ansible-collections/community.general/issues/932).
|
||||
- zypper - force ``LANG=C`` to as zypper is looking in XML output where attribute could be translated (https://github.com/ansible-collections/community.general/issues/1175).
|
||||
|
||||
New Plugins
|
||||
-----------
|
||||
|
||||
Inventory
|
||||
~~~~~~~~~
|
||||
|
||||
- proxmox - Proxmox inventory source
|
||||
- stackpath_compute - StackPath Edge Computing inventory source
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
@@ -620,7 +836,24 @@ Cloud
|
||||
misc
|
||||
^^^^
|
||||
|
||||
- proxmox_domain_info - Retrieve information about one or more Proxmox VE domains
|
||||
- proxmox_group_info - Retrieve information about one or more Proxmox VE groups
|
||||
- proxmox_snap - Snapshot management of instances in Proxmox VE cluster
|
||||
- proxmox_user_info - Retrieve information about one or more Proxmox VE users
|
||||
|
||||
scaleway
|
||||
^^^^^^^^
|
||||
|
||||
- scaleway_database_backup - Scaleway database backups management module
|
||||
|
||||
Clustering
|
||||
~~~~~~~~~~
|
||||
|
||||
nomad
|
||||
^^^^^
|
||||
|
||||
- nomad_job - Launch a Nomad Job
|
||||
- nomad_job_info - Get Nomad Jobs info
|
||||
|
||||
Identity
|
||||
~~~~~~~~
|
||||
@@ -633,6 +866,9 @@ ipa
|
||||
Monitoring
|
||||
~~~~~~~~~~
|
||||
|
||||
- pagerduty_change - Track a code or infrastructure change as a PagerDuty change event
|
||||
- pagerduty_user - Manage a user account on PagerDuty
|
||||
|
||||
datadog
|
||||
^^^^^^^
|
||||
|
||||
@@ -648,8 +884,20 @@ os
|
||||
- rpm_ostree_pkg - Install or uninstall overlay additional packages
|
||||
- yum_versionlock - Locks / unlocks an installed package(s) from being updated by yum package manager
|
||||
|
||||
Source Control
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
gitlab
|
||||
^^^^^^
|
||||
|
||||
- gitlab_group_members - Manage group members on GitLab Server
|
||||
- gitlab_group_variable - Creates, updates, or deletes GitLab groups variables
|
||||
|
||||
System
|
||||
~~~~~~
|
||||
|
||||
- iptables_state - Save iptables state into a file or restore it from a file
|
||||
- shutdown - Shut down a machine
|
||||
- ssh_config - Manage SSH config for user
|
||||
- sysrc - Manage FreeBSD using sysrc
|
||||
- sysupgrade - Manage OpenBSD system upgrades
|
||||
|
||||
@@ -980,25 +980,72 @@ releases:
|
||||
- description: Manages Datadog downtimes
|
||||
name: datadog_downtime
|
||||
namespace: monitoring.datadog
|
||||
- description: Manage group members on GitLab Server
|
||||
name: gitlab_group_members
|
||||
namespace: source_control.gitlab
|
||||
- description: Creates, updates, or deletes GitLab groups variables
|
||||
name: gitlab_group_variable
|
||||
namespace: source_control.gitlab
|
||||
- description: Manage FreeIPA password policies
|
||||
name: ipa_pwpolicy
|
||||
namespace: identity.ipa
|
||||
- description: Save iptables state into a file or restore it from a file
|
||||
name: iptables_state
|
||||
namespace: system
|
||||
- description: Launch a Nomad Job
|
||||
name: nomad_job
|
||||
namespace: clustering.nomad
|
||||
- description: Get Nomad Jobs info
|
||||
name: nomad_job_info
|
||||
namespace: clustering.nomad
|
||||
- description: Track a code or infrastructure change as a PagerDuty change event
|
||||
name: pagerduty_change
|
||||
namespace: monitoring
|
||||
- description: Manage a user account on PagerDuty
|
||||
name: pagerduty_user
|
||||
namespace: monitoring
|
||||
- description: Retrieve information about one or more Proxmox VE domains
|
||||
name: proxmox_domain_info
|
||||
namespace: cloud.misc
|
||||
- description: Retrieve information about one or more Proxmox VE groups
|
||||
name: proxmox_group_info
|
||||
namespace: cloud.misc
|
||||
- description: Snapshot management of instances in Proxmox VE cluster
|
||||
name: proxmox_snap
|
||||
namespace: cloud.misc
|
||||
- description: Retrieve information about one or more Proxmox VE users
|
||||
name: proxmox_user_info
|
||||
namespace: cloud.misc
|
||||
- description: Install or uninstall overlay additional packages
|
||||
name: rpm_ostree_pkg
|
||||
namespace: packaging.os
|
||||
- description: Scaleway database backups management module
|
||||
name: scaleway_database_backup
|
||||
namespace: cloud.scaleway
|
||||
- description: Shut down a machine
|
||||
name: shutdown
|
||||
namespace: system
|
||||
- description: Manage SSH config for user
|
||||
name: ssh_config
|
||||
namespace: system
|
||||
- description: Manage FreeBSD using sysrc
|
||||
name: sysrc
|
||||
namespace: system
|
||||
- description: Manage OpenBSD system upgrades
|
||||
name: sysupgrade
|
||||
namespace: system
|
||||
- description: Locks / unlocks an installed package(s) from being updated by yum
|
||||
package manager
|
||||
name: yum_versionlock
|
||||
namespace: packaging.os
|
||||
plugins:
|
||||
inventory:
|
||||
- description: Proxmox inventory source
|
||||
name: proxmox
|
||||
namespace: null
|
||||
- description: StackPath Edge Computing inventory source
|
||||
name: stackpath_compute
|
||||
namespace: null
|
||||
release_date: '2021-01-28'
|
||||
2.0.1:
|
||||
changes:
|
||||
@@ -1297,3 +1344,288 @@ releases:
|
||||
- 1847-proxmox-kvm-fix-status.yml
|
||||
- 2.1.0.yml
|
||||
release_date: '2021-02-17'
|
||||
2.2.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- bigpanda - actually use the ``deployment_message`` option (https://github.com/ansible-collections/community.general/pull/1928).
|
||||
- chef_databag lookup plugin - wrapped usages of ``dict.keys()`` in ``list()``
|
||||
for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- cloudforms inventory - fixed issue that non-existing (archived) VMs were synced
|
||||
(https://github.com/ansible-collections/community.general/pull/720).
|
||||
- cobbler_sync, cobbler_system - fix SSL/TLS certificate check when ``validate_certs``
|
||||
set to ``false`` (https://github.com/ansible-collections/community.general/pull/1880).
|
||||
- consul_io inventory script - kv_groups - fix byte chain decoding for Python
|
||||
3 (https://github.com/ansible-collections/community.general/pull/620).
|
||||
- deploy_helper - allow ``state=clean`` to be used without defining a ``release``
|
||||
(https://github.com/ansible-collections/community.general/issues/1852).
|
||||
- diy callback plugin - wrapped usages of ``dict.keys()`` in ``list()`` for
|
||||
Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- idrac_redfish_command - wrapped usages of ``dict.keys()`` in ``list()`` for
|
||||
Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- idrac_redfish_config - wrapped usages of ``dict.keys()`` in ``list()`` for
|
||||
Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- idrac_redfish_info - wrapped usages of ``dict.keys()`` in ``list()`` for Python
|
||||
3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- imc_rest - explicitly logging out instead of registering the call in ```atexit```
|
||||
(https://github.com/ansible-collections/community.general/issues/1735).
|
||||
- infoblox inventory script - make sure that the script also works with Ansible
|
||||
2.9, and returns a more helpful error when community.general is not installed
|
||||
as part of Ansible 2.10/3 (https://github.com/ansible-collections/community.general/pull/1871).
|
||||
- ini_file - allows an empty string as a value for an option (https://github.com/ansible-collections/community.general/pull/1972).
|
||||
- lxc_container - wrapped usages of ``dict.keys()`` in ``list()`` for Python
|
||||
3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- lxd_container - wrapped usages of ``dict.keys()`` in ``list()`` for Python
|
||||
3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- manageiq_provider - wrapped ``dict.keys()`` with ``list`` for use in ``choices``
|
||||
setting (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- memcached cache plugin - wrapped usages of ``dict.keys()`` in ``list()`` for
|
||||
Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- meta/runtime.yml - improve deprecation messages (https://github.com/ansible-collections/community.general/pull/1918).
|
||||
- net_tools.nios.api module_utils - wrapped usages of ``dict.keys()`` in ``list()``
|
||||
for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- nmcli - add ``method4`` and ``method6`` options (https://github.com/ansible-collections/community.general/pull/1894).
|
||||
- nmcli - ensure the ``slave-type`` option is passed to ``nmcli`` for type ``bond-slave``
|
||||
(https://github.com/ansible-collections/community.general/pull/1882).
|
||||
- nsot inventory script - wrapped usages of ``dict.keys()`` in ``list()`` for
|
||||
Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- oci_vcn - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility
|
||||
(https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- oneandone_monitoring_policy - wrapped usages of ``dict.keys()`` in ``list()``
|
||||
for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- packet_volume_attachment - removed extraneous ``print`` call - old debug?
|
||||
(https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- proxmox inventory - added handling of extra trailing slashes in the URL (https://github.com/ansible-collections/community.general/pull/1914).
|
||||
- proxmox_kvm - fix parameter ``vmid`` passed twice to ``exit_json`` while creating
|
||||
a virtual machine without cloning (https://github.com/ansible-collections/community.general/issues/1875,
|
||||
https://github.com/ansible-collections/community.general/pull/1895).
|
||||
- redfish_command - wrapped usages of ``dict.keys()`` in ``list()`` for Python
|
||||
3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- redfish_config - wrapped usages of ``dict.keys()`` in ``list()`` for Python
|
||||
3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- redhat_subscription - wrapped usages of ``dict.keys()`` in ``list()`` for
|
||||
Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- redis cache plugin - wrapped usages of ``keys()`` in ``list()`` for Python
|
||||
3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- selective callback plugin - wrapped usages of ``dict.keys()`` in ``list()``
|
||||
for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- sensu_check - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3
|
||||
compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- spotinst_aws_elastigroup - wrapped usages of ``dict.keys()`` in ``list()``
|
||||
for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- utm_utils module_utils - wrapped usages of ``dict.keys()`` in ``list()`` for
|
||||
Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- vdo - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility
|
||||
(https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- xfs_quota - the feedback for initializing project quota using xfs_quota binary
|
||||
from ``xfsprogs`` has changed since the version it was written for (https://github.com/ansible-collections/community.general/pull/1596).
|
||||
- zfs - some ZFS properties could be passed when the dataset/volume did not
|
||||
exist, but would fail if the dataset already existed, even if the property
|
||||
matched what was specified in the ansible task (https://github.com/ansible-collections/community.general/issues/868,
|
||||
https://github.com/ansible-collections/community.general/pull/1833).
|
||||
deprecated_features:
|
||||
- apt_rpm - deprecated invalid parameter alias ``update-cache``, will be removed
|
||||
in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- composer - deprecated invalid parameter aliases ``working-dir``, ``global-command``,
|
||||
``prefer-source``, ``prefer-dist``, ``no-dev``, ``no-scripts``, ``no-plugins``,
|
||||
``optimize-autoloader``, ``classmap-authoritative``, ``apcu-autoloader``,
|
||||
``ignore-platform-reqs``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- github_deploy_key - deprecated invalid parameter alias ``2fa_token``, will
|
||||
be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- grove - the option ``message`` will be removed in community.general 4.0.0.
|
||||
Use the new option ``message_content`` instead (https://github.com/ansible-collections/community.general/pull/1929).
|
||||
- homebrew - deprecated invalid parameter alias ``update-brew``, will be removed
|
||||
in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- homebrew_cask - deprecated invalid parameter alias ``update-brew``, will be
|
||||
removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- opkg - deprecated invalid parameter alias ``update-cache``, will be removed
|
||||
in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- pacman - deprecated invalid parameter alias ``update-cache``, will be removed
|
||||
in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- puppet - deprecated undocumented parameter ``show_diff``, will be removed
|
||||
in 7.0.0. (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- runit - unused parameter ``dist`` marked for deprecation (https://github.com/ansible-collections/community.general/pull/1830).
|
||||
- slackpkg - deprecated invalid parameter alias ``update-cache``, will be removed
|
||||
in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- urmpi - deprecated invalid parameter aliases ``update-cache`` and ``no-recommends``,
|
||||
will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
- xbps - deprecated invalid parameter alias ``update-cache``, will be removed
|
||||
in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927).
|
||||
minor_changes:
|
||||
- bundler - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- consul - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- consul_acl - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- consul_io inventory script - conf options - allow custom configuration options
|
||||
via env variables (https://github.com/ansible-collections/community.general/pull/620).
|
||||
- consul_session - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- datadog_monitor - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- grove - the option ``message`` has been renamed to ``message_content``. The
|
||||
old name ``message`` is kept as an alias and will be removed for community.general
|
||||
4.0.0. This was done because ``message`` is used internally by Ansible (https://github.com/ansible-collections/community.general/pull/1929).
|
||||
- heroku_collaborator - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- linode_v4 - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- manageiq_alert_profiles - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- manageiq_policies - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- manageiq_tags - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- manageiq_tags and manageiq_policies - added new parameter ``resource_id``.
|
||||
This parameter can be used instead of parameter ``resource_name`` (https://github.com/ansible-collections/community.general/pull/719).
|
||||
- module_helper module utils - ``CmdMixin.run_command()`` now accepts ``dict``
|
||||
command arguments, providing the parameter and its value (https://github.com/ansible-collections/community.general/pull/1867).
|
||||
- one_host - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- one_image_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- one_vm - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- oneandone_firewall_policy - elements of list parameters are now validated
|
||||
(https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- oneandone_load_balancer - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- oneandone_monitoring_policy - elements of list parameters are now validated
|
||||
(https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- oneandone_private_network - elements of list parameters are now validated
|
||||
(https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- oneandone_server - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- oneview_datacenter_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- oneview_enclosure_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- oneview_ethernet_network_info - elements of list parameters are now validated
|
||||
(https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- oneview_network_set_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- profitbricks - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- profitbricks_volume - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- scaleway_compute - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- scaleway_lb - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970).
|
||||
- sensu_check - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- sensu_client - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- sensu_handler - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- webfaction_domain - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- webfaction_site - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885).
|
||||
- yum_versionlock - Do the lock/unlock concurrently to speed up (https://github.com/ansible-collections/community.general/pull/1912).
|
||||
release_summary: Regular feature and bugfix release.
|
||||
fragments:
|
||||
- 1596-xfs_quota-feedback_on_projects_not_initialized_has_changed.yml
|
||||
- 1735-imc-sessions.yml
|
||||
- 1833-zfs-creation-only-properties.yaml
|
||||
- 1838-runit-deprecate-param-dist.yml
|
||||
- 1852-deploy-helper-fix-state-is-clean-without-release.yaml
|
||||
- 1861-python3-keys.yml
|
||||
- 1867-modhelper-cmdmixin-dict-params.yml
|
||||
- 1871-infoblox-inventory.yml
|
||||
- 1880-fix_cobbler_system_ssl.yml
|
||||
- 1882-fix-nmcli-ensure-slave-type-for-bond-slave.yml
|
||||
- 1885-sanity-check-fixes-batch3.yml
|
||||
- 1894-feat-nmcli-add-method4-and-method6.yml
|
||||
- 1895-proxmox-kvm-fix-issue-1875.yml
|
||||
- 1912-yum_versionlock-lock_unlock_concurrently.yml
|
||||
- 1914-add-sanitization-to-url.yml
|
||||
- 1916-add-version-sort-filter.yml
|
||||
- 1927-removed-parameter-invalid.yml
|
||||
- 1928-bigpanda-message.yml
|
||||
- 1929-grove-message.yml
|
||||
- 1970-valmod-batch7.yml
|
||||
- 1972-ini_file-empty-str-value.yml
|
||||
- 2.2.0.yml
|
||||
- 620-consul_io-env-variables-conf-based.yml
|
||||
- 719-manageiq-resource_id.yml
|
||||
- 720-cloudforms_inventory.yml
|
||||
- meta-runtime-deprecations.yml
|
||||
modules:
|
||||
- description: Manage your repositories on Github
|
||||
name: github_repo
|
||||
namespace: source_control.github
|
||||
- description: Manage project members on GitLab Server
|
||||
name: gitlab_project_members
|
||||
namespace: source_control.gitlab
|
||||
- description: Manage jenkins builds
|
||||
name: jenkins_build
|
||||
namespace: web_infrastructure
|
||||
- description: Retrieve information about one or more Proxmox VE storages
|
||||
name: proxmox_storage_info
|
||||
namespace: cloud.misc
|
||||
plugins:
|
||||
filter:
|
||||
- description: Sort a list according to version order instead of pure alphabetical
|
||||
one
|
||||
name: version_sort
|
||||
namespace: null
|
||||
release_date: '2021-03-08'
|
||||
2.3.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- Mark various module options with ``no_log=False`` which have a name that potentially
|
||||
could leak secrets, but which do not (https://github.com/ansible-collections/community.general/pull/2001).
|
||||
- module_helper module utils - actually ignoring formatting of parameters with
|
||||
value ``None`` (https://github.com/ansible-collections/community.general/pull/2024).
|
||||
- module_helper module utils - handling ``ModuleHelperException`` now properly
|
||||
calls ``fail_json()`` (https://github.com/ansible-collections/community.general/pull/2024).
|
||||
- module_helper module utils - use the command name as-is in ``CmdMixin`` if
|
||||
it fails ``get_bin_path()`` - allowing full path names to be passed (https://github.com/ansible-collections/community.general/pull/2024).
|
||||
- nios* modules - fix modules to work with ansible-core 2.11 (https://github.com/ansible-collections/community.general/pull/2057).
|
||||
- proxmox - removed requirement that root password is provided when containter
|
||||
state is ``present`` (https://github.com/ansible-collections/community.general/pull/1999).
|
||||
- proxmox inventory - exclude qemu templates from inclusion to the inventory
|
||||
via pools (https://github.com/ansible-collections/community.general/issues/1986,
|
||||
https://github.com/ansible-collections/community.general/pull/1991).
|
||||
- proxmox inventory plugin - allowed proxomox tag string to contain commas when
|
||||
returned as fact (https://github.com/ansible-collections/community.general/pull/1949).
|
||||
- redfish_config module, redfish_utils module utils - fix IndexError in ``SetManagerNic``
|
||||
command (https://github.com/ansible-collections/community.general/issues/1692).
|
||||
- scaleway inventory plugin - fix pagination on scaleway inventory plugin (https://github.com/ansible-collections/community.general/pull/2036).
|
||||
- stacki_host - replaced ``default`` to environment variables with ``fallback``
|
||||
to them (https://github.com/ansible-collections/community.general/pull/2072).
|
||||
minor_changes:
|
||||
- archive - refactored some reused code out into a couple of functions (https://github.com/ansible-collections/community.general/pull/2061).
|
||||
- csv module utils - new module_utils for shared functions between ``from_csv``
|
||||
filter and ``read_csv`` module (https://github.com/ansible-collections/community.general/pull/2037).
|
||||
- ipa_sudorule - add support for setting sudo runasuser (https://github.com/ansible-collections/community.general/pull/2031).
|
||||
- jenkins_job - add a ``validate_certs`` parameter that allows disabling TLS/SSL
|
||||
certificate validation (https://github.com/ansible-collections/community.general/issues/255).
|
||||
- kibana_plugin - add parameter for passing ``--allow-root`` flag to kibana
|
||||
and kibana-plugin commands (https://github.com/ansible-collections/community.general/pull/2014).
|
||||
- proxmox - added ``purge`` module parameter for use when deleting lxc's with
|
||||
HA options (https://github.com/ansible-collections/community.general/pull/2013).
|
||||
- proxmox inventory plugin - added ``tags_parsed`` fact containing tags parsed
|
||||
as a list (https://github.com/ansible-collections/community.general/pull/1949).
|
||||
- proxmox_kvm - added new module parameter ``tags`` for use with PVE 6+ (https://github.com/ansible-collections/community.general/pull/2000).
|
||||
- rax - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2006).
|
||||
- rax_cdb_user - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2006).
|
||||
- rax_scaling_group - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2006).
|
||||
- read_csv - refactored read_csv module to use shared csv functions from csv
|
||||
module_utils (https://github.com/ansible-collections/community.general/pull/2037).
|
||||
- redfish_* modules, redfish_utils module utils - add support for Redfish session
|
||||
create, delete, and authenticate (https://github.com/ansible-collections/community.general/issues/1975).
|
||||
- snmp_facts - added parameters ``timeout`` and ``retries`` to module (https://github.com/ansible-collections/community.general/issues/980).
|
||||
release_summary: Fixes compatibility issues with the latest ansible-core 2.11
|
||||
beta, some more bugs, and contains several new features, modules and plugins.
|
||||
fragments:
|
||||
- 1949-proxmox-inventory-tags.yml
|
||||
- 1977-jenkinsjob-validate-certs.yml
|
||||
- 1991-proxmox-inventory-fix-template-in-pool.yml
|
||||
- 1999-proxmox-fix-issue-1955.yml
|
||||
- 2.3.0.yml
|
||||
- 2000-proxmox_kvm-tag-support.yml
|
||||
- 2001-no_log-false.yml
|
||||
- 2006-valmod-batch8.yml
|
||||
- 2013-proxmox-purge-parameter.yml
|
||||
- 2014-allow-root-for-kibana-plugin.yaml
|
||||
- 2024-module-helper-fixes.yml
|
||||
- 2027-add-redfish-session-create-delete-authenticate.yml
|
||||
- 2031-ipa_sudorule_add_runasextusers.yml
|
||||
- 2036-scaleway-inventory.yml
|
||||
- 2037-add-from-csv-filter.yml
|
||||
- 2040-fix-index-error-in-redfish-set-manager-nic.yml
|
||||
- 2057-nios-devel.yml
|
||||
- 2061-archive-refactor1.yml
|
||||
- 2065-snmp-facts-timeout.yml
|
||||
- 2072-stacki-host-params-fallback.yml
|
||||
modules:
|
||||
- description: Manage Gandi LiveDNS records
|
||||
name: gandi_livedns
|
||||
namespace: net_tools
|
||||
- description: Manage Pritunl Users using the Pritunl API
|
||||
name: pritunl_user
|
||||
namespace: net_tools.pritunl
|
||||
- description: List Pritunl Users using the Pritunl API
|
||||
name: pritunl_user_info
|
||||
namespace: net_tools.pritunl
|
||||
plugins:
|
||||
filter:
|
||||
- description: Converts CSV text input into list of dicts
|
||||
name: from_csv
|
||||
namespace: null
|
||||
release_date: '2021-03-23'
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
namespace: community
|
||||
name: general
|
||||
version: 2.1.1
|
||||
version: 2.3.0
|
||||
readme: README.md
|
||||
authors:
|
||||
- Ansible (https://github.com/ansible)
|
||||
|
||||
120
meta/runtime.yml
120
meta/runtime.yml
@@ -41,7 +41,7 @@ plugin_routing:
|
||||
ali_instance_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.ali_instance_info instead.
|
||||
docker_compose:
|
||||
redirect: community.docker.docker_compose
|
||||
docker_config:
|
||||
@@ -198,15 +198,15 @@ plugin_routing:
|
||||
hpilo_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.hpilo_info instead.
|
||||
idrac_redfish_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.idrac_redfish_info instead.
|
||||
jenkins_job_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.jenkins_job_info instead.
|
||||
katello:
|
||||
tombstone:
|
||||
removal_version: 2.0.0
|
||||
@@ -226,7 +226,7 @@ plugin_routing:
|
||||
ldap_attr:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.ldap_attrs instead.
|
||||
logicmonitor:
|
||||
tombstone:
|
||||
removal_version: 1.0.0
|
||||
@@ -238,11 +238,11 @@ plugin_routing:
|
||||
memset_memstore_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.memset_memstore_info instead.
|
||||
memset_server_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.memset_server_info instead.
|
||||
na_cdot_aggregate:
|
||||
tombstone:
|
||||
removal_version: 2.0.0
|
||||
@@ -278,159 +278,159 @@ plugin_routing:
|
||||
na_ontap_gather_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use netapp.ontap.na_ontap_info instead.
|
||||
nginx_status_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.nginx_status_info instead.
|
||||
one_image_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.one_image_info instead.
|
||||
onepassword_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.onepassword_info instead.
|
||||
oneview_datacenter_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.oneview_datacenter_info instead.
|
||||
oneview_enclosure_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.oneview_enclosure_info instead.
|
||||
oneview_ethernet_network_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.oneview_ethernet_network_info instead.
|
||||
oneview_fc_network_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.oneview_fc_network_info instead.
|
||||
oneview_fcoe_network_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.oneview_fcoe_network_info instead.
|
||||
oneview_logical_interconnect_group_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.oneview_logical_interconnect_group_info instead.
|
||||
oneview_network_set_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.oneview_network_set_info instead.
|
||||
oneview_san_manager_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.oneview_san_manager_info instead.
|
||||
online_server_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.online_server_info instead.
|
||||
online_user_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.online_user_info instead.
|
||||
ovirt:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_vm instead.
|
||||
ovirt_affinity_label_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_affinity_label_info instead.
|
||||
ovirt_api_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_api_info instead.
|
||||
ovirt_cluster_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_cluster_info instead.
|
||||
ovirt_datacenter_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_datacenter_info instead.
|
||||
ovirt_disk_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_disk_info instead.
|
||||
ovirt_event_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_event_info instead.
|
||||
ovirt_external_provider_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_external_provider_info instead.
|
||||
ovirt_group_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_group_info instead.
|
||||
ovirt_host_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_host_info instead.
|
||||
ovirt_host_storage_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_host_storage_info instead.
|
||||
ovirt_network_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_network_info instead.
|
||||
ovirt_nic_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_nic_info instead.
|
||||
ovirt_permission_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_permission_info instead.
|
||||
ovirt_quota_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_quota_info instead.
|
||||
ovirt_scheduling_policy_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_scheduling_policy_info instead.
|
||||
ovirt_snapshot_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_snapshot_info instead.
|
||||
ovirt_storage_domain_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_storage_domain_info instead.
|
||||
ovirt_storage_template_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_storage_template_info instead.
|
||||
ovirt_storage_vm_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_storage_vm_info instead.
|
||||
ovirt_tag_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_tag_info instead.
|
||||
ovirt_template_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_template_info instead.
|
||||
ovirt_user_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_user_info instead.
|
||||
ovirt_vm_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_vm_info instead.
|
||||
ovirt_vmpool_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use ovirt.ovirt.ovirt_vmpool_info instead.
|
||||
postgresql_copy:
|
||||
redirect: community.postgresql.postgresql_copy
|
||||
postgresql_db:
|
||||
@@ -478,47 +478,47 @@ plugin_routing:
|
||||
purefa_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use purestorage.flasharray.purefa_info instead.
|
||||
purefb_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use purestorage.flashblade.purefb_info instead.
|
||||
python_requirements_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.python_requirements_info instead.
|
||||
redfish_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.redfish_info instead.
|
||||
scaleway_image_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.scaleway_image_info instead.
|
||||
scaleway_ip_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.scaleway_ip_info instead.
|
||||
scaleway_organization_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.scaleway_organization_info instead.
|
||||
scaleway_security_group_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.scaleway_security_group_info instead.
|
||||
scaleway_server_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.scaleway_server_info instead.
|
||||
scaleway_snapshot_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.scaleway_snapshot_info instead.
|
||||
scaleway_volume_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.scaleway_volume_info instead.
|
||||
sf_account_manager:
|
||||
tombstone:
|
||||
removal_version: 2.0.0
|
||||
@@ -542,15 +542,15 @@ plugin_routing:
|
||||
smartos_image_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.smartos_image_info instead.
|
||||
vertica_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.vertica_info instead.
|
||||
xenserver_guest_facts:
|
||||
deprecation:
|
||||
removal_version: 3.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
warning_text: Use community.general.xenserver_guest_info instead.
|
||||
doc_fragments:
|
||||
_gcp:
|
||||
redirect: community.google._gcp
|
||||
|
||||
2
plugins/cache/memcached.py
vendored
2
plugins/cache/memcached.py
vendored
@@ -162,7 +162,7 @@ class CacheModuleKeys(MutableSet):
|
||||
self._cache.set(self.PREFIX, self._keyset)
|
||||
|
||||
def remove_by_timerange(self, s_min, s_max):
|
||||
for k in self._keyset.keys():
|
||||
for k in list(self._keyset.keys()):
|
||||
t = self._keyset[k]
|
||||
if s_min < t < s_max:
|
||||
del self._keyset[k]
|
||||
|
||||
6
plugins/cache/redis.py
vendored
6
plugins/cache/redis.py
vendored
@@ -217,14 +217,12 @@ class CacheModule(BaseCacheModule):
|
||||
self._db.zrem(self._keys_set, key)
|
||||
|
||||
def flush(self):
|
||||
for key in self.keys():
|
||||
for key in list(self.keys()):
|
||||
self.delete(key)
|
||||
|
||||
def copy(self):
|
||||
# TODO: there is probably a better way to do this in redis
|
||||
ret = dict()
|
||||
for key in self.keys():
|
||||
ret[key] = self.get(key)
|
||||
ret = dict([(k, self.get(k)) for k in self.keys()])
|
||||
return ret
|
||||
|
||||
def __getstate__(self):
|
||||
|
||||
@@ -1013,7 +1013,7 @@ class CallbackModule(Default):
|
||||
for attr in _stats_attributes:
|
||||
_ret[self.DIY_NS]['stats'].update({attr: _get_value(obj=stats, attr=attr)})
|
||||
|
||||
_ret[self.DIY_NS].update({'top_level_var_names': _ret.keys()})
|
||||
_ret[self.DIY_NS].update({'top_level_var_names': list(_ret.keys())})
|
||||
|
||||
return _ret
|
||||
|
||||
|
||||
@@ -67,7 +67,7 @@ COLORS = {
|
||||
|
||||
def dict_diff(prv, nxt):
|
||||
"""Return a dict of keys that differ with another config object."""
|
||||
keys = set(prv.keys() + nxt.keys())
|
||||
keys = set(list(prv.keys()) + list(nxt.keys()))
|
||||
result = {}
|
||||
for k in keys:
|
||||
if prv.get(k) != nxt.get(k):
|
||||
|
||||
43
plugins/doc_fragments/pritunl.py
Normal file
43
plugins/doc_fragments/pritunl.py
Normal file
@@ -0,0 +1,43 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Florian Dambrine <android.florian@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
class ModuleDocFragment(object):
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
options:
|
||||
pritunl_url:
|
||||
type: str
|
||||
required: true
|
||||
description:
|
||||
- URL and port of the Pritunl server on which the API is enabled.
|
||||
|
||||
pritunl_api_token:
|
||||
type: str
|
||||
required: true
|
||||
description:
|
||||
- API Token of a Pritunl admin user.
|
||||
- It needs to be enabled in Administrators > USERNAME > Enable Token Authentication.
|
||||
|
||||
pritunl_api_secret:
|
||||
type: str
|
||||
required: true
|
||||
description:
|
||||
- API Secret found in Administrators > USERNAME > API Secret.
|
||||
|
||||
validate_certs:
|
||||
type: bool
|
||||
required: false
|
||||
default: true
|
||||
description:
|
||||
- If certificates should be validated or not.
|
||||
- This should never be set to C(false), except if you are very sure that
|
||||
your connection to the server can not be subject to a Man In The Middle
|
||||
attack.
|
||||
"""
|
||||
49
plugins/filter/from_csv.py
Normal file
49
plugins/filter/from_csv.py
Normal file
@@ -0,0 +1,49 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) <ajpantuso@gmail.com>
|
||||
# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.errors import AnsibleFilterError
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError,
|
||||
DialectNotAvailableError,
|
||||
CustomDialectFailureError)
|
||||
|
||||
|
||||
def from_csv(data, dialect='excel', fieldnames=None, delimiter=None, skipinitialspace=None, strict=None):
|
||||
|
||||
dialect_params = {
|
||||
"delimiter": delimiter,
|
||||
"skipinitialspace": skipinitialspace,
|
||||
"strict": strict,
|
||||
}
|
||||
|
||||
try:
|
||||
dialect = initialize_dialect(dialect, **dialect_params)
|
||||
except (CustomDialectFailureError, DialectNotAvailableError) as e:
|
||||
raise AnsibleFilterError(to_native(e))
|
||||
|
||||
reader = read_csv(data, dialect, fieldnames)
|
||||
|
||||
data_list = []
|
||||
|
||||
try:
|
||||
for row in reader:
|
||||
data_list.append(row)
|
||||
except CSVError as e:
|
||||
raise AnsibleFilterError("Unable to process file: %s" % to_native(e))
|
||||
|
||||
return data_list
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
|
||||
def filters(self):
|
||||
return {
|
||||
'from_csv': from_csv
|
||||
}
|
||||
21
plugins/filter/version_sort.py
Normal file
21
plugins/filter/version_sort.py
Normal file
@@ -0,0 +1,21 @@
|
||||
# Copyright (C) 2021 Eric Lavarde <elavarde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
|
||||
def version_sort(value, reverse=False):
|
||||
'''Sort a list according to loose versions so that e.g. 2.9 is smaller than 2.10'''
|
||||
return sorted(value, key=LooseVersion, reverse=reverse)
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
''' Version sort filter '''
|
||||
|
||||
def filters(self):
|
||||
return {
|
||||
'version_sort': version_sort
|
||||
}
|
||||
@@ -34,18 +34,15 @@ DOCUMENTATION = r'''
|
||||
description: Populate inventory with instances in this region.
|
||||
default: []
|
||||
type: list
|
||||
required: false
|
||||
tags:
|
||||
description: Populate inventory only with instances which have at least one of the tags listed here.
|
||||
default: []
|
||||
type: list
|
||||
reqired: false
|
||||
version_added: 2.0.0
|
||||
types:
|
||||
description: Populate inventory with instances with this type.
|
||||
default: []
|
||||
type: list
|
||||
required: false
|
||||
strict:
|
||||
version_added: 2.0.0
|
||||
compose:
|
||||
|
||||
@@ -217,6 +217,10 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
|
||||
vmtype_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), vmtype_key.lower()))
|
||||
self.inventory.set_variable(name, vmtype_key, vmtype)
|
||||
|
||||
plaintext_configs = [
|
||||
'tags',
|
||||
]
|
||||
|
||||
for config in ret:
|
||||
key = config
|
||||
key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), key.lower()))
|
||||
@@ -226,6 +230,12 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
|
||||
if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')):
|
||||
value = ('disk_image=' + value)
|
||||
|
||||
# Additional field containing parsed tags as list
|
||||
if config == 'tags':
|
||||
parsed_key = self.to_safe('%s%s' % (key, "_parsed"))
|
||||
parsed_value = [tag.strip() for tag in value.split(",")]
|
||||
self.inventory.set_variable(name, parsed_key, parsed_value)
|
||||
|
||||
if not (isinstance(value, int) or ',' not in value):
|
||||
# split off strings with commas to a dict
|
||||
# skip over any keys that cannot be processed
|
||||
@@ -339,7 +349,8 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
|
||||
|
||||
for member in self._get_members_per_pool(pool['poolid']):
|
||||
if member.get('name'):
|
||||
self.inventory.add_child(pool_group, member['name'])
|
||||
if not member.get('template'):
|
||||
self.inventory.add_child(pool_group, member['name'])
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
|
||||
if not HAS_REQUESTS:
|
||||
@@ -352,7 +363,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
|
||||
self._read_config_data(path)
|
||||
|
||||
# get connection host
|
||||
self.proxmox_url = self.get_option('url')
|
||||
self.proxmox_url = self.get_option('url').rstrip('/')
|
||||
self.proxmox_user = self.get_option('user')
|
||||
self.proxmox_password = self.get_option('password')
|
||||
self.cache_key = self.get_cache_key(path)
|
||||
|
||||
@@ -81,7 +81,7 @@ class LookupModule(LookupBase):
|
||||
)
|
||||
if args:
|
||||
raise AnsibleError(
|
||||
"unrecognized arguments to with_sequence: %r" % args.keys()
|
||||
"unrecognized arguments to with_sequence: %r" % list(args.keys())
|
||||
)
|
||||
|
||||
def run(self, terms, variables=None, **kwargs):
|
||||
|
||||
@@ -82,6 +82,27 @@ EXAMPLES = r"""
|
||||
| items2dict(key_name='slug',
|
||||
value_name='itemValue'))['password']
|
||||
}}
|
||||
|
||||
- hosts: localhost
|
||||
vars:
|
||||
secret: >-
|
||||
{{
|
||||
lookup(
|
||||
'community.general.tss',
|
||||
102,
|
||||
base_url='https://secretserver.domain.com/SecretServer/',
|
||||
username='user.name',
|
||||
password='password'
|
||||
)
|
||||
}}
|
||||
tasks:
|
||||
- ansible.builtin.debug:
|
||||
msg: >
|
||||
the password is {{
|
||||
(secret['items']
|
||||
| items2dict(key_name='slug',
|
||||
value_name='itemValue'))['password']
|
||||
}}
|
||||
"""
|
||||
|
||||
from ansible.errors import AnsibleError, AnsibleOptionsError
|
||||
|
||||
67
plugins/module_utils/csv.py
Normal file
67
plugins/module_utils/csv.py
Normal file
@@ -0,0 +1,67 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) <ajpantuso@gmail.com>
|
||||
# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import csv
|
||||
from io import BytesIO, StringIO
|
||||
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.six import PY3
|
||||
|
||||
|
||||
class CustomDialectFailureError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class DialectNotAvailableError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
CSVError = csv.Error
|
||||
|
||||
|
||||
def initialize_dialect(dialect, **kwargs):
|
||||
# Add Unix dialect from Python 3
|
||||
class unix_dialect(csv.Dialect):
|
||||
"""Describe the usual properties of Unix-generated CSV files."""
|
||||
delimiter = ','
|
||||
quotechar = '"'
|
||||
doublequote = True
|
||||
skipinitialspace = False
|
||||
lineterminator = '\n'
|
||||
quoting = csv.QUOTE_ALL
|
||||
|
||||
csv.register_dialect("unix", unix_dialect)
|
||||
|
||||
if dialect not in csv.list_dialects():
|
||||
raise DialectNotAvailableError("Dialect '%s' is not supported by your version of python." % dialect)
|
||||
|
||||
# Create a dictionary from only set options
|
||||
dialect_params = dict((k, v) for k, v in kwargs.items() if v is not None)
|
||||
if dialect_params:
|
||||
try:
|
||||
csv.register_dialect('custom', dialect, **dialect_params)
|
||||
except TypeError as e:
|
||||
raise CustomDialectFailureError("Unable to create custom dialect: %s" % to_native(e))
|
||||
dialect = 'custom'
|
||||
|
||||
return dialect
|
||||
|
||||
|
||||
def read_csv(data, dialect, fieldnames=None):
|
||||
|
||||
data = to_native(data, errors='surrogate_or_strict')
|
||||
|
||||
if PY3:
|
||||
fake_fh = StringIO(data)
|
||||
else:
|
||||
fake_fh = BytesIO(data)
|
||||
|
||||
reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect)
|
||||
|
||||
return reader
|
||||
234
plugins/module_utils/gandi_livedns_api.py
Normal file
234
plugins/module_utils/gandi_livedns_api.py
Normal file
@@ -0,0 +1,234 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2019 Gregory Thiemonge <gregory.thiemonge@gmail.com>
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
|
||||
from ansible.module_utils._text import to_native, to_text
|
||||
from ansible.module_utils.urls import fetch_url
|
||||
|
||||
|
||||
class GandiLiveDNSAPI(object):
|
||||
|
||||
api_endpoint = 'https://api.gandi.net/v5/livedns'
|
||||
changed = False
|
||||
|
||||
error_strings = {
|
||||
400: 'Bad request',
|
||||
401: 'Permission denied',
|
||||
404: 'Resource not found',
|
||||
}
|
||||
|
||||
attribute_map = {
|
||||
'record': 'rrset_name',
|
||||
'type': 'rrset_type',
|
||||
'ttl': 'rrset_ttl',
|
||||
'values': 'rrset_values'
|
||||
}
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
self.api_key = module.params['api_key']
|
||||
|
||||
def _build_error_message(self, module, info):
|
||||
s = ''
|
||||
body = info.get('body')
|
||||
if body:
|
||||
errors = module.from_json(body).get('errors')
|
||||
if errors:
|
||||
error = errors[0]
|
||||
name = error.get('name')
|
||||
if name:
|
||||
s += '{0} :'.format(name)
|
||||
description = error.get('description')
|
||||
if description:
|
||||
s += description
|
||||
return s
|
||||
|
||||
def _gandi_api_call(self, api_call, method='GET', payload=None, error_on_404=True):
|
||||
headers = {'Authorization': 'Apikey {0}'.format(self.api_key),
|
||||
'Content-Type': 'application/json'}
|
||||
data = None
|
||||
if payload:
|
||||
try:
|
||||
data = json.dumps(payload)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e))
|
||||
|
||||
resp, info = fetch_url(self.module,
|
||||
self.api_endpoint + api_call,
|
||||
headers=headers,
|
||||
data=data,
|
||||
method=method)
|
||||
|
||||
error_msg = ''
|
||||
if info['status'] >= 400 and (info['status'] != 404 or error_on_404):
|
||||
err_s = self.error_strings.get(info['status'], '')
|
||||
|
||||
error_msg = "API Error {0}: {1}".format(err_s, self._build_error_message(self.module, info))
|
||||
|
||||
result = None
|
||||
try:
|
||||
content = resp.read()
|
||||
except AttributeError:
|
||||
content = None
|
||||
|
||||
if content:
|
||||
try:
|
||||
result = json.loads(to_text(content, errors='surrogate_or_strict'))
|
||||
except (getattr(json, 'JSONDecodeError', ValueError)) as e:
|
||||
error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content)
|
||||
|
||||
if error_msg:
|
||||
self.module.fail_json(msg=error_msg)
|
||||
|
||||
return result, info['status']
|
||||
|
||||
def build_result(self, result, domain):
|
||||
if result is None:
|
||||
return None
|
||||
|
||||
res = {}
|
||||
for k in self.attribute_map:
|
||||
v = result.get(self.attribute_map[k], None)
|
||||
if v is not None:
|
||||
if k == 'record' and v == '@':
|
||||
v = ''
|
||||
res[k] = v
|
||||
|
||||
res['domain'] = domain
|
||||
|
||||
return res
|
||||
|
||||
def build_results(self, results, domain):
|
||||
if results is None:
|
||||
return []
|
||||
return [self.build_result(r, domain) for r in results]
|
||||
|
||||
def get_records(self, record, type, domain):
|
||||
url = '/domains/%s/records' % (domain)
|
||||
if record:
|
||||
url += '/%s' % (record)
|
||||
if type:
|
||||
url += '/%s' % (type)
|
||||
|
||||
records, status = self._gandi_api_call(url, error_on_404=False)
|
||||
|
||||
if status == 404:
|
||||
return []
|
||||
|
||||
if not isinstance(records, list):
|
||||
records = [records]
|
||||
|
||||
# filter by type if record is not set
|
||||
if not record and type:
|
||||
records = [r
|
||||
for r in records
|
||||
if r['rrset_type'] == type]
|
||||
|
||||
return records
|
||||
|
||||
def create_record(self, record, type, values, ttl, domain):
|
||||
url = '/domains/%s/records' % (domain)
|
||||
new_record = {
|
||||
'rrset_name': record,
|
||||
'rrset_type': type,
|
||||
'rrset_values': values,
|
||||
'rrset_ttl': ttl,
|
||||
}
|
||||
record, status = self._gandi_api_call(url, method='POST', payload=new_record)
|
||||
|
||||
if status in (200, 201,):
|
||||
return new_record
|
||||
|
||||
return None
|
||||
|
||||
def update_record(self, record, type, values, ttl, domain):
|
||||
url = '/domains/%s/records/%s/%s' % (domain, record, type)
|
||||
new_record = {
|
||||
'rrset_values': values,
|
||||
'rrset_ttl': ttl,
|
||||
}
|
||||
record = self._gandi_api_call(url, method='PUT', payload=new_record)[0]
|
||||
return record
|
||||
|
||||
def delete_record(self, record, type, domain):
|
||||
url = '/domains/%s/records/%s/%s' % (domain, record, type)
|
||||
|
||||
self._gandi_api_call(url, method='DELETE')
|
||||
|
||||
def delete_dns_record(self, record, type, values, domain):
|
||||
if record == '':
|
||||
record = '@'
|
||||
|
||||
records = self.get_records(record, type, domain)
|
||||
|
||||
if records:
|
||||
cur_record = records[0]
|
||||
|
||||
self.changed = True
|
||||
|
||||
if values is not None and set(cur_record['rrset_values']) != set(values):
|
||||
new_values = set(cur_record['rrset_values']) - set(values)
|
||||
if new_values:
|
||||
# Removing one or more values from a record, we update the record with the remaining values
|
||||
self.update_record(record, type, list(new_values), cur_record['rrset_ttl'], domain)
|
||||
records = self.get_records(record, type, domain)
|
||||
return records[0], self.changed
|
||||
|
||||
if not self.module.check_mode:
|
||||
self.delete_record(record, type, domain)
|
||||
else:
|
||||
cur_record = None
|
||||
|
||||
return None, self.changed
|
||||
|
||||
def ensure_dns_record(self, record, type, ttl, values, domain):
|
||||
if record == '':
|
||||
record = '@'
|
||||
|
||||
records = self.get_records(record, type, domain)
|
||||
|
||||
if records:
|
||||
cur_record = records[0]
|
||||
|
||||
do_update = False
|
||||
if ttl is not None and cur_record['rrset_ttl'] != ttl:
|
||||
do_update = True
|
||||
if values is not None and set(cur_record['rrset_values']) != set(values):
|
||||
do_update = True
|
||||
|
||||
if do_update:
|
||||
if self.module.check_mode:
|
||||
result = dict(
|
||||
rrset_type=type,
|
||||
rrset_name=record,
|
||||
rrset_values=values,
|
||||
rrset_ttl=ttl
|
||||
)
|
||||
else:
|
||||
self.update_record(record, type, values, ttl, domain)
|
||||
|
||||
records = self.get_records(record, type, domain)
|
||||
result = records[0]
|
||||
self.changed = True
|
||||
return result, self.changed
|
||||
else:
|
||||
return cur_record, self.changed
|
||||
|
||||
if self.module.check_mode:
|
||||
new_record = dict(
|
||||
rrset_type=type,
|
||||
rrset_name=record,
|
||||
rrset_values=values,
|
||||
rrset_ttl=ttl
|
||||
)
|
||||
result = new_record
|
||||
else:
|
||||
result = self.create_record(record, type, values, ttl, domain)
|
||||
|
||||
self.changed = True
|
||||
return result, self.changed
|
||||
@@ -55,7 +55,7 @@ def keycloak_argument_spec():
|
||||
:return: argument_spec dict
|
||||
"""
|
||||
return dict(
|
||||
auth_keycloak_url=dict(type='str', aliases=['url'], required=True),
|
||||
auth_keycloak_url=dict(type='str', aliases=['url'], required=True, no_log=False),
|
||||
auth_client_id=dict(type='str', default='admin-cli'),
|
||||
auth_realm=dict(type='str', required=True),
|
||||
auth_client_secret=dict(type='str', default=None, no_log=True),
|
||||
|
||||
@@ -93,6 +93,8 @@ class ArgFormat(object):
|
||||
self.arg_format = (self.stars_deco(stars))(self.arg_format)
|
||||
|
||||
def to_text(self, value):
|
||||
if value is None:
|
||||
return []
|
||||
func = self.arg_format
|
||||
return [str(p) for p in func(value)]
|
||||
|
||||
@@ -121,6 +123,7 @@ def module_fails_on_exception(func):
|
||||
except ModuleHelperException as e:
|
||||
if e.update_output:
|
||||
self.update_output(e.update_output)
|
||||
self.module.fail_json(changed=False, msg=e.msg, exception=traceback.format_exc(), output=self.output, vars=self.vars)
|
||||
except Exception as e:
|
||||
self.vars.msg = "Module failed with exception: {0}".format(str(e).strip())
|
||||
self.vars.exception = traceback.format_exc()
|
||||
@@ -292,21 +295,35 @@ class CmdMixin(object):
|
||||
|
||||
extra_params = extra_params or dict()
|
||||
cmd_args = list([self.command]) if isinstance(self.command, str) else list(self.command)
|
||||
cmd_args[0] = self.module.get_bin_path(cmd_args[0])
|
||||
try:
|
||||
cmd_args[0] = self.module.get_bin_path(cmd_args[0], required=True)
|
||||
except ValueError:
|
||||
pass
|
||||
param_list = params if params else self.module.params.keys()
|
||||
|
||||
for param in param_list:
|
||||
if param in self.module.argument_spec:
|
||||
if param not in self.module.params:
|
||||
if isinstance(param, dict):
|
||||
if len(param) != 1:
|
||||
raise ModuleHelperException("run_command parameter as a dict must "
|
||||
"contain only one key: {0}".format(param))
|
||||
_param = list(param.keys())[0]
|
||||
fmt = find_format(_param)
|
||||
value = param[_param]
|
||||
elif isinstance(param, str):
|
||||
if param in self.module.argument_spec:
|
||||
fmt = find_format(param)
|
||||
value = self.module.params[param]
|
||||
elif param in extra_params:
|
||||
fmt = find_format(param)
|
||||
value = extra_params[param]
|
||||
else:
|
||||
self.module.deprecate("Cannot determine value for parameter: {0}. "
|
||||
"From version 4.0.0 onwards this will generate an exception".format(param),
|
||||
version="4.0.0", collection_name="community.general")
|
||||
continue
|
||||
fmt = find_format(param)
|
||||
value = self.module.params[param]
|
||||
|
||||
else:
|
||||
if param not in extra_params:
|
||||
continue
|
||||
fmt = find_format(param)
|
||||
value = extra_params[param]
|
||||
self.cmd_args = cmd_args
|
||||
raise ModuleHelperException("run_command parameter must be either a str or a dict: {0}".format(param))
|
||||
cmd_args = add_arg_formatted_param(cmd_args, fmt, value)
|
||||
|
||||
return cmd_args
|
||||
|
||||
@@ -18,6 +18,7 @@ from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.six import iteritems
|
||||
from ansible.module_utils._text import to_text
|
||||
from ansible.module_utils.basic import env_fallback
|
||||
from ansible.module_utils.common.validation import check_type_dict
|
||||
|
||||
try:
|
||||
from infoblox_client.connector import Connector
|
||||
@@ -399,11 +400,11 @@ class WapiModule(WapiBase):
|
||||
|
||||
if 'ipv4addrs' in proposed_object:
|
||||
if 'nios_next_ip' in proposed_object['ipv4addrs'][0]['ipv4addr']:
|
||||
ip_range = self.module._check_type_dict(proposed_object['ipv4addrs'][0]['ipv4addr'])['nios_next_ip']
|
||||
ip_range = check_type_dict(proposed_object['ipv4addrs'][0]['ipv4addr'])['nios_next_ip']
|
||||
proposed_object['ipv4addrs'][0]['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range
|
||||
elif 'ipv4addr' in proposed_object:
|
||||
if 'nios_next_ip' in proposed_object['ipv4addr']:
|
||||
ip_range = self.module._check_type_dict(proposed_object['ipv4addr'])['nios_next_ip']
|
||||
ip_range = check_type_dict(proposed_object['ipv4addr'])['nios_next_ip']
|
||||
proposed_object['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range
|
||||
|
||||
return proposed_object
|
||||
@@ -485,7 +486,7 @@ class WapiModule(WapiBase):
|
||||
if ('name' in obj_filter):
|
||||
# gets and returns the current object based on name/old_name passed
|
||||
try:
|
||||
name_obj = self.module._check_type_dict(obj_filter['name'])
|
||||
name_obj = check_type_dict(obj_filter['name'])
|
||||
old_name = name_obj['old_name']
|
||||
new_name = name_obj['new_name']
|
||||
except TypeError:
|
||||
@@ -499,12 +500,12 @@ class WapiModule(WapiBase):
|
||||
else:
|
||||
test_obj_filter = dict([('name', old_name)])
|
||||
# get the object reference
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys())
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=list(ib_spec.keys()))
|
||||
if ib_obj:
|
||||
obj_filter['name'] = new_name
|
||||
else:
|
||||
test_obj_filter['name'] = new_name
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys())
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=list(ib_spec.keys()))
|
||||
update = True
|
||||
return ib_obj, update, new_name
|
||||
if (ib_obj_type == NIOS_HOST_RECORD):
|
||||
@@ -521,7 +522,7 @@ class WapiModule(WapiBase):
|
||||
test_obj_filter['name'] = test_obj_filter['name'].lower()
|
||||
# resolves issue where multiple a_records with same name and different IP address
|
||||
try:
|
||||
ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr'])
|
||||
ipaddr_obj = check_type_dict(obj_filter['ipv4addr'])
|
||||
ipaddr = ipaddr_obj['old_ipv4addr']
|
||||
except TypeError:
|
||||
ipaddr = obj_filter['ipv4addr']
|
||||
@@ -530,7 +531,7 @@ class WapiModule(WapiBase):
|
||||
# resolves issue where multiple txt_records with same name and different text
|
||||
test_obj_filter = obj_filter
|
||||
try:
|
||||
text_obj = self.module._check_type_dict(obj_filter['text'])
|
||||
text_obj = check_type_dict(obj_filter['text'])
|
||||
txt = text_obj['old_text']
|
||||
except TypeError:
|
||||
txt = obj_filter['text']
|
||||
@@ -538,32 +539,32 @@ class WapiModule(WapiBase):
|
||||
# check if test_obj_filter is empty copy passed obj_filter
|
||||
else:
|
||||
test_obj_filter = obj_filter
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=list(ib_spec.keys()))
|
||||
elif (ib_obj_type == NIOS_A_RECORD):
|
||||
# resolves issue where multiple a_records with same name and different IP address
|
||||
test_obj_filter = obj_filter
|
||||
try:
|
||||
ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr'])
|
||||
ipaddr_obj = check_type_dict(obj_filter['ipv4addr'])
|
||||
ipaddr = ipaddr_obj['old_ipv4addr']
|
||||
except TypeError:
|
||||
ipaddr = obj_filter['ipv4addr']
|
||||
test_obj_filter['ipv4addr'] = ipaddr
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=list(ib_spec.keys()))
|
||||
elif (ib_obj_type == NIOS_TXT_RECORD):
|
||||
# resolves issue where multiple txt_records with same name and different text
|
||||
test_obj_filter = obj_filter
|
||||
try:
|
||||
text_obj = self.module._check_type_dict(obj_filter['text'])
|
||||
text_obj = check_type_dict(obj_filter['text'])
|
||||
txt = text_obj['old_text']
|
||||
except TypeError:
|
||||
txt = obj_filter['text']
|
||||
test_obj_filter['text'] = txt
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=list(ib_spec.keys()))
|
||||
elif (ib_obj_type == NIOS_ZONE):
|
||||
# del key 'restart_if_needed' as nios_zone get_object fails with the key present
|
||||
temp = ib_spec['restart_if_needed']
|
||||
del ib_spec['restart_if_needed']
|
||||
ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
|
||||
ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=list(ib_spec.keys()))
|
||||
# reinstate restart_if_needed if ib_obj is none, meaning there's no existing nios_zone ref
|
||||
if not ib_obj:
|
||||
ib_spec['restart_if_needed'] = temp
|
||||
@@ -571,12 +572,12 @@ class WapiModule(WapiBase):
|
||||
# del key 'create_token' as nios_member get_object fails with the key present
|
||||
temp = ib_spec['create_token']
|
||||
del ib_spec['create_token']
|
||||
ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
|
||||
ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=list(ib_spec.keys()))
|
||||
if temp:
|
||||
# reinstate 'create_token' key
|
||||
ib_spec['create_token'] = temp
|
||||
else:
|
||||
ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
|
||||
ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=list(ib_spec.keys()))
|
||||
return ib_obj, update, new_name
|
||||
|
||||
def on_update(self, proposed_object, ib_spec):
|
||||
|
||||
0
plugins/module_utils/net_tools/pritunl/__init__.py
Normal file
0
plugins/module_utils/net_tools/pritunl/__init__.py
Normal file
300
plugins/module_utils/net_tools/pritunl/api.py
Normal file
300
plugins/module_utils/net_tools/pritunl/api.py
Normal file
@@ -0,0 +1,300 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2021, Florian Dambrine <android.florian@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
"""
|
||||
Pritunl API that offers CRUD operations on Pritunl Organizations and Users
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
import base64
|
||||
import hashlib
|
||||
import hmac
|
||||
import json
|
||||
import time
|
||||
import uuid
|
||||
|
||||
from ansible.module_utils.six import iteritems
|
||||
from ansible.module_utils.urls import open_url
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
class PritunlException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def pritunl_argument_spec():
|
||||
return dict(
|
||||
pritunl_url=dict(required=True, type="str"),
|
||||
pritunl_api_token=dict(required=True, type="str", no_log=False),
|
||||
pritunl_api_secret=dict(required=True, type="str", no_log=True),
|
||||
validate_certs=dict(required=False, type="bool", default=True),
|
||||
)
|
||||
|
||||
|
||||
def get_pritunl_settings(module):
|
||||
"""
|
||||
Helper function to set required Pritunl request params from module arguments.
|
||||
"""
|
||||
return {
|
||||
"api_token": module.params.get("pritunl_api_token"),
|
||||
"api_secret": module.params.get("pritunl_api_secret"),
|
||||
"base_url": module.params.get("pritunl_url"),
|
||||
"validate_certs": module.params.get("validate_certs"),
|
||||
}
|
||||
|
||||
|
||||
def _get_pritunl_organizations(api_token, api_secret, base_url, validate_certs=True):
|
||||
return pritunl_auth_request(
|
||||
base_url=base_url,
|
||||
api_token=api_token,
|
||||
api_secret=api_secret,
|
||||
method="GET",
|
||||
path="/organization",
|
||||
validate_certs=validate_certs,
|
||||
)
|
||||
|
||||
|
||||
def _get_pritunl_users(
|
||||
api_token, api_secret, base_url, organization_id, validate_certs=True
|
||||
):
|
||||
return pritunl_auth_request(
|
||||
api_token=api_token,
|
||||
api_secret=api_secret,
|
||||
base_url=base_url,
|
||||
method="GET",
|
||||
path="/user/%s" % organization_id,
|
||||
validate_certs=validate_certs,
|
||||
)
|
||||
|
||||
|
||||
def _delete_pritunl_user(
|
||||
api_token, api_secret, base_url, organization_id, user_id, validate_certs=True
|
||||
):
|
||||
return pritunl_auth_request(
|
||||
api_token=api_token,
|
||||
api_secret=api_secret,
|
||||
base_url=base_url,
|
||||
method="DELETE",
|
||||
path="/user/%s/%s" % (organization_id, user_id),
|
||||
validate_certs=validate_certs,
|
||||
)
|
||||
|
||||
|
||||
def _post_pritunl_user(
|
||||
api_token, api_secret, base_url, organization_id, user_data, validate_certs=True
|
||||
):
|
||||
return pritunl_auth_request(
|
||||
api_token=api_token,
|
||||
api_secret=api_secret,
|
||||
base_url=base_url,
|
||||
method="POST",
|
||||
path="/user/%s" % organization_id,
|
||||
headers={"Content-Type": "application/json"},
|
||||
data=json.dumps(user_data),
|
||||
validate_certs=validate_certs,
|
||||
)
|
||||
|
||||
|
||||
def _put_pritunl_user(
|
||||
api_token,
|
||||
api_secret,
|
||||
base_url,
|
||||
organization_id,
|
||||
user_id,
|
||||
user_data,
|
||||
validate_certs=True,
|
||||
):
|
||||
return pritunl_auth_request(
|
||||
api_token=api_token,
|
||||
api_secret=api_secret,
|
||||
base_url=base_url,
|
||||
method="PUT",
|
||||
path="/user/%s/%s" % (organization_id, user_id),
|
||||
headers={"Content-Type": "application/json"},
|
||||
data=json.dumps(user_data),
|
||||
validate_certs=validate_certs,
|
||||
)
|
||||
|
||||
|
||||
def list_pritunl_organizations(
|
||||
api_token, api_secret, base_url, validate_certs=True, filters=None
|
||||
):
|
||||
orgs = []
|
||||
|
||||
response = _get_pritunl_organizations(
|
||||
api_token=api_token,
|
||||
api_secret=api_secret,
|
||||
base_url=base_url,
|
||||
validate_certs=validate_certs,
|
||||
)
|
||||
|
||||
if response.getcode() != 200:
|
||||
raise PritunlException("Could not retrieve organizations from Pritunl")
|
||||
else:
|
||||
for org in json.loads(response.read()):
|
||||
# No filtering
|
||||
if filters is None:
|
||||
orgs.append(org)
|
||||
else:
|
||||
if not any(
|
||||
filter_val != org[filter_key]
|
||||
for filter_key, filter_val in iteritems(filters)
|
||||
):
|
||||
orgs.append(org)
|
||||
|
||||
return orgs
|
||||
|
||||
|
||||
def list_pritunl_users(
|
||||
api_token, api_secret, base_url, organization_id, validate_certs=True, filters=None
|
||||
):
|
||||
users = []
|
||||
|
||||
response = _get_pritunl_users(
|
||||
api_token=api_token,
|
||||
api_secret=api_secret,
|
||||
base_url=base_url,
|
||||
validate_certs=validate_certs,
|
||||
organization_id=organization_id,
|
||||
)
|
||||
|
||||
if response.getcode() != 200:
|
||||
raise PritunlException("Could not retrieve users from Pritunl")
|
||||
else:
|
||||
for user in json.loads(response.read()):
|
||||
# No filtering
|
||||
if filters is None:
|
||||
users.append(user)
|
||||
|
||||
else:
|
||||
if not any(
|
||||
filter_val != user[filter_key]
|
||||
for filter_key, filter_val in iteritems(filters)
|
||||
):
|
||||
users.append(user)
|
||||
|
||||
return users
|
||||
|
||||
|
||||
def post_pritunl_user(
|
||||
api_token,
|
||||
api_secret,
|
||||
base_url,
|
||||
organization_id,
|
||||
user_data,
|
||||
user_id=None,
|
||||
validate_certs=True,
|
||||
):
|
||||
# If user_id is provided will do PUT otherwise will do POST
|
||||
if user_id is None:
|
||||
response = _post_pritunl_user(
|
||||
api_token=api_token,
|
||||
api_secret=api_secret,
|
||||
base_url=base_url,
|
||||
organization_id=organization_id,
|
||||
user_data=user_data,
|
||||
validate_certs=True,
|
||||
)
|
||||
|
||||
if response.getcode() != 200:
|
||||
raise PritunlException(
|
||||
"Could not remove user %s from organization %s from Pritunl"
|
||||
% (user_id, organization_id)
|
||||
)
|
||||
# user POST request returns an array of a single item,
|
||||
# so return this item instead of the list
|
||||
return json.loads(response.read())[0]
|
||||
else:
|
||||
response = _put_pritunl_user(
|
||||
api_token=api_token,
|
||||
api_secret=api_secret,
|
||||
base_url=base_url,
|
||||
organization_id=organization_id,
|
||||
user_data=user_data,
|
||||
user_id=user_id,
|
||||
validate_certs=True,
|
||||
)
|
||||
|
||||
if response.getcode() != 200:
|
||||
raise PritunlException(
|
||||
"Could not update user %s from organization %s from Pritunl"
|
||||
% (user_id, organization_id)
|
||||
)
|
||||
# The user PUT request returns the updated user object
|
||||
return json.loads(response.read())
|
||||
|
||||
|
||||
def delete_pritunl_user(
|
||||
api_token, api_secret, base_url, organization_id, user_id, validate_certs=True
|
||||
):
|
||||
response = _delete_pritunl_user(
|
||||
api_token=api_token,
|
||||
api_secret=api_secret,
|
||||
base_url=base_url,
|
||||
organization_id=organization_id,
|
||||
user_id=user_id,
|
||||
validate_certs=True,
|
||||
)
|
||||
|
||||
if response.getcode() != 200:
|
||||
raise PritunlException(
|
||||
"Could not remove user %s from organization %s from Pritunl"
|
||||
% (user_id, organization_id)
|
||||
)
|
||||
|
||||
return json.loads(response.read())
|
||||
|
||||
|
||||
def pritunl_auth_request(
|
||||
api_token,
|
||||
api_secret,
|
||||
base_url,
|
||||
method,
|
||||
path,
|
||||
validate_certs=True,
|
||||
headers=None,
|
||||
data=None,
|
||||
):
|
||||
"""
|
||||
Send an API call to a Pritunl server.
|
||||
Taken from https://pritunl.com/api and adaped work with Ansible open_url
|
||||
"""
|
||||
auth_timestamp = str(int(time.time()))
|
||||
auth_nonce = uuid.uuid4().hex
|
||||
|
||||
auth_string = "&".join(
|
||||
[api_token, auth_timestamp, auth_nonce, method.upper(), path]
|
||||
+ ([data] if data else [])
|
||||
)
|
||||
|
||||
auth_signature = base64.b64encode(
|
||||
hmac.new(
|
||||
api_secret.encode("utf-8"), auth_string.encode("utf-8"), hashlib.sha256
|
||||
).digest()
|
||||
)
|
||||
|
||||
auth_headers = {
|
||||
"Auth-Token": api_token,
|
||||
"Auth-Timestamp": auth_timestamp,
|
||||
"Auth-Nonce": auth_nonce,
|
||||
"Auth-Signature": auth_signature,
|
||||
}
|
||||
|
||||
if headers:
|
||||
auth_headers.update(headers)
|
||||
|
||||
try:
|
||||
uri = "%s%s" % (base_url, path)
|
||||
|
||||
return open_url(
|
||||
uri,
|
||||
method=method.upper(),
|
||||
headers=auth_headers,
|
||||
data=data,
|
||||
validate_certs=validate_certs,
|
||||
)
|
||||
except Exception as e:
|
||||
raise PritunlException(e)
|
||||
@@ -104,7 +104,7 @@ def get_common_arg_spec(supports_create=False, supports_wait=False):
|
||||
|
||||
if supports_create:
|
||||
common_args.update(
|
||||
key_by=dict(type="list", elements="str"),
|
||||
key_by=dict(type="list", elements="str", no_log=False),
|
||||
force_create=dict(type="bool", default=False),
|
||||
)
|
||||
|
||||
|
||||
@@ -39,13 +39,34 @@ class RedfishUtils(object):
|
||||
self.data_modification = data_modification
|
||||
self._init_session()
|
||||
|
||||
def _auth_params(self, headers):
|
||||
"""
|
||||
Return tuple of required authentication params based on the presence
|
||||
of a token in the self.creds dict. If using a token, set the
|
||||
X-Auth-Token header in the `headers` param.
|
||||
|
||||
:param headers: dict containing headers to send in request
|
||||
:return: tuple of username, password and force_basic_auth
|
||||
"""
|
||||
if self.creds.get('token'):
|
||||
username = None
|
||||
password = None
|
||||
force_basic_auth = False
|
||||
headers['X-Auth-Token'] = self.creds['token']
|
||||
else:
|
||||
username = self.creds['user']
|
||||
password = self.creds['pswd']
|
||||
force_basic_auth = True
|
||||
return username, password, force_basic_auth
|
||||
|
||||
# The following functions are to send GET/POST/PATCH/DELETE requests
|
||||
def get_request(self, uri):
|
||||
req_headers = dict(GET_HEADERS)
|
||||
username, password, basic_auth = self._auth_params(req_headers)
|
||||
try:
|
||||
resp = open_url(uri, method="GET", headers=GET_HEADERS,
|
||||
url_username=self.creds['user'],
|
||||
url_password=self.creds['pswd'],
|
||||
force_basic_auth=True, validate_certs=False,
|
||||
resp = open_url(uri, method="GET", headers=req_headers,
|
||||
url_username=username, url_password=password,
|
||||
force_basic_auth=basic_auth, validate_certs=False,
|
||||
follow_redirects='all',
|
||||
use_proxy=True, timeout=self.timeout)
|
||||
data = json.loads(to_native(resp.read()))
|
||||
@@ -66,14 +87,16 @@ class RedfishUtils(object):
|
||||
return {'ret': True, 'data': data, 'headers': headers}
|
||||
|
||||
def post_request(self, uri, pyld):
|
||||
req_headers = dict(POST_HEADERS)
|
||||
username, password, basic_auth = self._auth_params(req_headers)
|
||||
try:
|
||||
resp = open_url(uri, data=json.dumps(pyld),
|
||||
headers=POST_HEADERS, method="POST",
|
||||
url_username=self.creds['user'],
|
||||
url_password=self.creds['pswd'],
|
||||
force_basic_auth=True, validate_certs=False,
|
||||
headers=req_headers, method="POST",
|
||||
url_username=username, url_password=password,
|
||||
force_basic_auth=basic_auth, validate_certs=False,
|
||||
follow_redirects='all',
|
||||
use_proxy=True, timeout=self.timeout)
|
||||
headers = dict((k.lower(), v) for (k, v) in resp.info().items())
|
||||
except HTTPError as e:
|
||||
msg = self._get_extended_message(e)
|
||||
return {'ret': False,
|
||||
@@ -87,10 +110,10 @@ class RedfishUtils(object):
|
||||
except Exception as e:
|
||||
return {'ret': False,
|
||||
'msg': "Failed POST request to '%s': '%s'" % (uri, to_text(e))}
|
||||
return {'ret': True, 'resp': resp}
|
||||
return {'ret': True, 'headers': headers, 'resp': resp}
|
||||
|
||||
def patch_request(self, uri, pyld):
|
||||
headers = PATCH_HEADERS
|
||||
req_headers = dict(PATCH_HEADERS)
|
||||
r = self.get_request(uri)
|
||||
if r['ret']:
|
||||
# Get etag from etag header or @odata.etag property
|
||||
@@ -98,15 +121,13 @@ class RedfishUtils(object):
|
||||
if not etag:
|
||||
etag = r['data'].get('@odata.etag')
|
||||
if etag:
|
||||
# Make copy of headers and add If-Match header
|
||||
headers = dict(headers)
|
||||
headers['If-Match'] = etag
|
||||
req_headers['If-Match'] = etag
|
||||
username, password, basic_auth = self._auth_params(req_headers)
|
||||
try:
|
||||
resp = open_url(uri, data=json.dumps(pyld),
|
||||
headers=headers, method="PATCH",
|
||||
url_username=self.creds['user'],
|
||||
url_password=self.creds['pswd'],
|
||||
force_basic_auth=True, validate_certs=False,
|
||||
headers=req_headers, method="PATCH",
|
||||
url_username=username, url_password=password,
|
||||
force_basic_auth=basic_auth, validate_certs=False,
|
||||
follow_redirects='all',
|
||||
use_proxy=True, timeout=self.timeout)
|
||||
except HTTPError as e:
|
||||
@@ -125,13 +146,14 @@ class RedfishUtils(object):
|
||||
return {'ret': True, 'resp': resp}
|
||||
|
||||
def delete_request(self, uri, pyld=None):
|
||||
req_headers = dict(DELETE_HEADERS)
|
||||
username, password, basic_auth = self._auth_params(req_headers)
|
||||
try:
|
||||
data = json.dumps(pyld) if pyld else None
|
||||
resp = open_url(uri, data=data,
|
||||
headers=DELETE_HEADERS, method="DELETE",
|
||||
url_username=self.creds['user'],
|
||||
url_password=self.creds['pswd'],
|
||||
force_basic_auth=True, validate_certs=False,
|
||||
headers=req_headers, method="DELETE",
|
||||
url_username=username, url_password=password,
|
||||
force_basic_auth=basic_auth, validate_certs=False,
|
||||
follow_redirects='all',
|
||||
use_proxy=True, timeout=self.timeout)
|
||||
except HTTPError as e:
|
||||
@@ -1196,6 +1218,54 @@ class RedfishUtils(object):
|
||||
|
||||
return {'ret': True, 'changed': True, 'msg': "Clear all sessions successfully"}
|
||||
|
||||
def create_session(self):
|
||||
if not self.creds.get('user') or not self.creds.get('pswd'):
|
||||
return {'ret': False, 'msg':
|
||||
'Must provide the username and password parameters for '
|
||||
'the CreateSession command'}
|
||||
|
||||
payload = {
|
||||
'UserName': self.creds['user'],
|
||||
'Password': self.creds['pswd']
|
||||
}
|
||||
response = self.post_request(self.root_uri + self.sessions_uri, payload)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
|
||||
headers = response['headers']
|
||||
if 'x-auth-token' not in headers:
|
||||
return {'ret': False, 'msg':
|
||||
'The service did not return the X-Auth-Token header in '
|
||||
'the response from the Sessions collection POST'}
|
||||
|
||||
if 'location' not in headers:
|
||||
self.module.warn(
|
||||
'The service did not return the Location header for the '
|
||||
'session URL in the response from the Sessions collection '
|
||||
'POST')
|
||||
session_uri = None
|
||||
else:
|
||||
session_uri = urlparse(headers.get('location')).path
|
||||
|
||||
session = dict()
|
||||
session['token'] = headers.get('x-auth-token')
|
||||
session['uri'] = session_uri
|
||||
return {'ret': True, 'changed': True, 'session': session,
|
||||
'msg': 'Session created successfully'}
|
||||
|
||||
def delete_session(self, session_uri):
|
||||
if not session_uri:
|
||||
return {'ret': False, 'msg':
|
||||
'Must provide the session_uri parameter for the '
|
||||
'DeleteSession command'}
|
||||
|
||||
response = self.delete_request(self.root_uri + session_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
|
||||
return {'ret': True, 'changed': True,
|
||||
'msg': 'Session deleted successfully'}
|
||||
|
||||
def get_firmware_update_capabilities(self):
|
||||
result = {}
|
||||
response = self.get_request(self.root_uri + self.update_uri)
|
||||
@@ -2676,6 +2746,10 @@ class RedfishUtils(object):
|
||||
need_change = True
|
||||
# type is list
|
||||
if isinstance(set_value, list):
|
||||
if len(set_value) != len(cur_value):
|
||||
# if arrays are not the same len, no need to check each element
|
||||
need_change = True
|
||||
continue
|
||||
for i in range(len(set_value)):
|
||||
for subprop in payload[property][i].keys():
|
||||
if subprop not in target_ethernet_current_setting[property][i]:
|
||||
|
||||
@@ -39,7 +39,7 @@ class ScalewayException(Exception):
|
||||
R_LINK_HEADER = r'''<[^>]+>;\srel="(first|previous|next|last)"
|
||||
(,<[^>]+>;\srel="(first|previous|next|last)")*'''
|
||||
# Specify a single relation, for iteration and string extraction purposes
|
||||
R_RELATION = r'<(?P<target_IRI>[^>]+)>; rel="(?P<relation>first|previous|next|last)"'
|
||||
R_RELATION = r'</?(?P<target_IRI>[^>]+)>; rel="(?P<relation>first|previous|next|last)"'
|
||||
|
||||
|
||||
def parse_pagination_link(header):
|
||||
|
||||
@@ -84,7 +84,7 @@ class UTM:
|
||||
raise UTMModuleConfigurationError(
|
||||
"The keys " + to_native(
|
||||
self.change_relevant_keys) + " to check are not in the modules keys:\n" + to_native(
|
||||
module.params.keys()))
|
||||
list(module.params.keys())))
|
||||
|
||||
def execute(self):
|
||||
try:
|
||||
|
||||
@@ -26,6 +26,7 @@ options:
|
||||
- Heroku API key
|
||||
apps:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- List of Heroku App names
|
||||
required: true
|
||||
@@ -109,7 +110,7 @@ def main():
|
||||
argument_spec = HerokuHelper.heroku_argument_spec()
|
||||
argument_spec.update(
|
||||
user=dict(required=True, type='str'),
|
||||
apps=dict(required=True, type='list'),
|
||||
apps=dict(required=True, type='list', elements='str'),
|
||||
suppress_invitation=dict(default=False, type='bool'),
|
||||
state=dict(default='present', type='str', choices=['present', 'absent']),
|
||||
)
|
||||
|
||||
@@ -63,6 +63,7 @@ options:
|
||||
U(https://www.linode.com/docs/api/tags/).
|
||||
required: false
|
||||
type: list
|
||||
elements: str
|
||||
root_pass:
|
||||
description:
|
||||
- The password for the root user. If not specified, one will be
|
||||
@@ -75,6 +76,7 @@ options:
|
||||
- A list of SSH public key parts to deploy for the root user.
|
||||
required: false
|
||||
type: list
|
||||
elements: str
|
||||
state:
|
||||
description:
|
||||
- The desired instance state.
|
||||
@@ -240,12 +242,12 @@ def initialise_module():
|
||||
no_log=True,
|
||||
fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']),
|
||||
),
|
||||
authorized_keys=dict(type='list', required=False),
|
||||
authorized_keys=dict(type='list', elements='str', required=False, no_log=False),
|
||||
group=dict(type='str', required=False),
|
||||
image=dict(type='str', required=False),
|
||||
region=dict(type='str', required=False),
|
||||
root_pass=dict(type='str', required=False, no_log=True),
|
||||
tags=dict(type='list', required=False),
|
||||
tags=dict(type='list', elements='str', required=False),
|
||||
type=dict(type='str', required=False),
|
||||
stackscript_id=dict(type='int', required=False),
|
||||
stackscript_data=dict(type='dict', required=False),
|
||||
|
||||
@@ -1662,7 +1662,7 @@ def main():
|
||||
),
|
||||
backing_store=dict(
|
||||
type='str',
|
||||
choices=LXC_BACKING_STORE.keys(),
|
||||
choices=list(LXC_BACKING_STORE.keys()),
|
||||
default='dir'
|
||||
),
|
||||
template_options=dict(
|
||||
@@ -1699,7 +1699,7 @@ def main():
|
||||
type='path'
|
||||
),
|
||||
state=dict(
|
||||
choices=LXC_ANSIBLE_STATES.keys(),
|
||||
choices=list(LXC_ANSIBLE_STATES.keys()),
|
||||
default='started'
|
||||
),
|
||||
container_command=dict(
|
||||
@@ -1733,7 +1733,7 @@ def main():
|
||||
type='path',
|
||||
),
|
||||
archive_compression=dict(
|
||||
choices=LXC_COMPRESSION_MAP.keys(),
|
||||
choices=list(LXC_COMPRESSION_MAP.keys()),
|
||||
default='gzip'
|
||||
)
|
||||
),
|
||||
|
||||
@@ -665,7 +665,7 @@ def main():
|
||||
type='dict',
|
||||
),
|
||||
state=dict(
|
||||
choices=LXD_ANSIBLE_STATES.keys(),
|
||||
choices=list(LXD_ANSIBLE_STATES.keys()),
|
||||
default='started'
|
||||
),
|
||||
target=dict(
|
||||
|
||||
@@ -17,7 +17,6 @@ options:
|
||||
password:
|
||||
description:
|
||||
- the instance root password
|
||||
- required only for C(state=present)
|
||||
type: str
|
||||
hostname:
|
||||
description:
|
||||
@@ -124,6 +123,15 @@ options:
|
||||
- with states C(stopped) , C(restarted) allow to force stop instance
|
||||
type: bool
|
||||
default: 'no'
|
||||
purge:
|
||||
description:
|
||||
- Remove container from all related configurations.
|
||||
- For example backup jobs, replication jobs, or HA.
|
||||
- Related ACLs and Firewall entries will always be removed.
|
||||
- Used with state C(absent).
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 2.3.0
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the instance
|
||||
@@ -507,6 +515,7 @@ def main():
|
||||
searchdomain=dict(),
|
||||
timeout=dict(type='int', default=30),
|
||||
force=dict(type='bool', default=False),
|
||||
purge=dict(type='bool', default=False),
|
||||
state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
|
||||
pubkey=dict(type='str', default=None),
|
||||
unprivileged=dict(type='bool', default=False),
|
||||
@@ -514,7 +523,7 @@ def main():
|
||||
hookscript=dict(type='str'),
|
||||
proxmox_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
|
||||
),
|
||||
required_if=[('state', 'present', ['node', 'hostname', 'password', 'ostemplate'])],
|
||||
required_if=[('state', 'present', ['node', 'hostname', 'ostemplate'])],
|
||||
required_together=[('api_token_id', 'api_token_secret')],
|
||||
required_one_of=[('api_password', 'api_token_id')],
|
||||
)
|
||||
@@ -687,7 +696,13 @@ def main():
|
||||
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
|
||||
module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
|
||||
|
||||
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid)
|
||||
delete_params = {}
|
||||
|
||||
if module.params['purge']:
|
||||
delete_params['purge'] = 1
|
||||
|
||||
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid, **delete_params)
|
||||
|
||||
while timeout:
|
||||
if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
|
||||
@@ -425,6 +425,14 @@ options:
|
||||
option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
|
||||
changes in community.general 4.0.0.
|
||||
type: bool
|
||||
tags:
|
||||
description:
|
||||
- List of tags to apply to the VM instance.
|
||||
- Tags must start with C([a-z0-9_]) followed by zero or more of the following characters C([a-z0-9_-+.]).
|
||||
- Tags are only available in Proxmox 6+.
|
||||
type: list
|
||||
elements: str
|
||||
version_added: 2.3.0
|
||||
target:
|
||||
description:
|
||||
- Target node. Only allowed if the original VM is on shared storage.
|
||||
@@ -858,7 +866,7 @@ def wait_for_task(module, proxmox, node, taskid):
|
||||
def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, update, **kwargs):
|
||||
# Available only in PVE 4
|
||||
only_v4 = ['force', 'protection', 'skiplock']
|
||||
only_v6 = ['ciuser', 'cipassword', 'sshkeys', 'ipconfig']
|
||||
only_v6 = ['ciuser', 'cipassword', 'sshkeys', 'ipconfig', 'tags']
|
||||
|
||||
# valide clone parameters
|
||||
valid_clone_params = ['format', 'full', 'pool', 'snapname', 'storage', 'target']
|
||||
@@ -928,6 +936,13 @@ def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sock
|
||||
if searchdomains:
|
||||
kwargs['searchdomain'] = ' '.join(searchdomains)
|
||||
|
||||
# VM tags are expected to be valid and presented as a comma/semi-colon delimited string
|
||||
if 'tags' in kwargs:
|
||||
for tag in kwargs['tags']:
|
||||
if not re.match(r'^[a-z0-9_][a-z0-9_\-\+\.]*$', tag):
|
||||
module.fail_json(msg='%s is not a valid tag' % tag)
|
||||
kwargs['tags'] = ",".join(kwargs['tags'])
|
||||
|
||||
# -args and skiplock require root@pam user - but can not use api tokens
|
||||
if module.params['api_user'] == "root@pam" and module.params['args'] is None:
|
||||
if not update and module.params['proxmox_default_behavior'] == 'compatibility':
|
||||
@@ -1057,12 +1072,13 @@ def main():
|
||||
smbios=dict(type='str'),
|
||||
snapname=dict(type='str'),
|
||||
sockets=dict(type='int'),
|
||||
sshkeys=dict(type='str'),
|
||||
sshkeys=dict(type='str', no_log=False),
|
||||
startdate=dict(type='str'),
|
||||
startup=dict(),
|
||||
state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current']),
|
||||
storage=dict(type='str'),
|
||||
tablet=dict(type='bool'),
|
||||
tags=dict(type='list', elements='str'),
|
||||
target=dict(type='str'),
|
||||
tdf=dict(type='bool'),
|
||||
template=dict(type='bool'),
|
||||
@@ -1267,6 +1283,7 @@ def main():
|
||||
startdate=module.params['startdate'],
|
||||
startup=module.params['startup'],
|
||||
tablet=module.params['tablet'],
|
||||
tags=module.params['tags'],
|
||||
target=module.params['target'],
|
||||
tdf=module.params['tdf'],
|
||||
template=module.params['template'],
|
||||
@@ -1287,7 +1304,7 @@ def main():
|
||||
elif clone is not None:
|
||||
module.exit_json(changed=True, vmid=vmid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid))
|
||||
else:
|
||||
module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s deployed" % (name, vmid), **results)
|
||||
module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results)
|
||||
except Exception as e:
|
||||
if update:
|
||||
module.fail_json(vmid=vmid, msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e))
|
||||
|
||||
190
plugins/modules/cloud/misc/proxmox_storage_info.py
Normal file
190
plugins/modules/cloud/misc/proxmox_storage_info.py
Normal file
@@ -0,0 +1,190 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: Tristan Le Guern (@Aversiste) <tleguern at bouledef.eu>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: proxmox_storage_info
|
||||
short_description: Retrieve information about one or more Proxmox VE storages
|
||||
version_added: 2.2.0
|
||||
description:
|
||||
- Retrieve information about one or more Proxmox VE storages.
|
||||
options:
|
||||
storage:
|
||||
description:
|
||||
- Only return informations on a specific storage.
|
||||
aliases: ['name']
|
||||
type: str
|
||||
type:
|
||||
description:
|
||||
- Filter on a specifc storage type.
|
||||
type: str
|
||||
author: Tristan Le Guern (@Aversiste)
|
||||
extends_documentation_fragment: community.general.proxmox.documentation
|
||||
notes:
|
||||
- Storage specific options can be returned by this module, please look at the documentation at U(https://pve.proxmox.com/wiki/Storage).
|
||||
'''
|
||||
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: List existing storages
|
||||
community.general.proxmox_storage_info:
|
||||
api_host: helldorado
|
||||
api_user: root@pam
|
||||
api_password: "{{ password | default(omit) }}"
|
||||
api_token_id: "{{ token_id | default(omit) }}"
|
||||
api_token_secret: "{{ token_secret | default(omit) }}"
|
||||
register: proxmox_storages
|
||||
|
||||
- name: List NFS storages only
|
||||
community.general.proxmox_storage_info:
|
||||
api_host: helldorado
|
||||
api_user: root@pam
|
||||
api_password: "{{ password | default(omit) }}"
|
||||
api_token_id: "{{ token_id | default(omit) }}"
|
||||
api_token_secret: "{{ token_secret | default(omit) }}"
|
||||
type: nfs
|
||||
register: proxmox_storages_nfs
|
||||
|
||||
- name: Retrieve information about the lvm2 storage
|
||||
community.general.proxmox_storage_info:
|
||||
api_host: helldorado
|
||||
api_user: root@pam
|
||||
api_password: "{{ password | default(omit) }}"
|
||||
api_token_id: "{{ token_id | default(omit) }}"
|
||||
api_token_secret: "{{ token_secret | default(omit) }}"
|
||||
storage: lvm2
|
||||
register: proxmox_storage_lvm
|
||||
'''
|
||||
|
||||
|
||||
RETURN = '''
|
||||
proxmox_storages:
|
||||
description: List of storage pools.
|
||||
returned: on success
|
||||
type: list
|
||||
elements: dict
|
||||
contains:
|
||||
content:
|
||||
description: Proxmox content types available in this storage
|
||||
returned: on success
|
||||
type: list
|
||||
elements: str
|
||||
digest:
|
||||
description: Storage's digest
|
||||
returned: on success
|
||||
type: str
|
||||
nodes:
|
||||
description: List of nodes associated to this storage
|
||||
returned: on success, if storage is not local
|
||||
type: list
|
||||
elements: str
|
||||
path:
|
||||
description: Physical path to this storage
|
||||
returned: on success
|
||||
type: str
|
||||
prune-backups:
|
||||
description: Backup retention options
|
||||
returned: on success
|
||||
type: list
|
||||
elements: dict
|
||||
shared:
|
||||
description: Is this storage shared
|
||||
returned: on success
|
||||
type: bool
|
||||
storage:
|
||||
description: Storage name
|
||||
returned: on success
|
||||
type: str
|
||||
type:
|
||||
description: Storage type
|
||||
returned: on success
|
||||
type: str
|
||||
'''
|
||||
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible_collections.community.general.plugins.module_utils.proxmox import (
|
||||
proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR, proxmox_to_ansible_bool)
|
||||
|
||||
|
||||
class ProxmoxStorageInfoAnsible(ProxmoxAnsible):
|
||||
def get_storage(self, storage):
|
||||
try:
|
||||
storage = self.proxmox_api.storage.get(storage)
|
||||
except Exception:
|
||||
self.module.fail_json(msg="Storage '%s' does not exist" % storage)
|
||||
return ProxmoxStorage(storage)
|
||||
|
||||
def get_storages(self, type=None):
|
||||
storages = self.proxmox_api.storage.get(type=type)
|
||||
storages = [ProxmoxStorage(storage) for storage in storages]
|
||||
return storages
|
||||
|
||||
|
||||
class ProxmoxStorage:
|
||||
def __init__(self, storage):
|
||||
self.storage = storage
|
||||
# Convert proxmox representation of lists, dicts and boolean for easier
|
||||
# manipulation within ansible.
|
||||
if 'shared' in self.storage:
|
||||
self.storage['shared'] = proxmox_to_ansible_bool(self.storage['shared'])
|
||||
if 'content' in self.storage:
|
||||
self.storage['content'] = self.storage['content'].split(',')
|
||||
if 'nodes' in self.storage:
|
||||
self.storage['nodes'] = self.storage['nodes'].split(',')
|
||||
if 'prune-backups' in storage:
|
||||
options = storage['prune-backups'].split(',')
|
||||
self.storage['prune-backups'] = dict()
|
||||
for option in options:
|
||||
k, v = option.split('=')
|
||||
self.storage['prune-backups'][k] = v
|
||||
|
||||
|
||||
def proxmox_storage_info_argument_spec():
|
||||
return dict(
|
||||
storage=dict(type='str', aliases=['name']),
|
||||
type=dict(type='str'),
|
||||
)
|
||||
|
||||
|
||||
def main():
|
||||
module_args = proxmox_auth_argument_spec()
|
||||
storage_info_args = proxmox_storage_info_argument_spec()
|
||||
module_args.update(storage_info_args)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=module_args,
|
||||
required_one_of=[('api_password', 'api_token_id')],
|
||||
required_together=[('api_token_id', 'api_token_secret')],
|
||||
mutually_exclusive=[('storage', 'type')],
|
||||
supports_check_mode=True
|
||||
)
|
||||
result = dict(
|
||||
changed=False
|
||||
)
|
||||
|
||||
if not HAS_PROXMOXER:
|
||||
module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
|
||||
|
||||
proxmox = ProxmoxStorageInfoAnsible(module)
|
||||
storage = module.params['storage']
|
||||
storagetype = module.params['type']
|
||||
|
||||
if storage:
|
||||
storages = [proxmox.get_storage(storage)]
|
||||
else:
|
||||
storages = proxmox.get_storages(type=storagetype)
|
||||
result['proxmox_storages'] = [storage.storage for storage in storages]
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -57,27 +57,32 @@ options:
|
||||
Each rule must contain protocol parameter, in addition to three optional parameters
|
||||
(port_from, port_to, and source)
|
||||
type: list
|
||||
elements: dict
|
||||
add_server_ips:
|
||||
description:
|
||||
- A list of server identifiers (id or name) to be assigned to a firewall policy.
|
||||
Used in combination with update state.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
remove_server_ips:
|
||||
description:
|
||||
- A list of server IP ids to be unassigned from a firewall policy. Used in combination with update state.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
add_rules:
|
||||
description:
|
||||
- A list of rules that will be added to an existing firewall policy.
|
||||
It is syntax is the same as the one used for rules parameter. Used in combination with update state.
|
||||
type: list
|
||||
elements: dict
|
||||
required: false
|
||||
remove_rules:
|
||||
description:
|
||||
- A list of rule ids that will be removed from an existing firewall policy. Used in combination with update state.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
description:
|
||||
description:
|
||||
@@ -508,11 +513,11 @@ def main():
|
||||
name=dict(type='str'),
|
||||
firewall_policy=dict(type='str'),
|
||||
description=dict(type='str'),
|
||||
rules=dict(type='list', default=[]),
|
||||
add_server_ips=dict(type='list', default=[]),
|
||||
remove_server_ips=dict(type='list', default=[]),
|
||||
add_rules=dict(type='list', default=[]),
|
||||
remove_rules=dict(type='list', default=[]),
|
||||
rules=dict(type='list', elements="dict", default=[]),
|
||||
add_server_ips=dict(type='list', elements="str", default=[]),
|
||||
remove_server_ips=dict(type='list', elements="str", default=[]),
|
||||
add_rules=dict(type='list', elements="dict", default=[]),
|
||||
remove_rules=dict(type='list', elements="str", default=[]),
|
||||
wait=dict(type='bool', default=True),
|
||||
wait_timeout=dict(type='int', default=600),
|
||||
wait_interval=dict(type='int', default=5),
|
||||
|
||||
@@ -95,6 +95,7 @@ options:
|
||||
- A list of rule objects that will be set for the load balancer. Each rule must contain protocol,
|
||||
port_balancer, and port_server parameters, in addition to source parameter, which is optional.
|
||||
type: list
|
||||
elements: dict
|
||||
description:
|
||||
description:
|
||||
- Description of the load balancer. maxLength=256
|
||||
@@ -105,22 +106,26 @@ options:
|
||||
- A list of server identifiers (id or name) to be assigned to a load balancer.
|
||||
Used in combination with update state.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
remove_server_ips:
|
||||
description:
|
||||
- A list of server IP ids to be unassigned from a load balancer. Used in combination with update state.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
add_rules:
|
||||
description:
|
||||
- A list of rules that will be added to an existing load balancer.
|
||||
It is syntax is the same as the one used for rules parameter. Used in combination with update state.
|
||||
type: list
|
||||
elements: dict
|
||||
required: false
|
||||
remove_rules:
|
||||
description:
|
||||
- A list of rule ids that will be removed from an existing load balancer. Used in combination with update state.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
wait:
|
||||
description:
|
||||
@@ -613,11 +618,11 @@ def main():
|
||||
choices=METHODS),
|
||||
datacenter=dict(
|
||||
choices=DATACENTERS),
|
||||
rules=dict(type='list', default=[]),
|
||||
add_server_ips=dict(type='list', default=[]),
|
||||
remove_server_ips=dict(type='list', default=[]),
|
||||
add_rules=dict(type='list', default=[]),
|
||||
remove_rules=dict(type='list', default=[]),
|
||||
rules=dict(type='list', elements="dict", default=[]),
|
||||
add_server_ips=dict(type='list', elements="str", default=[]),
|
||||
remove_server_ips=dict(type='list', elements="str", default=[]),
|
||||
add_rules=dict(type='list', elements="dict", default=[]),
|
||||
remove_rules=dict(type='list', elements="str", default=[]),
|
||||
wait=dict(type='bool', default=True),
|
||||
wait_timeout=dict(type='int', default=600),
|
||||
wait_interval=dict(type='int', default=5),
|
||||
|
||||
@@ -71,6 +71,7 @@ options:
|
||||
warning alerts, critical is used to set critical alerts. alert enables alert,
|
||||
and value is used to advise when the value is exceeded.
|
||||
type: list
|
||||
elements: dict
|
||||
suboptions:
|
||||
cpu:
|
||||
description:
|
||||
@@ -96,6 +97,7 @@ options:
|
||||
description:
|
||||
- Array of ports that will be monitoring.
|
||||
type: list
|
||||
elements: dict
|
||||
suboptions:
|
||||
protocol:
|
||||
description:
|
||||
@@ -119,6 +121,7 @@ options:
|
||||
description:
|
||||
- Array of processes that will be monitoring.
|
||||
type: list
|
||||
elements: dict
|
||||
suboptions:
|
||||
process:
|
||||
description:
|
||||
@@ -133,41 +136,49 @@ options:
|
||||
description:
|
||||
- Ports to add to the monitoring policy.
|
||||
type: list
|
||||
elements: dict
|
||||
required: false
|
||||
add_processes:
|
||||
description:
|
||||
- Processes to add to the monitoring policy.
|
||||
type: list
|
||||
elements: dict
|
||||
required: false
|
||||
add_servers:
|
||||
description:
|
||||
- Servers to add to the monitoring policy.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
remove_ports:
|
||||
description:
|
||||
- Ports to remove from the monitoring policy.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
remove_processes:
|
||||
description:
|
||||
- Processes to remove from the monitoring policy.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
remove_servers:
|
||||
description:
|
||||
- Servers to remove from the monitoring policy.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
update_ports:
|
||||
description:
|
||||
- Ports to be updated on the monitoring policy.
|
||||
type: list
|
||||
elements: dict
|
||||
required: false
|
||||
update_processes:
|
||||
description:
|
||||
- Processes to be updated on the monitoring policy.
|
||||
type: list
|
||||
elements: dict
|
||||
required: false
|
||||
wait:
|
||||
description:
|
||||
@@ -197,7 +208,7 @@ author:
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a monitoring policy
|
||||
oneandone_moitoring_policy:
|
||||
community.general.oneandone_monitoring_policy:
|
||||
auth_token: oneandone_private_api_key
|
||||
name: ansible monitoring policy
|
||||
description: Testing creation of a monitoring policy with ansible
|
||||
@@ -258,13 +269,13 @@ EXAMPLES = '''
|
||||
wait: true
|
||||
|
||||
- name: Destroy a monitoring policy
|
||||
oneandone_moitoring_policy:
|
||||
community.general.oneandone_monitoring_policy:
|
||||
auth_token: oneandone_private_api_key
|
||||
state: absent
|
||||
name: ansible monitoring policy
|
||||
|
||||
- name: Update a monitoring policy
|
||||
oneandone_moitoring_policy:
|
||||
community.general.oneandone_monitoring_policy:
|
||||
auth_token: oneandone_private_api_key
|
||||
monitoring_policy: ansible monitoring policy
|
||||
name: ansible monitoring policy updated
|
||||
@@ -315,7 +326,7 @@ EXAMPLES = '''
|
||||
state: update
|
||||
|
||||
- name: Add a port to a monitoring policy
|
||||
oneandone_moitoring_policy:
|
||||
community.general.oneandone_monitoring_policy:
|
||||
auth_token: oneandone_private_api_key
|
||||
monitoring_policy: ansible monitoring policy updated
|
||||
add_ports:
|
||||
@@ -328,7 +339,7 @@ EXAMPLES = '''
|
||||
state: update
|
||||
|
||||
- name: Update existing ports of a monitoring policy
|
||||
oneandone_moitoring_policy:
|
||||
community.general.oneandone_monitoring_policy:
|
||||
auth_token: oneandone_private_api_key
|
||||
monitoring_policy: ansible monitoring policy updated
|
||||
update_ports:
|
||||
@@ -348,7 +359,7 @@ EXAMPLES = '''
|
||||
state: update
|
||||
|
||||
- name: Remove a port from a monitoring policy
|
||||
oneandone_moitoring_policy:
|
||||
community.general.oneandone_monitoring_policy:
|
||||
auth_token: oneandone_private_api_key
|
||||
monitoring_policy: ansible monitoring policy updated
|
||||
remove_ports:
|
||||
@@ -356,7 +367,7 @@ EXAMPLES = '''
|
||||
state: update
|
||||
|
||||
- name: Add a process to a monitoring policy
|
||||
oneandone_moitoring_policy:
|
||||
community.general.oneandone_monitoring_policy:
|
||||
auth_token: oneandone_private_api_key
|
||||
monitoring_policy: ansible monitoring policy updated
|
||||
add_processes:
|
||||
@@ -368,7 +379,7 @@ EXAMPLES = '''
|
||||
state: update
|
||||
|
||||
- name: Update existing processes of a monitoring policy
|
||||
oneandone_moitoring_policy:
|
||||
community.general.oneandone_monitoring_policy:
|
||||
auth_token: oneandone_private_api_key
|
||||
monitoring_policy: ansible monitoring policy updated
|
||||
update_processes:
|
||||
@@ -386,7 +397,7 @@ EXAMPLES = '''
|
||||
state: update
|
||||
|
||||
- name: Remove a process from a monitoring policy
|
||||
oneandone_moitoring_policy:
|
||||
community.general.oneandone_monitoring_policy:
|
||||
auth_token: oneandone_private_api_key
|
||||
monitoring_policy: ansible monitoring policy updated
|
||||
remove_processes:
|
||||
@@ -395,7 +406,7 @@ EXAMPLES = '''
|
||||
state: update
|
||||
|
||||
- name: Add server to a monitoring policy
|
||||
oneandone_moitoring_policy:
|
||||
community.general.oneandone_monitoring_policy:
|
||||
auth_token: oneandone_private_api_key
|
||||
monitoring_policy: ansible monitoring policy updated
|
||||
add_servers:
|
||||
@@ -404,7 +415,7 @@ EXAMPLES = '''
|
||||
state: update
|
||||
|
||||
- name: Remove server from a monitoring policy
|
||||
oneandone_moitoring_policy:
|
||||
community.general.oneandone_monitoring_policy:
|
||||
auth_token: oneandone_private_api_key
|
||||
monitoring_policy: ansible monitoring policy updated
|
||||
remove_servers:
|
||||
@@ -695,15 +706,15 @@ def update_monitoring_policy(module, oneandone_conn):
|
||||
threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']
|
||||
|
||||
_thresholds = []
|
||||
for treshold in thresholds:
|
||||
key = treshold.keys()[0]
|
||||
for threshold in thresholds:
|
||||
key = list(threshold.keys())[0]
|
||||
if key in threshold_entities:
|
||||
_threshold = oneandone.client.Threshold(
|
||||
entity=key,
|
||||
warning_value=treshold[key]['warning']['value'],
|
||||
warning_alert=str(treshold[key]['warning']['alert']).lower(),
|
||||
critical_value=treshold[key]['critical']['value'],
|
||||
critical_alert=str(treshold[key]['critical']['alert']).lower())
|
||||
warning_value=threshold[key]['warning']['value'],
|
||||
warning_alert=str(threshold[key]['warning']['alert']).lower(),
|
||||
critical_value=threshold[key]['critical']['value'],
|
||||
critical_alert=str(threshold[key]['critical']['alert']).lower())
|
||||
_thresholds.append(_threshold)
|
||||
|
||||
if name or description or email or thresholds:
|
||||
@@ -864,15 +875,15 @@ def create_monitoring_policy(module, oneandone_conn):
|
||||
threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']
|
||||
|
||||
_thresholds = []
|
||||
for treshold in thresholds:
|
||||
key = treshold.keys()[0]
|
||||
for threshold in thresholds:
|
||||
key = list(threshold.keys())[0]
|
||||
if key in threshold_entities:
|
||||
_threshold = oneandone.client.Threshold(
|
||||
entity=key,
|
||||
warning_value=treshold[key]['warning']['value'],
|
||||
warning_alert=str(treshold[key]['warning']['alert']).lower(),
|
||||
critical_value=treshold[key]['critical']['value'],
|
||||
critical_alert=str(treshold[key]['critical']['alert']).lower())
|
||||
warning_value=threshold[key]['warning']['value'],
|
||||
warning_alert=str(threshold[key]['warning']['alert']).lower(),
|
||||
critical_value=threshold[key]['critical']['value'],
|
||||
critical_alert=str(threshold[key]['critical']['alert']).lower())
|
||||
_thresholds.append(_threshold)
|
||||
|
||||
_ports = []
|
||||
@@ -957,17 +968,17 @@ def main():
|
||||
agent=dict(type='str'),
|
||||
email=dict(type='str'),
|
||||
description=dict(type='str'),
|
||||
thresholds=dict(type='list', default=[]),
|
||||
ports=dict(type='list', default=[]),
|
||||
processes=dict(type='list', default=[]),
|
||||
add_ports=dict(type='list', default=[]),
|
||||
update_ports=dict(type='list', default=[]),
|
||||
remove_ports=dict(type='list', default=[]),
|
||||
add_processes=dict(type='list', default=[]),
|
||||
update_processes=dict(type='list', default=[]),
|
||||
remove_processes=dict(type='list', default=[]),
|
||||
add_servers=dict(type='list', default=[]),
|
||||
remove_servers=dict(type='list', default=[]),
|
||||
thresholds=dict(type='list', elements="dict", default=[]),
|
||||
ports=dict(type='list', elements="dict", default=[]),
|
||||
processes=dict(type='list', elements="dict", default=[]),
|
||||
add_ports=dict(type='list', elements="dict", default=[]),
|
||||
update_ports=dict(type='list', elements="dict", default=[]),
|
||||
remove_ports=dict(type='list', elements="str", default=[]),
|
||||
add_processes=dict(type='list', elements="dict", default=[]),
|
||||
update_processes=dict(type='list', elements="dict", default=[]),
|
||||
remove_processes=dict(type='list', elements="str", default=[]),
|
||||
add_servers=dict(type='list', elements="str", default=[]),
|
||||
remove_servers=dict(type='list', elements="str", default=[]),
|
||||
wait=dict(type='bool', default=True),
|
||||
wait_timeout=dict(type='int', default=600),
|
||||
wait_interval=dict(type='int', default=5),
|
||||
|
||||
@@ -71,10 +71,12 @@ options:
|
||||
description:
|
||||
- List of server identifiers (name or id) to be added to the private network.
|
||||
type: list
|
||||
elements: str
|
||||
remove_members:
|
||||
description:
|
||||
- List of server identifiers (name or id) to be removed from the private network.
|
||||
type: list
|
||||
elements: str
|
||||
wait:
|
||||
description:
|
||||
- wait for the instance to be in state 'running' before returning
|
||||
@@ -394,8 +396,8 @@ def main():
|
||||
description=dict(type='str'),
|
||||
network_address=dict(type='str'),
|
||||
subnet_mask=dict(type='str'),
|
||||
add_members=dict(type='list', default=[]),
|
||||
remove_members=dict(type='list', default=[]),
|
||||
add_members=dict(type='list', elements="str", default=[]),
|
||||
remove_members=dict(type='list', elements="str", default=[]),
|
||||
datacenter=dict(
|
||||
choices=DATACENTERS),
|
||||
wait=dict(type='bool', default=True),
|
||||
|
||||
@@ -87,6 +87,7 @@ options:
|
||||
- A list of hard disks with nested "size" and "is_main" properties.
|
||||
It must be provided with vcore, cores_per_processor, and ram parameters.
|
||||
type: list
|
||||
elements: dict
|
||||
private_network:
|
||||
description:
|
||||
- The private network name or ID.
|
||||
@@ -627,9 +628,9 @@ def main():
|
||||
vcore=dict(type='int'),
|
||||
cores_per_processor=dict(type='int'),
|
||||
ram=dict(type='float'),
|
||||
hdds=dict(type='list'),
|
||||
hdds=dict(type='list', elements='dict'),
|
||||
count=dict(type='int', default=1),
|
||||
ssh_key=dict(type='raw'),
|
||||
ssh_key=dict(type='raw', no_log=False),
|
||||
auto_increment=dict(type='bool', default=True),
|
||||
server=dict(type='str'),
|
||||
datacenter=dict(
|
||||
|
||||
@@ -66,6 +66,7 @@ options:
|
||||
description:
|
||||
- The labels for this host.
|
||||
type: list
|
||||
elements: str
|
||||
template:
|
||||
description:
|
||||
- The template or attribute changes to merge into the host template.
|
||||
@@ -130,7 +131,7 @@ class HostModule(OpenNebulaModule):
|
||||
vmm_mad_name=dict(type='str', default="kvm"),
|
||||
cluster_id=dict(type='int', default=0),
|
||||
cluster_name=dict(type='str'),
|
||||
labels=dict(type='list'),
|
||||
labels=dict(type='list', elements='str'),
|
||||
template=dict(type='dict', aliases=['attributes']),
|
||||
)
|
||||
|
||||
|
||||
@@ -56,6 +56,7 @@ options:
|
||||
- A list of images ids whose facts you want to gather.
|
||||
aliases: ['id']
|
||||
type: list
|
||||
elements: str
|
||||
name:
|
||||
description:
|
||||
- A C(name) of the image whose facts will be gathered.
|
||||
@@ -253,7 +254,7 @@ def main():
|
||||
"api_url": {"required": False, "type": "str"},
|
||||
"api_username": {"required": False, "type": "str"},
|
||||
"api_password": {"required": False, "type": "str", "no_log": True},
|
||||
"ids": {"required": False, "aliases": ['id'], "type": "list"},
|
||||
"ids": {"required": False, "aliases": ['id'], "type": "list", "elements": "str"},
|
||||
"name": {"required": False, "type": "str"},
|
||||
}
|
||||
|
||||
@@ -273,9 +274,6 @@ def main():
|
||||
name = params.get('name')
|
||||
client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
|
||||
|
||||
result = {'images': []}
|
||||
images = []
|
||||
|
||||
if ids:
|
||||
images = get_images_by_ids(module, client, ids)
|
||||
elif name:
|
||||
@@ -283,8 +281,9 @@ def main():
|
||||
else:
|
||||
images = get_all_images(client).IMAGE
|
||||
|
||||
for image in images:
|
||||
result['images'].append(get_image_info(image))
|
||||
result = {
|
||||
'images': [get_image_info(image) for image in images],
|
||||
}
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
@@ -72,6 +72,7 @@ options:
|
||||
- A list of instance ids used for states':' C(absent), C(running), C(rebooted), C(poweredoff)
|
||||
aliases: ['ids']
|
||||
type: list
|
||||
elements: int
|
||||
state:
|
||||
description:
|
||||
- C(present) - create instances from a template specified with C(template_id)/C(template_name).
|
||||
@@ -120,6 +121,7 @@ options:
|
||||
- C(state) of instances with these labels.
|
||||
default: []
|
||||
type: list
|
||||
elements: str
|
||||
count_attributes:
|
||||
description:
|
||||
- A dictionary of key/value attributes that can only be used with
|
||||
@@ -134,6 +136,7 @@ options:
|
||||
- This can be expressed in multiple ways and is shown in the EXAMPLES
|
||||
- section.
|
||||
type: list
|
||||
elements: str
|
||||
count:
|
||||
description:
|
||||
- Number of instances to launch
|
||||
@@ -168,6 +171,7 @@ options:
|
||||
- NOTE':' If The Template hats Multiple Disks the Order of the Sizes is
|
||||
- matched against the order specified in C(template_id)/C(template_name).
|
||||
type: list
|
||||
elements: str
|
||||
cpu:
|
||||
description:
|
||||
- Percentage of CPU divided by 100 required for the new instance. Half a
|
||||
@@ -182,6 +186,7 @@ options:
|
||||
- A list of dictionaries with network parameters. See examples for more details.
|
||||
default: []
|
||||
type: list
|
||||
elements: dict
|
||||
disk_saveas:
|
||||
description:
|
||||
- Creates an image from a VM disk.
|
||||
@@ -1349,7 +1354,7 @@ def main():
|
||||
"api_url": {"required": False, "type": "str"},
|
||||
"api_username": {"required": False, "type": "str"},
|
||||
"api_password": {"required": False, "type": "str", "no_log": True},
|
||||
"instance_ids": {"required": False, "aliases": ['ids'], "type": "list"},
|
||||
"instance_ids": {"required": False, "aliases": ['ids'], "type": "list", "elements": "int"},
|
||||
"template_name": {"required": False, "type": "str"},
|
||||
"template_id": {"required": False, "type": "int"},
|
||||
"vm_start_on_hold": {"default": False, "type": "bool"},
|
||||
@@ -1367,16 +1372,16 @@ def main():
|
||||
"memory": {"required": False, "type": "str"},
|
||||
"cpu": {"required": False, "type": "float"},
|
||||
"vcpu": {"required": False, "type": "int"},
|
||||
"disk_size": {"required": False, "type": "list"},
|
||||
"disk_size": {"required": False, "type": "list", "elements": "str"},
|
||||
"datastore_name": {"required": False, "type": "str"},
|
||||
"datastore_id": {"required": False, "type": "int"},
|
||||
"networks": {"default": [], "type": "list"},
|
||||
"networks": {"default": [], "type": "list", "elements": "dict"},
|
||||
"count": {"default": 1, "type": "int"},
|
||||
"exact_count": {"required": False, "type": "int"},
|
||||
"attributes": {"default": {}, "type": "dict"},
|
||||
"count_attributes": {"required": False, "type": "dict"},
|
||||
"labels": {"default": [], "type": "list"},
|
||||
"count_labels": {"required": False, "type": "list"},
|
||||
"labels": {"default": [], "type": "list", "elements": "str"},
|
||||
"count_labels": {"required": False, "type": "list", "elements": "str"},
|
||||
"disk_saveas": {"type": "dict"},
|
||||
"persistent": {"default": False, "type": "bool"}
|
||||
}
|
||||
|
||||
@@ -128,7 +128,7 @@ def update_vcn(virtual_network_client, module):
|
||||
primitive_params_update=["vcn_id"],
|
||||
kwargs_non_primitive_update={UpdateVcnDetails: "update_vcn_details"},
|
||||
module=module,
|
||||
update_attributes=UpdateVcnDetails().attribute_map.keys(),
|
||||
update_attributes=list(UpdateVcnDetails().attribute_map.keys()),
|
||||
)
|
||||
return result
|
||||
|
||||
|
||||
@@ -17,27 +17,35 @@ author: "Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the target.
|
||||
- Indicate desired state of the target.
|
||||
default: present
|
||||
choices: ['present', 'absent']
|
||||
type: str
|
||||
auth_token:
|
||||
description:
|
||||
- Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
|
||||
- Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
|
||||
type: str
|
||||
label:
|
||||
description:
|
||||
- Label for the key. If you keep it empty, it will be read from key string.
|
||||
description:
|
||||
- Label for the key. If you keep it empty, it will be read from key string.
|
||||
type: str
|
||||
aliases: [name]
|
||||
id:
|
||||
description:
|
||||
- UUID of the key which you want to remove.
|
||||
- UUID of the key which you want to remove.
|
||||
type: str
|
||||
fingerprint:
|
||||
description:
|
||||
- Fingerprint of the key which you want to remove.
|
||||
- Fingerprint of the key which you want to remove.
|
||||
type: str
|
||||
key:
|
||||
description:
|
||||
- Public Key string ({type} {base64 encoded key} {description}).
|
||||
- Public Key string ({type} {base64 encoded key} {description}).
|
||||
type: str
|
||||
key_file:
|
||||
description:
|
||||
- File with the public key.
|
||||
- File with the public key.
|
||||
type: path
|
||||
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
|
||||
@@ -181,7 +181,6 @@ def do_detach(packet_conn, vol, dev_id=None):
|
||||
return (dev_id is None) or (a['device']['id'] == dev_id)
|
||||
for a in vol['attachments']:
|
||||
if dev_match(a):
|
||||
print(a['href'])
|
||||
packet_conn.call_api(a['href'], type="DELETE")
|
||||
|
||||
|
||||
|
||||
@@ -35,6 +35,7 @@ options:
|
||||
description:
|
||||
- Public SSH keys allowing access to the virtual machine.
|
||||
type: list
|
||||
elements: str
|
||||
datacenter:
|
||||
description:
|
||||
- The datacenter to provision this virtual machine.
|
||||
@@ -70,6 +71,7 @@ options:
|
||||
description:
|
||||
- list of instance ids, currently only used when state='absent' to remove instances.
|
||||
type: list
|
||||
elements: str
|
||||
count:
|
||||
description:
|
||||
- The number of virtual machines to create.
|
||||
@@ -581,12 +583,12 @@ def main():
|
||||
volume_size=dict(type='int', default=10),
|
||||
disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
|
||||
image_password=dict(default=None, no_log=True),
|
||||
ssh_keys=dict(type='list', default=[]),
|
||||
ssh_keys=dict(type='list', elements='str', default=[], no_log=False),
|
||||
bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
|
||||
lan=dict(type='int', default=1),
|
||||
count=dict(type='int', default=1),
|
||||
auto_increment=dict(type='bool', default=True),
|
||||
instance_ids=dict(type='list', default=[]),
|
||||
instance_ids=dict(type='list', elements='str', default=[]),
|
||||
subscription_user=dict(),
|
||||
subscription_password=dict(no_log=True),
|
||||
location=dict(choices=LOCATIONS, default='us/las'),
|
||||
|
||||
@@ -47,6 +47,7 @@ options:
|
||||
description:
|
||||
- Public SSH keys allowing access to the virtual machine.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
disk_type:
|
||||
description:
|
||||
@@ -77,6 +78,7 @@ options:
|
||||
description:
|
||||
- list of instance ids, currently only used when state='absent' to remove instances.
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
subscription_user:
|
||||
description:
|
||||
@@ -106,6 +108,10 @@ options:
|
||||
type: str
|
||||
required: false
|
||||
default: 'present'
|
||||
server:
|
||||
description:
|
||||
- Server name to attach the volume to.
|
||||
type: str
|
||||
|
||||
requirements: [ "profitbricks" ]
|
||||
author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
|
||||
@@ -369,13 +375,13 @@ def main():
|
||||
size=dict(type='int', default=10),
|
||||
bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
|
||||
image=dict(),
|
||||
image_password=dict(default=None, no_log=True),
|
||||
ssh_keys=dict(type='list', default=[]),
|
||||
image_password=dict(no_log=True),
|
||||
ssh_keys=dict(type='list', elements='str', default=[], no_log=False),
|
||||
disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
|
||||
licence_type=dict(default='UNKNOWN'),
|
||||
count=dict(type='int', default=1),
|
||||
auto_increment=dict(type='bool', default=True),
|
||||
instance_ids=dict(type='list', default=[]),
|
||||
instance_ids=dict(type='list', elements='str', default=[]),
|
||||
subscription_user=dict(),
|
||||
subscription_password=dict(no_log=True),
|
||||
wait=dict(type='bool', default=True),
|
||||
|
||||
@@ -549,7 +549,7 @@ def main():
|
||||
password=dict(default='', required=False, type='str', no_log=True),
|
||||
account=dict(default='', required=False, type='str'),
|
||||
application=dict(required=True, type='str'),
|
||||
keyset=dict(required=True, type='str'),
|
||||
keyset=dict(required=True, type='str', no_log=False),
|
||||
state=dict(default='present', type='str',
|
||||
choices=['started', 'stopped', 'present', 'absent']),
|
||||
name=dict(required=True, type='str'), description=dict(type='str'),
|
||||
|
||||
@@ -110,6 +110,7 @@ options:
|
||||
with this image
|
||||
instance_ids:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- list of instance ids, currently only used when state='absent' to
|
||||
remove instances
|
||||
@@ -129,6 +130,7 @@ options:
|
||||
- Name to give the instance
|
||||
networks:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- The network to attach to the instances. If specified, you must include
|
||||
ALL networks including the public and private interfaces. Can be C(id)
|
||||
@@ -810,11 +812,11 @@ def main():
|
||||
flavor=dict(),
|
||||
group=dict(),
|
||||
image=dict(),
|
||||
instance_ids=dict(type='list'),
|
||||
instance_ids=dict(type='list', elements='str'),
|
||||
key_name=dict(aliases=['keypair']),
|
||||
meta=dict(type='dict', default={}),
|
||||
name=dict(),
|
||||
networks=dict(type='list', default=['public', 'private']),
|
||||
networks=dict(type='list', elements='str', default=['public', 'private']),
|
||||
service=dict(),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
user_data=dict(no_log=True),
|
||||
|
||||
@@ -30,6 +30,7 @@ options:
|
||||
required: yes
|
||||
databases:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- Name of the databases that the user can access
|
||||
default: []
|
||||
@@ -189,7 +190,7 @@ def main():
|
||||
cdb_id=dict(type='str', required=True),
|
||||
db_username=dict(type='str', required=True),
|
||||
db_password=dict(type='str', required=True, no_log=True),
|
||||
databases=dict(type='list', default=[]),
|
||||
databases=dict(type='list', elements='str', default=[]),
|
||||
host=dict(type='str', default='%'),
|
||||
state=dict(default='present', choices=['present', 'absent'])
|
||||
)
|
||||
|
||||
@@ -53,6 +53,7 @@ options:
|
||||
- key pair to use on the instance
|
||||
loadbalancers:
|
||||
type: list
|
||||
elements: dict
|
||||
description:
|
||||
- List of load balancer C(id) and C(port) hashes
|
||||
max_entities:
|
||||
@@ -78,6 +79,7 @@ options:
|
||||
required: true
|
||||
networks:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- The network to attach to the instances. If specified, you must include
|
||||
ALL networks including the public and private interfaces. Can be C(id)
|
||||
@@ -376,12 +378,12 @@ def main():
|
||||
flavor=dict(required=True),
|
||||
image=dict(required=True),
|
||||
key_name=dict(),
|
||||
loadbalancers=dict(type='list'),
|
||||
loadbalancers=dict(type='list', elements='dict'),
|
||||
meta=dict(type='dict', default={}),
|
||||
min_entities=dict(type='int', required=True),
|
||||
max_entities=dict(type='int', required=True),
|
||||
name=dict(required=True),
|
||||
networks=dict(type='list', default=['public', 'private']),
|
||||
networks=dict(type='list', elements='str', default=['public', 'private']),
|
||||
server_name=dict(required=True),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
user_data=dict(no_log=True),
|
||||
|
||||
@@ -70,6 +70,7 @@ options:
|
||||
|
||||
tags:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- List of tags to apply to the instance (5 max)
|
||||
required: false
|
||||
@@ -652,7 +653,7 @@ def main():
|
||||
enable_ipv6=dict(default=False, type="bool"),
|
||||
public_ip=dict(default="absent"),
|
||||
state=dict(choices=list(state_strategy.keys()), default='present'),
|
||||
tags=dict(type="list", default=[]),
|
||||
tags=dict(type="list", elements="str", default=[]),
|
||||
organization=dict(required=True),
|
||||
wait=dict(type="bool", default=False),
|
||||
wait_timeout=dict(type="int", default=300),
|
||||
|
||||
@@ -63,6 +63,7 @@ options:
|
||||
|
||||
tags:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- List of tags to apply to the load-balancer
|
||||
|
||||
@@ -338,7 +339,7 @@ def main():
|
||||
description=dict(required=True),
|
||||
region=dict(required=True, choices=SCALEWAY_REGIONS),
|
||||
state=dict(choices=list(state_strategy.keys()), default='present'),
|
||||
tags=dict(type="list", default=[]),
|
||||
tags=dict(type="list", elements="str", default=[]),
|
||||
organization_id=dict(required=True),
|
||||
wait=dict(type="bool", default=False),
|
||||
wait_timeout=dict(type="int", default=300),
|
||||
|
||||
@@ -24,6 +24,7 @@ options:
|
||||
manifest and 'published_date', 'published', 'source', 'clones',
|
||||
and 'size'. More information can be found at U(https://smartos.org/man/1m/imgadm)
|
||||
under 'imgadm list'.
|
||||
type: str
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
||||
@@ -404,7 +404,7 @@ def main():
|
||||
nic_speed=dict(type='int', choices=NIC_SPEEDS),
|
||||
public_vlan=dict(type='str'),
|
||||
private_vlan=dict(type='str'),
|
||||
ssh_keys=dict(type='list', elements='str', default=[]),
|
||||
ssh_keys=dict(type='list', elements='str', default=[], no_log=False),
|
||||
post_uri=dict(type='str'),
|
||||
state=dict(type='str', default='present', choices=STATES),
|
||||
wait=dict(type='bool', default=True),
|
||||
|
||||
@@ -1305,10 +1305,8 @@ def expand_tags(eg_launchspec, tags):
|
||||
|
||||
for tag in tags:
|
||||
eg_tag = spotinst.aws_elastigroup.Tag()
|
||||
if tag.keys():
|
||||
eg_tag.tag_key = tag.keys()[0]
|
||||
if tag.values():
|
||||
eg_tag.tag_value = tag.values()[0]
|
||||
if tag:
|
||||
eg_tag.tag_key, eg_tag.tag_value = list(tag.items())[0]
|
||||
|
||||
eg_tags.append(eg_tag)
|
||||
|
||||
@@ -1450,7 +1448,7 @@ def main():
|
||||
iam_role_arn=dict(type='str'),
|
||||
iam_role_name=dict(type='str'),
|
||||
image_id=dict(type='str', required=True),
|
||||
key_pair=dict(type='str'),
|
||||
key_pair=dict(type='str', no_log=False),
|
||||
kubernetes=dict(type='dict'),
|
||||
lifetime_period=dict(type='int'),
|
||||
load_balancers=dict(type='list'),
|
||||
|
||||
@@ -35,17 +35,20 @@ options:
|
||||
description:
|
||||
- The name of the application
|
||||
required: true
|
||||
type: str
|
||||
|
||||
state:
|
||||
description:
|
||||
- Whether the application should exist
|
||||
choices: ['present', 'absent']
|
||||
default: "present"
|
||||
type: str
|
||||
|
||||
type:
|
||||
description:
|
||||
- The type of application to create. See the Webfaction docs at U(https://docs.webfaction.com/xmlrpc-api/apps.html) for a list.
|
||||
required: true
|
||||
type: str
|
||||
|
||||
autostart:
|
||||
description:
|
||||
@@ -57,6 +60,7 @@ options:
|
||||
description:
|
||||
- Any extra parameters required by the app
|
||||
default: ''
|
||||
type: str
|
||||
|
||||
port_open:
|
||||
description:
|
||||
@@ -68,15 +72,18 @@ options:
|
||||
description:
|
||||
- The webfaction account to use
|
||||
required: true
|
||||
type: str
|
||||
|
||||
login_password:
|
||||
description:
|
||||
- The webfaction password to use
|
||||
required: true
|
||||
type: str
|
||||
|
||||
machine:
|
||||
description:
|
||||
- The machine name to use (optional for accounts with only one machine)
|
||||
type: str
|
||||
|
||||
'''
|
||||
|
||||
|
||||
@@ -32,36 +32,43 @@ options:
|
||||
description:
|
||||
- The name of the database
|
||||
required: true
|
||||
type: str
|
||||
|
||||
state:
|
||||
description:
|
||||
- Whether the database should exist
|
||||
choices: ['present', 'absent']
|
||||
default: "present"
|
||||
type: str
|
||||
|
||||
type:
|
||||
description:
|
||||
- The type of database to create.
|
||||
required: true
|
||||
choices: ['mysql', 'postgresql']
|
||||
type: str
|
||||
|
||||
password:
|
||||
description:
|
||||
- The password for the new database user.
|
||||
type: str
|
||||
|
||||
login_name:
|
||||
description:
|
||||
- The webfaction account to use
|
||||
required: true
|
||||
type: str
|
||||
|
||||
login_password:
|
||||
description:
|
||||
- The webfaction password to use
|
||||
required: true
|
||||
type: str
|
||||
|
||||
machine:
|
||||
description:
|
||||
- The machine name to use (optional for accounts with only one machine)
|
||||
type: str
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
||||
@@ -32,27 +32,33 @@ options:
|
||||
description:
|
||||
- The name of the domain
|
||||
required: true
|
||||
type: str
|
||||
|
||||
state:
|
||||
description:
|
||||
- Whether the domain should exist
|
||||
choices: ['present', 'absent']
|
||||
default: "present"
|
||||
type: str
|
||||
|
||||
subdomains:
|
||||
description:
|
||||
- Any subdomains to create.
|
||||
default: []
|
||||
type: list
|
||||
elements: str
|
||||
|
||||
login_name:
|
||||
description:
|
||||
- The webfaction account to use
|
||||
required: true
|
||||
type: str
|
||||
|
||||
login_password:
|
||||
description:
|
||||
- The webfaction password to use
|
||||
required: true
|
||||
type: str
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
@@ -87,8 +93,8 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
name=dict(required=True),
|
||||
state=dict(required=False, choices=['present', 'absent'], default='present'),
|
||||
subdomains=dict(required=False, default=[], type='list'),
|
||||
state=dict(choices=['present', 'absent'], default='present'),
|
||||
subdomains=dict(default=[], type='list', elements='str'),
|
||||
login_name=dict(required=True),
|
||||
login_password=dict(required=True, no_log=True),
|
||||
),
|
||||
|
||||
@@ -29,27 +29,32 @@ options:
|
||||
description:
|
||||
- The name of the mailbox
|
||||
required: true
|
||||
type: str
|
||||
|
||||
mailbox_password:
|
||||
description:
|
||||
- The password for the mailbox
|
||||
required: true
|
||||
type: str
|
||||
|
||||
state:
|
||||
description:
|
||||
- Whether the mailbox should exist
|
||||
choices: ['present', 'absent']
|
||||
default: "present"
|
||||
type: str
|
||||
|
||||
login_name:
|
||||
description:
|
||||
- The webfaction account to use
|
||||
required: true
|
||||
type: str
|
||||
|
||||
login_password:
|
||||
description:
|
||||
- The webfaction password to use
|
||||
required: true
|
||||
type: str
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
||||
@@ -33,17 +33,20 @@ options:
|
||||
description:
|
||||
- The name of the website
|
||||
required: true
|
||||
type: str
|
||||
|
||||
state:
|
||||
description:
|
||||
- Whether the website should exist
|
||||
choices: ['present', 'absent']
|
||||
default: "present"
|
||||
type: str
|
||||
|
||||
host:
|
||||
description:
|
||||
- The webfaction host on which the site should be created.
|
||||
required: true
|
||||
type: str
|
||||
|
||||
https:
|
||||
description:
|
||||
@@ -55,21 +58,27 @@ options:
|
||||
description:
|
||||
- A mapping of URLs to apps
|
||||
default: []
|
||||
type: list
|
||||
elements: list
|
||||
|
||||
subdomains:
|
||||
description:
|
||||
- A list of subdomains associated with this site.
|
||||
default: []
|
||||
type: list
|
||||
elements: str
|
||||
|
||||
login_name:
|
||||
description:
|
||||
- The webfaction account to use
|
||||
required: true
|
||||
type: str
|
||||
|
||||
login_password:
|
||||
description:
|
||||
- The webfaction password to use
|
||||
required: true
|
||||
type: str
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
@@ -101,12 +110,12 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
name=dict(required=True),
|
||||
state=dict(required=False, choices=['present', 'absent'], default='present'),
|
||||
state=dict(choices=['present', 'absent'], default='present'),
|
||||
# You can specify an IP address or hostname.
|
||||
host=dict(required=True),
|
||||
https=dict(required=False, type='bool', default=False),
|
||||
subdomains=dict(required=False, type='list', default=[]),
|
||||
site_apps=dict(required=False, type='list', default=[]),
|
||||
subdomains=dict(type='list', elements='str', default=[]),
|
||||
site_apps=dict(type='list', elements='list', default=[]),
|
||||
login_name=dict(required=True),
|
||||
login_password=dict(required=True, no_log=True),
|
||||
),
|
||||
|
||||
@@ -1839,7 +1839,7 @@ def main():
|
||||
type='list',
|
||||
elements='dict',
|
||||
options=dict(
|
||||
key=dict(type='str', required=True),
|
||||
key=dict(type='str', required=True, no_log=False),
|
||||
value=dict(type='raw', required=True),
|
||||
),
|
||||
),
|
||||
|
||||
@@ -33,6 +33,7 @@ requirements:
|
||||
author: "Steve Gargan (@sgargan)"
|
||||
options:
|
||||
state:
|
||||
type: str
|
||||
description:
|
||||
- register or deregister the consul service, defaults to present
|
||||
default: present
|
||||
@@ -86,6 +87,7 @@ options:
|
||||
documentation for further details.
|
||||
tags:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- tags that will be attached to the service registration.
|
||||
script:
|
||||
@@ -345,7 +347,7 @@ def remove_service(module, service_id):
|
||||
module.exit_json(changed=False, id=service_id)
|
||||
|
||||
|
||||
def get_consul_api(module, token=None):
|
||||
def get_consul_api(module):
|
||||
consulClient = consul.Consul(host=module.params.get('host'),
|
||||
port=module.params.get('port'),
|
||||
scheme=module.params.get('scheme'),
|
||||
@@ -398,7 +400,7 @@ def parse_service(module):
|
||||
module.fail_json(msg="service_name is required to configure a service.")
|
||||
|
||||
|
||||
class ConsulService():
|
||||
class ConsulService(object):
|
||||
|
||||
def __init__(self, service_id=None, name=None, address=None, port=-1,
|
||||
tags=None, loaded=None):
|
||||
@@ -564,26 +566,26 @@ def main():
|
||||
argument_spec=dict(
|
||||
host=dict(default='localhost'),
|
||||
port=dict(default=8500, type='int'),
|
||||
scheme=dict(required=False, default='http'),
|
||||
validate_certs=dict(required=False, default=True, type='bool'),
|
||||
check_id=dict(required=False),
|
||||
check_name=dict(required=False),
|
||||
check_node=dict(required=False),
|
||||
check_host=dict(required=False),
|
||||
notes=dict(required=False),
|
||||
script=dict(required=False),
|
||||
service_id=dict(required=False),
|
||||
service_name=dict(required=False),
|
||||
service_address=dict(required=False, type='str', default=None),
|
||||
service_port=dict(required=False, type='int', default=None),
|
||||
scheme=dict(default='http'),
|
||||
validate_certs=dict(default=True, type='bool'),
|
||||
check_id=dict(),
|
||||
check_name=dict(),
|
||||
check_node=dict(),
|
||||
check_host=dict(),
|
||||
notes=dict(),
|
||||
script=dict(),
|
||||
service_id=dict(),
|
||||
service_name=dict(),
|
||||
service_address=dict(type='str'),
|
||||
service_port=dict(type='int'),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
interval=dict(required=False, type='str'),
|
||||
ttl=dict(required=False, type='str'),
|
||||
tcp=dict(required=False, type='str'),
|
||||
http=dict(required=False, type='str'),
|
||||
timeout=dict(required=False, type='str'),
|
||||
tags=dict(required=False, type='list'),
|
||||
token=dict(required=False, no_log=True)
|
||||
interval=dict(type='str'),
|
||||
ttl=dict(type='str'),
|
||||
tcp=dict(type='str'),
|
||||
http=dict(type='str'),
|
||||
timeout=dict(type='str'),
|
||||
tags=dict(type='list', elements='str'),
|
||||
token=dict(no_log=True)
|
||||
),
|
||||
supports_check_mode=False,
|
||||
)
|
||||
|
||||
@@ -22,29 +22,35 @@ options:
|
||||
description:
|
||||
- a management token is required to manipulate the acl lists
|
||||
required: true
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- whether the ACL pair should be present or absent
|
||||
required: false
|
||||
choices: ['present', 'absent']
|
||||
default: present
|
||||
type: str
|
||||
token_type:
|
||||
description:
|
||||
- the type of token that should be created
|
||||
choices: ['client', 'management']
|
||||
default: client
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- the name that should be associated with the acl key, this is opaque
|
||||
to Consul
|
||||
required: false
|
||||
type: str
|
||||
token:
|
||||
description:
|
||||
- the token key identifying an ACL rule set. If generated by consul
|
||||
this will be a UUID
|
||||
required: false
|
||||
type: str
|
||||
rules:
|
||||
type: list
|
||||
elements: dict
|
||||
description:
|
||||
- rules that should be associated with a given token
|
||||
required: false
|
||||
@@ -53,6 +59,7 @@ options:
|
||||
- host of the consul agent defaults to localhost
|
||||
required: false
|
||||
default: localhost
|
||||
type: str
|
||||
port:
|
||||
type: int
|
||||
description:
|
||||
@@ -64,6 +71,7 @@ options:
|
||||
- the protocol scheme on which the consul agent is running
|
||||
required: false
|
||||
default: http
|
||||
type: str
|
||||
validate_certs:
|
||||
type: bool
|
||||
description:
|
||||
@@ -215,14 +223,14 @@ _POLICY_HCL_PROPERTY = "policy"
|
||||
_ARGUMENT_SPEC = {
|
||||
MANAGEMENT_PARAMETER_NAME: dict(required=True, no_log=True),
|
||||
HOST_PARAMETER_NAME: dict(default='localhost'),
|
||||
SCHEME_PARAMETER_NAME: dict(required=False, default='http'),
|
||||
VALIDATE_CERTS_PARAMETER_NAME: dict(required=False, type='bool', default=True),
|
||||
NAME_PARAMETER_NAME: dict(required=False),
|
||||
SCHEME_PARAMETER_NAME: dict(default='http'),
|
||||
VALIDATE_CERTS_PARAMETER_NAME: dict(type='bool', default=True),
|
||||
NAME_PARAMETER_NAME: dict(),
|
||||
PORT_PARAMETER_NAME: dict(default=8500, type='int'),
|
||||
RULES_PARAMETER_NAME: dict(default=None, required=False, type='list'),
|
||||
RULES_PARAMETER_NAME: dict(type='list', elements='dict'),
|
||||
STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]),
|
||||
TOKEN_PARAMETER_NAME: dict(required=False),
|
||||
TOKEN_TYPE_PARAMETER_NAME: dict(required=False, choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE],
|
||||
TOKEN_PARAMETER_NAME: dict(no_log=False),
|
||||
TOKEN_TYPE_PARAMETER_NAME: dict(choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE],
|
||||
default=CLIENT_TOKEN_TYPE_VALUE)
|
||||
}
|
||||
|
||||
|
||||
@@ -37,6 +37,7 @@ options:
|
||||
'release' respectively. a valid session must be supplied to make the
|
||||
attempt changed will be true if the attempt is successful, false
|
||||
otherwise.
|
||||
type: str
|
||||
choices: [ absent, acquire, present, release ]
|
||||
default: present
|
||||
key:
|
||||
@@ -296,7 +297,7 @@ def main():
|
||||
argument_spec=dict(
|
||||
cas=dict(type='str'),
|
||||
flags=dict(type='str'),
|
||||
key=dict(type='str', required=True),
|
||||
key=dict(type='str', required=True, no_log=False),
|
||||
host=dict(type='str', default='localhost'),
|
||||
scheme=dict(type='str', default='http'),
|
||||
validate_certs=dict(type='bool', default=True),
|
||||
|
||||
@@ -67,6 +67,7 @@ options:
|
||||
associated with the session will be release and can be acquired once
|
||||
the associated lock delay has expired.
|
||||
type: list
|
||||
elements: str
|
||||
host:
|
||||
description:
|
||||
- The host of the consul agent defaults to localhost.
|
||||
@@ -237,7 +238,7 @@ def test_dependencies(module):
|
||||
|
||||
def main():
|
||||
argument_spec = dict(
|
||||
checks=dict(type='list'),
|
||||
checks=dict(type='list', elements='str'),
|
||||
delay=dict(type='int', default='15'),
|
||||
behavior=dict(type='str', default='release', choices=['release', 'delete']),
|
||||
host=dict(type='str', default='localhost'),
|
||||
|
||||
@@ -18,47 +18,58 @@ description:
|
||||
- Needs python etcd3 lib to work
|
||||
options:
|
||||
key:
|
||||
type: str
|
||||
description:
|
||||
- the key where the information is stored in the cluster
|
||||
required: true
|
||||
value:
|
||||
type: str
|
||||
description:
|
||||
- the information stored
|
||||
required: true
|
||||
host:
|
||||
type: str
|
||||
description:
|
||||
- the IP address of the cluster
|
||||
default: 'localhost'
|
||||
port:
|
||||
type: int
|
||||
description:
|
||||
- the port number used to connect to the cluster
|
||||
default: 2379
|
||||
state:
|
||||
type: str
|
||||
description:
|
||||
- the state of the value for the key.
|
||||
- can be present or absent
|
||||
required: true
|
||||
choices: [ present, absent ]
|
||||
user:
|
||||
type: str
|
||||
description:
|
||||
- The etcd user to authenticate with.
|
||||
password:
|
||||
type: str
|
||||
description:
|
||||
- The password to use for authentication.
|
||||
- Required if I(user) is defined.
|
||||
ca_cert:
|
||||
type: path
|
||||
description:
|
||||
- The Certificate Authority to use to verify the etcd host.
|
||||
- Required if I(client_cert) and I(client_key) are defined.
|
||||
client_cert:
|
||||
type: path
|
||||
description:
|
||||
- PEM formatted certificate chain file to be used for SSL client authentication.
|
||||
- Required if I(client_key) is defined.
|
||||
client_key:
|
||||
type: path
|
||||
description:
|
||||
- PEM formatted file that contains your private key to be used for SSL client authentication.
|
||||
- Required if I(client_cert) is defined.
|
||||
timeout:
|
||||
type: int
|
||||
description:
|
||||
- The socket level timeout in seconds.
|
||||
author:
|
||||
@@ -123,7 +134,7 @@ def run_module():
|
||||
# define the available arguments/parameters that a user can pass to
|
||||
# the module
|
||||
module_args = dict(
|
||||
key=dict(type='str', required=True),
|
||||
key=dict(type='str', required=True, no_log=False),
|
||||
value=dict(type='str', required=True),
|
||||
host=dict(type='str', default='localhost'),
|
||||
port=dict(type='int', default=2379),
|
||||
|
||||
@@ -17,25 +17,31 @@ options:
|
||||
description:
|
||||
- A list of ZooKeeper servers (format '[server]:[port]').
|
||||
required: true
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The path of the znode.
|
||||
required: true
|
||||
type: str
|
||||
value:
|
||||
description:
|
||||
- The value assigned to the znode.
|
||||
type: str
|
||||
op:
|
||||
description:
|
||||
- An operation to perform. Mutually exclusive with state.
|
||||
choices: [ get, wait, list ]
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- The state to enforce. Mutually exclusive with op.
|
||||
choices: [ present, absent ]
|
||||
type: str
|
||||
timeout:
|
||||
description:
|
||||
- The amount of time to wait for a node to appear.
|
||||
default: 300
|
||||
type: int
|
||||
recursive:
|
||||
description:
|
||||
- Recursively delete node and all its children.
|
||||
@@ -110,11 +116,11 @@ def main():
|
||||
argument_spec=dict(
|
||||
hosts=dict(required=True, type='str'),
|
||||
name=dict(required=True, type='str'),
|
||||
value=dict(required=False, default=None, type='str'),
|
||||
op=dict(required=False, default=None, choices=['get', 'wait', 'list']),
|
||||
value=dict(type='str'),
|
||||
op=dict(choices=['get', 'wait', 'list']),
|
||||
state=dict(choices=['present', 'absent']),
|
||||
timeout=dict(required=False, default=300, type='int'),
|
||||
recursive=dict(required=False, default=False, type='bool')
|
||||
timeout=dict(default=300, type='int'),
|
||||
recursive=dict(default=False, type='bool')
|
||||
),
|
||||
supports_check_mode=False
|
||||
)
|
||||
|
||||
@@ -190,9 +190,9 @@ def run_module():
|
||||
min_cluster_size=dict(type='int', required=False, default=1),
|
||||
target_cluster_size=dict(type='int', required=False, default=None),
|
||||
fail_on_cluster_change=dict(type='bool', required=False, default=True),
|
||||
migrate_tx_key=dict(type='str', required=False,
|
||||
migrate_tx_key=dict(type='str', required=False, no_log=False,
|
||||
default="migrate_tx_partitions_remaining"),
|
||||
migrate_rx_key=dict(type='str', required=False,
|
||||
migrate_rx_key=dict(type='str', required=False, no_log=False,
|
||||
default="migrate_rx_partitions_remaining")
|
||||
)
|
||||
|
||||
|
||||
@@ -58,7 +58,13 @@ options:
|
||||
description:
|
||||
- Delete and re-install the plugin. Can be useful for plugins update.
|
||||
type: bool
|
||||
default: 'no'
|
||||
default: false
|
||||
allow_root:
|
||||
description:
|
||||
- Whether to allow C(kibana) and C(kibana-plugin) to be run as root. Passes the C(--allow-root) flag to these commands.
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 2.3.0
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
@@ -152,7 +158,7 @@ def parse_error(string):
|
||||
return string
|
||||
|
||||
|
||||
def install_plugin(module, plugin_bin, plugin_name, url, timeout, kibana_version='4.6'):
|
||||
def install_plugin(module, plugin_bin, plugin_name, url, timeout, allow_root, kibana_version='4.6'):
|
||||
if LooseVersion(kibana_version) > LooseVersion('4.6'):
|
||||
kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin')
|
||||
cmd_args = [kibana_plugin_bin, "install"]
|
||||
@@ -169,6 +175,9 @@ def install_plugin(module, plugin_bin, plugin_name, url, timeout, kibana_version
|
||||
if timeout:
|
||||
cmd_args.append("--timeout %s" % timeout)
|
||||
|
||||
if allow_root:
|
||||
cmd_args.append('--allow-root')
|
||||
|
||||
cmd = " ".join(cmd_args)
|
||||
|
||||
if module.check_mode:
|
||||
@@ -182,13 +191,16 @@ def install_plugin(module, plugin_bin, plugin_name, url, timeout, kibana_version
|
||||
return True, cmd, out, err
|
||||
|
||||
|
||||
def remove_plugin(module, plugin_bin, plugin_name, kibana_version='4.6'):
|
||||
def remove_plugin(module, plugin_bin, plugin_name, allow_root, kibana_version='4.6'):
|
||||
if LooseVersion(kibana_version) > LooseVersion('4.6'):
|
||||
kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin')
|
||||
cmd_args = [kibana_plugin_bin, "remove", plugin_name]
|
||||
else:
|
||||
cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name]
|
||||
|
||||
if allow_root:
|
||||
cmd_args.append('--allow-root')
|
||||
|
||||
cmd = " ".join(cmd_args)
|
||||
|
||||
if module.check_mode:
|
||||
@@ -202,8 +214,12 @@ def remove_plugin(module, plugin_bin, plugin_name, kibana_version='4.6'):
|
||||
return True, cmd, out, err
|
||||
|
||||
|
||||
def get_kibana_version(module, plugin_bin):
|
||||
def get_kibana_version(module, plugin_bin, allow_root):
|
||||
cmd_args = [plugin_bin, '--version']
|
||||
|
||||
if allow_root:
|
||||
cmd_args.append('--allow-root')
|
||||
|
||||
cmd = " ".join(cmd_args)
|
||||
rc, out, err = module.run_command(cmd)
|
||||
if rc != 0:
|
||||
@@ -222,7 +238,8 @@ def main():
|
||||
plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"),
|
||||
plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"),
|
||||
version=dict(default=None),
|
||||
force=dict(default="no", type="bool")
|
||||
force=dict(default=False, type="bool"),
|
||||
allow_root=dict(default=False, type="bool"),
|
||||
),
|
||||
supports_check_mode=True,
|
||||
)
|
||||
@@ -235,10 +252,11 @@ def main():
|
||||
plugin_dir = module.params["plugin_dir"]
|
||||
version = module.params["version"]
|
||||
force = module.params["force"]
|
||||
allow_root = module.params["allow_root"]
|
||||
|
||||
changed, cmd, out, err = False, '', '', ''
|
||||
|
||||
kibana_version = get_kibana_version(module, plugin_bin)
|
||||
kibana_version = get_kibana_version(module, plugin_bin, allow_root)
|
||||
|
||||
present = is_plugin_present(parse_plugin_repo(name), plugin_dir)
|
||||
|
||||
@@ -252,10 +270,10 @@ def main():
|
||||
if state == "present":
|
||||
if force:
|
||||
remove_plugin(module, plugin_bin, name)
|
||||
changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout, kibana_version)
|
||||
changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout, allow_root, kibana_version)
|
||||
|
||||
elif state == "absent":
|
||||
changed, cmd, out, err = remove_plugin(module, plugin_bin, name, kibana_version)
|
||||
changed, cmd, out, err = remove_plugin(module, plugin_bin, name, allow_root, kibana_version)
|
||||
|
||||
module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
|
||||
|
||||
|
||||
@@ -36,6 +36,7 @@ options:
|
||||
description:
|
||||
- The file name of the destination archive. The parent directory must exists on the remote host.
|
||||
- This is required when C(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list.
|
||||
- If the destination archive already exists, it will be truncated and overwritten.
|
||||
type: path
|
||||
exclude_path:
|
||||
description:
|
||||
@@ -44,8 +45,9 @@ options:
|
||||
elements: path
|
||||
force_archive:
|
||||
description:
|
||||
- Allow you to force the module to treat this as an archive even if only a single file is specified.
|
||||
- By default behaviour is maintained. i.e A when a single file is specified it is compressed only (not archived).
|
||||
- Allows you to force the module to treat this as an archive even if only a single file is specified.
|
||||
- By default when a single file is specified it is compressed only (not archived).
|
||||
- Enable this if you want to use M(ansible.builtin.unarchive) on an archive of a single file created with this module.
|
||||
type: bool
|
||||
default: false
|
||||
remove:
|
||||
@@ -153,7 +155,6 @@ expanded_exclude_paths:
|
||||
'''
|
||||
|
||||
import bz2
|
||||
import filecmp
|
||||
import glob
|
||||
import gzip
|
||||
import io
|
||||
@@ -186,6 +187,33 @@ else:
|
||||
HAS_LZMA = False
|
||||
|
||||
|
||||
def to_b(s):
|
||||
return to_bytes(s, errors='surrogate_or_strict')
|
||||
|
||||
|
||||
def to_n(s):
|
||||
return to_native(s, errors='surrogate_or_strict')
|
||||
|
||||
|
||||
def to_na(s):
|
||||
return to_native(s, errors='surrogate_or_strict', encoding='ascii')
|
||||
|
||||
|
||||
def expand_paths(paths):
|
||||
expanded_path = []
|
||||
is_globby = False
|
||||
for path in paths:
|
||||
b_path = to_b(path)
|
||||
if b'*' in b_path or b'?' in b_path:
|
||||
e_paths = glob.glob(b_path)
|
||||
is_globby = True
|
||||
|
||||
else:
|
||||
e_paths = [b_path]
|
||||
expanded_path.extend(e_paths)
|
||||
return expanded_path, is_globby
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
@@ -204,21 +232,17 @@ def main():
|
||||
check_mode = module.check_mode
|
||||
paths = params['path']
|
||||
dest = params['dest']
|
||||
b_dest = None if not dest else to_bytes(dest, errors='surrogate_or_strict')
|
||||
b_dest = None if not dest else to_b(dest)
|
||||
exclude_paths = params['exclude_path']
|
||||
remove = params['remove']
|
||||
|
||||
b_expanded_paths = []
|
||||
b_expanded_exclude_paths = []
|
||||
fmt = params['format']
|
||||
b_fmt = to_bytes(fmt, errors='surrogate_or_strict')
|
||||
b_fmt = to_b(fmt)
|
||||
force_archive = params['force_archive']
|
||||
globby = False
|
||||
changed = False
|
||||
state = 'absent'
|
||||
|
||||
# Simple or archive file compression (inapplicable with 'zip' since it's always an archive)
|
||||
archive = False
|
||||
b_successes = []
|
||||
|
||||
# Fail early
|
||||
@@ -227,35 +251,7 @@ def main():
|
||||
exception=LZMA_IMP_ERR)
|
||||
module.fail_json(msg="lzma or backports.lzma is required when using xz format.")
|
||||
|
||||
for path in paths:
|
||||
b_path = to_bytes(path, errors='surrogate_or_strict')
|
||||
|
||||
# Expand any glob characters. If found, add the expanded glob to the
|
||||
# list of expanded_paths, which might be empty.
|
||||
if (b'*' in b_path or b'?' in b_path):
|
||||
b_expanded_paths.extend(glob.glob(b_path))
|
||||
globby = True
|
||||
|
||||
# If there are no glob characters the path is added to the expanded paths
|
||||
# whether the path exists or not
|
||||
else:
|
||||
b_expanded_paths.append(b_path)
|
||||
|
||||
# Only attempt to expand the exclude paths if it exists
|
||||
if exclude_paths:
|
||||
for exclude_path in exclude_paths:
|
||||
b_exclude_path = to_bytes(exclude_path, errors='surrogate_or_strict')
|
||||
|
||||
# Expand any glob characters. If found, add the expanded glob to the
|
||||
# list of expanded_paths, which might be empty.
|
||||
if (b'*' in b_exclude_path or b'?' in b_exclude_path):
|
||||
b_expanded_exclude_paths.extend(glob.glob(b_exclude_path))
|
||||
|
||||
# If there are no glob character the exclude path is added to the expanded
|
||||
# exclude paths whether the path exists or not.
|
||||
else:
|
||||
b_expanded_exclude_paths.append(b_exclude_path)
|
||||
|
||||
b_expanded_paths, globby = expand_paths(paths)
|
||||
if not b_expanded_paths:
|
||||
return module.fail_json(
|
||||
path=', '.join(paths),
|
||||
@@ -263,6 +259,9 @@ def main():
|
||||
msg='Error, no source paths were found'
|
||||
)
|
||||
|
||||
# Only attempt to expand the exclude paths if it exists
|
||||
b_expanded_exclude_paths = expand_paths(exclude_paths)[0] if exclude_paths else []
|
||||
|
||||
# Only try to determine if we are working with an archive or not if we haven't set archive to true
|
||||
if not force_archive:
|
||||
# If we actually matched multiple files or TRIED to, then
|
||||
@@ -280,7 +279,7 @@ def main():
|
||||
if archive and not b_dest:
|
||||
module.fail_json(dest=dest, path=', '.join(paths), msg='Error, must specify "dest" when archiving multiple files or trees')
|
||||
|
||||
b_sep = to_bytes(os.sep, errors='surrogate_or_strict')
|
||||
b_sep = to_b(os.sep)
|
||||
|
||||
b_archive_paths = []
|
||||
b_missing = []
|
||||
@@ -321,7 +320,7 @@ def main():
|
||||
# No source files were found but the named archive exists: are we 'compress' or 'archive' now?
|
||||
if len(b_missing) == len(b_expanded_paths) and b_dest and os.path.exists(b_dest):
|
||||
# Just check the filename to know if it's an archive or simple compressed file
|
||||
if re.search(br'(\.tar|\.tar\.gz|\.tgz|\.tbz2|\.tar\.bz2|\.tar\.xz|\.zip)$', os.path.basename(b_dest), re.IGNORECASE):
|
||||
if re.search(br'\.(tar|tar\.(gz|bz2|xz)|tgz|tbz2|zip)$', os.path.basename(b_dest), re.IGNORECASE):
|
||||
state = 'archive'
|
||||
else:
|
||||
state = 'compress'
|
||||
@@ -352,7 +351,7 @@ def main():
|
||||
# Slightly more difficult (and less efficient!) compression using zipfile module
|
||||
if fmt == 'zip':
|
||||
arcfile = zipfile.ZipFile(
|
||||
to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'),
|
||||
to_na(b_dest),
|
||||
'w',
|
||||
zipfile.ZIP_DEFLATED,
|
||||
True
|
||||
@@ -360,7 +359,7 @@ def main():
|
||||
|
||||
# Easier compression using tarfile module
|
||||
elif fmt == 'gz' or fmt == 'bz2':
|
||||
arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w|' + fmt)
|
||||
arcfile = tarfile.open(to_na(b_dest), 'w|' + fmt)
|
||||
|
||||
# python3 tarfile module allows xz format but for python2 we have to create the tarfile
|
||||
# in memory and then compress it with lzma.
|
||||
@@ -370,7 +369,7 @@ def main():
|
||||
|
||||
# Or plain tar archiving
|
||||
elif fmt == 'tar':
|
||||
arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w')
|
||||
arcfile = tarfile.open(to_na(b_dest), 'w')
|
||||
|
||||
b_match_root = re.compile(br'^%s' % re.escape(b_arcroot))
|
||||
for b_path in b_archive_paths:
|
||||
@@ -382,7 +381,7 @@ def main():
|
||||
|
||||
for b_dirname in b_dirnames:
|
||||
b_fullpath = b_dirpath + b_dirname
|
||||
n_fullpath = to_native(b_fullpath, errors='surrogate_or_strict', encoding='ascii')
|
||||
n_fullpath = to_na(b_fullpath)
|
||||
n_arcname = to_native(b_match_root.sub(b'', b_fullpath), errors='surrogate_or_strict')
|
||||
|
||||
try:
|
||||
@@ -396,8 +395,8 @@ def main():
|
||||
|
||||
for b_filename in b_filenames:
|
||||
b_fullpath = b_dirpath + b_filename
|
||||
n_fullpath = to_native(b_fullpath, errors='surrogate_or_strict', encoding='ascii')
|
||||
n_arcname = to_native(b_match_root.sub(b'', b_fullpath), errors='surrogate_or_strict')
|
||||
n_fullpath = to_na(b_fullpath)
|
||||
n_arcname = to_n(b_match_root.sub(b'', b_fullpath))
|
||||
|
||||
try:
|
||||
if fmt == 'zip':
|
||||
@@ -409,8 +408,8 @@ def main():
|
||||
except Exception as e:
|
||||
errors.append('Adding %s: %s' % (to_native(b_path), to_native(e)))
|
||||
else:
|
||||
path = to_native(b_path, errors='surrogate_or_strict', encoding='ascii')
|
||||
arcname = to_native(b_match_root.sub(b'', b_path), errors='surrogate_or_strict')
|
||||
path = to_na(b_path)
|
||||
arcname = to_n(b_match_root.sub(b'', b_path))
|
||||
if fmt == 'zip':
|
||||
arcfile.write(path, arcname)
|
||||
else:
|
||||
@@ -444,14 +443,14 @@ def main():
|
||||
shutil.rmtree(b_path)
|
||||
elif not check_mode:
|
||||
os.remove(b_path)
|
||||
except OSError as e:
|
||||
except OSError:
|
||||
errors.append(to_native(b_path))
|
||||
|
||||
for b_path in b_expanded_paths:
|
||||
try:
|
||||
if os.path.isdir(b_path):
|
||||
shutil.rmtree(b_path)
|
||||
except OSError as e:
|
||||
except OSError:
|
||||
errors.append(to_native(b_path))
|
||||
|
||||
if errors:
|
||||
@@ -490,25 +489,25 @@ def main():
|
||||
try:
|
||||
if fmt == 'zip':
|
||||
arcfile = zipfile.ZipFile(
|
||||
to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'),
|
||||
to_na(b_dest),
|
||||
'w',
|
||||
zipfile.ZIP_DEFLATED,
|
||||
True
|
||||
)
|
||||
arcfile.write(
|
||||
to_native(b_path, errors='surrogate_or_strict', encoding='ascii'),
|
||||
to_native(b_path[len(b_arcroot):], errors='surrogate_or_strict')
|
||||
to_na(b_path),
|
||||
to_n(b_path[len(b_arcroot):])
|
||||
)
|
||||
arcfile.close()
|
||||
state = 'archive' # because all zip files are archives
|
||||
elif fmt == 'tar':
|
||||
arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w')
|
||||
arcfile.add(to_native(b_path, errors='surrogate_or_strict', encoding='ascii'))
|
||||
arcfile = tarfile.open(to_na(b_dest), 'w')
|
||||
arcfile.add(to_na(b_path))
|
||||
arcfile.close()
|
||||
else:
|
||||
f_in = open(b_path, 'rb')
|
||||
|
||||
n_dest = to_native(b_dest, errors='surrogate_or_strict', encoding='ascii')
|
||||
n_dest = to_na(b_dest)
|
||||
if fmt == 'gz':
|
||||
f_out = gzip.open(n_dest, 'wb')
|
||||
elif fmt == 'bz2':
|
||||
@@ -564,14 +563,14 @@ def main():
|
||||
changed = module.set_fs_attributes_if_different(file_args, changed)
|
||||
|
||||
module.exit_json(
|
||||
archived=[to_native(p, errors='surrogate_or_strict') for p in b_successes],
|
||||
archived=[to_n(p) for p in b_successes],
|
||||
dest=dest,
|
||||
changed=changed,
|
||||
state=state,
|
||||
arcroot=to_native(b_arcroot, errors='surrogate_or_strict'),
|
||||
missing=[to_native(p, errors='surrogate_or_strict') for p in b_missing],
|
||||
expanded_paths=[to_native(p, errors='surrogate_or_strict') for p in b_expanded_paths],
|
||||
expanded_exclude_paths=[to_native(p, errors='surrogate_or_strict') for p in b_expanded_exclude_paths],
|
||||
arcroot=to_n(b_arcroot),
|
||||
missing=[to_n(p) for p in b_missing],
|
||||
expanded_paths=[to_n(p) for p in b_expanded_paths],
|
||||
expanded_exclude_paths=[to_n(p) for p in b_expanded_exclude_paths],
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -114,9 +114,7 @@ from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
def match_opt(option, line):
|
||||
option = re.escape(option)
|
||||
return re.match('( |\t)*%s( |\t)*(=|$)' % option, line) \
|
||||
or re.match('#( |\t)*%s( |\t)*(=|$)' % option, line) \
|
||||
or re.match(';( |\t)*%s( |\t)*(=|$)' % option, line)
|
||||
return re.match('[#;]?( |\t)*%s( |\t)*(=|$)' % option, line)
|
||||
|
||||
|
||||
def match_active_opt(option, line):
|
||||
@@ -251,9 +249,9 @@ def do_ini(module, filename, section=None, option=None, value=None,
|
||||
if not within_section and state == 'present':
|
||||
ini_lines.append('[%s]\n' % section)
|
||||
msg = 'section and option added'
|
||||
if option and value:
|
||||
if option and value is not None:
|
||||
ini_lines.append(assignment_format % (option, value))
|
||||
elif option and not value and allow_no_value:
|
||||
elif option and value is None and allow_no_value:
|
||||
ini_lines.append('%s\n' % option)
|
||||
else:
|
||||
msg = 'only section added'
|
||||
@@ -312,7 +310,7 @@ def main():
|
||||
allow_no_value = module.params['allow_no_value']
|
||||
create = module.params['create']
|
||||
|
||||
if state == 'present' and not allow_no_value and not value:
|
||||
if state == 'present' and not allow_no_value and value is None:
|
||||
module.fail_json("Parameter 'value' must not be empty if state=present and allow_no_value=False")
|
||||
|
||||
(changed, backup_file, diff, msg) = do_ini(module, path, section, option, value, state, backup, no_extra_spaces, create, allow_no_value)
|
||||
|
||||
@@ -137,26 +137,12 @@ list:
|
||||
gid: 500
|
||||
'''
|
||||
|
||||
import csv
|
||||
from io import BytesIO, StringIO
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_text
|
||||
from ansible.module_utils.six import PY3
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
# Add Unix dialect from Python 3
|
||||
class unix_dialect(csv.Dialect):
|
||||
"""Describe the usual properties of Unix-generated CSV files."""
|
||||
delimiter = ','
|
||||
quotechar = '"'
|
||||
doublequote = True
|
||||
skipinitialspace = False
|
||||
lineterminator = '\n'
|
||||
quoting = csv.QUOTE_ALL
|
||||
|
||||
|
||||
csv.register_dialect("unix", unix_dialect)
|
||||
from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError,
|
||||
DialectNotAvailableError,
|
||||
CustomDialectFailureError)
|
||||
|
||||
|
||||
def main():
|
||||
@@ -164,7 +150,7 @@ def main():
|
||||
argument_spec=dict(
|
||||
path=dict(type='path', required=True, aliases=['filename']),
|
||||
dialect=dict(type='str', default='excel'),
|
||||
key=dict(type='str'),
|
||||
key=dict(type='str', no_log=False),
|
||||
fieldnames=dict(type='list', elements='str'),
|
||||
unique=dict(type='bool', default=True),
|
||||
delimiter=dict(type='str'),
|
||||
@@ -180,38 +166,24 @@ def main():
|
||||
fieldnames = module.params['fieldnames']
|
||||
unique = module.params['unique']
|
||||
|
||||
if dialect not in csv.list_dialects():
|
||||
module.fail_json(msg="Dialect '%s' is not supported by your version of python." % dialect)
|
||||
dialect_params = {
|
||||
"delimiter": module.params['delimiter'],
|
||||
"skipinitialspace": module.params['skipinitialspace'],
|
||||
"strict": module.params['strict'],
|
||||
}
|
||||
|
||||
dialect_options = dict(
|
||||
delimiter=module.params['delimiter'],
|
||||
skipinitialspace=module.params['skipinitialspace'],
|
||||
strict=module.params['strict'],
|
||||
)
|
||||
|
||||
# Create a dictionary from only set options
|
||||
dialect_params = dict((k, v) for k, v in dialect_options.items() if v is not None)
|
||||
if dialect_params:
|
||||
try:
|
||||
csv.register_dialect('custom', dialect, **dialect_params)
|
||||
except TypeError as e:
|
||||
module.fail_json(msg="Unable to create custom dialect: %s" % to_text(e))
|
||||
dialect = 'custom'
|
||||
try:
|
||||
dialect = initialize_dialect(dialect, **dialect_params)
|
||||
except (CustomDialectFailureError, DialectNotAvailableError) as e:
|
||||
module.fail_json(msg=to_native(e))
|
||||
|
||||
try:
|
||||
with open(path, 'rb') as f:
|
||||
data = f.read()
|
||||
except (IOError, OSError) as e:
|
||||
module.fail_json(msg="Unable to open file: %s" % to_text(e))
|
||||
module.fail_json(msg="Unable to open file: %s" % to_native(e))
|
||||
|
||||
if PY3:
|
||||
# Manually decode on Python3 so that we can use the surrogateescape error handler
|
||||
data = to_text(data, errors='surrogate_or_strict')
|
||||
fake_fh = StringIO(data)
|
||||
else:
|
||||
fake_fh = BytesIO(data)
|
||||
|
||||
reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect)
|
||||
reader = read_csv(data, dialect, fieldnames)
|
||||
|
||||
if key and key not in reader.fieldnames:
|
||||
module.fail_json(msg="Key '%s' was not found in the CSV header fields: %s" % (key, ', '.join(reader.fieldnames)))
|
||||
@@ -223,16 +195,16 @@ def main():
|
||||
try:
|
||||
for row in reader:
|
||||
data_list.append(row)
|
||||
except csv.Error as e:
|
||||
module.fail_json(msg="Unable to process file: %s" % to_text(e))
|
||||
except CSVError as e:
|
||||
module.fail_json(msg="Unable to process file: %s" % to_native(e))
|
||||
else:
|
||||
try:
|
||||
for row in reader:
|
||||
if unique and row[key] in data_dict:
|
||||
module.fail_json(msg="Key '%s' is not unique for value '%s'" % (key, row[key]))
|
||||
data_dict[row[key]] = row
|
||||
except csv.Error as e:
|
||||
module.fail_json(msg="Unable to process file: %s" % to_text(e))
|
||||
except CSVError as e:
|
||||
module.fail_json(msg="Unable to process file: %s" % to_native(e))
|
||||
|
||||
module.exit_json(dict=data_dict, list=data_list)
|
||||
|
||||
|
||||
@@ -172,7 +172,7 @@ def main():
|
||||
argument_spec=dict(
|
||||
path=dict(type='path', required=True, aliases=['name']),
|
||||
namespace=dict(type='str', default='user'),
|
||||
key=dict(type='str'),
|
||||
key=dict(type='str', no_log=False),
|
||||
value=dict(type='str'),
|
||||
state=dict(type='str', default='read', choices=['absent', 'all', 'keys', 'present', 'read']),
|
||||
follow=dict(type='bool', default=True),
|
||||
|
||||
1
plugins/modules/gandi_livedns.py
Symbolic link
1
plugins/modules/gandi_livedns.py
Symbolic link
@@ -0,0 +1 @@
|
||||
net_tools/gandi_livedns.py
|
||||
1
plugins/modules/github_repo.py
Symbolic link
1
plugins/modules/github_repo.py
Symbolic link
@@ -0,0 +1 @@
|
||||
./source_control/github/github_repo.py
|
||||
1
plugins/modules/gitlab_project_members.py
Symbolic link
1
plugins/modules/gitlab_project_members.py
Symbolic link
@@ -0,0 +1 @@
|
||||
source_control/gitlab/gitlab_project_members.py
|
||||
@@ -68,6 +68,12 @@ options:
|
||||
- Option C(hostcategory) must be omitted to assign host groups.
|
||||
type: list
|
||||
elements: str
|
||||
runasextusers:
|
||||
description:
|
||||
- List of external RunAs users
|
||||
type: list
|
||||
elements: str
|
||||
version_added: 2.3.0
|
||||
runasusercategory:
|
||||
description:
|
||||
- RunAs User category the rule applies to.
|
||||
@@ -143,13 +149,15 @@ EXAMPLES = r'''
|
||||
ipa_user: admin
|
||||
ipa_pass: topsecret
|
||||
|
||||
- name: Ensure user group operations can run any commands that is part of operations-cmdgroup on any host.
|
||||
- name: Ensure user group operations can run any commands that is part of operations-cmdgroup on any host as user root.
|
||||
community.general.ipa_sudorule:
|
||||
name: sudo_operations_all
|
||||
description: Allow operators to run any commands that is part of operations-cmdgroup on any host.
|
||||
description: Allow operators to run any commands that is part of operations-cmdgroup on any host as user root.
|
||||
cmdgroup:
|
||||
- operations-cmdgroup
|
||||
hostcategory: all
|
||||
runasextusers:
|
||||
- root
|
||||
sudoopt:
|
||||
- '!authenticate'
|
||||
usergroup:
|
||||
@@ -183,6 +191,12 @@ class SudoRuleIPAClient(IPAClient):
|
||||
def sudorule_add(self, name, item):
|
||||
return self._post_json(method='sudorule_add', name=name, item=item)
|
||||
|
||||
def sudorule_add_runasuser(self, name, item):
|
||||
return self._post_json(method='sudorule_add_runasuser', name=name, item={'user': item})
|
||||
|
||||
def sudorule_remove_runasuser(self, name, item):
|
||||
return self._post_json(method='sudorule_remove_runasuser', name=name, item={'user': item})
|
||||
|
||||
def sudorule_mod(self, name, item):
|
||||
return self._post_json(method='sudorule_mod', name=name, item=item)
|
||||
|
||||
@@ -287,6 +301,7 @@ def ensure(module, client):
|
||||
hostgroup = module.params['hostgroup']
|
||||
runasusercategory = module.params['runasusercategory']
|
||||
runasgroupcategory = module.params['runasgroupcategory']
|
||||
runasextusers = module.params['runasextusers']
|
||||
|
||||
if state in ['present', 'enabled']:
|
||||
ipaenabledflag = 'TRUE'
|
||||
@@ -371,6 +386,21 @@ def ensure(module, client):
|
||||
for item in diff:
|
||||
client.sudorule_add_option_ipasudoopt(name, item)
|
||||
|
||||
if runasextusers is not None:
|
||||
ipa_sudorule_run_as_user = ipa_sudorule.get('ipasudorunasextuser', [])
|
||||
diff = list(set(ipa_sudorule_run_as_user) - set(runasextusers))
|
||||
if len(diff) > 0:
|
||||
changed = True
|
||||
if not module.check_mode:
|
||||
for item in diff:
|
||||
client.sudorule_remove_runasuser(name=name, item=item)
|
||||
diff = list(set(runasextusers) - set(ipa_sudorule_run_as_user))
|
||||
if len(diff) > 0:
|
||||
changed = True
|
||||
if not module.check_mode:
|
||||
for item in diff:
|
||||
client.sudorule_add_runasuser(name=name, item=item)
|
||||
|
||||
if user is not None:
|
||||
changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed
|
||||
changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_user', []), user,
|
||||
@@ -406,8 +436,8 @@ def main():
|
||||
state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
|
||||
user=dict(type='list', elements='str'),
|
||||
usercategory=dict(type='str', choices=['all']),
|
||||
usergroup=dict(type='list', elements='str'))
|
||||
|
||||
usergroup=dict(type='list', elements='str'),
|
||||
runasextusers=dict(type='list', elements='str'))
|
||||
module = AnsibleModule(argument_spec=argument_spec,
|
||||
mutually_exclusive=[['cmdcategory', 'cmd'],
|
||||
['cmdcategory', 'cmdgroup'],
|
||||
|
||||
1
plugins/modules/jenkins_build.py
Symbolic link
1
plugins/modules/jenkins_build.py
Symbolic link
@@ -0,0 +1 @@
|
||||
./web_infrastructure/jenkins_build.py
|
||||
@@ -183,7 +183,7 @@ def main():
|
||||
|
||||
request_url = url + '/data/events/deployments/start'
|
||||
else:
|
||||
message = module.params['message']
|
||||
message = module.params['deployment_message']
|
||||
if message is not None:
|
||||
body['errorMessage'] = message
|
||||
|
||||
|
||||
@@ -56,6 +56,7 @@ options:
|
||||
- If not specified, it defaults to the remote system's hostname.
|
||||
tags:
|
||||
type: list
|
||||
elements: str
|
||||
description: ["Comma separated list of tags to apply to the event."]
|
||||
alert_type:
|
||||
type: str
|
||||
@@ -114,17 +115,12 @@ def main():
|
||||
app_key=dict(required=True, no_log=True),
|
||||
title=dict(required=True),
|
||||
text=dict(required=True),
|
||||
date_happened=dict(required=False, default=None, type='int'),
|
||||
priority=dict(
|
||||
required=False, default='normal', choices=['normal', 'low']
|
||||
),
|
||||
host=dict(required=False, default=None),
|
||||
tags=dict(required=False, default=None, type='list'),
|
||||
alert_type=dict(
|
||||
required=False, default='info',
|
||||
choices=['error', 'warning', 'info', 'success']
|
||||
),
|
||||
aggregation_key=dict(required=False, default=None),
|
||||
date_happened=dict(type='int'),
|
||||
priority=dict(default='normal', choices=['normal', 'low']),
|
||||
host=dict(),
|
||||
tags=dict(type='list', elements='str'),
|
||||
alert_type=dict(default='info', choices=['error', 'warning', 'info', 'success']),
|
||||
aggregation_key=dict(no_log=False),
|
||||
validate_certs=dict(default=True, type='bool'),
|
||||
)
|
||||
)
|
||||
|
||||
@@ -46,6 +46,7 @@ options:
|
||||
- A list of tags to associate with your monitor when creating or updating.
|
||||
- This can help you categorize and filter monitors.
|
||||
type: list
|
||||
elements: str
|
||||
type:
|
||||
description:
|
||||
- The type of the monitor.
|
||||
@@ -206,31 +207,30 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
api_key=dict(required=True, no_log=True),
|
||||
api_host=dict(required=False),
|
||||
api_host=dict(),
|
||||
app_key=dict(required=True, no_log=True),
|
||||
state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']),
|
||||
type=dict(required=False, choices=['metric alert', 'service check', 'event alert',
|
||||
'process alert', 'log alert', 'query alert',
|
||||
'trace-analytics alert', 'rum alert']),
|
||||
type=dict(choices=['metric alert', 'service check', 'event alert', 'process alert',
|
||||
'log alert', 'query alert', 'trace-analytics alert', 'rum alert']),
|
||||
name=dict(required=True),
|
||||
query=dict(required=False),
|
||||
notification_message=dict(required=False, no_log=True, default=None, aliases=['message'],
|
||||
query=dict(),
|
||||
notification_message=dict(no_log=True, aliases=['message'],
|
||||
deprecated_aliases=[dict(name='message', version='3.0.0',
|
||||
collection_name='community.general')]), # was Ansible 2.14
|
||||
silenced=dict(required=False, default=None, type='dict'),
|
||||
notify_no_data=dict(required=False, default=False, type='bool'),
|
||||
no_data_timeframe=dict(required=False, default=None),
|
||||
timeout_h=dict(required=False, default=None),
|
||||
renotify_interval=dict(required=False, default=None),
|
||||
escalation_message=dict(required=False, default=None),
|
||||
notify_audit=dict(required=False, default=False, type='bool'),
|
||||
thresholds=dict(required=False, type='dict', default=None),
|
||||
tags=dict(required=False, type='list', default=None),
|
||||
locked=dict(required=False, default=False, type='bool'),
|
||||
require_full_window=dict(required=False, default=None, type='bool'),
|
||||
new_host_delay=dict(required=False, default=None),
|
||||
evaluation_delay=dict(required=False, default=None),
|
||||
id=dict(required=False),
|
||||
silenced=dict(type='dict'),
|
||||
notify_no_data=dict(default=False, type='bool'),
|
||||
no_data_timeframe=dict(),
|
||||
timeout_h=dict(),
|
||||
renotify_interval=dict(),
|
||||
escalation_message=dict(),
|
||||
notify_audit=dict(default=False, type='bool'),
|
||||
thresholds=dict(type='dict', default=None),
|
||||
tags=dict(type='list', elements='str', default=None),
|
||||
locked=dict(default=False, type='bool'),
|
||||
require_full_window=dict(type='bool'),
|
||||
new_host_delay=dict(),
|
||||
evaluation_delay=dict(),
|
||||
id=dict(),
|
||||
include_tags=dict(required=False, default=True, type='bool'),
|
||||
)
|
||||
)
|
||||
|
||||
@@ -75,6 +75,7 @@ options:
|
||||
description:
|
||||
- Name used to create / delete the host. This does not need to be the FQDN, but does needs to be unique.
|
||||
required: true
|
||||
aliases: [host]
|
||||
zone:
|
||||
type: str
|
||||
description:
|
||||
|
||||
@@ -205,7 +205,7 @@ def main():
|
||||
client=dict(required=False, default=None),
|
||||
client_url=dict(required=False, default=None),
|
||||
desc=dict(required=False, default='Created via Ansible'),
|
||||
incident_key=dict(required=False, default=None)
|
||||
incident_key=dict(required=False, default=None, no_log=False)
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
@@ -49,11 +49,13 @@ options:
|
||||
- Path to the sensu check to run (not required when I(state=absent))
|
||||
handlers:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- List of handlers to notify when the check fails
|
||||
default: []
|
||||
subscribers:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- List of subscribers/channels this check should run for
|
||||
- See sensu_subscribers to subscribe a machine to a channel
|
||||
@@ -86,9 +88,9 @@ options:
|
||||
- When to enable handling of check failures
|
||||
dependencies:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- Other checks this check depends on, if dependencies fail,
|
||||
- handling of this check will be disabled
|
||||
- Other checks this check depends on, if dependencies fail handling of this check will be disabled
|
||||
default: []
|
||||
metric:
|
||||
description:
|
||||
@@ -327,15 +329,15 @@ def main():
|
||||
'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
|
||||
'backup': {'type': 'bool', 'default': 'no'},
|
||||
'command': {'type': 'str'},
|
||||
'handlers': {'type': 'list'},
|
||||
'subscribers': {'type': 'list'},
|
||||
'handlers': {'type': 'list', 'elements': 'str'},
|
||||
'subscribers': {'type': 'list', 'elements': 'str'},
|
||||
'interval': {'type': 'int'},
|
||||
'timeout': {'type': 'int'},
|
||||
'ttl': {'type': 'int'},
|
||||
'handle': {'type': 'bool'},
|
||||
'subdue_begin': {'type': 'str'},
|
||||
'subdue_end': {'type': 'str'},
|
||||
'dependencies': {'type': 'list'},
|
||||
'dependencies': {'type': 'list', 'elements': 'str'},
|
||||
'metric': {'type': 'bool', 'default': 'no'},
|
||||
'standalone': {'type': 'bool'},
|
||||
'publish': {'type': 'bool'},
|
||||
|
||||
@@ -33,6 +33,7 @@ options:
|
||||
- If not specified it defaults to non-loopback IPv4 address as determined by Ruby Socket.ip_address_list (provided by Sensu).
|
||||
subscriptions:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- An array of client subscriptions, a list of roles and/or responsibilities assigned to the system (e.g. webserver).
|
||||
- These subscriptions determine which monitoring checks are executed by the client, as check requests are sent to subscriptions.
|
||||
@@ -44,6 +45,7 @@ options:
|
||||
default: 'no'
|
||||
redact:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- Client definition attributes to redact (values) when logging and sending client keepalives.
|
||||
socket:
|
||||
@@ -160,22 +162,22 @@ def main():
|
||||
module = AnsibleModule(
|
||||
supports_check_mode=True,
|
||||
argument_spec=dict(
|
||||
state=dict(type='str', required=False, choices=['present', 'absent'], default='present'),
|
||||
name=dict(type='str', required=False),
|
||||
address=dict(type='str', required=False),
|
||||
subscriptions=dict(type='list', required=False),
|
||||
safe_mode=dict(type='bool', required=False, default=False),
|
||||
redact=dict(type='list', required=False),
|
||||
socket=dict(type='dict', required=False),
|
||||
keepalives=dict(type='bool', required=False, default=True),
|
||||
keepalive=dict(type='dict', required=False),
|
||||
registration=dict(type='dict', required=False),
|
||||
deregister=dict(type='bool', required=False),
|
||||
deregistration=dict(type='dict', required=False),
|
||||
ec2=dict(type='dict', required=False),
|
||||
chef=dict(type='dict', required=False),
|
||||
puppet=dict(type='dict', required=False),
|
||||
servicenow=dict(type='dict', required=False)
|
||||
state=dict(type='str', choices=['present', 'absent'], default='present'),
|
||||
name=dict(type='str', ),
|
||||
address=dict(type='str', ),
|
||||
subscriptions=dict(type='list', elements="str"),
|
||||
safe_mode=dict(type='bool', default=False),
|
||||
redact=dict(type='list', elements="str"),
|
||||
socket=dict(type='dict'),
|
||||
keepalives=dict(type='bool', default=True),
|
||||
keepalive=dict(type='dict'),
|
||||
registration=dict(type='dict'),
|
||||
deregister=dict(type='bool'),
|
||||
deregistration=dict(type='dict'),
|
||||
ec2=dict(type='dict'),
|
||||
chef=dict(type='dict'),
|
||||
puppet=dict(type='dict'),
|
||||
servicenow=dict(type='dict')
|
||||
),
|
||||
required_if=[
|
||||
['state', 'present', ['subscriptions']]
|
||||
|
||||
@@ -37,11 +37,13 @@ options:
|
||||
- The Sensu event filter (name) to use when filtering events for the handler.
|
||||
filters:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- An array of Sensu event filters (names) to use when filtering events for the handler.
|
||||
- Each array item must be a string.
|
||||
severities:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- An array of check result severities the handler will handle.
|
||||
- 'NOTE: event resolution bypasses this filtering.'
|
||||
@@ -84,9 +86,9 @@ options:
|
||||
- 'NOTE: the pipe attribute is only required for Transport handlers (i.e. handlers configured with "type": "transport").'
|
||||
handlers:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- An array of Sensu event handlers (names) to use for events using the handler set.
|
||||
- Each array item must be a string.
|
||||
- 'NOTE: the handlers attribute is only required for handler sets (i.e. handlers configured with "type": "set").'
|
||||
notes:
|
||||
- Check mode is supported
|
||||
@@ -165,20 +167,20 @@ def main():
|
||||
module = AnsibleModule(
|
||||
supports_check_mode=True,
|
||||
argument_spec=dict(
|
||||
state=dict(type='str', required=False, choices=['present', 'absent'], default='present'),
|
||||
state=dict(type='str', choices=['present', 'absent'], default='present'),
|
||||
name=dict(type='str', required=True),
|
||||
type=dict(type='str', required=False, choices=['pipe', 'tcp', 'udp', 'transport', 'set']),
|
||||
filter=dict(type='str', required=False),
|
||||
filters=dict(type='list', required=False),
|
||||
severities=dict(type='list', required=False),
|
||||
mutator=dict(type='str', required=False),
|
||||
timeout=dict(type='int', required=False, default=10),
|
||||
handle_silenced=dict(type='bool', required=False, default=False),
|
||||
handle_flapping=dict(type='bool', required=False, default=False),
|
||||
command=dict(type='str', required=False),
|
||||
socket=dict(type='dict', required=False),
|
||||
pipe=dict(type='dict', required=False),
|
||||
handlers=dict(type='list', required=False),
|
||||
type=dict(type='str', choices=['pipe', 'tcp', 'udp', 'transport', 'set']),
|
||||
filter=dict(type='str'),
|
||||
filters=dict(type='list', elements='str'),
|
||||
severities=dict(type='list', elements='str'),
|
||||
mutator=dict(type='str'),
|
||||
timeout=dict(type='int', default=10),
|
||||
handle_silenced=dict(type='bool', default=False),
|
||||
handle_flapping=dict(type='bool', default=False),
|
||||
command=dict(type='str'),
|
||||
socket=dict(type='dict'),
|
||||
pipe=dict(type='dict'),
|
||||
handlers=dict(type='list', elements='str'),
|
||||
),
|
||||
required_if=[
|
||||
['state', 'present', ['type']],
|
||||
|
||||
@@ -800,7 +800,7 @@ def main():
|
||||
algorithm=dict(type='int'),
|
||||
cert_usage=dict(type='int', choices=[0, 1, 2, 3]),
|
||||
hash_type=dict(type='int', choices=[1, 2]),
|
||||
key_tag=dict(type='int'),
|
||||
key_tag=dict(type='int', no_log=False),
|
||||
port=dict(type='int'),
|
||||
priority=dict(type='int', default=1),
|
||||
proto=dict(type='str'),
|
||||
|
||||
187
plugins/modules/net_tools/gandi_livedns.py
Normal file
187
plugins/modules/net_tools/gandi_livedns.py
Normal file
@@ -0,0 +1,187 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2019 Gregory Thiemonge <gregory.thiemonge@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: gandi_livedns
|
||||
author:
|
||||
- Gregory Thiemonge (@gthiemonge)
|
||||
version_added: "2.3.0"
|
||||
short_description: Manage Gandi LiveDNS records
|
||||
description:
|
||||
- "Manages DNS records by the Gandi LiveDNS API, see the docs: U(https://doc.livedns.gandi.net/)."
|
||||
options:
|
||||
api_key:
|
||||
description:
|
||||
- Account API token.
|
||||
type: str
|
||||
required: true
|
||||
record:
|
||||
description:
|
||||
- Record to add.
|
||||
type: str
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Whether the record(s) should exist or not.
|
||||
type: str
|
||||
choices: [ absent, present ]
|
||||
default: present
|
||||
ttl:
|
||||
description:
|
||||
- The TTL to give the new record.
|
||||
- Required when I(state=present).
|
||||
type: int
|
||||
type:
|
||||
description:
|
||||
- The type of DNS record to create.
|
||||
type: str
|
||||
required: true
|
||||
values:
|
||||
description:
|
||||
- The record values.
|
||||
- Required when I(state=present).
|
||||
type: list
|
||||
elements: str
|
||||
domain:
|
||||
description:
|
||||
- The name of the Domain to work with (for example, "example.com").
|
||||
required: true
|
||||
type: str
|
||||
notes:
|
||||
- Supports C(check_mode).
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Create a test A record to point to 127.0.0.1 in the my.com domain
|
||||
community.general.gandi_livedns:
|
||||
domain: my.com
|
||||
record: test
|
||||
type: A
|
||||
values:
|
||||
- 127.0.0.1
|
||||
ttl: 7200
|
||||
api_key: dummyapitoken
|
||||
register: record
|
||||
|
||||
- name: Create a mail CNAME record to www.my.com domain
|
||||
community.general.gandi_livedns:
|
||||
domain: my.com
|
||||
type: CNAME
|
||||
record: mail
|
||||
values:
|
||||
- www
|
||||
ttl: 7200
|
||||
api_key: dummyapitoken
|
||||
state: present
|
||||
|
||||
- name: Change its TTL
|
||||
community.general.gandi_livedns:
|
||||
domain: my.com
|
||||
type: CNAME
|
||||
record: mail
|
||||
values:
|
||||
- www
|
||||
ttl: 10800
|
||||
api_key: dummyapitoken
|
||||
state: present
|
||||
|
||||
- name: Delete the record
|
||||
community.general.gandi_livedns:
|
||||
domain: my.com
|
||||
type: CNAME
|
||||
record: mail
|
||||
api_key: dummyapitoken
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
record:
|
||||
description: A dictionary containing the record data.
|
||||
returned: success, except on record deletion
|
||||
type: dict
|
||||
contains:
|
||||
values:
|
||||
description: The record content (details depend on record type).
|
||||
returned: success
|
||||
type: list
|
||||
elements: str
|
||||
sample:
|
||||
- 192.0.2.91
|
||||
- 192.0.2.92
|
||||
record:
|
||||
description: The record name.
|
||||
returned: success
|
||||
type: str
|
||||
sample: www
|
||||
ttl:
|
||||
description: The time-to-live for the record.
|
||||
returned: success
|
||||
type: int
|
||||
sample: 300
|
||||
type:
|
||||
description: The record type.
|
||||
returned: success
|
||||
type: str
|
||||
sample: A
|
||||
domain:
|
||||
description: The domain associated with the record.
|
||||
returned: success
|
||||
type: str
|
||||
sample: my.com
|
||||
'''
|
||||
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.gandi_livedns_api import GandiLiveDNSAPI
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
api_key=dict(type='str', required=True, no_log=True),
|
||||
record=dict(type='str', required=True),
|
||||
state=dict(type='str', default='present', choices=['absent', 'present']),
|
||||
ttl=dict(type='int'),
|
||||
type=dict(type='str', required=True),
|
||||
values=dict(type='list', elements='str'),
|
||||
domain=dict(type='str', required=True),
|
||||
),
|
||||
supports_check_mode=True,
|
||||
required_if=[
|
||||
('state', 'present', ['values', 'ttl']),
|
||||
],
|
||||
)
|
||||
|
||||
gandi_api = GandiLiveDNSAPI(module)
|
||||
|
||||
if module.params['state'] == 'present':
|
||||
ret, changed = gandi_api.ensure_dns_record(module.params['record'],
|
||||
module.params['type'],
|
||||
module.params['ttl'],
|
||||
module.params['values'],
|
||||
module.params['domain'])
|
||||
else:
|
||||
ret, changed = gandi_api.delete_dns_record(module.params['record'],
|
||||
module.params['type'],
|
||||
module.params['values'],
|
||||
module.params['domain'])
|
||||
|
||||
result = dict(
|
||||
changed=changed,
|
||||
)
|
||||
if ret:
|
||||
result['record'] = gandi_api.build_result(ret,
|
||||
module.params['domain'])
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -69,6 +69,7 @@ options:
|
||||
description:
|
||||
- The IPv4 address to this interface.
|
||||
- Use the format C(192.0.2.24/24).
|
||||
- If defined and I(method4) is not specified, automatically set C(ipv4.method) to C(manual).
|
||||
type: str
|
||||
gw4:
|
||||
description:
|
||||
@@ -106,10 +107,18 @@ options:
|
||||
- A list of DNS search domains.
|
||||
elements: str
|
||||
type: list
|
||||
method4:
|
||||
description:
|
||||
- Configuration method to be used for IPv4.
|
||||
- If I(ip4) is set, C(ipv4.method) is automatically set to C(manual) and this parameter is not needed.
|
||||
type: str
|
||||
choices: [auto, link-local, manual, shared, disabled]
|
||||
version_added: 2.2.0
|
||||
ip6:
|
||||
description:
|
||||
- The IPv6 address to this interface.
|
||||
- Use the format C(abbe::cafe).
|
||||
- If defined and I(method6) is not specified, automatically set C(ipv6.method) to C(manual).
|
||||
type: str
|
||||
gw6:
|
||||
description:
|
||||
@@ -127,6 +136,13 @@ options:
|
||||
- A list of DNS search domains.
|
||||
elements: str
|
||||
type: list
|
||||
method6:
|
||||
description:
|
||||
- Configuration method to be used for IPv6
|
||||
- If I(ip6) is set, C(ipv6.method) is automatically set to C(manual) and this parameter is not needed.
|
||||
type: str
|
||||
choices: [ignore, auto, dhcp, link-local, manual, shared]
|
||||
version_added: 2.2.0
|
||||
mtu:
|
||||
description:
|
||||
- The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created.
|
||||
@@ -611,10 +627,12 @@ class Nmcli(object):
|
||||
self.never_default4 = module.params['never_default4']
|
||||
self.dns4 = module.params['dns4']
|
||||
self.dns4_search = module.params['dns4_search']
|
||||
self.method4 = module.params['method4']
|
||||
self.ip6 = module.params['ip6']
|
||||
self.gw6 = module.params['gw6']
|
||||
self.dns6 = module.params['dns6']
|
||||
self.dns6_search = module.params['dns6_search']
|
||||
self.method6 = module.params['method6']
|
||||
self.mtu = module.params['mtu']
|
||||
self.stp = module.params['stp']
|
||||
self.priority = module.params['priority']
|
||||
@@ -648,18 +666,18 @@ class Nmcli(object):
|
||||
self.dhcp_client_id = module.params['dhcp_client_id']
|
||||
self.zone = module.params['zone']
|
||||
|
||||
if self.ip4:
|
||||
if self.method4:
|
||||
self.ipv4_method = self.method4
|
||||
elif self.ip4:
|
||||
self.ipv4_method = 'manual'
|
||||
else:
|
||||
# supported values for 'ipv4.method': [auto, link-local, manual, shared, disabled]
|
||||
# TODO: add a new module parameter to specify a non 'manual' value
|
||||
self.ipv4_method = None
|
||||
|
||||
if self.ip6:
|
||||
if self.method6:
|
||||
self.ipv6_method = self.method6
|
||||
elif self.ip6:
|
||||
self.ipv6_method = 'manual'
|
||||
else:
|
||||
# supported values for 'ipv6.method': [ignore, auto, dhcp, link-local, manual, shared]
|
||||
# TODO: add a new module parameter to specify a non 'manual' value
|
||||
self.ipv6_method = None
|
||||
|
||||
def execute_command(self, cmd, use_unsafe_shell=False, data=None):
|
||||
@@ -719,6 +737,10 @@ class Nmcli(object):
|
||||
'primary': self.primary,
|
||||
'updelay': self.updelay,
|
||||
})
|
||||
elif self.type == 'bond-slave':
|
||||
options.update({
|
||||
'connection.slave-type': 'bond',
|
||||
})
|
||||
elif self.type == 'bridge':
|
||||
options.update({
|
||||
'bridge.ageing-time': self.ageingtime,
|
||||
@@ -1075,11 +1097,13 @@ def main():
|
||||
never_default4=dict(type='bool', default=False),
|
||||
dns4=dict(type='list', elements='str'),
|
||||
dns4_search=dict(type='list', elements='str'),
|
||||
method4=dict(type='str', choices=['auto', 'link-local', 'manual', 'shared', 'disabled']),
|
||||
dhcp_client_id=dict(type='str'),
|
||||
ip6=dict(type='str'),
|
||||
gw6=dict(type='str'),
|
||||
dns6=dict(type='list', elements='str'),
|
||||
dns6_search=dict(type='list', elements='str'),
|
||||
method6=dict(type='str', choices=['ignore', 'auto', 'dhcp', 'link-local', 'manual', 'shared']),
|
||||
# Bond Specific vars
|
||||
mode=dict(type='str', default='balance-rr',
|
||||
choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']),
|
||||
|
||||
343
plugins/modules/net_tools/pritunl/pritunl_user.py
Normal file
343
plugins/modules/net_tools/pritunl/pritunl_user.py
Normal file
@@ -0,0 +1,343 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2021, Florian Dambrine <android.florian@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: pritunl_user
|
||||
author: "Florian Dambrine (@Lowess)"
|
||||
version_added: 2.3.0
|
||||
short_description: Manage Pritunl Users using the Pritunl API
|
||||
description:
|
||||
- A module to manage Pritunl users using the Pritunl API.
|
||||
extends_documentation_fragment:
|
||||
- community.general.pritunl
|
||||
options:
|
||||
organization:
|
||||
type: str
|
||||
required: true
|
||||
aliases:
|
||||
- org
|
||||
description:
|
||||
- The name of the organization the user is part of.
|
||||
|
||||
state:
|
||||
type: str
|
||||
default: 'present'
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
description:
|
||||
- If C(present), the module adds user I(user_name) to
|
||||
the Pritunl I(organization). If C(absent), removes the user
|
||||
I(user_name) from the Pritunl I(organization).
|
||||
|
||||
user_name:
|
||||
type: str
|
||||
required: true
|
||||
default: null
|
||||
description:
|
||||
- Name of the user to create or delete from Pritunl.
|
||||
|
||||
user_email:
|
||||
type: str
|
||||
required: false
|
||||
default: null
|
||||
description:
|
||||
- Email address associated with the user I(user_name).
|
||||
|
||||
user_type:
|
||||
type: str
|
||||
required: false
|
||||
default: client
|
||||
choices:
|
||||
- client
|
||||
- server
|
||||
description:
|
||||
- Type of the user I(user_name).
|
||||
|
||||
user_groups:
|
||||
type: list
|
||||
elements: str
|
||||
required: false
|
||||
default: null
|
||||
description:
|
||||
- List of groups associated with the user I(user_name).
|
||||
|
||||
user_disabled:
|
||||
type: bool
|
||||
required: false
|
||||
default: null
|
||||
description:
|
||||
- Enable/Disable the user I(user_name).
|
||||
|
||||
user_gravatar:
|
||||
type: bool
|
||||
required: false
|
||||
default: null
|
||||
description:
|
||||
- Enable/Disable Gravatar usage for the user I(user_name).
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
- name: Create the user Foo with email address foo@bar.com in MyOrg
|
||||
community.general.pritunl_user:
|
||||
state: present
|
||||
name: MyOrg
|
||||
user_name: Foo
|
||||
user_email: foo@bar.com
|
||||
|
||||
- name: Disable the user Foo but keep it in Pritunl
|
||||
community.general.pritunl_user:
|
||||
state: present
|
||||
name: MyOrg
|
||||
user_name: Foo
|
||||
user_email: foo@bar.com
|
||||
user_disabled: yes
|
||||
|
||||
- name: Make sure the user Foo is not part of MyOrg anymore
|
||||
community.general.pritunl_user:
|
||||
state: absent
|
||||
name: MyOrg
|
||||
user_name: Foo
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
response:
|
||||
description: JSON representation of Pritunl Users.
|
||||
returned: success
|
||||
type: dict
|
||||
sample:
|
||||
{
|
||||
"audit": false,
|
||||
"auth_type": "google",
|
||||
"bypass_secondary": false,
|
||||
"client_to_client": false,
|
||||
"disabled": false,
|
||||
"dns_mapping": null,
|
||||
"dns_servers": null,
|
||||
"dns_suffix": null,
|
||||
"email": "foo@bar.com",
|
||||
"gravatar": true,
|
||||
"groups": [
|
||||
"foo", "bar"
|
||||
],
|
||||
"id": "5d070dafe63q3b2e6s472c3b",
|
||||
"name": "foo@acme.com",
|
||||
"network_links": [],
|
||||
"organization": "58070daee6sf342e6e4s2c36",
|
||||
"organization_name": "Acme",
|
||||
"otp_auth": true,
|
||||
"otp_secret": "35H5EJA3XB2$4CWG",
|
||||
"pin": false,
|
||||
"port_forwarding": [],
|
||||
"servers": [],
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.common.dict_transformations import dict_merge
|
||||
from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import (
|
||||
PritunlException,
|
||||
delete_pritunl_user,
|
||||
get_pritunl_settings,
|
||||
list_pritunl_organizations,
|
||||
list_pritunl_users,
|
||||
post_pritunl_user,
|
||||
pritunl_argument_spec,
|
||||
)
|
||||
|
||||
|
||||
def add_or_update_pritunl_user(module):
|
||||
result = {}
|
||||
|
||||
org_name = module.params.get("organization")
|
||||
user_name = module.params.get("user_name")
|
||||
|
||||
user_params = {
|
||||
"name": user_name,
|
||||
"email": module.params.get("user_email"),
|
||||
"groups": module.params.get("user_groups"),
|
||||
"disabled": module.params.get("user_disabled"),
|
||||
"gravatar": module.params.get("user_gravatar"),
|
||||
"type": module.params.get("user_type"),
|
||||
}
|
||||
|
||||
org_obj_list = list_pritunl_organizations(
|
||||
**dict_merge(
|
||||
get_pritunl_settings(module),
|
||||
{"filters": {"name": org_name}},
|
||||
)
|
||||
)
|
||||
|
||||
if len(org_obj_list) == 0:
|
||||
module.fail_json(
|
||||
msg="Can not add user to organization '%s' which does not exist" % org_name
|
||||
)
|
||||
|
||||
org_id = org_obj_list[0]["id"]
|
||||
|
||||
# Grab existing users from this org
|
||||
users = list_pritunl_users(
|
||||
**dict_merge(
|
||||
get_pritunl_settings(module),
|
||||
{
|
||||
"organization_id": org_id,
|
||||
"filters": {"name": user_name},
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
# Check if the pritunl user already exists
|
||||
if len(users) > 0:
|
||||
# Compare remote user params with local user_params and trigger update if needed
|
||||
user_params_changed = False
|
||||
for key in user_params.keys():
|
||||
# When a param is not specified grab existing ones to prevent from changing it with the PUT request
|
||||
if user_params[key] is None:
|
||||
user_params[key] = users[0][key]
|
||||
|
||||
# 'groups' is a list comparison
|
||||
if key == "groups":
|
||||
if set(users[0][key]) != set(user_params[key]):
|
||||
user_params_changed = True
|
||||
|
||||
# otherwise it is either a boolean or a string
|
||||
else:
|
||||
if users[0][key] != user_params[key]:
|
||||
user_params_changed = True
|
||||
|
||||
# Trigger a PUT on the API to update the current user if settings have changed
|
||||
if user_params_changed:
|
||||
response = post_pritunl_user(
|
||||
**dict_merge(
|
||||
get_pritunl_settings(module),
|
||||
{
|
||||
"organization_id": org_id,
|
||||
"user_id": users[0]["id"],
|
||||
"user_data": user_params,
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
result["changed"] = True
|
||||
result["response"] = response
|
||||
else:
|
||||
result["changed"] = False
|
||||
result["response"] = users
|
||||
else:
|
||||
response = post_pritunl_user(
|
||||
**dict_merge(
|
||||
get_pritunl_settings(module),
|
||||
{
|
||||
"organization_id": org_id,
|
||||
"user_data": user_params,
|
||||
},
|
||||
)
|
||||
)
|
||||
result["changed"] = True
|
||||
result["response"] = response
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
def remove_pritunl_user(module):
|
||||
result = {}
|
||||
|
||||
org_name = module.params.get("organization")
|
||||
user_name = module.params.get("user_name")
|
||||
|
||||
org_obj_list = []
|
||||
|
||||
org_obj_list = list_pritunl_organizations(
|
||||
**dict_merge(
|
||||
get_pritunl_settings(module),
|
||||
{
|
||||
"filters": {"name": org_name},
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
if len(org_obj_list) == 0:
|
||||
module.fail_json(
|
||||
msg="Can not remove user '%s' from a non existing organization '%s'"
|
||||
% (user_name, org_name)
|
||||
)
|
||||
|
||||
org_id = org_obj_list[0]["id"]
|
||||
|
||||
# Grab existing users from this org
|
||||
users = list_pritunl_users(
|
||||
**dict_merge(
|
||||
get_pritunl_settings(module),
|
||||
{
|
||||
"organization_id": org_id,
|
||||
"filters": {"name": user_name},
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
# Check if the pritunl user exists, if not, do nothing
|
||||
if len(users) == 0:
|
||||
result["changed"] = False
|
||||
result["response"] = {}
|
||||
|
||||
# Otherwise remove the org from Pritunl
|
||||
else:
|
||||
response = delete_pritunl_user(
|
||||
**dict_merge(
|
||||
get_pritunl_settings(module),
|
||||
{
|
||||
"organization_id": org_id,
|
||||
"user_id": users[0]["id"],
|
||||
},
|
||||
)
|
||||
)
|
||||
result["changed"] = True
|
||||
result["response"] = response
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = pritunl_argument_spec()
|
||||
|
||||
argument_spec.update(
|
||||
dict(
|
||||
organization=dict(required=True, type="str", aliases=["org"]),
|
||||
state=dict(
|
||||
required=False, choices=["present", "absent"], default="present"
|
||||
),
|
||||
user_name=dict(required=True, type="str"),
|
||||
user_type=dict(
|
||||
required=False, choices=["client", "server"], default="client"
|
||||
),
|
||||
user_email=dict(required=False, type="str", default=None),
|
||||
user_groups=dict(required=False, type="list", elements="str", default=None),
|
||||
user_disabled=dict(required=False, type="bool", default=None),
|
||||
user_gravatar=dict(required=False, type="bool", default=None),
|
||||
)
|
||||
),
|
||||
|
||||
module = AnsibleModule(argument_spec=argument_spec)
|
||||
|
||||
state = module.params.get("state")
|
||||
|
||||
try:
|
||||
if state == "present":
|
||||
add_or_update_pritunl_user(module)
|
||||
elif state == "absent":
|
||||
remove_pritunl_user(module)
|
||||
except PritunlException as e:
|
||||
module.fail_json(msg=to_native(e))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
171
plugins/modules/net_tools/pritunl/pritunl_user_info.py
Normal file
171
plugins/modules/net_tools/pritunl/pritunl_user_info.py
Normal file
@@ -0,0 +1,171 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2021, Florian Dambrine <android.florian@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: pritunl_user_info
|
||||
author: "Florian Dambrine (@Lowess)"
|
||||
version_added: 2.3.0
|
||||
short_description: List Pritunl Users using the Pritunl API
|
||||
description:
|
||||
- A module to list Pritunl users using the Pritunl API.
|
||||
extends_documentation_fragment:
|
||||
- community.general.pritunl
|
||||
options:
|
||||
organization:
|
||||
type: str
|
||||
required: true
|
||||
aliases:
|
||||
- org
|
||||
description:
|
||||
- The name of the organization the user is part of.
|
||||
|
||||
user_name:
|
||||
type: str
|
||||
required: false
|
||||
description:
|
||||
- Name of the user to filter on Pritunl.
|
||||
|
||||
user_type:
|
||||
type: str
|
||||
required: false
|
||||
default: client
|
||||
choices:
|
||||
- client
|
||||
- server
|
||||
description:
|
||||
- Type of the user I(user_name).
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
- name: List all existing users part of the organization MyOrg
|
||||
community.general.pritunl_user_info:
|
||||
state: list
|
||||
organization: MyOrg
|
||||
|
||||
- name: Search for the user named Florian part of the organization MyOrg
|
||||
community.general.pritunl_user_info:
|
||||
state: list
|
||||
organization: MyOrg
|
||||
user_name: Florian
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
users:
|
||||
description: List of Pritunl users.
|
||||
returned: success
|
||||
type: list
|
||||
elements: dict
|
||||
sample:
|
||||
[
|
||||
{
|
||||
"audit": false,
|
||||
"auth_type": "google",
|
||||
"bypass_secondary": false,
|
||||
"client_to_client": false,
|
||||
"disabled": false,
|
||||
"dns_mapping": null,
|
||||
"dns_servers": null,
|
||||
"dns_suffix": null,
|
||||
"email": "foo@bar.com",
|
||||
"gravatar": true,
|
||||
"groups": [
|
||||
"foo", "bar"
|
||||
],
|
||||
"id": "5d070dafe63q3b2e6s472c3b",
|
||||
"name": "foo@acme.com",
|
||||
"network_links": [],
|
||||
"organization": "58070daee6sf342e6e4s2c36",
|
||||
"organization_name": "Acme",
|
||||
"otp_auth": true,
|
||||
"otp_secret": "35H5EJA3XB2$4CWG",
|
||||
"pin": false,
|
||||
"port_forwarding": [],
|
||||
"servers": [],
|
||||
}
|
||||
]
|
||||
"""
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.common.dict_transformations import dict_merge
|
||||
from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import (
|
||||
PritunlException,
|
||||
get_pritunl_settings,
|
||||
list_pritunl_organizations,
|
||||
list_pritunl_users,
|
||||
pritunl_argument_spec,
|
||||
)
|
||||
|
||||
|
||||
def get_pritunl_user(module):
|
||||
user_name = module.params.get("user_name")
|
||||
user_type = module.params.get("user_type")
|
||||
org_name = module.params.get("organization")
|
||||
|
||||
org_obj_list = []
|
||||
|
||||
org_obj_list = list_pritunl_organizations(
|
||||
**dict_merge(get_pritunl_settings(module), {"filters": {"name": org_name}})
|
||||
)
|
||||
|
||||
if len(org_obj_list) == 0:
|
||||
module.fail_json(
|
||||
msg="Can not list users from the organization '%s' which does not exist"
|
||||
% org_name
|
||||
)
|
||||
|
||||
org_id = org_obj_list[0]["id"]
|
||||
|
||||
users = list_pritunl_users(
|
||||
**dict_merge(
|
||||
get_pritunl_settings(module),
|
||||
{
|
||||
"organization_id": org_id,
|
||||
"filters": (
|
||||
{"type": user_type}
|
||||
if user_name is None
|
||||
else {"name": user_name, "type": user_type}
|
||||
),
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
result = {}
|
||||
result["changed"] = False
|
||||
result["users"] = users
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = pritunl_argument_spec()
|
||||
|
||||
argument_spec.update(
|
||||
dict(
|
||||
organization=dict(required=True, type="str", aliases=["org"]),
|
||||
user_name=dict(required=False, type="str", default=None),
|
||||
user_type=dict(
|
||||
required=False,
|
||||
choices=["client", "server"],
|
||||
default="client",
|
||||
),
|
||||
)
|
||||
),
|
||||
|
||||
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
|
||||
|
||||
try:
|
||||
get_pritunl_user(module)
|
||||
except PritunlException as e:
|
||||
module.fail_json(msg=to_native(e))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -67,6 +67,16 @@ options:
|
||||
- Encryption key.
|
||||
- Required if I(level) is C(authPriv).
|
||||
type: str
|
||||
timeout:
|
||||
description:
|
||||
- Response timeout in seconds.
|
||||
type: int
|
||||
version_added: 2.3.0
|
||||
retries:
|
||||
description:
|
||||
- Maximum number of request retries, 0 retries means just a single request.
|
||||
type: int
|
||||
version_added: 2.3.0
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
@@ -271,6 +281,8 @@ def main():
|
||||
privacy=dict(type='str', choices=['aes', 'des']),
|
||||
authkey=dict(type='str', no_log=True),
|
||||
privkey=dict(type='str', no_log=True),
|
||||
timeout=dict(type='int'),
|
||||
retries=dict(type='int'),
|
||||
),
|
||||
required_together=(
|
||||
['username', 'level', 'integrity', 'authkey'],
|
||||
@@ -285,6 +297,7 @@ def main():
|
||||
module.fail_json(msg=missing_required_lib('pysnmp'), exception=PYSNMP_IMP_ERR)
|
||||
|
||||
cmdGen = cmdgen.CommandGenerator()
|
||||
transport_opts = dict((k, m_args[k]) for k in ('timeout', 'retries') if m_args[k] is not None)
|
||||
|
||||
# Verify that we receive a community when using snmp v2
|
||||
if m_args['version'] in ("v2", "v2c"):
|
||||
@@ -333,7 +346,7 @@ def main():
|
||||
|
||||
errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
|
||||
snmp_auth,
|
||||
cmdgen.UdpTransportTarget((m_args['host'], 161)),
|
||||
cmdgen.UdpTransportTarget((m_args['host'], 161), **transport_opts),
|
||||
cmdgen.MibVariable(p.sysDescr,),
|
||||
cmdgen.MibVariable(p.sysObjectId,),
|
||||
cmdgen.MibVariable(p.sysUpTime,),
|
||||
@@ -364,7 +377,7 @@ def main():
|
||||
|
||||
errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd(
|
||||
snmp_auth,
|
||||
cmdgen.UdpTransportTarget((m_args['host'], 161)),
|
||||
cmdgen.UdpTransportTarget((m_args['host'], 161), **transport_opts),
|
||||
cmdgen.MibVariable(p.ifIndex,),
|
||||
cmdgen.MibVariable(p.ifDescr,),
|
||||
cmdgen.MibVariable(p.ifMtu,),
|
||||
|
||||
@@ -27,11 +27,14 @@ options:
|
||||
- Name of the service (displayed as the "user" in the message)
|
||||
required: false
|
||||
default: ansible
|
||||
message:
|
||||
message_content:
|
||||
type: str
|
||||
description:
|
||||
- Message content
|
||||
- Message content.
|
||||
- The alias I(message) is deprecated and will be removed in community.general 4.0.0.
|
||||
required: true
|
||||
aliases:
|
||||
- message
|
||||
url:
|
||||
type: str
|
||||
description:
|
||||
@@ -92,7 +95,9 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
channel_token=dict(type='str', required=True, no_log=True),
|
||||
message=dict(type='str', required=True),
|
||||
message_content=dict(type='str', required=True, aliases=['message'],
|
||||
deprecated_aliases=[dict(name='message', version='4.0.0',
|
||||
collection_name='community.general')]),
|
||||
service=dict(type='str', default='ansible'),
|
||||
url=dict(type='str', default=None),
|
||||
icon_url=dict(type='str', default=None),
|
||||
@@ -102,7 +107,7 @@ def main():
|
||||
|
||||
channel_token = module.params['channel_token']
|
||||
service = module.params['service']
|
||||
message = module.params['message']
|
||||
message = module.params['message_content']
|
||||
url = module.params['url']
|
||||
icon_url = module.params['icon_url']
|
||||
|
||||
|
||||
@@ -48,6 +48,10 @@ options:
|
||||
type: str
|
||||
description:
|
||||
- Body of the notification, e.g. Details of the fault you're alerting.
|
||||
url:
|
||||
type: str
|
||||
description:
|
||||
- URL field, used when I(push_type) is C(link).
|
||||
|
||||
notes:
|
||||
- Requires pushbullet.py Python package on the remote host.
|
||||
|
||||
@@ -33,6 +33,7 @@ options:
|
||||
- If not specified, it will default to the temporary working directory
|
||||
exclude_groups:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- A list of Gemfile groups to exclude during operations. This only
|
||||
applies when state is C(present). Bundler considers this
|
||||
@@ -134,7 +135,7 @@ def main():
|
||||
executable=dict(default=None, required=False),
|
||||
state=dict(default='present', required=False, choices=['present', 'latest']),
|
||||
chdir=dict(default=None, required=False, type='path'),
|
||||
exclude_groups=dict(default=None, required=False, type='list'),
|
||||
exclude_groups=dict(default=None, required=False, type='list', elements='str'),
|
||||
clean=dict(default=False, required=False, type='bool'),
|
||||
gemfile=dict(default=None, required=False, type='path'),
|
||||
local=dict(default=False, required=False, type='bool'),
|
||||
|
||||
@@ -41,40 +41,47 @@ options:
|
||||
- Directory of your project (see --working-dir). This is required when
|
||||
the command is not run globally.
|
||||
- Will be ignored if C(global_command=true).
|
||||
- Alias C(working-dir) has been deprecated and will be removed in community.general 5.0.0.
|
||||
aliases: [ working-dir ]
|
||||
global_command:
|
||||
description:
|
||||
- Runs the specified command globally.
|
||||
- Alias C(global-command) has been deprecated and will be removed in community.general 5.0.0.
|
||||
type: bool
|
||||
default: false
|
||||
aliases: [ global-command ]
|
||||
prefer_source:
|
||||
description:
|
||||
- Forces installation from package sources when possible (see --prefer-source).
|
||||
- Alias C(prefer-source) has been deprecated and will be removed in community.general 5.0.0.
|
||||
default: false
|
||||
type: bool
|
||||
aliases: [ prefer-source ]
|
||||
prefer_dist:
|
||||
description:
|
||||
- Forces installation from package dist even for dev versions (see --prefer-dist).
|
||||
- Alias C(prefer-dist) has been deprecated and will be removed in community.general 5.0.0.
|
||||
default: false
|
||||
type: bool
|
||||
aliases: [ prefer-dist ]
|
||||
no_dev:
|
||||
description:
|
||||
- Disables installation of require-dev packages (see --no-dev).
|
||||
- Alias C(no-dev) has been deprecated and will be removed in community.general 5.0.0.
|
||||
default: true
|
||||
type: bool
|
||||
aliases: [ no-dev ]
|
||||
no_scripts:
|
||||
description:
|
||||
- Skips the execution of all scripts defined in composer.json (see --no-scripts).
|
||||
- Alias C(no-scripts) has been deprecated and will be removed in community.general 5.0.0.
|
||||
default: false
|
||||
type: bool
|
||||
aliases: [ no-scripts ]
|
||||
no_plugins:
|
||||
description:
|
||||
- Disables all plugins ( see --no-plugins ).
|
||||
- Alias C(no-plugins) has been deprecated and will be removed in community.general 5.0.0.
|
||||
default: false
|
||||
type: bool
|
||||
aliases: [ no-plugins ]
|
||||
@@ -83,6 +90,7 @@ options:
|
||||
- Optimize autoloader during autoloader dump (see --optimize-autoloader).
|
||||
- Convert PSR-0/4 autoloading to classmap to get a faster autoloader.
|
||||
- Recommended especially for production, but can take a bit of time to run.
|
||||
- Alias C(optimize-autoloader) has been deprecated and will be removed in community.general 5.0.0.
|
||||
default: true
|
||||
type: bool
|
||||
aliases: [ optimize-autoloader ]
|
||||
@@ -91,18 +99,21 @@ options:
|
||||
- Autoload classes from classmap only.
|
||||
- Implicitely enable optimize_autoloader.
|
||||
- Recommended especially for production, but can take a bit of time to run.
|
||||
- Alias C(classmap-authoritative) has been deprecated and will be removed in community.general 5.0.0.
|
||||
default: false
|
||||
type: bool
|
||||
aliases: [ classmap-authoritative ]
|
||||
apcu_autoloader:
|
||||
description:
|
||||
- Uses APCu to cache found/not-found classes
|
||||
- Alias C(apcu-autoloader) has been deprecated and will be removed in community.general 5.0.0.
|
||||
default: false
|
||||
type: bool
|
||||
aliases: [ apcu-autoloader ]
|
||||
ignore_platform_reqs:
|
||||
description:
|
||||
- Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these.
|
||||
- Alias C(ignore-platform-reqs) has been deprecated and will be removed in community.general 5.0.0.
|
||||
default: false
|
||||
type: bool
|
||||
aliases: [ ignore-platform-reqs ]
|
||||
@@ -187,17 +198,39 @@ def main():
|
||||
command=dict(default="install", type="str"),
|
||||
arguments=dict(default="", type="str"),
|
||||
executable=dict(type="path", aliases=["php_path"]),
|
||||
working_dir=dict(type="path", aliases=["working-dir"]),
|
||||
global_command=dict(default=False, type="bool", aliases=["global-command"]),
|
||||
prefer_source=dict(default=False, type="bool", aliases=["prefer-source"]),
|
||||
prefer_dist=dict(default=False, type="bool", aliases=["prefer-dist"]),
|
||||
no_dev=dict(default=True, type="bool", aliases=["no-dev"]),
|
||||
no_scripts=dict(default=False, type="bool", aliases=["no-scripts"]),
|
||||
no_plugins=dict(default=False, type="bool", aliases=["no-plugins"]),
|
||||
apcu_autoloader=dict(default=False, type="bool", aliases=["apcu-autoloader"]),
|
||||
optimize_autoloader=dict(default=True, type="bool", aliases=["optimize-autoloader"]),
|
||||
classmap_authoritative=dict(default=False, type="bool", aliases=["classmap-authoritative"]),
|
||||
ignore_platform_reqs=dict(default=False, type="bool", aliases=["ignore-platform-reqs"]),
|
||||
working_dir=dict(
|
||||
type="path", aliases=["working-dir"],
|
||||
deprecated_aliases=[dict(name='working-dir', version='5.0.0', collection_name='community.general')]),
|
||||
global_command=dict(
|
||||
default=False, type="bool", aliases=["global-command"],
|
||||
deprecated_aliases=[dict(name='global-command', version='5.0.0', collection_name='community.general')]),
|
||||
prefer_source=dict(
|
||||
default=False, type="bool", aliases=["prefer-source"],
|
||||
deprecated_aliases=[dict(name='prefer-source', version='5.0.0', collection_name='community.general')]),
|
||||
prefer_dist=dict(
|
||||
default=False, type="bool", aliases=["prefer-dist"],
|
||||
deprecated_aliases=[dict(name='prefer-dist', version='5.0.0', collection_name='community.general')]),
|
||||
no_dev=dict(
|
||||
default=True, type="bool", aliases=["no-dev"],
|
||||
deprecated_aliases=[dict(name='no-dev', version='5.0.0', collection_name='community.general')]),
|
||||
no_scripts=dict(
|
||||
default=False, type="bool", aliases=["no-scripts"],
|
||||
deprecated_aliases=[dict(name='no-scripts', version='5.0.0', collection_name='community.general')]),
|
||||
no_plugins=dict(
|
||||
default=False, type="bool", aliases=["no-plugins"],
|
||||
deprecated_aliases=[dict(name='no-plugins', version='5.0.0', collection_name='community.general')]),
|
||||
apcu_autoloader=dict(
|
||||
default=False, type="bool", aliases=["apcu-autoloader"],
|
||||
deprecated_aliases=[dict(name='apcu-autoloader', version='5.0.0', collection_name='community.general')]),
|
||||
optimize_autoloader=dict(
|
||||
default=True, type="bool", aliases=["optimize-autoloader"],
|
||||
deprecated_aliases=[dict(name='optimize-autoloader', version='5.0.0', collection_name='community.general')]),
|
||||
classmap_authoritative=dict(
|
||||
default=False, type="bool", aliases=["classmap-authoritative"],
|
||||
deprecated_aliases=[dict(name='classmap-authoritative', version='5.0.0', collection_name='community.general')]),
|
||||
ignore_platform_reqs=dict(
|
||||
default=False, type="bool", aliases=["ignore-platform-reqs"],
|
||||
deprecated_aliases=[dict(name='ignore-platform-reqs', version='5.0.0', collection_name='community.general')]),
|
||||
),
|
||||
required_if=[('global_command', False, ['working_dir'])],
|
||||
supports_check_mode=True
|
||||
|
||||
@@ -141,6 +141,10 @@ options:
|
||||
required: false
|
||||
default: 'download'
|
||||
choices: ['never', 'download', 'change', 'always']
|
||||
directory_mode:
|
||||
type: str
|
||||
description:
|
||||
- Filesystem permission mode applied recursively to I(dest) when it is a directory.
|
||||
extends_documentation_fragment:
|
||||
- files
|
||||
'''
|
||||
@@ -342,7 +346,7 @@ class Artifact(object):
|
||||
if len(parts) >= 3:
|
||||
g = parts[0]
|
||||
a = parts[1]
|
||||
v = parts[len(parts) - 1]
|
||||
v = parts[-1]
|
||||
t = None
|
||||
c = None
|
||||
if len(parts) == 4:
|
||||
@@ -595,8 +599,7 @@ def main():
|
||||
client_key=dict(type="path", required=False),
|
||||
keep_name=dict(required=False, default=False, type='bool'),
|
||||
verify_checksum=dict(required=False, default='download', choices=['never', 'download', 'change', 'always']),
|
||||
directory_mode=dict(type='str'), # Used since https://github.com/ansible/ansible/pull/24965, not sure
|
||||
# if this should really be here.
|
||||
directory_mode=dict(type='str'),
|
||||
),
|
||||
add_file_common_args=True,
|
||||
mutually_exclusive=([('version', 'version_by_spec')])
|
||||
|
||||
@@ -20,6 +20,7 @@ options:
|
||||
default: ['pip']
|
||||
required: False
|
||||
type: list
|
||||
elements: path
|
||||
requirements:
|
||||
- The requested pip executables must be installed on the target.
|
||||
author:
|
||||
@@ -115,7 +116,11 @@ def main():
|
||||
|
||||
# start work
|
||||
global module
|
||||
module = AnsibleModule(argument_spec=dict(clients={'type': 'list', 'default': ['pip']},), supports_check_mode=True)
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
clients=dict(type='list', elements='path', default=['pip']),
|
||||
),
|
||||
supports_check_mode=True)
|
||||
packages = {}
|
||||
results = {'packages': {}}
|
||||
clients = module.params['clients']
|
||||
|
||||
@@ -33,6 +33,7 @@ options:
|
||||
update_cache:
|
||||
description:
|
||||
- update the package database first C(apt-get update).
|
||||
- Alias C(update-cache) has been deprecated and will be removed in community.general 5.0.0.
|
||||
aliases: [ 'update-cache' ]
|
||||
type: bool
|
||||
default: no
|
||||
@@ -157,7 +158,9 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed']),
|
||||
update_cache=dict(type='bool', default=False, aliases=['update-cache']),
|
||||
update_cache=dict(
|
||||
type='bool', default=False, aliases=['update-cache'],
|
||||
deprecated_aliases=[dict(name='update-cache', version='5.0.0', collection_name='community.general')]),
|
||||
package=dict(type='list', elements='str', required=True, aliases=['name', 'pkg']),
|
||||
),
|
||||
)
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user