mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-04-29 09:56:53 +00:00
Compare commits
38 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f39f4c9071 | ||
|
|
a85fe95747 | ||
|
|
d7afb48ab6 | ||
|
|
0cee34ffaa | ||
|
|
fad7935abc | ||
|
|
ac0770ff55 | ||
|
|
8c8d4b578a | ||
|
|
275b979f7c | ||
|
|
0a364b166b | ||
|
|
bcf5c289af | ||
|
|
0e4b7aef26 | ||
|
|
e0022f0f5b | ||
|
|
aad2b2400e | ||
|
|
58317b37b3 | ||
|
|
14d82733be | ||
|
|
58ae8c81ac | ||
|
|
0aaa5e3d91 | ||
|
|
1c5149c3de | ||
|
|
0074d352be | ||
|
|
a5117bdeff | ||
|
|
ae2acef507 | ||
|
|
cfd1d2e327 | ||
|
|
767e9076a5 | ||
|
|
4080929c8a | ||
|
|
f8a9ac1048 | ||
|
|
0f91fea501 | ||
|
|
21204e1d46 | ||
|
|
e1863a2ff5 | ||
|
|
451428af04 | ||
|
|
248128f282 | ||
|
|
be8022c743 | ||
|
|
3a2e614071 | ||
|
|
91acc44c34 | ||
|
|
2a8f04347d | ||
|
|
42c7d763d8 | ||
|
|
b33e4224fc | ||
|
|
ffca5f1cc9 | ||
|
|
dd8bfe5f0b |
@@ -13,7 +13,7 @@ pr:
|
||||
- stable-*
|
||||
|
||||
schedules:
|
||||
- cron: 0 9 * * *
|
||||
- cron: 0 8 * * *
|
||||
displayName: Nightly
|
||||
always: true
|
||||
branches:
|
||||
@@ -36,7 +36,7 @@ variables:
|
||||
resources:
|
||||
containers:
|
||||
- container: default
|
||||
image: quay.io/ansible/azure-pipelines-test-container:1.7.1
|
||||
image: quay.io/ansible/azure-pipelines-test-container:1.8.0
|
||||
|
||||
pool: Standard
|
||||
|
||||
@@ -140,18 +140,16 @@ stages:
|
||||
parameters:
|
||||
testFormat: devel/{0}
|
||||
targets:
|
||||
- name: OS X 10.11
|
||||
test: osx/10.11
|
||||
- name: macOS 10.15
|
||||
test: macos/10.15
|
||||
- name: RHEL 7.8
|
||||
test: rhel/7.8
|
||||
- name: RHEL 8.2
|
||||
test: rhel/8.2
|
||||
- name: FreeBSD 11.1
|
||||
test: freebsd/11.1
|
||||
- name: FreeBSD 12.1
|
||||
test: freebsd/12.1
|
||||
- name: macOS 11.1
|
||||
test: macos/11.1
|
||||
- name: RHEL 7.9
|
||||
test: rhel/7.9
|
||||
- name: RHEL 8.3
|
||||
test: rhel/8.3
|
||||
- name: FreeBSD 11.4
|
||||
test: freebsd/11.4
|
||||
- name: FreeBSD 12.2
|
||||
test: freebsd/12.2
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
@@ -168,6 +166,12 @@ stages:
|
||||
targets:
|
||||
- name: OS X 10.11
|
||||
test: osx/10.11
|
||||
- name: macOS 10.15
|
||||
test: macos/10.15
|
||||
- name: macOS 11.1
|
||||
test: macos/11.1
|
||||
- name: RHEL 7.8
|
||||
test: rhel/7.8
|
||||
- name: RHEL 8.2
|
||||
test: rhel/8.2
|
||||
- name: FreeBSD 12.1
|
||||
@@ -175,9 +179,6 @@ stages:
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
- 5
|
||||
- stage: Remote_2_9
|
||||
displayName: Remote 2.9
|
||||
dependsOn: []
|
||||
@@ -188,14 +189,11 @@ stages:
|
||||
targets:
|
||||
- name: RHEL 8.2
|
||||
test: rhel/8.2
|
||||
#- name: FreeBSD 12.0
|
||||
# test: freebsd/12.0
|
||||
- name: FreeBSD 12.0
|
||||
test: freebsd/12.0
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
- 5
|
||||
|
||||
### Docker
|
||||
- stage: Docker_devel
|
||||
@@ -220,10 +218,10 @@ stages:
|
||||
test: opensuse15py2
|
||||
- name: openSUSE 15 py3
|
||||
test: opensuse15
|
||||
- name: Ubuntu 16.04
|
||||
test: ubuntu1604
|
||||
- name: Ubuntu 18.04
|
||||
test: ubuntu1804
|
||||
- name: Ubuntu 20.04
|
||||
test: ubuntu2004
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
@@ -238,16 +236,15 @@ stages:
|
||||
parameters:
|
||||
testFormat: 2.10/linux/{0}
|
||||
targets:
|
||||
#- name: CentOS 8
|
||||
# test: centos8
|
||||
- name: CentOS 8
|
||||
test: centos8
|
||||
- name: Fedora 32
|
||||
test: fedora32
|
||||
- name: openSUSE 15 py3
|
||||
test: opensuse15
|
||||
- name: Ubuntu 18.04
|
||||
test: ubuntu1804
|
||||
- name: Ubuntu 16.04
|
||||
test: ubuntu1604
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
@@ -260,16 +257,13 @@ stages:
|
||||
parameters:
|
||||
testFormat: 2.9/linux/{0}
|
||||
targets:
|
||||
#- name: CentOS 8
|
||||
# test: centos8
|
||||
#- name: Fedora 31
|
||||
# test: fedora31
|
||||
#- name: openSUSE 15 py3
|
||||
# test: opensuse15
|
||||
- name: Ubuntu 18.04
|
||||
test: ubuntu1804
|
||||
- name: CentOS 8
|
||||
test: centos8
|
||||
- name: Fedora 31
|
||||
test: fedora31
|
||||
- name: openSUSE 15 py3
|
||||
test: opensuse15
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
@@ -306,7 +300,7 @@ stages:
|
||||
nameFormat: Python {0}
|
||||
testFormat: 2.9/cloud/{0}/1
|
||||
targets:
|
||||
- test: 2.7
|
||||
- test: 3.6
|
||||
- stage: Summary
|
||||
condition: succeededOrFailed()
|
||||
dependsOn:
|
||||
|
||||
3
.github/BOTMETA.yml
vendored
3
.github/BOTMETA.yml
vendored
@@ -374,7 +374,8 @@ files:
|
||||
$modules/clustering/consul/:
|
||||
maintainers: $team_consul
|
||||
$modules/clustering/etcd3.py:
|
||||
maintainers: evrardjp vfauth
|
||||
maintainers: evrardjp
|
||||
ignore: vfauth
|
||||
$modules/clustering/nomad/:
|
||||
maintainers: chris93111
|
||||
$modules/clustering/pacemaker_cluster.py:
|
||||
|
||||
110
CHANGELOG.rst
110
CHANGELOG.rst
@@ -5,6 +5,116 @@ Community General Release Notes
|
||||
.. contents:: Topics
|
||||
|
||||
|
||||
v1.3.7
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular maintenance and bugfix release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- clc_* modules - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1771).
|
||||
- dnsimple - elements of list parameters ``record_ids`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- gitlab_runner - elements of list parameters ``tag_list`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- keycloak_client - elements of list parameters ``default_roles``, ``redirect_uris``, ``web_origins`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- librato_annotation - elements of list parameters ``links`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- lxd_container - elements of list parameter ``profiles`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- mail - elements of list parameters ``to``, ``cc``, ``bcc``, ``attach``, ``headers`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- na_ontap_gather_facts - elements of list parameters ``gather_subset`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- nexmo - elements of list parameters ``dest`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- nsupdate - elements of list parameters ``value`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- omapi_host - elements of list parameters ``statements`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- onepassword_info - elements of list parameters ``search_terms`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- packet_device - elements of list parameters ``device_ids``, ``hostnames`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- pagerduty - elements of list parameters ``service`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- plugins/module_utils/oracle/oci_utils.py - elements of list parameter ``key_by`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- proxmox_kvm module - actually implemented ``vmid`` and ``status`` return values. Updated documentation to reflect current situation (https://github.com/ansible-collections/community.general/issues/1410, https://github.com/ansible-collections/community.general/pull/1715).
|
||||
- pubnub_blocks - elements of list parameters ``event_handlers`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- redfish modules - explicitly setting lists' elements to ``str`` (https://github.com/ansible-collections/community.general/pull/1761).
|
||||
- redhat_subscription - elements of list parameters ``pool_ids``, ``addons`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- rocketchat - elements of list parameters ``attachments`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- sendgrid - elements of list parameters ``to_addresses``, ``cc``, ``bcc``, ``attachments`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- sl_vm - elements of list parameters ``disks``, ``ssh_keys`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- slack - elements of list parameters ``attachments`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- statusio_maintenance - elements of list parameters ``components``, ``containers`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- twilio - elements of list parameters ``to_numbers`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- vmadm - elements of list parameters ``disks``, ``nics``, ``resolvers``, ``filesystems`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- xml - elements of list parameters ``add_children``, ``set_children`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- aerospike_migration - fix typo that caused ``migrate_tx_key`` instead of ``migrate_rx_key`` being used (https://github.com/ansible-collections/community.general/pull/1739).
|
||||
- alternatives - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).
|
||||
- beadm - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).
|
||||
- chef_databag lookup plugin - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- cobbler_sync, cobbler_system - fix SSL/TLS certificate check when ``validate_certs`` set to ``false`` (https://github.com/ansible-collections/community.general/pull/1880).
|
||||
- cronvar - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).
|
||||
- dconf - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).
|
||||
- deploy_helper - allow ``state=clean`` to be used without defining a ``release`` (https://github.com/ansible-collections/community.general/issues/1852).
|
||||
- diy callback plugin - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- elasticsearch_plugin - ``state`` parameter choices must use ``list()`` in python3 (https://github.com/ansible-collections/community.general/pull/1830).
|
||||
- filesystem - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).
|
||||
- filesystem - remove ``swap`` from list of FS supported by ``resizefs=yes`` (https://github.com/ansible-collections/community.general/issues/790).
|
||||
- git_config - prevent ``run_command`` from expanding values (https://github.com/ansible-collections/community.general/issues/1776).
|
||||
- gitlab_runner - parameter ``registration_token`` was required but is used only when ``state`` is ``present`` (https://github.com/ansible-collections/community.general/issues/1714).
|
||||
- hipchat - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).
|
||||
- idrac_redfish_command - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- idrac_redfish_config - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- idrac_redfish_info - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- imc_rest - explicitly logging out instead of registering the call in ``atexit`` (https://github.com/ansible-collections/community.general/issues/1735).
|
||||
- infoblox inventory script - make sure that the script also works with Ansible 2.9, and returns a more helpful error when community.general is not installed as part of Ansible 2.10/3 (https://github.com/ansible-collections/community.general/pull/1871).
|
||||
- interfaces_file - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).
|
||||
- iso_extract - use proper alias deprecation mechanism for ``thirsty`` alias of ``force`` (https://github.com/ansible-collections/community.general/pull/1830).
|
||||
- java_cert - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).
|
||||
- kibana_plugin - ``state`` parameter choices must use ``list()`` in python3 (https://github.com/ansible-collections/community.general/pull/1830).
|
||||
- logstash_plugin - wrapped ``dict.keys()`` with ``list`` for use in ``choices`` setting (https://github.com/ansible-collections/community.general/pull/1830).
|
||||
- lvg - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).
|
||||
- lvol - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).
|
||||
- lxc - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).
|
||||
- lxc_container - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).
|
||||
- lxc_container - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- lxd_container - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- memcached cache plugin - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- net_tools.nios.api module_utils - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- nios_host_record - allow DNS Bypass for views other than default (https://github.com/ansible-collections/community.general/issues/1786).
|
||||
- nomad_job_info - fix module failure when nomad client returns no jobs (https://github.com/ansible-collections/community.general/pull/1721).
|
||||
- nsot inventory script - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- oci_vcn - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- oneandone_monitoring_policy - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- parted - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).
|
||||
- postgresql_info - fix crash caused by wrong PgSQL version parsing (https://github.com/ansible-collections/community.postgresql/issues/40).
|
||||
- postgresql_ping - fix crash caused by wrong PgSQL version parsing (https://github.com/ansible-collections/community.postgresql/issues/40).
|
||||
- postgresql_query - fix datetime.timedelta type handling (https://github.com/ansible-collections/community.postgresql/issues/47).
|
||||
- postgresql_query - fix decimal handling (https://github.com/ansible-collections/community.postgresql/issues/45).
|
||||
- postgresql_set - fails in check_mode on non-numeric values containing ``B`` (https://github.com/ansible-collections/community.postgresql/issues/48).
|
||||
- postgresql_set - return a message instead of traceback when a passed parameter has not been found (https://github.com/ansible-collections/community.postgresql/issues/41).
|
||||
- proxmox* modules - refactored some parameter validation code into use of ``env_fallback``, ``required_if``, ``required_together``, ``required_one_of`` (https://github.com/ansible-collections/community.general/pull/1765).
|
||||
- proxmox_kvm - do not add ``args`` if ``proxmox_default_behavior`` is set to no_defaults (https://github.com/ansible-collections/community.general/issues/1641).
|
||||
- proxmox_kvm - fix parameter ``vmid`` passed twice to ``exit_json`` while creating a virtual machine without cloning (https://github.com/ansible-collections/community.general/issues/1875, https://github.com/ansible-collections/community.general/pull/1895).
|
||||
- proxmox_kvm - fix undefined local variable ``status`` when the parameter ``state`` is either ``stopped``, ``started``, ``restarted`` or ``absent`` (https://github.com/ansible-collections/community.general/pull/1847).
|
||||
- proxmox_kvm - stop implicitly adding ``force`` equal to ``false``. Proxmox API requires not implemented parameters otherwise, and assumes ``force`` to be ``false`` by default anyways (https://github.com/ansible-collections/community.general/pull/1783).
|
||||
- redfish_command - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- redfish_config - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- redhat_subscription - ``mutually_exclusive`` was referring to parameter alias instead of name (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- redhat_subscription - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- redis cache plugin - wrapped usages of ``keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- riak - parameters ``wait_for_handoffs`` and ``wait_for_ring`` are ``int`` but the default value was ``false`` (https://github.com/ansible-collections/community.general/pull/1830).
|
||||
- rundeck_acl_policy - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).
|
||||
- runit - removed unused code, and passing command as ``list`` instead of ``str`` to ``run_command()`` (https://github.com/ansible-collections/community.general/pull/1830).
|
||||
- selective callback plugin - adjust import so that the plugin also works with ansible-core 2.11 (https://github.com/ansible-collections/community.general/pull/1807).
|
||||
- selective callback plugin - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- sensu_check - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- spotinst_aws_elastigroup - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- statusio_maintenance - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).
|
||||
- timezone - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).
|
||||
- utm_utils module_utils - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- vdo - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- zfs_delegate_admin - the elements of ``users``, ``groups`` and ``permissions`` are now enforced to be strings (https://github.com/ansible-collections/community.general/pull/1766).
|
||||
|
||||
v1.3.6
|
||||
======
|
||||
|
||||
|
||||
@@ -1950,3 +1950,202 @@ releases:
|
||||
- community.docker-76-leading-v-support-in-docker-version.yml
|
||||
- no_log-fixes.yml
|
||||
release_date: '2021-02-09'
|
||||
1.3.7:
|
||||
changes:
|
||||
bugfixes:
|
||||
- aerospike_migration - fix typo that caused ``migrate_tx_key`` instead of ``migrate_rx_key``
|
||||
being used (https://github.com/ansible-collections/community.general/pull/1739).
|
||||
- 'alternatives - internal refactoring: replaced uses of ``_`` with ``dummy``
|
||||
(https://github.com/ansible-collections/community.general/pull/1819).'
|
||||
- 'beadm - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).'
|
||||
- chef_databag lookup plugin - wrapped usages of ``dict.keys()`` in ``list()``
|
||||
for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- cobbler_sync, cobbler_system - fix SSL/TLS certificate check when ``validate_certs``
|
||||
set to ``false`` (https://github.com/ansible-collections/community.general/pull/1880).
|
||||
- 'cronvar - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).'
|
||||
- 'dconf - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).'
|
||||
- deploy_helper - allow ``state=clean`` to be used without defining a ``release``
|
||||
(https://github.com/ansible-collections/community.general/issues/1852).
|
||||
- diy callback plugin - wrapped usages of ``dict.keys()`` in ``list()`` for
|
||||
Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- elasticsearch_plugin - ``state`` parameter choices must use ``list()`` in
|
||||
python3 (https://github.com/ansible-collections/community.general/pull/1830).
|
||||
- 'filesystem - internal refactoring: replaced uses of ``_`` with ``dummy``
|
||||
(https://github.com/ansible-collections/community.general/pull/1819).'
|
||||
- filesystem - remove ``swap`` from list of FS supported by ``resizefs=yes``
|
||||
(https://github.com/ansible-collections/community.general/issues/790).
|
||||
- git_config - prevent ``run_command`` from expanding values (https://github.com/ansible-collections/community.general/issues/1776).
|
||||
- gitlab_runner - parameter ``registration_token`` was required but is used
|
||||
only when ``state`` is ``present`` (https://github.com/ansible-collections/community.general/issues/1714).
|
||||
- 'hipchat - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).'
|
||||
- idrac_redfish_command - wrapped usages of ``dict.keys()`` in ``list()`` for
|
||||
Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- idrac_redfish_config - wrapped usages of ``dict.keys()`` in ``list()`` for
|
||||
Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- idrac_redfish_info - wrapped usages of ``dict.keys()`` in ``list()`` for Python
|
||||
3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- imc_rest - explicitly logging out instead of registering the call in ``atexit``
|
||||
(https://github.com/ansible-collections/community.general/issues/1735).
|
||||
- infoblox inventory script - make sure that the script also works with Ansible
|
||||
2.9, and returns a more helpful error when community.general is not installed
|
||||
as part of Ansible 2.10/3 (https://github.com/ansible-collections/community.general/pull/1871).
|
||||
- 'interfaces_file - internal refactoring: replaced uses of ``_`` with ``dummy``
|
||||
(https://github.com/ansible-collections/community.general/pull/1819).'
|
||||
- iso_extract - use proper alias deprecation mechanism for ``thirsty`` alias
|
||||
of ``force`` (https://github.com/ansible-collections/community.general/pull/1830).
|
||||
- 'java_cert - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).'
|
||||
- kibana_plugin - ``state`` parameter choices must use ``list()`` in python3
|
||||
(https://github.com/ansible-collections/community.general/pull/1830).
|
||||
- logstash_plugin - wrapped ``dict.keys()`` with ``list`` for use in ``choices``
|
||||
setting (https://github.com/ansible-collections/community.general/pull/1830).
|
||||
- 'lvg - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).'
|
||||
- 'lvol - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).'
|
||||
- 'lxc - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).'
|
||||
- 'lxc_container - internal refactoring: replaced uses of ``_`` with ``dummy``
|
||||
(https://github.com/ansible-collections/community.general/pull/1819).'
|
||||
- lxc_container - wrapped usages of ``dict.keys()`` in ``list()`` for Python
|
||||
3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- lxd_container - wrapped usages of ``dict.keys()`` in ``list()`` for Python
|
||||
3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- memcached cache plugin - wrapped usages of ``dict.keys()`` in ``list()`` for
|
||||
Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- net_tools.nios.api module_utils - wrapped usages of ``dict.keys()`` in ``list()``
|
||||
for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- nios_host_record - allow DNS Bypass for views other than default (https://github.com/ansible-collections/community.general/issues/1786).
|
||||
- nomad_job_info - fix module failure when nomad client returns no jobs (https://github.com/ansible-collections/community.general/pull/1721).
|
||||
- nsot inventory script - wrapped usages of ``dict.keys()`` in ``list()`` for
|
||||
Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- oci_vcn - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility
|
||||
(https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- oneandone_monitoring_policy - wrapped usages of ``dict.keys()`` in ``list()``
|
||||
for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- 'parted - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).'
|
||||
- postgresql_info - fix crash caused by wrong PgSQL version parsing (https://github.com/ansible-collections/community.postgresql/issues/40).
|
||||
- postgresql_ping - fix crash caused by wrong PgSQL version parsing (https://github.com/ansible-collections/community.postgresql/issues/40).
|
||||
- postgresql_query - fix datetime.timedelta type handling (https://github.com/ansible-collections/community.postgresql/issues/47).
|
||||
- postgresql_query - fix decimal handling (https://github.com/ansible-collections/community.postgresql/issues/45).
|
||||
- postgresql_set - fails in check_mode on non-numeric values containing ``B``
|
||||
(https://github.com/ansible-collections/community.postgresql/issues/48).
|
||||
- postgresql_set - return a message instead of traceback when a passed parameter
|
||||
has not been found (https://github.com/ansible-collections/community.postgresql/issues/41).
|
||||
- proxmox* modules - refactored some parameter validation code into use of ``env_fallback``,
|
||||
``required_if``, ``required_together``, ``required_one_of`` (https://github.com/ansible-collections/community.general/pull/1765).
|
||||
- proxmox_kvm - do not add ``args`` if ``proxmox_default_behavior`` is set to
|
||||
no_defaults (https://github.com/ansible-collections/community.general/issues/1641).
|
||||
- proxmox_kvm - fix parameter ``vmid`` passed twice to ``exit_json`` while creating
|
||||
a virtual machine without cloning (https://github.com/ansible-collections/community.general/issues/1875,
|
||||
https://github.com/ansible-collections/community.general/pull/1895).
|
||||
- proxmox_kvm - fix undefined local variable ``status`` when the parameter ``state``
|
||||
is either ``stopped``, ``started``, ``restarted`` or ``absent`` (https://github.com/ansible-collections/community.general/pull/1847).
|
||||
- proxmox_kvm - stop implicitly adding ``force`` equal to ``false``. Proxmox
|
||||
API requires not implemented parameters otherwise, and assumes ``force`` to
|
||||
be ``false`` by default anyways (https://github.com/ansible-collections/community.general/pull/1783).
|
||||
- redfish_command - wrapped usages of ``dict.keys()`` in ``list()`` for Python
|
||||
3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- redfish_config - wrapped usages of ``dict.keys()`` in ``list()`` for Python
|
||||
3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- redhat_subscription - ``mutually_exclusive`` was referring to parameter alias
|
||||
instead of name (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- redhat_subscription - wrapped usages of ``dict.keys()`` in ``list()`` for
|
||||
Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- redis cache plugin - wrapped usages of ``keys()`` in ``list()`` for Python
|
||||
3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- riak - parameters ``wait_for_handoffs`` and ``wait_for_ring`` are ``int``
|
||||
but the default value was ``false`` (https://github.com/ansible-collections/community.general/pull/1830).
|
||||
- 'rundeck_acl_policy - internal refactoring: replaced uses of ``_`` with ``dummy``
|
||||
(https://github.com/ansible-collections/community.general/pull/1819).'
|
||||
- runit - removed unused code, and passing command as ``list`` instead of ``str``
|
||||
to ``run_command()`` (https://github.com/ansible-collections/community.general/pull/1830).
|
||||
- selective callback plugin - adjust import so that the plugin also works with
|
||||
ansible-core 2.11 (https://github.com/ansible-collections/community.general/pull/1807).
|
||||
- selective callback plugin - wrapped usages of ``dict.keys()`` in ``list()``
|
||||
for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- sensu_check - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3
|
||||
compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- spotinst_aws_elastigroup - wrapped usages of ``dict.keys()`` in ``list()``
|
||||
for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- 'statusio_maintenance - internal refactoring: replaced uses of ``_`` with
|
||||
``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).'
|
||||
- 'timezone - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819).'
|
||||
- utm_utils module_utils - wrapped usages of ``dict.keys()`` in ``list()`` for
|
||||
Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- vdo - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility
|
||||
(https://github.com/ansible-collections/community.general/pull/1861).
|
||||
- zfs_delegate_admin - the elements of ``users``, ``groups`` and ``permissions``
|
||||
are now enforced to be strings (https://github.com/ansible-collections/community.general/pull/1766).
|
||||
minor_changes:
|
||||
- clc_* modules - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1771).
|
||||
- dnsimple - elements of list parameters ``record_ids`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- gitlab_runner - elements of list parameters ``tag_list`` are now validated
|
||||
(https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- keycloak_client - elements of list parameters ``default_roles``, ``redirect_uris``,
|
||||
``web_origins`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- librato_annotation - elements of list parameters ``links`` are now validated
|
||||
(https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- lxd_container - elements of list parameter ``profiles`` are now validated
|
||||
(https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- mail - elements of list parameters ``to``, ``cc``, ``bcc``, ``attach``, ``headers``
|
||||
are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- na_ontap_gather_facts - elements of list parameters ``gather_subset`` are
|
||||
now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- nexmo - elements of list parameters ``dest`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- nsupdate - elements of list parameters ``value`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- omapi_host - elements of list parameters ``statements`` are now validated
|
||||
(https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- onepassword_info - elements of list parameters ``search_terms`` are now validated
|
||||
(https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- packet_device - elements of list parameters ``device_ids``, ``hostnames``
|
||||
are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- pagerduty - elements of list parameters ``service`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- plugins/module_utils/oracle/oci_utils.py - elements of list parameter ``key_by``
|
||||
are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- proxmox_kvm module - actually implemented ``vmid`` and ``status`` return values.
|
||||
Updated documentation to reflect current situation (https://github.com/ansible-collections/community.general/issues/1410,
|
||||
https://github.com/ansible-collections/community.general/pull/1715).
|
||||
- pubnub_blocks - elements of list parameters ``event_handlers`` are now validated
|
||||
(https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- redfish modules - explicitly setting lists' elements to ``str`` (https://github.com/ansible-collections/community.general/pull/1761).
|
||||
- redhat_subscription - elements of list parameters ``pool_ids``, ``addons``
|
||||
are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- rocketchat - elements of list parameters ``attachments`` are now validated
|
||||
(https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- sendgrid - elements of list parameters ``to_addresses``, ``cc``, ``bcc``,
|
||||
``attachments`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- sl_vm - elements of list parameters ``disks``, ``ssh_keys`` are now validated
|
||||
(https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- slack - elements of list parameters ``attachments`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- statusio_maintenance - elements of list parameters ``components``, ``containers``
|
||||
are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- twilio - elements of list parameters ``to_numbers`` are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- vmadm - elements of list parameters ``disks``, ``nics``, ``resolvers``, ``filesystems``
|
||||
are now validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
- xml - elements of list parameters ``add_children``, ``set_children`` are now
|
||||
validated (https://github.com/ansible-collections/community.general/pull/1795).
|
||||
release_summary: Regular maintenance and bugfix release.
|
||||
fragments:
|
||||
- 1-community-postgresql_backports.yml
|
||||
- 1.3.7.yml
|
||||
- 1714-gitlab_runner-required-reg-token.yml
|
||||
- 1715-proxmox_kvm-add-vmid-to-returns.yml
|
||||
- 1721-fix-nomad_job_info-no-jobs-failure.yml
|
||||
- 1735-imc-sessions.yml
|
||||
- 1740-aerospike_migration.yml
|
||||
- 1753-document-fstypes-supported-by-resizefs.yml
|
||||
- 1761-redfish-tidy-up-validation.yml
|
||||
- 1765-proxmox-params.yml
|
||||
- 1766-zfs-fixed-sanity.yml
|
||||
- 1771-centurylink-validation-elements.yml
|
||||
- 1776-git_config-tilde_value.yml
|
||||
- 1783-proxmox-kvm-fix-args-500-error.yaml
|
||||
- 1788-ease-nios_host_record-dns-bypass-check.yml
|
||||
- 1795-list-elements-batch1.yml
|
||||
- 1819-tidyup-pylint-blacklistnames.yml
|
||||
- 1830-valmod_docmissingtype_batch1.yml
|
||||
- 1847-proxmox-kvm-fix-status.yml
|
||||
- 1852-deploy-helper-fix-state-is-clean-without-release.yaml
|
||||
- 1861-python3-keys.yml
|
||||
- 1871-infoblox-inventory.yml
|
||||
- 1880-fix_cobbler_system_ssl.yml
|
||||
- 1895-proxmox-kvm-fix-issue-1875.yml
|
||||
- selective-core-2.11.yml
|
||||
release_date: '2021-02-25'
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
namespace: community
|
||||
name: general
|
||||
version: 1.3.6
|
||||
version: 1.3.7
|
||||
readme: README.md
|
||||
authors:
|
||||
- Ansible (https://github.com/ansible)
|
||||
|
||||
2
plugins/cache/memcached.py
vendored
2
plugins/cache/memcached.py
vendored
@@ -161,7 +161,7 @@ class CacheModuleKeys(MutableSet):
|
||||
self._cache.set(self.PREFIX, self._keyset)
|
||||
|
||||
def remove_by_timerange(self, s_min, s_max):
|
||||
for k in self._keyset.keys():
|
||||
for k in list(self._keyset.keys()):
|
||||
t = self._keyset[k]
|
||||
if s_min < t < s_max:
|
||||
del self._keyset[k]
|
||||
|
||||
6
plugins/cache/redis.py
vendored
6
plugins/cache/redis.py
vendored
@@ -216,14 +216,12 @@ class CacheModule(BaseCacheModule):
|
||||
self._db.zrem(self._keys_set, key)
|
||||
|
||||
def flush(self):
|
||||
for key in self.keys():
|
||||
for key in list(self.keys()):
|
||||
self.delete(key)
|
||||
|
||||
def copy(self):
|
||||
# TODO: there is probably a better way to do this in redis
|
||||
ret = dict()
|
||||
for key in self.keys():
|
||||
ret[key] = self.get(key)
|
||||
ret = dict([(k, self.get(k)) for k in self.keys()])
|
||||
return ret
|
||||
|
||||
def __getstate__(self):
|
||||
|
||||
@@ -1013,7 +1013,7 @@ class CallbackModule(Default):
|
||||
for attr in _stats_attributes:
|
||||
_ret[self.DIY_NS]['stats'].update({attr: _get_value(obj=stats, attr=attr)})
|
||||
|
||||
_ret[self.DIY_NS].update({'top_level_var_names': _ret.keys()})
|
||||
_ret[self.DIY_NS].update({'top_level_var_names': list(_ret.keys())})
|
||||
|
||||
return _ret
|
||||
|
||||
|
||||
@@ -173,8 +173,7 @@ class CallbackModule(CallbackBase):
|
||||
# Displays info about playbook being started by a person on an
|
||||
# inventory, as well as Tags, Skip Tags and Limits
|
||||
if not self.printed_playbook:
|
||||
self.playbook_name, _ = os.path.splitext(
|
||||
os.path.basename(self.play.playbook.filename))
|
||||
self.playbook_name, dummy = os.path.splitext(os.path.basename(self.play.playbook.filename))
|
||||
host_list = self.play.playbook.inventory.host_list
|
||||
inventory = os.path.basename(os.path.realpath(host_list))
|
||||
self.send_msg("%s: Playbook initiated by %s against %s" %
|
||||
|
||||
@@ -41,7 +41,16 @@ import difflib
|
||||
from ansible import constants as C
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
from ansible.module_utils._text import to_text
|
||||
from ansible.utils.color import codeCodes
|
||||
|
||||
try:
|
||||
codeCodes = C.COLOR_CODES
|
||||
except AttributeError:
|
||||
# This constant was moved to ansible.constants in
|
||||
# https://github.com/ansible/ansible/commit/1202dd000f10b0e8959019484f1c3b3f9628fc67
|
||||
# (will be included in ansible-core 2.11.0). For older Ansible/ansible-base versions,
|
||||
# we include from the original location.
|
||||
from ansible.utils.color import codeCodes
|
||||
|
||||
|
||||
DONT_COLORIZE = False
|
||||
COLORS = {
|
||||
@@ -58,7 +67,7 @@ COLORS = {
|
||||
|
||||
def dict_diff(prv, nxt):
|
||||
"""Return a dict of keys that differ with another config object."""
|
||||
keys = set(prv.keys() + nxt.keys())
|
||||
keys = set(list(prv.keys()) + list(nxt.keys()))
|
||||
result = {}
|
||||
for k in keys:
|
||||
if prv.get(k) != nxt.get(k):
|
||||
|
||||
@@ -86,7 +86,7 @@ class Connection(ConnectionBase):
|
||||
write_fds = []
|
||||
while len(read_fds) > 0 or len(write_fds) > 0:
|
||||
try:
|
||||
ready_reads, ready_writes, _ = select.select(read_fds, write_fds, [])
|
||||
ready_reads, ready_writes, dummy = select.select(read_fds, write_fds, [])
|
||||
except select.error as e:
|
||||
if e.args[0] == errno.EINTR:
|
||||
continue
|
||||
|
||||
@@ -20,4 +20,5 @@ class ModuleDocFragment(object):
|
||||
identify an instance of the resource. By default, all the attributes of a resource except
|
||||
I(freeform_tags) are used to uniquely identify a resource.
|
||||
type: list
|
||||
elements: str
|
||||
"""
|
||||
|
||||
@@ -32,12 +32,10 @@ DOCUMENTATION = r'''
|
||||
description: Populate inventory with instances in this region.
|
||||
default: []
|
||||
type: list
|
||||
required: false
|
||||
types:
|
||||
description: Populate inventory with instances with this type.
|
||||
default: []
|
||||
type: list
|
||||
required: false
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
|
||||
@@ -212,10 +212,8 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
|
||||
if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')):
|
||||
value = ('disk_image=' + value)
|
||||
|
||||
if isinstance(value, int) or ',' not in value:
|
||||
value = value
|
||||
# split off strings with commas to a dict
|
||||
else:
|
||||
if not (isinstance(value, int) or ',' not in value):
|
||||
# split off strings with commas to a dict
|
||||
# skip over any keys that cannot be processed
|
||||
try:
|
||||
value = dict(key.split("=") for key in value.split(","))
|
||||
|
||||
@@ -217,7 +217,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
yield host not in v
|
||||
yield True
|
||||
|
||||
return all([found_host for found_host in find_host(host, inventory)])
|
||||
return all(find_host(host, inventory))
|
||||
|
||||
def verify_file(self, path):
|
||||
|
||||
|
||||
@@ -81,7 +81,7 @@ class LookupModule(LookupBase):
|
||||
)
|
||||
if args:
|
||||
raise AnsibleError(
|
||||
"unrecognized arguments to with_sequence: %r" % args.keys()
|
||||
"unrecognized arguments to with_sequence: %r" % list(args.keys())
|
||||
)
|
||||
|
||||
def run(self, terms, variables=None, **kwargs):
|
||||
|
||||
@@ -130,7 +130,7 @@ class CloudRetry(object):
|
||||
try:
|
||||
return f(*args, **kwargs)
|
||||
except Exception as e:
|
||||
if isinstance(e, cls.base_class):
|
||||
if isinstance(e, cls.base_class): # pylint: disable=isinstance-second-argument-not-valid-type
|
||||
response_code = cls.status_code_from_exception(e)
|
||||
if cls.found(response_code, catch_extra_error_codes):
|
||||
msg = "{0}: Retrying in {1} seconds...".format(str(e), delay)
|
||||
|
||||
@@ -259,7 +259,7 @@ class CmdMixin(object):
|
||||
|
||||
def _calculate_args(self, extra_params=None, params=None):
|
||||
def add_arg_formatted_param(_cmd_args, arg_format, _value):
|
||||
args = [x for x in arg_format.to_text(_value)]
|
||||
args = list(arg_format.to_text(_value))
|
||||
return _cmd_args + args
|
||||
|
||||
def find_format(_param):
|
||||
|
||||
@@ -251,13 +251,10 @@ class WapiModule(WapiBase):
|
||||
else:
|
||||
proposed_object[key] = self.module.params[key]
|
||||
|
||||
# If configure_by_dns is set to False, then delete the default dns set in the param else throw exception
|
||||
# If configure_by_dns is set to False and view is 'default', then delete the default dns
|
||||
if not proposed_object.get('configure_for_dns') and proposed_object.get('view') == 'default'\
|
||||
and ib_obj_type == NIOS_HOST_RECORD:
|
||||
del proposed_object['view']
|
||||
elif not proposed_object.get('configure_for_dns') and proposed_object.get('view') != 'default'\
|
||||
and ib_obj_type == NIOS_HOST_RECORD:
|
||||
self.module.fail_json(msg='DNS Bypass is not allowed if DNS view is set other than \'default\'')
|
||||
|
||||
if ib_obj_ref:
|
||||
if len(ib_obj_ref) > 1:
|
||||
@@ -493,12 +490,12 @@ class WapiModule(WapiBase):
|
||||
else:
|
||||
test_obj_filter = dict([('name', old_name)])
|
||||
# get the object reference
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys())
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=list(ib_spec.keys()))
|
||||
if ib_obj:
|
||||
obj_filter['name'] = new_name
|
||||
else:
|
||||
test_obj_filter['name'] = new_name
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys())
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=list(ib_spec.keys()))
|
||||
update = True
|
||||
return ib_obj, update, new_name
|
||||
if (ib_obj_type == NIOS_HOST_RECORD):
|
||||
@@ -532,7 +529,7 @@ class WapiModule(WapiBase):
|
||||
# check if test_obj_filter is empty copy passed obj_filter
|
||||
else:
|
||||
test_obj_filter = obj_filter
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=list(ib_spec.keys()))
|
||||
elif (ib_obj_type == NIOS_A_RECORD):
|
||||
# resolves issue where multiple a_records with same name and different IP address
|
||||
test_obj_filter = obj_filter
|
||||
@@ -542,7 +539,7 @@ class WapiModule(WapiBase):
|
||||
except TypeError:
|
||||
ipaddr = obj_filter['ipv4addr']
|
||||
test_obj_filter['ipv4addr'] = ipaddr
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=list(ib_spec.keys()))
|
||||
elif (ib_obj_type == NIOS_TXT_RECORD):
|
||||
# resolves issue where multiple txt_records with same name and different text
|
||||
test_obj_filter = obj_filter
|
||||
@@ -552,12 +549,12 @@ class WapiModule(WapiBase):
|
||||
except TypeError:
|
||||
txt = obj_filter['text']
|
||||
test_obj_filter['text'] = txt
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=list(ib_spec.keys()))
|
||||
elif (ib_obj_type == NIOS_ZONE):
|
||||
# del key 'restart_if_needed' as nios_zone get_object fails with the key present
|
||||
temp = ib_spec['restart_if_needed']
|
||||
del ib_spec['restart_if_needed']
|
||||
ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
|
||||
ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=list(ib_spec.keys()))
|
||||
# reinstate restart_if_needed if ib_obj is none, meaning there's no existing nios_zone ref
|
||||
if not ib_obj:
|
||||
ib_spec['restart_if_needed'] = temp
|
||||
@@ -565,12 +562,12 @@ class WapiModule(WapiBase):
|
||||
# del key 'create_token' as nios_member get_object fails with the key present
|
||||
temp = ib_spec['create_token']
|
||||
del ib_spec['create_token']
|
||||
ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
|
||||
ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=list(ib_spec.keys()))
|
||||
if temp:
|
||||
# reinstate 'create_token' key
|
||||
ib_spec['create_token'] = temp
|
||||
else:
|
||||
ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
|
||||
ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=list(ib_spec.keys()))
|
||||
return ib_obj, update, new_name
|
||||
|
||||
def on_update(self, proposed_object, ib_spec):
|
||||
|
||||
@@ -104,7 +104,7 @@ def get_common_arg_spec(supports_create=False, supports_wait=False):
|
||||
|
||||
if supports_create:
|
||||
common_args.update(
|
||||
key_by=dict(type="list"),
|
||||
key_by=dict(type="list", elements="str"),
|
||||
force_create=dict(type="bool", default=False),
|
||||
)
|
||||
|
||||
|
||||
@@ -84,7 +84,7 @@ class UTM:
|
||||
raise UTMModuleConfigurationError(
|
||||
"The keys " + to_native(
|
||||
self.change_relevant_keys) + " to check are not in the modules keys:\n" + to_native(
|
||||
module.params.keys()))
|
||||
list(module.params.keys())))
|
||||
|
||||
def execute(self):
|
||||
try:
|
||||
|
||||
@@ -32,6 +32,7 @@ options:
|
||||
- A list of recipient email ids to notify the alert.
|
||||
This is required for state 'present'
|
||||
type: list
|
||||
elements: str
|
||||
metric:
|
||||
description:
|
||||
- The metric on which to measure the condition that will trigger the alert.
|
||||
@@ -220,7 +221,7 @@ class ClcAlertPolicy:
|
||||
name=dict(),
|
||||
id=dict(),
|
||||
alias=dict(required=True),
|
||||
alert_recipients=dict(type='list'),
|
||||
alert_recipients=dict(type='list', elements='str'),
|
||||
metric=dict(
|
||||
choices=[
|
||||
'cpu',
|
||||
|
||||
@@ -18,6 +18,7 @@ options:
|
||||
- A list of server Ids to deploy the blue print package.
|
||||
type: list
|
||||
required: True
|
||||
elements: str
|
||||
package_id:
|
||||
description:
|
||||
- The package id of the blue print.
|
||||
@@ -164,7 +165,7 @@ class ClcBlueprintPackage:
|
||||
:return: the package dictionary object
|
||||
"""
|
||||
argument_spec = dict(
|
||||
server_ids=dict(type='list', required=True),
|
||||
server_ids=dict(type='list', elements='str', required=True),
|
||||
package_id=dict(required=True),
|
||||
package_params=dict(type='dict', default={}),
|
||||
wait=dict(default=True), # @FIXME should be bool?
|
||||
|
||||
@@ -29,17 +29,20 @@ options:
|
||||
- The list of source addresses for traffic on the originating firewall.
|
||||
This is required when state is 'present'
|
||||
type: list
|
||||
elements: str
|
||||
destination:
|
||||
description:
|
||||
- The list of destination addresses for traffic on the terminating firewall.
|
||||
This is required when state is 'present'
|
||||
type: list
|
||||
elements: str
|
||||
ports:
|
||||
description:
|
||||
- The list of ports associated with the policy.
|
||||
TCP and UDP can take in single ports or port ranges.
|
||||
- "Example: C(['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'])."
|
||||
type: list
|
||||
elements: str
|
||||
firewall_policy_id:
|
||||
description:
|
||||
- Id of the firewall policy. This is required to update or delete an existing firewall policy
|
||||
@@ -217,9 +220,9 @@ class ClcFirewallPolicy:
|
||||
source_account_alias=dict(required=True),
|
||||
destination_account_alias=dict(),
|
||||
firewall_policy_id=dict(),
|
||||
ports=dict(type='list'),
|
||||
source=dict(type='list'),
|
||||
destination=dict(type='list'),
|
||||
ports=dict(type='list', elements='str'),
|
||||
source=dict(type='list', elements='str'),
|
||||
destination=dict(type='list', elements='str'),
|
||||
wait=dict(default=True), # @FIXME type=bool
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
enabled=dict(default=True, choices=[True, False])
|
||||
|
||||
@@ -53,6 +53,7 @@ options:
|
||||
- A list of nodes that needs to be added to the load balancer pool
|
||||
type: list
|
||||
default: []
|
||||
elements: dict
|
||||
status:
|
||||
description:
|
||||
- The status of the loadbalancer
|
||||
@@ -869,7 +870,7 @@ class ClcLoadBalancer:
|
||||
port=dict(choices=[80, 443]),
|
||||
method=dict(choices=['leastConnection', 'roundRobin']),
|
||||
persistence=dict(choices=['standard', 'sticky']),
|
||||
nodes=dict(type='list', default=[]),
|
||||
nodes=dict(type='list', default=[], elements='dict'),
|
||||
status=dict(default='enabled', choices=['enabled', 'disabled']),
|
||||
state=dict(
|
||||
default='present',
|
||||
|
||||
@@ -18,6 +18,7 @@ options:
|
||||
- A list of server Ids to modify.
|
||||
type: list
|
||||
required: True
|
||||
elements: str
|
||||
cpu:
|
||||
description:
|
||||
- How many CPUs to update on the server
|
||||
@@ -396,7 +397,7 @@ class ClcModifyServer:
|
||||
:return: argument spec dictionary
|
||||
"""
|
||||
argument_spec = dict(
|
||||
server_ids=dict(type='list', required=True),
|
||||
server_ids=dict(type='list', required=True, elements='str'),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
cpu=dict(),
|
||||
memory=dict(),
|
||||
|
||||
@@ -23,11 +23,13 @@ options:
|
||||
description:
|
||||
- A list of ports to expose. This is required when state is 'present'
|
||||
type: list
|
||||
elements: int
|
||||
server_ids:
|
||||
description:
|
||||
- A list of servers to create public ips on.
|
||||
type: list
|
||||
required: True
|
||||
elements: str
|
||||
state:
|
||||
description:
|
||||
- Determine whether to create or delete public IPs. If present module will not create a second public ip if one
|
||||
@@ -193,9 +195,9 @@ class ClcPublicIp(object):
|
||||
:return: argument spec dictionary
|
||||
"""
|
||||
argument_spec = dict(
|
||||
server_ids=dict(type='list', required=True),
|
||||
server_ids=dict(type='list', required=True, elements='str'),
|
||||
protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']),
|
||||
ports=dict(type='list'),
|
||||
ports=dict(type='list', elements='int'),
|
||||
wait=dict(type='bool', default=True),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
)
|
||||
|
||||
@@ -17,6 +17,7 @@ options:
|
||||
description:
|
||||
- The list of additional disks for the server
|
||||
type: list
|
||||
elements: dict
|
||||
default: []
|
||||
add_public_ip:
|
||||
description:
|
||||
@@ -66,6 +67,7 @@ options:
|
||||
- The list of custom fields to set on the server.
|
||||
type: list
|
||||
default: []
|
||||
elements: dict
|
||||
description:
|
||||
description:
|
||||
- The description to set for the server.
|
||||
@@ -111,6 +113,7 @@ options:
|
||||
description:
|
||||
- The list of blue print packages to run on the server after its created.
|
||||
type: list
|
||||
elements: dict
|
||||
default: []
|
||||
password:
|
||||
description:
|
||||
@@ -130,6 +133,7 @@ options:
|
||||
description:
|
||||
- A list of ports to allow on the firewall to the servers public ip, if add_public_ip is set to True.
|
||||
type: list
|
||||
elements: dict
|
||||
default: []
|
||||
secondary_dns:
|
||||
description:
|
||||
@@ -141,6 +145,7 @@ options:
|
||||
A list of server Ids to insure are started, stopped, or absent.
|
||||
type: list
|
||||
default: []
|
||||
elements: str
|
||||
source_server_password:
|
||||
description:
|
||||
- The password for the source server if a clone is specified.
|
||||
@@ -575,8 +580,8 @@ class ClcServer:
|
||||
type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']),
|
||||
primary_dns=dict(default=None),
|
||||
secondary_dns=dict(default=None),
|
||||
additional_disks=dict(type='list', default=[]),
|
||||
custom_fields=dict(type='list', default=[]),
|
||||
additional_disks=dict(type='list', default=[], elements='dict'),
|
||||
custom_fields=dict(type='list', default=[], elements='dict'),
|
||||
ttl=dict(default=None),
|
||||
managed_os=dict(type='bool', default=False),
|
||||
description=dict(default=None),
|
||||
@@ -586,7 +591,7 @@ class ClcServer:
|
||||
anti_affinity_policy_name=dict(default=None),
|
||||
alert_policy_id=dict(default=None),
|
||||
alert_policy_name=dict(default=None),
|
||||
packages=dict(type='list', default=[]),
|
||||
packages=dict(type='list', default=[], elements='dict'),
|
||||
state=dict(
|
||||
default='present',
|
||||
choices=[
|
||||
@@ -597,7 +602,7 @@ class ClcServer:
|
||||
count=dict(type='int', default=1),
|
||||
exact_count=dict(type='int', default=None),
|
||||
count_group=dict(),
|
||||
server_ids=dict(type='list', default=[]),
|
||||
server_ids=dict(type='list', default=[], elements='str'),
|
||||
add_public_ip=dict(type='bool', default=False),
|
||||
public_ip_protocol=dict(
|
||||
default='TCP',
|
||||
@@ -605,7 +610,7 @@ class ClcServer:
|
||||
'TCP',
|
||||
'UDP',
|
||||
'ICMP']),
|
||||
public_ip_ports=dict(type='list', default=[]),
|
||||
public_ip_ports=dict(type='list', default=[], elements='dict'),
|
||||
configuration_id=dict(default=None),
|
||||
os_type=dict(default=None,
|
||||
choices=[
|
||||
|
||||
@@ -18,6 +18,7 @@ options:
|
||||
- The list of CLC server Ids.
|
||||
type: list
|
||||
required: True
|
||||
elements: str
|
||||
expiration_days:
|
||||
description:
|
||||
- The number of days to keep the server snapshot before it expires.
|
||||
@@ -330,7 +331,7 @@ class ClcSnapshot:
|
||||
:return: the package dictionary object
|
||||
"""
|
||||
argument_spec = dict(
|
||||
server_ids=dict(type='list', required=True),
|
||||
server_ids=dict(type='list', required=True, elements='str'),
|
||||
expiration_days=dict(default=7, type='int'),
|
||||
wait=dict(default=True),
|
||||
state=dict(
|
||||
|
||||
@@ -730,7 +730,7 @@ class LxcContainerManagement(object):
|
||||
for option_line in container_config:
|
||||
# Look for key in config
|
||||
if keyre.match(option_line):
|
||||
_, _value = option_line.split('=', 1)
|
||||
dummy, _value = option_line.split('=', 1)
|
||||
config_value = ' '.join(_value.split())
|
||||
line_index = container_config.index(option_line)
|
||||
# If the sanitized values don't match replace them
|
||||
@@ -953,7 +953,7 @@ class LxcContainerManagement(object):
|
||||
"""
|
||||
|
||||
self.container = self.get_container_bind()
|
||||
for _ in xrange(timeout):
|
||||
for dummy in xrange(timeout):
|
||||
if self._get_state() != 'running':
|
||||
self.container.start()
|
||||
self.state_change = True
|
||||
@@ -1006,7 +1006,7 @@ class LxcContainerManagement(object):
|
||||
:type timeout: ``int``
|
||||
"""
|
||||
|
||||
for _ in xrange(timeout):
|
||||
for dummy in xrange(timeout):
|
||||
if not self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
|
||||
break
|
||||
|
||||
@@ -1662,7 +1662,7 @@ def main():
|
||||
),
|
||||
backing_store=dict(
|
||||
type='str',
|
||||
choices=LXC_BACKING_STORE.keys(),
|
||||
choices=list(LXC_BACKING_STORE.keys()),
|
||||
default='dir'
|
||||
),
|
||||
template_options=dict(
|
||||
@@ -1699,7 +1699,7 @@ def main():
|
||||
type='path'
|
||||
),
|
||||
state=dict(
|
||||
choices=LXC_ANSIBLE_STATES.keys(),
|
||||
choices=list(LXC_ANSIBLE_STATES.keys()),
|
||||
default='started'
|
||||
),
|
||||
container_command=dict(
|
||||
@@ -1733,7 +1733,7 @@ def main():
|
||||
type='path',
|
||||
),
|
||||
archive_compression=dict(
|
||||
choices=LXC_COMPRESSION_MAP.keys(),
|
||||
choices=list(LXC_COMPRESSION_MAP.keys()),
|
||||
default='gzip'
|
||||
)
|
||||
),
|
||||
|
||||
@@ -45,6 +45,7 @@ options:
|
||||
description:
|
||||
- Profile to be used by the container
|
||||
type: list
|
||||
elements: str
|
||||
devices:
|
||||
description:
|
||||
- 'The devices for the container
|
||||
@@ -658,12 +659,13 @@ def main():
|
||||
),
|
||||
profiles=dict(
|
||||
type='list',
|
||||
elements='str',
|
||||
),
|
||||
source=dict(
|
||||
type='dict',
|
||||
),
|
||||
state=dict(
|
||||
choices=LXD_ANSIBLE_STATES.keys(),
|
||||
choices=list(LXD_ANSIBLE_STATES.keys()),
|
||||
default='started'
|
||||
),
|
||||
target=dict(
|
||||
|
||||
@@ -370,7 +370,6 @@ EXAMPLES = r'''
|
||||
state: absent
|
||||
'''
|
||||
|
||||
import os
|
||||
import time
|
||||
import traceback
|
||||
from distutils.version import LooseVersion
|
||||
@@ -381,7 +380,7 @@ try:
|
||||
except ImportError:
|
||||
HAS_PROXMOXER = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.basic import AnsibleModule, env_fallback
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
@@ -506,7 +505,7 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
api_host=dict(required=True),
|
||||
api_password=dict(no_log=True),
|
||||
api_password=dict(no_log=True, fallback=(env_fallback, ['PROXMOX_PASSWORD'])),
|
||||
api_token_id=dict(no_log=True),
|
||||
api_token_secret=dict(no_log=True),
|
||||
api_user=dict(required=True),
|
||||
@@ -538,7 +537,10 @@ def main():
|
||||
description=dict(type='str'),
|
||||
hookscript=dict(type='str'),
|
||||
proxmox_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
|
||||
)
|
||||
),
|
||||
required_if=[('state', 'present', ['node', 'hostname', 'password', 'ostemplate'])],
|
||||
required_together=[('api_token_id', 'api_token_secret')],
|
||||
required_one_of=[('api_password', 'api_token_id')],
|
||||
)
|
||||
|
||||
if not HAS_PROXMOXER:
|
||||
@@ -585,13 +587,7 @@ def main():
|
||||
module.params[param] = value
|
||||
|
||||
auth_args = {'user': api_user}
|
||||
if not (api_token_id and api_token_secret):
|
||||
# If password not set get it from PROXMOX_PASSWORD env
|
||||
if not api_password:
|
||||
try:
|
||||
api_password = os.environ['PROXMOX_PASSWORD']
|
||||
except KeyError as e:
|
||||
module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
|
||||
if not api_token_id:
|
||||
auth_args['password'] = api_password
|
||||
else:
|
||||
auth_args['token_name'] = api_token_id
|
||||
@@ -623,8 +619,6 @@ def main():
|
||||
# If no vmid was passed, there cannot be another VM named 'hostname'
|
||||
if not module.params['vmid'] and get_vmid(proxmox, hostname) and not module.params['force']:
|
||||
module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, get_vmid(proxmox, hostname)[0]))
|
||||
elif not (node, module.params['hostname'] and module.params['password'] and module.params['ostemplate']):
|
||||
module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm')
|
||||
elif not node_check(proxmox, node):
|
||||
module.fail_json(msg="node '%s' not exists in cluster" % node)
|
||||
elif not content_check(proxmox, node, module.params['ostemplate'], template_store):
|
||||
|
||||
@@ -31,6 +31,9 @@ options:
|
||||
description:
|
||||
- Pass arbitrary arguments to kvm.
|
||||
- This option is for experts only!
|
||||
- If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this
|
||||
option has a default of C(-serial unix:/var/run/qemu-server/<vmid>.serial,server,nowait).
|
||||
Note that the default value of I(proxmox_default_behavior) changes in community.general 4.0.0.
|
||||
type: str
|
||||
api_host:
|
||||
description:
|
||||
@@ -774,46 +777,23 @@ EXAMPLES = '''
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
devices:
|
||||
description: The list of devices created or used.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: '
|
||||
{
|
||||
"ide0": "VMS_LVM:vm-115-disk-1",
|
||||
"ide1": "VMs:115/vm-115-disk-3.raw",
|
||||
"virtio0": "VMS_LVM:vm-115-disk-2",
|
||||
"virtio1": "VMs:115/vm-115-disk-1.qcow2",
|
||||
"virtio2": "VMs:115/vm-115-disk-2.raw"
|
||||
}'
|
||||
mac:
|
||||
description: List of mac address created and net[n] attached. Useful when you want to use provision systems like Foreman via PXE.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: '
|
||||
{
|
||||
"net0": "3E:6E:97:D2:31:9F",
|
||||
"net1": "B6:A1:FC:EF:78:A4"
|
||||
}'
|
||||
vmid:
|
||||
description: The VM vmid.
|
||||
returned: success
|
||||
type: int
|
||||
sample: 115
|
||||
description: The VM vmid.
|
||||
returned: success
|
||||
type: int
|
||||
sample: 115
|
||||
status:
|
||||
description:
|
||||
- The current virtual machine status.
|
||||
- Returned only when C(state=current)
|
||||
returned: success
|
||||
type: dict
|
||||
sample: '{
|
||||
"changed": false,
|
||||
"msg": "VM kropta with vmid = 110 is running",
|
||||
"status": "running"
|
||||
}'
|
||||
description: The current virtual machine status.
|
||||
returned: success, not clone, not absent, not update
|
||||
type: str
|
||||
sample: running
|
||||
msg:
|
||||
description: A short message
|
||||
returned: always
|
||||
type: str
|
||||
sample: "VM kropta with vmid = 110 is running"
|
||||
'''
|
||||
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
import traceback
|
||||
@@ -826,7 +806,7 @@ try:
|
||||
except ImportError:
|
||||
HAS_PROXMOXER = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.basic import AnsibleModule, env_fallback
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
@@ -992,9 +972,9 @@ def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sock
|
||||
if searchdomains:
|
||||
kwargs['searchdomain'] = ' '.join(searchdomains)
|
||||
|
||||
# -args and skiplock require root@pam user
|
||||
# -args and skiplock require root@pam user - but can not use api tokens
|
||||
if module.params['api_user'] == "root@pam" and module.params['args'] is None:
|
||||
if not update:
|
||||
if not update and module.params['proxmox_default_behavior'] == 'compatibility':
|
||||
kwargs['args'] = vm_args
|
||||
elif module.params['api_user'] == "root@pam" and module.params['args'] is not None:
|
||||
kwargs['args'] = module.params['args']
|
||||
@@ -1059,7 +1039,7 @@ def main():
|
||||
agent=dict(type='bool'),
|
||||
args=dict(type='str'),
|
||||
api_host=dict(required=True),
|
||||
api_password=dict(no_log=True),
|
||||
api_password=dict(no_log=True, fallback=(env_fallback, ['PROXMOX_PASSWORD'])),
|
||||
api_token_id=dict(no_log=True),
|
||||
api_token_secret=dict(no_log=True),
|
||||
api_user=dict(required=True),
|
||||
@@ -1072,12 +1052,12 @@ def main():
|
||||
cipassword=dict(type='str', no_log=True),
|
||||
citype=dict(type='str', choices=['nocloud', 'configdrive2']),
|
||||
ciuser=dict(type='str'),
|
||||
clone=dict(type='str', default=None),
|
||||
clone=dict(type='str'),
|
||||
cores=dict(type='int'),
|
||||
cpu=dict(type='str'),
|
||||
cpulimit=dict(type='int'),
|
||||
cpuunits=dict(type='int'),
|
||||
delete=dict(type='str', default=None),
|
||||
delete=dict(type='str'),
|
||||
description=dict(type='str'),
|
||||
digest=dict(type='str'),
|
||||
force=dict(type='bool'),
|
||||
@@ -1100,7 +1080,7 @@ def main():
|
||||
name=dict(type='str'),
|
||||
nameservers=dict(type='list', elements='str'),
|
||||
net=dict(type='dict'),
|
||||
newid=dict(type='int', default=None),
|
||||
newid=dict(type='int'),
|
||||
node=dict(),
|
||||
numa=dict(type='dict'),
|
||||
numa_enabled=dict(type='bool'),
|
||||
@@ -1136,13 +1116,14 @@ def main():
|
||||
vcpus=dict(type='int'),
|
||||
vga=dict(choices=['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']),
|
||||
virtio=dict(type='dict'),
|
||||
vmid=dict(type='int', default=None),
|
||||
vmid=dict(type='int'),
|
||||
watchdog=dict(),
|
||||
proxmox_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
|
||||
),
|
||||
mutually_exclusive=[('delete', 'revert'), ('delete', 'update'), ('revert', 'update'), ('clone', 'update'), ('clone', 'delete'), ('clone', 'revert')],
|
||||
required_one_of=[('name', 'vmid',)],
|
||||
required_if=[('state', 'present', ['node'])]
|
||||
required_together=[('api_token_id', 'api_token_secret')],
|
||||
required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')],
|
||||
required_if=[('state', 'present', ['node'])],
|
||||
)
|
||||
|
||||
if not HAS_PROXMOXER:
|
||||
@@ -1184,7 +1165,6 @@ def main():
|
||||
cores=1,
|
||||
cpu='kvm64',
|
||||
cpuunits=1000,
|
||||
force=False,
|
||||
format='qcow2',
|
||||
kvm=True,
|
||||
memory=512,
|
||||
@@ -1203,12 +1183,6 @@ def main():
|
||||
|
||||
auth_args = {'user': api_user}
|
||||
if not (api_token_id and api_token_secret):
|
||||
# If password not set get it from PROXMOX_PASSWORD env
|
||||
if not api_password:
|
||||
try:
|
||||
api_password = os.environ['PROXMOX_PASSWORD']
|
||||
except KeyError:
|
||||
module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
|
||||
auth_args['password'] = api_password
|
||||
else:
|
||||
auth_args['token_name'] = api_token_id
|
||||
@@ -1251,36 +1225,36 @@ def main():
|
||||
|
||||
# Ensure source VM id exists when cloning
|
||||
if not get_vm(proxmox, vmid):
|
||||
module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
|
||||
module.fail_json(vmid=vmid, msg='VM with vmid = %s does not exist in cluster' % vmid)
|
||||
|
||||
# Ensure the choosen VM name doesn't already exist when cloning
|
||||
if get_vmid(proxmox, name):
|
||||
module.exit_json(changed=False, msg="VM with name <%s> already exists" % name)
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM with name <%s> already exists" % name)
|
||||
|
||||
# Ensure the choosen VM id doesn't already exist when cloning
|
||||
if get_vm(proxmox, newid):
|
||||
module.exit_json(changed=False, msg="vmid %s with VM name %s already exists" % (newid, name))
|
||||
module.exit_json(changed=False, vmid=vmid, msg="vmid %s with VM name %s already exists" % (newid, name))
|
||||
|
||||
if delete is not None:
|
||||
try:
|
||||
settings(module, proxmox, vmid, node, name, delete=delete)
|
||||
module.exit_json(changed=True, msg="Settings has deleted on VM {0} with vmid {1}".format(name, vmid))
|
||||
module.exit_json(changed=True, vmid=vmid, msg="Settings has deleted on VM {0} with vmid {1}".format(name, vmid))
|
||||
except Exception as e:
|
||||
module.fail_json(msg='Unable to delete settings on VM {0} with vmid {1}: '.format(name, vmid) + str(e))
|
||||
module.fail_json(vmid=vmid, msg='Unable to delete settings on VM {0} with vmid {1}: '.format(name, vmid) + str(e))
|
||||
|
||||
if revert is not None:
|
||||
try:
|
||||
settings(module, proxmox, vmid, node, name, revert=revert)
|
||||
module.exit_json(changed=True, msg="Settings has reverted on VM {0} with vmid {1}".format(name, vmid))
|
||||
module.exit_json(changed=True, vmid=vmid, msg="Settings has reverted on VM {0} with vmid {1}".format(name, vmid))
|
||||
except Exception as e:
|
||||
module.fail_json(msg='Unable to revert settings on VM {0} with vmid {1}: Maybe is not a pending task... '.format(name, vmid) + str(e))
|
||||
module.fail_json(vmid=vmid, msg='Unable to revert settings on VM {0} with vmid {1}: Maybe is not a pending task... '.format(name, vmid) + str(e))
|
||||
|
||||
if state == 'present':
|
||||
try:
|
||||
if get_vm(proxmox, vmid) and not (update or clone):
|
||||
module.exit_json(changed=False, msg="VM with vmid <%s> already exists" % vmid)
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM with vmid <%s> already exists" % vmid)
|
||||
elif get_vmid(proxmox, name) and not (update or clone):
|
||||
module.exit_json(changed=False, msg="VM with name <%s> already exists" % name)
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM with name <%s> already exists" % name)
|
||||
elif not (node, name):
|
||||
module.fail_json(msg='node, name is mandatory for creating/updating vm')
|
||||
elif not node_check(proxmox, node):
|
||||
@@ -1353,82 +1327,90 @@ def main():
|
||||
scsi=module.params['scsi'],
|
||||
virtio=module.params['virtio'])
|
||||
if update:
|
||||
module.exit_json(changed=True, msg="VM %s with vmid %s updated" % (name, vmid))
|
||||
module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s updated" % (name, vmid))
|
||||
elif clone is not None:
|
||||
module.exit_json(changed=True, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid))
|
||||
module.exit_json(changed=True, vmid=vmid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid))
|
||||
else:
|
||||
module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results)
|
||||
except Exception as e:
|
||||
if update:
|
||||
module.fail_json(msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e))
|
||||
module.fail_json(vmid=vmid, msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e))
|
||||
elif clone is not None:
|
||||
module.fail_json(msg="Unable to clone vm {0} from vmid {1}=".format(name, vmid) + str(e))
|
||||
module.fail_json(vmid=vmid, msg="Unable to clone vm {0} from vmid {1}=".format(name, vmid) + str(e))
|
||||
else:
|
||||
module.fail_json(msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e))
|
||||
module.fail_json(vmid=vmid, msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e))
|
||||
|
||||
elif state == 'started':
|
||||
status = {}
|
||||
try:
|
||||
if -1 == vmid:
|
||||
module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
|
||||
vm = get_vm(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(msg='VM with vmid <%s> does not exist in cluster' % vmid)
|
||||
module.fail_json(vmid=vmid, msg='VM with vmid <%s> does not exist in cluster' % vmid)
|
||||
status['status'] = vm[0]['status']
|
||||
if vm[0]['status'] == 'running':
|
||||
module.exit_json(changed=False, msg="VM %s is already running" % vmid)
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM %s is already running" % vmid, **status)
|
||||
|
||||
if start_vm(module, proxmox, vm):
|
||||
module.exit_json(changed=True, msg="VM %s started" % vmid)
|
||||
module.exit_json(changed=True, vmid=vmid, msg="VM %s started" % vmid, **status)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))
|
||||
module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e), **status)
|
||||
|
||||
elif state == 'stopped':
|
||||
status = {}
|
||||
try:
|
||||
if -1 == vmid:
|
||||
module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
|
||||
|
||||
vm = get_vm(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
|
||||
module.fail_json(vmid=vmid, msg='VM with vmid = %s does not exist in cluster' % vmid)
|
||||
|
||||
status['status'] = vm[0]['status']
|
||||
if vm[0]['status'] == 'stopped':
|
||||
module.exit_json(changed=False, msg="VM %s is already stopped" % vmid)
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM %s is already stopped" % vmid, **status)
|
||||
|
||||
if stop_vm(module, proxmox, vm, force=module.params['force']):
|
||||
module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
|
||||
module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid, **status)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e))
|
||||
module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e), **status)
|
||||
|
||||
elif state == 'restarted':
|
||||
status = {}
|
||||
try:
|
||||
if -1 == vmid:
|
||||
module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
|
||||
|
||||
vm = get_vm(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
|
||||
module.fail_json(vmid=vmid, msg='VM with vmid = %s does not exist in cluster' % vmid)
|
||||
status['status'] = vm[0]['status']
|
||||
if vm[0]['status'] == 'stopped':
|
||||
module.exit_json(changed=False, msg="VM %s is not running" % vmid)
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid, **status)
|
||||
|
||||
if stop_vm(module, proxmox, vm, force=module.params['force']) and start_vm(module, proxmox, vm):
|
||||
module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
|
||||
module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid, **status)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e))
|
||||
module.fail_json(vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e), **status)
|
||||
|
||||
elif state == 'absent':
|
||||
status = {}
|
||||
try:
|
||||
vm = get_vm(proxmox, vmid)
|
||||
if not vm:
|
||||
module.exit_json(changed=False)
|
||||
module.exit_json(changed=False, vmid=vmid)
|
||||
|
||||
proxmox_node = proxmox.nodes(vm[0]['node'])
|
||||
status['status'] = vm[0]['status']
|
||||
if vm[0]['status'] == 'running':
|
||||
module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM %s is running. Stop it before deletion." % vmid)
|
||||
taskid = proxmox_node.qemu.delete(vmid)
|
||||
if not wait_for_task(module, proxmox, vm[0]['node'], taskid):
|
||||
module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s' %
|
||||
proxmox_node.tasks(taskid).log.get()[:1])
|
||||
else:
|
||||
module.exit_json(changed=True, msg="VM %s removed" % vmid)
|
||||
module.exit_json(changed=True, vmid=vmid, msg="VM %s removed" % vmid)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, e))
|
||||
|
||||
@@ -1439,10 +1421,12 @@ def main():
|
||||
vm = get_vm(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
|
||||
if not name:
|
||||
name = vm[0]['name']
|
||||
current = proxmox.nodes(vm[0]['node']).qemu(vmid).status.current.get()['status']
|
||||
status['status'] = current
|
||||
if status:
|
||||
module.exit_json(changed=False, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status)
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
@@ -152,7 +152,7 @@ try:
|
||||
except ImportError:
|
||||
HAS_PROXMOXER = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.basic import AnsibleModule, env_fallback
|
||||
|
||||
|
||||
def get_template(proxmox, node, storage, content_type, template):
|
||||
@@ -205,7 +205,7 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
api_host=dict(required=True),
|
||||
api_password=dict(no_log=True),
|
||||
api_password=dict(no_log=True, fallback=(env_fallback, ['PROXMOX_PASSWORD'])),
|
||||
api_token_id=dict(no_log=True),
|
||||
api_token_secret=dict(no_log=True),
|
||||
api_user=dict(required=True),
|
||||
@@ -218,7 +218,10 @@ def main():
|
||||
timeout=dict(type='int', default=30),
|
||||
force=dict(type='bool', default=False),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
)
|
||||
),
|
||||
required_together=[('api_token_id', 'api_token_secret')],
|
||||
required_one_of=[('api_password', 'api_token_id')],
|
||||
required_if=[('state', 'absent', ['template'])]
|
||||
)
|
||||
|
||||
if not HAS_PROXMOXER:
|
||||
@@ -237,12 +240,6 @@ def main():
|
||||
|
||||
auth_args = {'user': api_user}
|
||||
if not (api_token_id and api_token_secret):
|
||||
# If password not set get it from PROXMOX_PASSWORD env
|
||||
if not api_password:
|
||||
try:
|
||||
api_password = os.environ['PROXMOX_PASSWORD']
|
||||
except KeyError as e:
|
||||
module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
|
||||
auth_args['password'] = api_password
|
||||
else:
|
||||
auth_args['token_name'] = api_token_id
|
||||
@@ -291,9 +288,7 @@ def main():
|
||||
content_type = module.params['content_type']
|
||||
template = module.params['template']
|
||||
|
||||
if not template:
|
||||
module.fail_json(msg='template param is mandatory')
|
||||
elif not get_template(proxmox, node, storage, content_type, template):
|
||||
if not get_template(proxmox, node, storage, content_type, template):
|
||||
module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template))
|
||||
|
||||
if delete_template(module, proxmox, node, storage, content_type, template, timeout):
|
||||
|
||||
@@ -695,15 +695,15 @@ def update_monitoring_policy(module, oneandone_conn):
|
||||
threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']
|
||||
|
||||
_thresholds = []
|
||||
for treshold in thresholds:
|
||||
key = treshold.keys()[0]
|
||||
for threshold in thresholds:
|
||||
key = list(threshold.keys())[0]
|
||||
if key in threshold_entities:
|
||||
_threshold = oneandone.client.Threshold(
|
||||
entity=key,
|
||||
warning_value=treshold[key]['warning']['value'],
|
||||
warning_alert=str(treshold[key]['warning']['alert']).lower(),
|
||||
critical_value=treshold[key]['critical']['value'],
|
||||
critical_alert=str(treshold[key]['critical']['alert']).lower())
|
||||
warning_value=threshold[key]['warning']['value'],
|
||||
warning_alert=str(threshold[key]['warning']['alert']).lower(),
|
||||
critical_value=threshold[key]['critical']['value'],
|
||||
critical_alert=str(threshold[key]['critical']['alert']).lower())
|
||||
_thresholds.append(_threshold)
|
||||
|
||||
if name or description or email or thresholds:
|
||||
@@ -864,15 +864,15 @@ def create_monitoring_policy(module, oneandone_conn):
|
||||
threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']
|
||||
|
||||
_thresholds = []
|
||||
for treshold in thresholds:
|
||||
key = treshold.keys()[0]
|
||||
for threshold in thresholds:
|
||||
key = list(threshold.keys())[0]
|
||||
if key in threshold_entities:
|
||||
_threshold = oneandone.client.Threshold(
|
||||
entity=key,
|
||||
warning_value=treshold[key]['warning']['value'],
|
||||
warning_alert=str(treshold[key]['warning']['alert']).lower(),
|
||||
critical_value=treshold[key]['critical']['value'],
|
||||
critical_alert=str(treshold[key]['critical']['alert']).lower())
|
||||
warning_value=threshold[key]['warning']['value'],
|
||||
warning_alert=str(threshold[key]['warning']['alert']).lower(),
|
||||
critical_value=threshold[key]['critical']['value'],
|
||||
critical_alert=str(threshold[key]['critical']['alert']).lower())
|
||||
_thresholds.append(_threshold)
|
||||
|
||||
_ports = []
|
||||
|
||||
@@ -128,7 +128,7 @@ def update_vcn(virtual_network_client, module):
|
||||
primitive_params_update=["vcn_id"],
|
||||
kwargs_non_primitive_update={UpdateVcnDetails: "update_vcn_details"},
|
||||
module=module,
|
||||
update_attributes=UpdateVcnDetails().attribute_map.keys(),
|
||||
update_attributes=list(UpdateVcnDetails().attribute_map.keys()),
|
||||
)
|
||||
return result
|
||||
|
||||
|
||||
@@ -137,17 +137,13 @@ def main():
|
||||
host = search_by_name(hosts_service, module.params['host'])
|
||||
if host is None:
|
||||
raise Exception("Host '%s' was not found." % module.params['host'])
|
||||
tags.extend([
|
||||
tag for tag in hosts_service.host_service(host.id).tags_service().list()
|
||||
])
|
||||
tags.extend(hosts_service.host_service(host.id).tags_service().list())
|
||||
if module.params['vm']:
|
||||
vms_service = connection.system_service().vms_service()
|
||||
vm = search_by_name(vms_service, module.params['vm'])
|
||||
if vm is None:
|
||||
raise Exception("Vm '%s' was not found." % module.params['vm'])
|
||||
tags.extend([
|
||||
tag for tag in vms_service.vm_service(vm.id).tags_service().list()
|
||||
])
|
||||
tags.extend(vms_service.vm_service(vm.id).tags_service().list())
|
||||
|
||||
if not (module.params['vm'] or module.params['host'] or module.params['name']):
|
||||
tags = all_tags
|
||||
|
||||
@@ -31,20 +31,25 @@ options:
|
||||
auth_token:
|
||||
description:
|
||||
- Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
|
||||
type: str
|
||||
|
||||
count:
|
||||
description:
|
||||
- The number of devices to create. Count number can be included in hostname via the %d string formatter.
|
||||
default: 1
|
||||
type: int
|
||||
|
||||
count_offset:
|
||||
description:
|
||||
- From which number to start the count.
|
||||
default: 1
|
||||
type: int
|
||||
|
||||
device_ids:
|
||||
description:
|
||||
- List of device IDs on which to operate.
|
||||
type: list
|
||||
elements: str
|
||||
|
||||
tags:
|
||||
description:
|
||||
@@ -57,10 +62,12 @@ options:
|
||||
facility:
|
||||
description:
|
||||
- Facility slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/facilities/).
|
||||
type: str
|
||||
|
||||
features:
|
||||
description:
|
||||
- Dict with "features" for device creation. See Packet API docs for details.
|
||||
type: dict
|
||||
|
||||
hostnames:
|
||||
description:
|
||||
@@ -68,6 +75,8 @@ options:
|
||||
- If given string or one-item list, you can use the C("%d") Python string format to expand numbers from I(count).
|
||||
- If only one hostname, it might be expanded to list if I(count)>1.
|
||||
aliases: [name]
|
||||
type: list
|
||||
elements: str
|
||||
|
||||
locked:
|
||||
description:
|
||||
@@ -79,15 +88,18 @@ options:
|
||||
operating_system:
|
||||
description:
|
||||
- OS slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/operatingsystems/).
|
||||
type: str
|
||||
|
||||
plan:
|
||||
description:
|
||||
- Plan slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/plans/).
|
||||
type: str
|
||||
|
||||
project_id:
|
||||
description:
|
||||
- ID of project of the device.
|
||||
required: true
|
||||
type: str
|
||||
|
||||
state:
|
||||
description:
|
||||
@@ -96,10 +108,12 @@ options:
|
||||
- If set to C(active), the module call will block until all the specified devices are in state active due to the Packet API, or until I(wait_timeout).
|
||||
choices: [present, absent, active, inactive, rebooted]
|
||||
default: present
|
||||
type: str
|
||||
|
||||
user_data:
|
||||
description:
|
||||
- Userdata blob made available to the machine
|
||||
type: str
|
||||
|
||||
wait_for_public_IPv:
|
||||
description:
|
||||
@@ -107,16 +121,21 @@ options:
|
||||
- If set to 4, it will wait until IPv4 is assigned to the instance.
|
||||
- If set to 6, wait until public IPv6 is assigned to the instance.
|
||||
choices: [4,6]
|
||||
type: int
|
||||
|
||||
wait_timeout:
|
||||
description:
|
||||
- How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the C(active) I(state).
|
||||
- If I(wait_for_public_IPv) is set and I(state) is C(active), the module will wait for both events consequently, applying the timeout twice.
|
||||
default: 900
|
||||
type: int
|
||||
|
||||
ipxe_script_url:
|
||||
description:
|
||||
- URL of custom iPXE script for provisioning.
|
||||
- More about custom iPXE for Packet devices at U(https://help.packet.net/technical/infrastructure/custom-ipxe).
|
||||
type: str
|
||||
|
||||
always_pxe:
|
||||
description:
|
||||
- Persist PXE as the first boot option.
|
||||
@@ -601,10 +620,10 @@ def main():
|
||||
no_log=True),
|
||||
count=dict(type='int', default=1),
|
||||
count_offset=dict(type='int', default=1),
|
||||
device_ids=dict(type='list'),
|
||||
device_ids=dict(type='list', elements='str'),
|
||||
facility=dict(),
|
||||
features=dict(type='dict'),
|
||||
hostnames=dict(type='list', aliases=['name']),
|
||||
hostnames=dict(type='list', elements='str', aliases=['name']),
|
||||
tags=dict(type='list', elements='str'),
|
||||
locked=dict(type='bool', default=False, aliases=['lock']),
|
||||
operating_system=dict(),
|
||||
|
||||
@@ -111,6 +111,7 @@ options:
|
||||
required: false
|
||||
default: []
|
||||
type: list
|
||||
elements: dict
|
||||
changes:
|
||||
description:
|
||||
- "List of fields which should be changed by block itself (doesn't
|
||||
@@ -552,7 +553,7 @@ def main():
|
||||
state=dict(default='present', type='str',
|
||||
choices=['started', 'stopped', 'present', 'absent']),
|
||||
name=dict(required=True, type='str'), description=dict(type='str'),
|
||||
event_handlers=dict(default=list(), type='list'),
|
||||
event_handlers=dict(default=list(), type='list', elements='dict'),
|
||||
changes=dict(default=dict(), type='dict'),
|
||||
cache=dict(default=dict(), type='dict'),
|
||||
validate_certs=dict(default=True, type='bool'))
|
||||
|
||||
@@ -78,6 +78,7 @@ options:
|
||||
description:
|
||||
- A list of disks to add, valid properties are documented in vmadm(1M).
|
||||
type: list
|
||||
elements: dict
|
||||
dns_domain:
|
||||
required: false
|
||||
description:
|
||||
@@ -93,6 +94,7 @@ options:
|
||||
description:
|
||||
- Mount additional filesystems into an OS VM.
|
||||
type: list
|
||||
elements: dict
|
||||
firewall_enabled:
|
||||
required: false
|
||||
description:
|
||||
@@ -198,6 +200,7 @@ options:
|
||||
description:
|
||||
- A list of nics to add, valid properties are documented in vmadm(1M).
|
||||
type: list
|
||||
elements: dict
|
||||
nowait:
|
||||
required: false
|
||||
description:
|
||||
@@ -230,6 +233,7 @@ options:
|
||||
description:
|
||||
- List of resolvers to be put into C(/etc/resolv.conf).
|
||||
type: list
|
||||
elements: dict
|
||||
routes:
|
||||
required: false
|
||||
description:
|
||||
@@ -670,7 +674,6 @@ def main():
|
||||
'zfs_snapshot_limit'
|
||||
],
|
||||
'dict': ['customer_metadata', 'internal_metadata', 'routes'],
|
||||
'list': ['disks', 'nics', 'resolvers', 'filesystems']
|
||||
}
|
||||
|
||||
# Start with the options that are not as trivial as those above.
|
||||
@@ -697,6 +700,10 @@ def main():
|
||||
# Regular strings, however these require additional options.
|
||||
spice_password=dict(type='str', no_log=True),
|
||||
vnc_password=dict(type='str', no_log=True),
|
||||
disks=dict(type='list', elements='dict'),
|
||||
nics=dict(type='list', elements='dict'),
|
||||
resolvers=dict(type='list', elements='dict'),
|
||||
filesystems=dict(type='list', elements='dict'),
|
||||
)
|
||||
|
||||
# Add our 'simple' options to options dict.
|
||||
|
||||
@@ -115,6 +115,7 @@ options:
|
||||
- List of disk sizes to be assigned to new virtual instance.
|
||||
default: [ 25 ]
|
||||
type: list
|
||||
elements: int
|
||||
os_code:
|
||||
description:
|
||||
- OS Code to be used for new virtual instance.
|
||||
@@ -140,6 +141,7 @@ options:
|
||||
description:
|
||||
- List of ssh keys by their Id to be assigned to a virtual instance.
|
||||
type: list
|
||||
elements: str
|
||||
post_uri:
|
||||
description:
|
||||
- URL of a post provisioning script to be loaded and executed on virtual instance.
|
||||
@@ -396,13 +398,13 @@ def main():
|
||||
cpus=dict(type='int', choices=CPU_SIZES),
|
||||
memory=dict(type='int', choices=MEMORY_SIZES),
|
||||
flavor=dict(type='str'),
|
||||
disks=dict(type='list', default=[25]),
|
||||
disks=dict(type='list', elements='int', default=[25]),
|
||||
os_code=dict(type='str'),
|
||||
image_id=dict(type='str'),
|
||||
nic_speed=dict(type='int', choices=NIC_SPEEDS),
|
||||
public_vlan=dict(type='str'),
|
||||
private_vlan=dict(type='str'),
|
||||
ssh_keys=dict(type='list', default=[]),
|
||||
ssh_keys=dict(type='list', elements='str', default=[]),
|
||||
post_uri=dict(type='str'),
|
||||
state=dict(type='str', default='present', choices=STATES),
|
||||
wait=dict(type='bool', default=True),
|
||||
|
||||
@@ -1305,10 +1305,8 @@ def expand_tags(eg_launchspec, tags):
|
||||
|
||||
for tag in tags:
|
||||
eg_tag = spotinst.aws_elastigroup.Tag()
|
||||
if tag.keys():
|
||||
eg_tag.tag_key = tag.keys()[0]
|
||||
if tag.values():
|
||||
eg_tag.tag_value = tag.values()[0]
|
||||
if tag:
|
||||
eg_tag.tag_key, eg_tag.tag_value = list(tag.items())[0]
|
||||
|
||||
eg_tags.append(eg_tag)
|
||||
|
||||
|
||||
@@ -312,12 +312,11 @@ def run():
|
||||
)
|
||||
|
||||
changed = False
|
||||
nomad_jobs = list()
|
||||
result = list()
|
||||
try:
|
||||
job_list = nomad_client.jobs.get_jobs()
|
||||
for job in job_list:
|
||||
nomad_jobs.append(nomad_client.job.get_job(job.get('ID')))
|
||||
result = nomad_jobs
|
||||
result.append(nomad_client.job.get_job(job.get('ID')))
|
||||
except Exception as e:
|
||||
module.fail_json(msg=to_native(e))
|
||||
|
||||
|
||||
@@ -115,7 +115,6 @@ EXAMPLES = '''
|
||||
local_only: False
|
||||
|
||||
# example playbook:
|
||||
---
|
||||
- name: Upgrade aerospike
|
||||
hosts: all
|
||||
become: true
|
||||
@@ -338,7 +337,7 @@ class Migrations:
|
||||
namespace_tx = \
|
||||
int(namespace_stats[self.module.params['migrate_tx_key']])
|
||||
namespace_rx = \
|
||||
int(namespace_stats[self.module.params['migrate_tx_key']])
|
||||
int(namespace_stats[self.module.params['migrate_rx_key']])
|
||||
except KeyError:
|
||||
self.module.fail_json(
|
||||
msg="Did not find partition remaining key:" +
|
||||
|
||||
@@ -22,11 +22,13 @@ options:
|
||||
description:
|
||||
- Name of the plugin to install.
|
||||
required: True
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Desired state of a plugin.
|
||||
choices: ["present", "absent"]
|
||||
default: present
|
||||
type: str
|
||||
src:
|
||||
description:
|
||||
- Optionally set the source location to retrieve the plugin from. This can be a file://
|
||||
@@ -38,16 +40,19 @@ options:
|
||||
effect.
|
||||
- For ES 1.x use url.
|
||||
required: False
|
||||
type: str
|
||||
url:
|
||||
description:
|
||||
- Set exact URL to download the plugin from (Only works for ES 1.x).
|
||||
- For ES 2.x and higher, use src.
|
||||
required: False
|
||||
type: str
|
||||
timeout:
|
||||
description:
|
||||
- "Timeout setting: 30s, 1m, 1h..."
|
||||
- Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch > 5.0.
|
||||
default: 1m
|
||||
type: str
|
||||
force:
|
||||
description:
|
||||
- "Force batch mode when installing plugins. This is only necessary if a plugin requires additional permissions and console detection fails."
|
||||
@@ -57,20 +62,25 @@ options:
|
||||
description:
|
||||
- Location of the plugin binary. If this file is not found, the default plugin binaries will be used.
|
||||
- The default changed in Ansible 2.4 to None.
|
||||
type: path
|
||||
plugin_dir:
|
||||
description:
|
||||
- Your configured plugin directory specified in Elasticsearch
|
||||
default: /usr/share/elasticsearch/plugins/
|
||||
type: path
|
||||
proxy_host:
|
||||
description:
|
||||
- Proxy host to use during plugin installation
|
||||
type: str
|
||||
proxy_port:
|
||||
description:
|
||||
- Proxy port to use during plugin installation
|
||||
type: str
|
||||
version:
|
||||
description:
|
||||
- Version of the plugin to be installed.
|
||||
If plugin exists with previous version, it will NOT be updated
|
||||
type: str
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
@@ -241,7 +251,7 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
name=dict(required=True),
|
||||
state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
|
||||
state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())),
|
||||
src=dict(default=None),
|
||||
url=dict(default=None),
|
||||
timeout=dict(default="1m"),
|
||||
|
||||
@@ -22,31 +22,38 @@ options:
|
||||
description:
|
||||
- Name of the plugin to install.
|
||||
required: True
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Desired state of a plugin.
|
||||
choices: ["present", "absent"]
|
||||
default: present
|
||||
type: str
|
||||
url:
|
||||
description:
|
||||
- Set exact URL to download the plugin from.
|
||||
- For local file, prefix its absolute path with file://
|
||||
type: str
|
||||
timeout:
|
||||
description:
|
||||
- "Timeout setting: 30s, 1m, 1h etc."
|
||||
default: 1m
|
||||
type: str
|
||||
plugin_bin:
|
||||
description:
|
||||
- Location of the Kibana binary.
|
||||
default: /opt/kibana/bin/kibana
|
||||
type: path
|
||||
plugin_dir:
|
||||
description:
|
||||
- Your configured plugin directory specified in Kibana.
|
||||
default: /opt/kibana/installedPlugins/
|
||||
type: path
|
||||
version:
|
||||
description:
|
||||
- Version of the plugin to be installed.
|
||||
- If plugin exists with previous version, plugin will NOT be updated unless C(force) is set to yes.
|
||||
type: str
|
||||
force:
|
||||
description:
|
||||
- Delete and re-install the plugin. Can be useful for plugins update.
|
||||
@@ -209,7 +216,7 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
name=dict(required=True),
|
||||
state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
|
||||
state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())),
|
||||
url=dict(default=None),
|
||||
timeout=dict(default="1m"),
|
||||
plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"),
|
||||
|
||||
@@ -23,28 +23,37 @@ options:
|
||||
description:
|
||||
- The command you would like to perform against the cluster.
|
||||
choices: ['ping', 'kv_test', 'join', 'plan', 'commit']
|
||||
type: str
|
||||
config_dir:
|
||||
description:
|
||||
- The path to the riak configuration directory
|
||||
default: /etc/riak
|
||||
type: path
|
||||
http_conn:
|
||||
description:
|
||||
- The ip address and port that is listening for Riak HTTP queries
|
||||
default: 127.0.0.1:8098
|
||||
type: str
|
||||
target_node:
|
||||
description:
|
||||
- The target node for certain operations (join, ping)
|
||||
default: riak@127.0.0.1
|
||||
type: str
|
||||
wait_for_handoffs:
|
||||
description:
|
||||
- Number of seconds to wait for handoffs to complete.
|
||||
type: int
|
||||
default: 0
|
||||
wait_for_ring:
|
||||
description:
|
||||
- Number of seconds to wait for all nodes to agree on the ring.
|
||||
type: int
|
||||
default: 0
|
||||
wait_for_service:
|
||||
description:
|
||||
- Waits for a riak service to come online before continuing.
|
||||
choices: ['kv']
|
||||
type: str
|
||||
validate_certs:
|
||||
description:
|
||||
- If C(no), SSL certificates will not be validated. This should only be used
|
||||
@@ -93,8 +102,8 @@ def main():
|
||||
config_dir=dict(default='/etc/riak', type='path'),
|
||||
http_conn=dict(required=False, default='127.0.0.1:8098'),
|
||||
target_node=dict(default='riak@127.0.0.1', required=False),
|
||||
wait_for_handoffs=dict(default=False, type='int'),
|
||||
wait_for_ring=dict(default=False, type='int'),
|
||||
wait_for_handoffs=dict(default=0, type='int'),
|
||||
wait_for_ring=dict(default=0, type='int'),
|
||||
wait_for_service=dict(
|
||||
required=False, default=None, choices=['kv']),
|
||||
validate_certs=dict(default=True, type='bool'))
|
||||
|
||||
@@ -926,7 +926,7 @@ class PgClusterInfo(object):
|
||||
raw = raw.split()[1].split('.')
|
||||
self.pg_info["version"] = dict(
|
||||
major=int(raw[0]),
|
||||
minor=int(raw[1]),
|
||||
minor=int(raw[1].rstrip(',')),
|
||||
)
|
||||
|
||||
def get_recovery_state(self):
|
||||
|
||||
@@ -116,7 +116,7 @@ class PgPing(object):
|
||||
raw = raw.split()[1].split('.')
|
||||
self.version = dict(
|
||||
major=int(raw[0]),
|
||||
minor=int(raw[1]),
|
||||
minor=int(raw[1].rstrip(',')),
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -171,6 +171,27 @@ EXAMPLES = r'''
|
||||
search_path:
|
||||
- app1
|
||||
- public
|
||||
|
||||
# If you use a variable in positional_args / named_args that can
|
||||
# be undefined and you wish to set it as NULL, the constructions like
|
||||
# "{{ my_var if (my_var is defined) else none | default(none) }}"
|
||||
# will not work as expected substituting an empty string instead of NULL.
|
||||
# If possible, we suggest to use Ansible's DEFAULT_JINJA2_NATIVE configuration
|
||||
# (https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-jinja2-native).
|
||||
# Enabling it fixes this problem. If you cannot enable it, the following workaround
|
||||
# can be used.
|
||||
# You should precheck such a value and define it as NULL when undefined.
|
||||
# For example:
|
||||
- name: When undefined, set to NULL
|
||||
set_fact:
|
||||
my_var: NULL
|
||||
when: my_var is undefined
|
||||
# Then:
|
||||
- name: Insert a value using positional arguments
|
||||
community.postgresql.postgresql_query:
|
||||
query: INSERT INTO test_table (col1) VALUES (%s)
|
||||
positional_args:
|
||||
- '{{ my_var }}'
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
@@ -222,6 +243,9 @@ rowcount:
|
||||
sample: 5
|
||||
'''
|
||||
|
||||
import datetime
|
||||
import decimal
|
||||
|
||||
try:
|
||||
from psycopg2 import ProgrammingError as Psycopg2ProgrammingError
|
||||
from psycopg2.extras import DictCursor
|
||||
@@ -389,8 +413,20 @@ def main():
|
||||
if cursor.rowcount > 0:
|
||||
rowcount += cursor.rowcount
|
||||
|
||||
query_result = []
|
||||
try:
|
||||
query_result = [dict(row) for row in cursor.fetchall()]
|
||||
for row in cursor.fetchall():
|
||||
# Ansible engine does not support decimals.
|
||||
# An explicit conversion is required on the module's side
|
||||
row = dict(row)
|
||||
for (key, val) in iteritems(row):
|
||||
if isinstance(val, decimal.Decimal):
|
||||
row[key] = float(val)
|
||||
|
||||
elif isinstance(val, datetime.timedelta):
|
||||
row[key] = str(val)
|
||||
|
||||
query_result.append(row)
|
||||
|
||||
except Psycopg2ProgrammingError as e:
|
||||
if to_native(e) == 'no results to fetch':
|
||||
|
||||
@@ -180,7 +180,7 @@ from ansible.module_utils._text import to_native
|
||||
PG_REQ_VER = 90400
|
||||
|
||||
# To allow to set value like 1mb instead of 1MB, etc:
|
||||
POSSIBLE_SIZE_UNITS = ("mb", "gb", "tb")
|
||||
LOWERCASE_SIZE_UNITS = ("mb", "gb", "tb")
|
||||
|
||||
# ===========================================
|
||||
# PostgreSQL module specific support methods.
|
||||
@@ -199,6 +199,11 @@ def param_get(cursor, module, name):
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
|
||||
|
||||
if not info:
|
||||
module.fail_json(msg="No such parameter: %s. "
|
||||
"Please check its spelling or presence in your PostgreSQL version "
|
||||
"(https://www.postgresql.org/docs/current/runtime-config.html)" % name)
|
||||
|
||||
raw_val = info[0][1]
|
||||
unit = info[0][2]
|
||||
context = info[0][3]
|
||||
@@ -233,32 +238,55 @@ def pretty_to_bytes(pretty_val):
|
||||
# if the value contains 'B', 'kB', 'MB', 'GB', 'TB'.
|
||||
# Otherwise it returns the passed argument.
|
||||
|
||||
val_in_bytes = None
|
||||
|
||||
if 'kB' in pretty_val:
|
||||
num_part = int(''.join(d for d in pretty_val if d.isdigit()))
|
||||
val_in_bytes = num_part * 1024
|
||||
|
||||
elif 'MB' in pretty_val.upper():
|
||||
num_part = int(''.join(d for d in pretty_val if d.isdigit()))
|
||||
val_in_bytes = num_part * 1024 * 1024
|
||||
|
||||
elif 'GB' in pretty_val.upper():
|
||||
num_part = int(''.join(d for d in pretty_val if d.isdigit()))
|
||||
val_in_bytes = num_part * 1024 * 1024 * 1024
|
||||
|
||||
elif 'TB' in pretty_val.upper():
|
||||
num_part = int(''.join(d for d in pretty_val if d.isdigit()))
|
||||
val_in_bytes = num_part * 1024 * 1024 * 1024 * 1024
|
||||
|
||||
elif 'B' in pretty_val.upper():
|
||||
num_part = int(''.join(d for d in pretty_val if d.isdigit()))
|
||||
val_in_bytes = num_part
|
||||
|
||||
else:
|
||||
# It's sometimes possible to have an empty values
|
||||
if not pretty_val:
|
||||
return pretty_val
|
||||
|
||||
return val_in_bytes
|
||||
# If the first char is not a digit, it does not make sense
|
||||
# to parse further, so just return a passed value
|
||||
if not pretty_val[0].isdigit():
|
||||
return pretty_val
|
||||
|
||||
# If the last char is not an alphabetical symbol, it means that
|
||||
# it does not contain any suffixes, so no sense to parse further
|
||||
if not pretty_val[-1].isalpha():
|
||||
return pretty_val
|
||||
|
||||
# Extract digits
|
||||
num_part = []
|
||||
for c in pretty_val:
|
||||
# When we reach the first non-digit element,
|
||||
# e.g. in 1024kB, stop iterating
|
||||
if not c.isdigit():
|
||||
break
|
||||
else:
|
||||
num_part.append(c)
|
||||
|
||||
num_part = ''.join(num_part)
|
||||
|
||||
val_in_bytes = None
|
||||
|
||||
if len(pretty_val) >= 2:
|
||||
if 'kB' in pretty_val[-2:]:
|
||||
val_in_bytes = num_part * 1024
|
||||
|
||||
elif 'MB' in pretty_val[-2:]:
|
||||
val_in_bytes = num_part * 1024 * 1024
|
||||
|
||||
elif 'GB' in pretty_val[-2:]:
|
||||
val_in_bytes = num_part * 1024 * 1024 * 1024
|
||||
|
||||
elif 'TB' in pretty_val[-2:]:
|
||||
val_in_bytes = num_part * 1024 * 1024 * 1024 * 1024
|
||||
|
||||
# For cases like "1B"
|
||||
if not val_in_bytes and 'B' in pretty_val[-1]:
|
||||
val_in_bytes = num_part
|
||||
|
||||
if val_in_bytes is not None:
|
||||
return val_in_bytes
|
||||
else:
|
||||
return pretty_val
|
||||
|
||||
|
||||
def param_set(cursor, module, name, value, context):
|
||||
@@ -308,11 +336,14 @@ def main():
|
||||
# Check input for potentially dangerous elements:
|
||||
check_input(module, name, value, session_role)
|
||||
|
||||
# Allow to pass values like 1mb instead of 1MB, etc:
|
||||
if value:
|
||||
for unit in POSSIBLE_SIZE_UNITS:
|
||||
if value[:-2].isdigit() and unit in value[-2:]:
|
||||
value = value.upper()
|
||||
# Convert a value like 1mb (Postgres does not support) to 1MB, etc:
|
||||
if len(value) > 2 and value[:-2].isdigit() and value[-2:] in LOWERCASE_SIZE_UNITS:
|
||||
value = value.upper()
|
||||
|
||||
# Convert a value like 1b (Postgres does not support) to 1B:
|
||||
elif len(value) > 1 and ('b' in value[-1] and value[:-1].isdigit()):
|
||||
value = value.upper()
|
||||
|
||||
if value is not None and reset:
|
||||
module.fail_json(msg="%s: value and reset params are mutually exclusive" % name)
|
||||
|
||||
@@ -107,6 +107,7 @@ options:
|
||||
no_password_changes:
|
||||
description:
|
||||
- If C(yes), does not inspect the database for password changes.
|
||||
If the user already exists, skips all password related checks.
|
||||
Useful when C(pg_authid) is not accessible (such as in AWS RDS).
|
||||
Otherwise, makes password changes as necessary.
|
||||
default: no
|
||||
@@ -156,6 +157,10 @@ notes:
|
||||
On the previous versions the whole hashed string is used as a password.
|
||||
- 'Working with SCRAM-SHA-256-hashed passwords, be sure you use the I(environment:) variable
|
||||
C(PGOPTIONS: "-c password_encryption=scram-sha-256") (see the provided example).'
|
||||
- On some systems (such as AWS RDS), C(pg_authid) is not accessible, thus, the module cannot compare
|
||||
the current and desired C(password). In this case, the module assumes that the passwords are
|
||||
different and changes it reporting that the state has been changed.
|
||||
To skip all password related checks for existing users, use I(no_password_changes=yes).
|
||||
- Supports ``check_mode``.
|
||||
seealso:
|
||||
- module: community.general.postgresql_privs
|
||||
|
||||
@@ -14,11 +14,11 @@ short_description: Updates Vertica configuration parameters.
|
||||
description:
|
||||
- Updates Vertica configuration parameters.
|
||||
options:
|
||||
name:
|
||||
parameter:
|
||||
description:
|
||||
- Name of the parameter to update.
|
||||
required: true
|
||||
aliases: [parameter]
|
||||
aliases: [name]
|
||||
type: str
|
||||
value:
|
||||
description:
|
||||
|
||||
@@ -21,25 +21,30 @@ options:
|
||||
description:
|
||||
- Name of the cluster running the schema.
|
||||
default: localhost
|
||||
type: str
|
||||
port:
|
||||
description:
|
||||
Database port to connect to.
|
||||
default: 5433
|
||||
type: str
|
||||
db:
|
||||
description:
|
||||
- Name of the database running the schema.
|
||||
type: str
|
||||
login_user:
|
||||
description:
|
||||
- The username used to authenticate with.
|
||||
default: dbadmin
|
||||
type: str
|
||||
login_password:
|
||||
description:
|
||||
- The password used to authenticate with.
|
||||
type: str
|
||||
notes:
|
||||
- The default authentication assumes that you are either logging in as or sudo'ing
|
||||
to the C(dbadmin) account on the host.
|
||||
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
|
||||
that C(unixODBC) and C(pyodbc) is installed on the host and properly configured.
|
||||
that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
|
||||
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
|
||||
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
|
||||
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
|
||||
|
||||
@@ -15,37 +15,46 @@ short_description: Adds or removes Vertica database roles and assigns roles to t
|
||||
description:
|
||||
- Adds or removes Vertica database role and, optionally, assign other roles.
|
||||
options:
|
||||
name:
|
||||
role:
|
||||
description:
|
||||
- Name of the role to add or remove.
|
||||
required: true
|
||||
type: str
|
||||
aliases: ['name']
|
||||
assigned_roles:
|
||||
description:
|
||||
- Comma separated list of roles to assign to the role.
|
||||
aliases: ['assigned_role']
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Whether to create C(present), drop C(absent) or lock C(locked) a role.
|
||||
choices: ['present', 'absent']
|
||||
default: present
|
||||
type: str
|
||||
db:
|
||||
description:
|
||||
- Name of the Vertica database.
|
||||
type: str
|
||||
cluster:
|
||||
description:
|
||||
- Name of the Vertica cluster.
|
||||
default: localhost
|
||||
type: str
|
||||
port:
|
||||
description:
|
||||
- Vertica cluster port to connect to.
|
||||
default: 5433
|
||||
type: str
|
||||
login_user:
|
||||
description:
|
||||
- The username used to authenticate with.
|
||||
default: dbadmin
|
||||
type: str
|
||||
login_password:
|
||||
description:
|
||||
- The password used to authenticate with.
|
||||
type: str
|
||||
notes:
|
||||
- The default authentication assumes that you are either logging in as or sudo'ing
|
||||
to the C(dbadmin) account on the host.
|
||||
@@ -168,11 +177,11 @@ def main():
|
||||
role=dict(required=True, aliases=['name']),
|
||||
assigned_roles=dict(default=None, aliases=['assigned_role']),
|
||||
state=dict(default='present', choices=['absent', 'present']),
|
||||
db=dict(default=None),
|
||||
db=dict(),
|
||||
cluster=dict(default='localhost'),
|
||||
port=dict(default='5433'),
|
||||
login_user=dict(default='dbadmin'),
|
||||
login_password=dict(default=None, no_log=True),
|
||||
login_password=dict(no_log=True),
|
||||
), supports_check_mode=True)
|
||||
|
||||
if not pyodbc_found:
|
||||
|
||||
@@ -20,44 +20,55 @@ description:
|
||||
will fail and only remove roles created for the schema if they have
|
||||
no dependencies.
|
||||
options:
|
||||
name:
|
||||
schema:
|
||||
description:
|
||||
- Name of the schema to add or remove.
|
||||
required: true
|
||||
aliases: ['name']
|
||||
type: str
|
||||
usage_roles:
|
||||
description:
|
||||
- Comma separated list of roles to create and grant usage access to the schema.
|
||||
aliases: ['usage_role']
|
||||
type: str
|
||||
create_roles:
|
||||
description:
|
||||
- Comma separated list of roles to create and grant usage and create access to the schema.
|
||||
aliases: ['create_role']
|
||||
type: str
|
||||
owner:
|
||||
description:
|
||||
- Name of the user to set as owner of the schema.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Whether to create C(present), or drop C(absent) a schema.
|
||||
default: present
|
||||
choices: ['present', 'absent']
|
||||
type: str
|
||||
db:
|
||||
description:
|
||||
- Name of the Vertica database.
|
||||
type: str
|
||||
cluster:
|
||||
description:
|
||||
- Name of the Vertica cluster.
|
||||
default: localhost
|
||||
type: str
|
||||
port:
|
||||
description:
|
||||
- Vertica cluster port to connect to.
|
||||
default: 5433
|
||||
type: str
|
||||
login_user:
|
||||
description:
|
||||
- The username used to authenticate with.
|
||||
default: dbadmin
|
||||
type: str
|
||||
login_password:
|
||||
description:
|
||||
- The password used to authenticate with.
|
||||
type: str
|
||||
notes:
|
||||
- The default authentication assumes that you are either logging in as or sudo'ing
|
||||
to the C(dbadmin) account on the host.
|
||||
@@ -230,15 +241,15 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
schema=dict(required=True, aliases=['name']),
|
||||
usage_roles=dict(default=None, aliases=['usage_role']),
|
||||
create_roles=dict(default=None, aliases=['create_role']),
|
||||
owner=dict(default=None),
|
||||
usage_roles=dict(aliases=['usage_role']),
|
||||
create_roles=dict(aliases=['create_role']),
|
||||
owner=dict(),
|
||||
state=dict(default='present', choices=['absent', 'present']),
|
||||
db=dict(default=None),
|
||||
db=dict(),
|
||||
cluster=dict(default='localhost'),
|
||||
port=dict(default='5433'),
|
||||
login_user=dict(default='dbadmin'),
|
||||
login_password=dict(default=None, no_log=True),
|
||||
login_password=dict(no_log=True),
|
||||
), supports_check_mode=True)
|
||||
|
||||
if not pyodbc_found:
|
||||
|
||||
@@ -17,22 +17,27 @@ description:
|
||||
- In such a situation, if the module tries to remove the user it
|
||||
will fail and only remove roles granted to the user.
|
||||
options:
|
||||
name:
|
||||
user:
|
||||
description:
|
||||
- Name of the user to add or remove.
|
||||
required: true
|
||||
type: str
|
||||
aliases: ['name']
|
||||
profile:
|
||||
description:
|
||||
- Sets the user's profile.
|
||||
type: str
|
||||
resource_pool:
|
||||
description:
|
||||
- Sets the user's resource pool.
|
||||
type: str
|
||||
password:
|
||||
description:
|
||||
- The user's password encrypted by the MD5 algorithm.
|
||||
- The password must be generated with the format C("md5" + md5[password + username]),
|
||||
resulting in a total of 35 characters. An easy way to do this is by querying
|
||||
the Vertica database with select 'md5'||md5('<user_password><user_name>').
|
||||
type: str
|
||||
expired:
|
||||
description:
|
||||
- Sets the user's password expiration.
|
||||
@@ -46,29 +51,36 @@ options:
|
||||
description:
|
||||
- Comma separated list of roles to assign to the user.
|
||||
aliases: ['role']
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Whether to create C(present), drop C(absent) or lock C(locked) a user.
|
||||
choices: ['present', 'absent', 'locked']
|
||||
default: present
|
||||
type: str
|
||||
db:
|
||||
description:
|
||||
- Name of the Vertica database.
|
||||
type: str
|
||||
cluster:
|
||||
description:
|
||||
- Name of the Vertica cluster.
|
||||
default: localhost
|
||||
type: str
|
||||
port:
|
||||
description:
|
||||
- Vertica cluster port to connect to.
|
||||
default: 5433
|
||||
type: str
|
||||
login_user:
|
||||
description:
|
||||
- The username used to authenticate with.
|
||||
default: dbadmin
|
||||
type: str
|
||||
login_password:
|
||||
description:
|
||||
- The password used to authenticate with.
|
||||
type: str
|
||||
notes:
|
||||
- The default authentication assumes that you are either logging in as or sudo'ing
|
||||
to the C(dbadmin) account on the host.
|
||||
@@ -282,18 +294,18 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
user=dict(required=True, aliases=['name']),
|
||||
profile=dict(default=None),
|
||||
resource_pool=dict(default=None),
|
||||
password=dict(default=None, no_log=True),
|
||||
expired=dict(type='bool', default=None),
|
||||
ldap=dict(type='bool', default=None),
|
||||
roles=dict(default=None, aliases=['role']),
|
||||
profile=dict(),
|
||||
resource_pool=dict(),
|
||||
password=dict(no_log=True),
|
||||
expired=dict(type='bool'),
|
||||
ldap=dict(type='bool'),
|
||||
roles=dict(aliases=['role']),
|
||||
state=dict(default='present', choices=['absent', 'present', 'locked']),
|
||||
db=dict(default=None),
|
||||
db=dict(),
|
||||
cluster=dict(default='localhost'),
|
||||
port=dict(default='5433'),
|
||||
login_user=dict(default='dbadmin'),
|
||||
login_password=dict(default=None, no_log=True),
|
||||
login_password=dict(no_log=True),
|
||||
), supports_check_mode=True)
|
||||
|
||||
if not pyodbc_found:
|
||||
|
||||
@@ -59,8 +59,8 @@ options:
|
||||
executable:
|
||||
description:
|
||||
- The path to the C(7z) executable to use for extracting files from the ISO.
|
||||
- If not provided, it will assume the value C(7z).
|
||||
type: path
|
||||
default: '7z'
|
||||
notes:
|
||||
- Only the file checksum (content) is taken into account when extracting files
|
||||
from the ISO image. If C(force=no), only checks the presence of the file.
|
||||
@@ -101,7 +101,8 @@ def main():
|
||||
image=dict(type='path', required=True, aliases=['path', 'src']),
|
||||
dest=dict(type='path', required=True),
|
||||
files=dict(type='list', elements='str', required=True),
|
||||
force=dict(type='bool', default=True, aliases=['thirsty']),
|
||||
force=dict(type='bool', default=True, aliases=['thirsty'],
|
||||
deprecated_aliases=[dict(name='thirsty', version='3.0.0', collection_name='community.general')]),
|
||||
executable=dict(type='path'), # No default on purpose
|
||||
),
|
||||
supports_check_mode=True,
|
||||
@@ -112,10 +113,6 @@ def main():
|
||||
force = module.params['force']
|
||||
executable = module.params['executable']
|
||||
|
||||
if module.params.get('thirsty'):
|
||||
module.deprecate('The alias "thirsty" has been deprecated and will be removed, use "force" instead',
|
||||
version='3.0.0', collection_name='community.general') # was Ansible 2.13
|
||||
|
||||
result = dict(
|
||||
changed=False,
|
||||
dest=dest,
|
||||
|
||||
@@ -66,6 +66,7 @@ options:
|
||||
or a hash where the key is an element name and the value is the element value.
|
||||
- This parameter requires C(xpath) to be set.
|
||||
type: list
|
||||
elements: raw
|
||||
set_children:
|
||||
description:
|
||||
- Set the child-element(s) of a selected element for a given C(xpath).
|
||||
@@ -73,6 +74,7 @@ options:
|
||||
- Child elements must be specified as in C(add_children).
|
||||
- This parameter requires C(xpath) to be set.
|
||||
type: list
|
||||
elements: raw
|
||||
count:
|
||||
description:
|
||||
- Search for a given C(xpath) and provide the count of any matches.
|
||||
@@ -809,8 +811,8 @@ def main():
|
||||
state=dict(type='str', default='present', choices=['absent', 'present'], aliases=['ensure']),
|
||||
value=dict(type='raw'),
|
||||
attribute=dict(type='raw'),
|
||||
add_children=dict(type='list'),
|
||||
set_children=dict(type='list'),
|
||||
add_children=dict(type='list', elements='raw'),
|
||||
set_children=dict(type='list', elements='raw'),
|
||||
count=dict(type='bool', default=False),
|
||||
print_match=dict(type='bool', default=False),
|
||||
pretty_print=dict(type='bool', default=False),
|
||||
|
||||
@@ -137,6 +137,7 @@ options:
|
||||
aliases:
|
||||
- defaultRoles
|
||||
type: list
|
||||
elements: str
|
||||
|
||||
redirect_uris:
|
||||
description:
|
||||
@@ -145,6 +146,7 @@ options:
|
||||
aliases:
|
||||
- redirectUris
|
||||
type: list
|
||||
elements: str
|
||||
|
||||
web_origins:
|
||||
description:
|
||||
@@ -153,6 +155,7 @@ options:
|
||||
aliases:
|
||||
- webOrigins
|
||||
type: list
|
||||
elements: str
|
||||
|
||||
not_before:
|
||||
description:
|
||||
@@ -708,9 +711,9 @@ def main():
|
||||
client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt'], aliases=['clientAuthenticatorType']),
|
||||
secret=dict(type='str', no_log=True),
|
||||
registration_access_token=dict(type='str', aliases=['registrationAccessToken'], no_log=True),
|
||||
default_roles=dict(type='list', aliases=['defaultRoles']),
|
||||
redirect_uris=dict(type='list', aliases=['redirectUris']),
|
||||
web_origins=dict(type='list', aliases=['webOrigins']),
|
||||
default_roles=dict(type='list', elements='str', aliases=['defaultRoles']),
|
||||
redirect_uris=dict(type='list', elements='str', aliases=['redirectUris']),
|
||||
web_origins=dict(type='list', elements='str', aliases=['webOrigins']),
|
||||
not_before=dict(type='int', aliases=['notBefore']),
|
||||
bearer_only=dict(type='bool', aliases=['bearerOnly']),
|
||||
consent_required=dict(type='bool', aliases=['consentRequired']),
|
||||
|
||||
@@ -34,6 +34,7 @@ description:
|
||||
options:
|
||||
search_terms:
|
||||
type: list
|
||||
elements: dict
|
||||
description:
|
||||
- A list of one or more search terms.
|
||||
- Each search term can either be a simple string or it can be a dictionary for more control.
|
||||
@@ -372,7 +373,7 @@ def main():
|
||||
master_password=dict(required=True, type='str', no_log=True),
|
||||
secret_key=dict(type='str', no_log=True),
|
||||
), default=None),
|
||||
search_terms=dict(required=True, type='list')
|
||||
search_terms=dict(required=True, type='list', elements='dict'),
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
@@ -22,46 +22,56 @@ options:
|
||||
- The path to the bin directory of OpenDJ.
|
||||
required: false
|
||||
default: /opt/opendj/bin
|
||||
type: path
|
||||
hostname:
|
||||
description:
|
||||
- The hostname of the OpenDJ server.
|
||||
required: true
|
||||
type: str
|
||||
port:
|
||||
description:
|
||||
- The Admin port on which the OpenDJ instance is available.
|
||||
required: true
|
||||
type: str
|
||||
username:
|
||||
description:
|
||||
- The username to connect to.
|
||||
required: false
|
||||
default: cn=Directory Manager
|
||||
type: str
|
||||
password:
|
||||
description:
|
||||
- The password for the cn=Directory Manager user.
|
||||
- Either password or passwordfile is needed.
|
||||
required: false
|
||||
type: str
|
||||
passwordfile:
|
||||
description:
|
||||
- Location to the password file which holds the password for the cn=Directory Manager user.
|
||||
- Either password or passwordfile is needed.
|
||||
required: false
|
||||
type: path
|
||||
backend:
|
||||
description:
|
||||
- The name of the backend on which the property needs to be updated.
|
||||
required: true
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The configuration setting to update.
|
||||
required: true
|
||||
type: str
|
||||
value:
|
||||
description:
|
||||
- The value for the configuration item.
|
||||
required: true
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- If configuration needs to be added/updated
|
||||
required: false
|
||||
default: "present"
|
||||
type: str
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
||||
@@ -63,6 +63,7 @@ options:
|
||||
required: false
|
||||
links:
|
||||
type: list
|
||||
elements: dict
|
||||
description:
|
||||
- See examples
|
||||
'''
|
||||
@@ -155,7 +156,7 @@ def main():
|
||||
description=dict(required=False),
|
||||
start_time=dict(required=False, default=None, type='int'),
|
||||
end_time=dict(required=False, default=None, type='int'),
|
||||
links=dict(type='list')
|
||||
links=dict(type='list', elements='dict')
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@@ -138,11 +138,11 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
name=dict(required=True),
|
||||
state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
|
||||
state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())),
|
||||
plugin_bin=dict(default="/usr/share/logstash/bin/logstash-plugin", type="path"),
|
||||
proxy_host=dict(default=None),
|
||||
proxy_port=dict(default=None),
|
||||
version=dict(default=None)
|
||||
proxy_host=dict(),
|
||||
proxy_port=dict(),
|
||||
version=dict()
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
@@ -47,6 +47,7 @@ options:
|
||||
- ID of user making the request. Only needed when creating a maintenance_window.
|
||||
service:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- A comma separated list of PagerDuty service IDs.
|
||||
aliases: [ services ]
|
||||
@@ -233,7 +234,7 @@ def main():
|
||||
name=dict(required=False),
|
||||
user=dict(required=False),
|
||||
token=dict(required=True, no_log=True),
|
||||
service=dict(required=False, type='list', aliases=["services"]),
|
||||
service=dict(required=False, type='list', elements='str', aliases=["services"]),
|
||||
window_id=dict(required=False),
|
||||
requester_id=dict(required=False),
|
||||
hours=dict(default='1', required=False), # @TODO change to int?
|
||||
|
||||
@@ -59,11 +59,13 @@ options:
|
||||
default: "https://api.status.io"
|
||||
components:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- The given name of your component (server name)
|
||||
aliases: ['component']
|
||||
containers:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- The given name of your container (data center)
|
||||
aliases: ['container']
|
||||
@@ -339,9 +341,9 @@ def main():
|
||||
state=dict(required=False, default='present',
|
||||
choices=['present', 'absent']),
|
||||
url=dict(default='https://api.status.io', required=False),
|
||||
components=dict(type='list', required=False, default=None,
|
||||
components=dict(type='list', elements='str', required=False, default=None,
|
||||
aliases=['component']),
|
||||
containers=dict(type='list', required=False, default=None,
|
||||
containers=dict(type='list', elements='str', required=False, default=None,
|
||||
aliases=['container']),
|
||||
all_infrastructure_affected=dict(type='bool', default=False,
|
||||
required=False),
|
||||
@@ -423,7 +425,7 @@ def main():
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=True)
|
||||
else:
|
||||
(rc, _, error) = create_maintenance(
|
||||
(rc, dummy, error) = create_maintenance(
|
||||
auth_headers, url, statuspage, host_ids,
|
||||
all_infrastructure_affected, automation,
|
||||
title, desc, returned_date, maintenance_notify_now,
|
||||
@@ -449,7 +451,7 @@ def main():
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=True)
|
||||
else:
|
||||
(rc, _, error) = delete_maintenance(
|
||||
(rc, dummy, error) = delete_maintenance(
|
||||
auth_headers, url, statuspage, maintenance_id)
|
||||
if rc == 0:
|
||||
module.exit_json(
|
||||
|
||||
@@ -40,6 +40,7 @@ options:
|
||||
description:
|
||||
- List of records to ensure they either exist or do not exist.
|
||||
type: list
|
||||
elements: str
|
||||
type:
|
||||
description:
|
||||
- The type of DNS record to create.
|
||||
@@ -167,7 +168,7 @@ def main():
|
||||
account_api_token=dict(type='str', no_log=True),
|
||||
domain=dict(type='str'),
|
||||
record=dict(type='str'),
|
||||
record_ids=dict(type='list'),
|
||||
record_ids=dict(type='list', elements='str'),
|
||||
type=dict(type='str', choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO',
|
||||
'POOL']),
|
||||
ttl=dict(type='int', default=3600),
|
||||
|
||||
@@ -323,7 +323,7 @@ def normalize_ip(ip, ip_version):
|
||||
if '/' in ip:
|
||||
ip, range = ip.split('/')
|
||||
else:
|
||||
ip, range = ip, ''
|
||||
range = ''
|
||||
ip_addr = to_native(compat_ipaddress.ip_address(to_text(ip)).compressed)
|
||||
if range == '':
|
||||
range = '32' if ip_version.lower() == 'ipv4' else '128'
|
||||
|
||||
@@ -28,51 +28,64 @@ options:
|
||||
- Manage DNS record.
|
||||
choices: ['present', 'absent']
|
||||
default: 'present'
|
||||
type: str
|
||||
server:
|
||||
description:
|
||||
- Apply DNS modification on this server, specified by IPv4 or IPv6 address.
|
||||
required: true
|
||||
type: str
|
||||
port:
|
||||
description:
|
||||
- Use this TCP port when connecting to C(server).
|
||||
default: 53
|
||||
type: int
|
||||
key_name:
|
||||
description:
|
||||
- Use TSIG key name to authenticate against DNS C(server)
|
||||
type: str
|
||||
key_secret:
|
||||
description:
|
||||
- Use TSIG key secret, associated with C(key_name), to authenticate against C(server)
|
||||
type: str
|
||||
key_algorithm:
|
||||
description:
|
||||
- Specify key algorithm used by C(key_secret).
|
||||
choices: ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256', 'hmac-sha384',
|
||||
'hmac-sha512']
|
||||
default: 'hmac-md5'
|
||||
type: str
|
||||
zone:
|
||||
description:
|
||||
- DNS record will be modified on this C(zone).
|
||||
- When omitted DNS will be queried to attempt finding the correct zone.
|
||||
- Starting with Ansible 2.7 this parameter is optional.
|
||||
type: str
|
||||
record:
|
||||
description:
|
||||
- Sets the DNS record to modify. When zone is omitted this has to be absolute (ending with a dot).
|
||||
required: true
|
||||
type: str
|
||||
type:
|
||||
description:
|
||||
- Sets the record type.
|
||||
default: 'A'
|
||||
type: str
|
||||
ttl:
|
||||
description:
|
||||
- Sets the record TTL.
|
||||
default: 3600
|
||||
type: int
|
||||
value:
|
||||
description:
|
||||
- Sets the record value.
|
||||
type: list
|
||||
elements: str
|
||||
protocol:
|
||||
description:
|
||||
- Sets the transport protocol (TCP or UDP). TCP is the recommended and a more robust option.
|
||||
default: 'tcp'
|
||||
choices: ['tcp', 'udp']
|
||||
type: str
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
@@ -432,7 +445,7 @@ def main():
|
||||
record=dict(required=True, type='str'),
|
||||
type=dict(required=False, default='A', type='str'),
|
||||
ttl=dict(required=False, default=3600, type='int'),
|
||||
value=dict(required=False, default=None, type='list'),
|
||||
value=dict(required=False, default=None, type='list', elements='str'),
|
||||
protocol=dict(required=False, default='tcp', choices=['tcp', 'udp'], type='str')
|
||||
),
|
||||
supports_check_mode=True
|
||||
|
||||
@@ -63,6 +63,7 @@ options:
|
||||
description:
|
||||
- Attach a list of OMAPI DHCP statements with host lease (without ending semicolon).
|
||||
type: list
|
||||
elements: str
|
||||
default: []
|
||||
ddns:
|
||||
description:
|
||||
@@ -282,7 +283,7 @@ def main():
|
||||
hostname=dict(type='str', aliases=['name']),
|
||||
ip=dict(type='str'),
|
||||
ddns=dict(type='bool', default=False),
|
||||
statements=dict(type='list', default=[]),
|
||||
statements=dict(type='list', elements='str', default=[]),
|
||||
),
|
||||
supports_check_mode=False,
|
||||
)
|
||||
|
||||
@@ -39,6 +39,7 @@ options:
|
||||
- The email-address(es) the mail is being sent to.
|
||||
- This is a list, which may contain address and phrase portions.
|
||||
type: list
|
||||
elements: str
|
||||
default: root
|
||||
aliases: [ recipients ]
|
||||
cc:
|
||||
@@ -46,11 +47,13 @@ options:
|
||||
- The email-address(es) the mail is being copied to.
|
||||
- This is a list, which may contain address and phrase portions.
|
||||
type: list
|
||||
elements: str
|
||||
bcc:
|
||||
description:
|
||||
- The email-address(es) the mail is being 'blind' copied to.
|
||||
- This is a list, which may contain address and phrase portions.
|
||||
type: list
|
||||
elements: str
|
||||
subject:
|
||||
description:
|
||||
- The subject of the email being sent.
|
||||
@@ -85,12 +88,14 @@ options:
|
||||
- A list of pathnames of files to attach to the message.
|
||||
- Attached files will have their content-type set to C(application/octet-stream).
|
||||
type: list
|
||||
elements: path
|
||||
default: []
|
||||
headers:
|
||||
description:
|
||||
- A list of headers which should be added to the message.
|
||||
- Each individual header is specified as C(header=value) (see example below).
|
||||
type: list
|
||||
elements: str
|
||||
default: []
|
||||
charset:
|
||||
description:
|
||||
@@ -211,13 +216,13 @@ def main():
|
||||
host=dict(type='str', default='localhost'),
|
||||
port=dict(type='int', default=25),
|
||||
sender=dict(type='str', default='root', aliases=['from']),
|
||||
to=dict(type='list', default=['root'], aliases=['recipients']),
|
||||
cc=dict(type='list', default=[]),
|
||||
bcc=dict(type='list', default=[]),
|
||||
to=dict(type='list', elements='str', default=['root'], aliases=['recipients']),
|
||||
cc=dict(type='list', elements='str', default=[]),
|
||||
bcc=dict(type='list', elements='str', default=[]),
|
||||
subject=dict(type='str', required=True, aliases=['msg']),
|
||||
body=dict(type='str'),
|
||||
attach=dict(type='list', default=[]),
|
||||
headers=dict(type='list', default=[]),
|
||||
attach=dict(type='list', elements='path', default=[]),
|
||||
headers=dict(type='list', elements='str', default=[]),
|
||||
charset=dict(type='str', default='utf-8'),
|
||||
subtype=dict(type='str', default='plain', choices=['html', 'plain']),
|
||||
secure=dict(type='str', default='try', choices=['always', 'never', 'starttls', 'try']),
|
||||
|
||||
@@ -32,6 +32,7 @@ options:
|
||||
required: true
|
||||
dest:
|
||||
type: list
|
||||
elements: int
|
||||
description:
|
||||
- Phone number(s) to send SMS message to
|
||||
required: true
|
||||
@@ -119,7 +120,7 @@ def main():
|
||||
api_key=dict(required=True, no_log=True),
|
||||
api_secret=dict(required=True, no_log=True),
|
||||
src=dict(required=True, type='int'),
|
||||
dest=dict(required=True, type='list'),
|
||||
dest=dict(required=True, type='list', elements='int'),
|
||||
msg=dict(required=True),
|
||||
),
|
||||
)
|
||||
|
||||
@@ -45,11 +45,13 @@ options:
|
||||
- and above any sections or actions present.
|
||||
actions:
|
||||
type: list
|
||||
elements: dict
|
||||
description:
|
||||
- This array of objects will power the action links
|
||||
- found at the bottom of the card.
|
||||
sections:
|
||||
type: list
|
||||
elements: dict
|
||||
description:
|
||||
- Contains a list of sections to display in the card.
|
||||
- For more information see https://dev.outlook.com/Connectors/reference.
|
||||
@@ -264,8 +266,8 @@ def main():
|
||||
color=dict(type='str'),
|
||||
title=dict(type='str'),
|
||||
text=dict(type='str'),
|
||||
actions=dict(type='list'),
|
||||
sections=dict(type='list')
|
||||
actions=dict(type='list', elements='dict'),
|
||||
sections=dict(type='list', elements='dict')
|
||||
),
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
@@ -89,6 +89,7 @@ options:
|
||||
- 'danger'
|
||||
attachments:
|
||||
type: list
|
||||
elements: dict
|
||||
description:
|
||||
- Define a list of attachments.
|
||||
'''
|
||||
@@ -215,7 +216,7 @@ def main():
|
||||
link_names=dict(type='int', default=1, choices=[0, 1]),
|
||||
validate_certs=dict(default=True, type='bool'),
|
||||
color=dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']),
|
||||
attachments=dict(type='list', required=False)
|
||||
attachments=dict(type='list', elements='dict', required=False)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@@ -44,6 +44,7 @@ options:
|
||||
required: true
|
||||
to_addresses:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- A list with one or more recipient email addresses.
|
||||
required: true
|
||||
@@ -58,14 +59,17 @@ options:
|
||||
- Sendgrid API key to use instead of username/password.
|
||||
cc:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- A list of email addresses to cc.
|
||||
bcc:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- A list of email addresses to bcc.
|
||||
attachments:
|
||||
type: list
|
||||
elements: path
|
||||
description:
|
||||
- A list of relative or explicit paths of files you want to attach (7MB limit as per SendGrid docs).
|
||||
from_name:
|
||||
@@ -209,16 +213,16 @@ def main():
|
||||
username=dict(required=False),
|
||||
password=dict(required=False, no_log=True),
|
||||
api_key=dict(required=False, no_log=True),
|
||||
bcc=dict(required=False, type='list'),
|
||||
cc=dict(required=False, type='list'),
|
||||
bcc=dict(required=False, type='list', elements='str'),
|
||||
cc=dict(required=False, type='list', elements='str'),
|
||||
headers=dict(required=False, type='dict'),
|
||||
from_address=dict(required=True),
|
||||
from_name=dict(required=False),
|
||||
to_addresses=dict(required=True, type='list'),
|
||||
to_addresses=dict(required=True, type='list', elements='str'),
|
||||
subject=dict(required=True),
|
||||
body=dict(required=True),
|
||||
html_body=dict(required=False, default=False, type='bool'),
|
||||
attachments=dict(required=False, type='list')
|
||||
attachments=dict(required=False, type='list', elements='path')
|
||||
),
|
||||
supports_check_mode=True,
|
||||
mutually_exclusive=[
|
||||
|
||||
@@ -116,6 +116,7 @@ options:
|
||||
default: 'normal'
|
||||
attachments:
|
||||
type: list
|
||||
elements: dict
|
||||
description:
|
||||
- Define a list of attachments. This list mirrors the Slack JSON API.
|
||||
- For more information, see U(https://api.slack.com/docs/attachments).
|
||||
@@ -420,7 +421,7 @@ def main():
|
||||
parse=dict(type='str', default=None, choices=['none', 'full']),
|
||||
validate_certs=dict(default=True, type='bool'),
|
||||
color=dict(type='str', default='normal'),
|
||||
attachments=dict(type='list', required=False, default=None),
|
||||
attachments=dict(type='list', elements='dict', required=False, default=None),
|
||||
blocks=dict(type='list', elements='dict'),
|
||||
message_id=dict(type='str', default=None),
|
||||
),
|
||||
|
||||
@@ -37,6 +37,7 @@ options:
|
||||
required: true
|
||||
to_numbers:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
one or more phone numbers to send the text message to,
|
||||
format +15551112222
|
||||
@@ -143,7 +144,7 @@ def main():
|
||||
auth_token=dict(required=True, no_log=True),
|
||||
msg=dict(required=True),
|
||||
from_number=dict(required=True),
|
||||
to_numbers=dict(required=True, aliases=['to_number'], type='list'),
|
||||
to_numbers=dict(required=True, aliases=['to_number'], type='list', elements='str'),
|
||||
media_url=dict(default=None, required=False),
|
||||
),
|
||||
supports_check_mode=True
|
||||
|
||||
@@ -164,7 +164,7 @@ def get_package_state(names, pkg_spec, module):
|
||||
if stdout:
|
||||
# If the requested package name is just a stem, like "python", we may
|
||||
# find multiple packages with that name.
|
||||
pkg_spec[name]['installed_names'] = [installed_name for installed_name in stdout.splitlines()]
|
||||
pkg_spec[name]['installed_names'] = stdout.splitlines()
|
||||
module.debug("get_package_state(): installed_names = %s" % pkg_spec[name]['installed_names'])
|
||||
pkg_spec[name]['installed_state'] = True
|
||||
else:
|
||||
|
||||
@@ -105,6 +105,7 @@ options:
|
||||
entitlements from a pool (the pool must support this). Mutually exclusive with I(pool).
|
||||
default: []
|
||||
type: list
|
||||
elements: raw
|
||||
consumer_type:
|
||||
description:
|
||||
- The type of unit to register, defaults to system
|
||||
@@ -153,6 +154,7 @@ options:
|
||||
addons:
|
||||
description: Syspurpose attribute addons
|
||||
type: list
|
||||
elements: str
|
||||
sync:
|
||||
description:
|
||||
- When this option is true, then syspurpose attributes are synchronized with
|
||||
@@ -594,7 +596,7 @@ class Rhsm(RegistrationBase):
|
||||
|
||||
if missing_pools or serials:
|
||||
changed = True
|
||||
return {'changed': changed, 'subscribed_pool_ids': missing_pools.keys(),
|
||||
return {'changed': changed, 'subscribed_pool_ids': list(missing_pools.keys()),
|
||||
'unsubscribed_serials': serials}
|
||||
|
||||
def sync_syspurpose(self):
|
||||
@@ -787,7 +789,7 @@ def main():
|
||||
'org_id': {},
|
||||
'environment': {},
|
||||
'pool': {'default': '^$'},
|
||||
'pool_ids': {'default': [], 'type': 'list'},
|
||||
'pool_ids': {'default': [], 'type': 'list', 'elements': 'raw'},
|
||||
'consumer_type': {},
|
||||
'consumer_name': {},
|
||||
'consumer_id': {},
|
||||
@@ -803,7 +805,7 @@ def main():
|
||||
'role': {},
|
||||
'usage': {},
|
||||
'service_level_agreement': {},
|
||||
'addons': {'type': 'list'},
|
||||
'addons': {'type': 'list', 'elements': 'str'},
|
||||
'sync': {'type': 'bool', 'default': False}
|
||||
}
|
||||
}
|
||||
@@ -814,7 +816,7 @@ def main():
|
||||
mutually_exclusive=[['activationkey', 'username'],
|
||||
['activationkey', 'consumer_id'],
|
||||
['activationkey', 'environment'],
|
||||
['activationkey', 'autosubscribe'],
|
||||
['activationkey', 'auto_attach'],
|
||||
['pool', 'pool_ids']],
|
||||
required_if=[['state', 'present', ['username', 'activationkey'], True]],
|
||||
)
|
||||
|
||||
@@ -106,12 +106,14 @@ def main():
|
||||
|
||||
ssl_context = None
|
||||
if not validate_certs:
|
||||
try: # Python 2.7.9 and newer
|
||||
ssl_context = ssl.create_unverified_context()
|
||||
except AttributeError: # Legacy Python that doesn't verify HTTPS certificates by default
|
||||
ssl._create_default_context = ssl._create_unverified_context
|
||||
else: # Python 2.7.8 and older
|
||||
ssl._create_default_https_context = ssl._create_unverified_https_context
|
||||
try:
|
||||
ssl_context = ssl._create_unverified_context()
|
||||
except AttributeError:
|
||||
# Legacy Python that doesn't verify HTTPS certificates by default
|
||||
pass
|
||||
else:
|
||||
# Handle target environment that doesn't support HTTPS verification
|
||||
ssl._create_default_https_context = ssl._create_unverified_context
|
||||
|
||||
url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)
|
||||
if ssl_context:
|
||||
|
||||
@@ -229,12 +229,14 @@ def main():
|
||||
|
||||
ssl_context = None
|
||||
if not validate_certs:
|
||||
try: # Python 2.7.9 and newer
|
||||
ssl_context = ssl.create_unverified_context()
|
||||
except AttributeError: # Legacy Python that doesn't verify HTTPS certificates by default
|
||||
ssl._create_default_context = ssl._create_unverified_context
|
||||
else: # Python 2.7.8 and older
|
||||
ssl._create_default_https_context = ssl._create_unverified_https_context
|
||||
try:
|
||||
ssl_context = ssl._create_unverified_context()
|
||||
except AttributeError:
|
||||
# Legacy Python that doesn't verify HTTPS certificates by default
|
||||
pass
|
||||
else:
|
||||
# Handle target environment that doesn't support HTTPS verification
|
||||
ssl._create_default_https_context = ssl._create_unverified_context
|
||||
|
||||
url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)
|
||||
if ssl_context:
|
||||
|
||||
@@ -27,21 +27,25 @@ options:
|
||||
- IP Address or hostname of Cisco IMC, resolvable by Ansible control host.
|
||||
required: true
|
||||
aliases: [ host, ip ]
|
||||
type: str
|
||||
username:
|
||||
description:
|
||||
- Username used to login to the switch.
|
||||
default: admin
|
||||
aliases: [ user ]
|
||||
type: str
|
||||
password:
|
||||
description:
|
||||
- The password to use for authentication.
|
||||
default: password
|
||||
type: str
|
||||
path:
|
||||
description:
|
||||
- Name of the absolute path of the filename that includes the body
|
||||
of the http request being sent to the Cisco IMC REST API.
|
||||
- Parameter C(path) is mutual exclusive with parameter C(content).
|
||||
aliases: [ 'src', 'config_file' ]
|
||||
type: path
|
||||
content:
|
||||
description:
|
||||
- When used instead of C(path), sets the content of the API requests directly.
|
||||
@@ -49,11 +53,13 @@ options:
|
||||
- You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream,
|
||||
the Cisco IMC output is subsequently merged.
|
||||
- Parameter C(content) is mutual exclusive with parameter C(path).
|
||||
type: str
|
||||
protocol:
|
||||
description:
|
||||
- Connection protocol to use.
|
||||
default: https
|
||||
choices: [ http, https ]
|
||||
type: str
|
||||
timeout:
|
||||
description:
|
||||
- The socket level timeout in seconds.
|
||||
@@ -61,6 +67,7 @@ options:
|
||||
If this C(timeout) is reached, the module will fail with a
|
||||
C(Connection failure) indicating that C(The read operation timed out).
|
||||
default: 60
|
||||
type: int
|
||||
validate_certs:
|
||||
description:
|
||||
- If C(no), SSL certificates will not be validated.
|
||||
@@ -253,11 +260,11 @@ output:
|
||||
errorDescr="XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.\n"/>
|
||||
'''
|
||||
|
||||
import atexit
|
||||
import datetime
|
||||
import itertools
|
||||
import os
|
||||
import traceback
|
||||
from functools import partial
|
||||
|
||||
LXML_ETREE_IMP_ERR = None
|
||||
try:
|
||||
@@ -317,7 +324,6 @@ def merge(one, two):
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
hostname=dict(type='str', required=True, aliases=['host', 'ip']),
|
||||
@@ -374,53 +380,54 @@ def main():
|
||||
result.update(imc_response(module, resp.read()))
|
||||
|
||||
# Store cookie for future requests
|
||||
cookie = ''
|
||||
try:
|
||||
cookie = result['aaaLogin']['attributes']['outCookie']
|
||||
except Exception:
|
||||
module.fail_json(msg='Could not find cookie in output', **result)
|
||||
|
||||
# If we would not log out properly, we run out of sessions quickly
|
||||
atexit.register(logout, module, url, cookie, timeout)
|
||||
try:
|
||||
# Prepare request data
|
||||
if content:
|
||||
rawdata = content
|
||||
elif file_exists:
|
||||
with open(path, 'r') as config_object:
|
||||
rawdata = config_object.read()
|
||||
|
||||
# Prepare request data
|
||||
if content:
|
||||
rawdata = content
|
||||
elif file_exists:
|
||||
with open(path, 'r') as config_object:
|
||||
rawdata = config_object.read()
|
||||
# Wrap the XML documents in a <root> element
|
||||
xmldata = lxml.etree.fromstring('<root>%s</root>' % rawdata.replace('\n', ''))
|
||||
|
||||
# Wrap the XML documents in a <root> element
|
||||
xmldata = lxml.etree.fromstring('<root>%s</root>' % rawdata.replace('\n', ''))
|
||||
# Handle each XML document separately in the same session
|
||||
for xmldoc in list(xmldata):
|
||||
if xmldoc.tag is lxml.etree.Comment:
|
||||
continue
|
||||
# Add cookie to XML
|
||||
xmldoc.set('cookie', cookie)
|
||||
data = lxml.etree.tostring(xmldoc)
|
||||
|
||||
# Handle each XML document separately in the same session
|
||||
for xmldoc in list(xmldata):
|
||||
if xmldoc.tag is lxml.etree.Comment:
|
||||
continue
|
||||
# Add cookie to XML
|
||||
xmldoc.set('cookie', cookie)
|
||||
data = lxml.etree.tostring(xmldoc)
|
||||
# Perform actual request
|
||||
resp, info = fetch_url(module, url, data=data, method='POST', timeout=timeout)
|
||||
if resp is None or info['status'] != 200:
|
||||
result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
|
||||
module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % info, **result)
|
||||
|
||||
# Perform actual request
|
||||
resp, info = fetch_url(module, url, data=data, method='POST', timeout=timeout)
|
||||
if resp is None or info['status'] != 200:
|
||||
result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
|
||||
module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % info, **result)
|
||||
# Merge results with previous results
|
||||
rawoutput = resp.read()
|
||||
result = merge(result, imc_response(module, rawoutput, rawinput=data))
|
||||
result['response'] = info['msg']
|
||||
result['status'] = info['status']
|
||||
|
||||
# Merge results with previous results
|
||||
rawoutput = resp.read()
|
||||
result = merge(result, imc_response(module, rawoutput, rawinput=data))
|
||||
result['response'] = info['msg']
|
||||
result['status'] = info['status']
|
||||
# Check for any changes
|
||||
# NOTE: Unfortunately IMC API always report status as 'modified'
|
||||
xmloutput = lxml.etree.fromstring(rawoutput)
|
||||
results = xmloutput.xpath('/configConfMo/outConfig/*/@status')
|
||||
result['changed'] = ('modified' in results)
|
||||
|
||||
# Check for any changes
|
||||
# NOTE: Unfortunately IMC API always report status as 'modified'
|
||||
xmloutput = lxml.etree.fromstring(rawoutput)
|
||||
results = xmloutput.xpath('/configConfMo/outConfig/*/@status')
|
||||
result['changed'] = ('modified' in results)
|
||||
|
||||
# Report success
|
||||
result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
|
||||
module.exit_json(**result)
|
||||
# Report success
|
||||
result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
|
||||
module.exit_json(**result)
|
||||
finally:
|
||||
logout(module, url, cookie, timeout)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
@@ -19,18 +19,22 @@ options:
|
||||
description:
|
||||
- Hostname or ip address of the BMC.
|
||||
required: true
|
||||
type: str
|
||||
port:
|
||||
description:
|
||||
- Remote RMCP port.
|
||||
default: 623
|
||||
type: int
|
||||
user:
|
||||
description:
|
||||
- Username to use to connect to the BMC.
|
||||
required: true
|
||||
type: str
|
||||
password:
|
||||
description:
|
||||
- Password to connect to the BMC.
|
||||
required: true
|
||||
type: str
|
||||
bootdev:
|
||||
description:
|
||||
- Set boot device to use on next reboot
|
||||
@@ -51,6 +55,7 @@ options:
|
||||
- optical
|
||||
- setup
|
||||
- default
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Whether to ensure that boot devices is desired.
|
||||
@@ -59,6 +64,7 @@ options:
|
||||
- absent -- Request system turn on"
|
||||
default: present
|
||||
choices: [ present, absent ]
|
||||
type: str
|
||||
persistent:
|
||||
description:
|
||||
- If set, ask that system firmware uses this device beyond next boot.
|
||||
|
||||
@@ -19,18 +19,22 @@ options:
|
||||
description:
|
||||
- Hostname or ip address of the BMC.
|
||||
required: true
|
||||
type: str
|
||||
port:
|
||||
description:
|
||||
- Remote RMCP port.
|
||||
default: 623
|
||||
type: int
|
||||
user:
|
||||
description:
|
||||
- Username to use to connect to the BMC.
|
||||
required: true
|
||||
type: str
|
||||
password:
|
||||
description:
|
||||
- Password to connect to the BMC.
|
||||
required: true
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Whether to ensure that the machine in desired state.
|
||||
@@ -42,10 +46,12 @@ options:
|
||||
- boot -- If system is off, then 'on', else 'reset'"
|
||||
choices: ['on', 'off', shutdown, reset, boot]
|
||||
required: true
|
||||
type: str
|
||||
timeout:
|
||||
description:
|
||||
- Maximum number of seconds before interrupt request.
|
||||
default: 300
|
||||
type: int
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- pyghmi
|
||||
|
||||
@@ -26,6 +26,7 @@ options:
|
||||
description:
|
||||
- List of commands to execute on OOB controller
|
||||
type: list
|
||||
elements: str
|
||||
baseuri:
|
||||
required: true
|
||||
description:
|
||||
@@ -134,7 +135,7 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
category=dict(required=True),
|
||||
command=dict(required=True, type='list'),
|
||||
command=dict(required=True, type='list', elements='str'),
|
||||
baseuri=dict(required=True),
|
||||
username=dict(required=True),
|
||||
password=dict(required=True, no_log=True),
|
||||
@@ -164,7 +165,7 @@ def main():
|
||||
|
||||
# Check that Category is valid
|
||||
if category not in CATEGORY_COMMANDS_ALL:
|
||||
module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
|
||||
module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys()))))
|
||||
|
||||
# Check that all commands are valid
|
||||
for cmd in command_list:
|
||||
|
||||
@@ -29,6 +29,7 @@ options:
|
||||
I(SetSystemAttributes) are mutually exclusive commands when C(category)
|
||||
is I(Manager)
|
||||
type: list
|
||||
elements: str
|
||||
baseuri:
|
||||
required: true
|
||||
description:
|
||||
@@ -245,7 +246,7 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
category=dict(required=True),
|
||||
command=dict(required=True, type='list'),
|
||||
command=dict(required=True, type='list', elements='str'),
|
||||
baseuri=dict(required=True),
|
||||
username=dict(required=True),
|
||||
password=dict(required=True, no_log=True),
|
||||
@@ -278,7 +279,7 @@ def main():
|
||||
|
||||
# Check that Category is valid
|
||||
if category not in CATEGORY_COMMANDS_ALL:
|
||||
module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
|
||||
module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys()))))
|
||||
|
||||
# Check that all commands are valid
|
||||
for cmd in command_list:
|
||||
|
||||
@@ -30,6 +30,7 @@ options:
|
||||
- C(GetManagerAttributes) returns the list of dicts containing iDRAC,
|
||||
LifecycleController and System attributes
|
||||
type: list
|
||||
elements: str
|
||||
baseuri:
|
||||
required: true
|
||||
description:
|
||||
@@ -171,7 +172,7 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
category=dict(required=True),
|
||||
command=dict(required=True, type='list'),
|
||||
command=dict(required=True, type='list', elements='str'),
|
||||
baseuri=dict(required=True),
|
||||
username=dict(required=True),
|
||||
password=dict(required=True, no_log=True),
|
||||
@@ -201,7 +202,7 @@ def main():
|
||||
|
||||
# Check that Category is valid
|
||||
if category not in CATEGORY_COMMANDS_ALL:
|
||||
module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
|
||||
module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys()))))
|
||||
|
||||
# Check that all commands are valid
|
||||
for cmd in command_list:
|
||||
|
||||
@@ -28,6 +28,7 @@ options:
|
||||
description:
|
||||
- List of commands to execute on OOB controller
|
||||
type: list
|
||||
elements: str
|
||||
baseuri:
|
||||
required: true
|
||||
description:
|
||||
@@ -550,7 +551,7 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
category=dict(required=True),
|
||||
command=dict(required=True, type='list'),
|
||||
command=dict(required=True, type='list', elements='str'),
|
||||
baseuri=dict(required=True),
|
||||
username=dict(required=True),
|
||||
password=dict(required=True, no_log=True),
|
||||
@@ -638,7 +639,7 @@ def main():
|
||||
|
||||
# Check that Category is valid
|
||||
if category not in CATEGORY_COMMANDS_ALL:
|
||||
module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
|
||||
module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys()))))
|
||||
|
||||
# Check that all commands are valid
|
||||
for cmd in command_list:
|
||||
|
||||
@@ -27,6 +27,7 @@ options:
|
||||
description:
|
||||
- List of commands to execute on OOB controller
|
||||
type: list
|
||||
elements: str
|
||||
baseuri:
|
||||
required: true
|
||||
description:
|
||||
@@ -228,7 +229,7 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
category=dict(required=True),
|
||||
command=dict(required=True, type='list'),
|
||||
command=dict(required=True, type='list', elements='str'),
|
||||
baseuri=dict(required=True),
|
||||
username=dict(required=True),
|
||||
password=dict(required=True, no_log=True),
|
||||
@@ -287,7 +288,7 @@ def main():
|
||||
|
||||
# Check that Category is valid
|
||||
if category not in CATEGORY_COMMANDS_ALL:
|
||||
module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
|
||||
module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys()))))
|
||||
|
||||
# Check that all commands are valid
|
||||
for cmd in command_list:
|
||||
|
||||
@@ -24,11 +24,13 @@ options:
|
||||
- List of categories to execute on OOB controller
|
||||
default: ['Systems']
|
||||
type: list
|
||||
elements: str
|
||||
command:
|
||||
required: false
|
||||
description:
|
||||
- List of commands to execute on OOB controller
|
||||
type: list
|
||||
elements: str
|
||||
baseuri:
|
||||
required: true
|
||||
description:
|
||||
@@ -296,8 +298,8 @@ def main():
|
||||
category_list = []
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
category=dict(type='list', default=['Systems']),
|
||||
command=dict(type='list'),
|
||||
category=dict(type='list', elements='str', default=['Systems']),
|
||||
command=dict(type='list', elements='str'),
|
||||
baseuri=dict(required=True),
|
||||
username=dict(required=True),
|
||||
password=dict(required=True, no_log=True),
|
||||
|
||||
@@ -22,15 +22,18 @@ options:
|
||||
- SSH or HTTP protocol address of the parent branch.
|
||||
aliases: [ parent ]
|
||||
required: yes
|
||||
type: str
|
||||
dest:
|
||||
description:
|
||||
- Absolute path of where the branch should be cloned to.
|
||||
required: yes
|
||||
type: path
|
||||
version:
|
||||
description:
|
||||
- What version of the branch to clone. This can be the
|
||||
bzr revno or revid.
|
||||
default: head
|
||||
type: str
|
||||
force:
|
||||
description:
|
||||
- If C(yes), any modified files in the working
|
||||
@@ -42,6 +45,7 @@ options:
|
||||
description:
|
||||
- Path to bzr executable to use. If not supplied,
|
||||
the normal mechanism for resolving binary paths will be used.
|
||||
type: str
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
||||
@@ -34,10 +34,12 @@ options:
|
||||
description:
|
||||
- The name of the setting. If no value is supplied, the value will
|
||||
be read from the config if it has been set.
|
||||
type: str
|
||||
repo:
|
||||
description:
|
||||
- Path to a git repository for reading and writing values from a
|
||||
specific repo.
|
||||
type: path
|
||||
scope:
|
||||
description:
|
||||
- Specify which scope to read/set values from. This is required
|
||||
@@ -45,6 +47,7 @@ options:
|
||||
also specify the repo parameter. It defaults to system only when
|
||||
not using I(list_all)=yes.
|
||||
choices: [ "local", "global", "system" ]
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- "Indicates the setting should be set/unset.
|
||||
@@ -52,10 +55,12 @@ options:
|
||||
when I(state)=absent and I(value) is defined, I(value) is discarded."
|
||||
choices: [ 'present', 'absent' ]
|
||||
default: 'present'
|
||||
type: str
|
||||
value:
|
||||
description:
|
||||
- When specifying the name of a single setting, supply a value to
|
||||
set that setting to the given value.
|
||||
type: str
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
@@ -151,7 +156,6 @@ config_values:
|
||||
import os
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.six.moves import shlex_quote
|
||||
|
||||
|
||||
def main():
|
||||
@@ -216,7 +220,7 @@ def main():
|
||||
# Run from root directory to avoid accidentally picking up any local config settings
|
||||
dir = "/"
|
||||
|
||||
(rc, out, err) = module.run_command(' '.join(args), cwd=dir)
|
||||
(rc, out, err) = module.run_command(args, cwd=dir, expand_user_and_vars=False)
|
||||
if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err:
|
||||
# This just means nothing has been set at the given scope
|
||||
module.exit_json(changed=False, msg='', config_values={})
|
||||
@@ -243,17 +247,16 @@ def main():
|
||||
if not module.check_mode:
|
||||
if unset:
|
||||
args.insert(len(args) - 1, "--" + unset)
|
||||
cmd = ' '.join(args)
|
||||
cmd = args
|
||||
else:
|
||||
new_value_quoted = shlex_quote(new_value)
|
||||
cmd = ' '.join(args + [new_value_quoted])
|
||||
cmd = args + [new_value]
|
||||
try: # try using extra parameter from ansible-base 2.10.4 onwards
|
||||
(rc, out, err) = module.run_command(cmd, cwd=dir, ignore_invalid_cwd=False)
|
||||
(rc, out, err) = module.run_command(cmd, cwd=dir, ignore_invalid_cwd=False, expand_user_and_vars=False)
|
||||
except TypeError:
|
||||
# @TODO remove try/except when community.general drop support for 2.10.x
|
||||
if not os.path.isdir(dir):
|
||||
module.fail_json(msg="Cannot find directory '{0}'".format(dir))
|
||||
(rc, out, err) = module.run_command(cmd, cwd=dir)
|
||||
(rc, out, err) = module.run_command(cmd, cwd=dir, expand_user_and_vars=False)
|
||||
if err:
|
||||
module.fail_json(rc=rc, msg=err, cmd=cmd)
|
||||
|
||||
|
||||
@@ -29,20 +29,24 @@ options:
|
||||
- The name of the individual account or organization that owns the GitHub repository.
|
||||
required: true
|
||||
aliases: [ 'account', 'organization' ]
|
||||
type: str
|
||||
repo:
|
||||
description:
|
||||
- The name of the GitHub repository.
|
||||
required: true
|
||||
aliases: [ 'repository' ]
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The name for the deploy key.
|
||||
required: true
|
||||
aliases: [ 'title', 'label' ]
|
||||
type: str
|
||||
key:
|
||||
description:
|
||||
- The SSH public key to add to the repository as a deploy key.
|
||||
required: true
|
||||
type: str
|
||||
read_only:
|
||||
description:
|
||||
- If C(true), the deploy key will only be able to read repository contents. Otherwise, the deploy key will be able to read and write.
|
||||
@@ -53,6 +57,7 @@ options:
|
||||
- The state of the deploy key.
|
||||
default: "present"
|
||||
choices: [ "present", "absent" ]
|
||||
type: str
|
||||
force:
|
||||
description:
|
||||
- If C(true), forcefully adds the deploy key by deleting any existing deploy key with the same public key or title.
|
||||
@@ -61,16 +66,20 @@ options:
|
||||
username:
|
||||
description:
|
||||
- The username to authenticate with. Should not be set when using personal access token
|
||||
type: str
|
||||
password:
|
||||
description:
|
||||
- The password to authenticate with. Alternatively, a personal access token can be used instead of I(username) and I(password) combination.
|
||||
type: str
|
||||
token:
|
||||
description:
|
||||
- The OAuth2 token or personal access token to authenticate with. Mutually exclusive with I(password).
|
||||
type: str
|
||||
otp:
|
||||
description:
|
||||
- The 6 digit One Time Password for 2-Factor Authentication. Required together with I(username) and I(password).
|
||||
aliases: ['2fa_token']
|
||||
type: int
|
||||
notes:
|
||||
- "Refer to GitHub's API documentation here: https://developer.github.com/v3/repos/keys/."
|
||||
'''
|
||||
|
||||
@@ -18,20 +18,24 @@ options:
|
||||
description:
|
||||
- Name of repository from which issue needs to be retrieved.
|
||||
required: true
|
||||
type: str
|
||||
organization:
|
||||
description:
|
||||
- Name of the GitHub organization in which the repository is hosted.
|
||||
required: true
|
||||
type: str
|
||||
issue:
|
||||
description:
|
||||
- Issue number for which information is required.
|
||||
required: true
|
||||
type: int
|
||||
action:
|
||||
description:
|
||||
- Get various details about issue depending upon action specified.
|
||||
default: 'get_status'
|
||||
choices:
|
||||
- 'get_status'
|
||||
type: str
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
'''
|
||||
|
||||
@@ -17,18 +17,22 @@ options:
|
||||
description:
|
||||
- GitHub Access Token with permission to list and create public keys.
|
||||
required: true
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- SSH key name
|
||||
required: true
|
||||
type: str
|
||||
pubkey:
|
||||
description:
|
||||
- SSH public key value. Required when C(state=present).
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Whether to remove a key, ensure that it exists, or update its value.
|
||||
choices: ['present', 'absent']
|
||||
default: 'present'
|
||||
type: str
|
||||
force:
|
||||
description:
|
||||
- The default is C(yes), which will replace the existing remote key
|
||||
|
||||
@@ -18,34 +18,43 @@ options:
|
||||
token:
|
||||
description:
|
||||
- GitHub Personal Access Token for authenticating. Mutually exclusive with C(password).
|
||||
type: str
|
||||
user:
|
||||
description:
|
||||
- The GitHub account that owns the repository
|
||||
type: str
|
||||
required: true
|
||||
password:
|
||||
description:
|
||||
- The GitHub account password for the user. Mutually exclusive with C(token).
|
||||
type: str
|
||||
repo:
|
||||
description:
|
||||
- Repository name
|
||||
type: str
|
||||
required: true
|
||||
action:
|
||||
description:
|
||||
- Action to perform
|
||||
type: str
|
||||
required: true
|
||||
choices: [ 'latest_release', 'create_release' ]
|
||||
tag:
|
||||
description:
|
||||
- Tag name when creating a release. Required when using action is set to C(create_release).
|
||||
type: str
|
||||
target:
|
||||
description:
|
||||
- Target of release when creating a release
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- Name of release when creating a release
|
||||
type: str
|
||||
body:
|
||||
description:
|
||||
- Description of the release when creating a release
|
||||
type: str
|
||||
draft:
|
||||
description:
|
||||
- Sets if the release is a draft or not. (boolean)
|
||||
|
||||
@@ -18,22 +18,26 @@ options:
|
||||
repository:
|
||||
description:
|
||||
- Full name of the repository to configure a hook for
|
||||
type: str
|
||||
required: true
|
||||
aliases:
|
||||
- repo
|
||||
url:
|
||||
description:
|
||||
- URL to which payloads will be delivered
|
||||
type: str
|
||||
required: true
|
||||
content_type:
|
||||
description:
|
||||
- The media type used to serialize the payloads
|
||||
type: str
|
||||
required: false
|
||||
choices: [ form, json ]
|
||||
default: form
|
||||
secret:
|
||||
description:
|
||||
- The shared secret between GitHub and the payload URL.
|
||||
type: str
|
||||
required: false
|
||||
insecure_ssl:
|
||||
description:
|
||||
@@ -61,24 +65,29 @@ options:
|
||||
state:
|
||||
description:
|
||||
- Whether the hook should be present or absent
|
||||
type: str
|
||||
required: false
|
||||
choices: [ absent, present ]
|
||||
default: present
|
||||
user:
|
||||
description:
|
||||
- User to authenticate to GitHub as
|
||||
type: str
|
||||
required: true
|
||||
password:
|
||||
description:
|
||||
- Password to authenticate to GitHub with
|
||||
type: str
|
||||
required: false
|
||||
token:
|
||||
description:
|
||||
- Token to authenticate to GitHub with
|
||||
type: str
|
||||
required: false
|
||||
github_url:
|
||||
description:
|
||||
- Base URL of the GitHub API
|
||||
type: str
|
||||
required: false
|
||||
default: https://api.github.com
|
||||
|
||||
|
||||
@@ -19,24 +19,29 @@ options:
|
||||
repository:
|
||||
description:
|
||||
- Full name of the repository to configure a hook for
|
||||
type: str
|
||||
required: true
|
||||
aliases:
|
||||
- repo
|
||||
user:
|
||||
description:
|
||||
- User to authenticate to GitHub as
|
||||
type: str
|
||||
required: true
|
||||
password:
|
||||
description:
|
||||
- Password to authenticate to GitHub with
|
||||
type: str
|
||||
required: false
|
||||
token:
|
||||
description:
|
||||
- Token to authenticate to GitHub with
|
||||
type: str
|
||||
required: false
|
||||
github_url:
|
||||
description:
|
||||
- Base URL of the github api
|
||||
type: str
|
||||
required: false
|
||||
default: https://api.github.com
|
||||
|
||||
|
||||
@@ -55,7 +55,7 @@ options:
|
||||
registration_token:
|
||||
description:
|
||||
- The registration token is used to register new runners.
|
||||
required: True
|
||||
- Required if I(state) is C(present).
|
||||
type: str
|
||||
active:
|
||||
description:
|
||||
@@ -93,6 +93,7 @@ options:
|
||||
required: False
|
||||
default: []
|
||||
type: list
|
||||
elements: str
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
@@ -279,12 +280,12 @@ def main():
|
||||
api_token=dict(type='str', no_log=True),
|
||||
description=dict(type='str', required=True, aliases=["name"]),
|
||||
active=dict(type='bool', default=True),
|
||||
tag_list=dict(type='list', default=[]),
|
||||
tag_list=dict(type='list', elements='str', default=[]),
|
||||
run_untagged=dict(type='bool', default=True),
|
||||
locked=dict(type='bool', default=False),
|
||||
access_level=dict(type='str', default='ref_protected', choices=["not_protected", "ref_protected"]),
|
||||
maximum_timeout=dict(type='int', default=3600),
|
||||
registration_token=dict(type='str', required=True, no_log=True),
|
||||
registration_token=dict(type='str', no_log=True),
|
||||
state=dict(type='str', default="present", choices=["absent", "present"]),
|
||||
))
|
||||
|
||||
@@ -300,6 +301,9 @@ def main():
|
||||
required_one_of=[
|
||||
['api_username', 'api_token'],
|
||||
],
|
||||
required_if=[
|
||||
('state', 'present', ['registration_token']),
|
||||
],
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
|
||||
@@ -21,15 +21,18 @@ options:
|
||||
- The repository address.
|
||||
required: yes
|
||||
aliases: [ name ]
|
||||
type: str
|
||||
dest:
|
||||
description:
|
||||
- Absolute path of where the repository should be cloned to.
|
||||
This parameter is required, unless clone and update are set to no
|
||||
type: path
|
||||
revision:
|
||||
description:
|
||||
- Equivalent C(-r) option in hg command which could be the changeset, revision number,
|
||||
branch name or even tag.
|
||||
aliases: [ version ]
|
||||
type: str
|
||||
force:
|
||||
description:
|
||||
- Discards uncommitted changes. Runs C(hg update -C). Prior to
|
||||
@@ -55,6 +58,7 @@ options:
|
||||
description:
|
||||
- Path to hg executable to use. If not supplied,
|
||||
the normal mechanism for resolving binary paths will be used.
|
||||
type: str
|
||||
notes:
|
||||
- This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156).
|
||||
- "If the task seems to be hanging, first verify remote host is in C(known_hosts).
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user