mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-04-29 18:06:53 +00:00
Compare commits
96 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5d3a2a3bd4 | ||
|
|
686cdf2a6b | ||
|
|
4928810dda | ||
|
|
4dc2e14039 | ||
|
|
6ec769b051 | ||
|
|
e4d3d24b26 | ||
|
|
572e3f0814 | ||
|
|
e03ade818a | ||
|
|
54725bea77 | ||
|
|
db24f9857a | ||
|
|
c00147e532 | ||
|
|
0baceda7f6 | ||
|
|
c563813e4e | ||
|
|
1dbd7d4d00 | ||
|
|
41b72c0055 | ||
|
|
96a8390b5e | ||
|
|
25474f657a | ||
|
|
d7c4849473 | ||
|
|
0d459e5662 | ||
|
|
01bbab6b2c | ||
|
|
59a7064392 | ||
|
|
8e7b779ec9 | ||
|
|
1ba5344258 | ||
|
|
58e9454379 | ||
|
|
af3dec9b97 | ||
|
|
99a161bd06 | ||
|
|
feabad39f4 | ||
|
|
4a5276b589 | ||
|
|
e342dfb467 | ||
|
|
5b425fc297 | ||
|
|
d8328312a1 | ||
|
|
2ce326ca5b | ||
|
|
90ed2fa5c3 | ||
|
|
407d776610 | ||
|
|
951806c888 | ||
|
|
0fe7ea63a8 | ||
|
|
3a95a84963 | ||
|
|
2c3e93cc4d | ||
|
|
656b25a4a1 | ||
|
|
1863694297 | ||
|
|
c0f753dd21 | ||
|
|
369cde2320 | ||
|
|
e90872b486 | ||
|
|
b52d3504cb | ||
|
|
1e150cda01 | ||
|
|
db135b83dc | ||
|
|
ad4866bb3b | ||
|
|
83339c44b3 | ||
|
|
71633249c4 | ||
|
|
fdf244d488 | ||
|
|
5575d454ab | ||
|
|
d4633cfcd5 | ||
|
|
11315c8c69 | ||
|
|
6c387f87dd | ||
|
|
33cf4877f5 | ||
|
|
6e2fee77a7 | ||
|
|
502e5ceb79 | ||
|
|
4685a53f29 | ||
|
|
79616f47cb | ||
|
|
496218b6e6 | ||
|
|
8bd8ccd974 | ||
|
|
c802de865a | ||
|
|
1dfd6e395c | ||
|
|
25eabb39a6 | ||
|
|
869e0e60c2 | ||
|
|
cae5823685 | ||
|
|
3d0dbc1fb0 | ||
|
|
912583026f | ||
|
|
748304dadd | ||
|
|
253c2179de | ||
|
|
fcc72e5af1 | ||
|
|
d472953e10 | ||
|
|
c78d6c95d6 | ||
|
|
c9cb987eb7 | ||
|
|
099a99d288 | ||
|
|
26ea01d5b4 | ||
|
|
a9afbe59e5 | ||
|
|
dc9cab36ac | ||
|
|
99265c5126 | ||
|
|
57aede6b95 | ||
|
|
e51e41203a | ||
|
|
54644179ea | ||
|
|
7d6a1a4483 | ||
|
|
2715e4456c | ||
|
|
a335d1cc56 | ||
|
|
a89b43b110 | ||
|
|
1b599bde37 | ||
|
|
7bd987e2b9 | ||
|
|
8b0896a43d | ||
|
|
402bb01501 | ||
|
|
75afd83508 | ||
|
|
b25f0f3cd2 | ||
|
|
9226c4b0d5 | ||
|
|
fe3e262209 | ||
|
|
b9fac26dcd | ||
|
|
343e5a03a7 |
79
.github/BOTMETA.yml
vendored
79
.github/BOTMETA.yml
vendored
@@ -9,6 +9,8 @@ files:
|
||||
$actions/ironware.py:
|
||||
maintainers: paulquack
|
||||
labels: ironware networking
|
||||
$actions/shutdown.py:
|
||||
authors: nitzmahone samdoran aminvakil
|
||||
$becomes/:
|
||||
labels: become
|
||||
$callbacks/:
|
||||
@@ -71,6 +73,10 @@ files:
|
||||
$doc_fragments/xenserver.py:
|
||||
maintainers: bvitnik
|
||||
labels: xenserver
|
||||
$filters/time.py:
|
||||
authors: resmo
|
||||
$filters/jc.py:
|
||||
authors: kellyjonbrazil
|
||||
$httpapis/:
|
||||
maintainers: $team_networking
|
||||
labels: networking
|
||||
@@ -113,6 +119,10 @@ files:
|
||||
$lookups/dig.py:
|
||||
maintainers: jpmens
|
||||
labels: dig
|
||||
$lookups/tss.py:
|
||||
authors: amigus
|
||||
$lookups/dsv.py:
|
||||
authors: amigus
|
||||
$lookups/hashi_vault.py:
|
||||
labels: hashi_vault
|
||||
$lookups/manifold.py:
|
||||
@@ -237,6 +247,9 @@ files:
|
||||
$modules/cloud/docker/docker_stack.py:
|
||||
authors: dariko
|
||||
maintainers: DBendit WojciechowskiPiotr akshay196 danihodovic felixfontein jwitko kassiansun tbouvet
|
||||
$modules/cloud/docker/docker_stack_task_info.py:
|
||||
authors: imjoseangel
|
||||
maintainers: $team_docker
|
||||
$modules/cloud/docker/docker_swarm.py:
|
||||
authors: WojciechowskiPiotr tbouvet
|
||||
maintainers: DBendit akshay196 danihodovic dariko felixfontein jwitko kassiansun
|
||||
@@ -396,6 +409,9 @@ files:
|
||||
$modules/cloud/scaleway/:
|
||||
authors: sieben
|
||||
maintainers: $team_scaleway
|
||||
$modules/cloud/scaleway/scaleway_database_backup.py:
|
||||
authors: guillaume_ro_fr
|
||||
maintainers: $team_scaleway
|
||||
$modules/cloud/scaleway/scaleway_image_info.py:
|
||||
authors: Spredzy sieben
|
||||
$modules/cloud/scaleway/scaleway_ip_info.py:
|
||||
@@ -482,6 +498,8 @@ files:
|
||||
authors: ThePixelDeveloper samdoran
|
||||
$modules/database/misc/kibana_plugin.py:
|
||||
authors: barryib
|
||||
$modules/database/misc/odbc.py:
|
||||
authors: john-westcott-iv
|
||||
$modules/database/misc/redis.py:
|
||||
authors: slok
|
||||
$modules/database/misc/riak.py:
|
||||
@@ -495,36 +513,38 @@ files:
|
||||
maintainers: $team_postgresql
|
||||
$modules/database/postgresql/postgresql_ext.py:
|
||||
authors: Andersson007 andytom dschep strk
|
||||
maintainers: Dorn- amenonsen jbscalia kostiantyn-nemchenko matburt nerzhul sebasmannem tcraxs
|
||||
maintainers: $team_postgresql
|
||||
$modules/database/postgresql/:
|
||||
authors: Andersson007
|
||||
maintainers: Dorn- amenonsen andytom jbscalia kostiantyn-nemchenko matburt nerzhul sebasmannem tcraxs
|
||||
keywords: database postgres postgresql
|
||||
maintainers: $team_postgresql
|
||||
$modules/database/postgresql/postgresql_lang.py:
|
||||
authors: andytom jensdepuydt
|
||||
maintainers: Andersson007 Dorn- amenonsen jbscalia kostiantyn-nemchenko matburt nerzhul sebasmannem tcraxs
|
||||
maintainers: $team_postgresql
|
||||
$modules/database/postgresql/postgresql_pg_hba.py:
|
||||
authors: sebasmannem
|
||||
maintainers: Andersson007 Dorn- amenonsen andytom jbscalia kostiantyn-nemchenko matburt nerzhul tcraxs
|
||||
maintainers: $team_postgresql
|
||||
$modules/database/postgresql/postgresql_privs.py:
|
||||
authors: b6d tcraxs
|
||||
maintainers: Andersson007 Dorn- amenonsen andytom jbscalia kostiantyn-nemchenko matburt nerzhul sebasmannem
|
||||
maintainers: $team_postgresql
|
||||
$modules/database/postgresql/postgresql_publication.py:
|
||||
authors: Andersson007 nerzhul
|
||||
maintainers: Dorn- amenonsen andytom jbscalia kostiantyn-nemchenko matburt sebasmannem tcraxs
|
||||
maintainers: $team_postgresql
|
||||
$modules/database/postgresql/postgresql_query.py:
|
||||
authors: Andersson007 archf wrouesnel
|
||||
maintainers: $team_postgresql
|
||||
$modules/database/postgresql/postgresql_schema.py:
|
||||
authors: Dorn- andytom
|
||||
maintainers: Andersson007 amenonsen jbscalia kostiantyn-nemchenko matburt nerzhul sebasmannem tcraxs
|
||||
maintainers: $team_postgresql
|
||||
$modules/database/postgresql/postgresql_sequence.py:
|
||||
authors: tcraxs
|
||||
maintainers: Andersson007 Dorn- amenonsen andytom jbscalia kostiantyn-nemchenko matburt nerzhul sebasmannem
|
||||
maintainers: $team_postgresql
|
||||
$modules/database/postgresql/postgresql_slot.py:
|
||||
authors: Andersson007 jscalia
|
||||
maintainers: $team_postgresql
|
||||
$modules/database/postgresql/postgresql_tablespace.py:
|
||||
authors: Andersson007 Dorn- antoinell
|
||||
maintainers: amenonsen andytom jbscalia kostiantyn-nemchenko matburt nerzhul sebasmannem tcraxs
|
||||
maintainers: $team_postgresql
|
||||
$modules/database/postgresql/postgresql_user.py:
|
||||
authors: ansible
|
||||
maintainers: $team_postgresql
|
||||
@@ -613,6 +633,7 @@ files:
|
||||
labels: monit
|
||||
$modules/monitoring/nagios.py:
|
||||
authors: tbielawa
|
||||
maintainers: tgoetheyn
|
||||
$modules/monitoring/newrelic_deployment.py:
|
||||
authors: mcodd
|
||||
$modules/monitoring/pagerduty.py:
|
||||
@@ -671,6 +692,9 @@ files:
|
||||
$modules/net_tools/ldap/ldap_passwd.py:
|
||||
authors: KellerFuchs
|
||||
maintainers: jtyr
|
||||
$modules/net_tools/ldap/ldap_search.py:
|
||||
authors: eryx12o45
|
||||
maintainers: jtyr
|
||||
$modules/net_tools/lldp.py:
|
||||
authors: andyhky
|
||||
labels: lldp
|
||||
@@ -987,8 +1011,9 @@ files:
|
||||
$modules/remote_management/oneview/oneview_fcoe_network.py:
|
||||
authors: fgbulsoni
|
||||
$modules/remote_management/redfish/:
|
||||
authors: jose-delarosa
|
||||
authors: jose-delarosa billdodd
|
||||
maintainers: $team_redfish
|
||||
ignore: jose-delarosa
|
||||
$modules/remote_management/stacki/stacki_host.py:
|
||||
authors: bbyhuy
|
||||
maintainers: bsanders
|
||||
@@ -1018,23 +1043,29 @@ files:
|
||||
$modules/source_control/gitlab/:
|
||||
notify: jlozadad
|
||||
authors: Lunik marwatk
|
||||
maintainers: Shaps dj-wasabi waheedi
|
||||
maintainers: $team_gitlab
|
||||
keywords: gitlab source_control
|
||||
$modules/source_control/gitlab/gitlab_group.py:
|
||||
authors: Lunik dj-wasabi
|
||||
maintainers: Shaps marwatk waheedi
|
||||
maintainers: $team_gitlab
|
||||
$modules/source_control/gitlab/gitlab_group_members.py:
|
||||
authors: zanssa
|
||||
maintainers: $team_gitlab
|
||||
$modules/source_control/gitlab/gitlab_group_variable.py:
|
||||
authors: scodeman
|
||||
maintainers: $team_gitlab
|
||||
$modules/source_control/gitlab/gitlab_project.py:
|
||||
authors: Lunik dj-wasabi
|
||||
maintainers: Shaps marwatk waheedi
|
||||
maintainers: $team_gitlab
|
||||
$modules/source_control/gitlab/gitlab_project_variable.py:
|
||||
authors: markuman
|
||||
maintainers: $team_gitlab
|
||||
$modules/source_control/gitlab/gitlab_runner.py:
|
||||
authors: Lunik SamyCoenen
|
||||
maintainers: Shaps dj-wasabi marwatk waheedi
|
||||
maintainers: $team_gitlab
|
||||
$modules/source_control/gitlab/gitlab_user.py:
|
||||
authors: Lunik dj-wasabi
|
||||
maintainers: Shaps marwatk waheedi
|
||||
maintainers: $team_gitlab
|
||||
$modules/source_control/hg.py:
|
||||
authors: yeukhon
|
||||
$modules/storage/emc/emc_vnx_sg_member.py:
|
||||
@@ -1108,6 +1139,8 @@ files:
|
||||
authors: groks
|
||||
$modules/system/dconf.py:
|
||||
authors: azaghal
|
||||
$modules/system/dpkg_divert.py:
|
||||
authors: quidame
|
||||
$modules/system/facter.py:
|
||||
authors: ansible
|
||||
labels: facter
|
||||
@@ -1123,12 +1156,16 @@ files:
|
||||
authors: hryamzik
|
||||
maintainers: obourdon
|
||||
labels: interfaces_file
|
||||
$modules/system/iptables_state.py:
|
||||
authors: quidame
|
||||
$modules/system/java_cert.py:
|
||||
authors: haad
|
||||
$modules/system/java_keystore.py:
|
||||
authors: Mogztter
|
||||
$modules/system/kernel_blacklist.py:
|
||||
authors: matze
|
||||
$modules/system/launchd.py:
|
||||
authors: martinm82
|
||||
$modules/system/lbu.py:
|
||||
authors: kunkku
|
||||
$modules/system/listen_ports_facts.py:
|
||||
@@ -1200,6 +1237,8 @@ files:
|
||||
authors: bcoca
|
||||
$modules/system/syspatch.py:
|
||||
authors: precurse
|
||||
$modules/system/sysupgrade.py:
|
||||
authors: precurse
|
||||
$modules/system/timezone.py:
|
||||
authors: indrajitr jasperla tmshn
|
||||
$modules/system/ufw.py:
|
||||
@@ -1224,7 +1263,7 @@ files:
|
||||
authors: ramondelafuente
|
||||
$modules/web_infrastructure/django_manage.py:
|
||||
authors: tastychutney
|
||||
maintainers: scottanderson42
|
||||
maintainers: scottanderson42 russoz
|
||||
labels: django_manage
|
||||
$modules/web_infrastructure/ejabberd_user.py:
|
||||
authors: privateip
|
||||
@@ -1305,7 +1344,7 @@ macros:
|
||||
team_docker: DBendit WojciechowskiPiotr akshay196 danihodovic dariko felixfontein jwitko kassiansun tbouvet
|
||||
team_e_spirit: MatrixCrawler getjack
|
||||
team_extreme: LindsayHill bigmstone ujwalkomarla
|
||||
team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi
|
||||
team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman
|
||||
team_google: erjohnso rambleraptor
|
||||
team_hpux: bcoca davx8342
|
||||
team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2
|
||||
@@ -1320,12 +1359,12 @@ macros:
|
||||
team_netvisor: Qalthos amitsi csharpe-pn pdam preetiparasar
|
||||
team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha
|
||||
team_oracle: manojmeda mross22 nalsaber
|
||||
team_postgresql: Andersson007 Dorn- amenonsen andytom jbscalia kostiantyn-nemchenko matburt nerzhul sebasmannem tcraxs
|
||||
team_postgresql: Andersson007 Dorn- andytom jbscalia kostiantyn-nemchenko matburt nerzhul sebasmannem tcraxs ilicmilan
|
||||
team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
|
||||
team_rabbitmq: chrishoffman manuel-sousa
|
||||
team_redfish: billdodd mraineri tomasg2012
|
||||
team_rhn: FlossWare alikins barnabycourt vritant
|
||||
team_scaleway: DenBeke QuentinBrosse abarbare jerome-quere kindermoumoute remyleone
|
||||
team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone
|
||||
team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
|
||||
team_suse: commel dcermak evrardjp lrupp toabctl
|
||||
team_virt: joshainglis karmab
|
||||
team_virt: joshainglis karmab Aversiste
|
||||
|
||||
5
.github/patchback.yml
vendored
Normal file
5
.github/patchback.yml
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
---
|
||||
backport_branch_prefix: patchback/backports/
|
||||
backport_label_prefix: backport-
|
||||
target_branch_prefix: stable-
|
||||
...
|
||||
151
CHANGELOG.rst
151
CHANGELOG.rst
@@ -5,6 +5,138 @@ Community General Release Notes
|
||||
.. contents:: Topics
|
||||
|
||||
|
||||
v1.2.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular bimonthly minor release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- hashi_vault - support ``VAULT_NAMESPACE`` environment variable for namespaced lookups against Vault Enterprise (in addition to the ``namespace=`` flag supported today) (https://github.com/ansible-collections/community.general/pull/929).
|
||||
- hashi_vault lookup - add ``VAULT_TOKEN_FILE`` as env option to specify ``token_file`` param (https://github.com/ansible-collections/community.general/issues/373).
|
||||
- hashi_vault lookup - add ``VAULT_TOKEN_PATH`` as env option to specify ``token_path`` param (https://github.com/ansible-collections/community.general/issues/373).
|
||||
- ipa_user - add ``userauthtype`` option (https://github.com/ansible-collections/community.general/pull/951).
|
||||
- iptables_state - use FQCN when calling a module from action plugin (https://github.com/ansible-collections/community.general/pull/967).
|
||||
- nagios - add the ``acknowledge`` action (https://github.com/ansible-collections/community.general/pull/820).
|
||||
- nagios - add the ``host`` and ``all`` values for the ``forced_check`` action (https://github.com/ansible-collections/community.general/pull/998).
|
||||
- nagios - add the ``service_check`` action (https://github.com/ansible-collections/community.general/pull/820).
|
||||
- nagios - rename the ``service_check`` action to ``forced_check`` since we now are able to check both a particular service, all services of a particular host and the host itself (https://github.com/ansible-collections/community.general/pull/998).
|
||||
- pkgutil - module can now accept a list of packages (https://github.com/ansible-collections/community.general/pull/799).
|
||||
- pkgutil - module has a new option, ``force``, equivalent to the ``-f`` option to the `pkgutil <http://pkgutil.net/>`_ command (https://github.com/ansible-collections/community.general/pull/799).
|
||||
- pkgutil - module now supports check mode (https://github.com/ansible-collections/community.general/pull/799).
|
||||
- postgresql_privs - add the ``usage_on_types`` option (https://github.com/ansible-collections/community.general/issues/884).
|
||||
- proxmox_kvm - improve code readability (https://github.com/ansible-collections/community.general/pull/934).
|
||||
- pushover - add device parameter (https://github.com/ansible-collections/community.general/pull/802).
|
||||
- redfish_command - add sub-command for ``EnableContinuousBootOverride`` and ``DisableBootOverride`` to allow setting BootSourceOverrideEnabled Redfish property (https://github.com/ansible-collections/community.general/issues/824).
|
||||
- redfish_command - support same reset actions on Managers as on Systems (https://github.com/ansible-collections/community.general/issues/901).
|
||||
- slack - add support for updating messages (https://github.com/ansible-collections/community.general/issues/304).
|
||||
- xml - fixed issue were changed was returned when removing non-existent xpath (https://github.com/ansible-collections/community.general/pull/1007).
|
||||
- zypper_repository - proper failure when python-xml is missing (https://github.com/ansible-collections/community.general/pull/939).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- aerospike_migrations - handle exception when unstable-cluster is returned (https://github.com/ansible-collections/community.general/pull/900).
|
||||
- django_manage - fix idempotence for ``createcachetable`` (https://github.com/ansible-collections/community.general/pull/699).
|
||||
- docker_container - fix idempotency problem with ``published_ports`` when strict comparison is used and list is empty (https://github.com/ansible-collections/community.general/issues/978).
|
||||
- gem - fix get_installed_versions: correctly parse ``default`` version (https://github.com/ansible-collections/community.general/pull/783).
|
||||
- hashi_vault - add missing ``mount_point`` parameter for approle auth (https://github.com/ansible-collections/community.general/pull/897).
|
||||
- hashi_vault lookup - ``token_path`` in config file overridden by env ``HOME`` (https://github.com/ansible-collections/community.general/issues/373).
|
||||
- homebrew_cask - fixed issue where a cask with ``@`` in the name is incorrectly reported as invalid (https://github.com/ansible-collections/community.general/issues/733).
|
||||
- interfaces_file - escape regular expression characters in old value (https://github.com/ansible-collections/community.general/issues/777).
|
||||
- launchd - fix for user-level services (https://github.com/ansible-collections/community.general/issues/896).
|
||||
- nmcli - set ``C`` locale when executing ``nmcli`` (https://github.com/ansible-collections/community.general/issues/989).
|
||||
- parted - fix creating partition when label is changed (https://github.com/ansible-collections/community.general/issues/522).
|
||||
- pkg5 - now works when Python 3 is used on the target (https://github.com/ansible-collections/community.general/pull/789).
|
||||
- postgresql_privs - allow to pass ``PUBLIC`` role written in lowercase letters (https://github.com/ansible-collections/community.general/issues/857).
|
||||
- postgresql_privs - fix the module mistakes a procedure for a function (https://github.com/ansible-collections/community.general/issues/994).
|
||||
- postgresql_privs - rollback if nothing changed (https://github.com/ansible-collections/community.general/issues/885).
|
||||
- postgresql_privs - the module was attempting to revoke grant options even though ``grant_option`` was not specified (https://github.com/ansible-collections/community.general/pull/796).
|
||||
- proxmox_kvm - defer error-checking for non-existent VMs in order to fix idempotency of tasks using ``state=absent`` and properly recognize a success (https://github.com/ansible-collections/community.general/pull/811).
|
||||
- proxmox_kvm - improve handling of long-running tasks by creating a dedicated function (https://github.com/ansible-collections/community.general/pull/831).
|
||||
- slack - fix ``xox[abp]`` token identification to capture everything after ``xox[abp]``, as the token is the only thing that should be in this argument (https://github.com/ansible-collections/community.general/issues/862).
|
||||
- terraform - fix incorrectly reporting a status of unchanged when number of resources added or destroyed are multiples of 10 (https://github.com/ansible-collections/community.general/issues/561).
|
||||
- timezone - support Python3 on macos/darwin (https://github.com/ansible-collections/community.general/pull/945).
|
||||
- zfs - fixed ``invalid character '@' in pool name"`` error when working with snapshots on a root zvol (https://github.com/ansible-collections/community.general/issues/932).
|
||||
|
||||
New Plugins
|
||||
-----------
|
||||
|
||||
Inventory
|
||||
~~~~~~~~~
|
||||
|
||||
- proxmox - Proxmox inventory source
|
||||
- stackpath_compute - StackPath Edge Computing inventory source
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
Cloud
|
||||
~~~~~
|
||||
|
||||
scaleway
|
||||
^^^^^^^^
|
||||
|
||||
- scaleway_database_backup - Scaleway database backups management module
|
||||
|
||||
Source Control
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
gitlab
|
||||
^^^^^^
|
||||
|
||||
- gitlab_group_members - Manage group members on GitLab Server
|
||||
- gitlab_group_variable - Creates, updates, or deletes GitLab groups variables
|
||||
|
||||
v1.1.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Release for Ansible 2.10.0.
|
||||
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- The collection dependencies where adjusted so that ``community.kubernetes`` and ``google.cloud`` are required to be of version 1.0.0 or newer (https://github.com/ansible-collections/community.general/pull/774).
|
||||
- jc - new filter to convert the output of many shell commands and file-types to JSON. Uses the jc library at https://github.com/kellyjonbrazil/jc. For example, filtering the STDOUT output of ``uname -a`` via ``{{ result.stdout | community.general.jc('uname') }}``. Requires Python 3.6+ (https://github.com/ansible-collections/community.general/pull/750).
|
||||
- xfconf - add support for ``double`` type (https://github.com/ansible-collections/community.general/pull/744).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- cobbler inventory plugin - ``name`` needed FQCN (https://github.com/ansible-collections/community.general/pull/722).
|
||||
- dsv lookup - use correct dict usage (https://github.com/ansible-collections/community.general/pull/743).
|
||||
- inventory plugins - allow FQCN in ``plugin`` option (https://github.com/ansible-collections/community.general/pull/722).
|
||||
- ipa_hostgroup - fix an issue with load-balanced ipa and cookie handling with Python 3 (https://github.com/ansible-collections/community.general/issues/737).
|
||||
- oc connection plugin - ``transport`` needed FQCN (https://github.com/ansible-collections/community.general/pull/722).
|
||||
- postgresql_set - allow to pass an empty string to the ``value`` parameter (https://github.com/ansible-collections/community.general/issues/775).
|
||||
- xfconf - make it work in non-english locales (https://github.com/ansible-collections/community.general/pull/744).
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
Cloud
|
||||
~~~~~
|
||||
|
||||
docker
|
||||
^^^^^^
|
||||
|
||||
- docker_stack_task_info - Return information of the tasks on a docker stack
|
||||
|
||||
System
|
||||
~~~~~~
|
||||
|
||||
- iptables_state - Save iptables state into a file or restore it from a file
|
||||
- shutdown - Shut down a machine
|
||||
- sysupgrade - Manage OpenBSD system upgrades
|
||||
|
||||
v1.0.0
|
||||
======
|
||||
|
||||
@@ -149,7 +281,6 @@ Major Changes
|
||||
- docker_container - the ``network_mode`` option will be set by default to the name of the first network in ``networks`` if at least one network is given and ``networks_cli_compatible`` is ``true`` (will be default from community.general 2.0.0 on). Set to an explicit value to avoid deprecation warnings if you specify networks and set ``networks_cli_compatible`` to ``true``. The current default (not specifying it) is equivalent to the value ``default``.
|
||||
- docker_container - the module has a new option, ``container_default_behavior``, whose default value will change from ``compatibility`` to ``no_defaults``. Set to an explicit value to avoid deprecation warnings.
|
||||
- gitlab_user - no longer requires ``name``, ``email`` and ``password`` arguments when ``state=absent``.
|
||||
- zabbix_action - no longer requires ``esc_period`` and ``event_source`` arguments when ``state=absent``.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
@@ -286,16 +417,6 @@ Minor Changes
|
||||
- terraform - Adds option ``backend_config_files``. This can accept a list of paths to multiple configuration files (https://github.com/ansible-collections/community.general/pull/394).
|
||||
- terraform - Adds option ``variables_files`` for multiple var-files (https://github.com/ansible-collections/community.general/issues/224).
|
||||
- ufw - accept ``interface_in`` and ``interface_out`` as parameters.
|
||||
- zabbix_action - allow str values for ``esc_period`` options (https://github.com/ansible/ansible/pull/66841).
|
||||
- zabbix_host - now supports configuring user macros and host tags on the managed host (see https://github.com/ansible/ansible/pull/66777)
|
||||
- zabbix_host_info - ``host_name`` based search results now include host groups.
|
||||
- zabbix_hostmacro - ``macro_name`` now accepts macros in zabbix native format as well (e.g. ``{$MACRO}``)
|
||||
- zabbix_hostmacro - ``macro_value`` is no longer required when ``state=absent``
|
||||
- zabbix_proxy - ``interface`` sub-options ``type`` and ``main`` are now deprecated and will be removed in community.general 3.0.0. Also, the values passed to ``interface`` are now checked for correct types and unexpected keys.
|
||||
- zabbix_proxy - added option proxy_address for comma-delimited list of IP/CIDR addresses or DNS names to accept active proxy requests from
|
||||
- zabbix_template - add new option omit_date to remove date from exported/dumped template (https://github.com/ansible/ansible/pull/67302)
|
||||
- zabbix_template - adding new update rule templateLinkage.deleteMissing for newer zabbix versions (https://github.com/ansible/ansible/pull/66747).
|
||||
- zabbix_template_info - add new option omit_date to remove date from exported/dumped template (https://github.com/ansible/ansible/pull/67302)
|
||||
- zypper - Added ``allow_vendor_change`` and ``replacefiles`` zypper options (https://github.com/ansible-collections/community.general/issues/381)
|
||||
|
||||
Breaking Changes / Porting Guide
|
||||
@@ -326,7 +447,6 @@ Deprecated Features
|
||||
- redfish_config - the ``bios_attribute_name`` and ``bios_attribute_value`` options will be removed. To maintain the existing behavior use the ``bios_attributes`` option instead.
|
||||
- redfish_config and redfish_command - the behavior to select the first System, Manager, or Chassis resource to modify when multiple are present will be removed. Use the new ``resource_id`` option to specify target resource to modify.
|
||||
- redfish_config, redfish_command - Behavior to modify the first System, Mananger, or Chassis resource when multiple are present is deprecated. Use the new ``resource_id`` option to specify target resource to modify.
|
||||
- zabbix_proxy - deprecates ``interface`` sub-options ``type`` and ``main`` when proxy type is set to passive via ``status=passive``. Make sure these suboptions are removed from your playbook as they were never supported by Zabbix in the first place.
|
||||
|
||||
Removed Features (previously deprecated)
|
||||
----------------------------------------
|
||||
@@ -494,13 +614,6 @@ Bugfixes
|
||||
- terraform module - fixes usage for providers not supporting workspaces
|
||||
- yarn - Return correct values when running yarn in check mode (https://github.com/ansible-collections/community.general/pull/153).
|
||||
- yarn - handle no version when installing module by name (https://github.com/ansible/ansible/issues/55097)
|
||||
- zabbix_action - arguments ``event_source`` and ``esc_period`` no longer required when ``state=absent``
|
||||
- zabbix_host - fixed inventory_mode key error, which occurs with Zabbix 4.4.1 or more (https://github.com/ansible/ansible/issues/65304).
|
||||
- zabbix_host - was not possible to update a host where visible_name was not set in zabbix
|
||||
- zabbix_mediatype - Fixed to support zabbix 4.4 or more and python3 (https://github.com/ansible/ansible/pull/67693)
|
||||
- zabbix_template - fixed error when providing empty ``link_templates`` to the module (see https://github.com/ansible/ansible/issues/66417)
|
||||
- zabbix_template - fixed invalid (non-importable) output provided by exporting XML (see https://github.com/ansible/ansible/issues/66466)
|
||||
- zabbix_user - Fixed an issue where module failed with zabbix 4.4 or above (see https://github.com/ansible/ansible/pull/67475)
|
||||
- zfs_delegate_admin - add missing choices diff/hold/release to the permissions parameter (https://github.com/ansible-collections/community.general/pull/278)
|
||||
|
||||
New Plugins
|
||||
|
||||
@@ -245,19 +245,6 @@ releases:
|
||||
- terraform module - fixes usage for providers not supporting workspaces
|
||||
- yarn - Return correct values when running yarn in check mode (https://github.com/ansible-collections/community.general/pull/153).
|
||||
- yarn - handle no version when installing module by name (https://github.com/ansible/ansible/issues/55097)
|
||||
- zabbix_action - arguments ``event_source`` and ``esc_period`` no longer required
|
||||
when ``state=absent``
|
||||
- zabbix_host - fixed inventory_mode key error, which occurs with Zabbix 4.4.1
|
||||
or more (https://github.com/ansible/ansible/issues/65304).
|
||||
- zabbix_host - was not possible to update a host where visible_name was not
|
||||
set in zabbix
|
||||
- zabbix_mediatype - Fixed to support zabbix 4.4 or more and python3 (https://github.com/ansible/ansible/pull/67693)
|
||||
- zabbix_template - fixed error when providing empty ``link_templates`` to the
|
||||
module (see https://github.com/ansible/ansible/issues/66417)
|
||||
- zabbix_template - fixed invalid (non-importable) output provided by exporting
|
||||
XML (see https://github.com/ansible/ansible/issues/66466)
|
||||
- zabbix_user - Fixed an issue where module failed with zabbix 4.4 or above
|
||||
(see https://github.com/ansible/ansible/pull/67475)
|
||||
- zfs_delegate_admin - add missing choices diff/hold/release to the permissions
|
||||
parameter (https://github.com/ansible-collections/community.general/pull/278)
|
||||
deprecated_features:
|
||||
@@ -297,10 +284,6 @@ releases:
|
||||
- redfish_config, redfish_command - Behavior to modify the first System, Mananger,
|
||||
or Chassis resource when multiple are present is deprecated. Use the new ``resource_id``
|
||||
option to specify target resource to modify.
|
||||
- zabbix_proxy - deprecates ``interface`` sub-options ``type`` and ``main``
|
||||
when proxy type is set to passive via ``status=passive``. Make sure these
|
||||
suboptions are removed from your playbook as they were never supported by
|
||||
Zabbix in the first place.
|
||||
major_changes:
|
||||
- docker_container - the ``network_mode`` option will be set by default to the
|
||||
name of the first network in ``networks`` if at least one network is given
|
||||
@@ -313,8 +296,6 @@ releases:
|
||||
Set to an explicit value to avoid deprecation warnings.
|
||||
- gitlab_user - no longer requires ``name``, ``email`` and ``password`` arguments
|
||||
when ``state=absent``.
|
||||
- zabbix_action - no longer requires ``esc_period`` and ``event_source`` arguments
|
||||
when ``state=absent``.
|
||||
minor_changes:
|
||||
- A new filter ``to_time_unit`` with specializations ``to_milliseconds``, ``to_seconds``,
|
||||
``to_minutes``, ``to_hours``, ``to_days``, ``to_weeks``, ``to_months`` and
|
||||
@@ -495,24 +476,6 @@ releases:
|
||||
paths to multiple configuration files (https://github.com/ansible-collections/community.general/pull/394).
|
||||
- terraform - Adds option ``variables_files`` for multiple var-files (https://github.com/ansible-collections/community.general/issues/224).
|
||||
- ufw - accept ``interface_in`` and ``interface_out`` as parameters.
|
||||
- zabbix_action - allow str values for ``esc_period`` options (https://github.com/ansible/ansible/pull/66841).
|
||||
- zabbix_host - now supports configuring user macros and host tags on the managed
|
||||
host (see https://github.com/ansible/ansible/pull/66777)
|
||||
- zabbix_host_info - ``host_name`` based search results now include host groups.
|
||||
- zabbix_hostmacro - ``macro_name`` now accepts macros in zabbix native format
|
||||
as well (e.g. ``{$MACRO}``)
|
||||
- zabbix_hostmacro - ``macro_value`` is no longer required when ``state=absent``
|
||||
- zabbix_proxy - ``interface`` sub-options ``type`` and ``main`` are now deprecated
|
||||
and will be removed in community.general 3.0.0. Also, the values passed to
|
||||
``interface`` are now checked for correct types and unexpected keys.
|
||||
- zabbix_proxy - added option proxy_address for comma-delimited list of IP/CIDR
|
||||
addresses or DNS names to accept active proxy requests from
|
||||
- zabbix_template - add new option omit_date to remove date from exported/dumped
|
||||
template (https://github.com/ansible/ansible/pull/67302)
|
||||
- zabbix_template - adding new update rule templateLinkage.deleteMissing for
|
||||
newer zabbix versions (https://github.com/ansible/ansible/pull/66747).
|
||||
- zabbix_template_info - add new option omit_date to remove date from exported/dumped
|
||||
template (https://github.com/ansible/ansible/pull/67302)
|
||||
- zypper - Added ``allow_vendor_change`` and ``replacefiles`` zypper options
|
||||
(https://github.com/ansible-collections/community.general/issues/381)
|
||||
release_summary: 'This is the first proper release of the ``community.general``
|
||||
@@ -680,10 +643,8 @@ releases:
|
||||
- 63629-postgresql_db_pgc_support.yaml
|
||||
- 63887-docker_swarm_service-sort-lists-when-checking-changes.yml
|
||||
- 63903-ufw.yaml
|
||||
- 63969-zabbix_action_argsfix.yml
|
||||
- 63990-replace-deprecated-basic-functions.yml
|
||||
- 64007-postgresql_db_allow_user_name_with_dots.yml
|
||||
- 64032-zabbix_template_fix_return_XML_as_a_string_even_python3.yml
|
||||
- 64059-mysql_user_fix_password_comparison.yaml
|
||||
- 64288-fix-hashi-vault-kv-v2.yaml
|
||||
- 64371-postgresql_privs-always-reports-as-changed-when-using-default_privs.yml
|
||||
@@ -704,7 +665,6 @@ releases:
|
||||
- 65164-postgres_use_query_params_with_cursor.yml
|
||||
- 65223-postgresql_db-exception-added.yml
|
||||
- 65238-fix_pacman_stdout_parsing.yml
|
||||
- 65304-fix_zabbix_host_inventory_mode_key_error.yml
|
||||
- 65310-postgresql_owner_use_query_params.yml
|
||||
- 65372-misc-context-manager.yml
|
||||
- 65387-homebrew_check_mode_option.yml
|
||||
@@ -726,13 +686,11 @@ releases:
|
||||
- 65894-redfish-bios-attributes.yaml
|
||||
- 65903-postgresql_privs_sort_lists_with_none_elements.yml
|
||||
- 65993-restart-docker_container-on-restart-policy-updates.yaml
|
||||
- 66026-zabbix_host_info.yml
|
||||
- 66048-mysql_add_master_data_parameter.yml
|
||||
- 66060-redfish-new-resource-id-option.yaml
|
||||
- 66144-docker_container-removal-timeout.yml
|
||||
- 66151-docker_swarm_service-healthcheck-start-period.yml
|
||||
- 66157-postgresql-create-unique-indexes.yml
|
||||
- 66247-zabbix_proxy-address-field.yaml
|
||||
- 66252-mysql_replication_fail_on_error.yml
|
||||
- 66268-cyberarkpassword-fix-invalid-attr.yaml
|
||||
- 66322-moved_line_causing_terraform_output_suppression.yml
|
||||
@@ -740,26 +698,20 @@ releases:
|
||||
- 66357-support-changing-fetch_url-settings-for-rundeck-modules.yaml
|
||||
- 66382-docker_container-port-range.yml
|
||||
- 66398-pamd_fix-attributeerror-when-removing-first-line.yml
|
||||
- 66463-zabbix_template-fix-error-linktemplate-and-importdump.yml
|
||||
- 66592_ipa_encoding_fix.yml
|
||||
- 66599-docker-healthcheck.yml
|
||||
- 66600-docker_container-volumes.yml
|
||||
- 66688-mysql_db_add_skip_lock_tables_option.yml
|
||||
- 66711-postgresql_user_add_comment_parameter.yml
|
||||
- 66717-postgresql_db_add_dump_extra_args_param.yml
|
||||
- 66747-zabbix_template-newupdaterule-deletemissinglinkedtemplate.yml
|
||||
- 66777-zabbix_host_tags_macros_support.yml
|
||||
- 66801-mysql_user_priv_can_be_dict.yml
|
||||
- 66806-mysql_variables_not_support_variables_with_dot.yml
|
||||
- 66807-redhat_subscription-no-default-quantity.yaml
|
||||
- 66837-zabbix-proxy-interface.yml
|
||||
- 66841-zabbix_action-allowstrfor-esc_period.yml
|
||||
- 66914-purefa_user_string.yaml
|
||||
- 66929-pmrun-quote-entire-success-command-string.yml
|
||||
- 66957-scaleway-jsonify-only-for-json-requests.yml
|
||||
- 66974-mysql_user_doesnt_support_privs_with_underscore.yml
|
||||
- 67046-postgresql_modules_make_params_required.yml
|
||||
- 67302-zabbix_template_info-add-omit_date-field.yml
|
||||
- 67337-fix-proxysql-mysql-cursor.yaml
|
||||
- 67353-docker_login-permissions.yml
|
||||
- 67418-postgresql_set_converts_value_to_uppercase.yml
|
||||
@@ -767,7 +719,6 @@ releases:
|
||||
- 67464-postgresql_info_add_collecting_subscription_info.yml
|
||||
- 67614-postgresql_info_add_collecting_publication_info.yml
|
||||
- 67655-scaleway_compute-get-image-instead-loop-on-list.yml
|
||||
- 67693-zabbix_mediatype.yml
|
||||
- 67747-mysql_db_add_dump_extra_args_param.yml
|
||||
- 67767-mysql_db_fix_bug_introduced_by_56721.yml
|
||||
- 67832-run_powershell_modules_on_windows_containers.yml
|
||||
@@ -786,7 +737,6 @@ releases:
|
||||
- firewalld-version-0_7_0.yml
|
||||
- firewalld_zone_target.yml
|
||||
- fix-oc-conn-plugin-envvar.yml
|
||||
- fix_zabbix_host_visible_name.yml
|
||||
- gitlab-project-variable-variable-type.yml
|
||||
- gitlab_project_variable.yml
|
||||
- ldap-params-removal.yml
|
||||
@@ -805,8 +755,6 @@ releases:
|
||||
- solaris_zone_name_fix.yml
|
||||
- syslogger-disable-check-mode.yaml
|
||||
- xml-deprecated-functions.yml
|
||||
- zabbix-hostmacro.yml
|
||||
- zabbix_user-mediatype-error.yml
|
||||
modules:
|
||||
- description: Override a debian package's version of a file
|
||||
name: dpkg_divert
|
||||
@@ -1110,3 +1058,176 @@ releases:
|
||||
name: tss
|
||||
namespace: null
|
||||
release_date: '2020-07-31'
|
||||
1.1.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- cobbler inventory plugin - ``name`` needed FQCN (https://github.com/ansible-collections/community.general/pull/722).
|
||||
- dsv lookup - use correct dict usage (https://github.com/ansible-collections/community.general/pull/743).
|
||||
- inventory plugins - allow FQCN in ``plugin`` option (https://github.com/ansible-collections/community.general/pull/722).
|
||||
- ipa_hostgroup - fix an issue with load-balanced ipa and cookie handling with
|
||||
Python 3 (https://github.com/ansible-collections/community.general/issues/737).
|
||||
- oc connection plugin - ``transport`` needed FQCN (https://github.com/ansible-collections/community.general/pull/722).
|
||||
- postgresql_set - allow to pass an empty string to the ``value`` parameter
|
||||
(https://github.com/ansible-collections/community.general/issues/775).
|
||||
- xfconf - make it work in non-english locales (https://github.com/ansible-collections/community.general/pull/744).
|
||||
minor_changes:
|
||||
- The collection dependencies where adjusted so that ``community.kubernetes``
|
||||
and ``google.cloud`` are required to be of version 1.0.0 or newer (https://github.com/ansible-collections/community.general/pull/774).
|
||||
- jc - new filter to convert the output of many shell commands and file-types
|
||||
to JSON. Uses the jc library at https://github.com/kellyjonbrazil/jc. For
|
||||
example, filtering the STDOUT output of ``uname -a`` via ``{{ result.stdout
|
||||
| community.general.jc('uname') }}``. Requires Python 3.6+ (https://github.com/ansible-collections/community.general/pull/750).
|
||||
- xfconf - add support for ``double`` type (https://github.com/ansible-collections/community.general/pull/744).
|
||||
release_summary: 'Release for Ansible 2.10.0.
|
||||
|
||||
'
|
||||
fragments:
|
||||
- 1.1.0.yml
|
||||
- 722-plugins.yml
|
||||
- 738-ipa-python3.yml
|
||||
- 744-xfconf_make_locale-independent.yml
|
||||
- 750-jc-new-filter.yaml
|
||||
- 776-postgresql_set_allow_empty_string.yaml
|
||||
- dsv_fix.yml
|
||||
- galaxy-yml.yml
|
||||
modules:
|
||||
- description: Return information of the tasks on a docker stack
|
||||
name: docker_stack_task_info
|
||||
namespace: cloud.docker
|
||||
- description: Save iptables state into a file or restore it from a file
|
||||
name: iptables_state
|
||||
namespace: system
|
||||
- description: Shut down a machine
|
||||
name: shutdown
|
||||
namespace: system
|
||||
- description: Manage OpenBSD system upgrades
|
||||
name: sysupgrade
|
||||
namespace: system
|
||||
release_date: '2020-08-18'
|
||||
1.2.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- aerospike_migrations - handle exception when unstable-cluster is returned
|
||||
(https://github.com/ansible-collections/community.general/pull/900).
|
||||
- django_manage - fix idempotence for ``createcachetable`` (https://github.com/ansible-collections/community.general/pull/699).
|
||||
- docker_container - fix idempotency problem with ``published_ports`` when strict
|
||||
comparison is used and list is empty (https://github.com/ansible-collections/community.general/issues/978).
|
||||
- 'gem - fix get_installed_versions: correctly parse ``default`` version (https://github.com/ansible-collections/community.general/pull/783).'
|
||||
- hashi_vault - add missing ``mount_point`` parameter for approle auth (https://github.com/ansible-collections/community.general/pull/897).
|
||||
- hashi_vault lookup - ``token_path`` in config file overridden by env ``HOME``
|
||||
(https://github.com/ansible-collections/community.general/issues/373).
|
||||
- homebrew_cask - fixed issue where a cask with ``@`` in the name is incorrectly
|
||||
reported as invalid (https://github.com/ansible-collections/community.general/issues/733).
|
||||
- interfaces_file - escape regular expression characters in old value (https://github.com/ansible-collections/community.general/issues/777).
|
||||
- launchd - fix for user-level services (https://github.com/ansible-collections/community.general/issues/896).
|
||||
- nmcli - set ``C`` locale when executing ``nmcli`` (https://github.com/ansible-collections/community.general/issues/989).
|
||||
- parted - fix creating partition when label is changed (https://github.com/ansible-collections/community.general/issues/522).
|
||||
- pkg5 - now works when Python 3 is used on the target (https://github.com/ansible-collections/community.general/pull/789).
|
||||
- postgresql_privs - allow to pass ``PUBLIC`` role written in lowercase letters
|
||||
(https://github.com/ansible-collections/community.general/issues/857).
|
||||
- postgresql_privs - fix the module mistakes a procedure for a function (https://github.com/ansible-collections/community.general/issues/994).
|
||||
- postgresql_privs - rollback if nothing changed (https://github.com/ansible-collections/community.general/issues/885).
|
||||
- postgresql_privs - the module was attempting to revoke grant options even
|
||||
though ``grant_option`` was not specified (https://github.com/ansible-collections/community.general/pull/796).
|
||||
- proxmox_kvm - defer error-checking for non-existent VMs in order to fix idempotency
|
||||
of tasks using ``state=absent`` and properly recognize a success (https://github.com/ansible-collections/community.general/pull/811).
|
||||
- proxmox_kvm - improve handling of long-running tasks by creating a dedicated
|
||||
function (https://github.com/ansible-collections/community.general/pull/831).
|
||||
- slack - fix ``xox[abp]`` token identification to capture everything after
|
||||
``xox[abp]``, as the token is the only thing that should be in this argument
|
||||
(https://github.com/ansible-collections/community.general/issues/862).
|
||||
- terraform - fix incorrectly reporting a status of unchanged when number of
|
||||
resources added or destroyed are multiples of 10 (https://github.com/ansible-collections/community.general/issues/561).
|
||||
- timezone - support Python3 on macos/darwin (https://github.com/ansible-collections/community.general/pull/945).
|
||||
- zfs - fixed ``invalid character '@' in pool name"`` error when working with
|
||||
snapshots on a root zvol (https://github.com/ansible-collections/community.general/issues/932).
|
||||
minor_changes:
|
||||
- hashi_vault - support ``VAULT_NAMESPACE`` environment variable for namespaced
|
||||
lookups against Vault Enterprise (in addition to the ``namespace=`` flag supported
|
||||
today) (https://github.com/ansible-collections/community.general/pull/929).
|
||||
- hashi_vault lookup - add ``VAULT_TOKEN_FILE`` as env option to specify ``token_file``
|
||||
param (https://github.com/ansible-collections/community.general/issues/373).
|
||||
- hashi_vault lookup - add ``VAULT_TOKEN_PATH`` as env option to specify ``token_path``
|
||||
param (https://github.com/ansible-collections/community.general/issues/373).
|
||||
- ipa_user - add ``userauthtype`` option (https://github.com/ansible-collections/community.general/pull/951).
|
||||
- iptables_state - use FQCN when calling a module from action plugin (https://github.com/ansible-collections/community.general/pull/967).
|
||||
- nagios - add the ``acknowledge`` action (https://github.com/ansible-collections/community.general/pull/820).
|
||||
- nagios - add the ``host`` and ``all`` values for the ``forced_check`` action
|
||||
(https://github.com/ansible-collections/community.general/pull/998).
|
||||
- nagios - add the ``service_check`` action (https://github.com/ansible-collections/community.general/pull/820).
|
||||
- nagios - rename the ``service_check`` action to ``forced_check`` since we
|
||||
now are able to check both a particular service, all services of a particular
|
||||
host and the host itself (https://github.com/ansible-collections/community.general/pull/998).
|
||||
- pkgutil - module can now accept a list of packages (https://github.com/ansible-collections/community.general/pull/799).
|
||||
- pkgutil - module has a new option, ``force``, equivalent to the ``-f`` option
|
||||
to the `pkgutil <http://pkgutil.net/>`_ command (https://github.com/ansible-collections/community.general/pull/799).
|
||||
- pkgutil - module now supports check mode (https://github.com/ansible-collections/community.general/pull/799).
|
||||
- postgresql_privs - add the ``usage_on_types`` option (https://github.com/ansible-collections/community.general/issues/884).
|
||||
- proxmox_kvm - improve code readability (https://github.com/ansible-collections/community.general/pull/934).
|
||||
- pushover - add device parameter (https://github.com/ansible-collections/community.general/pull/802).
|
||||
- redfish_command - add sub-command for ``EnableContinuousBootOverride`` and
|
||||
``DisableBootOverride`` to allow setting BootSourceOverrideEnabled Redfish
|
||||
property (https://github.com/ansible-collections/community.general/issues/824).
|
||||
- redfish_command - support same reset actions on Managers as on Systems (https://github.com/ansible-collections/community.general/issues/901).
|
||||
- slack - add support for updating messages (https://github.com/ansible-collections/community.general/issues/304).
|
||||
- xml - fixed issue where changed was returned when removing non-existent xpath
|
||||
(https://github.com/ansible-collections/community.general/pull/1007).
|
||||
- zypper_repository - proper failure when python-xml is missing (https://github.com/ansible-collections/community.general/pull/939).
|
||||
release_summary: Regular bimonthly minor release.
|
||||
fragments:
|
||||
- 1.2.0.yml
|
||||
- 522-parted_change_label.yml
|
||||
- 563-update-terraform-status-test.yaml
|
||||
- 699-django_manage-createcachetable-fix-idempotence.yml
|
||||
- 777-interfaces_file-re-escape.yml
|
||||
- 783-fix-gem-installed-versions.yaml
|
||||
- 789-pkg5-wrap-to-modify-package-list.yaml
|
||||
- 796-postgresql_privs-grant-option-bug.yaml
|
||||
- 802-pushover-device-parameter.yml
|
||||
- 811-proxmox-kvm-state-absent.yml
|
||||
- 820_nagios_added_acknowledge_and_servicecheck.yml
|
||||
- 825-bootsource-override-option.yaml
|
||||
- 831-proxmox-kvm-wait.yml
|
||||
- 843-update-slack-messages.yml
|
||||
- 858-postgresql_privs_should_allow_public_role_lowercased.yml
|
||||
- 887-rollback-if-nothing-changed.yml
|
||||
- 892-slack-token-validation.yml
|
||||
- 897-lookup-plugin-hashivault-add-approle-mount-point.yaml
|
||||
- 899_launchd_user_service.yml
|
||||
- 900-aerospike-migration-handle-unstable-cluster.yaml
|
||||
- 902-hashi_vault-token-path.yml
|
||||
- 903-enhance-redfish-manager-reset-actions.yml
|
||||
- 929-vault-namespace-support.yml
|
||||
- 939-zypper_repository_proper_failure_on_missing_python-xml.yml
|
||||
- 941-postgresql_privs_usage_on_types_option.yml
|
||||
- 943-proxmox-kvm-code-cleanup.yml
|
||||
- 945-darwin-timezone-py3.yaml
|
||||
- 951-ipa_user-add-userauthtype-param.yaml
|
||||
- 967-use-fqcn-when-calling-a-module-from-action-plugin.yml
|
||||
- 979-docker_container-published_ports-empty-idempotency.yml
|
||||
- 992-nmcli-locale.yml
|
||||
- 996-postgresql_privs_fix_function_handling.yml
|
||||
- 998-nagios-added_forced_check_for_all_services_or_host.yml
|
||||
- homebrew-cask-at-symbol-fix.yaml
|
||||
- pkgutil-check-mode-etc.yaml
|
||||
- xml-remove-changed.yml
|
||||
- zfs-root-snapshot.yml
|
||||
modules:
|
||||
- description: Manage group members on GitLab Server
|
||||
name: gitlab_group_members
|
||||
namespace: source_control.gitlab
|
||||
- description: Creates, updates, or deletes GitLab groups variables
|
||||
name: gitlab_group_variable
|
||||
namespace: source_control.gitlab
|
||||
- description: Scaleway database backups management module
|
||||
name: scaleway_database_backup
|
||||
namespace: cloud.scaleway
|
||||
plugins:
|
||||
inventory:
|
||||
- description: Proxmox inventory source
|
||||
name: proxmox
|
||||
namespace: null
|
||||
- description: StackPath Edge Computing inventory source
|
||||
name: stackpath_compute
|
||||
namespace: null
|
||||
release_date: '2020-09-30'
|
||||
|
||||
@@ -1,18 +1,18 @@
|
||||
namespace: community
|
||||
name: general
|
||||
version: 1.0.0
|
||||
version: 1.2.0
|
||||
readme: README.md
|
||||
authors:
|
||||
- Ansible (https://github.com/ansible)
|
||||
description: null
|
||||
license_file: COPYING
|
||||
tags: null
|
||||
tags: [community]
|
||||
# NOTE: No more dependencies can be added to this list
|
||||
dependencies:
|
||||
ansible.netcommon: '>=1.0.0'
|
||||
ansible.posix: '>=1.0.0'
|
||||
community.kubernetes: '>=0.11.1' # check https://galaxy.ansible.com/community/kubernetes
|
||||
google.cloud: '>=0.10.1' # check https://galaxy.ansible.com/google/cloud
|
||||
community.kubernetes: '>=1.0.0'
|
||||
google.cloud: '>=1.0.0'
|
||||
repository: https://github.com/ansible-collections/community.general
|
||||
#documentation: https://github.com/ansible-collection-migration/community.general/tree/main/docs
|
||||
homepage: https://github.com/ansible-collections/community.general
|
||||
|
||||
@@ -740,3 +740,16 @@ plugin_routing:
|
||||
removal_version: 2.0.0
|
||||
warning_text: The mysql module_utils has been moved to the community.mysql collection.
|
||||
redirect: community.mysql.mysql
|
||||
callback:
|
||||
actionable:
|
||||
deprecation:
|
||||
removal_version: 2.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
full_skip:
|
||||
deprecation:
|
||||
removal_version: 2.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
stderr:
|
||||
deprecation:
|
||||
removal_version: 2.0.0
|
||||
warning_text: see plugin documentation for details
|
||||
|
||||
1
plugins/action/iptables_state.py
Symbolic link
1
plugins/action/iptables_state.py
Symbolic link
@@ -0,0 +1 @@
|
||||
./system/iptables_state.py
|
||||
1
plugins/action/shutdown.py
Symbolic link
1
plugins/action/shutdown.py
Symbolic link
@@ -0,0 +1 @@
|
||||
./system/shutdown.py
|
||||
189
plugins/action/system/iptables_state.py
Normal file
189
plugins/action/system/iptables_state.py
Normal file
@@ -0,0 +1,189 @@
|
||||
# Copyright: (c) 2020, quidame <quidame@poivron.org>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import time
|
||||
|
||||
from ansible.plugins.action import ActionBase
|
||||
from ansible.errors import AnsibleError, AnsibleActionFail, AnsibleConnectionFailure
|
||||
from ansible.utils.vars import merge_hash
|
||||
from ansible.utils.display import Display
|
||||
|
||||
display = Display()
|
||||
|
||||
|
||||
class ActionModule(ActionBase):
|
||||
|
||||
# Keep internal params away from user interactions
|
||||
_VALID_ARGS = frozenset(('path', 'state', 'table', 'noflush', 'counters', 'modprobe', 'ip_version', 'wait'))
|
||||
DEFAULT_SUDOABLE = True
|
||||
|
||||
MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO = (
|
||||
"This module doesn't support async>0 and poll>0 when its 'state' param "
|
||||
"is set to 'restored'. To enable its rollback feature (that needs the "
|
||||
"module to run asynchronously on the remote), please set task attribute "
|
||||
"'poll' (=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
|
||||
"'ansible_timeout' (=%s) (recommended).")
|
||||
MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK = (
|
||||
"Attempts to restore iptables state without rollback in case of mistake "
|
||||
"may lead the ansible controller to loose access to the hosts and never "
|
||||
"regain it before fixing firewall rules through a serial console, or any "
|
||||
"other way except SSH. Please set task attribute 'poll' (=%s) to 0, and "
|
||||
"'async' (=%s) to a value >2 and not greater than 'ansible_timeout' (=%s) "
|
||||
"(recommended).")
|
||||
MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT = (
|
||||
"You attempt to restore iptables state with rollback in case of mistake, "
|
||||
"but with settings that will lead this rollback to happen AFTER that the "
|
||||
"controller will reach its own timeout. Please set task attribute 'poll' "
|
||||
"(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
|
||||
"'ansible_timeout' (=%s) (recommended).")
|
||||
|
||||
    def _async_result(self, module_args, task_vars, timeout):
        '''
        Retrieve results of the asynchronous task, and display them in place of
        the async wrapper results (those with the ansible_job_id key).

        :param module_args: arguments for the ``async_status`` module call
            (job id and async dir).
        :param task_vars: task variables forwarded to the module execution.
        :param timeout: maximum number of polling iterations; ``0`` still
            performs a single status check.
        :returns: the dict returned by the last ``async_status`` invocation.
        '''
        # At least one iteration is required, even if timeout is 0.
        for i in range(max(1, timeout)):
            async_result = self._execute_module(
                module_name='ansible.builtin.async_status',
                module_args=module_args,
                task_vars=task_vars,
                wrap_async=False)
            if async_result['finished'] == 1:
                break
            # min(1, timeout) is 0 when timeout == 0 (no pause for the single
            # probe), otherwise a 1-second pause between polls.
            time.sleep(min(1, timeout))

        return async_result
|
||||
|
||||
    def run(self, tmp=None, task_vars=None):
        """Wrap the ``iptables_state`` module to make ``state=restored`` safe.

        For a restore, the module is run asynchronously on the remote; the
        connection is then reset and reachability is confirmed by removing the
        remote backup file. If that confirmation never happens (firewall rules
        locked the controller out), the module is expected to roll back on its
        own once its internal timeout expires.
        """

        # Declare capabilities before the base class run() validates the task.
        self._supports_check_mode = True
        self._supports_async = True

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        if not result.get('skipped'):

            # FUTURE: better to let _execute_module calculate this internally?
            wrap_async = self._task.async_val and not self._connection.has_native_async

            # Set short names for values we'll have to compare or reuse
            task_poll = self._task.poll
            task_async = self._task.async_val
            check_mode = self._play_context.check_mode
            max_timeout = self._connection._play_context.timeout
            module_name = self._task.action
            module_args = self._task.args

            if module_args.get('state', None) == 'restored':
                if not wrap_async:
                    # Synchronous restore: no rollback is possible, warn the
                    # user (unless in check mode, where nothing is applied).
                    if not check_mode:
                        display.warning(self.MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK % (
                            task_poll,
                            task_async,
                            max_timeout))
                elif task_poll:
                    # async with poll > 0 would block the controller while the
                    # firewall change cuts the connection: refuse it.
                    raise AnsibleActionFail(self.MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO % (
                        task_poll,
                        task_async,
                        max_timeout))
                else:
                    if task_async > max_timeout and not check_mode:
                        # The remote-side rollback would fire only after the
                        # controller has already given up.
                        display.warning(self.MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT % (
                            task_poll,
                            task_async,
                            max_timeout))

                    # BEGIN snippet from async_status action plugin
                    env_async_dir = [e for e in self._task.environment if
                                     "ANSIBLE_ASYNC_DIR" in e]
                    if len(env_async_dir) > 0:
                        # for backwards compatibility we need to get the dir from
                        # ANSIBLE_ASYNC_DIR that is defined in the environment. This is
                        # deprecated and will be removed in favour of shell options
                        async_dir = env_async_dir[0]['ANSIBLE_ASYNC_DIR']

                        msg = "Setting the async dir from the environment keyword " \
                              "ANSIBLE_ASYNC_DIR is deprecated. Set the async_dir " \
                              "shell option instead"
                        display.deprecated(msg, version='2.0.0',
                                           collection_name='community.general')  # was Ansible 2.12
                    else:
                        # inject the async directory based on the shell option into the
                        # module args
                        async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
                    # END snippet from async_status action plugin

                    # Bind the loop max duration to consistent values on both
                    # remote and local sides (if not the same, make the loop
                    # longer on the controller); and set a backup file path.
                    module_args['_timeout'] = task_async
                    module_args['_back'] = '%s/iptables.state' % async_dir
                    async_status_args = dict(_async_dir=async_dir)
                    # Removing the backup on the remote is the "confirm" step.
                    confirm_cmd = 'rm -f %s' % module_args['_back']
                    remaining_time = max(task_async, max_timeout)

            # do work!
            result = merge_hash(result, self._execute_module(module_args=module_args, task_vars=task_vars, wrap_async=wrap_async))

            # Then the 3-steps "go ahead or rollback":
            # - reset connection to ensure a persistent one will not be reused
            # - confirm the restored state by removing the backup on the remote
            # - retrieve the results of the asynchronous task to return them
            if '_back' in module_args:
                async_status_args['jid'] = result.get('ansible_job_id', None)
                if async_status_args['jid'] is None:
                    raise AnsibleActionFail("Unable to get 'ansible_job_id'.")

                # Catch early errors due to missing mandatory option, bad
                # option type/value, missing required system command, etc.
                result = merge_hash(result, self._async_result(async_status_args, task_vars, 0))

                if not result['finished']:
                    try:
                        self._connection.reset()
                        display.v("%s: reset connection" % (module_name))
                    except AttributeError:
                        display.warning("Connection plugin does not allow to reset the connection.")

                    for x in range(max_timeout):
                        time.sleep(1)
                        remaining_time -= 1
                        # - AnsibleConnectionFailure covers rejected requests (i.e.
                        #   by rules with '--jump REJECT')
                        # - ansible_timeout is able to cover dropped requests (due
                        #   to a rule or policy DROP) if not lower than async_val.
                        try:
                            garbage = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE)
                            break
                        except AnsibleConnectionFailure:
                            continue

                    result = merge_hash(result, self._async_result(async_status_args, task_vars, remaining_time))

                # Cleanup async related stuff and internal params
                for key in ('ansible_job_id', 'results_file', 'started', 'finished'):
                    # NOTE(review): falsy values (e.g. finished == 0) are kept,
                    # not deleted -- confirm this is intended.
                    if result.get(key):
                        del result[key]

                if result.get('invocation', {}).get('module_args'):
                    if '_timeout' in result['invocation']['module_args']:
                        del result['invocation']['module_args']['_back']
                        del result['invocation']['module_args']['_timeout']

                async_status_args['mode'] = 'cleanup'
                garbage = self._execute_module(
                    module_name='ansible.builtin.async_status',
                    module_args=async_status_args,
                    task_vars=task_vars,
                    wrap_async=False)

            if not wrap_async:
                # remove a temporary path we created
                self._remove_tmp_path(self._connection._shell.tmpdir)

        return result
|
||||
211
plugins/action/system/shutdown.py
Normal file
211
plugins/action/system/shutdown.py
Normal file
@@ -0,0 +1,211 @@
|
||||
# Copyright: (c) 2020, Amin Vakil <info@aminvakil.com>
|
||||
# Copyright: (c) 2016-2018, Matt Davis <mdavis@ansible.com>
|
||||
# Copyright: (c) 2018, Sam Doran <sdoran@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.errors import AnsibleError, AnsibleConnectionFailure
|
||||
from ansible.module_utils._text import to_native, to_text
|
||||
from ansible.module_utils.common.collections import is_string
|
||||
from ansible.plugins.action import ActionBase
|
||||
from ansible.utils.display import Display
|
||||
|
||||
display = Display()
|
||||
|
||||
|
||||
class TimedOutException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class ActionModule(ActionBase):
|
||||
TRANSFERS_FILES = False
|
||||
_VALID_ARGS = frozenset((
|
||||
'msg',
|
||||
'delay',
|
||||
'search_paths'
|
||||
))
|
||||
|
||||
DEFAULT_CONNECT_TIMEOUT = None
|
||||
DEFAULT_PRE_SHUTDOWN_DELAY = 0
|
||||
DEFAULT_SHUTDOWN_MESSAGE = 'Shut down initiated by Ansible'
|
||||
DEFAULT_SHUTDOWN_COMMAND = 'shutdown'
|
||||
DEFAULT_SHUTDOWN_COMMAND_ARGS = '-h {delay_min} "{message}"'
|
||||
DEFAULT_SUDOABLE = True
|
||||
|
||||
SHUTDOWN_COMMANDS = {
|
||||
'alpine': 'poweroff',
|
||||
'vmkernel': 'halt',
|
||||
}
|
||||
|
||||
SHUTDOWN_COMMAND_ARGS = {
|
||||
'alpine': '',
|
||||
'void': '-h +{delay_min} "{message}"',
|
||||
'freebsd': '-h +{delay_sec}s "{message}"',
|
||||
'linux': DEFAULT_SHUTDOWN_COMMAND_ARGS,
|
||||
'macosx': '-h +{delay_min} "{message}"',
|
||||
'openbsd': '-h +{delay_min} "{message}"',
|
||||
'solaris': '-y -g {delay_sec} -i 5 "{message}"',
|
||||
'sunos': '-y -g {delay_sec} -i 5 "{message}"',
|
||||
'vmkernel': '-d {delay_sec}',
|
||||
'aix': '-Fh',
|
||||
}
|
||||
|
||||
    def __init__(self, *args, **kwargs):
        # No extra state to initialize; delegate straight to the base class.
        super(ActionModule, self).__init__(*args, **kwargs)
|
||||
|
||||
    @property
    def delay(self):
        # Seconds to wait before shutting down, clamped to >= 0 by
        # _check_delay; defaults to DEFAULT_PRE_SHUTDOWN_DELAY.
        return self._check_delay('delay', self.DEFAULT_PRE_SHUTDOWN_DELAY)
|
||||
|
||||
def _check_delay(self, key, default):
|
||||
"""Ensure that the value is positive or zero"""
|
||||
value = int(self._task.args.get(key, default))
|
||||
if value < 0:
|
||||
value = 0
|
||||
return value
|
||||
|
||||
def _get_value_from_facts(self, variable_name, distribution, default_value):
|
||||
"""Get dist+version specific args first, then distribution, then family, lastly use default"""
|
||||
attr = getattr(self, variable_name)
|
||||
value = attr.get(
|
||||
distribution['name'] + distribution['version'],
|
||||
attr.get(
|
||||
distribution['name'],
|
||||
attr.get(
|
||||
distribution['family'],
|
||||
getattr(self, default_value))))
|
||||
return value
|
||||
|
||||
def get_shutdown_command_args(self, distribution):
|
||||
args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')
|
||||
# Convert seconds to minutes. If less that 60, set it to 0.
|
||||
delay_sec = self.delay
|
||||
shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE)
|
||||
return args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message)
|
||||
|
||||
    def get_distribution(self, task_vars):
        """Run the setup module (minimal fact gathering) on the target and
        return a dict with the distribution ``name``, major ``version`` and
        OS ``family`` (all lowercased / text).

        :raises AnsibleError: if fact gathering failed or an expected fact
            key is missing from the module output.
        """
        # FIXME: only execute the module if we don't already have the facts we need
        distribution = {}
        display.debug('{action}: running setup module to get distribution'.format(action=self._task.action))
        module_output = self._execute_module(
            task_vars=task_vars,
            module_name='ansible.legacy.setup',
            module_args={'gather_subset': 'min'})
        try:
            if module_output.get('failed', False):
                # NOTE(review): assumes module_stdout/module_stderr exist on
                # failure; if absent the KeyError below reports them as
                # "missing in output" instead -- confirm that is acceptable.
                raise AnsibleError('Failed to determine system distribution. {0}, {1}'.format(
                    to_native(module_output['module_stdout']).strip(),
                    to_native(module_output['module_stderr']).strip()))
            distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower()
            # Major version only, e.g. '20' from '20.04'.
            distribution['version'] = to_text(module_output['ansible_facts']['ansible_distribution_version'].split('.')[0])
            distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower())
            display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution))
            return distribution
        except KeyError as ke:
            raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0]))
|
||||
|
||||
def get_shutdown_command(self, task_vars, distribution):
    """Locate the shutdown binary on the target and return its full path.

    Consults the distribution-specific command name, then runs the find
    module over the configured (or default) search paths. The resolved
    path is cached on the instance as ``self._shutdown_command``.
    """
    shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND')
    default_search_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
    search_paths = self._task.args.get('search_paths', default_search_paths)

    # FIXME: switch all this to user arg spec validation methods when they are available
    # A bare string is promoted to a single-element list.
    if is_string(search_paths):
        search_paths = [search_paths]

    # Reject anything that is not a flat list of strings. any() raises
    # TypeError for non-iterables, which we fold into the same error.
    err_msg = "'search_paths' must be a string or flat list of strings, got {0}"
    try:
        if not isinstance(search_paths, list) or any(not is_string(x) for x in search_paths):
            raise TypeError
    except TypeError:
        raise AnsibleError(err_msg.format(search_paths))

    display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format(
        action=self._task.action,
        command=shutdown_bin,
        paths=search_paths))
    find_result = self._execute_module(
        task_vars=task_vars,
        # prevent collection search by calling with ansible.legacy (still allows library/ override of find)
        module_name='ansible.legacy.find',
        module_args={
            'paths': search_paths,
            'patterns': [shutdown_bin],
            'file_type': 'any'
        }
    )

    matches = [entry['path'] for entry in find_result['files']]
    if not matches:
        raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths))
    self._shutdown_command = matches[0]
    return self._shutdown_command
|
||||
|
||||
def perform_shutdown(self, task_vars, distribution):
    """Execute the shutdown command on the target and report the outcome.

    Returns a result dict: on success, 'failed' is False and
    'shutdown_command' carries the exact command line used; on failure,
    'failed' is True with 'shutdown' False and an explanatory 'msg'.
    """
    result = {}
    shutdown_result = {}
    shutdown_command = self.get_shutdown_command(task_vars, distribution)
    shutdown_command_args = self.get_shutdown_command_args(distribution)
    shutdown_command_exec = '{0} {1}'.format(shutdown_command, shutdown_command_args)

    # Drop any temporary files before the connection goes away.
    self.cleanup(force=True)
    try:
        display.vvv("{action}: shutting down server...".format(action=self._task.action))
        display.debug("{action}: shutting down server with command '{command}'".format(action=self._task.action, command=shutdown_command_exec))
        if self._play_context.check_mode:
            shutdown_result['rc'] = 0
        else:
            shutdown_result = self._low_level_execute_command(shutdown_command_exec, sudoable=self.DEFAULT_SUDOABLE)
    except AnsibleConnectionFailure as e:
        # If the connection is closed too quickly due to the system being shutdown, carry on
        display.debug('{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action, error=to_text(e)))
        shutdown_result['rc'] = 0

    if shutdown_result['rc'] == 0:
        result['failed'] = False
        result['shutdown_command'] = shutdown_command_exec
        return result

    result['failed'] = True
    result['shutdown'] = False
    result['msg'] = "Shutdown command failed. Error was {stdout}, {stderr}".format(
        stdout=to_native(shutdown_result['stdout'].strip()),
        stderr=to_native(shutdown_result['stderr'].strip()))
    return result
|
||||
|
||||
def run(self, tmp=None, task_vars=None):
    """Action entry point: validate the connection, then shut the host down."""
    self._supports_check_mode = True
    self._supports_async = True

    # Refuse to run over a local connection outside check mode:
    # we would shut down the control node itself.
    if self._connection.transport == 'local' and (not self._play_context.check_mode):
        msg = 'Running {0} with local connection would shutdown the control node.'.format(self._task.action)
        return {'changed': False, 'elapsed': 0, 'shutdown': False, 'failed': True, 'msg': msg}

    if task_vars is None:
        task_vars = {}

    result = super(ActionModule, self).run(tmp, task_vars)
    if result.get('skipped', False) or result.get('failed', False):
        return result

    distribution = self.get_distribution(task_vars)

    # Initiate shutdown; a failed attempt becomes the task result as-is.
    shutdown_result = self.perform_shutdown(task_vars, distribution)
    if shutdown_result['failed']:
        return shutdown_result

    result['shutdown'] = True
    result['changed'] = True
    result['shutdown_command'] = shutdown_result['shutdown_command']
    return result
|
||||
@@ -14,7 +14,7 @@ DOCUMENTATION = '''
|
||||
become_user:
|
||||
description:
|
||||
- User you 'become' to execute the task
|
||||
- This plugin ignores this setting as pfexec uses it's own ``exec_attr`` to figure this out,
|
||||
- This plugin ignores this setting as pfexec uses it's own C(exec_attr) to figure this out,
|
||||
but it is supplied here for Ansible to make decisions needed for the task execution, like file permissions.
|
||||
default: root
|
||||
ini:
|
||||
@@ -80,8 +80,8 @@ DOCUMENTATION = '''
|
||||
- name: ansible_pfexec_wrap_execution
|
||||
env:
|
||||
- name: ANSIBLE_PFEXEC_WRAP_EXECUTION
|
||||
note:
|
||||
- This plugin ignores ``become_user`` as pfexec uses it's own ``exec_attr`` to figure this out.
|
||||
notes:
|
||||
- This plugin ignores I(become_user) as pfexec uses its own C(exec_attr) to figure this out.
|
||||
'''
|
||||
|
||||
from ansible.plugins.become import BecomeBase
|
||||
|
||||
1
plugins/cache/memcached.py
vendored
1
plugins/cache/memcached.py
vendored
@@ -6,6 +6,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
cache: memcached
|
||||
short_description: Use memcached DB for cache
|
||||
description:
|
||||
|
||||
1
plugins/cache/redis.py
vendored
1
plugins/cache/redis.py
vendored
@@ -5,6 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
cache: redis
|
||||
short_description: Use Redis DB for cache
|
||||
description:
|
||||
|
||||
@@ -7,6 +7,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
callback: actionable
|
||||
type: stdout
|
||||
short_description: shows only items that need attention
|
||||
|
||||
@@ -7,8 +7,9 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
callback: cgroup_memory_recap
|
||||
callback_type: aggregate
|
||||
type: aggregate
|
||||
requirements:
|
||||
- whitelist in configuration
|
||||
- cgroups
|
||||
|
||||
@@ -6,6 +6,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
callback: context_demo
|
||||
type: aggregate
|
||||
short_description: demo callback that adds play/task context
|
||||
|
||||
@@ -6,13 +6,9 @@
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
from ansible import constants as C
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
from ansible.utils.color import colorize, hostcolor
|
||||
from ansible.template import Templar
|
||||
from ansible.playbook.task_include import TaskInclude
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
callback: counter_enabled
|
||||
type: stdout
|
||||
short_description: adds counters to the output items (tasks and hosts/task)
|
||||
@@ -26,6 +22,12 @@ DOCUMENTATION = '''
|
||||
- set as stdout callback in ansible.cfg (stdout_callback = counter_enabled)
|
||||
'''
|
||||
|
||||
from ansible import constants as C
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
from ansible.utils.color import colorize, hostcolor
|
||||
from ansible.template import Templar
|
||||
from ansible.playbook.task_include import TaskInclude
|
||||
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
callback: diy
|
||||
callback_type: stdout
|
||||
type: stdout
|
||||
short_description: Customize the output
|
||||
version_added: 0.2.0
|
||||
description:
|
||||
|
||||
@@ -7,6 +7,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
callback: full_skip
|
||||
type: stdout
|
||||
short_description: suppresses tasks if all hosts skipped
|
||||
|
||||
@@ -6,8 +6,9 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
callback: hipchat
|
||||
callback_type: notification
|
||||
type: notification
|
||||
requirements:
|
||||
- whitelist in configuration.
|
||||
- prettytable (python lib)
|
||||
|
||||
@@ -6,6 +6,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
callback: jabber
|
||||
type: notification
|
||||
short_description: post task events to a jabber server
|
||||
|
||||
@@ -6,6 +6,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
callback: log_plays
|
||||
type: notification
|
||||
short_description: write playbook output to log file
|
||||
|
||||
@@ -5,8 +5,9 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
callback: logdna
|
||||
callback_type: aggregate
|
||||
type: aggregate
|
||||
short_description: Sends playbook logs to LogDNA
|
||||
description:
|
||||
- This callback will report logs from playbook actions, tasks, and events to LogDNA (https://app.logdna.com)
|
||||
|
||||
@@ -5,6 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
callback: logentries
|
||||
type: notification
|
||||
short_description: Sends events to Logentries
|
||||
@@ -75,7 +76,7 @@ examples: >
|
||||
To enable, add this to your ansible.cfg file in the defaults block
|
||||
|
||||
[defaults]
|
||||
callback_whitelist = logentries
|
||||
callback_whitelist = community.general.logentries
|
||||
|
||||
Either set the environment variables
|
||||
export LOGENTRIES_API=data.logentries.com
|
||||
|
||||
@@ -6,6 +6,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
callback: logstash
|
||||
type: notification
|
||||
short_description: Sends events to Logstash
|
||||
|
||||
@@ -52,7 +52,7 @@ options:
|
||||
ini:
|
||||
- section: callback_mail
|
||||
key: bcc
|
||||
note:
|
||||
notes:
|
||||
- "TODO: expand configuration options now that plugins can leverage Ansible's configuration"
|
||||
'''
|
||||
|
||||
|
||||
@@ -6,8 +6,9 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
callback: 'null'
|
||||
callback_type: stdout
|
||||
type: stdout
|
||||
requirements:
|
||||
- set as main display callback
|
||||
short_description: Don't display stuff to screen
|
||||
|
||||
@@ -7,6 +7,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
callback: say
|
||||
type: notification
|
||||
requirements:
|
||||
|
||||
@@ -6,8 +6,9 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
callback: selective
|
||||
callback_type: stdout
|
||||
type: stdout
|
||||
requirements:
|
||||
- set as main display callback
|
||||
short_description: only print certain tasks
|
||||
|
||||
@@ -7,8 +7,9 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
callback: slack
|
||||
callback_type: notification
|
||||
type: notification
|
||||
requirements:
|
||||
- whitelist in configuration
|
||||
- prettytable (python library)
|
||||
|
||||
@@ -21,7 +21,7 @@ DOCUMENTATION = '''
|
||||
callback: splunk
|
||||
type: aggregate
|
||||
short_description: Sends task result events to Splunk HTTP Event Collector
|
||||
author: "Stuart Hirst <support@convergingdata.com>"
|
||||
author: "Stuart Hirst (!UNKNOWN) <support@convergingdata.com>"
|
||||
description:
|
||||
- This callback plugin will send task results as JSON formatted events to a Splunk HTTP collector.
|
||||
- The companion Splunk Monitoring & Diagnostics App is available here "https://splunkbase.splunk.com/app/4023/"
|
||||
@@ -63,7 +63,7 @@ EXAMPLES = '''
|
||||
examples: >
|
||||
To enable, add this to your ansible.cfg file in the defaults block
|
||||
[defaults]
|
||||
callback_whitelist = splunk
|
||||
callback_whitelist = community.general.splunk
|
||||
Set the environment variable
|
||||
export SPLUNK_URL=http://mysplunkinstance.datapaas.io:8088/services/collector/event
|
||||
export SPLUNK_AUTHTOKEN=f23blad6-5965-4537-bf69-5b5a545blabla88
|
||||
|
||||
@@ -7,8 +7,9 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
callback: stderr
|
||||
callback_type: stdout
|
||||
type: stdout
|
||||
requirements:
|
||||
- set as main display callback
|
||||
short_description: Splits output, sending failed tasks to stderr
|
||||
|
||||
@@ -42,7 +42,7 @@ EXAMPLES = '''
|
||||
examples: >
|
||||
To enable, add this to your ansible.cfg file in the defaults block
|
||||
[defaults]
|
||||
callback_whitelist = sumologic
|
||||
callback_whitelist = community.general.sumologic
|
||||
|
||||
Set the environment variable
|
||||
export SUMOLOGIC_URL=https://endpoint1.collection.us2.sumologic.com/receiver/v1/http/R8moSv1d8EW9LAUFZJ6dbxCFxwLH6kfCdcBfddlfxCbLuL-BN5twcTpMk__pYy_cDmp==
|
||||
|
||||
@@ -6,8 +6,9 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
callback: syslog_json
|
||||
callback_type: notification
|
||||
type: notification
|
||||
requirements:
|
||||
- whitelist in configuration
|
||||
short_description: sends JSON events to syslog
|
||||
|
||||
@@ -9,7 +9,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
callback: unixy
|
||||
type: stdout
|
||||
author: Allyson Bowles <@akatch>
|
||||
author: Allyson Bowles (@akatch)
|
||||
short_description: condensed Ansible output
|
||||
description:
|
||||
- Consolidated Ansible output in the style of LINUX/UNIX startup logs.
|
||||
|
||||
@@ -6,6 +6,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
callback: yaml
|
||||
type: stdout
|
||||
short_description: yaml-ized Ansible screen output
|
||||
|
||||
@@ -9,7 +9,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Maykel Moya <mmoya@speedyrails.com>
|
||||
author: Maykel Moya (!UNKNOWN) <mmoya@speedyrails.com>
|
||||
connection: chroot
|
||||
short_description: Interact with local chroot
|
||||
description:
|
||||
|
||||
@@ -11,8 +11,8 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author:
|
||||
- Lorin Hochestein
|
||||
- Leendert Brouwer
|
||||
- Lorin Hochestein (!UNKNOWN)
|
||||
- Leendert Brouwer (!UNKNOWN)
|
||||
connection: docker
|
||||
short_description: Run tasks in docker containers
|
||||
description:
|
||||
|
||||
@@ -9,7 +9,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Stephan Lohse <dev-github@ploek.org>
|
||||
author: Stephan Lohse (!UNKNOWN) <dev-github@ploek.org>
|
||||
connection: iocage
|
||||
short_description: Run tasks in iocage jails
|
||||
description:
|
||||
|
||||
@@ -6,7 +6,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Joerg Thalheim <joerg@higgsboson.tk>
|
||||
author: Joerg Thalheim (!UNKNOWN) <joerg@higgsboson.tk>
|
||||
connection: lxc
|
||||
short_description: Run tasks in lxc containers via lxc python library
|
||||
description:
|
||||
|
||||
@@ -6,7 +6,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Matt Clay <matt@mystile.com>
|
||||
author: Matt Clay (@mattclay) <matt@mystile.com>
|
||||
connection: lxd
|
||||
short_description: Run tasks in lxc containers via lxc CLI
|
||||
description:
|
||||
|
||||
@@ -22,7 +22,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author:
|
||||
- xuxinkun
|
||||
- xuxinkun (!UNKNOWN)
|
||||
|
||||
connection: oc
|
||||
|
||||
@@ -150,7 +150,7 @@ DOCUMENTATION = '''
|
||||
from ansible_collections.community.kubernetes.plugins.connection.kubectl import Connection as KubectlConnection
|
||||
|
||||
|
||||
CONNECTION_TRANSPORT = 'oc'
|
||||
CONNECTION_TRANSPORT = 'community.general.oc'
|
||||
|
||||
CONNECTION_OPTIONS = {
|
||||
'oc_container': '-c',
|
||||
|
||||
@@ -16,6 +16,7 @@ options:
|
||||
description:
|
||||
- Scaleway OAuth token.
|
||||
type: str
|
||||
required: true
|
||||
aliases: [ oauth_token ]
|
||||
api_url:
|
||||
description:
|
||||
|
||||
94
plugins/filter/jc.py
Normal file
94
plugins/filter/jc.py
Normal file
@@ -0,0 +1,94 @@
|
||||
# (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
# contributed by Kelly Brazil <kellyjonbrazil@gmail.com>
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.errors import AnsibleError, AnsibleFilterError
|
||||
import importlib
|
||||
|
||||
try:
|
||||
import jc
|
||||
HAS_LIB = True
|
||||
except ImportError:
|
||||
HAS_LIB = False
|
||||
|
||||
|
||||
def jc(data, parser, quiet=True, raw=False):
    """Convert command output text to JSON-compatible data via the jc library.

    Arguments:
        data    (string) raw command output to parse
        parser  (string) name of the jc parser matching the input data
                (e.g. 'ifconfig'); see
                https://github.com/kellyjonbrazil/jc#parsers for the list
        quiet   (bool) suppress jc warning messages (default True)
        raw     (bool) return pre-processed JSON (default False)

    Returns:
        dictionary or list of dictionaries produced by the chosen parser

    Example:

        - name: run date command
          hosts: ubuntu
          tasks:
          - shell: date
            register: result
          - set_fact:
              myvar: "{{ result.stdout | community.general.jc('date') }}"
          - debug:
              msg: "{{ myvar }}"
    """
    # Fail clearly when the optional jc dependency is absent.
    if not HAS_LIB:
        raise AnsibleError('You need to install "jc" prior to running jc filter')

    try:
        # Parsers live in submodules, e.g. jc.parsers.date.
        parser_mod = importlib.import_module('jc.parsers.' + parser)
        return parser_mod.parse(data, quiet=quiet, raw=raw)
    except Exception as e:
        # Wrap any parser failure in the filter-specific error type.
        raise AnsibleFilterError('Error in jc filter plugin: %s' % e)
|
||||
|
||||
|
||||
class FilterModule(object):
    """Expose the jc conversion function as a Jinja2 filter."""

    def filters(self):
        """Return the mapping of filter names to callables."""
        return {'jc': jc}
|
||||
@@ -6,6 +6,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Orion Poplawski (@opoplawski)
|
||||
name: cobbler
|
||||
plugin_type: inventory
|
||||
short_description: Cobbler inventory source
|
||||
@@ -17,9 +18,9 @@ DOCUMENTATION = '''
|
||||
- inventory_cache
|
||||
options:
|
||||
plugin:
|
||||
description: The name of this plugin, it should always be set to C(cobbler) for this plugin to recognize it as it's own.
|
||||
description: The name of this plugin, it should always be set to C(community.general.cobbler) for this plugin to recognize it as its own.
|
||||
required: yes
|
||||
choices: ['cobbler']
|
||||
choices: [ 'cobbler', 'community.general.cobbler' ]
|
||||
url:
|
||||
description: URL to cobbler.
|
||||
default: 'http://cobbler/cobbler_api'
|
||||
@@ -92,7 +93,7 @@ except ImportError:
|
||||
class InventoryModule(BaseInventoryPlugin, Cacheable):
|
||||
''' Host inventory parser for ansible using cobbler as source. '''
|
||||
|
||||
NAME = 'cobbler'
|
||||
NAME = 'community.general.cobbler'
|
||||
|
||||
def __init__(self):
|
||||
|
||||
|
||||
@@ -24,7 +24,7 @@ DOCUMENTATION = '''
|
||||
plugin:
|
||||
description: token that ensures this is a source file for the C(docker_machine) plugin.
|
||||
required: yes
|
||||
choices: ['docker_machine']
|
||||
choices: ['docker_machine', 'community.general.docker_machine']
|
||||
daemon_env:
|
||||
description:
|
||||
- Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched.
|
||||
@@ -54,7 +54,7 @@ DOCUMENTATION = '''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Minimal example
|
||||
plugin: docker_machine
|
||||
plugin: community.general.docker_machine
|
||||
|
||||
# Example using constructed features to create a group per Docker Machine driver
|
||||
# (https://docs.docker.com/machine/drivers/), e.g.:
|
||||
|
||||
@@ -26,11 +26,11 @@ DOCUMENTATION = '''
|
||||
I(nonleaders) - all nodes except the swarm leader."
|
||||
options:
|
||||
plugin:
|
||||
description: The name of this plugin, it should always be set to C(docker_swarm) for this plugin to
|
||||
recognize it as it's own.
|
||||
description: The name of this plugin, it should always be set to C(community.general.docker_swarm)
|
||||
for this plugin to recognize it as its own.
|
||||
type: str
|
||||
required: true
|
||||
choices: docker_swarm
|
||||
choices: [ docker_swarm, community.general.docker_swarm ]
|
||||
docker_host:
|
||||
description:
|
||||
- Socket of a Docker swarm manager node (C(tcp), C(unix)).
|
||||
@@ -101,20 +101,20 @@ DOCUMENTATION = '''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Minimal example using local docker
|
||||
plugin: docker_swarm
|
||||
plugin: community.general.docker_swarm
|
||||
docker_host: unix://var/run/docker.sock
|
||||
|
||||
# Minimal example using remote docker
|
||||
plugin: docker_swarm
|
||||
plugin: community.general.docker_swarm
|
||||
docker_host: tcp://my-docker-host:2375
|
||||
|
||||
# Example using remote docker with unverified TLS
|
||||
plugin: docker_swarm
|
||||
plugin: community.general.docker_swarm
|
||||
docker_host: tcp://my-docker-host:2376
|
||||
tls: yes
|
||||
|
||||
# Example using remote docker with verified TLS and client certificate verification
|
||||
plugin: docker_swarm
|
||||
plugin: community.general.docker_swarm
|
||||
docker_host: tcp://my-docker-host:2376
|
||||
validate_certs: yes
|
||||
ca_cert: /somewhere/ca.pem
|
||||
@@ -122,7 +122,7 @@ client_key: /somewhere/key.pem
|
||||
client_cert: /somewhere/cert.pem
|
||||
|
||||
# Example using constructed features to create groups and set ansible_host
|
||||
plugin: docker_swarm
|
||||
plugin: community.general.docker_swarm
|
||||
docker_host: tcp://my-docker-host:2375
|
||||
strict: False
|
||||
keyed_groups:
|
||||
|
||||
@@ -10,8 +10,8 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
name: gitlab_runners
|
||||
plugin_type: inventory
|
||||
authors:
|
||||
- Stefan Heitmüller (stefan.heitmueller@gmx.com)
|
||||
author:
|
||||
- Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
|
||||
short_description: Ansible dynamic inventory plugin for GitLab runners.
|
||||
requirements:
|
||||
- python >= 2.7
|
||||
@@ -28,6 +28,7 @@ DOCUMENTATION = '''
|
||||
required: true
|
||||
choices:
|
||||
- gitlab_runners
|
||||
- community.general.gitlab_runners
|
||||
server_url:
|
||||
description: The URL of the GitLab server, with protocol (i.e. http or https).
|
||||
env:
|
||||
@@ -60,11 +61,11 @@ DOCUMENTATION = '''
|
||||
|
||||
EXAMPLES = '''
|
||||
# gitlab_runners.yml
|
||||
plugin: gitlab_runners
|
||||
plugin: community.general.gitlab_runners
|
||||
host: https://gitlab.com
|
||||
|
||||
# Example using constructed features to create groups and set ansible_host
|
||||
plugin: gitlab_runners
|
||||
plugin: community.general.gitlab_runners
|
||||
host: https://gitlab.com
|
||||
strict: False
|
||||
keyed_groups:
|
||||
|
||||
@@ -24,7 +24,7 @@ DOCUMENTATION = '''
|
||||
plugin:
|
||||
description: token that ensures this is a source file for the 'kubevirt' plugin.
|
||||
required: True
|
||||
choices: ['kubevirt']
|
||||
choices: ['kubevirt', 'community.general.kubevirt']
|
||||
type: str
|
||||
host_format:
|
||||
description:
|
||||
@@ -123,7 +123,7 @@ EXAMPLES = '''
|
||||
# File must be named kubevirt.yaml or kubevirt.yml
|
||||
|
||||
# Authenticate with token, and return all virtual machines for all namespaces
|
||||
plugin: kubevirt
|
||||
plugin: community.general.kubevirt
|
||||
connections:
|
||||
- host: https://kubevirt.io
|
||||
token: xxxxxxxxxxxxxxxx
|
||||
@@ -131,7 +131,7 @@ connections:
|
||||
|
||||
# Use default config (~/.kube/config) file and active context, and return vms with interfaces
|
||||
# connected to network myovsnetwork and from namespace vms
|
||||
plugin: kubevirt
|
||||
plugin: community.general.kubevirt
|
||||
connections:
|
||||
- namespaces:
|
||||
- vms
|
||||
|
||||
@@ -22,7 +22,7 @@ DOCUMENTATION = r'''
|
||||
plugin:
|
||||
description: marks this as an instance of the 'linode' plugin
|
||||
required: true
|
||||
choices: ['linode']
|
||||
choices: ['linode', 'community.general.linode']
|
||||
access_token:
|
||||
description: The Linode account personal access token.
|
||||
required: true
|
||||
@@ -42,10 +42,10 @@ DOCUMENTATION = r'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
# Minimal example. `LINODE_ACCESS_TOKEN` is exposed in environment.
|
||||
plugin: linode
|
||||
plugin: community.general.linode
|
||||
|
||||
# Example with regions, types, groups and access token
|
||||
plugin: linode
|
||||
plugin: community.general.linode
|
||||
access_token: foobar
|
||||
regions:
|
||||
- eu-west
|
||||
|
||||
@@ -5,6 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
name: nmap
|
||||
plugin_type: inventory
|
||||
short_description: Uses nmap to find hosts to target
|
||||
@@ -19,7 +20,7 @@ DOCUMENTATION = '''
|
||||
plugin:
|
||||
description: token that ensures this is a source file for the 'nmap' plugin.
|
||||
required: True
|
||||
choices: ['nmap']
|
||||
choices: ['nmap', 'community.general.nmap']
|
||||
address:
|
||||
description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation.
|
||||
required: True
|
||||
@@ -43,10 +44,10 @@ DOCUMENTATION = '''
|
||||
- 'TODO: add OS fingerprinting'
|
||||
'''
|
||||
EXAMPLES = '''
|
||||
# inventory.config file in YAML format
|
||||
plugin: nmap
|
||||
strict: False
|
||||
address: 192.168.0.0/24
|
||||
# inventory.config file in YAML format
|
||||
plugin: community.general.nmap
|
||||
strict: False
|
||||
address: 192.168.0.0/24
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
@@ -18,7 +18,7 @@ DOCUMENTATION = '''
|
||||
plugin:
|
||||
description: token that ensures this is a source file for the 'online' plugin.
|
||||
required: True
|
||||
choices: ['online']
|
||||
choices: ['online', 'community.general.online']
|
||||
oauth_token:
|
||||
required: True
|
||||
description: Online OAuth token.
|
||||
@@ -49,7 +49,7 @@ EXAMPLES = '''
|
||||
# online_inventory.yml file in YAML format
|
||||
# Example command line: ansible-inventory --list -i online_inventory.yml
|
||||
|
||||
plugin: online
|
||||
plugin: community.general.online
|
||||
hostnames:
|
||||
- public_ipv4
|
||||
groups:
|
||||
|
||||
348
plugins/inventory/proxmox.py
Normal file
348
plugins/inventory/proxmox.py
Normal file
@@ -0,0 +1,348 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>, Daniel Lobato Garcia <dlobatog@redhat.com>
|
||||
# Copyright (c) 2018 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: proxmox
|
||||
plugin_type: inventory
|
||||
short_description: Proxmox inventory source
|
||||
version_added: "1.2.0"
|
||||
author:
|
||||
- Jeffrey van Pelt (@Thulium-Drake) <jeff@vanpelt.one>
|
||||
requirements:
|
||||
- requests >= 1.1
|
||||
description:
|
||||
- Get inventory hosts from a Proxmox PVE cluster.
|
||||
- "Uses a configuration file as an inventory source, it must end in C(.proxmox.yml) or C(.proxmox.yaml)"
|
||||
- Will retrieve the first network interface with an IP for Proxmox nodes.
|
||||
- Can retrieve LXC/QEMU configuration as facts.
|
||||
extends_documentation_fragment:
|
||||
- inventory_cache
|
||||
options:
|
||||
plugin:
|
||||
description: The name of this plugin, it should always be set to C(community.general.proxmox) for this plugin to recognize it as it's own.
|
||||
required: yes
|
||||
choices: ['community.general.proxmox']
|
||||
type: str
|
||||
url:
|
||||
description: URL to Proxmox cluster.
|
||||
default: 'http://localhost:8006'
|
||||
type: str
|
||||
user:
|
||||
description: Proxmox authentication user.
|
||||
required: yes
|
||||
type: str
|
||||
password:
|
||||
description: Proxmox authentication password.
|
||||
required: yes
|
||||
type: str
|
||||
validate_certs:
|
||||
description: Verify SSL certificate if using HTTPS.
|
||||
type: boolean
|
||||
default: yes
|
||||
group_prefix:
|
||||
description: Prefix to apply to Proxmox groups.
|
||||
default: proxmox_
|
||||
type: str
|
||||
facts_prefix:
|
||||
description: Prefix to apply to LXC/QEMU config facts.
|
||||
default: proxmox_
|
||||
type: str
|
||||
want_facts:
|
||||
description: Gather LXC/QEMU configuration facts.
|
||||
default: no
|
||||
type: bool
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# my.proxmox.yml
|
||||
plugin: community.general.proxmox
|
||||
url: http://localhost:8006
|
||||
user: ansible@pve
|
||||
password: secure
|
||||
validate_certs: no
|
||||
'''
|
||||
|
||||
import re
|
||||
|
||||
from ansible.module_utils.common._collections_compat import MutableMapping
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlencode
|
||||
|
||||
# 3rd party imports
|
||||
try:
    import requests
    if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
        # Treat a too-old requests exactly like a missing one so the
        # single HAS_REQUESTS flag covers both failure modes.
        raise ImportError
    HAS_REQUESTS = True
except ImportError:
    # Checked in parse(); a helpful AnsibleError is raised there instead
    # of failing at import time.
    HAS_REQUESTS = False
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Cacheable):
|
||||
''' Host inventory parser for ansible using Proxmox as source. '''
|
||||
|
||||
NAME = 'community.general.proxmox'
|
||||
|
||||
def __init__(self):
    '''Initialize plugin state; real values are filled in by parse().'''
    super(InventoryModule, self).__init__()

    # populated from the inventory configuration file
    self.proxmox_url = None

    # runtime state: shared HTTP session and cache controls
    self.session = None
    self.cache_key = None
    self.use_cache = None
|
||||
|
||||
def verify_file(self, path):
    '''Return True when ``path`` is an inventory source this plugin accepts
    (file name ends in ``proxmox.yml`` or ``proxmox.yaml``).'''
    if not super(InventoryModule, self).verify_file(path):
        return False
    if path.endswith(('proxmox.yaml', 'proxmox.yml')):
        return True
    self.display.vvv('Skipping due to inventory source not ending in "proxmox.yaml" nor "proxmox.yml"')
    return False
|
||||
|
||||
def _get_session(self):
    '''Return the shared requests session, creating it on first use.

    Certificate verification follows the ``validate_certs`` option.
    '''
    if self.session is None:
        session = requests.session()
        session.verify = self.get_option('validate_certs')
        self.session = session
    return self.session
|
||||
|
||||
def _get_auth(self):
    '''Authenticate against the PVE API and remember the ticket.

    Stores the session ticket and the CSRF prevention token in
    ``self.credentials`` for use by later API calls.
    '''
    payload = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password, })

    session = self._get_session()
    response = session.post('%s/api2/json/access/ticket' % self.proxmox_url, data=payload)
    body = response.json()

    data = body['data']
    self.credentials = {
        'ticket': data['ticket'],
        'CSRFPreventionToken': data['CSRFPreventionToken'],
    }
|
||||
|
||||
def _get_json(self, url, ignore_errors=None):
    '''GET ``url`` from the PVE API and return the decoded payload,
    answering from the in-memory/persistent cache when allowed.

    :param url: full API URL to fetch
    :param ignore_errors: optional collection of HTTP status codes that
        should abort the fetch silently (returns whatever was gathered)
    :return: decoded response data (list or dict depending on endpoint)
    '''
    # Only hit the API when caching is off or this URL is not cached yet.
    if not self.use_cache or url not in self._cache.get(self.cache_key, {}):

        # Seed the per-source cache bucket on first use.
        if self.cache_key not in self._cache:
            self._cache[self.cache_key] = {'url': ''}

        data = []
        s = self._get_session()
        # NOTE(review): the loop is written for pagination but every branch
        # ends in ``break``, so at most one request is made -- confirm the
        # API never actually paginates these endpoints.
        while True:
            # Authentication is via the ticket obtained in _get_auth().
            headers = {'Cookie': 'PVEAuthCookie={0}'.format(self.credentials['ticket'])}
            ret = s.get(url, headers=headers)
            if ignore_errors and ret.status_code in ignore_errors:
                break
            ret.raise_for_status()
            json = ret.json()

            # process results
            # FIXME: This assumes 'return type' matches a specific query,
            # it will break if we expand the queries and they dont have different types
            if 'data' not in json:
                # /hosts/:id does not have a 'data' key
                data = json
                break
            elif isinstance(json['data'], MutableMapping):
                # /facts are returned as dict in 'data'
                data = json['data']
                break
            else:
                # /hosts 's 'results' is a list of all hosts, returned is paginated
                data = data + json['data']
                break

        self._cache[self.cache_key][url] = data

    return self._cache[self.cache_key][url]
|
||||
|
||||
def _get_nodes(self):
    '''Return the list of cluster nodes from the PVE API.'''
    return self._get_json('{0}/api2/json/nodes'.format(self.proxmox_url))
|
||||
|
||||
def _get_pools(self):
    '''Return the list of resource pools from the PVE API.'''
    return self._get_json('{0}/api2/json/pools'.format(self.proxmox_url))
|
||||
|
||||
def _get_lxc_per_node(self, node):
    '''Return the LXC containers defined on ``node``.'''
    return self._get_json('{0}/api2/json/nodes/{1}/lxc'.format(self.proxmox_url, node))
|
||||
|
||||
def _get_qemu_per_node(self, node):
    '''Return the QEMU virtual machines defined on ``node``.'''
    return self._get_json('{0}/api2/json/nodes/{1}/qemu'.format(self.proxmox_url, node))
|
||||
|
||||
def _get_members_per_pool(self, pool):
    '''Return the members of the resource pool ``pool``.'''
    pool_info = self._get_json('{0}/api2/json/pools/{1}'.format(self.proxmox_url, pool))
    return pool_info['members']
|
||||
|
||||
def _get_node_ip(self, node):
    '''Return the first IP address configured on any of the node's
    network interfaces, or None when no interface carries one.

    Bug fix: the previous implementation caught the KeyError from the
    first interface lacking an ``address`` field and returned None
    immediately, so a node whose first listed interface was
    unconfigured was reported without an IP even when a later
    interface had one. We now skip address-less interfaces and keep
    looking, which matches the documented behaviour ("retrieve the
    first network interface with an IP").

    :param node: node name as returned by the /nodes endpoint
    :return: IP address string, or None
    '''
    ret = self._get_json("%s/api2/json/nodes/%s/network" % (self.proxmox_url, node))

    for iface in ret:
        address = iface.get('address')
        if address:
            return address
    return None
|
||||
|
||||
def _get_vm_config(self, node, vmid, vmtype, name):
    '''Fetch the LXC/QEMU configuration of a guest and expose each
    config entry as a host variable (prefixed with ``facts_prefix``).

    :param node: node name hosting the guest
    :param vmid: numeric guest ID
    :param vmtype: 'lxc' or 'qemu' (selects the API path)
    :param name: inventory hostname the variables are attached to
    '''
    ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/config" % (self.proxmox_url, node, vmtype, vmid))

    # Always publish the guest's ID and type as facts.
    vmid_key = 'vmid'
    vmid_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), vmid_key.lower()))
    self.inventory.set_variable(name, vmid_key, vmid)

    vmtype_key = 'vmtype'
    vmtype_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), vmtype_key.lower()))
    self.inventory.set_variable(name, vmtype_key, vmtype)

    for config in ret:
        key = config
        key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), key.lower()))
        value = ret[config]
        try:
            # fixup disk images as they have no key
            if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')):
                value = ('disk_image=' + value)

            # Plain ints and comma-free strings are stored as-is.
            if isinstance(value, int) or ',' not in value:
                value = value
            # split off strings with commas to a dict
            else:
                # skip over any keys that cannot be processed
                # (the generator's ``key`` is scoped to the expression and
                # does not clobber the fact key computed above)
                try:
                    value = dict(key.split("=") for key in value.split(","))
                except Exception:
                    continue

            self.inventory.set_variable(name, key, value)
        except NameError:
            # NOTE(review): it is unclear which statement above can raise
            # NameError -- presumably a defensive guard; confirm before
            # removing.
            return None
|
||||
|
||||
def _get_vm_status(self, node, vmid, vmtype, name):
    '''Fetch the current status of a guest and store it as a host fact
    (key ``<facts_prefix>status``).'''
    current = self._get_json("%s/api2/json/nodes/%s/%s/%s/status/current" % (self.proxmox_url, node, vmtype, vmid))

    fact_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), 'status'))
    self.inventory.set_variable(name, fact_key, current['status'])
|
||||
|
||||
def to_safe(self, word):
    '''Strip spaces and replace every character outside [A-Za-z0-9_]
    with an underscore so the result is a valid Ansible group name.

    #> ProxmoxInventory.to_safe("foo-bar baz")
    'foo_barbaz'
    '''
    stripped = word.replace(" ", "")
    return re.sub(r"[^A-Za-z0-9\_]", "_", stripped)
|
||||
|
||||
def _populate(self):
    '''Build the inventory: authenticate, then add nodes, LXC containers,
    QEMU VMs (grouped globally, per node, and by run state), and finally
    pool-based groups.'''

    self._get_auth()

    # gather vm's on nodes
    for node in self._get_nodes():
        # FIXME: this can probably be cleaner
        # create groups
        # NOTE(review): these cluster-wide groups are (re)created on every
        # node iteration; add_group is presumably idempotent -- confirm.
        lxc_group = 'all_lxc'
        lxc_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), lxc_group.lower()))
        self.inventory.add_group(lxc_group)
        qemu_group = 'all_qemu'
        qemu_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), qemu_group.lower()))
        self.inventory.add_group(qemu_group)
        nodes_group = 'nodes'
        nodes_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), nodes_group.lower()))
        self.inventory.add_group(nodes_group)
        running_group = 'all_running'
        running_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), running_group.lower()))
        self.inventory.add_group(running_group)
        stopped_group = 'all_stopped'
        stopped_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), stopped_group.lower()))
        self.inventory.add_group(stopped_group)

        if node.get('node'):
            self.inventory.add_host(node['node'])

            if node['type'] == 'node':
                self.inventory.add_child(nodes_group, node['node'])

            # get node IP address
            ip = self._get_node_ip(node['node'])
            self.inventory.set_variable(node['node'], 'ansible_host', ip)

            # get LXC containers for this node
            node_lxc_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_lxc' % node['node']).lower()))
            self.inventory.add_group(node_lxc_group)
            for lxc in self._get_lxc_per_node(node['node']):
                self.inventory.add_host(lxc['name'])
                self.inventory.add_child(lxc_group, lxc['name'])
                self.inventory.add_child(node_lxc_group, lxc['name'])

                # get LXC status when want_facts == True
                # NOTE(review): LXC run-state grouping is gated on
                # want_facts while the QEMU branch below does it
                # unconditionally -- confirm the asymmetry is intended.
                if self.get_option('want_facts'):
                    self._get_vm_status(node['node'], lxc['vmid'], 'lxc', lxc['name'])
                    if lxc['status'] == 'stopped':
                        self.inventory.add_child(stopped_group, lxc['name'])
                    elif lxc['status'] == 'running':
                        self.inventory.add_child(running_group, lxc['name'])

                # get LXC config for facts
                if self.get_option('want_facts'):
                    self._get_vm_config(node['node'], lxc['vmid'], 'lxc', lxc['name'])

            # get QEMU vm's for this node
            node_qemu_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_qemu' % node['node']).lower()))
            self.inventory.add_group(node_qemu_group)
            for qemu in self._get_qemu_per_node(node['node']):
                # VM templates are not real hosts and are skipped.
                if not qemu['template']:
                    self.inventory.add_host(qemu['name'])
                    self.inventory.add_child(qemu_group, qemu['name'])
                    self.inventory.add_child(node_qemu_group, qemu['name'])

                    # get QEMU status
                    self._get_vm_status(node['node'], qemu['vmid'], 'qemu', qemu['name'])
                    if qemu['status'] == 'stopped':
                        self.inventory.add_child(stopped_group, qemu['name'])
                    elif qemu['status'] == 'running':
                        self.inventory.add_child(running_group, qemu['name'])

                    # get QEMU config for facts
                    if self.get_option('want_facts'):
                        self._get_vm_config(node['node'], qemu['vmid'], 'qemu', qemu['name'])

    # gather vm's in pools
    for pool in self._get_pools():
        if pool.get('poolid'):
            pool_group = 'pool_' + pool['poolid']
            pool_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), pool_group.lower()))
            self.inventory.add_group(pool_group)

            for member in self._get_members_per_pool(pool['poolid']):
                if member.get('name'):
                    self.inventory.add_child(pool_group, member['name'])
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
    '''Inventory entry point: read the config file, set up connection and
    cache state, then populate the inventory from the cluster.

    :param inventory: inventory object to populate
    :param loader: Ansible DataLoader
    :param path: path to the inventory source file
    :param cache: False when the user forced a cache flush/refresh
    '''
    if not HAS_REQUESTS:
        raise AnsibleError('This module requires Python Requests 1.1.0 or higher: '
                           'https://github.com/psf/requests.')

    super(InventoryModule, self).parse(inventory, loader, path)

    # read config from file, this sets 'options'
    self._read_config_data(path)

    # connection settings from the inventory source
    self.proxmox_url = self.get_option('url')
    self.proxmox_user = self.get_option('user')
    self.proxmox_password = self.get_option('password')

    # caching is active only when both Ansible and the plugin allow it
    self.cache_key = self.get_cache_key(path)
    self.use_cache = cache and self.get_option('cache')

    # actually populate inventory
    self._populate()
|
||||
@@ -17,7 +17,7 @@ DOCUMENTATION = '''
|
||||
plugin:
|
||||
description: token that ensures this is a source file for the 'scaleway' plugin.
|
||||
required: True
|
||||
choices: ['scaleway']
|
||||
choices: ['scaleway', 'community.general.scaleway']
|
||||
regions:
|
||||
description: Filter results on a specific Scaleway region
|
||||
type: list
|
||||
@@ -60,7 +60,7 @@ EXAMPLES = '''
|
||||
|
||||
# use hostname as inventory_hostname
|
||||
# use the private IP address to connect to the host
|
||||
plugin: scaleway
|
||||
plugin: community.general.scaleway
|
||||
regions:
|
||||
- ams1
|
||||
- par1
|
||||
@@ -73,7 +73,7 @@ variables:
|
||||
state: state
|
||||
|
||||
# use hostname as inventory_hostname and public IP address to connect to the host
|
||||
plugin: scaleway
|
||||
plugin: community.general.scaleway
|
||||
hostnames:
|
||||
- hostname
|
||||
regions:
|
||||
|
||||
281
plugins/inventory/stackpath_compute.py
Normal file
281
plugins/inventory/stackpath_compute.py
Normal file
@@ -0,0 +1,281 @@
|
||||
# Copyright (c) 2020 Shay Rybak <shay.rybak@stackpath.com>
|
||||
# Copyright (c) 2020 Ansible Project
|
||||
# GNU General Public License v3.0+
|
||||
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: stackpath_compute
|
||||
plugin_type: inventory
|
||||
short_description: StackPath Edge Computing inventory source
|
||||
version_added: 1.2.0
|
||||
extends_documentation_fragment:
|
||||
- inventory_cache
|
||||
- constructed
|
||||
description:
|
||||
- Get inventory hosts from StackPath Edge Computing.
|
||||
- Uses a YAML configuration file that ends with stackpath_compute.(yml|yaml).
|
||||
options:
|
||||
plugin:
|
||||
description:
|
||||
- A token that ensures this is a source file for the plugin.
|
||||
required: true
|
||||
choices: ['community.general.stackpath_compute']
|
||||
client_id:
|
||||
description:
|
||||
- An OAuth client ID generated from the API Management section of the StackPath customer portal
|
||||
U(https://control.stackpath.net/api-management).
|
||||
required: true
|
||||
type: str
|
||||
client_secret:
|
||||
description:
|
||||
- An OAuth client secret generated from the API Management section of the StackPath customer portal
|
||||
U(https://control.stackpath.net/api-management).
|
||||
required: true
|
||||
type: str
|
||||
stack_slugs:
|
||||
description:
|
||||
- A list of Stack slugs to query instances in. If no entry then get instances in all stacks on the account.
|
||||
type: list
|
||||
elements: str
|
||||
use_internal_ip:
|
||||
description:
|
||||
- Whether or not to use internal IP addresses, If false, uses external IP addresses, internal otherwise.
|
||||
- If an instance doesn't have an external IP it will not be returned when this option is set to false.
|
||||
type: bool
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Example using credentials to fetch all workload instances in a stack.
|
||||
---
|
||||
plugin: community.general.stackpath_compute
|
||||
client_id: my_client_id
|
||||
client_secret: my_client_secret
|
||||
stack_slugs:
|
||||
- my_first_stack_slug
|
||||
- my_other_stack_slug
|
||||
use_internal_ip: false
|
||||
'''
|
||||
|
||||
import traceback
|
||||
import json
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils.urls import open_url
|
||||
from ansible.plugins.inventory import (
|
||||
BaseInventoryPlugin,
|
||||
Constructable,
|
||||
Cacheable
|
||||
)
|
||||
from ansible.utils.display import Display
|
||||
|
||||
|
||||
display = Display()
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
|
||||
NAME = 'community.general.stackpath_compute'
|
||||
|
||||
def __init__(self):
    '''Initialize plugin state; credentials are filled in by parse().'''
    super(InventoryModule, self).__init__()

    # OAuth credentials, populated from the config file
    self.client_id = None
    self.client_secret = None
    self.stack_slug = None

    # API endpoint and the instance attributes used to build groups
    self.api_host = "https://gateway.stackpath.com"
    self.group_keys = [
        "stackSlug",
        "workloadId",
        "cityCode",
        "countryCode",
        "continent",
        "target",
        "name",
        "workloadSlug",
    ]
|
||||
|
||||
def _validate_config(self, config):
    '''Validate the parsed inventory configuration.

    Bug fixes: the credential length checks previously compared the
    credential *string* against the integers 32/64 (``client_id != 32``),
    which is always true for a string and so raised for every input;
    they now compare ``len(...)``. The second KeyError handler also
    reported "client_id" when client_secret was the missing option.

    :param config: dict parsed from the inventory source file
    :raises AnsibleError: when the plugin name does not match, a required
        credential is missing, or a credential has the wrong length
    :return: True when the configuration is valid
    '''
    if config['plugin'] != 'community.general.stackpath_compute':
        raise AnsibleError("plugin doesn't match this plugin")
    try:
        client_id = config['client_id']
        if len(client_id) != 32:
            raise AnsibleError("client_id must be 32 characters long")
    except KeyError:
        raise AnsibleError("config missing client_id, a required option")
    try:
        client_secret = config['client_secret']
        if len(client_secret) != 64:
            raise AnsibleError("client_secret must be 64 characters long")
    except KeyError:
        raise AnsibleError("config missing client_secret, a required option")
    return True
|
||||
|
||||
def _set_credentials(self):
    '''Copy the OAuth client credentials from the plugin options onto
    the instance for use by _authenticate().'''
    self.client_id = self.get_option('client_id')
    self.client_secret = self.get_option('client_secret')
|
||||
|
||||
def _authenticate(self):
    '''Exchange the client credentials for an OAuth bearer token.

    On HTTP 200 the token is stored in ``self.auth_token``; any other
    status leaves the previous token (if any) untouched.
    '''
    request_body = json.dumps({
        "client_id": self.client_id,
        "client_secret": self.client_secret,
        "grant_type": "client_credentials",
    })
    response = open_url(
        self.api_host + '/identity/v1/oauth2/token',
        headers={"Content-Type": "application/json"},
        data=request_body,
        method="POST",
    )
    if response.code == 200:
        token_payload = json.loads(response.read())
        self.auth_token = token_payload["access_token"]
|
||||
|
||||
def _query(self):
    '''Return every RUNNING workload instance across the configured
    stacks, with location/metadata fields flattened onto each instance
    so _populate() can group by them.

    :raises AnsibleError: when a StackPath API call fails
    :return: list of instance dicts
    '''
    results = []
    workloads = []
    self._authenticate()
    for stack_slug in self.stack_slugs:
        try:
            workloads = self._stackpath_query_get_list(self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads')
        except Exception:
            raise AnsibleError("Failed to get workloads from the StackPath API: %s" % traceback.format_exc())
        for workload in workloads:
            try:
                workload_instances = self._stackpath_query_get_list(
                    self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads/' + workload["id"] + '/instances'
                )
            except Exception:
                raise AnsibleError("Failed to get workload instances from the StackPath API: %s" % traceback.format_exc())
            for instance in workload_instances:
                # Only running instances are inventoried.
                if instance["phase"] == "RUNNING":
                    # Flatten nested fields to top level for the group keys.
                    instance["stackSlug"] = stack_slug
                    instance["workloadId"] = workload["id"]
                    instance["workloadSlug"] = workload["slug"]
                    instance["cityCode"] = instance["location"]["cityCode"]
                    instance["countryCode"] = instance["location"]["countryCode"]
                    instance["continent"] = instance["location"]["continent"]
                    instance["target"] = instance["metadata"]["labels"]["workload.platform.stackpath.net/target-name"]
                    try:
                        # Instances without the chosen hostname field
                        # (e.g. no external IP) are silently dropped.
                        if instance[self.hostname_key]:
                            results.append(instance)
                    except KeyError:
                        pass
    return results
|
||||
|
||||
def _populate(self, instances):
    '''Add each instance to the inventory, creating one group per
    configured group key (lowercased, spaces/dashes as underscores).'''
    for instance in instances:
        hostname = instance[self.hostname_key]
        for group_key in self.group_keys:
            raw_group = "%s_%s" % (group_key, instance[group_key])
            group = raw_group.lower().replace(" ", "_").replace("-", "_")
            self.inventory.add_group(group)
            self.inventory.add_host(hostname, group=group)
|
||||
|
||||
def _stackpath_query_get_list(self, url):
    '''Fetch every page of a StackPath list endpoint and return the
    combined ``results`` entries.

    :param url: list endpoint URL without pagination query parameters
    :return: list of result dicts from all pages
    '''
    # Re-authenticate per call so the bearer token is always fresh.
    self._authenticate()
    headers = {
        "Content-Type": "application/json",
        "Authorization": "Bearer " + self.auth_token,
    }
    next_page = True
    result = []
    cursor = '-1'
    while next_page:
        # Cursor-based pagination, 10 items per page.
        resp = open_url(
            url + '?page_request.first=10&page_request.after=%s' % cursor,
            headers=headers,
            method="GET"
        )
        status_code = resp.code
        if status_code == 200:
            body = resp.read()
            body_json = json.loads(body)
            result.extend(body_json["results"])
            next_page = body_json["pageInfo"]["hasNextPage"]
            if next_page:
                cursor = body_json["pageInfo"]["endCursor"]
        # NOTE(review): a non-200 response leaves next_page True and
        # repeats the same request indefinitely -- confirm whether
        # open_url raising on HTTP errors makes this unreachable.
    return result
|
||||
|
||||
def _get_stack_slugs(self, stacks):
    '''Record the slug of every stack visible to the account.'''
    slugs = []
    for stack in stacks:
        slugs.append(stack["slug"])
    self.stack_slugs = slugs
|
||||
|
||||
def verify_file(self, path):
    '''Return True when ``path`` is an inventory source this plugin
    accepts (file name ends in ``stackpath_compute.yml``/``.yaml``).

    :param path: the path to the inventory config file
    '''
    if not super(InventoryModule, self).verify_file(path):
        return False
    if path.endswith(('stackpath_compute.yml', 'stackpath_compute.yaml')):
        return True
    display.debug(
        "stackpath_compute inventory filename must end with \
'stackpath_compute.yml' or 'stackpath_compute.yaml'"
    )
    return False
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
    '''Inventory entry point: validate config, resolve credentials and
    stack slugs, then populate the inventory (from cache when possible).

    :param inventory: inventory object to populate
    :param loader: Ansible DataLoader
    :param path: path to the inventory source file
    :param cache: False when refresh_cache/--flush-cache was requested
    '''

    super(InventoryModule, self).parse(inventory, loader, path)

    config = self._read_config_data(path)
    self._validate_config(config)
    self._set_credentials()

    # get user specifications
    # Choose which instance field becomes the inventory hostname.
    self.use_internal_ip = self.get_option('use_internal_ip')
    if self.use_internal_ip:
        self.hostname_key = "ipAddress"
    else:
        self.hostname_key = "externalIpAddress"

    # Without an explicit stack list, discover all stacks on the account.
    self.stack_slugs = self.get_option('stack_slugs')
    if not self.stack_slugs:
        try:
            stacks = self._stackpath_query_get_list(self.api_host + '/stack/v1/stacks')
            self._get_stack_slugs(stacks)
        except Exception:
            raise AnsibleError("Failed to get stack IDs from the Stackpath API: %s" % traceback.format_exc())

    cache_key = self.get_cache_key(path)
    # false when refresh_cache or --flush-cache is used
    if cache:
        # get the user-specified directive
        cache = self.get_option('cache')

    # Generate inventory
    cache_needs_update = False
    if cache:
        try:
            results = self._cache[cache_key]
        except KeyError:
            # if cache expires or cache file doesn't exist
            cache_needs_update = True

    if not cache or cache_needs_update:
        results = self._query()

    self._populate(results)

    # If the cache has expired/doesn't exist or
    # if refresh_inventory/flush cache is used
    # when the user is using caching, update the cached inventory
    try:
        if cache_needs_update or (not cache and self.get_option('cache')):
            self._cache[cache_key] = results
    except Exception:
        raise AnsibleError("Failed to populate data: %s" % traceback.format_exc())
|
||||
@@ -5,6 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
name: virtualbox
|
||||
plugin_type: inventory
|
||||
short_description: virtualbox inventory source
|
||||
@@ -19,7 +20,7 @@ DOCUMENTATION = '''
|
||||
plugin:
|
||||
description: token that ensures this is a source file for the 'virtualbox' plugin
|
||||
required: True
|
||||
choices: ['virtualbox']
|
||||
choices: ['virtualbox', 'community.general.virtualbox']
|
||||
running_only:
|
||||
description: toggles showing all vms vs only those currently running
|
||||
type: boolean
|
||||
@@ -38,7 +39,7 @@ DOCUMENTATION = '''
|
||||
EXAMPLES = '''
|
||||
# file must be named vbox.yaml or vbox.yml
|
||||
simple_config_file:
|
||||
plugin: virtualbox
|
||||
plugin: community.general.virtualbox
|
||||
settings_password_file: /etc/virtulbox/secrets
|
||||
query:
|
||||
logged_in_users: /VirtualBox/GuestInfo/OS/LoggedInUsersList
|
||||
@@ -46,7 +47,7 @@ simple_config_file:
|
||||
ansible_connection: ('indows' in vbox_Guest_OS)|ternary('winrm', 'ssh')
|
||||
|
||||
# add hosts (all match with minishift vm) to the group container if any of the vms are in ansible_inventory'
|
||||
plugin: virtualbox
|
||||
plugin: community.general.virtualbox
|
||||
groups:
|
||||
container: "'minis' in (inventory_hostname)"
|
||||
'''
|
||||
|
||||
@@ -5,6 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
lookup: cartesian
|
||||
short_description: returns the cartesian product of lists
|
||||
description:
|
||||
@@ -20,11 +21,13 @@ DOCUMENTATION = '''
|
||||
|
||||
EXAMPLES = """
|
||||
- name: Example of the change in the description
|
||||
ansible.builtin.debug: msg="{{ lookup('cartesian', [1,2,3], [a, b])}}"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.cartesian', [1,2,3], [a, b])}}"
|
||||
|
||||
- name: loops over the cartesian product of the supplied lists
|
||||
ansible.builtin.debug: msg="{{item}}"
|
||||
with_cartesian:
|
||||
ansible.builtin.debug:
|
||||
msg: "{{item}}"
|
||||
with_community.general.cartesian:
|
||||
- "{{list1}}"
|
||||
- "{{list2}}"
|
||||
- [1,2,3,4,5,6]
|
||||
@@ -34,7 +37,8 @@ RETURN = """
|
||||
_list:
|
||||
description:
|
||||
- list of lists composed of elements of the input lists
|
||||
type: lists
|
||||
type: list
|
||||
elements: list
|
||||
"""
|
||||
|
||||
from itertools import product
|
||||
|
||||
@@ -5,6 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
lookup: chef_databag
|
||||
short_description: fetches data from a Chef Databag
|
||||
description:
|
||||
@@ -28,13 +29,15 @@ DOCUMENTATION = '''
|
||||
|
||||
EXAMPLES = """
|
||||
- ansible.builtin.debug:
|
||||
msg: "{{ lookup('chef_databag', 'name=data_bag_name item=data_bag_item') }}"
|
||||
msg: "{{ lookup('community.general.chef_databag', 'name=data_bag_name item=data_bag_item') }}"
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
_raw:
|
||||
description:
|
||||
- The value from the databag
|
||||
- The value from the databag.
|
||||
type: list
|
||||
elements: dict
|
||||
"""
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
|
||||
@@ -6,6 +6,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
lookup: consul_kv
|
||||
short_description: Fetch metadata from a Consul key value store.
|
||||
description:
|
||||
@@ -27,7 +28,7 @@ DOCUMENTATION = '''
|
||||
- If the key has a value with the specified index then this is returned allowing access to historical values.
|
||||
datacenter:
|
||||
description:
|
||||
- Retrieve the key from a consul datatacenter other than the default for the consul host.
|
||||
- Retrieve the key from a consul datacenter other than the default for the consul host.
|
||||
token:
|
||||
description: The acl token to allow access to restricted values.
|
||||
host:
|
||||
@@ -80,24 +81,25 @@ DOCUMENTATION = '''
|
||||
EXAMPLES = """
|
||||
- ansible.builtin.debug:
|
||||
msg: 'key contains {{item}}'
|
||||
with_consul_kv:
|
||||
with_community.general.consul_kv:
|
||||
- 'key/to/retrieve'
|
||||
|
||||
- name: Parameters can be provided after the key be more specific about what to retrieve
|
||||
ansible.builtin.debug:
|
||||
msg: 'key contains {{item}}'
|
||||
with_consul_kv:
|
||||
with_community.general.consul_kv:
|
||||
- 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
|
||||
|
||||
- name: retrieving a KV from a remote cluster on non default port
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('consul_kv', 'my/key', host='10.10.10.10', port='2000') }}"
|
||||
msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port='2000') }}"
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
_raw:
|
||||
description:
|
||||
- Value(s) stored in consul.
|
||||
type: dict
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
@@ -5,6 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
lookup: credstash
|
||||
short_description: retrieve secrets from Credstash on AWS
|
||||
requirements:
|
||||
@@ -47,13 +48,16 @@ EXAMPLES = """
|
||||
ansible.builtin.shell: credstash put my-github-password secure123
|
||||
|
||||
- name: "Test credstash lookup plugin -- get my github password"
|
||||
ansible.builtin.debug: msg="Credstash lookup! {{ lookup('credstash', 'my-github-password') }}"
|
||||
ansible.builtin.debug:
|
||||
msg: "Credstash lookup! {{ lookup('community.general.credstash', 'my-github-password') }}"
|
||||
|
||||
- name: "Test credstash lookup plugin -- get my other password from us-west-1"
|
||||
ansible.builtin.debug: msg="Credstash lookup! {{ lookup('credstash', 'my-other-password', region='us-west-1') }}"
|
||||
ansible.builtin.debug:
|
||||
msg: "Credstash lookup! {{ lookup('community.general.credstash', 'my-other-password', region='us-west-1') }}"
|
||||
|
||||
- name: "Test credstash lookup plugin -- get the company's github password"
|
||||
ansible.builtin.debug: msg="Credstash lookup! {{ lookup('credstash', 'company-github-password', table='company-passwords') }}"
|
||||
ansible.builtin.debug:
|
||||
msg: "Credstash lookup! {{ lookup('community.general.credstash', 'company-github-password', table='company-passwords') }}"
|
||||
|
||||
- name: Example play using the 'context' feature
|
||||
hosts: localhost
|
||||
@@ -64,16 +68,19 @@ EXAMPLES = """
|
||||
tasks:
|
||||
|
||||
- name: "Test credstash lookup plugin -- get the password with a context passed as a variable"
|
||||
ansible.builtin.debug: msg="{{ lookup('credstash', 'some-password', context=context) }}"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.credstash', 'some-password', context=context) }}"
|
||||
|
||||
- name: "Test credstash lookup plugin -- get the password with a context defined here"
|
||||
ansible.builtin.debug: msg="{{ lookup('credstash', 'some-password', context=dict(app='my_app', environment='production')) }}"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.credstash', 'some-password', context=dict(app='my_app', environment='production')) }}"
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
_raw:
|
||||
description:
|
||||
- value(s) stored in Credstash
|
||||
- Value(s) stored in Credstash.
|
||||
type: str
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
@@ -5,6 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: Unknown (!UNKNOWN)
|
||||
lookup: cyberarkpassword
|
||||
short_description: get secrets from CyberArk AIM
|
||||
requirements:
|
||||
@@ -29,14 +30,15 @@ DOCUMENTATION = '''
|
||||
- "They could be: Password, PassProps.<property>, PasswordChangeInProcess"
|
||||
default: 'password'
|
||||
_extra:
|
||||
description: for extra_parms values please check parameters for clipasswordsdk in CyberArk's "Credential Provider and ASCP Implementation Guide"
|
||||
note:
|
||||
- For Ansible on windows, please change the -parameters (-p, -d, and -o) to /parameters (/p, /d, and /o) and change the location of CLIPasswordSDK.exe
|
||||
description: for extra_params values please check parameters for clipasswordsdk in CyberArk's "Credential Provider and ASCP Implementation Guide"
|
||||
notes:
|
||||
- For Ansible on Windows, please change the -parameters (-p, -d, and -o) to /parameters (/p, /d, and /o) and change the location of CLIPasswordSDK.exe.
|
||||
'''
|
||||
|
||||
EXAMPLES = """
|
||||
- name: passing options to the lookup
|
||||
ansible.builtin.debug: msg={{ lookup("cyberarkpassword", cyquery)}}
|
||||
ansible.builtin.debug:
|
||||
msg: '{{ lookup("community.general.cyberarkpassword", cyquery) }}'
|
||||
vars:
|
||||
cyquery:
|
||||
appid: "app_ansible"
|
||||
@@ -45,8 +47,9 @@ EXAMPLES = """
|
||||
|
||||
|
||||
- name: used in a loop
|
||||
ansible.builtin.debug: msg={{item}}
|
||||
with_cyberarkpassword:
|
||||
ansible.builtin.debug:
|
||||
msg: "{{item}}"
|
||||
with_community.general.cyberarkpassword:
|
||||
appid: 'app_ansible'
|
||||
query: 'safe=CyberArk_Passwords;folder=root;object=AdminPass'
|
||||
output: 'Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess'
|
||||
|
||||
@@ -44,32 +44,43 @@ DOCUMENTATION = '''
|
||||
|
||||
EXAMPLES = """
|
||||
- name: Simple A record (IPV4 address) lookup for example.com
|
||||
ansible.builtin.debug: msg="{{ lookup('dig', 'example.com.')}}"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.dig', 'example.com.')}}"
|
||||
|
||||
- name: "The TXT record for example.org."
|
||||
ansible.builtin.debug: msg="{{ lookup('dig', 'example.org.', 'qtype=TXT') }}"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.dig', 'example.org.', 'qtype=TXT') }}"
|
||||
|
||||
- name: "The TXT record for example.org, alternative syntax."
|
||||
ansible.builtin.debug: msg="{{ lookup('dig', 'example.org./TXT') }}"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.dig', 'example.org./TXT') }}"
|
||||
|
||||
- name: use in a loop
|
||||
ansible.builtin.debug: msg="MX record for gmail.com {{ item }}"
|
||||
with_items: "{{ lookup('dig', 'gmail.com./MX', wantlist=True) }}"
|
||||
ansible.builtin.debug:
|
||||
msg: "MX record for gmail.com {{ item }}"
|
||||
with_items: "{{ lookup('community.general.dig', 'gmail.com./MX', wantlist=True) }}"
|
||||
|
||||
- ansible.builtin.debug: msg="Reverse DNS for 192.0.2.5 is {{ lookup('dig', '192.0.2.5/PTR') }}"
|
||||
- ansible.builtin.debug: msg="Reverse DNS for 192.0.2.5 is {{ lookup('dig', '5.2.0.192.in-addr.arpa./PTR') }}"
|
||||
- ansible.builtin.debug: msg="Reverse DNS for 192.0.2.5 is {{ lookup('dig', '5.2.0.192.in-addr.arpa.', 'qtype=PTR') }}"
|
||||
- ansible.builtin.debug: msg="Querying 198.51.100.23 for IPv4 address for example.com. produces {{ lookup('dig', 'example.com', '@198.51.100.23') }}"
|
||||
- ansible.builtin.debug:
|
||||
msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '192.0.2.5/PTR') }}"
|
||||
- ansible.builtin.debug:
|
||||
msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '5.2.0.192.in-addr.arpa./PTR') }}"
|
||||
- ansible.builtin.debug:
|
||||
msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '5.2.0.192.in-addr.arpa.', 'qtype=PTR') }}"
|
||||
- ansible.builtin.debug:
|
||||
msg: "Querying 198.51.100.23 for IPv4 address for example.com. produces {{ lookup('dig', 'example.com', '@198.51.100.23') }}"
|
||||
|
||||
- ansible.builtin.debug: msg="XMPP service for gmail.com. is available at {{ item.target }} on port {{ item.port }}"
|
||||
with_items: "{{ lookup('dig', '_xmpp-server._tcp.gmail.com./SRV', 'flat=0', wantlist=True) }}"
|
||||
- ansible.builtin.debug:
|
||||
msg: "XMPP service for gmail.com. is available at {{ item.target }} on port {{ item.port }}"
|
||||
with_items: "{{ lookup('community.general.dig', '_xmpp-server._tcp.gmail.com./SRV', 'flat=0', wantlist=True) }}"
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
_list:
|
||||
description:
|
||||
- list of composed strings or dictonaries with key and value
|
||||
- List of composed strings or dictionaries with key and value
|
||||
If a dictionary, fields shows the keys returned depending on query type
|
||||
type: list
|
||||
elements: raw
|
||||
contains:
|
||||
ALL:
|
||||
description:
|
||||
|
||||
@@ -21,18 +21,21 @@ DOCUMENTATION = '''
|
||||
|
||||
EXAMPLES = """
|
||||
- name: show txt entry
|
||||
ansible.builtin.debug: msg="{{lookup('dnstxt', ['test.example.com'])}}"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{lookup('community.general.dnstxt', ['test.example.com'])}}"
|
||||
|
||||
- name: iterate over txt entries
|
||||
ansible.builtin.debug: msg="{{item}}"
|
||||
with_dnstxt:
|
||||
ansible.builtin.debug:
|
||||
msg: "{{item}}"
|
||||
with_community.general.dnstxt:
|
||||
- 'test.example.com'
|
||||
- 'other.example.com'
|
||||
- 'last.example.com'
|
||||
|
||||
- name: iterate of a comma delimited DNS TXT entry
|
||||
ansible.builtin.debug: msg="{{item}}"
|
||||
with_dnstxt: "{{lookup('dnstxt', ['test.example.com']).split(',')}}"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{item}}"
|
||||
with_community.general.dnstxt: "{{lookup('community.general.dnstxt', ['test.example.com']).split(',')}}"
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
|
||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
lookup: dsv
|
||||
author: Adam Migus (adam@migus.org)
|
||||
author: Adam Migus (@amigus) <adam@migus.org>
|
||||
short_description: Get secrets from Thycotic DevOps Secrets Vault
|
||||
version_added: 1.0.0
|
||||
description:
|
||||
@@ -70,6 +70,8 @@ _list:
|
||||
description:
|
||||
- One or more JSON responses to C(GET /secrets/{path}).
|
||||
- See U(https://dsv.thycotic.com/api/index.html#operation/getSecret).
|
||||
type: list
|
||||
elements: dict
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
@@ -112,7 +114,7 @@ class LookupModule(LookupBase):
|
||||
self.set_options(var_options=variables, direct=kwargs)
|
||||
|
||||
vault = LookupModule.Client(
|
||||
**{
|
||||
{
|
||||
"tenant": self.get_option("tenant"),
|
||||
"client_id": self.get_option("client_id"),
|
||||
"client_secret": self.get_option("client_secret"),
|
||||
|
||||
@@ -53,14 +53,17 @@ DOCUMENTATION = '''
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: "a value from a locally running etcd"
|
||||
ansible.builtin.debug: msg={{ lookup('etcd', 'foo/bar') }}
|
||||
- name: "a value from a locally running etcd"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.etcd', 'foo/bar') }}"
|
||||
|
||||
- name: "values from multiple folders on a locally running etcd"
|
||||
ansible.builtin.debug: msg={{ lookup('etcd', 'foo', 'bar', 'baz') }}
|
||||
- name: "values from multiple folders on a locally running etcd"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.etcd', 'foo', 'bar', 'baz') }}"
|
||||
|
||||
- name: "since Ansible 2.5 you can set server options inline"
|
||||
ansible.builtin.debug: msg="{{ lookup('etcd', 'foo', version='v2', url='http://192.168.0.27:4001') }}"
|
||||
- name: "since Ansible 2.5 you can set server options inline"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.etcd', 'foo', version='v2', url='http://192.168.0.27:4001') }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
@@ -68,7 +71,7 @@ RETURN = '''
|
||||
description:
|
||||
- list of values associated with input keys
|
||||
type: list
|
||||
elements: strings
|
||||
elements: string
|
||||
'''
|
||||
|
||||
import json
|
||||
|
||||
@@ -8,7 +8,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author:
|
||||
- Eric Belhomme <ebelhomme@fr.scc.com>
|
||||
- Eric Belhomme (@eric-belhomme) <ebelhomme@fr.scc.com>
|
||||
version_added: '0.2.0'
|
||||
lookup: etcd3
|
||||
short_description: Get key values from etcd3 server
|
||||
@@ -31,7 +31,7 @@ DOCUMENTATION = '''
|
||||
default: False
|
||||
endpoints:
|
||||
description:
|
||||
- Counterpart of C(ETCDCTL_ENDPOINTS) enviroment variable.
|
||||
- Counterpart of C(ETCDCTL_ENDPOINTS) environment variable.
|
||||
Specify the etcd3 connection with and URL form eg. C(https://hostname:2379) or C(<host>:<port>) form.
|
||||
- The C(host) part is overwritten by I(host) option, if defined.
|
||||
- The C(port) part is overwritten by I(port) option, if defined.
|
||||
@@ -76,45 +76,46 @@ DOCUMENTATION = '''
|
||||
type: int
|
||||
user:
|
||||
description:
|
||||
- Authentified user name.
|
||||
- Authenticated user name.
|
||||
env:
|
||||
- name: ETCDCTL_USER
|
||||
type: str
|
||||
password:
|
||||
description:
|
||||
- Authentified user password.
|
||||
- Authenticated user password.
|
||||
env:
|
||||
- name: ETCDCTL_PASSWORD
|
||||
type: str
|
||||
|
||||
notes:
|
||||
- I(host) and I(port) options take precedence over (endpoints) option.
|
||||
- The recommanded way to connect to etcd3 server is using C(ETCDCTL_ENDPOINT)
|
||||
- The recommended way to connect to etcd3 server is using C(ETCDCTL_ENDPOINT)
|
||||
environment variable and keep I(endpoints), I(host), and I(port) unused.
|
||||
seealso:
|
||||
- module: community.general.etcd3
|
||||
- ref: etcd_lookup
|
||||
description: The etcd v2 lookup.
|
||||
|
||||
requirements:
|
||||
- "etcd3 >= 0.10"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: "a value from a locally running etcd"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.etcd3', 'foo/bar') }}"
|
||||
- name: "a value from a locally running etcd"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.etcd3', 'foo/bar') }}"
|
||||
|
||||
- name: "values from multiple folders on a locally running etcd"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.etcd3', 'foo', 'bar', 'baz') }}"
|
||||
- name: "values from multiple folders on a locally running etcd"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.etcd3', 'foo', 'bar', 'baz') }}"
|
||||
|
||||
- name: "look for a key prefix"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.etcd3', '/foo/bar', prefix=True) }}"
|
||||
- name: "look for a key prefix"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.etcd3', '/foo/bar', prefix=True) }}"
|
||||
|
||||
- name: "connect to etcd3 with a client certificate"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.etcd3', 'foo/bar', cert_cert='/etc/ssl/etcd/client.pem', cert_key='/etc/ssl/etcd/client.key') }}"
|
||||
- name: "connect to etcd3 with a client certificate"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.etcd3', 'foo/bar', cert_cert='/etc/ssl/etcd/client.pem', cert_key='/etc/ssl/etcd/client.key') }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
DOCUMENTATION = r'''
|
||||
lookup: filetree
|
||||
author: Dag Wieers (@dagwieers) <dag@wieers.com>
|
||||
short_description: recursively match all files in a directory tree
|
||||
@@ -19,13 +19,13 @@ options:
|
||||
required: True
|
||||
'''
|
||||
|
||||
EXAMPLES = """
|
||||
EXAMPLES = r"""
|
||||
- name: Create directories
|
||||
ansible.builtin.file:
|
||||
path: /web/{{ item.path }}
|
||||
state: directory
|
||||
mode: '{{ item.mode }}'
|
||||
with_filetree: web/
|
||||
with_community.general.filetree: web/
|
||||
when: item.state == 'directory'
|
||||
|
||||
- name: Template files (explicitly skip directories in order to use the 'src' attribute)
|
||||
@@ -33,7 +33,7 @@ EXAMPLES = """
|
||||
src: '{{ item.src }}'
|
||||
dest: /web/{{ item.path }}
|
||||
mode: '{{ item.mode }}'
|
||||
with_filetree: web/
|
||||
with_community.general.filetree: web/
|
||||
when: item.state == 'file'
|
||||
|
||||
- name: Recreate symlinks
|
||||
@@ -43,48 +43,70 @@ EXAMPLES = """
|
||||
state: link
|
||||
force: yes
|
||||
mode: '{{ item.mode }}'
|
||||
with_filetree: web/
|
||||
with_community.general.filetree: web/
|
||||
when: item.state == 'link'
|
||||
|
||||
- name: list all files under web/
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.filetree', 'web/') }}"
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
RETURN = r"""
|
||||
_raw:
|
||||
description: list of dictionaries with file information
|
||||
description: List of dictionaries with file information.
|
||||
type: list
|
||||
elements: dict
|
||||
contains:
|
||||
src:
|
||||
description:
|
||||
- full path to file
|
||||
- not returned when C(item.state) is set to C(directory)
|
||||
- Full path to file.
|
||||
- Not returned when I(item.state) is set to C(directory).
|
||||
type: path
|
||||
root:
|
||||
description: allows filtering by original location
|
||||
description: Allows filtering by original location.
|
||||
type: path
|
||||
path:
|
||||
description: contains the relative path to root
|
||||
description: Contains the relative path to root.
|
||||
type: path
|
||||
mode:
|
||||
description: TODO
|
||||
description: The permissions the resulting file or directory.
|
||||
type: str
|
||||
state:
|
||||
description: TODO
|
||||
type: str
|
||||
owner:
|
||||
description: TODO
|
||||
description: Name of the user that owns the file/directory.
|
||||
type: raw
|
||||
group:
|
||||
description: TODO
|
||||
description: Name of the group that owns the file/directory.
|
||||
type: raw
|
||||
seuser:
|
||||
description: TODO
|
||||
description: The user part of the SELinux file context.
|
||||
type: raw
|
||||
serole:
|
||||
description: TODO
|
||||
description: The role part of the SELinux file context.
|
||||
type: raw
|
||||
setype:
|
||||
description: TODO
|
||||
description: The type part of the SELinux file context.
|
||||
type: raw
|
||||
selevel:
|
||||
description: TODO
|
||||
description: The level part of the SELinux file context.
|
||||
type: raw
|
||||
uid:
|
||||
description: TODO
|
||||
description: Owner ID of the file/directory.
|
||||
type: int
|
||||
gid:
|
||||
description: TODO
|
||||
description: Group ID of the file/directory.
|
||||
type: int
|
||||
size:
|
||||
description: TODO
|
||||
description: Size of the target.
|
||||
type: int
|
||||
mtime:
|
||||
description: TODO
|
||||
description: Time of last modification.
|
||||
type: float
|
||||
ctime:
|
||||
description: TODO
|
||||
description: Time of last metadata update or creation (depends on OS).
|
||||
type: float
|
||||
"""
|
||||
import os
|
||||
import pwd
|
||||
|
||||
@@ -6,7 +6,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
lookup: flattened
|
||||
author: Serge van Ginderachter <serge@vanginderachter.be>
|
||||
author: Serge van Ginderachter (!UNKNOWN) <serge@vanginderachter.be>
|
||||
short_description: return single list completely flattened
|
||||
description:
|
||||
- given one or more lists, this lookup will flatten any list elements found recursively until only 1 list is left.
|
||||
@@ -21,7 +21,8 @@ DOCUMENTATION = '''
|
||||
|
||||
EXAMPLES = """
|
||||
- name: "'unnest' all elements into single list"
|
||||
ansible.builtin.debug: msg="all in one list {{lookup('flattened', [1,2,3,[5,6]], [a,b,c], [[5,6,1,3], [34,a,b,c]])}}"
|
||||
ansible.builtin.debug:
|
||||
msg: "all in one list {{lookup('community.general.flattened', [1,2,3,[5,6]], [a,b,c], [[5,6,1,3], [34,a,b,c]])}}"
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
|
||||
@@ -9,7 +9,7 @@ lookup: gcp_storage_file
|
||||
description:
|
||||
- This lookup returns the contents from a file residing on Google Cloud Storage
|
||||
short_description: Return GC Storage content
|
||||
author: Eric Anderson <eanderson@avinetworks.com>
|
||||
author: Eric Anderson (!UNKNOWN) <eanderson@avinetworks.com>
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- requests >= 2.18.4
|
||||
@@ -29,15 +29,19 @@ extends_documentation_fragment:
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- ansible.builtin.debug: msg="the value of foo.txt is {{ lookup('gcp_storage_file',
|
||||
bucket='gcp-bucket', src='mydir/foo.txt', project='project-name',
|
||||
auth_kind='serviceaccount', service_account_file='/tmp/myserviceaccountfile.json') }}"
|
||||
- ansible.builtin.debug:
|
||||
msg: |
|
||||
the value of foo.txt is {{ lookup('community.general.gcp_storage_file',
|
||||
bucket='gcp-bucket', src='mydir/foo.txt', project='project-name',
|
||||
auth_kind='serviceaccount', service_account_file='/tmp/myserviceaccountfile.json') }}
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
_raw:
|
||||
description:
|
||||
- base64 encoded file content
|
||||
type: list
|
||||
elements: str
|
||||
'''
|
||||
|
||||
import base64
|
||||
@@ -141,5 +145,5 @@ class GcpFileLookup():
|
||||
class LookupModule(LookupBase):
|
||||
def run(self, terms, variables=None, **kwargs):
|
||||
if not HAS_GOOGLE_CLOUD_COLLECTION:
|
||||
raise AnsibleError("community.general.gcp_storage_files needs a supported version of the google.cloud collection installed")
|
||||
raise AnsibleError("community.general.gcp_storage_file needs a supported version of the google.cloud collection installed")
|
||||
return GcpFileLookup().run(terms, variables=variables, **kwargs)
|
||||
|
||||
@@ -9,7 +9,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = """
|
||||
lookup: hashi_vault
|
||||
author:
|
||||
- Jonathan Davila <jdavila(at)ansible.com>
|
||||
- Jonathan Davila (!UNKNOWN) <jdavila(at)ansible.com>
|
||||
- Brian Scholer (@briantist)
|
||||
short_description: Retrieve secrets from HashiCorp's vault
|
||||
requirements:
|
||||
@@ -38,13 +38,17 @@ DOCUMENTATION = """
|
||||
token_path:
|
||||
description: If no token is specified, will try to read the token file from this path.
|
||||
env:
|
||||
- name: HOME
|
||||
- name: VAULT_TOKEN_PATH
|
||||
version_added: 1.2.0
|
||||
ini:
|
||||
- section: lookup_hashi_vault
|
||||
key: token_path
|
||||
version_added: '0.2.0'
|
||||
token_file:
|
||||
description: If no token is specified, will try to read the token from this file in C(token_path).
|
||||
env:
|
||||
- name: VAULT_TOKEN_FILE
|
||||
version_added: 1.2.0
|
||||
ini:
|
||||
- section: lookup_hashi_vault
|
||||
key: token_file
|
||||
@@ -117,6 +121,9 @@ DOCUMENTATION = """
|
||||
default: True
|
||||
namespace:
|
||||
description: Namespace where secrets reside. Requires HVAC 0.7.0+ and Vault 0.11+.
|
||||
env:
|
||||
- name: VAULT_NAMESPACE
|
||||
version_added: 1.2.0
|
||||
aws_profile:
|
||||
description: The AWS profile
|
||||
type: str
|
||||
@@ -241,6 +248,8 @@ RETURN = """
|
||||
_raw:
|
||||
description:
|
||||
- secrets(s) requested
|
||||
type: list
|
||||
elements: dict
|
||||
"""
|
||||
|
||||
import os
|
||||
@@ -404,7 +413,7 @@ class HashiVault:
|
||||
self.client.auth_ldap(**params)
|
||||
|
||||
def auth_approle(self):
|
||||
params = self.get_options('role_id', 'secret_id')
|
||||
params = self.get_options('role_id', 'secret_id', 'mount_point')
|
||||
self.client.auth_approle(**params)
|
||||
|
||||
def auth_aws_iam_login(self):
|
||||
@@ -532,6 +541,11 @@ class LookupModule(LookupBase):
|
||||
|
||||
def validate_auth_token(self, auth_method):
|
||||
if auth_method == 'token':
|
||||
if not self.get_option('token_path'):
|
||||
# generally we want env vars defined in the spec, but in this case we want
|
||||
# the env var HOME to have lower precedence than any other value source,
|
||||
# including ini, so we're doing it here after all other processing has taken place
|
||||
self.set_option('token_path', os.environ.get('HOME'))
|
||||
if not self.get_option('token') and self.get_option('token_path'):
|
||||
token_filename = os.path.join(
|
||||
self.get_option('token_path'),
|
||||
|
||||
@@ -18,7 +18,7 @@ DOCUMENTATION = '''
|
||||
description:
|
||||
- The list of keys to lookup on the Puppetmaster
|
||||
type: list
|
||||
element_type: string
|
||||
elements: string
|
||||
required: True
|
||||
_bin_file:
|
||||
description:
|
||||
@@ -39,20 +39,24 @@ EXAMPLES = """
|
||||
# All this examples depends on hiera.yml that describes the hierarchy
|
||||
|
||||
- name: "a value from Hiera 'DB'"
|
||||
ansible.builtin.debug: msg={{ lookup('hiera', 'foo') }}
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.hiera', 'foo') }}"
|
||||
|
||||
- name: "a value from a Hiera 'DB' on other environment"
|
||||
ansible.builtin.debug: msg={{ lookup('hiera', 'foo environment=production') }}
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.hiera', 'foo environment=production') }}"
|
||||
|
||||
- name: "a value from a Hiera 'DB' for a concrete node"
|
||||
ansible.builtin.debug: msg={{ lookup('hiera', 'foo fqdn=puppet01.localdomain') }}
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.hiera', 'foo fqdn=puppet01.localdomain') }}"
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
_raw:
|
||||
description:
|
||||
- a value associated with input key
|
||||
type: strings
|
||||
type: list
|
||||
elements: str
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
@@ -8,7 +8,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
lookup: keyring
|
||||
author:
|
||||
- Samuel Boucher <boucher.samuel.c@gmail.com>
|
||||
- Samuel Boucher (!UNKNOWN) <boucher.samuel.c@gmail.com>
|
||||
requirements:
|
||||
- keyring (python library)
|
||||
short_description: grab secrets from the OS keyring
|
||||
@@ -20,16 +20,18 @@ EXAMPLES = """
|
||||
- name : output secrets to screen (BAD IDEA)
|
||||
ansible.builtin.debug:
|
||||
msg: "Password: {{item}}"
|
||||
with_keyring:
|
||||
with_community.general.keyring:
|
||||
- 'servicename username'
|
||||
|
||||
- name: access mysql with password from keyring
|
||||
mysql_db: login_password={{lookup('keyring','mysql joe')}} login_user=joe
|
||||
mysql_db: login_password={{lookup('community.general.keyring','mysql joe')}} login_user=joe
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
_raw:
|
||||
description: secrets stored
|
||||
description: Secrets stored.
|
||||
type: list
|
||||
elements: str
|
||||
"""
|
||||
|
||||
HAS_KEYRING = True
|
||||
|
||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
lookup: lastpass
|
||||
author:
|
||||
- Andrew Zenk <azenk@umn.edu>
|
||||
- Andrew Zenk (!UNKNOWN) <azenk@umn.edu>
|
||||
requirements:
|
||||
- lpass (command line utility)
|
||||
- must have already logged into lastpass
|
||||
@@ -26,12 +26,14 @@ DOCUMENTATION = '''
|
||||
EXAMPLES = """
|
||||
- name: get 'custom_field' from lastpass entry 'entry-name'
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('lastpass', 'entry-name', field='custom_field') }}"
|
||||
msg: "{{ lookup('community.general.lastpass', 'entry-name', field='custom_field') }}"
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
_raw:
|
||||
description: secrets stored
|
||||
type: list
|
||||
elements: str
|
||||
"""
|
||||
|
||||
from subprocess import Popen, PIPE
|
||||
|
||||
@@ -25,14 +25,14 @@ DOCUMENTATION = '''
|
||||
EXAMPLES = """
|
||||
- name: query LMDB for a list of country codes
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ query('lmdb_kv', 'nl', 'be', 'lu', db='jp.mdb') }}"
|
||||
msg: "{{ query('community.general.lmdb_kv', 'nl', 'be', 'lu', db='jp.mdb') }}"
|
||||
|
||||
- name: use list of values in a loop by key wildcard
|
||||
ansible.builtin.debug:
|
||||
msg: "Hello from {{ item.0 }} a.k.a. {{ item.1 }}"
|
||||
vars:
|
||||
- lmdb_kv_db: jp.mdb
|
||||
with_lmdb_kv:
|
||||
with_community.general.lmdb_kv:
|
||||
- "n*"
|
||||
|
||||
- name: get an item by key
|
||||
@@ -41,13 +41,15 @@ EXAMPLES = """
|
||||
- item == 'Belgium'
|
||||
vars:
|
||||
- lmdb_kv_db: jp.mdb
|
||||
with_lmdb_kv:
|
||||
with_community.general.lmdb_kv:
|
||||
- be
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
_raw:
|
||||
description: value(s) stored in LMDB
|
||||
type: list
|
||||
elements: raw
|
||||
"""
|
||||
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author:
|
||||
- Kyrylo Galanov (galanoff@gmail.com)
|
||||
- Kyrylo Galanov (!UNKNOWN) <galanoff@gmail.com>
|
||||
lookup: manifold
|
||||
short_description: get credentials from Manifold.co
|
||||
description:
|
||||
@@ -40,11 +40,14 @@ DOCUMENTATION = '''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: all available resources
|
||||
ansible.builtin.debug: msg="{{ lookup('manifold', api_token='SecretToken') }}"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.manifold', api_token='SecretToken') }}"
|
||||
- name: all available resources for a specific project in specific team
|
||||
ansible.builtin.debug: msg="{{ lookup('manifold', api_token='SecretToken', project='poject-1', team='team-2') }}"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.manifold', api_token='SecretToken', project='poject-1', team='team-2') }}"
|
||||
- name: two specific resources
|
||||
ansible.builtin.debug: msg="{{ lookup('manifold', 'resource-1', 'resource-2') }}"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.manifold', 'resource-1', 'resource-2') }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
|
||||
@@ -22,6 +22,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
author: Unknown (!UNKNOWN)
|
||||
lookup: nios
|
||||
short_description: Query Infoblox NIOS objects
|
||||
description:
|
||||
@@ -48,11 +49,13 @@ options:
|
||||
EXAMPLES = """
|
||||
- name: fetch all networkview objects
|
||||
ansible.builtin.set_fact:
|
||||
networkviews: "{{ lookup('nios', 'networkview', provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
||||
networkviews: "{{ lookup('community.general.nios', 'networkview',
|
||||
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
||||
|
||||
- name: fetch the default dns view
|
||||
ansible.builtin.set_fact:
|
||||
dns_views: "{{ lookup('nios', 'view', filter={'name': 'default'}, provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
||||
dns_views: "{{ lookup('community.general.nios', 'view', filter={'name': 'default'},
|
||||
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
||||
|
||||
# all of the examples below use credentials that are set using env variables
|
||||
# export INFOBLOX_HOST=nios01
|
||||
@@ -61,28 +64,27 @@ EXAMPLES = """
|
||||
|
||||
- name: fetch all host records and include extended attributes
|
||||
ansible.builtin.set_fact:
|
||||
host_records: "{{ lookup('nios', 'record:host', return_fields=['extattrs', 'name', 'view', 'comment']}) }}"
|
||||
host_records: "{{ lookup('community.general.nios', 'record:host', return_fields=['extattrs', 'name', 'view', 'comment']}) }}"
|
||||
|
||||
|
||||
- name: use env variables to pass credentials
|
||||
ansible.builtin.set_fact:
|
||||
networkviews: "{{ lookup('nios', 'networkview') }}"
|
||||
networkviews: "{{ lookup('community.general.nios', 'networkview') }}"
|
||||
|
||||
- name: get a host record
|
||||
ansible.builtin.set_fact:
|
||||
host: "{{ lookup('nios', 'record:host', filter={'name': 'hostname.ansible.com'}) }}"
|
||||
host: "{{ lookup('community.general.nios', 'record:host', filter={'name': 'hostname.ansible.com'}) }}"
|
||||
|
||||
- name: get the authoritative zone from a non default dns view
|
||||
ansible.builtin.set_fact:
|
||||
host: "{{ lookup('nios', 'zone_auth', filter={'fqdn': 'ansible.com', 'view': 'ansible-dns'}) }}"
|
||||
host: "{{ lookup('community.general.nios', 'zone_auth', filter={'fqdn': 'ansible.com', 'view': 'ansible-dns'}) }}"
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
obj_type:
|
||||
description:
|
||||
- The object type specified in the terms argument
|
||||
returned: always
|
||||
type: complex
|
||||
type: dictionary
|
||||
contains:
|
||||
obj_field:
|
||||
description:
|
||||
|
||||
@@ -22,6 +22,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
author: Unknown (!UNKNOWN)
|
||||
lookup: nios_next_ip
|
||||
short_description: Return the next available IP address for a network
|
||||
description:
|
||||
@@ -48,15 +49,15 @@ options:
|
||||
EXAMPLES = """
|
||||
- name: return next available IP address for network 192.168.10.0/24
|
||||
ansible.builtin.set_fact:
|
||||
ipaddr: "{{ lookup('nios_next_ip', '192.168.10.0/24', provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
||||
ipaddr: "{{ lookup('community.general.nios_next_ip', '192.168.10.0/24', provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
||||
|
||||
- name: return the next 3 available IP addresses for network 192.168.10.0/24
|
||||
ansible.builtin.set_fact:
|
||||
ipaddr: "{{ lookup('nios_next_ip', '192.168.10.0/24', num=3, provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
||||
ipaddr: "{{ lookup('community.general.nios_next_ip', '192.168.10.0/24', num=3, provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
||||
|
||||
- name: return the next 3 available IP addresses for network 192.168.10.0/24 excluding ip addresses - ['192.168.10.1', '192.168.10.2']
|
||||
ansible.builtin.set_fact:
|
||||
ipaddr: "{{ lookup('nios_next_ip', '192.168.10.0/24', num=3, exclude=['192.168.10.1', '192.168.10.2'],
|
||||
ipaddr: "{{ lookup('community.general.nios_next_ip', '192.168.10.0/24', num=3, exclude=['192.168.10.1', '192.168.10.2'],
|
||||
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
||||
"""
|
||||
|
||||
@@ -64,7 +65,6 @@ RETURN = """
|
||||
_list:
|
||||
description:
|
||||
- The list of next IP addresses available
|
||||
returned: always
|
||||
type: list
|
||||
"""
|
||||
|
||||
|
||||
@@ -22,6 +22,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
author: Unknown (!UNKNOWN)
|
||||
lookup: nios_next_network
|
||||
short_description: Return the next available network range for a network-container
|
||||
description:
|
||||
@@ -56,16 +57,17 @@ options:
|
||||
EXAMPLES = """
|
||||
- name: return next available network for network-container 192.168.10.0/24
|
||||
ansible.builtin.set_fact:
|
||||
networkaddr: "{{ lookup('nios_next_network', '192.168.10.0/24', cidr=25, provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
||||
networkaddr: "{{ lookup('community.general.nios_next_network', '192.168.10.0/24', cidr=25,
|
||||
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
||||
|
||||
- name: return the next 2 available network addresses for network-container 192.168.10.0/24
|
||||
ansible.builtin.set_fact:
|
||||
networkaddr: "{{ lookup('nios_next_network', '192.168.10.0/24', cidr=25, num=2,
|
||||
networkaddr: "{{ lookup('community.general.nios_next_network', '192.168.10.0/24', cidr=25, num=2,
|
||||
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
||||
|
||||
- name: return the available network addresses for network-container 192.168.10.0/24 excluding network range '192.168.10.0/25'
|
||||
ansible.builtin.set_fact:
|
||||
networkaddr: "{{ lookup('nios_next_network', '192.168.10.0/24', cidr=25, exclude=['192.168.10.0/25'],
|
||||
networkaddr: "{{ lookup('community.general.nios_next_network', '192.168.10.0/24', cidr=25, exclude=['192.168.10.0/25'],
|
||||
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
|
||||
"""
|
||||
|
||||
@@ -73,7 +75,6 @@ RETURN = """
|
||||
_list:
|
||||
description:
|
||||
- The list of next network addresses available
|
||||
returned: always
|
||||
type: list
|
||||
"""
|
||||
|
||||
|
||||
@@ -56,26 +56,26 @@ EXAMPLES = """
|
||||
# These examples only work when already signed in to 1Password
|
||||
- name: Retrieve password for KITT when already signed in to 1Password
|
||||
ansible.builtin.debug:
|
||||
var: lookup('onepassword', 'KITT')
|
||||
var: lookup('community.general.onepassword', 'KITT')
|
||||
|
||||
- name: Retrieve password for Wintermute when already signed in to 1Password
|
||||
ansible.builtin.debug:
|
||||
var: lookup('onepassword', 'Tessier-Ashpool', section='Wintermute')
|
||||
var: lookup('community.general.onepassword', 'Tessier-Ashpool', section='Wintermute')
|
||||
|
||||
- name: Retrieve username for HAL when already signed in to 1Password
|
||||
ansible.builtin.debug:
|
||||
var: lookup('onepassword', 'HAL 9000', field='username', vault='Discovery')
|
||||
var: lookup('community.general.onepassword', 'HAL 9000', field='username', vault='Discovery')
|
||||
|
||||
- name: Retrieve password for HAL when not signed in to 1Password
|
||||
ansible.builtin.debug:
|
||||
var: lookup('onepassword'
|
||||
var: lookup('community.general.onepassword'
|
||||
'HAL 9000'
|
||||
subdomain='Discovery'
|
||||
master_password=vault_master_password)
|
||||
|
||||
- name: Retrieve password for HAL when never signed in to 1Password
|
||||
ansible.builtin.debug:
|
||||
var: lookup('onepassword'
|
||||
var: lookup('community.general.onepassword'
|
||||
'HAL 9000'
|
||||
subdomain='Discovery'
|
||||
master_password=vault_master_password
|
||||
@@ -86,6 +86,8 @@ EXAMPLES = """
|
||||
RETURN = """
|
||||
_raw:
|
||||
description: field data requested
|
||||
type: list
|
||||
elements: str
|
||||
"""
|
||||
|
||||
import errno
|
||||
|
||||
@@ -52,16 +52,18 @@ DOCUMENTATION = '''
|
||||
EXAMPLES = """
|
||||
- name: Retrieve all data about Wintermute
|
||||
ansible.builtin.debug:
|
||||
var: lookup('onepassword_raw', 'Wintermute')
|
||||
var: lookup('community.general.onepassword_raw', 'Wintermute')
|
||||
|
||||
- name: Retrieve all data about Wintermute when not signed in to 1Password
|
||||
ansible.builtin.debug:
|
||||
var: lookup('onepassword_raw', 'Wintermute', subdomain='Turing', vault_password='DmbslfLvasjdl')
|
||||
var: lookup('community.general.onepassword_raw', 'Wintermute', subdomain='Turing', vault_password='DmbslfLvasjdl')
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
_raw:
|
||||
description: field data requested
|
||||
type: list
|
||||
elements: dict
|
||||
"""
|
||||
|
||||
import json
|
||||
|
||||
@@ -8,7 +8,7 @@ __metaclass__ = type
|
||||
DOCUMENTATION = '''
|
||||
lookup: passwordstore
|
||||
author:
|
||||
- Patrick Deelman <patrick@patrickdeelman.nl>
|
||||
- Patrick Deelman (!UNKNOWN) <patrick@patrickdeelman.nl>
|
||||
short_description: manage passwords with passwordstore.org's pass utility
|
||||
description:
|
||||
- Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility.
|
||||
@@ -58,38 +58,41 @@ EXAMPLES = """
|
||||
# Debug is used for examples, BAD IDEA to show passwords on screen
|
||||
- name: Basic lookup. Fails if example/test doesn't exist
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('passwordstore', 'example/test')}}"
|
||||
msg: "{{ lookup('community.general.passwordstore', 'example/test')}}"
|
||||
|
||||
- name: Create pass with random 16 character password. If password exists just give the password
|
||||
ansible.builtin.debug:
|
||||
var: mypassword
|
||||
vars:
|
||||
mypassword: "{{ lookup('passwordstore', 'example/test create=true')}}"
|
||||
mypassword: "{{ lookup('community.general.passwordstore', 'example/test create=true')}}"
|
||||
|
||||
- name: Different size password
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('passwordstore', 'example/test create=true length=42')}}"
|
||||
msg: "{{ lookup('community.general.passwordstore', 'example/test create=true length=42')}}"
|
||||
|
||||
- name: Create password and overwrite the password if it exists. As a bonus, this module includes the old password inside the pass file
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('passwordstore', 'example/test create=true overwrite=true')}}"
|
||||
msg: "{{ lookup('community.general.passwordstore', 'example/test create=true overwrite=true')}}"
|
||||
|
||||
- name: Create an alphanumeric password
|
||||
ansible.builtin.debug: msg="{{ lookup('passwordstore', 'example/test create=true nosymbols=true') }}"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.passwordstore', 'example/test create=true nosymbols=true') }}"
|
||||
|
||||
- name: Return the value for user in the KV pair user, username
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('passwordstore', 'example/test subkey=user')}}"
|
||||
msg: "{{ lookup('community.general.passwordstore', 'example/test subkey=user')}}"
|
||||
|
||||
- name: Return the entire password file content
|
||||
ansible.builtin.set_fact:
|
||||
passfilecontent: "{{ lookup('passwordstore', 'example/test returnall=true')}}"
|
||||
passfilecontent: "{{ lookup('community.general.passwordstore', 'example/test returnall=true')}}"
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
_raw:
|
||||
description:
|
||||
- a password
|
||||
type: list
|
||||
elements: str
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
@@ -8,7 +8,7 @@ DOCUMENTATION = '''
|
||||
lookup: redis
|
||||
author:
|
||||
- Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
|
||||
- Ansible Core
|
||||
- Ansible Core Team
|
||||
short_description: fetch data from Redis
|
||||
description:
|
||||
- This lookup returns a list of results from a Redis DB corresponding to a list of items given to it
|
||||
@@ -46,23 +46,29 @@ DOCUMENTATION = '''
|
||||
|
||||
EXAMPLES = """
|
||||
- name: query redis for somekey (default or configured settings used)
|
||||
ansible.builtin.debug: msg="{{ lookup('redis', 'somekey') }}"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.redis', 'somekey') }}"
|
||||
|
||||
- name: query redis for list of keys and non-default host and port
|
||||
ansible.builtin.debug: msg="{{ lookup('redis', item, host='myredis.internal.com', port=2121) }}"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.redis', item, host='myredis.internal.com', port=2121) }}"
|
||||
loop: '{{list_of_redis_keys}}'
|
||||
|
||||
- name: use list directly
|
||||
ansible.builtin.debug: msg="{{ lookup('redis', 'key1', 'key2', 'key3') }}"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.redis', 'key1', 'key2', 'key3') }}"
|
||||
|
||||
- name: use list directly with a socket
|
||||
ansible.builtin.debug: msg="{{ lookup('redis', 'key1', 'key2', socket='/var/tmp/redis.sock') }}"
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ lookup('community.general.redis', 'key1', 'key2', socket='/var/tmp/redis.sock') }}"
|
||||
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
_raw:
|
||||
description: value(s) stored in Redis
|
||||
type: list
|
||||
elements: str
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
@@ -6,7 +6,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
lookup: shelvefile
|
||||
author: Alejandro Guirao <lekumberri@gmail.com>
|
||||
author: Alejandro Guirao (!UNKNOWN) <lekumberri@gmail.com>
|
||||
short_description: read keys from Python shelve file
|
||||
description:
|
||||
- Read keys from Python shelve file.
|
||||
@@ -23,12 +23,14 @@ DOCUMENTATION = '''
|
||||
|
||||
EXAMPLES = """
|
||||
- name: retrieve a string value corresponding to a key inside a Python shelve file
|
||||
ansible.builtin.debug: msg="{{ lookup('shelvefile', 'file=path_to_some_shelve_file.db key=key_to_retrieve') }}
|
||||
ansible.builtin.debug: msg="{{ lookup('community.general.shelvefile', 'file=path_to_some_shelve_file.db key=key_to_retrieve') }}
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
_list:
|
||||
description: value(s) of key(s) in shelve file(s)
|
||||
type: list
|
||||
elements: str
|
||||
"""
|
||||
import shelve
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
lookup: tss
|
||||
author: Adam Migus (adam@migus.org)
|
||||
author: Adam Migus (@amigus) <adam@migus.org>
|
||||
short_description: Get secrets from Thycotic Secret Server
|
||||
version_added: 1.0.0
|
||||
description:
|
||||
@@ -66,6 +66,8 @@ _list:
|
||||
description:
|
||||
- The JSON responses to C(GET /secrets/{id}).
|
||||
- See U(https://updates.thycotic.net/secretserver/restapiguide/TokenAuth/#operation--secrets--id--get).
|
||||
type: list
|
||||
elements: dict
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
|
||||
@@ -83,7 +83,7 @@ class IPAClient(object):
|
||||
if status_code not in [200, 201, 204]:
|
||||
self._fail('login', info['msg'])
|
||||
|
||||
self.headers = {'Cookie': resp.info().get('Set-Cookie')}
|
||||
self.headers = {'Cookie': info.get('set-cookie')}
|
||||
except Exception as e:
|
||||
self._fail('login', to_native(e))
|
||||
if not self.headers:
|
||||
|
||||
@@ -710,24 +710,6 @@ class RedfishUtils(object):
|
||||
def get_multi_volume_inventory(self):
|
||||
return self.aggregate_systems(self.get_volume_inventory)
|
||||
|
||||
def restart_manager_gracefully(self):
    """Issue a GracefulRestart reset action against the Redfish manager resource."""
    actions_key = "Actions"

    # Read the manager resource to locate the #Manager.Reset action target URI.
    manager_resp = self.get_request(self.root_uri + self.manager_uri)
    if manager_resp['ret'] is False:
        return manager_resp
    reset_target = manager_resp['data'][actions_key]["#Manager.Reset"]["target"]

    # POST the graceful-restart request to the action target.
    post_resp = self.post_request(self.root_uri + reset_target,
                                  {'ResetType': 'GracefulRestart'})
    if post_resp['ret'] is False:
        return post_resp
    return {'ret': True}
|
||||
|
||||
def manage_indicator_led(self, command):
|
||||
result = {}
|
||||
key = 'IndicatorLED'
|
||||
@@ -773,6 +755,14 @@ class RedfishUtils(object):
|
||||
return reset_type
|
||||
|
||||
def manage_system_power(self, command):
    """Apply a power command to the system resource via the #ComputerSystem.Reset action."""
    return self.manage_power(
        command, self.systems_uri, '#ComputerSystem.Reset')
|
||||
|
||||
def manage_manager_power(self, command):
    """Apply a power command to the manager resource via the #Manager.Reset action."""
    return self.manage_power(
        command, self.manager_uri, '#Manager.Reset')
|
||||
|
||||
def manage_power(self, command, resource_uri, action_name):
|
||||
key = "Actions"
|
||||
reset_type_values = ['On', 'ForceOff', 'GracefulShutdown',
|
||||
'GracefulRestart', 'ForceRestart', 'Nmi',
|
||||
@@ -790,8 +780,8 @@ class RedfishUtils(object):
|
||||
if reset_type not in reset_type_values:
|
||||
return {'ret': False, 'msg': 'Invalid Command (%s)' % command}
|
||||
|
||||
# read the system resource and get the current power state
|
||||
response = self.get_request(self.root_uri + self.systems_uri)
|
||||
# read the resource and get the current power state
|
||||
response = self.get_request(self.root_uri + resource_uri)
|
||||
if response['ret'] is False:
|
||||
return response
|
||||
data = response['data']
|
||||
@@ -803,13 +793,13 @@ class RedfishUtils(object):
|
||||
if power_state == "Off" and reset_type in ['GracefulShutdown', 'ForceOff']:
|
||||
return {'ret': True, 'changed': False}
|
||||
|
||||
# get the #ComputerSystem.Reset Action and target URI
|
||||
if key not in data or '#ComputerSystem.Reset' not in data[key]:
|
||||
return {'ret': False, 'msg': 'Action #ComputerSystem.Reset not found'}
|
||||
reset_action = data[key]['#ComputerSystem.Reset']
|
||||
# get the reset Action and target URI
|
||||
if key not in data or action_name not in data[key]:
|
||||
return {'ret': False, 'msg': 'Action %s not found' % action_name}
|
||||
reset_action = data[key][action_name]
|
||||
if 'target' not in reset_action:
|
||||
return {'ret': False,
|
||||
'msg': 'target URI missing from Action #ComputerSystem.Reset'}
|
||||
'msg': 'target URI missing from Action %s' % action_name}
|
||||
action_uri = reset_action['target']
|
||||
|
||||
# get AllowableValues
|
||||
@@ -1501,13 +1491,18 @@ class RedfishUtils(object):
|
||||
return response
|
||||
return {'ret': True, 'changed': True, 'msg': "Set BIOS to default settings"}
|
||||
|
||||
def set_one_time_boot_device(self, bootdevice, uefi_target, boot_next):
|
||||
def set_boot_override(self, boot_opts):
|
||||
result = {}
|
||||
key = "Boot"
|
||||
|
||||
if not bootdevice:
|
||||
bootdevice = boot_opts.get('bootdevice')
|
||||
uefi_target = boot_opts.get('uefi_target')
|
||||
boot_next = boot_opts.get('boot_next')
|
||||
override_enabled = boot_opts.get('override_enabled')
|
||||
|
||||
if not bootdevice and override_enabled != 'Disabled':
|
||||
return {'ret': False,
|
||||
'msg': "bootdevice option required for SetOneTimeBoot"}
|
||||
'msg': "bootdevice option required for temporary boot override"}
|
||||
|
||||
# Search for 'key' entry and extract URI from it
|
||||
response = self.get_request(self.root_uri + self.systems_uri)
|
||||
@@ -1530,21 +1525,27 @@ class RedfishUtils(object):
|
||||
(bootdevice, allowable_values)}
|
||||
|
||||
# read existing values
|
||||
enabled = boot.get('BootSourceOverrideEnabled')
|
||||
cur_enabled = boot.get('BootSourceOverrideEnabled')
|
||||
target = boot.get('BootSourceOverrideTarget')
|
||||
cur_uefi_target = boot.get('UefiTargetBootSourceOverride')
|
||||
cur_boot_next = boot.get('BootNext')
|
||||
|
||||
if bootdevice == 'UefiTarget':
|
||||
if override_enabled == 'Disabled':
|
||||
payload = {
|
||||
'Boot': {
|
||||
'BootSourceOverrideEnabled': override_enabled
|
||||
}
|
||||
}
|
||||
elif bootdevice == 'UefiTarget':
|
||||
if not uefi_target:
|
||||
return {'ret': False,
|
||||
'msg': "uefi_target option required to SetOneTimeBoot for UefiTarget"}
|
||||
if enabled == 'Once' and target == bootdevice and uefi_target == cur_uefi_target:
|
||||
if override_enabled == cur_enabled and target == bootdevice and uefi_target == cur_uefi_target:
|
||||
# If properties are already set, no changes needed
|
||||
return {'ret': True, 'changed': False}
|
||||
payload = {
|
||||
'Boot': {
|
||||
'BootSourceOverrideEnabled': 'Once',
|
||||
'BootSourceOverrideEnabled': override_enabled,
|
||||
'BootSourceOverrideTarget': bootdevice,
|
||||
'UefiTargetBootSourceOverride': uefi_target
|
||||
}
|
||||
@@ -1553,23 +1554,23 @@ class RedfishUtils(object):
|
||||
if not boot_next:
|
||||
return {'ret': False,
|
||||
'msg': "boot_next option required to SetOneTimeBoot for UefiBootNext"}
|
||||
if enabled == 'Once' and target == bootdevice and boot_next == cur_boot_next:
|
||||
if cur_enabled == override_enabled and target == bootdevice and boot_next == cur_boot_next:
|
||||
# If properties are already set, no changes needed
|
||||
return {'ret': True, 'changed': False}
|
||||
payload = {
|
||||
'Boot': {
|
||||
'BootSourceOverrideEnabled': 'Once',
|
||||
'BootSourceOverrideEnabled': override_enabled,
|
||||
'BootSourceOverrideTarget': bootdevice,
|
||||
'BootNext': boot_next
|
||||
}
|
||||
}
|
||||
else:
|
||||
if enabled == 'Once' and target == bootdevice:
|
||||
if cur_enabled == override_enabled and target == bootdevice:
|
||||
# If properties are already set, no changes needed
|
||||
return {'ret': True, 'changed': False}
|
||||
payload = {
|
||||
'Boot': {
|
||||
'BootSourceOverrideEnabled': 'Once',
|
||||
'BootSourceOverrideEnabled': override_enabled,
|
||||
'BootSourceOverrideTarget': bootdevice
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2400,7 +2400,7 @@ class Container(DockerBaseClass):
|
||||
return shlex.split(self.parameters.entrypoint)
|
||||
|
||||
def _get_expected_ports(self):
|
||||
if not self.parameters.published_ports:
|
||||
if self.parameters.published_ports is None:
|
||||
return None
|
||||
expected_bound_ports = {}
|
||||
for container_port, config in self.parameters.published_ports.items():
|
||||
|
||||
95
plugins/modules/cloud/docker/docker_stack_task_info.py
Normal file
95
plugins/modules/cloud/docker/docker_stack_task_info.py
Normal file
@@ -0,0 +1,95 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel)
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_stack_task_info
|
||||
author: "Jose Angel Munoz (@imjoseangel)"
|
||||
short_description: Return information of the tasks on a docker stack
|
||||
description:
|
||||
- Retrieve information on docker stacks tasks using the C(docker stack) command
|
||||
on the target node (see examples).
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Stack name.
|
||||
type: str
|
||||
required: yes
|
||||
version_added: "1.1.0"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
results:
|
||||
description: |
|
||||
List of dictionaries containing the list of tasks associated
|
||||
to a stack name.
|
||||
sample: >
|
||||
[{"CurrentState":"Running","DesiredState":"Running","Error":"","ID":"7wqv6m02ugkw","Image":"busybox","Name":"test_stack.1","Node":"swarm","Ports":""}]
|
||||
returned: always
|
||||
type: list
|
||||
elements: dict
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Shows stack info
|
||||
community.general.docker_stack_task_info:
|
||||
name: test_stack
|
||||
register: result
|
||||
|
||||
- name: Show results
|
||||
ansible.builtin.debug:
|
||||
var: result.results
|
||||
'''
|
||||
|
||||
import json
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
def docker_stack_task(module, stack_name):
    """Run ``docker stack ps`` for *stack_name* and return (rc, stdout, stderr).

    stdout/stderr are stripped of surrounding whitespace; stdout carries one
    JSON object per line (one per task) via the ``--format={{json .}}`` flag.
    """
    docker_bin = module.get_bin_path('docker', required=True)
    command = [docker_bin, "stack", "ps", stack_name, "--format={{json .}}"]
    rc, out, err = module.run_command(command)
    return rc, out.strip(), err.strip()
|
||||
|
||||
|
||||
def main():
    """Module entry point: list the tasks of a docker stack.

    Runs ``docker stack ps`` for the given stack name and exits with
    ``results`` set to a list of dicts (one per task), or fails with the
    docker error output when the command returns non-zero.
    """
    module = AnsibleModule(
        argument_spec={
            'name': dict(type='str', required=True)
        },
        supports_check_mode=False
    )

    name = module.params['name']

    rc, out, err = docker_stack_task(module, name)

    if rc != 0:
        module.fail_json(msg="Error running docker stack. {0}".format(err),
                         rc=rc, stdout=out, stderr=err)

    # fail_json() exits, so no 'else' is needed here.
    # 'docker stack ps --format={{json .}}' emits one JSON object per line;
    # an empty stdout means the stack has no tasks.
    ret = [json.loads(line) for line in out.splitlines()] if out else []

    module.exit_json(changed=False,
                     rc=rc,
                     stdout=out,
                     stderr=err,
                     results=ret)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -19,6 +19,7 @@ options:
|
||||
filter:
|
||||
description:
|
||||
- Filter facts
|
||||
type: str
|
||||
choices: [ status, result ]
|
||||
notes:
|
||||
- See http://cloudinit.readthedocs.io/ for more information about cloud-init.
|
||||
|
||||
@@ -25,30 +25,37 @@ options:
|
||||
host:
|
||||
description:
|
||||
- Tiller's server host.
|
||||
type: str
|
||||
default: "localhost"
|
||||
port:
|
||||
description:
|
||||
- Tiller's server port.
|
||||
type: int
|
||||
default: 44134
|
||||
namespace:
|
||||
description:
|
||||
- Kubernetes namespace where the chart should be installed.
|
||||
type: str
|
||||
default: "default"
|
||||
name:
|
||||
description:
|
||||
- Release name to manage.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Whether to install C(present), remove C(absent), or purge C(purged) a package.
|
||||
choices: ['absent', 'purged', 'present']
|
||||
type: str
|
||||
default: "present"
|
||||
chart:
|
||||
description: |
|
||||
A map describing the chart to install. See examples for available options.
|
||||
description:
|
||||
- A map describing the chart to install. See examples for available options.
|
||||
type: dict
|
||||
default: {}
|
||||
values:
|
||||
description:
|
||||
- A map of value options for the chart.
|
||||
type: dict
|
||||
default: {}
|
||||
disable_hooks:
|
||||
description:
|
||||
|
||||
@@ -23,116 +23,148 @@ options:
|
||||
user:
|
||||
description:
|
||||
- The user to authenticate with.
|
||||
type: str
|
||||
required: true
|
||||
url:
|
||||
description:
|
||||
- The url of the oVirt instance.
|
||||
type: str
|
||||
required: true
|
||||
instance_name:
|
||||
description:
|
||||
- The name of the instance to use.
|
||||
type: str
|
||||
required: true
|
||||
aliases: [ vmname ]
|
||||
password:
|
||||
description:
|
||||
- Password of the user to authenticate with.
|
||||
type: str
|
||||
required: true
|
||||
image:
|
||||
description:
|
||||
- The template to use for the instance.
|
||||
type: str
|
||||
resource_type:
|
||||
description:
|
||||
- Whether you want to deploy an image or create an instance from scratch.
|
||||
type: str
|
||||
choices: [ new, template ]
|
||||
zone:
|
||||
description:
|
||||
- Deploy the image to this oVirt cluster.
|
||||
type: str
|
||||
instance_disksize:
|
||||
description:
|
||||
- Size of the instance's disk in GB.
|
||||
type: str
|
||||
aliases: [ vm_disksize]
|
||||
instance_cpus:
|
||||
description:
|
||||
- The instance's number of CPUs.
|
||||
type: str
|
||||
default: 1
|
||||
aliases: [ vmcpus ]
|
||||
instance_nic:
|
||||
description:
|
||||
- The name of the network interface in oVirt/RHEV.
|
||||
type: str
|
||||
aliases: [ vmnic ]
|
||||
instance_network:
|
||||
description:
|
||||
- The logical network the machine should belong to.
|
||||
type: str
|
||||
default: rhevm
|
||||
aliases: [ vmnetwork ]
|
||||
instance_mem:
|
||||
description:
|
||||
- The instance's amount of memory in MB.
|
||||
type: str
|
||||
aliases: [ vmmem ]
|
||||
instance_type:
|
||||
description:
|
||||
- Define whether the instance is a server, desktop or high_performance.
|
||||
- I(high_performance) is supported since Ansible 2.5 and oVirt/RHV 4.2.
|
||||
type: str
|
||||
choices: [ desktop, server, high_performance ]
|
||||
default: server
|
||||
aliases: [ vmtype ]
|
||||
disk_alloc:
|
||||
description:
|
||||
- Define whether disk is thin or preallocated.
|
||||
type: str
|
||||
choices: [ preallocated, thin ]
|
||||
default: thin
|
||||
disk_int:
|
||||
description:
|
||||
- Interface type of the disk.
|
||||
type: str
|
||||
choices: [ ide, virtio ]
|
||||
default: virtio
|
||||
instance_os:
|
||||
description:
|
||||
- Type of Operating System.
|
||||
type: str
|
||||
aliases: [ vmos ]
|
||||
instance_cores:
|
||||
description:
|
||||
- Define the instance's number of cores.
|
||||
type: str
|
||||
default: 1
|
||||
aliases: [ vmcores ]
|
||||
sdomain:
|
||||
description:
|
||||
- The Storage Domain where you want to create the instance's disk on.
|
||||
type: str
|
||||
region:
|
||||
description:
|
||||
- The oVirt/RHEV datacenter where you want to deploy to.
|
||||
type: str
|
||||
instance_dns:
|
||||
description:
|
||||
- Define the instance's Primary DNS server.
|
||||
type: str
|
||||
aliases: [ dns ]
|
||||
instance_domain:
|
||||
description:
|
||||
- Define the instance's Domain.
|
||||
type: str
|
||||
aliases: [ domain ]
|
||||
instance_hostname:
|
||||
description:
|
||||
- Define the instance's Hostname.
|
||||
type: str
|
||||
aliases: [ hostname ]
|
||||
instance_ip:
|
||||
description:
|
||||
- Define the instance's IP.
|
||||
type: str
|
||||
aliases: [ ip ]
|
||||
instance_netmask:
|
||||
description:
|
||||
- Define the instance's Netmask.
|
||||
type: str
|
||||
aliases: [ netmask ]
|
||||
instance_gateway:
|
||||
description:
|
||||
- Define the instance's Gateway.
|
||||
type: str
|
||||
aliases: [ gateway ]
|
||||
instance_rootpw:
|
||||
description:
|
||||
- Define the instance's Root password.
|
||||
type: str
|
||||
aliases: [ rootpw ]
|
||||
instance_key:
|
||||
description:
|
||||
- Define the instance's Authorized key.
|
||||
type: str
|
||||
aliases: [ key ]
|
||||
state:
|
||||
description:
|
||||
- Create, terminate or remove instances.
|
||||
choices: [ absent, present, restarted, shutdown, started ]
|
||||
type: str
|
||||
choices: [ absent, present, restart, shutdown, started ]
|
||||
default: present
|
||||
requirements:
|
||||
- ovirt-engine-sdk-python
|
||||
|
||||
@@ -16,20 +16,24 @@ options:
|
||||
api_host:
|
||||
description:
|
||||
- the host of the Proxmox VE cluster
|
||||
type: str
|
||||
required: true
|
||||
api_user:
|
||||
description:
|
||||
- the user to authenticate with
|
||||
type: str
|
||||
required: true
|
||||
api_password:
|
||||
description:
|
||||
- the password to authenticate with
|
||||
- you can use PROXMOX_PASSWORD environment variable
|
||||
type: str
|
||||
vmid:
|
||||
description:
|
||||
- the instance id
|
||||
- if not set, the next available VM ID will be fetched from ProxmoxAPI.
|
||||
- if not set, will be fetched from PromoxAPI based on the hostname
|
||||
type: str
|
||||
validate_certs:
|
||||
description:
|
||||
- enable / disable https certificate verification
|
||||
@@ -40,51 +44,64 @@ options:
|
||||
- Proxmox VE node, when new VM will be created
|
||||
- required only for C(state=present)
|
||||
- for another states will be autodiscovered
|
||||
type: str
|
||||
pool:
|
||||
description:
|
||||
- Proxmox VE resource pool
|
||||
type: str
|
||||
password:
|
||||
description:
|
||||
- the instance root password
|
||||
- required only for C(state=present)
|
||||
type: str
|
||||
hostname:
|
||||
description:
|
||||
- the instance hostname
|
||||
- required only for C(state=present)
|
||||
- must be unique if vmid is not passed
|
||||
type: str
|
||||
ostemplate:
|
||||
description:
|
||||
- the template for VM creating
|
||||
- required only for C(state=present)
|
||||
type: str
|
||||
disk:
|
||||
description:
|
||||
- hard disk size in GB for instance
|
||||
type: str
|
||||
default: 3
|
||||
cores:
|
||||
description:
|
||||
- Specify number of cores per socket.
|
||||
type: int
|
||||
default: 1
|
||||
cpus:
|
||||
description:
|
||||
- numbers of allocated cpus for instance
|
||||
type: int
|
||||
default: 1
|
||||
memory:
|
||||
description:
|
||||
- memory size in MB for instance
|
||||
type: int
|
||||
default: 512
|
||||
swap:
|
||||
description:
|
||||
- swap memory size in MB for instance
|
||||
type: int
|
||||
default: 0
|
||||
netif:
|
||||
description:
|
||||
- specifies network interfaces for the container. As a hash/dictionary defining interfaces.
|
||||
type: dict
|
||||
mounts:
|
||||
description:
|
||||
- specifies additional mounts (separate disks) for the container. As a hash/dictionary defining mount points
|
||||
type: dict
|
||||
ip_address:
|
||||
description:
|
||||
- specifies the address the container will be assigned
|
||||
type: str
|
||||
onboot:
|
||||
description:
|
||||
- specifies whether a VM will be started during system bootup
|
||||
@@ -93,20 +110,25 @@ options:
|
||||
storage:
|
||||
description:
|
||||
- target storage
|
||||
type: str
|
||||
default: 'local'
|
||||
cpuunits:
|
||||
description:
|
||||
- CPU weight for a VM
|
||||
type: int
|
||||
default: 1000
|
||||
nameserver:
|
||||
description:
|
||||
- sets DNS server IP address for a container
|
||||
type: str
|
||||
searchdomain:
|
||||
description:
|
||||
- sets DNS search domain for a container
|
||||
type: str
|
||||
timeout:
|
||||
description:
|
||||
- timeout for operations
|
||||
type: int
|
||||
default: 30
|
||||
force:
|
||||
description:
|
||||
@@ -119,11 +141,13 @@ options:
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the instance
|
||||
type: str
|
||||
choices: ['present', 'started', 'absent', 'stopped', 'restarted']
|
||||
default: present
|
||||
pubkey:
|
||||
description:
|
||||
- Public key to add to /root/.ssh/authorized_keys. This was added on Proxmox 4.2, it is ignored for earlier versions
|
||||
type: str
|
||||
unprivileged:
|
||||
description:
|
||||
- Indicate if the container should be unprivileged
|
||||
|
||||
@@ -28,19 +28,23 @@ options:
|
||||
description:
|
||||
- Pass arbitrary arguments to kvm.
|
||||
- This option is for experts only!
|
||||
type: str
|
||||
default: "-serial unix:/var/run/qemu-server/VMID.serial,server,nowait"
|
||||
api_host:
|
||||
description:
|
||||
- Specify the target host of the Proxmox VE cluster.
|
||||
type: str
|
||||
required: true
|
||||
api_user:
|
||||
description:
|
||||
- Specify the user to authenticate with.
|
||||
type: str
|
||||
required: true
|
||||
api_password:
|
||||
description:
|
||||
- Specify the password to authenticate with.
|
||||
- You can use C(PROXMOX_PASSWORD) environment variable.
|
||||
type: str
|
||||
autostart:
|
||||
description:
|
||||
- Specify if the VM should be automatically restarted after crash (currently ignored in PVE API).
|
||||
@@ -50,59 +54,73 @@ options:
|
||||
description:
|
||||
- Specify the amount of RAM for the VM in MB.
|
||||
- Using zero disables the balloon driver.
|
||||
type: int
|
||||
default: 0
|
||||
bios:
|
||||
description:
|
||||
- Specify the BIOS implementation.
|
||||
type: str
|
||||
choices: ['seabios', 'ovmf']
|
||||
boot:
|
||||
description:
|
||||
- Specify the boot order -> boot on floppy C(a), hard disk C(c), CD-ROM C(d), or network C(n).
|
||||
- You can combine to set order.
|
||||
type: str
|
||||
default: cnd
|
||||
bootdisk:
|
||||
description:
|
||||
- Enable booting from specified disk. C((ide|sata|scsi|virtio)\d+)
|
||||
type: str
|
||||
clone:
|
||||
description:
|
||||
- Name of VM to be cloned. If C(vmid) is setted, C(clone) can take arbitrary value but required for initiating the clone.
|
||||
type: str
|
||||
cores:
|
||||
description:
|
||||
- Specify number of cores per socket.
|
||||
type: int
|
||||
default: 1
|
||||
cpu:
|
||||
description:
|
||||
- Specify emulated CPU type.
|
||||
type: str
|
||||
default: kvm64
|
||||
cpulimit:
|
||||
description:
|
||||
- Specify if CPU usage will be limited. Value 0 indicates no CPU limit.
|
||||
- If the computer has 2 CPUs, it has total of '2' CPU time
|
||||
type: int
|
||||
cpuunits:
|
||||
description:
|
||||
- Specify CPU weight for a VM.
|
||||
- You can disable fair-scheduler configuration by setting this to 0
|
||||
type: int
|
||||
default: 1000
|
||||
delete:
|
||||
description:
|
||||
- Specify a list of settings you want to delete.
|
||||
type: str
|
||||
description:
|
||||
description:
|
||||
- Specify the description for the VM. Only used on the configuration web interface.
|
||||
- This is saved as comment inside the configuration file.
|
||||
type: str
|
||||
digest:
|
||||
description:
|
||||
- Specify if to prevent changes if current configuration file has different SHA1 digest.
|
||||
- This can be used to prevent concurrent modifications.
|
||||
type: str
|
||||
force:
|
||||
description:
|
||||
- Allow to force stop VM.
|
||||
- Can be used only with states C(stopped), C(restarted).
|
||||
- Can be used with states C(stopped) and C(restarted).
|
||||
default: no
|
||||
type: bool
|
||||
format:
|
||||
description:
|
||||
- Target drive's backing file's data format.
|
||||
- Used only with clone
|
||||
type: str
|
||||
default: qcow2
|
||||
choices: [ "cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk" ]
|
||||
freeze:
|
||||
@@ -126,14 +144,17 @@ options:
|
||||
- C(rombar=boolean) I(default=1) Specify whether or not the device's ROM will be visible in the guest's memory map.
|
||||
- C(x-vga=boolean) I(default=0) Enable vfio-vga device support.
|
||||
- /!\ This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care.
|
||||
type: dict
|
||||
hotplug:
|
||||
description:
|
||||
- Selectively enable hotplug features.
|
||||
- This is a comma separated list of hotplug features C('network', 'disk', 'cpu', 'memory' and 'usb').
|
||||
- Value 0 disables hotplug completely and value 1 is an alias for the default C('network,disk,usb').
|
||||
type: str
|
||||
hugepages:
|
||||
description:
|
||||
- Enable/disable hugepages memory.
|
||||
type: str
|
||||
choices: ['any', '2', '1024']
|
||||
ide:
|
||||
description:
|
||||
@@ -143,9 +164,11 @@ options:
|
||||
- C(storage) is the storage identifier where to create the disk.
|
||||
- C(size) is the size of the disk in GB.
|
||||
- C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
|
||||
type: dict
|
||||
keyboard:
|
||||
description:
|
||||
- Sets the keyboard layout for VNC server.
|
||||
type: str
|
||||
kvm:
|
||||
description:
|
||||
- Enable/disable KVM hardware virtualization.
|
||||
@@ -159,26 +182,32 @@ options:
|
||||
lock:
|
||||
description:
|
||||
- Lock/unlock the VM.
|
||||
type: str
|
||||
choices: ['migrate', 'backup', 'snapshot', 'rollback']
|
||||
machine:
|
||||
description:
|
||||
- Specifies the Qemu machine type.
|
||||
- type => C((pc|pc(-i440fx)?-\d+\.\d+(\.pxe)?|q35|pc-q35-\d+\.\d+(\.pxe)?))
|
||||
type: str
|
||||
memory:
|
||||
description:
|
||||
- Memory size in MB for instance.
|
||||
type: int
|
||||
default: 512
|
||||
migrate_downtime:
|
||||
description:
|
||||
- Sets maximum tolerated downtime (in seconds) for migrations.
|
||||
type: int
|
||||
migrate_speed:
|
||||
description:
|
||||
- Sets maximum speed (in MB/s) for migrations.
|
||||
- A value of 0 is no limit.
|
||||
type: int
|
||||
name:
|
||||
description:
|
||||
- Specifies the VM name. Only used on the configuration web interface.
|
||||
- Required only for C(state=present).
|
||||
type: str
|
||||
net:
|
||||
description:
|
||||
- A hash/dictionary of network interfaces for the VM. C(net='{"key":"value", "key":"value"}').
|
||||
@@ -189,15 +218,18 @@ options:
|
||||
- The C(bridge) parameter can be used to automatically add the interface to a bridge device. The Proxmox VE standard bridge is called 'vmbr0'.
|
||||
- Option C(rate) is used to limit traffic bandwidth from and to this interface. It is specified as floating point number, unit is 'Megabytes per second'.
|
||||
- If you specify no bridge, we create a kvm 'user' (NATed) network device, which provides DHCP and DNS services.
|
||||
type: dict
|
||||
newid:
|
||||
description:
|
||||
- VMID for the clone. Used only with clone.
|
||||
- If newid is not set, the next available VM ID will be fetched from ProxmoxAPI.
|
||||
type: int
|
||||
node:
|
||||
description:
|
||||
- Proxmox VE node, where the new VM will be created.
|
||||
- Only required for C(state=present).
|
||||
- For other states, it will be autodiscovered.
|
||||
type: str
|
||||
numa:
|
||||
description:
|
||||
- A hash/dictionaries of NUMA topology. C(numa='{"key":"value", "key":"value"}').
|
||||
@@ -207,6 +239,7 @@ options:
|
||||
- C(hostnodes) Host NUMA nodes to use.
|
||||
- C(memory) Amount of memory this NUMA node provides.
|
||||
- C(policy) NUMA allocation policy.
|
||||
type: dict
|
||||
onboot:
|
||||
description:
|
||||
- Specifies whether a VM will be started during system bootup.
|
||||
@@ -216,6 +249,7 @@ options:
|
||||
description:
|
||||
- Specifies guest operating system. This is used to enable special optimization/features for specific operating systems.
|
||||
- The l26 is Linux 2.6/3.X Kernel.
|
||||
type: str
|
||||
choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'l24', 'l26', 'solaris']
|
||||
default: l26
|
||||
parallel:
|
||||
@@ -223,9 +257,11 @@ options:
|
||||
- A hash/dictionary of map host parallel devices. C(parallel='{"key":"value", "key":"value"}').
|
||||
- Keys allowed are - (parallel[n]) where 0 ≤ n ≤ 2.
|
||||
- Values allowed are - C("/dev/parport\d+|/dev/usb/lp\d+").
|
||||
type: dict
|
||||
pool:
|
||||
description:
|
||||
- Add the new VM to the specified pool.
|
||||
type: str
|
||||
protection:
|
||||
description:
|
||||
- Enable/disable the protection flag of the VM. This will enable/disable the remove VM and remove disk operations.
|
||||
@@ -237,6 +273,7 @@ options:
|
||||
revert:
|
||||
description:
|
||||
- Revert a pending change.
|
||||
type: str
|
||||
sata:
|
||||
description:
|
||||
- A hash/dictionary of volume used as sata hard disk or CD-ROM. C(sata='{"key":"value", "key":"value"}').
|
||||
@@ -245,6 +282,7 @@ options:
|
||||
- C(storage) is the storage identifier where to create the disk.
|
||||
- C(size) is the size of the disk in GB.
|
||||
- C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
|
||||
type: dict
|
||||
scsi:
|
||||
description:
|
||||
- A hash/dictionary of volume used as SCSI hard disk or CD-ROM. C(scsi='{"key":"value", "key":"value"}').
|
||||
@@ -253,9 +291,11 @@ options:
|
||||
- C(storage) is the storage identifier where to create the disk.
|
||||
- C(size) is the size of the disk in GB.
|
||||
- C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
|
||||
type: dict
|
||||
scsihw:
|
||||
description:
|
||||
- Specifies the SCSI controller model.
|
||||
type: str
|
||||
choices: ['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']
|
||||
serial:
|
||||
description:
|
||||
@@ -263,44 +303,54 @@ options:
|
||||
- Keys allowed are - serial[n](str; required) where 0 ≤ n ≤ 3.
|
||||
- Values allowed are - C((/dev/.+|socket)).
|
||||
- /!\ If you pass through a host serial device, it is no longer possible to migrate such machines - use with special care.
|
||||
type: dict
|
||||
shares:
|
||||
description:
|
||||
- Rets amount of memory shares for auto-ballooning. (0 - 50000).
|
||||
- The larger the number is, the more memory this VM gets.
|
||||
- The number is relative to weights of all other running VMs.
|
||||
- Using 0 disables auto-ballooning, this means no limit.
|
||||
type: int
|
||||
skiplock:
|
||||
description:
|
||||
- Ignore locks
|
||||
- Only root is allowed to use this option.
|
||||
type: bool
|
||||
smbios:
|
||||
description:
|
||||
- Specifies SMBIOS type 1 fields.
|
||||
type: str
|
||||
snapname:
|
||||
description:
|
||||
- The name of the snapshot. Used only with clone.
|
||||
type: str
|
||||
sockets:
|
||||
description:
|
||||
- Sets the number of CPU sockets. (1 - N).
|
||||
type: int
|
||||
default: 1
|
||||
startdate:
|
||||
description:
|
||||
- Sets the initial date of the real time clock.
|
||||
- Valid format for date are C('now') or C('2016-09-25T16:01:21') or C('2016-09-25').
|
||||
type: str
|
||||
startup:
|
||||
description:
|
||||
- Startup and shutdown behavior. C([[order=]\d+] [,up=\d+] [,down=\d+]).
|
||||
- Order is a non-negative number defining the general startup order.
|
||||
- Shutdown in done with reverse ordering.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Indicates desired state of the instance.
|
||||
- If C(current), the current state of the VM will be fetched. You can access it with C(results.status)
|
||||
type: str
|
||||
choices: ['present', 'started', 'absent', 'stopped', 'restarted','current']
|
||||
default: present
|
||||
storage:
|
||||
description:
|
||||
- Target storage for full clone.
|
||||
type: str
|
||||
tablet:
|
||||
description:
|
||||
- Enables/disables the USB tablet device.
|
||||
@@ -310,6 +360,7 @@ options:
|
||||
description:
|
||||
- Target node. Only allowed if the original VM is on shared storage.
|
||||
- Used only with clone
|
||||
type: str
|
||||
tdf:
|
||||
description:
|
||||
- Enables/disables time drift fix.
|
||||
@@ -322,6 +373,7 @@ options:
|
||||
timeout:
|
||||
description:
|
||||
- Timeout for operations.
|
||||
type: int
|
||||
default: 30
|
||||
update:
|
||||
description:
|
||||
@@ -338,9 +390,11 @@ options:
|
||||
vcpus:
|
||||
description:
|
||||
- Sets number of hotplugged vcpus.
|
||||
type: int
|
||||
vga:
|
||||
description:
|
||||
- Select VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use option 'std' or 'vmware'.
|
||||
type: str
|
||||
choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']
|
||||
default: std
|
||||
virtio:
|
||||
@@ -351,13 +405,16 @@ options:
|
||||
- C(storage) is the storage identifier where to create the disk.
|
||||
- C(size) is the size of the disk in GB.
|
||||
- C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
|
||||
type: dict
|
||||
vmid:
|
||||
description:
|
||||
- Specifies the VM ID. Instead use I(name) parameter.
|
||||
- If vmid is not set, the next available VM ID will be fetched from ProxmoxAPI.
|
||||
type: int
|
||||
watchdog:
|
||||
description:
|
||||
- Creates a virtual hardware watchdog device.
|
||||
type: str
|
||||
requirements: [ "proxmoxer", "requests" ]
|
||||
'''
|
||||
|
||||
@@ -588,9 +645,6 @@ from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
VZ_TYPE = 'qemu'
|
||||
|
||||
|
||||
def get_nextvmid(module, proxmox):
|
||||
try:
|
||||
vmid = proxmox.cluster.nextid.get()
|
||||
@@ -652,19 +706,35 @@ def get_vminfo(module, proxmox, node, vmid, **kwargs):
|
||||
results['vmid'] = int(vmid)
|
||||
|
||||
|
||||
def settings(module, proxmox, vmid, node, name, timeout, **kwargs):
|
||||
def settings(module, proxmox, vmid, node, name, **kwargs):
|
||||
proxmox_node = proxmox.nodes(node)
|
||||
|
||||
# Sanitize kwargs. Remove not defined args and ensure True and False converted to int.
|
||||
kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
|
||||
|
||||
if getattr(proxmox_node, VZ_TYPE)(vmid).config.set(**kwargs) is None:
|
||||
if proxmox_node.qemu(vmid).config.set(**kwargs) is None:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, timeout, update, **kwargs):
|
||||
def wait_for_task(module, proxmox, node, taskid):
|
||||
timeout = module.params['timeout']
|
||||
|
||||
while timeout:
|
||||
task = proxmox.nodes(node).tasks(taskid).status.get()
|
||||
if task['status'] == 'stopped' and task['exitstatus'] == 'OK':
|
||||
# Wait an extra second as the API can be a ahead of the hypervisor
|
||||
time.sleep(1)
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
break
|
||||
time.sleep(1)
|
||||
return False
|
||||
|
||||
|
||||
def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, update, **kwargs):
|
||||
# Available only in PVE 4
|
||||
only_v4 = ['force', 'protection', 'skiplock']
|
||||
|
||||
@@ -698,6 +768,8 @@ def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sock
|
||||
del kwargs['ide']
|
||||
if 'net' in kwargs:
|
||||
del kwargs['net']
|
||||
if 'force' in kwargs:
|
||||
del kwargs['force']
|
||||
|
||||
# Convert all dict in kwargs to elements. For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n]
|
||||
for k in list(kwargs.keys()):
|
||||
@@ -723,7 +795,7 @@ def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sock
|
||||
module.fail_json(msg='skiplock parameter require root@pam user. ')
|
||||
|
||||
if update:
|
||||
if getattr(proxmox_node, VZ_TYPE)(vmid).config.set(name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) is None:
|
||||
if proxmox_node.qemu(vmid).config.set(name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) is None:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
@@ -734,50 +806,35 @@ def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sock
|
||||
clone_params.update(dict([k, int(v)] for k, v in clone_params.items() if isinstance(v, bool)))
|
||||
taskid = proxmox_node.qemu(vmid).clone.post(newid=newid, name=name, **clone_params)
|
||||
else:
|
||||
taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs)
|
||||
taskid = proxmox_node.qemu.create(vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs)
|
||||
|
||||
while timeout:
|
||||
if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
return True
|
||||
timeout = timeout - 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' %
|
||||
proxmox_node.tasks(taskid).log.get()[:1])
|
||||
time.sleep(1)
|
||||
return False
|
||||
if not wait_for_task(module, proxmox, node, taskid):
|
||||
module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' %
|
||||
proxmox_node.tasks(taskid).log.get()[:1])
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def start_vm(module, proxmox, vm, vmid, timeout):
|
||||
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.start.post()
|
||||
while timeout:
|
||||
if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
return True
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s'
|
||||
% proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
return False
|
||||
def start_vm(module, proxmox, vm):
|
||||
vmid = vm[0]['vmid']
|
||||
proxmox_node = proxmox.nodes(vm[0]['node'])
|
||||
taskid = proxmox_node.qemu(vmid).status.start.post()
|
||||
if not wait_for_task(module, proxmox, vm[0]['node'], taskid):
|
||||
module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' %
|
||||
proxmox_node.tasks(taskid).log.get()[:1])
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def stop_vm(module, proxmox, vm, vmid, timeout, force):
|
||||
if force:
|
||||
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1)
|
||||
else:
|
||||
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post()
|
||||
while timeout:
|
||||
if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
return True
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s'
|
||||
% proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
|
||||
time.sleep(1)
|
||||
return False
|
||||
def stop_vm(module, proxmox, vm, force):
|
||||
vmid = vm[0]['vmid']
|
||||
proxmox_node = proxmox.nodes(vm[0]['node'])
|
||||
taskid = proxmox_node.qemu(vmid).status.shutdown.post(forceStop=(1 if force else 0))
|
||||
if not wait_for_task(module, proxmox, vm[0]['node'], taskid):
|
||||
module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' %
|
||||
proxmox_node.tasks(taskid).log.get()[:1])
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def proxmox_version(proxmox):
|
||||
@@ -807,7 +864,7 @@ def main():
|
||||
delete=dict(type='str', default=None),
|
||||
description=dict(type='str'),
|
||||
digest=dict(type='str'),
|
||||
force=dict(type='bool', default=None),
|
||||
force=dict(type='bool', default=False),
|
||||
format=dict(type='str', default='qcow2', choices=['cloop', 'cow', 'qcow', 'qcow2', 'qed', 'raw', 'vmdk']),
|
||||
freeze=dict(type='bool'),
|
||||
full=dict(type='bool', default=True),
|
||||
@@ -884,7 +941,6 @@ def main():
|
||||
revert = module.params['revert']
|
||||
sockets = module.params['sockets']
|
||||
state = module.params['state']
|
||||
timeout = module.params['timeout']
|
||||
update = bool(module.params['update'])
|
||||
vmid = module.params['vmid']
|
||||
validate_certs = module.params['validate_certs']
|
||||
@@ -898,58 +954,60 @@ def main():
|
||||
|
||||
try:
|
||||
proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
|
||||
global VZ_TYPE
|
||||
global PVE_MAJOR_VERSION
|
||||
PVE_MAJOR_VERSION = 3 if proxmox_version(proxmox) < LooseVersion('4.0') else 4
|
||||
except Exception as e:
|
||||
module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
|
||||
|
||||
# If vmid not set get the Next VM id from ProxmoxAPI
|
||||
# If vm name is set get the VM id from ProxmoxAPI
|
||||
# If vmid is not defined then retrieve its value from the vm name,
|
||||
# the cloned vm name or retrieve the next free VM id from ProxmoxAPI.
|
||||
if not vmid:
|
||||
if state == 'present' and (not update and not clone) and (not delete and not revert):
|
||||
if state == 'present' and not update and not clone and not delete and not revert:
|
||||
try:
|
||||
vmid = get_nextvmid(module, proxmox)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
|
||||
else:
|
||||
clone_target = clone or name
|
||||
try:
|
||||
if not clone:
|
||||
vmid = get_vmid(proxmox, name)[0]
|
||||
else:
|
||||
vmid = get_vmid(proxmox, clone)[0]
|
||||
vmid = get_vmid(proxmox, clone_target)[0]
|
||||
except Exception as e:
|
||||
if not clone:
|
||||
module.fail_json(msg="VM {0} does not exist in cluster.".format(name))
|
||||
else:
|
||||
module.fail_json(msg="VM {0} does not exist in cluster.".format(clone))
|
||||
vmid = -1
|
||||
|
||||
if clone is not None:
|
||||
if get_vmid(proxmox, name):
|
||||
module.exit_json(changed=False, msg="VM with name <%s> already exists" % name)
|
||||
if vmid is not None:
|
||||
vm = get_vm(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
|
||||
# If newid is not defined then retrieve the next free id from ProxmoxAPI
|
||||
if not newid:
|
||||
try:
|
||||
newid = get_nextvmid(module, proxmox)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
|
||||
else:
|
||||
vm = get_vm(proxmox, newid)
|
||||
if vm:
|
||||
module.exit_json(changed=False, msg="vmid %s with VM name %s already exists" % (newid, name))
|
||||
|
||||
# Ensure source VM name exists when cloning
|
||||
if -1 == vmid:
|
||||
module.fail_json(msg='VM with name = %s does not exist in cluster' % clone)
|
||||
|
||||
# Ensure source VM id exists when cloning
|
||||
if not get_vm(proxmox, vmid):
|
||||
module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
|
||||
|
||||
# Ensure the choosen VM name doesn't already exist when cloning
|
||||
if get_vmid(proxmox, name):
|
||||
module.exit_json(changed=False, msg="VM with name <%s> already exists" % name)
|
||||
|
||||
# Ensure the choosen VM id doesn't already exist when cloning
|
||||
if get_vm(proxmox, newid):
|
||||
module.exit_json(changed=False, msg="vmid %s with VM name %s already exists" % (newid, name))
|
||||
|
||||
if delete is not None:
|
||||
try:
|
||||
settings(module, proxmox, vmid, node, name, timeout, delete=delete)
|
||||
settings(module, proxmox, vmid, node, name, delete=delete)
|
||||
module.exit_json(changed=True, msg="Settings has deleted on VM {0} with vmid {1}".format(name, vmid))
|
||||
except Exception as e:
|
||||
module.fail_json(msg='Unable to delete settings on VM {0} with vmid {1}: '.format(name, vmid) + str(e))
|
||||
elif revert is not None:
|
||||
|
||||
if revert is not None:
|
||||
try:
|
||||
settings(module, proxmox, vmid, node, name, timeout, revert=revert)
|
||||
settings(module, proxmox, vmid, node, name, revert=revert)
|
||||
module.exit_json(changed=True, msg="Settings has reverted on VM {0} with vmid {1}".format(name, vmid))
|
||||
except Exception as e:
|
||||
module.fail_json(msg='Unable to revert settings on VM {0} with vmid {1}: Maybe is not a pending task... '.format(name, vmid) + str(e))
|
||||
@@ -965,7 +1023,7 @@ def main():
|
||||
elif not node_check(proxmox, node):
|
||||
module.fail_json(msg="node '%s' does not exist in cluster" % node)
|
||||
|
||||
create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, timeout, update,
|
||||
create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, update,
|
||||
acpi=module.params['acpi'],
|
||||
agent=module.params['agent'],
|
||||
autostart=module.params['autostart'],
|
||||
@@ -1037,44 +1095,52 @@ def main():
|
||||
elif clone is not None:
|
||||
module.fail_json(msg="Unable to clone vm {0} from vmid {1}=".format(name, vmid) + str(e))
|
||||
else:
|
||||
module.fail_json(msg="creation of %s VM %s with vmid %s failed with exception=%s" % (VZ_TYPE, name, vmid, e))
|
||||
module.fail_json(msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e))
|
||||
|
||||
elif state == 'started':
|
||||
try:
|
||||
if -1 == vmid:
|
||||
module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
|
||||
vm = get_vm(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(msg='VM with vmid <%s> does not exist in cluster' % vmid)
|
||||
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
|
||||
if vm[0]['status'] == 'running':
|
||||
module.exit_json(changed=False, msg="VM %s is already running" % vmid)
|
||||
|
||||
if start_vm(module, proxmox, vm, vmid, timeout):
|
||||
if start_vm(module, proxmox, vm):
|
||||
module.exit_json(changed=True, msg="VM %s started" % vmid)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))
|
||||
|
||||
elif state == 'stopped':
|
||||
try:
|
||||
if -1 == vmid:
|
||||
module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
|
||||
|
||||
vm = get_vm(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
|
||||
|
||||
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
|
||||
if vm[0]['status'] == 'stopped':
|
||||
module.exit_json(changed=False, msg="VM %s is already stopped" % vmid)
|
||||
|
||||
if stop_vm(module, proxmox, vm, vmid, timeout, force=module.params['force']):
|
||||
if stop_vm(module, proxmox, vm, force=module.params['force']):
|
||||
module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e))
|
||||
|
||||
elif state == 'restarted':
|
||||
try:
|
||||
if -1 == vmid:
|
||||
module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
|
||||
|
||||
vm = get_vm(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
|
||||
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
|
||||
if vm[0]['status'] == 'stopped':
|
||||
module.exit_json(changed=False, msg="VM %s is not running" % vmid)
|
||||
|
||||
if stop_vm(module, proxmox, vm, vmid, timeout, force=module.params['force']) and start_vm(module, proxmox, vm, vmid, timeout):
|
||||
if stop_vm(module, proxmox, vm, force=module.params['force']) and start_vm(module, proxmox, vm):
|
||||
module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e))
|
||||
@@ -1083,37 +1149,31 @@ def main():
|
||||
try:
|
||||
vm = get_vm(proxmox, vmid)
|
||||
if not vm:
|
||||
module.exit_json(changed=False, msg="VM %s does not exist" % vmid)
|
||||
module.exit_json(changed=False)
|
||||
|
||||
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
|
||||
proxmox_node = proxmox.nodes(vm[0]['node'])
|
||||
if vm[0]['status'] == 'running':
|
||||
module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
|
||||
|
||||
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid)
|
||||
while timeout:
|
||||
if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
|
||||
proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
|
||||
module.exit_json(changed=True, msg="VM %s removed" % vmid)
|
||||
timeout -= 1
|
||||
if timeout == 0:
|
||||
module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
|
||||
% proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
|
||||
|
||||
time.sleep(1)
|
||||
taskid = proxmox_node.qemu.delete(vmid)
|
||||
if not wait_for_task(module, proxmox, vm[0]['node'], taskid):
|
||||
module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s' %
|
||||
proxmox_node.tasks(taskid).log.get()[:1])
|
||||
else:
|
||||
module.exit_json(changed=True, msg="VM %s removed" % vmid)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, e))
|
||||
|
||||
elif state == 'current':
|
||||
status = {}
|
||||
try:
|
||||
vm = get_vm(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
|
||||
current = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status']
|
||||
status['status'] = current
|
||||
if status:
|
||||
module.exit_json(changed=False, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Unable to get vm {0} with vmid = {1} status: ".format(name, vmid) + str(e))
|
||||
if -1 == vmid:
|
||||
module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
|
||||
vm = get_vm(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
|
||||
current = proxmox.nodes(vm[0]['node']).qemu(vmid).status.current.get()['status']
|
||||
status['status'] = current
|
||||
if status:
|
||||
module.exit_json(changed=False, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
@@ -18,15 +18,18 @@ options:
|
||||
api_host:
|
||||
description:
|
||||
- the host of the Proxmox VE cluster
|
||||
type: str
|
||||
required: true
|
||||
api_user:
|
||||
description:
|
||||
- the user to authenticate with
|
||||
type: str
|
||||
required: true
|
||||
api_password:
|
||||
description:
|
||||
- the password to authenticate with
|
||||
- you can use PROXMOX_PASSWORD environment variable
|
||||
type: str
|
||||
validate_certs:
|
||||
description:
|
||||
- enable / disable https certificate verification
|
||||
@@ -35,29 +38,34 @@ options:
|
||||
node:
|
||||
description:
|
||||
- Proxmox VE node, when you will operate with template
|
||||
type: str
|
||||
required: true
|
||||
src:
|
||||
description:
|
||||
- path to uploaded file
|
||||
- required only for C(state=present)
|
||||
aliases: ['path']
|
||||
type: path
|
||||
template:
|
||||
description:
|
||||
- the template name
|
||||
- required only for states C(absent), C(info)
|
||||
type: str
|
||||
content_type:
|
||||
description:
|
||||
- content type
|
||||
- required only for C(state=present)
|
||||
type: str
|
||||
default: 'vztmpl'
|
||||
choices: ['vztmpl', 'iso']
|
||||
storage:
|
||||
description:
|
||||
- target storage
|
||||
type: str
|
||||
default: 'local'
|
||||
timeout:
|
||||
description:
|
||||
- timeout for operations
|
||||
type: int
|
||||
default: 30
|
||||
force:
|
||||
description:
|
||||
@@ -67,6 +75,7 @@ options:
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the template
|
||||
type: str
|
||||
choices: ['present', 'absent']
|
||||
default: present
|
||||
notes:
|
||||
|
||||
@@ -29,6 +29,7 @@ options:
|
||||
description:
|
||||
- The password for user authentication.
|
||||
type: str
|
||||
required: true
|
||||
server:
|
||||
description:
|
||||
- The name/IP of your RHEV-m/oVirt instance.
|
||||
@@ -111,15 +112,18 @@ options:
|
||||
description:
|
||||
- This option uses complex arguments and is a list of disks with the options name, size and domain.
|
||||
type: list
|
||||
elements: str
|
||||
ifaces:
|
||||
description:
|
||||
- This option uses complex arguments and is a list of interfaces with the options name and vlan.
|
||||
type: list
|
||||
elements: str
|
||||
aliases: [ interfaces, nics ]
|
||||
boot_order:
|
||||
description:
|
||||
- This option uses complex arguments and is a list of items that specify the bootorder.
|
||||
type: list
|
||||
elements: str
|
||||
default: [ hd, network ]
|
||||
del_prot:
|
||||
description:
|
||||
@@ -1480,14 +1484,14 @@ def main():
|
||||
vmhost=dict(type='str'),
|
||||
vmcpu=dict(type='int', default=2),
|
||||
vmmem=dict(type='int', default=1),
|
||||
disks=dict(type='list'),
|
||||
disks=dict(type='list', elements='str'),
|
||||
osver=dict(type='str', default="rhel_6x64"),
|
||||
ifaces=dict(type='list', aliases=['interfaces', 'nics']),
|
||||
ifaces=dict(type='list', elements='str', aliases=['interfaces', 'nics']),
|
||||
timeout=dict(type='int'),
|
||||
mempol=dict(type='int', default=1),
|
||||
vm_ha=dict(type='bool', default=True),
|
||||
cpu_share=dict(type='int', default=0),
|
||||
boot_order=dict(type='list', default=['hd', 'network']),
|
||||
boot_order=dict(type='list', elements='str', default=['hd', 'network']),
|
||||
del_prot=dict(type='bool', default=True),
|
||||
cd_drive=dict(type='str'),
|
||||
),
|
||||
|
||||
@@ -39,6 +39,7 @@ options:
|
||||
- A list of specific functions to deploy.
|
||||
- If this is not provided, all functions in the service will be deployed.
|
||||
type: list
|
||||
elements: str
|
||||
default: []
|
||||
region:
|
||||
description:
|
||||
@@ -166,7 +167,7 @@ def main():
|
||||
argument_spec=dict(
|
||||
service_path=dict(type='path', required=True),
|
||||
state=dict(type='str', default='present', choices=['absent', 'present']),
|
||||
functions=dict(type='list'),
|
||||
functions=dict(type='list', elements='str'),
|
||||
region=dict(type='str', default=''),
|
||||
stage=dict(type='str', default=''),
|
||||
deploy=dict(type='bool', default=True),
|
||||
|
||||
@@ -20,19 +20,23 @@ options:
|
||||
choices: ['planned', 'present', 'absent']
|
||||
description:
|
||||
- Goal state of given stage/project
|
||||
type: str
|
||||
default: present
|
||||
binary_path:
|
||||
description:
|
||||
- The path of a terraform binary to use, relative to the 'service_path'
|
||||
unless you supply an absolute path.
|
||||
type: path
|
||||
project_path:
|
||||
description:
|
||||
- The path to the root of the Terraform directory with the
|
||||
vars.tf/main.tf/etc to use.
|
||||
type: path
|
||||
required: true
|
||||
workspace:
|
||||
description:
|
||||
- The terraform workspace to work with.
|
||||
type: str
|
||||
default: default
|
||||
purge_workspace:
|
||||
description:
|
||||
@@ -46,11 +50,13 @@ options:
|
||||
- The path to an existing Terraform plan file to apply. If this is not
|
||||
specified, Ansible will build a new TF plan and execute it.
|
||||
Note that this option is required if 'state' has the 'planned' value.
|
||||
type: path
|
||||
state_file:
|
||||
description:
|
||||
- The path to an existing Terraform state file to use when building plan.
|
||||
If this is not specified, the default `terraform.tfstate` will be used.
|
||||
- This option is ignored when plan is specified.
|
||||
type: path
|
||||
variables_files:
|
||||
description:
|
||||
- The path to a variables file for Terraform to fill into the TF
|
||||
@@ -63,19 +69,24 @@ options:
|
||||
description:
|
||||
- A group of key-values to override template variables or those in
|
||||
variables files.
|
||||
type: dict
|
||||
targets:
|
||||
description:
|
||||
- A list of specific resources to target in this plan/application. The
|
||||
resources selected here will also auto-include any dependencies.
|
||||
type: list
|
||||
elements: str
|
||||
lock:
|
||||
description:
|
||||
- Enable statefile locking, if you use a service that accepts locks (such
|
||||
as S3+DynamoDB) to store your statefile.
|
||||
type: bool
|
||||
default: true
|
||||
lock_timeout:
|
||||
description:
|
||||
- How long to maintain the lock on the statefile, if you use a service
|
||||
that accepts locks (such as S3+DynamoDB).
|
||||
type: int
|
||||
force_init:
|
||||
description:
|
||||
- To avoid duplicating infra, if a state file can't be found this will
|
||||
@@ -86,6 +97,7 @@ options:
|
||||
backend_config:
|
||||
description:
|
||||
- A group of key-values to provide at init stage to the -backend-config parameter.
|
||||
type: dict
|
||||
backend_config_files:
|
||||
description:
|
||||
- The path to a configuration file to provide at init state to the -backend-config parameter.
|
||||
@@ -281,7 +293,7 @@ def main():
|
||||
variables_files=dict(aliases=['variables_file'], type='list', elements='path', default=None),
|
||||
plan_file=dict(type='path'),
|
||||
state_file=dict(type='path'),
|
||||
targets=dict(type='list', default=[]),
|
||||
targets=dict(type='list', elements='str', default=[]),
|
||||
lock=dict(type='bool', default=True),
|
||||
lock_timeout=dict(type='int',),
|
||||
force_init=dict(type='bool', default=False),
|
||||
@@ -368,7 +380,7 @@ def main():
|
||||
if needs_application and not module.check_mode and not state == 'planned':
|
||||
rc, out, err = module.run_command(command, cwd=project_path)
|
||||
# checks out to decide if changes were made during execution
|
||||
if '0 added, 0 changed' not in out and not state == "absent" or '0 destroyed' not in out:
|
||||
if ' 0 added, 0 changed' not in out and not state == "absent" or ' 0 destroyed' not in out:
|
||||
changed = True
|
||||
if rc != 0:
|
||||
module.fail_json(
|
||||
|
||||
370
plugins/modules/cloud/scaleway/scaleway_database_backup.py
Normal file
370
plugins/modules/cloud/scaleway/scaleway_database_backup.py
Normal file
@@ -0,0 +1,370 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Scaleway database backups management module
|
||||
#
|
||||
# Copyright (C) 2020 Guillaume Rodriguez (g.rodriguez@opendecide.com).
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
---
module: scaleway_database_backup
short_description: Scaleway database backups management module
version_added: 1.2.0
author: Guillaume Rodriguez (@guillaume_ro_fr)
description:
    - This module manages database backups on Scaleway account U(https://developer.scaleway.com).
extends_documentation_fragment:
    - community.general.scaleway
options:
    state:
        description:
            - Indicate desired state of the database backup.
            - C(present) creates a backup.
            - C(absent) deletes the backup.
            - C(exported) creates a download link for the backup.
            - C(restored) restores the backup to a new database.
        type: str
        default: present
        choices:
            - present
            - absent
            - exported
            - restored

    region:
        description:
            - Scaleway region to use (for example C(fr-par)).
        type: str
        required: true
        choices:
            - fr-par
            - nl-ams

    id:
        description:
            - UUID used to identify the database backup.
            - Required for C(absent), C(exported) and C(restored) states.
        type: str

    name:
        description:
            - Name used to identify the database backup.
            - Required for C(present) state.
            - Ignored when C(state=absent), C(state=exported) or C(state=restored).
        type: str
        required: false

    database_name:
        description:
            - Name used to identify the database.
            - Required for C(present) and C(restored) states.
            - Ignored when C(state=absent) or C(state=exported).
        type: str
        required: false

    instance_id:
        description:
            - UUID of the instance associated to the database backup.
            - Required for C(present) and C(restored) states.
            - Ignored when C(state=absent) or C(state=exported).
        type: str
        required: false

    expires_at:
        description:
            - Expiration datetime of the database backup (ISO 8601 format).
            - Ignored when C(state=absent), C(state=exported) or C(state=restored).
        type: str
        required: false

    wait:
        description:
            - Wait for the instance to reach its desired state before returning.
        type: bool
        default: false

    wait_timeout:
        description:
            - Time to wait for the backup to reach the expected state.
        type: int
        required: false
        default: 300

    wait_sleep_time:
        description:
            - Time to wait before every attempt to check the state of the backup.
        type: int
        required: false
        default: 3
'''
|
||||
|
||||
EXAMPLES = '''
- name: Create a backup
  community.general.scaleway_database_backup:
    name: 'my_backup'
    state: present
    region: 'fr-par'
    database_name: 'my-database'
    instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb'

- name: Export a backup
  community.general.scaleway_database_backup:
    id: '6ef1125a-037e-494f-a911-6d9c49a51691'
    state: exported
    region: 'fr-par'

- name: Restore a backup
  community.general.scaleway_database_backup:
    id: '6ef1125a-037e-494f-a911-6d9c49a51691'
    state: restored
    region: 'fr-par'
    database_name: 'my-new-database'
    instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb'

- name: Remove a backup
  community.general.scaleway_database_backup:
    id: '6ef1125a-037e-494f-a911-6d9c49a51691'
    state: absent
    region: 'fr-par'
'''
|
||||
|
||||
RETURN = '''
metadata:
    description: Backup metadata.
    returned: when C(state=present), C(state=exported) or C(state=restored)
    type: dict
    sample: {
        "metadata": {
            "created_at": "2020-08-06T12:42:05.631049Z",
            "database_name": "my-database",
            "download_url": null,
            "download_url_expires_at": null,
            "expires_at": null,
            "id": "a15297bd-0c4a-4b4f-8fbb-b36a35b7eb07",
            "instance_id": "617be32e-6497-4ed7-b4c7-0ee5a81edf49",
            "instance_name": "my-instance",
            "name": "backup_name",
            "region": "fr-par",
            "size": 600000,
            "status": "ready",
            "updated_at": "2020-08-06T12:42:10.581649Z"
        }
    }
'''
|
||||
|
||||
import datetime
|
||||
import time
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.scaleway import (
|
||||
Scaleway,
|
||||
scaleway_argument_spec,
|
||||
SCALEWAY_REGIONS,
|
||||
)
|
||||
|
||||
# Backup statuses considered terminal: once one of these is reached,
# wait_to_complete_state_transition() stops polling.
stable_states = (
    'ready',
    'deleting',
)
|
||||
|
||||
|
||||
def wait_to_complete_state_transition(module, account_api, backup=None):
    """Poll the backup until it reaches a stable state.

    :param module: the AnsibleModule, used for ``wait_timeout``,
        ``wait_sleep_time`` and ``region`` parameters and for exiting on error.
    :param account_api: Scaleway API client used to re-fetch the backup.
    :param backup: backup JSON dict (or ``None``).
    :return: the latest backup JSON once its status is stable, or the input
        ``backup`` unchanged when it is ``None`` or already stable.
        Fails the module when the transition does not finish in time.
    """
    wait_timeout = module.params['wait_timeout']
    wait_sleep_time = module.params['wait_sleep_time']

    # Nothing to wait for: no backup, or it is already in a terminal state.
    if backup is None or backup['status'] in stable_states:
        return backup

    start = datetime.datetime.utcnow()
    end = start + datetime.timedelta(seconds=wait_timeout)
    while datetime.datetime.utcnow() < end:
        module.debug('We are going to wait for the backup to finish its transition')

        response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']))
        if not response.ok:
            module.fail_json(msg='Error getting backup [{0}: {1}]'.format(response.status_code, response.json))
            # fail_json normally exits; the break only guards against a
            # non-raising stub in tests.
            break
        response_json = response.json

        if response_json['status'] in stable_states:
            module.debug('It seems that the backup is not in transition anymore.')
            module.debug('Backup in state: %s' % response_json['status'])
            return response_json
        time.sleep(wait_sleep_time)
    else:
        # Loop exhausted without reaching a stable state.
        module.fail_json(msg='Backup takes too long to finish its transition')
|
||||
|
||||
|
||||
def present_strategy(module, account_api, backup):
    """Create the backup, or update an existing one's name/expiration.

    Exits the module: ``changed=False`` when the existing backup already
    matches the requested name and expiration date (an omitted attribute
    counts as matching), ``changed=True`` with the resulting metadata
    otherwise; fails on API errors.
    """
    name = module.params['name']
    database_name = module.params['database_name']
    instance_id = module.params['instance_id']
    expiration_date = module.params['expires_at']

    if backup is not None:
        if (backup['name'] == name or name is None) and (
                backup['expires_at'] == expiration_date or expiration_date is None):
            # Already in the requested state; optionally wait for stability.
            wait_to_complete_state_transition(module, account_api, backup)
            module.exit_json(changed=False)

        if module.check_mode:
            module.exit_json(changed=True)

        # Patch only the attributes that were explicitly provided.
        payload = {}
        if name is not None:
            payload['name'] = name
        if expiration_date is not None:
            payload['expires_at'] = expiration_date

        response = account_api.patch('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']),
                                     payload)
        if response.ok:
            result = wait_to_complete_state_transition(module, account_api, response.json)
            module.exit_json(changed=True, metadata=result)

        module.fail_json(msg='Error modifying backup [{0}: {1}]'.format(response.status_code, response.json))

    if module.check_mode:
        module.exit_json(changed=True)

    payload = {'name': name, 'database_name': database_name, 'instance_id': instance_id}
    if expiration_date is not None:
        payload['expires_at'] = expiration_date

    response = account_api.post('/rdb/v1/regions/%s/backups' % module.params.get('region'), payload)

    if response.ok:
        result = wait_to_complete_state_transition(module, account_api, response.json)
        module.exit_json(changed=True, metadata=result)

    module.fail_json(msg='Error creating backup [{0}: {1}]'.format(response.status_code, response.json))
|
||||
|
||||
|
||||
def absent_strategy(module, account_api, backup):
    """Delete the backup.

    Exits the module: ``changed=False`` when the backup does not exist,
    ``changed=True`` with the deletion metadata otherwise; fails on API errors.
    """
    if backup is None:
        module.exit_json(changed=False)

    if module.check_mode:
        module.exit_json(changed=True)

    response = account_api.delete('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']))
    if response.ok:
        result = wait_to_complete_state_transition(module, account_api, response.json)
        module.exit_json(changed=True, metadata=result)

    module.fail_json(msg='Error deleting backup [{0}: {1}]'.format(response.status_code, response.json))
|
||||
|
||||
|
||||
def exported_strategy(module, account_api, backup):
    """Create a download link for the backup.

    Exits the module: ``changed=False`` when a download URL already exists,
    ``changed=True`` with the export metadata otherwise; fails when the
    backup id is unknown or on API errors.
    """
    if backup is None:
        module.fail_json(msg=('Backup "%s" not found' % module.params['id']))

    if backup['download_url'] is not None:
        # A link is already available; nothing to do.
        module.exit_json(changed=False, metadata=backup)

    if module.check_mode:
        module.exit_json(changed=True)

    # The export endpoint requires the backup to be in a stable state first.
    backup = wait_to_complete_state_transition(module, account_api, backup)
    response = account_api.post(
        '/rdb/v1/regions/%s/backups/%s/export' % (module.params.get('region'), backup['id']), {})

    if response.ok:
        result = wait_to_complete_state_transition(module, account_api, response.json)
        module.exit_json(changed=True, metadata=result)

    module.fail_json(msg='Error exporting backup [{0}: {1}]'.format(response.status_code, response.json))
|
||||
|
||||
|
||||
def restored_strategy(module, account_api, backup):
    """Restore the backup into the given database/instance.

    Exits the module with ``changed=True`` and the restore metadata on
    success; fails when the backup id is unknown or on API errors.
    """
    if backup is None:
        module.fail_json(msg=('Backup "%s" not found' % module.params['id']))

    database_name = module.params['database_name']
    instance_id = module.params['instance_id']

    if module.check_mode:
        module.exit_json(changed=True)

    # The restore endpoint requires the backup to be in a stable state first.
    backup = wait_to_complete_state_transition(module, account_api, backup)

    payload = {'database_name': database_name, 'instance_id': instance_id}
    response = account_api.post('/rdb/v1/regions/%s/backups/%s/restore' % (module.params.get('region'), backup['id']),
                                payload)

    if response.ok:
        result = wait_to_complete_state_transition(module, account_api, response.json)
        module.exit_json(changed=True, metadata=result)

    module.fail_json(msg='Error restoring backup [{0}: {1}]'.format(response.status_code, response.json))
|
||||
|
||||
|
||||
# Dispatch table mapping the requested state to its handler function.
state_strategy = {
    'present': present_strategy,
    'absent': absent_strategy,
    'exported': exported_strategy,
    'restored': restored_strategy,
}
|
||||
|
||||
|
||||
def core(module):
    """Fetch the backup by id (when given) and dispatch to the state handler.

    A 404 from the API is not an error here: the backup simply does not
    exist, and each strategy decides what that means (e.g. 'absent' becomes
    a no-op, 'present' creates it).
    """
    state = module.params['state']
    backup_id = module.params['id']

    account_api = Scaleway(module)

    if backup_id is None:
        backup_by_id = None
    else:
        response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup_id))
        status_code = response.status_code
        backup_json = response.json
        backup_by_id = None
        if status_code == 404:
            backup_by_id = None
        elif response.ok:
            backup_by_id = backup_json
        else:
            module.fail_json(msg='Error getting backup [{0}: {1}]'.format(status_code, response.json['message']))

    state_strategy[state](module, account_api, backup_by_id)
|
||||
|
||||
|
||||
def main():
    """Module entry point: build the argument spec, validate, and run core().

    Argument interdependencies are enforced declaratively: database_name and
    instance_id always come together, and each state requires its own subset
    of parameters (mirroring the DOCUMENTATION block).
    """
    argument_spec = scaleway_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['absent', 'present', 'exported', 'restored']),
        region=dict(required=True, choices=SCALEWAY_REGIONS),
        id=dict(),
        name=dict(type='str'),
        database_name=dict(required=False),
        instance_id=dict(required=False),
        expires_at=dict(),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=300),
        wait_sleep_time=dict(type='int', default=3),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_together=[
            ['database_name', 'instance_id'],
        ],
        required_if=[
            ['state', 'present', ['name', 'database_name', 'instance_id']],
            ['state', 'absent', ['id']],
            ['state', 'exported', ['id']],
            ['state', 'restored', ['id', 'database_name', 'instance_id']],
        ],
    )

    core(module)


if __name__ == '__main__':
    main()
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user