From 72c13c85ad648c5a54b4baaa8b9ee1fa2b51fb5b Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 20 Apr 2026 12:35:43 +0200 Subject: [PATCH] Prepare main for 13.0.0 (#11834) * Bump version to 13.0.0. * Remove deprecated modules and plugins. * Remove deprecated module utils. * Remove leftovers. * Remove mode=compatibility. * Change default of is_pre740 from true to false. * Change default of force_defaults from true to false. * Remove support for ubuntu_legacy mechanism. * Remove cpanm compatibility tests. --- .github/BOTMETA.yml | 35 - changelogs/fragments/removal.yml | 46 + galaxy.yml | 2 +- meta/runtime.yml | 80 +- plugins/doc_fragments/dimensiondata.py | 53 - plugins/doc_fragments/dimensiondata_wait.py | 39 - plugins/doc_fragments/oracle.py | 80 - .../oracle_creatable_resource.py | 29 - .../oracle_display_name_option.py | 21 - plugins/doc_fragments/oracle_name_option.py | 20 - plugins/doc_fragments/oracle_tags.py | 25 - plugins/doc_fragments/oracle_wait_options.py | 30 - plugins/lookup/hiera.py | 97 - plugins/module_utils/cloud.py | 222 -- plugins/module_utils/database.py | 194 -- plugins/module_utils/dimensiondata.py | 331 --- plugins/module_utils/django.py | 4 - .../identity/keycloak/keycloak.py | 4 - plugins/module_utils/known_hosts.py | 171 -- plugins/module_utils/oneandone.py | 245 --- plugins/module_utils/oracle/oci_utils.py | 1787 ----------------- plugins/module_utils/pipx.py | 19 - plugins/module_utils/saslprep.py | 171 -- plugins/modules/atomic_container.py | 232 --- plugins/modules/atomic_host.py | 105 - plugins/modules/atomic_image.py | 177 -- plugins/modules/catapult.py | 154 -- plugins/modules/cpanm.py | 38 +- plugins/modules/dimensiondata_network.py | 272 --- plugins/modules/dimensiondata_vlan.py | 530 ----- plugins/modules/github_repo.py | 13 +- plugins/modules/locale_gen.py | 54 +- plugins/modules/oci_vcn.py | 215 -- plugins/modules/oneandone_firewall_policy.py | 503 ----- plugins/modules/oneandone_load_balancer.py | 634 ------ 
.../modules/oneandone_monitoring_policy.py | 948 --------- plugins/modules/oneandone_private_network.py | 418 ---- plugins/modules/oneandone_public_ip.py | 306 --- plugins/modules/oneandone_server.py | 656 ------ plugins/modules/pushbullet.py | 193 -- plugins/modules/rocketchat.py | 14 +- plugins/modules/sensu_check.py | 379 ---- plugins/modules/sensu_client.py | 285 --- plugins/modules/sensu_handler.py | 293 --- plugins/modules/sensu_silence.py | 270 --- plugins/modules/sensu_subscription.py | 155 -- plugins/modules/spotinst_aws_elastigroup.py | 1473 -------------- plugins/modules/typetalk.py | 138 -- .../integration/targets/sensu_client/aliases | 6 - .../targets/sensu_client/tasks/main.yml | 179 -- .../integration/targets/sensu_handler/aliases | 6 - .../targets/sensu_handler/tasks/main.yml | 129 -- .../targets/sensu_handler/tasks/pipe.yml | 25 - .../targets/sensu_handler/tasks/set.yml | 53 - .../targets/sensu_handler/tasks/tcp.yml | 56 - .../targets/sensu_handler/tasks/transport.yml | 56 - .../targets/sensu_handler/tasks/udp.yml | 56 - .../module_utils/cloud/test_backoff.py | 51 - .../plugins/module_utils/test_database.py | 139 -- .../plugins/module_utils/test_known_hosts.py | 120 -- .../plugins/module_utils/test_saslprep.py | 54 - tests/unit/plugins/modules/test_cpanm.yaml | 66 - 62 files changed, 104 insertions(+), 13052 deletions(-) create mode 100644 changelogs/fragments/removal.yml delete mode 100644 plugins/doc_fragments/dimensiondata.py delete mode 100644 plugins/doc_fragments/dimensiondata_wait.py delete mode 100644 plugins/doc_fragments/oracle.py delete mode 100644 plugins/doc_fragments/oracle_creatable_resource.py delete mode 100644 plugins/doc_fragments/oracle_display_name_option.py delete mode 100644 plugins/doc_fragments/oracle_name_option.py delete mode 100644 plugins/doc_fragments/oracle_tags.py delete mode 100644 plugins/doc_fragments/oracle_wait_options.py delete mode 100644 plugins/lookup/hiera.py delete mode 100644 plugins/module_utils/cloud.py 
delete mode 100644 plugins/module_utils/database.py delete mode 100644 plugins/module_utils/dimensiondata.py delete mode 100644 plugins/module_utils/known_hosts.py delete mode 100644 plugins/module_utils/oneandone.py delete mode 100644 plugins/module_utils/oracle/oci_utils.py delete mode 100644 plugins/module_utils/saslprep.py delete mode 100644 plugins/modules/atomic_container.py delete mode 100644 plugins/modules/atomic_host.py delete mode 100644 plugins/modules/atomic_image.py delete mode 100644 plugins/modules/catapult.py delete mode 100644 plugins/modules/dimensiondata_network.py delete mode 100644 plugins/modules/dimensiondata_vlan.py delete mode 100644 plugins/modules/oci_vcn.py delete mode 100644 plugins/modules/oneandone_firewall_policy.py delete mode 100644 plugins/modules/oneandone_load_balancer.py delete mode 100644 plugins/modules/oneandone_monitoring_policy.py delete mode 100644 plugins/modules/oneandone_private_network.py delete mode 100644 plugins/modules/oneandone_public_ip.py delete mode 100644 plugins/modules/oneandone_server.py delete mode 100644 plugins/modules/pushbullet.py delete mode 100644 plugins/modules/sensu_check.py delete mode 100644 plugins/modules/sensu_client.py delete mode 100644 plugins/modules/sensu_handler.py delete mode 100644 plugins/modules/sensu_silence.py delete mode 100644 plugins/modules/sensu_subscription.py delete mode 100644 plugins/modules/spotinst_aws_elastigroup.py delete mode 100644 plugins/modules/typetalk.py delete mode 100644 tests/integration/targets/sensu_client/aliases delete mode 100644 tests/integration/targets/sensu_client/tasks/main.yml delete mode 100644 tests/integration/targets/sensu_handler/aliases delete mode 100644 tests/integration/targets/sensu_handler/tasks/main.yml delete mode 100644 tests/integration/targets/sensu_handler/tasks/pipe.yml delete mode 100644 tests/integration/targets/sensu_handler/tasks/set.yml delete mode 100644 tests/integration/targets/sensu_handler/tasks/tcp.yml delete mode 
100644 tests/integration/targets/sensu_handler/tasks/transport.yml delete mode 100644 tests/integration/targets/sensu_handler/tasks/udp.yml delete mode 100644 tests/unit/plugins/module_utils/cloud/test_backoff.py delete mode 100644 tests/unit/plugins/module_utils/test_database.py delete mode 100644 tests/unit/plugins/module_utils/test_known_hosts.py delete mode 100644 tests/unit/plugins/module_utils/test_saslprep.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index d69f7a8eb3..61406dcab5 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -305,8 +305,6 @@ files: $lookups/flattened.py: {} $lookups/github_app_access_token.py: maintainers: weisheng-p blavoie - $lookups/hiera.py: - maintainers: jparrill $lookups/keyring.py: {} $lookups/lastpass.py: {} $lookups/lmdb_kv.py: @@ -399,9 +397,6 @@ files: maintainers: russoz $module_utils/net_tools/pritunl/: maintainers: Lowess - $module_utils/oracle/oci_utils.py: - labels: cloud - maintainers: $team_oracle $module_utils/pacemaker.py: maintainers: munchtoast $module_utils/pipx.py: @@ -487,10 +482,6 @@ files: maintainers: evgkrsk $modules/archive.py: maintainers: bendoh - $modules/atomic_: - maintainers: krsacme - $modules/atomic_container.py: - maintainers: giuseppe krsacme $modules/awall.py: maintainers: tdtrask $modules/beadm.py: @@ -517,8 +508,6 @@ files: maintainers: natefoo $modules/cargo.py: maintainers: radek-sprta - $modules/catapult.py: - maintainers: Jmainguy $modules/circonus_annotation.py: maintainers: NickatEpic $modules/cisco_webex.py: @@ -558,11 +547,6 @@ files: maintainers: shamilovstas $modules/deploy_helper.py: maintainers: ramondelafuente - $modules/dimensiondata_network.py: - labels: dimensiondata_network - maintainers: aimonb tintoy - $modules/dimensiondata_vlan.py: - maintainers: tintoy $modules/discord.py: maintainers: cwollinger $modules/django_check.py: @@ -1054,8 +1038,6 @@ files: maintainers: $team_wdc $modules/ocapi_info.py: maintainers: $team_wdc - $modules/oci_vcn.py: - 
maintainers: $team_oracle rohitChaware $modules/odbc.py: maintainers: john-westcott-iv $modules/office_365_connector_card.py: @@ -1072,8 +1054,6 @@ files: maintainers: rvalle $modules/one_vnet.py: maintainers: abakanovskii - $modules/oneandone_: - maintainers: aajdinov edevenport $modules/onepassword_info.py: maintainers: Rylon $modules/oneview_: @@ -1198,8 +1178,6 @@ files: $modules/puppet.py: labels: puppet maintainers: emonty - $modules/pushbullet.py: - maintainers: willybarro $modules/pushover.py: maintainers: weaselkeeper wopfel $modules/python_requirements_info.py: @@ -1313,14 +1291,6 @@ files: maintainers: bachradsusi dankeder jamescassell $modules/sendgrid.py: maintainers: makaimc - $modules/sensu_: - maintainers: dmsimard - $modules/sensu_check.py: - maintainers: andsens - $modules/sensu_silence.py: - maintainers: smbambling - $modules/sensu_subscription.py: - maintainers: andsens $modules/seport.py: maintainers: dankeder $modules/serverless.py: @@ -1360,8 +1330,6 @@ files: maintainers: orgito $modules/spectrum_model_attrs.py: maintainers: tgates81 - $modules/spotinst_aws_elastigroup.py: - maintainers: talzur $modules/ss_3par_cpg.py: maintainers: farhan7500 gautamphegde $modules/ssh_config.py: @@ -1416,8 +1384,6 @@ files: maintainers: indrajitr jasperla tmshn $modules/twilio.py: maintainers: makaimc - $modules/typetalk.py: - maintainers: tksmd $modules/udm_: maintainers: keachi $modules/ufw.py: @@ -1656,7 +1622,6 @@ macros: team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding - team_oracle: manojmeda mross22 nalsaber team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06 jyundt team_rhsm: cnsnyder ptoscano team_scaleway: remyleone abarbare diff --git a/changelogs/fragments/removal.yml b/changelogs/fragments/removal.yml new file mode 100644 index 
0000000000..777c974842 --- /dev/null +++ b/changelogs/fragments/removal.yml @@ -0,0 +1,46 @@ +removed_features: + - "dimensiondata - the doc fragment has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "dimensiondata_wait - the doc fragment has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "oracle - the doc fragment has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "oracle_creatable_resource - the doc fragment has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "oracle_display_name_option - the doc fragment has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "oracle_name_option - the doc fragment has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "oracle_tags - the doc fragment has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "oracle_wait_options - the doc fragment has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "hiera lookup plugin - the lookup has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "cloud module utils - the module utils has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "database module utils - the module utils has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "dimensiondata module utils - the module utils has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "known_hosts module utils - the module utils has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "oneandone module utils - the module utils has been removed (https://github.com/ansible-collections/community.general/pull/11834)." 
+ - "oracle.oci_utils module utils - the module utils has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "saslprep module utils - the module utils has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "atomic_container - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "atomic_host - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "atomic_image - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "catapult - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "dimensiondata_network - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "dimensiondata_vlan - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "oci_vcn - the module has been removed. Use ``oracle.oci.oci_network_vcn`` instead (https://github.com/ansible-collections/community.general/pull/11834)." + - "oneandone_firewall_policy - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "oneandone_load_balancer - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "oneandone_monitoring_policy - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "oneandone_private_network - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "oneandone_public_ip - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "oneandone_server - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)." 
+ - "pushbullet - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "sensu_check - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "sensu_client - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "sensu_handler - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "sensu_silence - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "sensu_subscription - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "spotinst_aws_elastigroup - the module has been removed. Use ``spot.cloud_modules.aws_elastigroup`` instead (https://github.com/ansible-collections/community.general/pull/11834)." + - "typetalk - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "keycloak module utils - the deprecated ``KeycloakAPI.add_user_in_group()`` method has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "django module utils - the deprecated ``database``, ``noinput``, ``dry_run``, and ``check`` parameters for the Django runner have been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "pipx module utils - the deprecated ``make_process_list()`` function has been removed (https://github.com/ansible-collections/community.general/pull/11834)." + - "cpanm - the ``mode=compatibility`` is no longer available. Migrate to ``mode=new`` (https://github.com/ansible-collections/community.general/pull/11834)." + - "locale_gen - support for the ``ubuntu_legacy`` mechanism has been removed. Only the ``glibc`` mechanism is now supported by the module (https://github.com/ansible-collections/community.general/pull/11834)." 
+breaking_changes: + - "rocketchat - the default for the ``is_pre740`` option changed from ``true`` to ``false`` (https://github.com/ansible-collections/community.general/pull/11834)." + - "github_repo - the default for the ``force_defaults`` option changed from ``true`` to ``false`` (https://github.com/ansible-collections/community.general/pull/11834)." diff --git a/galaxy.yml b/galaxy.yml index a9b7ea7d8f..e93f48394b 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -5,7 +5,7 @@ namespace: community name: general -version: 12.6.0 +version: 13.0.0 readme: README.md authors: - Ansible (https://github.com/ansible) diff --git a/meta/runtime.yml b/meta/runtime.yml index 0844aa66fc..dd48564069 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -127,11 +127,9 @@ plugin_routing: hashi_vault: redirect: community.hashi_vault.hashi_vault hiera: - deprecation: + tombstone: removal_version: 13.0.0 - warning_text: >- - Hiera has been deprecated a long time ago. - If you disagree with this deprecation, please create an issue in the community.general repository. + warning_text: Hiera has been deprecated a long time ago. manifold: tombstone: removal_version: 11.0.0 @@ -148,15 +146,15 @@ plugin_routing: removal_version: 3.0.0 warning_text: Use community.general.ali_instance_info instead. atomic_container: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Project Atomic was sunset by the end of 2019. atomic_host: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Project Atomic was sunset by the end of 2019. atomic_image: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Project Atomic was sunset by the end of 2019. bearychat: @@ -164,7 +162,7 @@ plugin_routing: removal_version: 12.0.0 warning_text: Chat service is no longer available. catapult: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: DNS fails to resolve the API endpoint used by the module since Oct 2024. 
See https://github.com/ansible-collections/community.general/issues/10318 for details. cisco_spark: @@ -210,11 +208,11 @@ plugin_routing: removal_version: 10.0.0 warning_text: Use community.general.consul_token and/or community.general.consul_policy instead. dimensiondata_network: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Service and its endpoints are no longer available. dimensiondata_vlan: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Service and its endpoints are no longer available. docker_compose: @@ -527,7 +525,7 @@ plugin_routing: nios_zone: redirect: infoblox.nios_modules.nios_zone oci_vcn: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Use oracle.oci.oci_network_vcn instead. ome_device_info: @@ -537,27 +535,27 @@ plugin_routing: removal_version: 3.0.0 warning_text: Use community.general.one_image_info instead. oneandone_firewall_policy: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: DNS fails to resolve the API endpoint used by the module. oneandone_load_balancer: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: DNS fails to resolve the API endpoint used by the module. oneandone_monitoring_policy: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: DNS fails to resolve the API endpoint used by the module. oneandone_private_network: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: DNS fails to resolve the API endpoint used by the module. oneandone_public_ip: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: DNS fails to resolve the API endpoint used by the module. oneandone_server: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: DNS fails to resolve the API endpoint used by the module. onepassword_facts: @@ -868,7 +866,7 @@ plugin_routing: removal_version: 3.0.0 warning_text: Use purestorage.flashblade.purefb_info instead. 
pushbullet: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Module relies on Python package pushbullet.py which is not maintained and supports only up to Python 3.2. python_requirements_facts: @@ -1024,23 +1022,23 @@ plugin_routing: removal_version: 3.0.0 warning_text: Use community.general.scaleway_volume_info instead. sensu_check: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. sensu_client: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. sensu_handler: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. sensu_silence: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. sensu_subscription: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. sf_account_manager: @@ -1068,7 +1066,7 @@ plugin_routing: removal_version: 3.0.0 warning_text: Use community.general.smartos_image_info instead. spotinst_aws_elastigroup: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Module relies on unsupported Python package. Use the module spot.cloud_modules.aws_elastigroup instead. stackdriver: @@ -1082,7 +1080,7 @@ plugin_routing: removal_version: 15.0.0 warning_text: ClearLinux was made EOL in July 2025. If you think the module is still useful for another distribution, please create an issue in the community.general repository. typetalk: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: The typetalk service will be discontinued on Dec 2025. 
vertica_facts: @@ -1122,11 +1120,11 @@ plugin_routing: _gcp: redirect: community.google._gcp dimensiondata: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Service and its endpoints are no longer available. dimensiondata_wait: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Service and its endpoints are no longer available. docker: @@ -1140,27 +1138,27 @@ plugin_routing: nios: redirect: infoblox.nios_modules.nios oracle: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. oracle_creatable_resource: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. oracle_display_name_option: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. oracle_name_option: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. oracle_tags: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. oracle_wait_options: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. postgresql: @@ -1181,15 +1179,15 @@ plugin_routing: package pyrax. module_utils: cloud: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: This code is not used by community.general. If you want to use it in another collection, please copy it over. database: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: This code is not used by community.general. If you want to use it in another collection, please copy it over. 
dimensiondata: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: Service and its endpoints are no longer available. docker.common: @@ -1205,19 +1203,19 @@ plugin_routing: hetzner: redirect: community.hrobot.robot known_hosts: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: This code is not used by community.general. If you want to use it in another collection, please copy it over. kubevirt: redirect: community.kubevirt.kubevirt net_tools.nios.api: redirect: infoblox.nios_modules.api - oci_utils: - deprecation: + oracle.oci_utils: + tombstone: removal_version: 13.0.0 warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. oneandone: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: DNS fails to resolve the API endpoint used by the module. postgresql: @@ -1240,7 +1238,7 @@ plugin_routing: remote_management.dellemc.ome: redirect: dellemc.openmanage.ome saslprep: - deprecation: + tombstone: removal_version: 13.0.0 warning_text: This code is not used by community.general. If you want to use it in another collection, please copy it over. inventory: diff --git a/plugins/doc_fragments/dimensiondata.py b/plugins/doc_fragments/dimensiondata.py deleted file mode 100644 index 91ece27619..0000000000 --- a/plugins/doc_fragments/dimensiondata.py +++ /dev/null @@ -1,53 +0,0 @@ -# -# Copyright (c) 2016, Dimension Data -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -# Authors: -# - Adam Friedman - -# -# DEPRECATED -# -# This doc fragment is deprecated and will be removed in community.general 13.0.0 -# - - -class ModuleDocFragment: - # Dimension Data doc fragment - DOCUMENTATION = r""" -options: - region: - description: - - The target region. - - Regions are defined in Apache libcloud project [libcloud/common/dimensiondata.py]. 
- - They are also listed in U(https://libcloud.readthedocs.io/en/latest/compute/drivers/dimensiondata.html). - - Note that the default value C(na) stands for "North America". - - The module prepends C(dd-) to the region choice. - type: str - default: na - mcp_user: - description: - - The username used to authenticate to the CloudControl API. - - If not specified, falls back to E(MCP_USER) from environment variable or C(~/.dimensiondata). - type: str - mcp_password: - description: - - The password used to authenticate to the CloudControl API. - - If not specified, falls back to E(MCP_PASSWORD) from environment variable or C(~/.dimensiondata). - - Required if O(mcp_user) is specified. - type: str - location: - description: - - The target datacenter. - type: str - required: true - validate_certs: - description: - - If V(false), SSL certificates are not validated. - - This should only be used on private instances of the CloudControl API that use self-signed certificates. - type: bool - default: true -""" diff --git a/plugins/doc_fragments/dimensiondata_wait.py b/plugins/doc_fragments/dimensiondata_wait.py deleted file mode 100644 index 933c7e9fbf..0000000000 --- a/plugins/doc_fragments/dimensiondata_wait.py +++ /dev/null @@ -1,39 +0,0 @@ -# -# Copyright (c) 2016, Dimension Data -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -# Authors: -# - Adam Friedman - -# -# DEPRECATED -# -# This doc fragment is deprecated and will be removed in community.general 13.0.0 -# - - -class ModuleDocFragment: - # Dimension Data ("wait-for-completion" parameters) doc fragment - DOCUMENTATION = r""" -options: - wait: - description: - - Should we wait for the task to complete before moving onto the next. - type: bool - default: false - wait_time: - description: - - The maximum amount of time (in seconds) to wait for the task to complete. 
- - Only applicable if O(wait=true). - type: int - default: 600 - wait_poll_interval: - description: - - The amount of time (in seconds) to wait between checks for task completion. - - Only applicable if O(wait=true). - type: int - default: 2 -""" diff --git a/plugins/doc_fragments/oracle.py b/plugins/doc_fragments/oracle.py deleted file mode 100644 index 50ff095f0a..0000000000 --- a/plugins/doc_fragments/oracle.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright (c) 2018, Oracle and/or its affiliates. -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -# -# DEPRECATED -# -# This fragment is deprecated and will be removed in community.general 13.0.0 -# - - -class ModuleDocFragment: - DOCUMENTATION = r""" -requirements: - - Python SDK for Oracle Cloud Infrastructure U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io) -notes: - - For OCI Python SDK configuration, please refer to U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/configuration.html). -options: - config_file_location: - description: - - Path to configuration file. If not set then the value of the E(OCI_CONFIG_FILE) environment variable, if any, is used. - Otherwise, defaults to C(~/.oci/config). - type: str - config_profile_name: - description: - - The profile to load from the config file referenced by O(config_file_location). If not set, then the value of the - E(OCI_CONFIG_PROFILE) environment variable, if any, is used. Otherwise, defaults to the C(DEFAULT) profile in O(config_file_location). - default: "DEFAULT" - type: str - api_user: - description: - - The OCID of the user, on whose behalf, OCI APIs are invoked. If not set, then the value of the E(OCI_USER_OCID) environment - variable, if any, is used. This option is required if the user is not specified through a configuration file (See - O(config_file_location)). 
To get the user's OCID, please refer U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). - type: str - api_user_fingerprint: - description: - - Fingerprint for the key pair being used. If not set, then the value of the E(OCI_USER_FINGERPRINT) environment variable, - if any, is used. This option is required if the key fingerprint is not specified through a configuration file (See - O(config_file_location)). To get the key pair's fingerprint value please refer to - U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). - type: str - api_user_key_file: - description: - - Full path and filename of the private key (in PEM format). If not set, then the value of the E(OCI_USER_KEY_FILE) - variable, if any, is used. This option is required if the private key is not specified through a configuration file - (See O(config_file_location)). If the key is encrypted with a pass-phrase, the O(api_user_key_pass_phrase) option - must also be provided. - type: path - api_user_key_pass_phrase: - description: - - Passphrase used by the key referenced in O(api_user_key_file), if it is encrypted. If not set, then the value of the - E(OCI_USER_KEY_PASS_PHRASE) variable, if any, is used. This option is required if the key passphrase is not specified - through a configuration file (See O(config_file_location)). - type: str - auth_type: - description: - - The type of authentication to use for making API requests. By default O(auth_type=api_key) based authentication is - performed and the API key (see O(api_user_key_file)) in your config file is used. If O(auth_type) is not specified, - the value of the E(OCI_ANSIBLE_AUTH_TYPE), if any, is used. Use O(auth_type=instance_principal) to use instance principal - based authentication when running ansible playbooks within an OCI compute instance. - choices: ['api_key', 'instance_principal'] - default: 'api_key' - type: str - tenancy: - description: - - OCID of your tenancy. 
If not set, then the value of the E(OCI_TENANCY) variable, if any, is used. This option is required - if the tenancy OCID is not specified through a configuration file (See O(config_file_location)). To get the tenancy - OCID, please refer to U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). - type: str - region: - description: - - The Oracle Cloud Infrastructure region to use for all OCI API requests. If not set, then the value of the E(OCI_REGION) - variable, if any, is used. This option is required if the region is not specified through a configuration file (See - O(config_file_location)). Please refer to U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/regions.htm) - for more information on OCI regions. - type: str -""" diff --git a/plugins/doc_fragments/oracle_creatable_resource.py b/plugins/doc_fragments/oracle_creatable_resource.py deleted file mode 100644 index 4ff93cbc63..0000000000 --- a/plugins/doc_fragments/oracle_creatable_resource.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2018, Oracle and/or its affiliates. -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -# -# DEPRECATED -# -# This fragment is deprecated and will be removed in community.general 13.0.0 -# - - -class ModuleDocFragment: - DOCUMENTATION = r""" -options: - force_create: - description: Whether to attempt non-idempotent creation of a resource. By default, create resource is an idempotent operation, - and does not create the resource if it already exists. Setting this option to V(true), forcefully creates a copy of - the resource, even if it already exists. This option is mutually exclusive with O(key_by). 
- default: false - type: bool - key_by: - description: The list of comma-separated attributes of this resource which should be used to uniquely identify an instance - of the resource. By default, all the attributes of a resource except O(freeform_tags) are used to uniquely identify - a resource. - type: list - elements: str -""" diff --git a/plugins/doc_fragments/oracle_display_name_option.py b/plugins/doc_fragments/oracle_display_name_option.py deleted file mode 100644 index 15954522ef..0000000000 --- a/plugins/doc_fragments/oracle_display_name_option.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2018, Oracle and/or its affiliates. -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -# -# DEPRECATED -# -# This fragment is deprecated and will be removed in community.general 13.0.0 -# - - -class ModuleDocFragment: - DOCUMENTATION = r""" -options: - display_name: - description: Use O(display_name) along with the other options to return only resources that match the given display name - exactly. - type: str -""" diff --git a/plugins/doc_fragments/oracle_name_option.py b/plugins/doc_fragments/oracle_name_option.py deleted file mode 100644 index dfb2ef8c27..0000000000 --- a/plugins/doc_fragments/oracle_name_option.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) 2018, Oracle and/or its affiliates. -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -# -# DEPRECATED -# -# This fragment is deprecated and will be removed in community.general 13.0.0 -# - - -class ModuleDocFragment: - DOCUMENTATION = r""" -options: - name: - description: Use O(name) along with the other options to return only resources that match the given name exactly. 
- type: str -""" diff --git a/plugins/doc_fragments/oracle_tags.py b/plugins/doc_fragments/oracle_tags.py deleted file mode 100644 index 0891fc8fb4..0000000000 --- a/plugins/doc_fragments/oracle_tags.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) 2018, Oracle and/or its affiliates. -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -# -# DEPRECATED -# -# This fragment is deprecated and will be removed in community.general 13.0.0 -# - - -class ModuleDocFragment: - DOCUMENTATION = r""" -options: - defined_tags: - description: Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see - U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm). - type: dict - freeform_tags: - description: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. - For more information, see U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm). - type: dict -""" diff --git a/plugins/doc_fragments/oracle_wait_options.py b/plugins/doc_fragments/oracle_wait_options.py deleted file mode 100644 index 16288b3be3..0000000000 --- a/plugins/doc_fragments/oracle_wait_options.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) 2018, Oracle and/or its affiliates. -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -# -# DEPRECATED -# -# This fragment is deprecated and will be removed in community.general 13.0.0 -# - - -class ModuleDocFragment: - DOCUMENTATION = r""" -options: - wait: - description: Whether to wait for create or delete operation to complete. 
- default: true - type: bool - wait_timeout: - description: Time, in seconds, to wait when O(wait=true). - default: 1200 - type: int - wait_until: - description: The lifecycle state to wait for the resource to transition into when O(wait=true). By default, when O(wait=true), - we wait for the resource to get into ACTIVE/ATTACHED/AVAILABLE/PROVISIONED/ RUNNING applicable lifecycle state during - create operation and to get into DELETED/DETACHED/ TERMINATED lifecycle state during delete operation. - type: str -""" diff --git a/plugins/lookup/hiera.py b/plugins/lookup/hiera.py deleted file mode 100644 index 225ae4aaa3..0000000000 --- a/plugins/lookup/hiera.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (c) 2017, Juan Manuel Parrilla -# Copyright (c) 2012-17 Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import annotations - -DOCUMENTATION = r""" -author: - - Juan Manuel Parrilla (@jparrill) -name: hiera -short_description: Get info from hiera data -requirements: - - hiera (command line utility) -description: - - Retrieves data from an Puppetmaster node using Hiera as ENC. -deprecated: - removed_in: 13.0.0 - why: >- - Hiera has been deprecated a long time ago. - If you disagree with this deprecation, please create an issue in the community.general repository. - alternative: Unknown. -options: - _terms: - description: - - The list of keys to lookup on the Puppetmaster. - type: list - elements: string - required: true - executable: - description: - - Binary file to execute Hiera. - type: string - default: '/usr/bin/hiera' - env: - - name: ANSIBLE_HIERA_BIN - config_file: - description: - - File that describes the hierarchy of Hiera. - type: string - default: '/etc/hiera.yaml' - env: - - name: ANSIBLE_HIERA_CFG -# FIXME: incomplete options .. _terms? environment/fqdn? 
-""" - -EXAMPLES = r""" -# All this examples depends on hiera.yml that describes the hierarchy - -- name: "a value from Hiera 'DB'" - ansible.builtin.debug: - msg: "{{ lookup('community.general.hiera', 'foo') }}" - -- name: "a value from a Hiera 'DB' on other environment" - ansible.builtin.debug: - msg: "{{ lookup('community.general.hiera', 'foo environment=production') }}" - -- name: "a value from a Hiera 'DB' for a concrete node" - ansible.builtin.debug: - msg: "{{ lookup('community.general.hiera', 'foo fqdn=puppet01.localdomain') }}" -""" - -RETURN = r""" -_raw: - description: - - A value associated with input key. - type: list - elements: str -""" - -from ansible.module_utils.common.text.converters import to_text -from ansible.plugins.lookup import LookupBase -from ansible.utils.cmd_functions import run_cmd - - -class Hiera: - def __init__(self, hiera_cfg, hiera_bin): - self.hiera_cfg = hiera_cfg - self.hiera_bin = hiera_bin - - def get(self, hiera_key): - pargs = [self.hiera_bin] - pargs.extend(["-c", self.hiera_cfg]) - - pargs.extend(hiera_key) - - rc, output, err = run_cmd(f"{self.hiera_bin} -c {self.hiera_cfg} {hiera_key[0]}") - - return to_text(output.strip()) - - -class LookupModule(LookupBase): - def run(self, terms, variables=None, **kwargs): - self.set_options(var_options=variables, direct=kwargs) - - hiera = Hiera(self.get_option("config_file"), self.get_option("executable")) - ret = [hiera.get(terms)] - return ret diff --git a/plugins/module_utils/cloud.py b/plugins/module_utils/cloud.py deleted file mode 100644 index cdffe689ce..0000000000 --- a/plugins/module_utils/cloud.py +++ /dev/null @@ -1,222 +0,0 @@ -# -# Copyright (c) 2016 Allen Sanabria, -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -# This module utils is deprecated and will be removed in community.general 13.0.0 - -from __future__ import annotations - -""" -This module adds 
shared support for generic cloud modules - -In order to use this module, include it as part of a custom -module as shown below. - -from ansible.module_utils.cloud import CloudRetry - -The 'cloud' module provides the following common classes: - - * CloudRetry - - The base class to be used by other cloud providers, in order to - provide a backoff/retry decorator based on status codes. - - - Example using the AWSRetry class which inherits from CloudRetry. - - @AWSRetry.exponential_backoff(retries=10, delay=3) - get_ec2_security_group_ids_from_names() - - @AWSRetry.jittered_backoff() - get_ec2_security_group_ids_from_names() - -""" -import random -import syslog -import time -from functools import wraps - - -def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60): - """Customizable exponential backoff strategy. - Args: - retries (int): Maximum number of times to retry a request. - delay (float): Initial (base) delay. - backoff (float): base of the exponent to use for exponential - backoff. - max_delay (int): Optional. If provided each delay generated is capped - at this amount. Defaults to 60 seconds. - Returns: - Callable that returns a generator. This generator yields durations in - seconds to be used as delays for an exponential backoff strategy. - Usage: - >>> backoff = _exponential_backoff() - >>> backoff - - >>> list(backoff()) - [2, 4, 8, 16, 32, 60, 60, 60, 60, 60] - """ - - def backoff_gen(): - for retry in range(0, retries): - sleep = delay * backoff**retry - yield sleep if max_delay is None else min(sleep, max_delay) - - return backoff_gen - - -def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random): - """Implements the "Full Jitter" backoff strategy described here - https://www.awsarchitectureblog.com/2015/03/backoff.html - Args: - retries (int): Maximum number of times to retry a request. - delay (float): Approximate number of seconds to sleep for the first - retry. 
- max_delay (int): The maximum number of seconds to sleep for any retry. - _random (random.Random or None): Makes this generator testable by - allowing developers to explicitly pass in the a seeded Random. - Returns: - Callable that returns a generator. This generator yields durations in - seconds to be used as delays for a full jitter backoff strategy. - Usage: - >>> backoff = _full_jitter_backoff(retries=5) - >>> backoff - - >>> list(backoff()) - [3, 6, 5, 23, 38] - >>> list(backoff()) - [2, 1, 6, 6, 31] - """ - - def backoff_gen(): - for retry in range(0, retries): - yield _random.randint(0, min(max_delay, delay * 2**retry)) - - return backoff_gen - - -class CloudRetry: - """CloudRetry can be used by any cloud provider, in order to implement a - backoff algorithm/retry effect based on Status Code from Exceptions. - """ - - # This is the base class of the exception. - # AWS Example botocore.exceptions.ClientError - base_class = None - - @staticmethod - def status_code_from_exception(error): - """Return the status code from the exception object - Args: - error (object): The exception itself. - """ - pass - - @staticmethod - def found(response_code, catch_extra_error_codes=None): - """Return True if the Response Code to retry on was found. - Args: - response_code (str): This is the Response Code that is being matched against. - """ - pass - - @classmethod - def _backoff(cls, backoff_strategy, catch_extra_error_codes=None): - """Retry calling the Cloud decorated function using the provided - backoff strategy. - Args: - backoff_strategy (callable): Callable that returns a generator. The - generator should yield sleep times for each retry of the decorated - function. 
- """ - - def deco(f): - @wraps(f) - def retry_func(*args, **kwargs): - for delay in backoff_strategy(): - try: - return f(*args, **kwargs) - except Exception as e: - if isinstance(e, cls.base_class): # pylint: disable=isinstance-second-argument-not-valid-type - response_code = cls.status_code_from_exception(e) - if cls.found(response_code, catch_extra_error_codes): - msg = f"{e}: Retrying in {delay} seconds..." - syslog.syslog(syslog.LOG_INFO, msg) - time.sleep(delay) - else: - # Return original exception if exception is not a ClientError - raise e - else: - # Return original exception if exception is not a ClientError - raise e - return f(*args, **kwargs) - - return retry_func # true decorator - - return deco - - @classmethod - def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None): - """ - Retry calling the Cloud decorated function using an exponential backoff. - - Kwargs: - retries (int): Number of times to retry a failed request before giving up - default=10 - delay (int or float): Initial delay between retries in seconds - default=3 - backoff (int or float): backoff multiplier e.g. value of 2 will - double the delay each retry - default=1.1 - max_delay (int or None): maximum amount of time to wait between retries. - default=60 - """ - return cls._backoff( - _exponential_backoff(retries=retries, delay=delay, backoff=backoff, max_delay=max_delay), - catch_extra_error_codes, - ) - - @classmethod - def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_codes=None): - """ - Retry calling the Cloud decorated function using a jittered backoff - strategy. More on this strategy here: - - https://www.awsarchitectureblog.com/2015/03/backoff.html - - Kwargs: - retries (int): Number of times to retry a failed request before giving up - default=10 - delay (int): Initial delay between retries in seconds - default=3 - max_delay (int): maximum amount of time to wait between retries. 
- default=60 - """ - return cls._backoff( - _full_jitter_backoff(retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes - ) - - @classmethod - def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None): - """ - Retry calling the Cloud decorated function using an exponential backoff. - - Compatibility for the original implementation of CloudRetry.backoff that - did not provide configurable backoff strategies. Developers should use - CloudRetry.exponential_backoff instead. - - Kwargs: - tries (int): Number of times to try (not retry) before giving up - default=10 - delay (int or float): Initial delay between retries in seconds - default=3 - backoff (int or float): backoff multiplier e.g. value of 2 will - double the delay each retry - default=1.1 - """ - return cls.exponential_backoff( - retries=tries - 1, - delay=delay, - backoff=backoff, - max_delay=None, - catch_extra_error_codes=catch_extra_error_codes, - ) diff --git a/plugins/module_utils/database.py b/plugins/module_utils/database.py deleted file mode 100644 index 7cb0983b4e..0000000000 --- a/plugins/module_utils/database.py +++ /dev/null @@ -1,194 +0,0 @@ -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. -# -# Copyright (c) 2014, Toshio Kuratomi -# -# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) -# SPDX-License-Identifier: BSD-2-Clause - -# This module utils is deprecated and will be removed in community.general 13.0.0 - -from __future__ import annotations - -import re -import typing as t - -if t.TYPE_CHECKING: - from ansible.module_utils.basic import AnsibleModule - - -# Input patterns for is_input_dangerous function: -# -# 1. 
'"' in string and '--' in string or -# "'" in string and '--' in string -PATTERN_1 = re.compile(r"(\'|\").*--") - -# 2. union \ intersect \ except + select -PATTERN_2 = re.compile(r"(UNION|INTERSECT|EXCEPT).*SELECT", re.IGNORECASE) - -# 3. ';' and any KEY_WORDS -PATTERN_3 = re.compile(r";.*(SELECT|UPDATE|INSERT|DELETE|DROP|TRUNCATE|ALTER)", re.IGNORECASE) - - -class SQLParseError(Exception): - pass - - -class UnclosedQuoteError(SQLParseError): - pass - - -# maps a type of identifier to the maximum number of dot levels that are -# allowed to specify that identifier. For example, a database column can be -# specified by up to 4 levels: database.schema.table.column -_PG_IDENTIFIER_TO_DOT_LEVEL = dict( - database=1, - schema=2, - table=3, - column=4, - role=1, - tablespace=1, - sequence=3, - publication=1, -) -_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1) - - -def _find_end_quote(identifier, quote_char): - accumulate = 0 - while True: - try: - quote = identifier.index(quote_char) - except ValueError as e: - raise UnclosedQuoteError from e - accumulate = accumulate + quote - try: - next_char = identifier[quote + 1] - except IndexError: - return accumulate - if next_char == quote_char: - try: - identifier = identifier[quote + 2 :] - accumulate = accumulate + 2 - except IndexError as e: - raise UnclosedQuoteError from e - else: - return accumulate - - -def _identifier_parse(identifier, quote_char): - if not identifier: - raise SQLParseError("Identifier name unspecified or unquoted trailing dot") - - already_quoted = False - if identifier.startswith(quote_char): - already_quoted = True - try: - end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1 - except UnclosedQuoteError: - already_quoted = False - else: - if end_quote < len(identifier) - 1: - if identifier[end_quote + 1] == ".": - dot = end_quote + 1 - first_identifier = identifier[:dot] - next_identifier = identifier[dot + 1 :] - further_identifiers = 
_identifier_parse(next_identifier, quote_char) - further_identifiers.insert(0, first_identifier) - else: - raise SQLParseError("User escaped identifiers must escape extra quotes") - else: - further_identifiers = [identifier] - - if not already_quoted: - try: - dot = identifier.index(".") - except ValueError: - identifier = identifier.replace(quote_char, quote_char * 2) - identifier = f"{quote_char}{identifier}{quote_char}" - further_identifiers = [identifier] - else: - if dot == 0 or dot >= len(identifier) - 1: - identifier = identifier.replace(quote_char, quote_char * 2) - identifier = f"{quote_char}{identifier}{quote_char}" - further_identifiers = [identifier] - else: - first_identifier = identifier[:dot] - next_identifier = identifier[dot + 1 :] - further_identifiers = _identifier_parse(next_identifier, quote_char) - first_identifier = first_identifier.replace(quote_char, quote_char * 2) - first_identifier = f"{quote_char}{first_identifier}{quote_char}" - further_identifiers.insert(0, first_identifier) - - return further_identifiers - - -def pg_quote_identifier(identifier, id_type): - identifier_fragments = _identifier_parse(identifier, quote_char='"') - if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]: - raise SQLParseError( - f"PostgreSQL does not support {id_type} with more than {_PG_IDENTIFIER_TO_DOT_LEVEL[id_type]} dots" - ) - return ".".join(identifier_fragments) - - -def mysql_quote_identifier(identifier, id_type): - identifier_fragments = _identifier_parse(identifier, quote_char="`") - if (len(identifier_fragments) - 1) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]: - raise SQLParseError( - f"MySQL does not support {id_type} with more than {_MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]} dots" - ) - - special_cased_fragments = [] - for fragment in identifier_fragments: - if fragment == "`*`": - special_cased_fragments.append("*") - else: - special_cased_fragments.append(fragment) - - return ".".join(special_cased_fragments) - - -def 
is_input_dangerous(string): - """Check if the passed string is potentially dangerous. - Can be used to prevent SQL injections. - - Note: use this function only when you can't use - psycopg2's cursor.execute method parametrized - (typically with DDL queries). - """ - if not string: - return False - - return any(pattern.search(string) for pattern in (PATTERN_1, PATTERN_2, PATTERN_3)) - - -def check_input(module: AnsibleModule, *args) -> None: - """Wrapper for is_input_dangerous function.""" - needs_to_check = args - - dangerous_elements = [] - - for elem in needs_to_check: - if isinstance(elem, str): - if is_input_dangerous(elem): - dangerous_elements.append(elem) - - elif isinstance(elem, list): - for e in elem: - if is_input_dangerous(e): - dangerous_elements.append(e) - - elif elem is None or isinstance(elem, bool): - pass - - else: - elem = str(elem) - if is_input_dangerous(elem): - dangerous_elements.append(elem) - - if dangerous_elements: - module.fail_json(msg=f"Passed input '{', '.join(dangerous_elements)}' is potentially dangerous") diff --git a/plugins/module_utils/dimensiondata.py b/plugins/module_utils/dimensiondata.py deleted file mode 100644 index b6b5012be0..0000000000 --- a/plugins/module_utils/dimensiondata.py +++ /dev/null @@ -1,331 +0,0 @@ -# -# Copyright (c) 2016 Dimension Data -# -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later -# -# Authors: -# - Aimon Bustardo -# - Mark Maglana -# - Adam Friedman -# -# Common functionality to be used by various module components - -from __future__ import annotations - -# -# DEPRECATED -# -# This module utils is deprecated and will be removed in community.general 13.0.0 -# -import configparser -import os -import re -import traceback -from os.path import expanduser -from uuid import UUID - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib # noqa: F401, pylint: 
disable=unused-import - -LIBCLOUD_IMP_ERR = None -try: - import libcloud.security - from libcloud.common.dimensiondata import ( # noqa: F401, pylint: disable=unused-import - API_ENDPOINTS, - DimensionDataAPIException, - DimensionDataStatus, - ) - from libcloud.compute.base import Node, NodeLocation # noqa: F401, pylint: disable=unused-import - from libcloud.compute.providers import get_driver - from libcloud.compute.types import Provider - - HAS_LIBCLOUD = True -except ImportError: - LIBCLOUD_IMP_ERR = traceback.format_exc() - HAS_LIBCLOUD = False - -# MCP 2.x version pattern for location (datacenter) names. -# -# Note that this is not a totally reliable way of determining MCP version. -# Unfortunately, libcloud's NodeLocation currently makes no provision for extended properties. -# At some point we may therefore want to either enhance libcloud or enable overriding mcp_version -# by specifying it in the module parameters. -MCP_2_LOCATION_NAME_PATTERN = re.compile(r".*MCP\s?2.*") - - -class DimensionDataModule: - """ - The base class containing common functionality used by Dimension Data modules for Ansible. - """ - - def __init__(self, module: AnsibleModule) -> None: - """ - Create a new DimensionDataModule. - - Will fail if Apache libcloud is not present. - - :param module: The underlying Ansible module. - :type module: AnsibleModule - """ - - self.module = module - - if not HAS_LIBCLOUD: - self.module.fail_json(msg=missing_required_lib("libcloud"), exception=LIBCLOUD_IMP_ERR) - - # Credentials are common to all Dimension Data modules. - credentials = self.get_credentials() - self.user_id = credentials["user_id"] - self.key = credentials["key"] - - # Region and location are common to all Dimension Data modules. 
- region = self.module.params["region"] - self.region = f"dd-{region}" - self.location = self.module.params["location"] - - libcloud.security.VERIFY_SSL_CERT = self.module.params["validate_certs"] - - self.driver = get_driver(Provider.DIMENSIONDATA)(self.user_id, self.key, region=self.region) - - # Determine the MCP API version (this depends on the target datacenter). - self.mcp_version = self.get_mcp_version(self.location) - - # Optional "wait-for-completion" arguments - if "wait" in self.module.params: - self.wait = self.module.params["wait"] - self.wait_time = self.module.params["wait_time"] - self.wait_poll_interval = self.module.params["wait_poll_interval"] - else: - self.wait = False - self.wait_time = 0 - self.wait_poll_interval = 0 - - def get_credentials(self): - """ - Get user_id and key from module configuration, environment, or dotfile. - Order of priority is module, environment, dotfile. - - To set in environment: - - export MCP_USER='myusername' - export MCP_PASSWORD='mypassword' - - To set in dot file place a file at ~/.dimensiondata with - the following contents: - - [dimensiondatacloud] - MCP_USER: myusername - MCP_PASSWORD: mypassword - """ - - if not HAS_LIBCLOUD: - self.module.fail_json(msg="libcloud is required for this module.") - - user_id = None - key = None - - # First, try the module configuration - if "mcp_user" in self.module.params: - if "mcp_password" not in self.module.params: - self.module.fail_json( - msg='"mcp_user" parameter was specified, but not "mcp_password" (either both must be specified, or neither).' 
- ) - - user_id = self.module.params["mcp_user"] - key = self.module.params["mcp_password"] - - # Fall back to environment - if not user_id or not key: - user_id = os.environ.get("MCP_USER", None) - key = os.environ.get("MCP_PASSWORD", None) - - # Finally, try dotfile (~/.dimensiondata) - if not user_id or not key: - home = expanduser("~") - config = configparser.RawConfigParser() - config.read(f"{home}/.dimensiondata") - - try: - user_id = config.get("dimensiondatacloud", "MCP_USER") - key = config.get("dimensiondatacloud", "MCP_PASSWORD") - except (configparser.NoSectionError, configparser.NoOptionError): - pass - - # One or more credentials not found. Function can't recover from this - # so it has to raise an error instead of fail silently. - if not user_id: - raise MissingCredentialsError("Dimension Data user id not found") - elif not key: - raise MissingCredentialsError("Dimension Data key not found") - - # Both found, return data - return dict(user_id=user_id, key=key) - - def get_mcp_version(self, location): - """ - Get the MCP version for the specified location. - """ - - location = self.driver.ex_get_location_by_id(location) - if MCP_2_LOCATION_NAME_PATTERN.match(location.name): - return "2.0" - - return "1.0" - - def get_network_domain(self, locator, location): - """ - Retrieve a network domain by its name or Id. 
- """ - - if is_uuid(locator): - network_domain = self.driver.ex_get_network_domain(locator) - else: - matching_network_domains = [ - network_domain - for network_domain in self.driver.ex_list_network_domains(location=location) - if network_domain.name == locator - ] - - if matching_network_domains: - network_domain = matching_network_domains[0] - else: - network_domain = None - - if network_domain: - return network_domain - - raise UnknownNetworkError(f"Network '{locator}' could not be found") - - def get_vlan(self, locator, location, network_domain): - """ - Get a VLAN object by its name or id - """ - if is_uuid(locator): - vlan = self.driver.ex_get_vlan(locator) - else: - matching_vlans = [ - vlan for vlan in self.driver.ex_list_vlans(location, network_domain) if vlan.name == locator - ] - - if matching_vlans: - vlan = matching_vlans[0] - else: - vlan = None - - if vlan: - return vlan - - raise UnknownVLANError(f"VLAN '{locator}' could not be found") - - @staticmethod - def argument_spec(**additional_argument_spec): - """ - Build an argument specification for a Dimension Data module. - :param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any). - :return: A dict containing the argument specification. - """ - - spec = dict( - region=dict(type="str", default="na"), - mcp_user=dict(type="str", required=False), - mcp_password=dict(type="str", required=False, no_log=True), - location=dict(type="str", required=True), - validate_certs=dict(type="bool", required=False, default=True), - ) - - if additional_argument_spec: - spec.update(additional_argument_spec) - - return spec - - @staticmethod - def argument_spec_with_wait(**additional_argument_spec): - """ - Build an argument specification for a Dimension Data module that includes "wait for completion" arguments. - :param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any). 
- :return: A dict containing the argument specification. - """ - - spec = DimensionDataModule.argument_spec( - wait=dict(type="bool", required=False, default=False), - wait_time=dict(type="int", required=False, default=600), - wait_poll_interval=dict(type="int", required=False, default=2), - ) - - if additional_argument_spec: - spec.update(additional_argument_spec) - - return spec - - @staticmethod - def required_together(*additional_required_together): - """ - Get the basic argument specification for Dimension Data modules indicating which arguments are must be specified together. - :param additional_required_together: An optional list representing the specification for additional module arguments that must be specified together. - :return: An array containing the argument specifications. - """ - - required_together = [["mcp_user", "mcp_password"]] - - if additional_required_together: - required_together.extend(additional_required_together) - - return required_together - - -class LibcloudNotFound(Exception): - """ - Exception raised when Apache libcloud cannot be found. - """ - - pass - - -class MissingCredentialsError(Exception): - """ - Exception raised when credentials for Dimension Data CloudControl cannot be found. - """ - - pass - - -class UnknownNetworkError(Exception): - """ - Exception raised when a network or network domain cannot be found. - """ - - pass - - -class UnknownVLANError(Exception): - """ - Exception raised when a VLAN cannot be found. - """ - - pass - - -def get_dd_regions(): - """ - Get the list of available regions whose vendor is Dimension Data. 
- """ - - # Get endpoints - all_regions = API_ENDPOINTS.keys() - - # Only Dimension Data endpoints (no prefix) - regions = [region[3:] for region in all_regions if region.startswith("dd-")] - - return regions - - -def is_uuid(u, version=4): - """ - Test if valid v4 UUID - """ - try: - uuid_obj = UUID(u, version=version) - - return str(uuid_obj) == u - except ValueError: - return False diff --git a/plugins/module_utils/django.py b/plugins/module_utils/django.py index 1a11b547a9..d2ce1f5c13 100644 --- a/plugins/module_utils/django.py +++ b/plugins/module_utils/django.py @@ -74,10 +74,6 @@ _django_std_arg_fmts: dict[str, ArgFormatter] = dict( # keys can be used in _django_args _args_menu = dict( std=(django_std_args, _django_std_arg_fmts), - database=(_database_dash, {"database": _django_std_arg_fmts["database_dash"]}), # deprecate, remove in 13.0.0 - noinput=({}, {"noinput": cmd_runner_fmt.as_fixed("--noinput")}), # deprecate, remove in 13.0.0 - dry_run=({}, {"dry_run": cmd_runner_fmt.as_bool("--dry-run")}), # deprecate, remove in 13.0.0 - check=({}, {"check": cmd_runner_fmt.as_bool("--check")}), # deprecate, remove in 13.0.0 database_dash=(_database_dash, {}), data=(_data, {}), ) diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index a323441caf..fc9d9e7538 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -3019,10 +3019,6 @@ class KeycloakAPI: except Exception as e: self.fail_request(e, msg=f"Could not get groups for user {user_id} in realm {realm}: {e}") - def add_user_in_group(self, user_id, group_id, realm: str = "master"): - """DEPRECATED: Call add_user_to_group(...) instead. This method is scheduled for removal in community.general 13.0.0.""" - return self.add_user_to_group(user_id, group_id, realm) - def add_user_to_group(self, user_id, group_id, realm: str = "master"): """ Add a user to a group. 
diff --git a/plugins/module_utils/known_hosts.py b/plugins/module_utils/known_hosts.py deleted file mode 100644 index 76942d3655..0000000000 --- a/plugins/module_utils/known_hosts.py +++ /dev/null @@ -1,171 +0,0 @@ -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. -# -# Copyright (c), Michael DeHaan , 2012-2013 -# -# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) -# SPDX-License-Identifier: BSD-2-Clause - -# This module utils is deprecated and will be removed in community.general 13.0.0 - -from __future__ import annotations - -import hmac -import os -import re -from urllib.parse import urlparse - -try: - from hashlib import sha1 -except ImportError: - import sha as sha1 # type: ignore[no-redef] - -HASHED_KEY_MAGIC = "|1|" - - -def is_ssh_url(url): - """check if url is ssh""" - - if "@" in url and "://" not in url: - return True - return any(url.startswith(scheme) for scheme in ("ssh://", "git+ssh://", "ssh+git://")) - - -def get_fqdn_and_port(repo_url): - """chop the hostname and port out of a url""" - - fqdn = None - port = None - ipv6_re = re.compile(r"(\[[^]]*\])(?::([0-9]+))?") - if "@" in repo_url and "://" not in repo_url: - # most likely an user@host:path or user@host/path type URL - repo_url = repo_url.split("@", 1)[1] - match = ipv6_re.match(repo_url) - # For this type of URL, colon specifies the path, not the port - if match: - fqdn, path = match.groups() - elif ":" in repo_url: - fqdn = repo_url.split(":")[0] - elif "/" in repo_url: - fqdn = repo_url.split("/")[0] - elif "://" in repo_url: - # this should be something we can parse with urlparse - parts = urlparse(repo_url) - fqdn = parts[1] - if "@" in fqdn: - fqdn = 
fqdn.split("@", 1)[1] - match = ipv6_re.match(fqdn) - if match: - fqdn, port = match.groups() - elif ":" in fqdn: - fqdn, port = fqdn.split(":")[0:2] - return fqdn, port - - -def check_hostkey(module, fqdn): - return not not_in_host_file(module, fqdn) - - -# this is a variant of code found in connection_plugins/paramiko.py and we should modify -# the paramiko code to import and use this. - - -def not_in_host_file(self, host): - if "USER" in os.environ: - user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts") - else: - user_host_file = "~/.ssh/known_hosts" - user_host_file = os.path.expanduser(user_host_file) - - host_file_list = [ - user_host_file, - "/etc/ssh/ssh_known_hosts", - "/etc/ssh/ssh_known_hosts2", - "/etc/openssh/ssh_known_hosts", - ] - - hfiles_not_found = 0 - for hf in host_file_list: - if not os.path.exists(hf): - hfiles_not_found += 1 - continue - - try: - with open(hf) as host_fh: - data = host_fh.read() - except OSError: - hfiles_not_found += 1 - continue - - for line in data.split("\n"): - if line is None or " " not in line: - continue - tokens = line.split() - if tokens[0].find(HASHED_KEY_MAGIC) == 0: - # this is a hashed known host entry - try: - (kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC) :].split("|", 2) - hash = hmac.new(kn_salt.decode("base64"), digestmod=sha1) - hash.update(host) - if hash.digest() == kn_host.decode("base64"): - return False - except Exception: - # invalid hashed host key, skip it - continue - else: - # standard host file entry - if host in tokens[0]: - return False - - return True - - -def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False): - """use ssh-keyscan to add the hostkey""" - - keyscan_cmd = module.get_bin_path("ssh-keyscan", True) - - if "USER" in os.environ: - user_ssh_dir = os.path.expandvars("~${USER}/.ssh/") - user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts") - else: - user_ssh_dir = "~/.ssh/" - user_host_file = "~/.ssh/known_hosts" - user_ssh_dir = 
os.path.expanduser(user_ssh_dir) - - if not os.path.exists(user_ssh_dir): - if create_dir: - try: - os.makedirs(user_ssh_dir, int("700", 8)) - except Exception: - module.fail_json(msg=f"failed to create host key directory: {user_ssh_dir}") - else: - module.fail_json(msg=f"{user_ssh_dir} does not exist") - elif not os.path.isdir(user_ssh_dir): - module.fail_json(msg=f"{user_ssh_dir} is not a directory") - - if port: - this_cmd = f"{keyscan_cmd} -t {key_type} -p {port} {fqdn}" - else: - this_cmd = f"{keyscan_cmd} -t {key_type} {fqdn}" - - rc, out, err = module.run_command(this_cmd, environ_update={"LANGUAGE": "C", "LC_ALL": "C"}) - # ssh-keyscan gives a 0 exit code and prints nothing on timeout - if rc != 0 or not out: - msg = "failed to retrieve hostkey" - if not out: - msg += f'. "{this_cmd}" returned no matches.' - else: - msg += f' using command "{this_cmd}". [stdout]: {out}' - - if err: - msg += f" [stderr]: {err}" - - module.fail_json(msg=msg) - - module.append_to_file(user_host_file, out) - - return rc, out, err diff --git a/plugins/module_utils/oneandone.py b/plugins/module_utils/oneandone.py deleted file mode 100644 index bb48ea9e52..0000000000 --- a/plugins/module_utils/oneandone.py +++ /dev/null @@ -1,245 +0,0 @@ -# Copyright (c) Ansible project -# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) -# SPDX-License-Identifier: BSD-2-Clause - -from __future__ import annotations - -import time - -# -# DEPRECATED -# -# This module utils is deprecated and will be removed in community.general 13.0.0 -# - - -class OneAndOneResources: - firewall_policy = "firewall_policy" - load_balancer = "load_balancer" - monitoring_policy = "monitoring_policy" - private_network = "private_network" - public_ip = "public_ip" - role = "role" - server = "server" - user = "user" - vpn = "vpn" - - -def get_resource(oneandone_conn, resource_type, resource_id): - switcher = { - "firewall_policy": oneandone_conn.get_firewall, - 
"load_balancer": oneandone_conn.get_load_balancer, - "monitoring_policy": oneandone_conn.get_monitoring_policy, - "private_network": oneandone_conn.get_private_network, - "public_ip": oneandone_conn.get_public_ip, - "role": oneandone_conn.get_role, - "server": oneandone_conn.get_server, - "user": oneandone_conn.get_user, - "vpn": oneandone_conn.get_vpn, - } - - return switcher.get(resource_type)(resource_id) - - -def get_datacenter(oneandone_conn, datacenter, full_object=False): - """ - Validates the datacenter exists by ID or country code. - Returns the datacenter ID. - """ - for _datacenter in oneandone_conn.list_datacenters(): - if datacenter in (_datacenter["id"], _datacenter["country_code"]): - if full_object: - return _datacenter - return _datacenter["id"] - - -def get_fixed_instance_size(oneandone_conn, fixed_instance_size, full_object=False): - """ - Validates the fixed instance size exists by ID or name. - Return the instance size ID. - """ - for _fixed_instance_size in oneandone_conn.fixed_server_flavors(): - if fixed_instance_size in (_fixed_instance_size["id"], _fixed_instance_size["name"]): - if full_object: - return _fixed_instance_size - return _fixed_instance_size["id"] - - -def get_appliance(oneandone_conn, appliance, full_object=False): - """ - Validates the appliance exists by ID or name. - Return the appliance ID. - """ - for _appliance in oneandone_conn.list_appliances(q="IMAGE"): - if appliance in (_appliance["id"], _appliance["name"]): - if full_object: - return _appliance - return _appliance["id"] - - -def get_private_network(oneandone_conn, private_network, full_object=False): - """ - Validates the private network exists by ID or name. - Return the private network ID. 
- """ - for _private_network in oneandone_conn.list_private_networks(): - if private_network in (_private_network["name"], _private_network["id"]): - if full_object: - return _private_network - return _private_network["id"] - - -def get_monitoring_policy(oneandone_conn, monitoring_policy, full_object=False): - """ - Validates the monitoring policy exists by ID or name. - Return the monitoring policy ID. - """ - for _monitoring_policy in oneandone_conn.list_monitoring_policies(): - if monitoring_policy in (_monitoring_policy["name"], _monitoring_policy["id"]): - if full_object: - return _monitoring_policy - return _monitoring_policy["id"] - - -def get_firewall_policy(oneandone_conn, firewall_policy, full_object=False): - """ - Validates the firewall policy exists by ID or name. - Return the firewall policy ID. - """ - for _firewall_policy in oneandone_conn.list_firewall_policies(): - if firewall_policy in (_firewall_policy["name"], _firewall_policy["id"]): - if full_object: - return _firewall_policy - return _firewall_policy["id"] - - -def get_load_balancer(oneandone_conn, load_balancer, full_object=False): - """ - Validates the load balancer exists by ID or name. - Return the load balancer ID. - """ - for _load_balancer in oneandone_conn.list_load_balancers(): - if load_balancer in (_load_balancer["name"], _load_balancer["id"]): - if full_object: - return _load_balancer - return _load_balancer["id"] - - -def get_server(oneandone_conn, instance, full_object=False): - """ - Validates that the server exists whether by ID or name. - Returns the server if one was found. - """ - for server in oneandone_conn.list_servers(per_page=1000): - if instance in (server["id"], server["name"]): - if full_object: - return server - return server["id"] - - -def get_user(oneandone_conn, user, full_object=False): - """ - Validates that the user exists by ID or a name. - Returns the user if one was found. 
- """ - for _user in oneandone_conn.list_users(per_page=1000): - if user in (_user["id"], _user["name"]): - if full_object: - return _user - return _user["id"] - - -def get_role(oneandone_conn, role, full_object=False): - """ - Given a name, validates that the role exists - whether it is a proper ID or a name. - Returns the role if one was found, else None. - """ - for _role in oneandone_conn.list_roles(per_page=1000): - if role in (_role["id"], _role["name"]): - if full_object: - return _role - return _role["id"] - - -def get_vpn(oneandone_conn, vpn, full_object=False): - """ - Validates that the vpn exists by ID or a name. - Returns the vpn if one was found. - """ - for _vpn in oneandone_conn.list_vpns(per_page=1000): - if vpn in (_vpn["id"], _vpn["name"]): - if full_object: - return _vpn - return _vpn["id"] - - -def get_public_ip(oneandone_conn, public_ip, full_object=False): - """ - Validates that the public ip exists by ID or a name. - Returns the public ip if one was found. - """ - for _public_ip in oneandone_conn.list_public_ips(per_page=1000): - if public_ip in (_public_ip["id"], _public_ip["ip"]): - if full_object: - return _public_ip - return _public_ip["id"] - - -def wait_for_resource_creation_completion(oneandone_conn, resource_type, resource_id, wait_timeout, wait_interval): - """ - Waits for the resource create operation to complete based on the timeout period. 
- """ - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(wait_interval) - - # Refresh the resource info - resource = get_resource(oneandone_conn, resource_type, resource_id) - - if resource_type == OneAndOneResources.server: - resource_state = resource["status"]["state"] - else: - resource_state = resource["state"] - - if (resource_type == OneAndOneResources.server and resource_state.lower() == "powered_on") or ( - resource_type != OneAndOneResources.server and resource_state.lower() == "active" - ): - return - elif resource_state.lower() == "failed": - raise Exception(f"{resource_type} creation failed for {resource_id}") - elif resource_state.lower() in ("active", "enabled", "deploying", "configuring"): - continue - else: - raise Exception(f"Unknown {resource_type} state {resource_state}") - - raise Exception(f"Timed out waiting for {resource_type} completion for {resource_id}") - - -def wait_for_resource_deletion_completion(oneandone_conn, resource_type, resource_id, wait_timeout, wait_interval): - """ - Waits for the resource delete operation to complete based on the timeout period. 
- """ - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(wait_interval) - - # Refresh the operation info - logs = oneandone_conn.list_logs(q="DELETE", period="LAST_HOUR", sort="-start_date") - - if resource_type == OneAndOneResources.server: - _type = "VM" - elif resource_type == OneAndOneResources.private_network: - _type = "PRIVATENETWORK" - else: - raise Exception(f"Unsupported wait_for delete operation for {resource_type} resource") - - for log in logs: - if ( - log["resource"]["id"] == resource_id - and log["action"] == "DELETE" - and log["type"] == _type - and log["status"]["state"] == "OK" - ): - return - raise Exception(f"Timed out waiting for {resource_type} deletion for {resource_id}") diff --git a/plugins/module_utils/oracle/oci_utils.py b/plugins/module_utils/oracle/oci_utils.py deleted file mode 100644 index 9aa5896879..0000000000 --- a/plugins/module_utils/oracle/oci_utils.py +++ /dev/null @@ -1,1787 +0,0 @@ -# Copyright (c) 2017, 2018, 2019 Oracle and/or its affiliates. -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -# -# DEPRECATED -# -# This module utils is deprecated and will be removed in community.general 13.0.0 -# -import logging -import logging.config -import os -import tempfile -import time -import typing as t - -# (TODO: remove next line!) 
-from datetime import datetime # noqa: F401, pylint: disable=unused-import -from http import HTTPStatus -from operator import eq - -try: - import oci - import yaml # noqa: F401, pylint: disable=unused-import - from oci.constants import HEADER_NEXT_PAGE - from oci.exceptions import ( - ConfigFileNotFound, - InvalidConfig, - InvalidPrivateKey, - MaximumWaitTimeExceeded, - MissingPrivateKeyPassphrase, - ServiceError, - ) - from oci.identity.identity_client import IdentityClient - from oci.object_storage.models import CreateBucketDetails, UpdateBucketDetails - from oci.retry import RetryStrategyBuilder - from oci.util import Sentinel, to_dict - - HAS_OCI_PY_SDK = True -except ImportError: - HAS_OCI_PY_SDK = False - - -from ansible.module_utils.common.text.converters import to_bytes - -if t.TYPE_CHECKING: - from ansible.module_utils.basic import AnsibleModule - -__version__ = "1.6.0-dev" - -MAX_WAIT_TIMEOUT_IN_SECONDS = 1200 - -# If a resource is in one of these states it would be considered inactive -DEAD_STATES = [ - "TERMINATING", - "TERMINATED", - "FAULTY", - "FAILED", - "DELETING", - "DELETED", - "UNKNOWN_ENUM_VALUE", - "DETACHING", - "DETACHED", -] - -# If a resource is in one of these states it would be considered available -DEFAULT_READY_STATES = [ - "AVAILABLE", - "ACTIVE", - "RUNNING", - "PROVISIONED", - "ATTACHED", - "ASSIGNED", - "SUCCEEDED", - "PENDING_PROVIDER", -] - -# If a resource is in one of these states, it would be considered deleted -DEFAULT_TERMINATED_STATES = ["TERMINATED", "DETACHED", "DELETED"] - - -def get_common_arg_spec(supports_create: bool = False, supports_wait: bool = False) -> dict[str, t.Any]: - """ - Return the common set of module arguments for all OCI cloud modules. - :param supports_create: Variable to decide whether to add options related to idempotency of create operation. - :param supports_wait: Variable to decide whether to add options related to waiting for completion. - :return: A dict with applicable module options. 
- """ - # Note: This method is used by most OCI ansible resource modules during initialization. When making changes to this - # method, ensure that no `oci` python sdk dependencies are introduced in this method. This ensures that the modules - # can check for absence of OCI Python SDK and fail with an appropriate message. Introducing an OCI dependency in - # this method would break that error handling logic. - common_args = dict( - config_file_location=dict(type="str"), - config_profile_name=dict(type="str", default="DEFAULT"), - api_user=dict(type="str"), - api_user_fingerprint=dict(type="str", no_log=True), - api_user_key_file=dict(type="path"), - api_user_key_pass_phrase=dict(type="str", no_log=True), - auth_type=dict( - type="str", - required=False, - choices=["api_key", "instance_principal"], - default="api_key", - ), - tenancy=dict(type="str"), - region=dict(type="str"), - ) - - if supports_create: - common_args.update( - key_by=dict(type="list", elements="str", no_log=False), - force_create=dict(type="bool", default=False), - ) - - if supports_wait: - common_args.update( - wait=dict(type="bool", default=True), - wait_timeout=dict(type="int", default=MAX_WAIT_TIMEOUT_IN_SECONDS), - wait_until=dict(type="str"), - ) - - return common_args - - -def get_facts_module_arg_spec(filter_by_name: bool = False) -> dict[str, t.Any]: - # Note: This method is used by most OCI ansible fact modules during initialization. When making changes to this - # method, ensure that no `oci` python sdk dependencies are introduced in this method. This ensures that the modules - # can check for absence of OCI Python SDK and fail with an appropriate message. Introducing an OCI dependency in - # this method would break that error handling logic. 
- facts_module_arg_spec = get_common_arg_spec() - if filter_by_name: - facts_module_arg_spec.update(name=dict(type="str")) - else: - facts_module_arg_spec.update(display_name=dict(type="str")) - return facts_module_arg_spec - - -def get_oci_config(module: AnsibleModule, service_client_class=None): - """Return the OCI configuration to use for all OCI API calls. The effective OCI configuration is derived by merging - any overrides specified for configuration attributes through Ansible module options or environment variables. The - order of precedence for deriving the effective configuration dict is: - 1. If a config file is provided, use that to setup the initial config dict. - 2. If a config profile is specified, use that config profile to setup the config dict. - 3. For each authentication attribute, check if an override is provided either through - a. Ansible Module option - b. Environment variable - and override the value in the config dict in that order.""" - config = {} - - config_file = module.params.get("config_file_location") - _debug(f"Config file through module options - {config_file} ") - if not config_file: - if "OCI_CONFIG_FILE" in os.environ: - config_file = os.environ["OCI_CONFIG_FILE"] - _debug(f"Config file through OCI_CONFIG_FILE environment variable - {config_file}") - else: - config_file = "~/.oci/config" - _debug(f"Config file (fallback) - {config_file} ") - - config_profile = module.params.get("config_profile_name") - if not config_profile: - if "OCI_CONFIG_PROFILE" in os.environ: - config_profile = os.environ["OCI_CONFIG_PROFILE"] - else: - config_profile = "DEFAULT" - try: - config = oci.config.from_file(file_location=config_file, profile_name=config_profile) - except ( - ConfigFileNotFound, - InvalidConfig, - InvalidPrivateKey, - MissingPrivateKeyPassphrase, - ) as ex: - if not _is_instance_principal_auth(module): - # When auth_type is not instance_principal, config file is required - module.fail_json(msg=str(ex)) - else: - _debug(f"Ignore 
{ex} as the auth_type is set to instance_principal") - # if instance_principal auth is used, an empty 'config' map is used below. - - config["additional_user_agent"] = f"Oracle-Ansible/{__version__}" - # Merge any overrides through other IAM options - _merge_auth_option( - config, - module, - module_option_name="api_user", - env_var_name="OCI_USER_ID", - config_attr_name="user", - ) - _merge_auth_option( - config, - module, - module_option_name="api_user_fingerprint", - env_var_name="OCI_USER_FINGERPRINT", - config_attr_name="fingerprint", - ) - _merge_auth_option( - config, - module, - module_option_name="api_user_key_file", - env_var_name="OCI_USER_KEY_FILE", - config_attr_name="key_file", - ) - _merge_auth_option( - config, - module, - module_option_name="api_user_key_pass_phrase", - env_var_name="OCI_USER_KEY_PASS_PHRASE", - config_attr_name="pass_phrase", - ) - _merge_auth_option( - config, - module, - module_option_name="tenancy", - env_var_name="OCI_TENANCY", - config_attr_name="tenancy", - ) - _merge_auth_option( - config, - module, - module_option_name="region", - env_var_name="OCI_REGION", - config_attr_name="region", - ) - - # Redirect calls to home region for IAM service. - do_not_redirect = module.params.get("do_not_redirect_to_home_region", False) or os.environ.get( - "OCI_IDENTITY_DO_NOT_REDIRECT_TO_HOME_REGION" - ) - if service_client_class == IdentityClient and not do_not_redirect: - _debug(f"Region passed for module invocation - {config['region']} ") - identity_client = IdentityClient(config) - region_subscriptions = identity_client.list_region_subscriptions(config["tenancy"]).data - # Replace the region in the config with the home region. 
- [config["region"]] = [rs.region_name for rs in region_subscriptions if rs.is_home_region is True] - _debug(f"Setting region in the config to home region - {config['region']} ") - - return config - - -def create_service_client(module: AnsibleModule, service_client_class): - """ - Creates a service client using the common module options provided by the user. - :param module: An AnsibleModule that represents user provided options for a Task - :param service_client_class: A class that represents a client to an OCI Service - :return: A fully configured client - """ - config = get_oci_config(module, service_client_class) - kwargs = {} - - if _is_instance_principal_auth(module): - try: - signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner() - except Exception as ex: - message = ( - "Failed retrieving certificates from localhost. Instance principal based authentication is only" - f"possible from within OCI compute instances. Exception: {ex}" - ) - module.fail_json(msg=message) - - kwargs["signer"] = signer - - # XXX: Validate configuration -- this may be redundant, as all Client constructors perform a validation - try: - oci.config.validate_config(config, **kwargs) - except oci.exceptions.InvalidConfig as ic: - module.fail_json(msg=f"Invalid OCI configuration. 
Exception: {ic}") - - # Create service client class with the signer - client = service_client_class(config, **kwargs) - - return client - - -def _is_instance_principal_auth(module: AnsibleModule): - # check if auth type is overridden via module params - instance_principal_auth = "auth_type" in module.params and module.params["auth_type"] == "instance_principal" - if not instance_principal_auth: - instance_principal_auth = ( - "OCI_ANSIBLE_AUTH_TYPE" in os.environ and os.environ["OCI_ANSIBLE_AUTH_TYPE"] == "instance_principal" - ) - return instance_principal_auth - - -def _merge_auth_option( - config, module: AnsibleModule, module_option_name: str, env_var_name: str, config_attr_name: str -) -> None: - """Merge the values for an authentication attribute from ansible module options and - environment variables with the values specified in a configuration file""" - _debug(f"Merging {module_option_name}") - - auth_attribute = module.params.get(module_option_name) - _debug(f"\t Ansible module option {module_option_name} = {auth_attribute}") - if not auth_attribute: - if env_var_name in os.environ: - auth_attribute = os.environ[env_var_name] - _debug(f"\t Environment variable {env_var_name} = {auth_attribute}") - - # An authentication attribute has been provided through an env-variable or an ansible - # option and must override the corresponding attribute's value specified in the - # config file [profile]. 
- if auth_attribute: - _debug(f"Updating config attribute {config_attr_name} -> {auth_attribute} ") - config.update({config_attr_name: auth_attribute}) - - -def bucket_details_factory(bucket_details_type: t.Literal["create", "update"], module: AnsibleModule): - bucket_details = None - if bucket_details_type == "create": - bucket_details = CreateBucketDetails() - elif bucket_details_type == "update": - bucket_details = UpdateBucketDetails() - - bucket_details.compartment_id = module.params["compartment_id"] - bucket_details.name = module.params["name"] - bucket_details.public_access_type = module.params["public_access_type"] - bucket_details.metadata = module.params["metadata"] - - return bucket_details - - -def filter_resources(all_resources, filter_params) -> list: - if not filter_params: - return all_resources - filtered_resources = [] - filtered_resources.extend( - [ - resource - for resource in all_resources - for key, value in filter_params.items() - if getattr(resource, key) == value - ] - ) - return filtered_resources - - -def list_all_resources(target_fn, **kwargs) -> list: - """ - Return all resources after paging through all results returned by target_fn. If a `display_name` or `name` is - provided as a kwarg, then only resources matching the specified name are returned. 
- :param target_fn: The target OCI SDK paged function to call - :param kwargs: All arguments that the OCI SDK paged function expects - :return: List of all objects returned by target_fn - :raises ServiceError: When the Service returned an Error response - :raises MaximumWaitTimeExceededError: When maximum wait time is exceeded while invoking target_fn - """ - filter_params = None - try: - response = call_with_backoff(target_fn, **kwargs) - except ValueError as ex: - if "unknown kwargs" in str(ex): - if "display_name" in kwargs: - if kwargs["display_name"]: - filter_params = {"display_name": kwargs["display_name"]} - del kwargs["display_name"] - elif "name" in kwargs: - if kwargs["name"]: - filter_params = {"name": kwargs["name"]} - del kwargs["name"] - response = call_with_backoff(target_fn, **kwargs) - - existing_resources = response.data - while response.has_next_page: - kwargs.update(page=response.headers.get(HEADER_NEXT_PAGE)) - response = call_with_backoff(target_fn, **kwargs) - existing_resources += response.data - - # If the underlying SDK Service list* method doesn't support filtering by name or display_name, filter the resources - # and return the matching list of resources - return filter_resources(existing_resources, filter_params) - - -def _debug(s) -> None: - get_logger("oci_utils").debug(s) - - -def get_logger(module_name: str): - oci_logging = setup_logging() - return oci_logging.getLogger(module_name) - - -def setup_logging( - default_level: str = "INFO", -): - """Setup logging configuration""" - env_log_path = "LOG_PATH" - env_log_level = "LOG_LEVEL" - - default_log_path = tempfile.gettempdir() - log_path = os.getenv(env_log_path, default_log_path) - log_level_str = os.getenv(env_log_level, default_level) - log_level = logging.getLevelName(log_level_str) - log_file_path = os.path.join(log_path, "oci_ansible_module.log") - logging.basicConfig(filename=log_file_path, filemode="a", level=log_level) - return logging - - -def 
check_and_update_attributes(target_instance, attr_name: str, input_value, existing_value, changed: bool) -> bool: - """ - This function checks the difference between two resource attributes of literal types and sets the attribute - value in the target instance type holding the attribute. - :param target_instance: The instance which contains the attribute whose values to be compared - :param attr_name: Name of the attribute whose value required to be compared - :param input_value: The value of the attribute provided by user - :param existing_value: The value of the attribute in the existing resource - :param changed: Flag to indicate whether there is any difference between the values - :return: Returns a boolean value indicating whether there is any difference between the values - """ - if input_value is not None and not eq(input_value, existing_value): - changed = True - target_instance.__setattr__(attr_name, input_value) - else: - target_instance.__setattr__(attr_name, existing_value) - return changed - - -def check_and_update_resource( - resource_type, - get_fn, - kwargs_get, - update_fn, - primitive_params_update, - kwargs_non_primitive_update, - module: AnsibleModule, - update_attributes, - client=None, - sub_attributes_of_update_model=None, - wait_applicable=True, - states=None, -) -> dict[str, t.Any]: - """ - This function handles update operation on a resource. It checks whether update is required and accordingly returns - the resource and the changed status. - :param wait_applicable: Indicates if the resource support wait - :param client: The resource Client class to use to perform the wait checks. This param must be specified if - wait_applicable is True - :param resource_type: The type of the resource. e.g. "private_ip" - :param get_fn: Function used to get the resource. e.g. virtual_network_client.get_private_ip - :param kwargs_get: Dictionary containing the arguments to be used to call get function. - e.g. 
{"private_ip_id": module.params["private_ip_id"]} - :param update_fn: Function used to update the resource. e.g virtual_network_client.update_private_ip - :param primitive_params_update: List of primitive parameters used for update function. e.g. ['private_ip_id'] - :param kwargs_non_primitive_update: Dictionary containing the non-primitive arguments to be used to call get - function with key as the non-primitive argument type & value as the name of the non-primitive argument to be passed - to the update function. e.g. {UpdatePrivateIpDetails: "update_private_ip_details"} - :param module: Instance of AnsibleModule - :param update_attributes: Attributes in update model. - :param states: List of lifecycle states to watch for while waiting after create_fn is called. - e.g. [module.params['wait_until'], "FAULTY"] - :param sub_attributes_of_update_model: Dictionary of non-primitive sub-attributes of update model. for example, - {'services': [ServiceIdRequestDetails()]} as in UpdateServiceGatewayDetails. - :return: Returns a dictionary containing the "changed" status and the resource. 
- """ - try: - result = dict(changed=False) - attributes_to_update, resource = get_attr_to_update(get_fn, kwargs_get, module, update_attributes) - - if attributes_to_update: - kwargs_update = get_kwargs_update( - attributes_to_update, - kwargs_non_primitive_update, - module, - primitive_params_update, - sub_attributes_of_update_model, - ) - resource = call_with_backoff(update_fn, **kwargs_update).data - if wait_applicable: - if client is None: - module.fail_json(msg="wait_applicable is True, but client is not specified.") - resource = wait_for_resource_lifecycle_state( - client, module, True, kwargs_get, get_fn, None, resource, states - ) - result["changed"] = True - result[resource_type] = to_dict(resource) - return result - except ServiceError as ex: - module.fail_json(msg=ex.message) - - -def get_kwargs_update( - attributes_to_update, - kwargs_non_primitive_update, - module: AnsibleModule, - primitive_params_update, - sub_attributes_of_update_model=None, -) -> dict[str, t.Any]: - kwargs_update = dict() - for param in primitive_params_update: - kwargs_update[param] = module.params[param] - for param in kwargs_non_primitive_update: - update_object = param() - for key in update_object.attribute_map: - if key in attributes_to_update: - if sub_attributes_of_update_model and key in sub_attributes_of_update_model: - setattr(update_object, key, sub_attributes_of_update_model[key]) - else: - setattr(update_object, key, module.params[key]) - kwargs_update[kwargs_non_primitive_update[param]] = update_object - return kwargs_update - - -def is_dictionary_subset(sub: dict, super_dict: dict) -> bool: - """ - This function checks if `sub` dictionary is a subset of `super` dictionary. - :param sub: subset dictionary, for example user_provided_attr_value. - :param super_dict: super dictionary, for example resources_attr_value. - :return: True if sub is contained in super. 
- """ - return all(sub[key] == super_dict[key] for key in sub) - - -def are_lists_equal(s: list | None, t: list | None): - if s is None and t is None: - return True - - if s is None or t is None or (len(s) != len(t)): - return False - - if len(s) == 0: - return True - - s = to_dict(s) - t = to_dict(t) - - if isinstance(s[0], dict): - # Handle list of dicts. Dictionary returned by the API may have additional keys. For example, a get call on - # service gateway has an attribute `services` which is a list of `ServiceIdResponseDetails`. This has a key - # `service_name` which is not provided in the list of `services` by a user while making an update call; only - # `service_id` is provided by the user in the update call. - sorted_s = sort_list_of_dictionary(s) - sorted_t = sort_list_of_dictionary(t) - return all(is_dictionary_subset(d, sorted_t[index]) for index, d in enumerate(sorted_s)) - else: - # Handle lists of primitive types. - try: - for elem in s: - t.remove(elem) - except ValueError: - return False - return not t - - -def get_attr_to_update(get_fn, kwargs_get, module: AnsibleModule, update_attributes) -> tuple: - try: - resource = call_with_backoff(get_fn, **kwargs_get).data - except ServiceError as ex: - module.fail_json(msg=ex.message) - - attributes_to_update = [] - - for attr in update_attributes: - resources_attr_value = getattr(resource, attr, None) - user_provided_attr_value = module.params.get(attr, None) - - unequal_list_attr = ( - isinstance(resources_attr_value, list) or isinstance(user_provided_attr_value, list) - ) and not are_lists_equal(user_provided_attr_value, resources_attr_value) - unequal_attr = not isinstance(resources_attr_value, list) and to_dict(resources_attr_value) != to_dict( - user_provided_attr_value - ) - if unequal_list_attr or unequal_attr: - # only update if the user has explicitly provided a value for this attribute - # otherwise, no update is necessary because the user hasn't expressed a particular - # value for that 
attribute - if module.params.get(attr, None): - attributes_to_update.append(attr) - - return attributes_to_update, resource - - -def get_taggable_arg_spec(supports_create: bool = False, supports_wait: bool = False) -> dict[str, t.Any]: - """ - Returns an arg_spec that is valid for taggable OCI resources. - :return: A dict that represents an ansible arg spec that builds over the common_arg_spec and adds free-form and - defined tags. - """ - tag_arg_spec = get_common_arg_spec(supports_create, supports_wait) - tag_arg_spec.update(dict(freeform_tags=dict(type="dict"), defined_tags=dict(type="dict"))) - return tag_arg_spec - - -def add_tags_to_model_from_module(model, module: AnsibleModule): - """ - Adds free-form and defined tags from an ansible module to a resource model - :param model: A resource model instance that supports 'freeform_tags' and 'defined_tags' as attributes - :param module: An AnsibleModule representing the options provided by the user - :return: The updated model class with the tags specified by the user. - """ - freeform_tags = module.params.get("freeform_tags", None) - defined_tags = module.params.get("defined_tags", None) - return add_tags_to_model_class(model, freeform_tags, defined_tags) - - -def add_tags_to_model_class(model, freeform_tags, defined_tags): - """ - Add free-form and defined tags to a resource model. 
- :param model: A resource model instance that supports 'freeform_tags' and 'defined_tags' as attributes - :param freeform_tags: A dict representing the freeform_tags to be applied to the model - :param defined_tags: A dict representing the defined_tags to be applied to the model - :return: The updated model class with the tags specified by the user - """ - try: - if freeform_tags is not None: - _debug(f"Model {model} set freeform tags to {freeform_tags}") - model.__setattr__("freeform_tags", freeform_tags) - - if defined_tags is not None: - _debug(f"Model {model} set defined tags to {defined_tags}") - model.__setattr__("defined_tags", defined_tags) - except AttributeError as ae: - _debug(f"Model {model} doesn't support tags. Error {ae}") - - return model - - -def check_and_create_resource( - resource_type, - create_fn, - kwargs_create, - list_fn, - kwargs_list, - module: AnsibleModule, - model, - existing_resources=None, - exclude_attributes=None, - dead_states=None, - default_attribute_values=None, - supports_sort_by_time_created=True, -): - """ - This function checks whether there is a resource with same attributes as specified in the module options. If not, - it creates and returns the resource. - :param resource_type: Type of the resource to be created. - :param create_fn: Function used in the module to handle create operation. The function should return a dict with - keys as resource & changed. - :param kwargs_create: Dictionary of parameters for create operation. - :param list_fn: List function in sdk to list all the resources of type resource_type. - :param kwargs_list: Dictionary of parameters for list operation. - :param module: Instance of AnsibleModule - :param model: Model used to create a resource. - :param exclude_attributes: The attributes which should not be used to distinguish the resource. e.g. display_name, - dns_label. - :param dead_states: List of states which can't transition to any of the usable states of the resource. 
This defaults - to ["TERMINATING", "TERMINATED", "FAULTY", "FAILED", "DELETING", "DELETED", "UNKNOWN_ENUM_VALUE"] - :param default_attribute_values: A dictionary containing default values for attributes. - :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True} - """ - - if module.params.get("force_create", None): - _debug(f"Force creating {resource_type}") - result = call_with_backoff(create_fn, **kwargs_create) - return result - - # Get the existing resources list sorted by creation time in descending order. Return the latest matching resource - # in case of multiple resource matches. - if exclude_attributes is None: - exclude_attributes = {} - if default_attribute_values is None: - default_attribute_values = {} - try: - if existing_resources is None: - if supports_sort_by_time_created: - kwargs_list["sort_by"] = "TIMECREATED" - existing_resources = list_all_resources(list_fn, **kwargs_list) - except ValueError: - # list_fn doesn't support sort_by, so remove the sort_by key in kwargs_list and retry - kwargs_list.pop("sort_by", None) - try: - existing_resources = list_all_resources(list_fn, **kwargs_list) - # Handle errors like 404 due to bad arguments to the list_all_resources call. 
- except ServiceError as ex: - module.fail_json(msg=ex.message) - except ServiceError as ex: - module.fail_json(msg=ex.message) - - result = dict() - - attributes_to_consider = _get_attributes_to_consider(exclude_attributes, model, module) - if "defined_tags" not in default_attribute_values: - default_attribute_values["defined_tags"] = {} - resource_matched = None - _debug(f"Trying to find a match within {len(existing_resources)} existing resources") - - for resource in existing_resources: - if _is_resource_active(resource, dead_states): - _debug( - f"Comparing user specified values {module.params} against an existing resource's " - f"values {to_dict(resource)}" - ) - if does_existing_resource_match_user_inputs( - to_dict(resource), - module, - attributes_to_consider, - exclude_attributes, - default_attribute_values, - ): - resource_matched = to_dict(resource) - break - - if resource_matched: - _debug(f"Resource with same attributes found: {resource_matched}.") - result[resource_type] = resource_matched - result["changed"] = False - else: - _debug("No matching resource found. Attempting to create a new resource.") - result = call_with_backoff(create_fn, **kwargs_create) - - return result - - -def _get_attributes_to_consider(exclude_attributes, model, module: AnsibleModule): - """ - Determine the attributes to detect if an existing resource already matches the requested resource state - :param exclude_attributes: Attributes to not consider for matching - :param model: The model class used to create the Resource - :param module: An instance of AnsibleModule that contains user's desires around a resource's state - :return: A list of attributes that needs to be matched - """ - - # If a user explicitly requests us to match only against a set of resources (using 'key_by', use that as the list - # of attributes to consider for matching. 
- if "key_by" in module.params and module.params["key_by"] is not None: - attributes_to_consider = module.params["key_by"] - else: - # Consider all attributes except freeform_tags as freeform tags do not distinguish a resource. - attributes_to_consider = list(model.attribute_map) - if "freeform_tags" in attributes_to_consider: - attributes_to_consider.remove("freeform_tags") - # Temporarily removing node_count as the existing resource does not reflect it - if "node_count" in attributes_to_consider: - attributes_to_consider.remove("node_count") - _debug(f"attributes to consider: {attributes_to_consider}") - return attributes_to_consider - - -def _is_resource_active(resource, dead_states): - if dead_states is None: - dead_states = DEAD_STATES - - if "lifecycle_state" not in resource.attribute_map: - return True - return resource.lifecycle_state not in dead_states - - -def is_attr_assigned_default(default_attribute_values, attr, assigned_value): - if not default_attribute_values: - return False - - if attr in default_attribute_values: - default_val_for_attr = default_attribute_values.get(attr, None) - if isinstance(default_val_for_attr, dict): - # When default value for a resource's attribute is empty dictionary, check if the corresponding value of the - # existing resource's attribute is also empty. 
- if not default_val_for_attr: - return not assigned_value - # only compare keys that are in default_attribute_values[attr] - # this is to ensure forward compatibility when the API returns new keys that are not known during - # the time when the module author provided default values for the attribute - keys = {} - for k, v in assigned_value.items().items(): - if k in default_val_for_attr: - keys[k] = v - - return default_val_for_attr == keys - # non-dict, normal comparison - return default_val_for_attr == assigned_value - else: - # module author has not provided a default value for attr - return True - - -def create_resource(resource_type, create_fn, kwargs_create, module: AnsibleModule): - """ - Create an OCI resource - :param resource_type: Type of the resource to be created. e.g.: "vcn" - :param create_fn: Function in the SDK to create the resource. e.g. virtual_network_client.create_vcn - :param kwargs_create: Dictionary containing arguments to be used to call the create function create_fn - :param module: Instance of AnsibleModule - """ - result = dict(changed=False) - try: - resource = to_dict(call_with_backoff(create_fn, **kwargs_create).data) - _debug(f"Created {resource_type}, {resource}") - result["changed"] = True - result[resource_type] = resource - return result - except (ServiceError, TypeError) as ex: - module.fail_json(msg=str(ex)) - - -def does_existing_resource_match_user_inputs( - existing_resource, - module: AnsibleModule, - attributes_to_compare, - exclude_attributes, - default_attribute_values=None, -): - """ - Check if 'attributes_to_compare' in an existing_resource match the desired state provided by a user in 'module'. - :param existing_resource: A dictionary representing an existing resource's values. - :param module: The AnsibleModule representing the options provided by the user. 
- :param attributes_to_compare: A list of attributes of a resource that are used to compare if an existing resource - matches the desire state of the resource expressed by the user in 'module'. - :param exclude_attributes: The attributes, that a module author provides, which should not be used to match the - resource. This dictionary typically includes: (a) attributes which are initialized with dynamic default values - like 'display_name', 'security_list_ids' for subnets and (b) attributes that don't have any defaults like - 'dns_label' in VCNs. The attributes are part of keys and 'True' is the value for all existing keys. - :param default_attribute_values: A dictionary containing default values for attributes. - :return: True if the values for the list of attributes is the same in the existing_resource and module instances. - """ - if not default_attribute_values: - default_attribute_values = {} - for attr in attributes_to_compare: - attribute_with_default_metadata = None - if attr in existing_resource: - resources_value_for_attr = existing_resource[attr] - # Check if the user has explicitly provided the value for attr. - user_provided_value_for_attr = _get_user_provided_value(module, attr) - if user_provided_value_for_attr is not None: - res = [True] - check_if_user_value_matches_resources_attr( - attr, - resources_value_for_attr, - user_provided_value_for_attr, - exclude_attributes, - default_attribute_values, - res, - ) - if not res[0]: - _debug( - f"Mismatch on attribute '{attr}'. User provided value is {user_provided_value_for_attr} & existing resource's value" - f"is {resources_value_for_attr}." - ) - return False - else: - # If the user has not explicitly provided the value for attr and attr is in exclude_list, we can - # consider this as a 'pass'. 
For example, if an attribute 'display_name' is not specified by user and - # that attribute is in the 'exclude_list' according to the module author(Not User), then exclude - if exclude_attributes.get(attr) is None and resources_value_for_attr is not None: - if module.argument_spec.get(attr): - attribute_with_default_metadata = module.argument_spec.get(attr) - default_attribute_value = attribute_with_default_metadata.get("default", None) - if default_attribute_value is not None: - if existing_resource[attr] != default_attribute_value: - return False - # Check if attr has a value that is not default. For example, a custom `security_list_id` - # is assigned to the subnet's attribute `security_list_ids`. If the attribute is assigned a - # value that is not the default, then it must be considered a mismatch and false returned. - elif not is_attr_assigned_default(default_attribute_values, attr, existing_resource[attr]): - return False - - else: - _debug( - f"Attribute {attr} is in the create model of resource {existing_resource.__class__}" - "but doesn't exist in the get model of the resource" - ) - return True - - -def tuplize(d) -> list[tuple]: - """ - This function takes a dictionary and converts it to a list of tuples recursively. - :param d: A dictionary. - :return: List of tuples. - """ - list_of_tuples: list[tuple] = [] - key_list = sorted(list(d.keys())) - for key in key_list: - if isinstance(d[key], list): - # Convert a value which is itself a list of dict to a list of tuples. - if d[key] and isinstance(d[key][0], dict): - sub_tuples = [] - for sub_dict in d[key]: - sub_tuples.append(tuplize(sub_dict)) - # To handle comparing two None values, while creating a tuple for a {key: value}, make the first element - # in the tuple a boolean `True` if value is None so that attributes with None value are put at last - # in the sorted list. 
- list_of_tuples.append((sub_tuples is None, key, sub_tuples)) - else: - list_of_tuples.append((d[key] is None, key, d[key])) - elif isinstance(d[key], dict): - tupled_value = tuplize(d[key]) - list_of_tuples.append((tupled_value is None, key, tupled_value)) - else: - list_of_tuples.append((d[key] is None, key, d[key])) - return list_of_tuples - - -def get_key_for_comparing_dict(d) -> list[tuple]: - tuple_form_of_d = tuplize(d) - return tuple_form_of_d - - -def sort_dictionary(d: dict) -> dict: - """ - This function sorts values of a dictionary recursively. - :param d: A dictionary. - :return: Dictionary with sorted elements. - """ - sorted_d: dict = {} - for key in d: - if isinstance(d[key], list): - if d[key] and isinstance(d[key][0], dict): - sorted_value = sort_list_of_dictionary(d[key]) - sorted_d[key] = sorted_value - else: - sorted_d[key] = sorted(d[key]) - elif isinstance(d[key], dict): - sorted_d[key] = sort_dictionary(d[key]) - else: - sorted_d[key] = d[key] - return sorted_d - - -def sort_list_of_dictionary(list_of_dict: list[dict]) -> list[dict]: - """ - This functions sorts a list of dictionaries. It first sorts each value of the dictionary and then sorts the list of - individually sorted dictionaries. For sorting, each dictionary's tuple equivalent is used. - :param list_of_dict: List of dictionaries. - :return: A sorted dictionary. 
- """ - list_with_sorted_dict = [] - for d in list_of_dict: - sorted_d = sort_dictionary(d) - list_with_sorted_dict.append(sorted_d) - return sorted(list_with_sorted_dict, key=get_key_for_comparing_dict) - - -def check_if_user_value_matches_resources_attr( - attribute_name, - resources_value_for_attr, - user_provided_value_for_attr, - exclude_attributes, - default_attribute_values, - res, -): - if isinstance(default_attribute_values.get(attribute_name), dict): - default_attribute_values = default_attribute_values.get(attribute_name) - - if isinstance(exclude_attributes.get(attribute_name), dict): - exclude_attributes = exclude_attributes.get(attribute_name) - - if isinstance(resources_value_for_attr, list) or isinstance(user_provided_value_for_attr, list): - # Perform a deep equivalence check for a List attribute - if exclude_attributes.get(attribute_name): - return - if user_provided_value_for_attr is None and default_attribute_values.get(attribute_name) is not None: - user_provided_value_for_attr = default_attribute_values.get(attribute_name) - - if resources_value_for_attr is None and user_provided_value_for_attr is None: - return - - if resources_value_for_attr is None or user_provided_value_for_attr is None: - res[0] = False - return - - if ( - resources_value_for_attr is not None - and user_provided_value_for_attr is not None - and len(resources_value_for_attr) != len(user_provided_value_for_attr) - ): - res[0] = False - return - - if user_provided_value_for_attr and isinstance(user_provided_value_for_attr[0], dict): - # Process a list of dict - sorted_user_provided_value_for_attr = sort_list_of_dictionary(user_provided_value_for_attr) - sorted_resources_value_for_attr = sort_list_of_dictionary(resources_value_for_attr) - - else: - sorted_user_provided_value_for_attr = sorted(user_provided_value_for_attr) - sorted_resources_value_for_attr = sorted(resources_value_for_attr) - - # Walk through the sorted list values of the resource's value for this attribute, 
and compare against user - # provided values. - for index, resources_value_for_attr_part in enumerate(sorted_resources_value_for_attr): - check_if_user_value_matches_resources_attr( - attribute_name, - resources_value_for_attr_part, - sorted_user_provided_value_for_attr[index], - exclude_attributes, - default_attribute_values, - res, - ) - - elif isinstance(resources_value_for_attr, dict): - # Perform a deep equivalence check for dict typed attributes - - if not resources_value_for_attr and user_provided_value_for_attr: - res[0] = False - for key in resources_value_for_attr: - if user_provided_value_for_attr is not None and user_provided_value_for_attr: - check_if_user_value_matches_resources_attr( - key, - resources_value_for_attr.get(key), - user_provided_value_for_attr.get(key), - exclude_attributes, - default_attribute_values, - res, - ) - else: - if exclude_attributes.get(key) is None: - if default_attribute_values.get(key) is not None: - user_provided_value_for_attr = default_attribute_values.get(key) - check_if_user_value_matches_resources_attr( - key, - resources_value_for_attr.get(key), - user_provided_value_for_attr, - exclude_attributes, - default_attribute_values, - res, - ) - else: - res[0] = is_attr_assigned_default( - default_attribute_values, - attribute_name, - resources_value_for_attr.get(key), - ) - - elif resources_value_for_attr != user_provided_value_for_attr: - if exclude_attributes.get(attribute_name) is None and default_attribute_values.get(attribute_name) is not None: - # As the user has not specified a value for an optional attribute, if the existing resource's - # current state has a DEFAULT value for that attribute, we must not consider this incongruence - # an issue and continue with other checks. If the existing resource's value for the attribute - # is not the default value, then the existing resource is not a match. 
- if not is_attr_assigned_default(default_attribute_values, attribute_name, resources_value_for_attr): - res[0] = False - elif user_provided_value_for_attr is not None: - res[0] = False - - -def are_dicts_equal( - option_name, - existing_resource_dict, - user_provided_dict, - exclude_list, - default_attribute_values, -): - if not user_provided_dict: - # User has not provided a value for the map option. In this case, the user hasn't expressed an intent around - # this optional attribute. Check if existing_resource_dict matches default. - # For example, source_details attribute in volume is optional and does not have any defaults. - return is_attr_assigned_default(default_attribute_values, option_name, existing_resource_dict) - - # If the existing resource has an empty dict, while the user has provided entries, dicts are not equal - if not existing_resource_dict and user_provided_dict: - return False - - # check if all keys of an existing resource's dict attribute matches user-provided dict's entries - for sub_attr in existing_resource_dict: - # If user has provided value for sub-attribute, then compare it with corresponding key in existing resource. - if sub_attr in user_provided_dict: - if existing_resource_dict[sub_attr] != user_provided_dict[sub_attr]: - _debug( - f"Failed to match: Existing resource's attr {option_name} sub-attr {sub_attr} value is {existing_resource_dict[sub_attr]}, while user " - f"provided value is {user_provided_dict.get(sub_attr, None)}" - ) - return False - - # If sub_attr not provided by user, check if the sub-attribute value of existing resource matches default value. 
- else: - if not should_dict_attr_be_excluded(option_name, sub_attr, exclude_list): - default_value_for_dict_attr = default_attribute_values.get(option_name, None) - if default_value_for_dict_attr: - # if a default value for the sub-attr was provided by the module author, fail if the existing - # resource's value for the sub-attr is not the default - if not is_attr_assigned_default( - default_value_for_dict_attr, - sub_attr, - existing_resource_dict[sub_attr], - ): - return False - else: - # No default value specified by module author for sub_attr - _debug( - f"Consider as match: Existing resource's attr {option_name} sub-attr {sub_attr} value is" - f" {existing_resource_dict[sub_attr]}, while user did" - "not provide a value for it. The module author also has not provided a default value for it" - "or marked it for exclusion. So ignoring this attribute during matching and continuing with" - "other checks" - ) - - return True - - -def should_dict_attr_be_excluded(map_option_name, option_key, exclude_list): - """An entry for the Exclude list for excluding a map's key is specified as a dict with the map option name as the - key, and the value as a list of keys to be excluded within that map. For example, if the keys "k1" and "k2" of a map - option named "m1" needs to be excluded, the exclude list must have an entry {'m1': ['k1','k2']}""" - for exclude_item in exclude_list: - if isinstance(exclude_item, dict): - if map_option_name in exclude_item: - if option_key in exclude_item[map_option_name]: - return True - return False - - -def create_and_wait( - resource_type, - client, - create_fn, - kwargs_create, - get_fn, - get_param, - module: AnsibleModule, - states=None, - wait_applicable=True, - kwargs_get=None, -): - """ - A utility function to create a resource and wait for the resource to get into the state as specified in the module - options. 
- :param wait_applicable: Specifies if wait for create is applicable for this resource - :param resource_type: Type of the resource to be created. e.g. "vcn" - :param client: OCI service client instance to call the service periodically to retrieve data. - e.g. VirtualNetworkClient() - :param create_fn: Function in the SDK to create the resource. e.g. virtual_network_client.create_vcn - :param kwargs_create: Dictionary containing arguments to be used to call the create function create_fn. - :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn - :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id" - :param module: Instance of AnsibleModule. - :param states: List of lifecycle states to watch for while waiting after create_fn is called. - e.g. [module.params['wait_until'], "FAULTY"] - :param kwargs_get: Dictionary containing arguments to be used to call a multi-argument `get` function - :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True} - """ - try: - return create_or_update_resource_and_wait( - resource_type, - create_fn, - kwargs_create, - module, - wait_applicable, - get_fn, - get_param, - states, - client, - kwargs_get, - ) - except MaximumWaitTimeExceeded as ex: - module.fail_json(msg=str(ex)) - except ServiceError as ex: - module.fail_json(msg=ex.message) - - -def update_and_wait( - resource_type, - client, - update_fn, - kwargs_update, - get_fn, - get_param, - module: AnsibleModule, - states=None, - wait_applicable=True, - kwargs_get=None, -): - """ - A utility function to update a resource and wait for the resource to get into the state as specified in the module - options. It wraps the create_and_wait method as apart from the method and arguments, everything else is similar. - :param wait_applicable: Specifies if wait for create is applicable for this resource - :param resource_type: Type of the resource to be created. e.g. 
"vcn" - :param client: OCI service client instance to call the service periodically to retrieve data. - e.g. VirtualNetworkClient() - :param update_fn: Function in the SDK to update the resource. e.g. virtual_network_client.update_vcn - :param kwargs_update: Dictionary containing arguments to be used to call the update function update_fn. - :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn - :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id" - :param module: Instance of AnsibleModule. - :param kwargs_get: Dictionary containing arguments to be used to call the get function which requires multiple arguments. - :param states: List of lifecycle states to watch for while waiting after update_fn is called. - e.g. [module.params['wait_until'], "FAULTY"] - :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True} - """ - try: - return create_or_update_resource_and_wait( - resource_type, - update_fn, - kwargs_update, - module, - wait_applicable, - get_fn, - get_param, - states, - client, - kwargs_get=kwargs_get, - ) - except MaximumWaitTimeExceeded as ex: - module.fail_json(msg=str(ex)) - except ServiceError as ex: - module.fail_json(msg=ex.message) - - -def create_or_update_resource_and_wait( - resource_type, - function, - kwargs_function, - module: AnsibleModule, - wait_applicable, - get_fn, - get_param, - states, - client, - update_target_resource_id_in_get_param=False, - kwargs_get=None, -): - """ - A utility function to create or update a resource and wait for the resource to get into the state as specified in - the module options. - :param resource_type: Type of the resource to be created. e.g. "vcn" - :param function: Function in the SDK to create or update the resource. - :param kwargs_function: Dictionary containing arguments to be used to call the create or update function - :param module: Instance of AnsibleModule. 
- :param wait_applicable: Specifies if wait for create is applicable for this resource - :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn - :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id" - :param states: List of lifecycle states to watch for while waiting after create_fn is called. - e.g. [module.params['wait_until'], "FAULTY"] - :param client: OCI service client instance to call the service periodically to retrieve data. - e.g. VirtualNetworkClient() - :param kwargs_get: Dictionary containing arguments to be used to call the get function which requires multiple arguments. - :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True} - """ - result = create_resource(resource_type, function, kwargs_function, module) - resource = result[resource_type] - result[resource_type] = wait_for_resource_lifecycle_state( - client, - module, - wait_applicable, - kwargs_get, - get_fn, - get_param, - resource, - states, - resource_type, - ) - return result - - -def wait_for_resource_lifecycle_state( - client, - module: AnsibleModule, - wait_applicable, - kwargs_get, - get_fn, - get_param, - resource, - states, - resource_type=None, -): - """ - A utility function to wait for the resource to get into the state as specified in - the module options. - :param client: OCI service client instance to call the service periodically to retrieve data. - e.g. VirtualNetworkClient - :param module: Instance of AnsibleModule. - :param wait_applicable: Specifies if wait for create is applicable for this resource - :param kwargs_get: Dictionary containing arguments to be used to call the get function which requires multiple arguments. - :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn - :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id" - :param resource_type: Type of the resource to be created. e.g. 
"vcn" - :param states: List of lifecycle states to watch for while waiting after create_fn is called. - e.g. [module.params['wait_until'], "FAULTY"] - :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True} - """ - if wait_applicable and module.params.get("wait", None): - if resource_type == "compartment": - # An immediate attempt to retrieve a compartment after a compartment is created fails with - # 'Authorization failed or requested resource not found', 'status': 404}. - # This is because it takes few seconds for the permissions on a compartment to be ready. - # Wait for few seconds before attempting a get call on compartment. - _debug("Pausing execution for permission on the newly created compartment to be ready.") - time.sleep(15) - if kwargs_get: - _debug(f"Waiting for resource to reach READY state. get_args: {kwargs_get}") - response_get = call_with_backoff(get_fn, **kwargs_get) - else: - _debug(f"Waiting for resource with id {resource['id']} to reach READY state.") - response_get = call_with_backoff(get_fn, **{get_param: resource["id"]}) - if states is None: - states = module.params.get("wait_until") or DEFAULT_READY_STATES - resource = to_dict( - oci.wait_until( - client, - response_get, - evaluate_response=lambda r: r.data.lifecycle_state in states, - max_wait_seconds=module.params.get("wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS), - ).data - ) - return resource - - -def wait_on_work_request(client, response, module: AnsibleModule): - try: - if module.params.get("wait", None): - _debug(f"Waiting for work request with id {response.data.id} to reach SUCCEEDED state.") - wait_response = oci.wait_until( - client, - response, - evaluate_response=lambda r: r.data.status == "SUCCEEDED", - max_wait_seconds=module.params.get("wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS), - ) - else: - _debug(f"Waiting for work request with id {response.data.id} to reach ACCEPTED state.") - wait_response = oci.wait_until( - client, - 
response, - evaluate_response=lambda r: r.data.status == "ACCEPTED", - max_wait_seconds=module.params.get("wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS), - ) - except MaximumWaitTimeExceeded as ex: - _debug(str(ex)) - module.fail_json(msg=str(ex)) - except ServiceError as ex: - _debug(str(ex)) - module.fail_json(msg=str(ex)) - return wait_response.data - - -def delete_and_wait( - resource_type, - client, - get_fn, - kwargs_get, - delete_fn, - kwargs_delete, - module: AnsibleModule, - states=None, - wait_applicable=True, - process_work_request=False, -) -> dict[str, t.Any]: - """A utility function to delete a resource and wait for the resource to get into the state as specified in the - module options. - :param wait_applicable: Specifies if wait for delete is applicable for this resource - :param resource_type: Type of the resource to be deleted. e.g. "vcn" - :param client: OCI service client instance to call the service periodically to retrieve data. - e.g. VirtualNetworkClient() - :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn - :param kwargs_get: Dictionary of arguments for get function get_fn. e.g. {"vcn_id": module.params["id"]} - :param delete_fn: Function in the SDK to delete the resource. e.g. virtual_network_client.delete_vcn - :param kwargs_delete: Dictionary of arguments for delete function delete_fn. e.g. {"vcn_id": module.params["id"]} - :param module: Instance of AnsibleModule. - :param states: List of lifecycle states to watch for while waiting after delete_fn is called. If nothing is passed, - defaults to ["TERMINATED", "DETACHED", "DELETED"]. - :param process_work_request: Whether a work request is generated on an API call and if it needs to be handled. - :return: A dictionary containing the resource & the "changed" status. e.g. 
{"vcn":{x:y}, "changed":True} - """ - - states_set = {"DETACHING", "DETACHED", "DELETING", "DELETED", "TERMINATING", "TERMINATED"} - result: dict[str, t.Any] = dict(changed=False) - result[resource_type] = dict() - try: - resource = to_dict(call_with_backoff(get_fn, **kwargs_get).data) - if resource: - if "lifecycle_state" not in resource or resource["lifecycle_state"] not in states_set: - response = call_with_backoff(delete_fn, **kwargs_delete) - if process_work_request: - wr_id = response.headers.get("opc-work-request-id") - get_wr_response = call_with_backoff(client.get_work_request, work_request_id=wr_id) - result["work_request"] = to_dict(wait_on_work_request(client, get_wr_response, module)) - # Set changed to True as work request has been created to delete the resource. - result["changed"] = True - resource = to_dict(call_with_backoff(get_fn, **kwargs_get).data) - else: - _debug(f"Deleted {resource_type}, {resource}") - result["changed"] = True - - if wait_applicable and module.params.get("wait", None): - if states is None: - states = module.params.get("wait_until") or DEFAULT_TERMINATED_STATES - try: - wait_response = oci.wait_until( - client, - get_fn(**kwargs_get), - evaluate_response=lambda r: r.data.lifecycle_state in states, - max_wait_seconds=module.params.get("wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS), - succeed_on_not_found=True, - ) - except MaximumWaitTimeExceeded as ex: - module.fail_json(msg=str(ex)) - except ServiceError as ex: - if ex.status != HTTPStatus.NOT_FOUND: - module.fail_json(msg=ex.message) - else: - # While waiting for resource to get into terminated state, if the resource is not found. - _debug( - "API returned Status:404(Not Found) while waiting for resource to get into" - " terminated state." - ) - resource["lifecycle_state"] = "DELETED" - result[resource_type] = resource - return result - # oci.wait_until() returns an instance of oci.util.Sentinel in case the resource is not found. 
- if not isinstance(wait_response, Sentinel): - resource = to_dict(wait_response.data) - else: - resource["lifecycle_state"] = "DELETED" - - result[resource_type] = resource - else: - _debug(f"Resource {resource_type} with {kwargs_get} already deleted. So returning changed=False") - except ServiceError as ex: - # DNS API throws a 400 InvalidParameter when a zone id is provided for zone_name_or_id and if the zone - # resource is not available, instead of the expected 404. So working around this for now. - if isinstance(client, oci.dns.DnsClient): - if ex.status == HTTPStatus.BAD_REQUEST and ex.code == "InvalidParameter": - _debug(f"Resource {resource_type} with {kwargs_get} already deleted. So returning changed=False") - elif ex.status != HTTPStatus.NOT_FOUND: - module.fail_json(msg=ex.message) - result[resource_type] = dict() - return result - - -def are_attrs_equal(current_resource, module: AnsibleModule, attributes): - """ - Check if the specified attributes are equal in the specified 'model' and 'module'. This is used to check if an OCI - Model instance already has the values specified by an Ansible user while invoking an OCI Ansible module and if a - resource needs to be updated. - :param current_resource: A resource model instance - :param module: The AnsibleModule representing the options provided by the user - :param attributes: A list of attributes that would need to be compared in the model and the module instances. 
- :return: True if the values for the list of attributes is the same in the model and module instances - """ - for attr in attributes: - curr_value = getattr(current_resource, attr, None) - user_provided_value = _get_user_provided_value(module, attribute_name=attr) - - if user_provided_value is not None: - if curr_value != user_provided_value: - _debug( - "are_attrs_equal - current resource's attribute " - + attr - + " value is " - + str(curr_value) - + " and this doesn't match user provided value of " - + str(user_provided_value) - ) - return False - return True - - -def _get_user_provided_value(module: AnsibleModule, attribute_name): - """ - Returns the user provided value for "attribute_name". We consider aliases in the module. - """ - user_provided_value = module.params.get(attribute_name, None) - if user_provided_value is None: - # If the attribute_name is set as an alias for some option X and user has provided value in the playbook using - # option X, then user provided value for attribute_name is equal to value for X. - # Get option name for attribute_name from module.aliases. - # module.aliases is a dictionary with key as alias name and its value as option name. - option_alias_for_attribute = module.aliases.get(attribute_name, None) - if option_alias_for_attribute is not None: - user_provided_value = module.params.get(option_alias_for_attribute, None) - return user_provided_value - - -def update_model_with_user_options(curr_model, update_model, module: AnsibleModule): - """ - Update the 'update_model' with user provided values in 'module' for the specified 'attributes' if they are different - from the values in the 'curr_model'. 
- :param curr_model: A resource model instance representing the state of the current resource - :param update_model: An instance of the update resource model for the current resource's type - :param module: An AnsibleModule representing the options provided by the user - :return: An updated 'update_model' instance filled with values that would need to be updated in the current resource - state to satisfy the user's requested state. - """ - attributes = update_model.attribute_map.keys() - for attr in attributes: - curr_value_for_attr = getattr(curr_model, attr, None) - user_provided_value = _get_user_provided_value(module, attribute_name=attr) - - if curr_value_for_attr != user_provided_value: - if user_provided_value is not None: - # Only update if a user has specified a value for an option - _debug( - f"User requested {user_provided_value} for attribute {attr}, whereas the current value is {curr_value_for_attr}. So adding it " - "to the update model" - ) - setattr(update_model, attr, user_provided_value) - else: - # Always set current values of the resource in the update model if there is no request for change in - # values - setattr(update_model, attr, curr_value_for_attr) - return update_model - - -def _get_retry_strategy(): - retry_strategy_builder = RetryStrategyBuilder( - max_attempts_check=True, - max_attempts=10, - retry_max_wait_between_calls_seconds=30, - retry_base_sleep_time_seconds=3, - backoff_type=oci.retry.BACKOFF_FULL_JITTER_EQUAL_ON_THROTTLE_VALUE, - ) - retry_strategy_builder.add_service_error_check( - service_error_retry_config={ - 429: [], - 400: ["QuotaExceeded", "LimitExceeded"], - 409: ["Conflict"], - }, - service_error_retry_on_any_5xx=True, - ) - return retry_strategy_builder.get_retry_strategy() - - -def call_with_backoff(fn, **kwargs): - if "retry_strategy" not in kwargs: - kwargs["retry_strategy"] = _get_retry_strategy() - try: - return fn(**kwargs) - except TypeError as te: - if "unexpected keyword argument" in str(te): - # to handle 
older SDKs that did not support retry_strategy - del kwargs["retry_strategy"] - return fn(**kwargs) - else: - # A validation error raised by the SDK, throw it back - raise - - -def generic_hash(obj) -> int: - """ - Compute a hash of all the fields in the object - :param obj: Object whose hash needs to be computed - :return: a hash value for the object - """ - sum = 0 - for field in obj.attribute_map.keys(): - field_value = getattr(obj, field) - if isinstance(field_value, list): - for value in field_value: - sum = sum + hash(value) - elif isinstance(field_value, dict): - for k, v in field_value.items(): - sum = sum + hash(hash(k) + hash(":") + hash(v)) - else: - sum = sum + hash(getattr(obj, field)) - return sum - - -def generic_eq(s, other) -> bool: - if other is None: - return False - return s.__dict__ == other.__dict__ - - -def generate_subclass(parent_class): - """Make a class hash-able by generating a subclass with a __hash__ method that returns the sum of all fields within - the parent class""" - dict_of_method_in_subclass = { - "__init__": parent_class.__init__, - "__hash__": generic_hash, - "__eq__": generic_eq, - } - subclass_name = "GeneratedSub" + parent_class.__name__ - generated_sub_class = type(subclass_name, (parent_class,), dict_of_method_in_subclass) - return generated_sub_class - - -def create_hashed_instance(class_type): - hashed_class = generate_subclass(class_type) - return hashed_class() - - -def get_hashed_object_list(class_type, object_with_values, attributes_class_type=None): - if object_with_values is None: - return None - hashed_class_instances = [] - for object_with_value in object_with_values: - hashed_class_instances.append(get_hashed_object(class_type, object_with_value, attributes_class_type)) - return hashed_class_instances - - -def get_hashed_object(class_type, object_with_value, attributes_class_type=None, supported_attributes=None): - """ - Convert any class instance into hashable so that the - instances are eligible for various 
comparison - operation available under set() object. - :param class_type: Any class type whose instances needs to be hashable - :param object_with_value: Instance of the class type with values which - would be set in the resulting isinstance - :param attributes_class_type: A list of class types of attributes, if attribute is a custom class instance - :param supported_attributes: A list of attributes which should be considered while populating the instance - with the values in the object. This helps in avoiding new attributes of the class_type which are still not - supported by the current implementation. - :return: A hashable instance with same state of the provided object_with_value - """ - if object_with_value is None: - return None - - HashedClass = generate_subclass(class_type) - hashed_class_instance = HashedClass() - - if supported_attributes: - class_attributes = list(set(hashed_class_instance.attribute_map) & set(supported_attributes)) - else: - class_attributes = hashed_class_instance.attribute_map - - for attribute in class_attributes: - attribute_value = getattr(object_with_value, attribute) - if attributes_class_type: - for attribute_class_type in attributes_class_type: - if isinstance(attribute_value, attribute_class_type): - attribute_value = get_hashed_object(attribute_class_type, attribute_value) - hashed_class_instance.__setattr__(attribute, attribute_value) - - return hashed_class_instance - - -def update_class_type_attr_difference(update_class_details, existing_instance, attr_name, attr_class, input_attr_value): - """ - Checks the difference and updates an attribute which is represented by a class - instance. Not applicable if the attribute type is a primitive value. - For example, if a class name is A with an attribute x, then if A.x = X(), then only - this method works. 
- :param update_class_details The instance which should be updated if there is change in - attribute value - :param existing_instance The instance whose attribute value is compared with input - attribute value - :param attr_name Name of the attribute whose value should be compared - :param attr_class Class type of the attribute - :param input_attr_value The value of input attribute which should replaced the current - value in case of mismatch - :return: A boolean value indicating whether attribute value has been replaced - """ - changed = False - # Here existing attribute values is an instance - existing_attr_value = get_hashed_object(attr_class, getattr(existing_instance, attr_name)) - if input_attr_value is None: - update_class_details.__setattr__(attr_name, existing_attr_value) - else: - changed = not input_attr_value.__eq__(existing_attr_value) - if changed: - update_class_details.__setattr__(attr_name, input_attr_value) - else: - update_class_details.__setattr__(attr_name, existing_attr_value) - - return changed - - -def get_existing_resource(target_fn, module: AnsibleModule, **kwargs): - """ - Returns the requested resource if it exists based on the input arguments. 
- :param target_fn The function which should be used to find the requested resource - :param module Instance of AnsibleModule attribute value - :param kwargs A map of arguments consisting of values based on which requested resource should be searched - :return: Instance of requested resource - """ - existing_resource = None - try: - response = call_with_backoff(target_fn, **kwargs) - existing_resource = response.data - except ServiceError as ex: - if ex.status != HTTPStatus.NOT_FOUND: - module.fail_json(msg=ex.message) - - return existing_resource - - -def get_attached_instance_info( - module: AnsibleModule, lookup_attached_instance, list_attachments_fn, list_attachments_args -): - config = get_oci_config(module) - identity_client = create_service_client(module, IdentityClient) - - volume_attachments = [] - - if lookup_attached_instance: - # Get all the compartments in the tenancy - compartments = to_dict( - identity_client.list_compartments(config.get("tenancy"), compartment_id_in_subtree=True).data - ) - # For each compartment, get the volume attachments for the compartment_id with the other args in - # list_attachments_args. - for compartment in compartments: - list_attachments_args["compartment_id"] = compartment["id"] - try: - volume_attachments += list_all_resources(list_attachments_fn, **list_attachments_args) - - # Pass ServiceError due to authorization issue in accessing volume attachments of a compartment - except ServiceError as ex: - if ex.status == HTTPStatus.NOT_FOUND: - pass - - else: - volume_attachments = list_all_resources(list_attachments_fn, **list_attachments_args) - - volume_attachments = to_dict(volume_attachments) - # volume_attachments has attachments in DETACHING or DETACHED state. 
Return the volume attachment in ATTACHING or - # ATTACHED state - - return next( - ( - volume_attachment - for volume_attachment in volume_attachments - if volume_attachment["lifecycle_state"] in ["ATTACHING", "ATTACHED"] - ), - None, - ) - - -def check_mode(fn): - def wrapper(*args, **kwargs): - if os.environ.get("OCI_ANSIBLE_EXPERIMENTAL", None): - return fn(*args, **kwargs) - return None - - return wrapper - - -def check_and_return_component_list_difference( - input_component_list, existing_components, purge_components, delete_components=False -): - if input_component_list: - existing_components, changed = get_component_list_difference( - input_component_list, - existing_components, - purge_components, - delete_components, - ) - else: - existing_components = [] - changed = True - return existing_components, changed - - -def get_component_list_difference(input_component_list, existing_components, purge_components, delete_components=False): - if delete_components: - if existing_components is None: - return None, False - component_differences = set(existing_components).intersection(set(input_component_list)) - if component_differences: - return list(set(existing_components) - component_differences), True - else: - return None, False - if existing_components is None: - return input_component_list, True - if purge_components: - components_differences = set(input_component_list).symmetric_difference(set(existing_components)) - - if components_differences: - return input_component_list, True - - components_differences = set(input_component_list).difference(set(existing_components)) - if components_differences: - return list(components_differences) + existing_components, True - return None, False - - -def write_to_file(path: str | bytes, content: bytes) -> None: - with open(to_bytes(path), "wb") as dest_file: - dest_file.write(content) - - -def get_target_resource_from_list(module: AnsibleModule, list_resource_fn, target_resource_id=None, **kwargs): - """ - Returns a 
resource filtered by identifier from a list of resources. This method should be - used as an alternative of 'get resource' method when 'get resource' is nor provided by - resource api. This method returns a wrapper of response object but that should not be - used as an input to 'wait_until' utility as this is only a partial wrapper of response object. - :param module The AnsibleModule representing the options provided by the user - :param list_resource_fn The function which lists all the resources - :param target_resource_id The identifier of the resource which should be filtered from the list - :param kwargs A map of arguments consisting of values based on which requested resource should be searched - :return: A custom wrapper which partially wraps a response object where the data field contains the target - resource, if found. - """ - - class ResponseWrapper: - def __init__(self, data): - self.data = data - - try: - resources = list_all_resources(list_resource_fn, **kwargs) - if resources is not None: - for resource in resources: - if resource.id == target_resource_id: - # Returning an object that mimics an OCI response as oci_utils methods assumes an Response-ish - # object - return ResponseWrapper(data=resource) - return ResponseWrapper(data=None) - except ServiceError as ex: - module.fail_json(msg=ex.message) diff --git a/plugins/module_utils/pipx.py b/plugins/module_utils/pipx.py index 5c8fad89e1..70515cd55b 100644 --- a/plugins/module_utils/pipx.py +++ b/plugins/module_utils/pipx.py @@ -104,22 +104,3 @@ def make_process_dict(include_injected, include_deps=False): return results, raw_data return process_dict - - -def make_process_list(mod_helper, **kwargs): - # - # ATTENTION! 
- # - # The function `make_process_list()` is deprecated and will be removed in community.general 13.0.0 - # - process_dict = make_process_dict(mod_helper, **kwargs) - - def process_list(rc, out, err): - res_dict, raw_data = process_dict(rc, out, err) - - if kwargs.get("include_raw"): - mod_helper.vars.raw_output = raw_data - - return [entry for name, entry in res_dict.items() if name == kwargs.get("name")] - - return process_list diff --git a/plugins/module_utils/saslprep.py b/plugins/module_utils/saslprep.py deleted file mode 100644 index 475e2bdbbe..0000000000 --- a/plugins/module_utils/saslprep.py +++ /dev/null @@ -1,171 +0,0 @@ -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. - -# Copyright (c) 2020, Andrew Klychkov (@Andersson007) -# -# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) -# SPDX-License-Identifier: BSD-2-Clause - -# This module utils is deprecated and will be removed in community.general 13.0.0 - -from __future__ import annotations - -from stringprep import ( - in_table_a1, - in_table_b1, - in_table_c3, - in_table_c4, - in_table_c5, - in_table_c6, - in_table_c7, - in_table_c8, - in_table_c9, - in_table_c12, - in_table_c21_c22, - in_table_d1, - in_table_d2, -) -from unicodedata import normalize - - -def is_unicode_str(string): - return True if isinstance(string, str) else False - - -def mapping_profile(string): - """RFC4013 Mapping profile implementation.""" - # Regarding RFC4013, - # This profile specifies: - # - non-ASCII space characters [StringPrep, C.1.2] that can be - # mapped to SPACE (U+0020), and - # - the "commonly mapped to nothing" characters [StringPrep, B.1] - # that can be mapped to nothing. 
- - tmp = [] - for c in string: - # If not the "commonly mapped to nothing" - if not in_table_b1(c): - if in_table_c12(c): - # map non-ASCII space characters - # (that can be mapped) to Unicode space - tmp.append(" ") - else: - tmp.append(c) - - return "".join(tmp) - - -def is_ral_string(string): - """RFC3454 Check bidirectional category of the string""" - # Regarding RFC3454, - # Table D.1 lists the characters that belong - # to Unicode bidirectional categories "R" and "AL". - # If a string contains any RandALCat character, a RandALCat - # character MUST be the first character of the string, and a - # RandALCat character MUST be the last character of the string. - if in_table_d1(string[0]): - if not in_table_d1(string[-1]): - raise ValueError("RFC3454: incorrect bidirectional RandALCat string.") - return True - return False - - -def prohibited_output_profile(string): - """RFC4013 Prohibited output profile implementation.""" - # Implements: - # RFC4013, 2.3. Prohibited Output. - # This profile specifies the following characters as prohibited input: - # - Non-ASCII space characters [StringPrep, C.1.2] - # - ASCII control characters [StringPrep, C.2.1] - # - Non-ASCII control characters [StringPrep, C.2.2] - # - Private Use characters [StringPrep, C.3] - # - Non-character code points [StringPrep, C.4] - # - Surrogate code points [StringPrep, C.5] - # - Inappropriate for plain text characters [StringPrep, C.6] - # - Inappropriate for canonical representation characters [StringPrep, C.7] - # - Change display properties or deprecated characters [StringPrep, C.8] - # - Tagging characters [StringPrep, C.9] - # RFC4013, 2.4. Bidirectional Characters. - # RFC4013, 2.5. Unassigned Code Points. 
- - # Determine how to handle bidirectional characters (RFC3454): - if is_ral_string(string): - # If a string contains any RandALCat characters, - # The string MUST NOT contain any LCat character: - is_prohibited_bidi_ch = in_table_d2 - bidi_table = "D.2" - else: - # Forbid RandALCat characters in LCat string: - is_prohibited_bidi_ch = in_table_d1 - bidi_table = "D.1" - - RFC = "RFC4013" - for c in string: - # RFC4013 2.3. Prohibited Output: - if in_table_c12(c): - raise ValueError(f"{RFC}: prohibited non-ASCII space characters that cannot be replaced (C.1.2).") - if in_table_c21_c22(c): - raise ValueError(f"{RFC}: prohibited control characters (C.2.1).") - if in_table_c3(c): - raise ValueError(f"{RFC}: prohibited private Use characters (C.3).") - if in_table_c4(c): - raise ValueError(f"{RFC}: prohibited non-character code points (C.4).") - if in_table_c5(c): - raise ValueError(f"{RFC}: prohibited surrogate code points (C.5).") - if in_table_c6(c): - raise ValueError(f"{RFC}: prohibited inappropriate for plain text characters (C.6).") - if in_table_c7(c): - raise ValueError(f"{RFC}: prohibited inappropriate for canonical representation characters (C.7).") - if in_table_c8(c): - raise ValueError(f"{RFC}: prohibited change display properties / deprecated characters (C.8).") - if in_table_c9(c): - raise ValueError(f"{RFC}: prohibited tagging characters (C.9).") - - # RFC4013, 2.4. Bidirectional Characters: - if is_prohibited_bidi_ch(c): - raise ValueError(f"{RFC}: prohibited bidi characters ({bidi_table}).") - - # RFC4013, 2.5. Unassigned Code Points: - if in_table_a1(c): - raise ValueError(f"{RFC}: prohibited unassigned code points (A.1).") - - -def saslprep(string): - """RFC4013 implementation. - Implements "SASLprep" profile (RFC4013) of the "stringprep" algorithm (RFC3454) - to prepare Unicode strings representing user names and passwords for comparison. 
- Regarding the RFC4013, the "SASLprep" profile is intended to be used by - Simple Authentication and Security Layer (SASL) mechanisms - (such as PLAIN, CRAM-MD5, and DIGEST-MD5), as well as other protocols - exchanging simple user names and/or passwords. - - Args: - string (unicode string): Unicode string to validate and prepare. - - Returns: - Prepared unicode string. - """ - # RFC4013: "The algorithm assumes all strings are - # comprised of characters from the Unicode [Unicode] character set." - # Validate the string is a Unicode string - if not is_unicode_str(string): - raise TypeError(f"input must be of type str, not {type(string)}") - - # RFC4013: 2.1. Mapping. - string = mapping_profile(string) - - # RFC4013: 2.2. Normalization. - # "This profile specifies using Unicode normalization form KC." - string = normalize("NFKC", string) - if not string: - return "" - - # RFC4013: 2.3. Prohibited Output. - # RFC4013: 2.4. Bidirectional Characters. - # RFC4013: 2.5. Unassigned Code Points. - prohibited_output_profile(string) - - return string diff --git a/plugins/modules/atomic_container.py b/plugins/modules/atomic_container.py deleted file mode 100644 index 6b92873208..0000000000 --- a/plugins/modules/atomic_container.py +++ /dev/null @@ -1,232 +0,0 @@ -#!/usr/bin/python - -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -DOCUMENTATION = r""" -module: atomic_container -short_description: Manage the containers on the atomic host platform -description: - - Manage the containers on the atomic host platform. - - Allows to manage the lifecycle of a container on the atomic host platform. -deprecated: - removed_in: 13.0.0 - why: Project Atomic was sunset by the end of 2019. - alternative: There is none. 
-author: "Giuseppe Scrivano (@giuseppe)" -requirements: - - atomic -notes: - - According to U(https://projectatomic.io/) the project has been sunset around 2019/2020, in favor of C(podman) and Fedora CoreOS. -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - backend: - description: - - Define the backend to use for the container. - required: true - choices: ["docker", "ostree"] - type: str - name: - description: - - Name of the container. - required: true - type: str - image: - description: - - The image to use to install the container. - required: true - type: str - rootfs: - description: - - Define the rootfs of the image. - type: str - state: - description: - - State of the container. - choices: ["absent", "latest", "present", "rollback"] - default: "latest" - type: str - mode: - description: - - Define if it is an user or a system container. - choices: ["user", "system"] - type: str - values: - description: - - Values for the installation of the container. - - This option is permitted only with mode 'user' or 'system'. - - The values specified here will be used at installation time as --set arguments for atomic install. - type: list - elements: str - default: [] -""" - -EXAMPLES = r""" -- name: Install the etcd system container - community.general.atomic_container: - name: etcd - image: rhel/etcd - backend: ostree - state: latest - mode: system - values: - - ETCD_NAME=etcd.server - -- name: Uninstall the etcd system container - community.general.atomic_container: - name: etcd - image: rhel/etcd - backend: ostree - state: absent - mode: system -""" - -RETURN = r""" -msg: - description: The command standard output. - returned: always - type: str - sample: 'Using default tag: latest ...' 
-""" - -# import module snippets -import traceback - -from ansible.module_utils.basic import AnsibleModule - - -def do_install(module, mode, rootfs, container, image, values_list, backend): - system_list = ["--system"] if mode == "system" else [] - user_list = ["--user"] if mode == "user" else [] - rootfs_list = [f"--rootfs={rootfs}"] if rootfs else [] - atomic_bin = module.get_bin_path("atomic") - args = ( - [atomic_bin, "install", f"--storage={backend}", f"--name={container}"] - + system_list - + user_list - + rootfs_list - + values_list - + [image] - ) - rc, out, err = module.run_command(args, check_rc=False) - if rc != 0: - module.fail_json(rc=rc, msg=err) - else: - changed = "Extracting" in out or "Copying blob" in out - module.exit_json(msg=out, changed=changed) - - -def do_update(module, container, image, values_list): - atomic_bin = module.get_bin_path("atomic") - args = [atomic_bin, "containers", "update", f"--rebase={image}"] + values_list + [container] - rc, out, err = module.run_command(args, check_rc=False) - if rc != 0: - module.fail_json(rc=rc, msg=err) - else: - changed = "Extracting" in out or "Copying blob" in out - module.exit_json(msg=out, changed=changed) - - -def do_uninstall(module, name, backend): - atomic_bin = module.get_bin_path("atomic") - args = [atomic_bin, "uninstall", f"--storage={backend}", name] - rc, out, err = module.run_command(args, check_rc=False) - if rc != 0: - module.fail_json(rc=rc, msg=err) - module.exit_json(msg=out, changed=True) - - -def do_rollback(module, name): - atomic_bin = module.get_bin_path("atomic") - args = [atomic_bin, "containers", "rollback", name] - rc, out, err = module.run_command(args, check_rc=False) - if rc != 0: - module.fail_json(rc=rc, msg=err) - else: - changed = "Rolling back" in out - module.exit_json(msg=out, changed=changed) - - -def core(module): - mode = module.params["mode"] - name = module.params["name"] - image = module.params["image"] - rootfs = module.params["rootfs"] - values = 
module.params["values"] - backend = module.params["backend"] - state = module.params["state"] - - atomic_bin = module.get_bin_path("atomic") - module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C") - - values_list = [f"--set={x}" for x in values] if values else [] - - args = [ - atomic_bin, - "containers", - "list", - "--no-trunc", - "-n", - "--all", - "-f", - f"backend={backend}", - "-f", - f"container={name}", - ] - rc, out, err = module.run_command(args, check_rc=False) - if rc != 0: - module.fail_json(rc=rc, msg=err) - return - present = name in out - - if state == "present" and present: - module.exit_json(msg=out, changed=False) - elif (state in ["latest", "present"]) and not present: - do_install(module, mode, rootfs, name, image, values_list, backend) - elif state == "latest": - do_update(module, name, image, values_list) - elif state == "absent": - if not present: - module.exit_json(msg="The container is not present", changed=False) - else: - do_uninstall(module, name, backend) - elif state == "rollback": - do_rollback(module, name) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - mode=dict(choices=["user", "system"]), - name=dict(required=True), - image=dict(required=True), - rootfs=dict(), - state=dict(default="latest", choices=["present", "absent", "latest", "rollback"]), - backend=dict(required=True, choices=["docker", "ostree"]), - values=dict(type="list", default=[], elements="str"), - ), - ) - - if module.params["values"] is not None and module.params["mode"] == "default": - module.fail_json(msg="values is supported only with user or system mode") - - # Verify that the platform supports atomic command - dummy = module.get_bin_path("atomic", required=True) - - try: - core(module) - except Exception as e: - module.fail_json(msg=f"Unanticipated error running atomic: {e}", exception=traceback.format_exc()) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/atomic_host.py 
b/plugins/modules/atomic_host.py deleted file mode 100644 index f0e78ef379..0000000000 --- a/plugins/modules/atomic_host.py +++ /dev/null @@ -1,105 +0,0 @@ -#!/usr/bin/python - -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -DOCUMENTATION = r""" -module: atomic_host -short_description: Manage the atomic host platform -description: - - Manage the atomic host platform. - - Rebooting of Atomic host platform should be done outside this module. -deprecated: - removed_in: 13.0.0 - why: Project Atomic was sunset by the end of 2019. - alternative: There is none. -author: - - Saravanan KR (@krsacme) -notes: - - Host should be an atomic platform (verified by existence of '/run/ostree-booted' file). - - According to U(https://projectatomic.io/) the project has been sunset around 2019/2020, in favor of C(podman) and Fedora CoreOS. -requirements: - - atomic -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - revision: - description: - - The version number of the atomic host to be deployed. - - Providing V(latest) will upgrade to the latest available version. - default: 'latest' - aliases: [version] - type: str -""" - -EXAMPLES = r""" -- name: Upgrade the atomic host platform to the latest version (atomic host upgrade) - community.general.atomic_host: - revision: latest - -- name: Deploy a specific revision as the atomic host (atomic host deploy 23.130) - community.general.atomic_host: - revision: 23.130 -""" - -RETURN = r""" -msg: - description: The command standard output. 
- returned: always - type: str - sample: 'Already on latest' -""" -import os -import traceback - -from ansible.module_utils.basic import AnsibleModule - - -def core(module): - revision = module.params["revision"] - atomic_bin = module.get_bin_path("atomic", required=True) - - module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C") - - if revision == "latest": - args = [atomic_bin, "host", "upgrade"] - else: - args = [atomic_bin, "host", "deploy", revision] - - rc, out, err = module.run_command(args, check_rc=False) - - if rc == 77 and revision == "latest": - module.exit_json(msg="Already on latest", changed=False) - elif rc != 0: - module.fail_json(rc=rc, msg=err) - else: - module.exit_json(msg=out, changed=True) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - revision=dict(type="str", default="latest", aliases=["version"]), - ), - ) - - # Verify that the platform is atomic host - if not os.path.exists("/run/ostree-booted"): - module.fail_json(msg="Module atomic_host is applicable for Atomic Host Platforms only") - - try: - core(module) - except Exception as e: - module.fail_json(msg=f"{e}", exception=traceback.format_exc()) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/atomic_image.py b/plugins/modules/atomic_image.py deleted file mode 100644 index cc05bb38aa..0000000000 --- a/plugins/modules/atomic_image.py +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/python - -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -DOCUMENTATION = r""" -module: atomic_image -short_description: Manage the container images on the atomic host platform -description: - - Manage the container images on the atomic host platform. - - Allows to execute the commands specified by the RUN label in the container image when present. 
-deprecated: - removed_in: 13.0.0 - why: Project Atomic was sunset by the end of 2019. - alternative: There is none. -author: - - Saravanan KR (@krsacme) -notes: - - According to U(https://projectatomic.io/) the project has been sunset around 2019/2020, in favor of C(podman) and Fedora CoreOS. -requirements: - - atomic -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - backend: - description: - - Define the backend where the image is pulled. - choices: ['docker', 'ostree'] - type: str - name: - description: - - Name of the container image. - required: true - type: str - state: - description: - - The state of the container image. - - The state V(latest) will ensure container image is upgraded to the latest version and forcefully restart container, - if running. - choices: ['absent', 'latest', 'present'] - default: 'latest' - type: str - started: - description: - - Start or stop the container. - type: bool - default: true -""" - -EXAMPLES = r""" -- name: Execute the run command on rsyslog container image (atomic run rhel7/rsyslog) - community.general.atomic_image: - name: rhel7/rsyslog - state: latest - -- name: Pull busybox to the OSTree backend - community.general.atomic_image: - name: busybox - state: latest - backend: ostree -""" - -RETURN = r""" -msg: - description: The command standard output. - returned: always - type: str - sample: 'Using default tag: latest ...' 
-""" -import traceback - -from ansible.module_utils.basic import AnsibleModule - - -def do_upgrade(module, image): - atomic_bin = module.get_bin_path("atomic") - args = [atomic_bin, "update", "--force", image] - rc, out, err = module.run_command(args, check_rc=False) - if rc != 0: # something went wrong emit the msg - module.fail_json(rc=rc, msg=err) - elif "Image is up to date" in out: - return False - - return True - - -def core(module): - image = module.params["name"] - state = module.params["state"] - started = module.params["started"] - backend = module.params["backend"] - is_upgraded = False - - module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C") - atomic_bin = module.get_bin_path("atomic") - out = {} - err = {} - rc = 0 - - if backend: - if state == "present" or state == "latest": - args = [atomic_bin, "pull", f"--storage={backend}", image] - rc, out, err = module.run_command(args, check_rc=False) - if rc < 0: - module.fail_json(rc=rc, msg=err) - else: - out_run = "" - if started: - args = [atomic_bin, "run", f"--storage={backend}", image] - rc, out_run, err = module.run_command(args, check_rc=False) - if rc < 0: - module.fail_json(rc=rc, msg=err) - - changed = "Extracting" in out or "Copying blob" in out - module.exit_json(msg=(out + out_run), changed=changed) - elif state == "absent": - args = [atomic_bin, "images", "delete", f"--storage={backend}", image] - rc, out, err = module.run_command(args, check_rc=False) - if rc < 0: - module.fail_json(rc=rc, msg=err) - else: - changed = "Unable to find" not in out - module.exit_json(msg=out, changed=changed) - return - - if state == "present" or state == "latest": - if state == "latest": - is_upgraded = do_upgrade(module, image) - - if started: - args = [atomic_bin, "run", image] - else: - args = [atomic_bin, "install", image] - elif state == "absent": - args = [atomic_bin, "uninstall", image] - - rc, out, err = module.run_command(args, check_rc=False) - - if rc < 0: - 
module.fail_json(rc=rc, msg=err) - elif rc == 1 and "already present" in err: - module.exit_json(restult=err, changed=is_upgraded) - elif started and "Container is running" in out: - module.exit_json(result=out, changed=is_upgraded) - else: - module.exit_json(msg=out, changed=True) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - backend=dict(type="str", choices=["docker", "ostree"]), - name=dict(type="str", required=True), - state=dict(type="str", default="latest", choices=["absent", "latest", "present"]), - started=dict(type="bool", default=True), - ), - ) - - # Verify that the platform supports atomic command - dummy = module.get_bin_path("atomic", required=True) - - try: - core(module) - except Exception as e: - module.fail_json(msg=f"{e}", exception=traceback.format_exc()) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/catapult.py b/plugins/modules/catapult.py deleted file mode 100644 index 1f6430dcef..0000000000 --- a/plugins/modules/catapult.py +++ /dev/null @@ -1,154 +0,0 @@ -#!/usr/bin/python - -# Copyright (c) 2016, Jonathan Mainguy -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later -# -# basis of code taken from the ansible twillio and nexmo modules - -from __future__ import annotations - -DOCUMENTATION = r""" -module: catapult -short_description: Send a sms / mms using the catapult bandwidth API -description: - - Allows notifications to be sent using SMS / MMS using the catapult bandwidth API. -deprecated: - removed_in: 13.0.0 - why: >- - DNS fails to resolve the API endpoint used by the module since Oct 2024. - See L(the associated issue, https://github.com/ansible-collections/community.general/issues/10318) for details. - alternative: There is none. 
-extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - src: - type: str - description: - - One of your catapult telephone numbers the message should come from (must be in E.164 format, like V(+19195551212)). - required: true - dest: - type: list - elements: str - description: - - The phone number or numbers the message should be sent to (must be in E.164 format, like V(+19195551212)). - required: true - msg: - type: str - description: - - The contents of the text message (must be 2048 characters or less). - required: true - media: - type: str - description: - - For MMS messages, a media URL to the location of the media to be sent with the message. - user_id: - type: str - description: - - User ID from API account page. - required: true - api_token: - type: str - description: - - API Token from API account page. - required: true - api_secret: - type: str - description: - - API Secret from API account page. - required: true - -author: "Jonathan Mainguy (@Jmainguy)" -notes: - - Will return changed even if the media URL is wrong. - - Will return changed if the destination number is invalid. 
-""" - -EXAMPLES = r""" -- name: Send a mms to multiple users - community.general.catapult: - src: "+15035555555" - dest: - - "+12525089000" - - "+12018994225" - media: "http://example.com/foobar.jpg" - msg: "Task is complete" - user_id: "{{ user_id }}" - api_token: "{{ api_token }}" - api_secret: "{{ api_secret }}" - -- name: Send a sms to a single user - community.general.catapult: - src: "+15035555555" - dest: "+12018994225" - msg: "Consider yourself notified" - user_id: "{{ user_id }}" - api_token: "{{ api_token }}" - api_secret: "{{ api_secret }}" -""" - - -import json - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def send(module, src, dest, msg, media, user_id, api_token, api_secret): - """ - Send the message - """ - AGENT = "Ansible" - URI = f"https://api.catapult.inetwork.com/v1/users/{user_id}/messages" - data = {"from": src, "to": dest, "text": msg} - if media: - data["media"] = media - - headers = {"User-Agent": AGENT, "Content-type": "application/json"} - - # Hack module params to have the Basic auth params that fetch_url expects - module.params["url_username"] = api_token.replace("\n", "") - module.params["url_password"] = api_secret.replace("\n", "") - - return fetch_url(module, URI, data=json.dumps(data), headers=headers, method="post") - - -def main(): - module = AnsibleModule( - argument_spec=dict( - src=dict(required=True), - dest=dict(required=True, type="list", elements="str"), - msg=dict(required=True), - user_id=dict(required=True), - api_token=dict(required=True, no_log=True), - api_secret=dict(required=True, no_log=True), - media=dict(), - ), - ) - - src = module.params["src"] - dest = module.params["dest"] - msg = module.params["msg"] - media = module.params["media"] - user_id = module.params["user_id"] - api_token = module.params["api_token"] - api_secret = module.params["api_secret"] - - for number in dest: - rc, info = send(module, src, number, msg, media, user_id, 
api_token, api_secret) - if info["status"] != 201: - body = json.loads(info["body"]) - fail_msg = body["message"] - module.fail_json(msg=fail_msg) - - changed = True - module.exit_json(changed=changed) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/cpanm.py b/plugins/modules/cpanm.py index 3f85fda3ab..5ff59d4c48 100644 --- a/plugins/modules/cpanm.py +++ b/plugins/modules/cpanm.py @@ -83,16 +83,12 @@ options: description: - Controls the module behavior. See notes below for more details. - The default changed from V(compatibility) to V(new) in community.general 9.0.0. + V(compatibility) was removed from community.general 13.0.0. - 'O(mode=new): The O(name) parameter may refer to a module name, a distribution file, a HTTP URL or a git repository URL as described in C(cpanminus) documentation. C(cpanm) version specifiers are recognized. This is the default mode from community.general 9.0.0 onwards.' - - 'O(mode=compatibility): This was the default mode before community.general 9.0.0. O(name) must be either a module - name or a distribution file. If the perl module given by O(name) is installed (at the exact O(version) when specified), - then nothing happens. Otherwise, it is installed using the C(cpanm) executable. O(name) cannot be an URL, or a git - URL. C(cpanm) version specifiers do not work in this mode.' - - 'B(ATTENTION): V(compatibility) mode is deprecated and will be removed in community.general 13.0.0.' 
type: str - choices: [compatibility, new] + choices: [new] default: new version_added: 3.0.0 name_check: @@ -184,7 +180,7 @@ class CPANMinus(ModuleHelper): install_recommendations=dict(type="bool"), install_suggestions=dict(type="bool"), executable=dict(type="path"), - mode=dict(type="str", default="new", choices=["compatibility", "new"]), + mode=dict(type="str", default="new", choices=["new"]), name_check=dict(type="str"), ), required_one_of=[("name", "from_path")], @@ -204,17 +200,8 @@ class CPANMinus(ModuleHelper): def __init_module__(self): v = self.vars - if v.mode == "compatibility": - if v.name_check: - self.do_raise("Parameter name_check can only be used with mode=new") - self.deprecate( - "'mode=compatibility' is deprecated, use 'mode=new' instead", - version="13.0.0", - collection_name="community.general", - ) - else: - if v.name and v.from_path: - self.do_raise("Parameters 'name' and 'from_path' are mutually exclusive when 'mode=new'") + if v.name and v.from_path: + self.do_raise("Parameters 'name' and 'from_path' are mutually exclusive when 'mode=new'") self.command = v.executable if v.executable else self.command self.runner = CmdRunner(self.module, self.command, self.command_args_formats, check_rc=True) @@ -260,22 +247,15 @@ class CPANMinus(ModuleHelper): def __run__(self): def process(rc, out, err): - if self.vars.mode == "compatibility" and rc != 0: - self.do_raise(msg=err, cmd=self.vars.cmd_args) return "is up to date" not in err and "is up to date" not in out v = self.vars pkg_param = "from_path" if v.from_path else "name" - if v.mode == "compatibility": - if self._is_package_installed(v.name, v.locallib, v.version): - return - pkg_spec = v[pkg_param] - else: - installed = self._is_package_installed(v.name_check, v.locallib, v.version) if v.name_check else False - if installed: - return - pkg_spec = self.sanitize_pkg_spec_version(v[pkg_param], v.version) + installed = self._is_package_installed(v.name_check, v.locallib, v.version) if v.name_check 
else False + if installed: + return + pkg_spec = self.sanitize_pkg_spec_version(v[pkg_param], v.version) with self.runner( [ diff --git a/plugins/modules/dimensiondata_network.py b/plugins/modules/dimensiondata_network.py deleted file mode 100644 index 801dc170c2..0000000000 --- a/plugins/modules/dimensiondata_network.py +++ /dev/null @@ -1,272 +0,0 @@ -#!/usr/bin/python -# -# Copyright (c) 2016 Dimension Data -# Authors: -# - Aimon Bustardo -# - Bert Diwa -# - Adam Friedman -# -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -DOCUMENTATION = r""" -module: dimensiondata_network -short_description: Create, update, and delete MCP 1.0 & 2.0 networks -extends_documentation_fragment: - - community.general.dimensiondata - - community.general.dimensiondata_wait - - community.general.attributes - -description: - - Create, update, and delete MCP 1.0 & 2.0 networks. -deprecated: - removed_in: 13.0.0 - why: Service and its endpoints are no longer available. - alternative: There is none. -author: 'Aimon Bustardo (@aimonb)' -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - name: - description: - - The name of the network domain to create. - required: true - type: str - description: - description: - - Additional description of the network domain. - type: str - service_plan: - description: - - The service plan, either "ESSENTIALS" or "ADVANCED". - - MCP 2.0 Only. - choices: [ESSENTIALS, ADVANCED] - default: ESSENTIALS - type: str - state: - description: - - Should the resource be present or absent. 
- choices: [present, absent] - default: present - type: str -""" - -EXAMPLES = r""" -- name: Create an MCP 1.0 network - community.general.dimensiondata_network: - region: na - location: NA5 - name: mynet - -- name: Create an MCP 2.0 network - community.general.dimensiondata_network: - region: na - mcp_user: my_user - mcp_password: my_password - location: NA9 - name: mynet - service_plan: ADVANCED - -- name: Delete a network - community.general.dimensiondata_network: - region: na - location: NA1 - name: mynet - state: absent -""" - -RETURN = r""" -network: - description: Dictionary describing the network. - returned: On success when O(state=present). - type: complex - contains: - id: - description: Network ID. - type: str - sample: "8c787000-a000-4050-a215-280893411a7d" - name: - description: Network name. - type: str - sample: "My network" - description: - description: Network description. - type: str - sample: "My network description" - location: - description: Datacenter location. - type: str - sample: NA3 - status: - description: Network status. (MCP 2.0 only). - type: str - sample: NORMAL - private_net: - description: Private network subnet. (MCP 1.0 only). - type: str - sample: "10.2.3.0" - multicast: - description: Multicast enabled? (MCP 1.0 only). - type: bool - sample: false -""" -import traceback - -from ansible.module_utils.basic import AnsibleModule - -from ansible_collections.community.general.plugins.module_utils.dimensiondata import HAS_LIBCLOUD, DimensionDataModule - -if HAS_LIBCLOUD: - from libcloud.common.dimensiondata import DimensionDataAPIException - from libcloud.compute.base import NodeLocation - - -class DimensionDataNetworkModule(DimensionDataModule): - """ - The dimensiondata_network module for Ansible. - """ - - def __init__(self): - """ - Create a new Dimension Data network module. 
- """ - - super().__init__( - module=AnsibleModule( - argument_spec=DimensionDataModule.argument_spec_with_wait( - name=dict(type="str", required=True), - description=dict(type="str"), - service_plan=dict(default="ESSENTIALS", choices=["ADVANCED", "ESSENTIALS"]), - state=dict(default="present", choices=["present", "absent"]), - ), - required_together=DimensionDataModule.required_together(), - ) - ) - - self.name = self.module.params["name"] - self.description = self.module.params["description"] - self.service_plan = self.module.params["service_plan"] - self.state = self.module.params["state"] - - def state_present(self): - network = self._get_network() - - if network: - self.module.exit_json(changed=False, msg="Network already exists", network=self._network_to_dict(network)) - - network = self._create_network() - - self.module.exit_json( - changed=True, - msg=f'Created network "{self.name}" in datacenter "{self.location}".', - network=self._network_to_dict(network), - ) - - def state_absent(self): - network = self._get_network() - - if not network: - self.module.exit_json( - changed=False, msg=f'Network "{self.name}" does not exist', network=self._network_to_dict(network) - ) - - self._delete_network(network) - - def _get_network(self): - if self.mcp_version == "1.0": - networks = self.driver.list_networks(location=self.location) - else: - networks = self.driver.ex_list_network_domains(location=self.location) - - matched_network = [network for network in networks if network.name == self.name] - if matched_network: - return matched_network[0] - - return None - - def _network_to_dict(self, network): - network_dict = dict(id=network.id, name=network.name, description=network.description) - - if isinstance(network.location, NodeLocation): - network_dict["location"] = network.location.id - else: - network_dict["location"] = network.location - - if self.mcp_version == "1.0": - network_dict["private_net"] = network.private_net - network_dict["multicast"] = 
network.multicast - network_dict["status"] = None - else: - network_dict["private_net"] = None - network_dict["multicast"] = None - network_dict["status"] = network.status - - return network_dict - - def _create_network(self): - # Make sure service_plan argument is defined - if self.mcp_version == "2.0" and "service_plan" not in self.module.params: - self.module.fail_json(msg="service_plan required when creating network and location is MCP 2.0") - - # Create network - try: - if self.mcp_version == "1.0": - network = self.driver.ex_create_network(self.location, self.name, description=self.description) - else: - network = self.driver.ex_create_network_domain( - self.location, self.name, self.module.params["service_plan"], description=self.description - ) - except DimensionDataAPIException as e: - self.module.fail_json(msg=f"Failed to create new network: {e}", exception=traceback.format_exc()) - - if self.module.params["wait"] is True: - network = self._wait_for_network_state(network.id, "NORMAL") - - return network - - def _delete_network(self, network): - try: - if self.mcp_version == "1.0": - deleted = self.driver.ex_delete_network(network) - else: - deleted = self.driver.ex_delete_network_domain(network) - - if deleted: - self.module.exit_json(changed=True, msg=f"Deleted network with id {network.id}") - - self.module.fail_json(f"Unexpected failure deleting network with id {network.id}") - - except DimensionDataAPIException as e: - self.module.fail_json(msg=f"Failed to delete network: {e}", exception=traceback.format_exc()) - - def _wait_for_network_state(self, net_id, state_to_wait_for): - try: - return self.driver.connection.wait_for_state( - state_to_wait_for, - self.driver.ex_get_network_domain, - self.module.params["wait_poll_interval"], - self.module.params["wait_time"], - net_id, - ) - except DimensionDataAPIException as e: - self.module.fail_json( - msg=f"Network did not reach {state_to_wait_for} state in time: {e}", - exception=traceback.format_exc(), - ) 
- - -def main(): - module = DimensionDataNetworkModule() - if module.state == "present": - module.state_present() - elif module.state == "absent": - module.state_absent() - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/dimensiondata_vlan.py b/plugins/modules/dimensiondata_vlan.py deleted file mode 100644 index 9f86a0e1e2..0000000000 --- a/plugins/modules/dimensiondata_vlan.py +++ /dev/null @@ -1,530 +0,0 @@ -#!/usr/bin/python -# Copyright (c) 2016 Dimension Data -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later -# -# Authors: -# - Adam Friedman - -from __future__ import annotations - -DOCUMENTATION = r""" -module: dimensiondata_vlan -short_description: Manage a VLAN in a Cloud Control network domain -extends_documentation_fragment: - - community.general.dimensiondata - - community.general.dimensiondata_wait - - community.general.attributes - -description: - - Manage VLANs in Cloud Control network domains. -deprecated: - removed_in: 13.0.0 - why: Service and its endpoints are no longer available. - alternative: There is none. -author: 'Adam Friedman (@tintoy)' -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - name: - description: - - The name of the target VLAN. - type: str - required: true - description: - description: - - A description of the VLAN. - type: str - default: '' - network_domain: - description: - - The ID or name of the target network domain. - required: true - type: str - private_ipv4_base_address: - description: - - The base address for the VLAN's IPv4 network (for example V(192.168.1.0)). - type: str - default: '' - private_ipv4_prefix_size: - description: - - The size of the IPv4 address space, for example V(24). - - Required, if O(private_ipv4_base_address) is specified. - type: int - default: 0 - state: - description: - - The desired state for the target VLAN. 
- - V(readonly) ensures that the state is only ever read, not modified (the module fails if the resource does not exist). - choices: [present, absent, readonly] - default: present - type: str - allow_expand: - description: - - Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently - possesses. - - If V(false), the module fails under these conditions. - - This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible). - type: bool - default: false -""" - -EXAMPLES = r""" -- name: Add or update VLAN - community.general.dimensiondata_vlan: - region: na - location: NA5 - network_domain: test_network - name: my_vlan1 - description: A test VLAN - private_ipv4_base_address: 192.168.23.0 - private_ipv4_prefix_size: 24 - state: present - wait: true - -- name: Read / get VLAN details - community.general.dimensiondata_vlan: - region: na - location: NA5 - network_domain: test_network - name: my_vlan1 - state: readonly - wait: true - -- name: Delete a VLAN - community.general.dimensiondata_vlan: - region: na - location: NA5 - network_domain: test_network - name: my_vlan_1 - state: absent - wait: true -""" - -RETURN = r""" -vlan: - description: Dictionary describing the VLAN. - returned: On success when O(state=present) - type: complex - contains: - id: - description: VLAN ID. - type: str - sample: "aaaaa000-a000-4050-a215-2808934ccccc" - name: - description: VLAN name. - type: str - sample: "My VLAN" - description: - description: VLAN description. - type: str - sample: "My VLAN description" - location: - description: Datacenter location. - type: str - sample: NA3 - private_ipv4_base_address: - description: The base address for the VLAN's private IPV4 network. - type: str - sample: 192.168.23.0 - private_ipv4_prefix_size: - description: The prefix size for the VLAN's private IPV4 network. 
- type: int - sample: 24 - private_ipv4_gateway_address: - description: The gateway address for the VLAN's private IPV4 network. - type: str - sample: 192.168.23.1 - private_ipv6_base_address: - description: The base address for the VLAN's IPV6 network. - type: str - sample: 2402:9900:111:1195:0:0:0:0 - private_ipv6_prefix_size: - description: The prefix size for the VLAN's IPV6 network. - type: int - sample: 64 - private_ipv6_gateway_address: - description: The gateway address for the VLAN's IPV6 network. - type: str - sample: 2402:9900:111:1195:0:0:0:1 - status: - description: VLAN status. - type: str - sample: NORMAL -""" - -from ansible.module_utils.basic import AnsibleModule - -from ansible_collections.community.general.plugins.module_utils.dimensiondata import ( - DimensionDataModule, - UnknownNetworkError, -) - -try: - from libcloud.common.dimensiondata import DimensionDataAPIException, DimensionDataVlan - - HAS_LIBCLOUD = True - -except ImportError: - DimensionDataVlan = None - - HAS_LIBCLOUD = False - - -class DimensionDataVlanModule(DimensionDataModule): - """ - The dimensiondata_vlan module for Ansible. - """ - - def __init__(self): - """ - Create a new Dimension Data VLAN module. 
- """ - - super().__init__( - module=AnsibleModule( - argument_spec=DimensionDataModule.argument_spec_with_wait( - name=dict(required=True, type="str"), - description=dict(default="", type="str"), - network_domain=dict(required=True, type="str"), - private_ipv4_base_address=dict(default="", type="str"), - private_ipv4_prefix_size=dict(default=0, type="int"), - allow_expand=dict(default=False, type="bool"), - state=dict(default="present", choices=["present", "absent", "readonly"]), - ), - required_together=DimensionDataModule.required_together(), - ) - ) - - self.name = self.module.params["name"] - self.description = self.module.params["description"] - self.network_domain_selector = self.module.params["network_domain"] - self.private_ipv4_base_address = self.module.params["private_ipv4_base_address"] - self.private_ipv4_prefix_size = self.module.params["private_ipv4_prefix_size"] - self.state = self.module.params["state"] - self.allow_expand = self.module.params["allow_expand"] - - if self.wait and self.state != "present": - self.module.fail_json(msg='The wait parameter is only supported when state is "present".') - - def state_present(self): - """ - Ensure that the target VLAN is present. 
- """ - - network_domain = self._get_network_domain() - - vlan = self._get_vlan(network_domain) - if not vlan: - if self.module.check_mode: - self.module.exit_json( - msg=f'VLAN "{self.name}" is absent from network domain "{self.network_domain_selector}" (should be present).', - changed=True, - ) - - vlan = self._create_vlan(network_domain) - self.module.exit_json( - msg=f'Created VLAN "{self.name}" in network domain "{self.network_domain_selector}".', - vlan=vlan_to_dict(vlan), - changed=True, - ) - else: - diff = VlanDiff(vlan, self.module.params) - if not diff.has_changes(): - self.module.exit_json( - msg=f'VLAN "{self.name}" is present in network domain "{self.network_domain_selector}" (no changes detected).', - vlan=vlan_to_dict(vlan), - changed=False, - ) - - return - - try: - diff.ensure_legal_change() - except InvalidVlanChangeError as invalid_vlan_change: - self.module.fail_json( - msg=f'Unable to update VLAN "{self.name}" in network domain "{self.network_domain_selector}": {invalid_vlan_change}' - ) - - if diff.needs_expand() and not self.allow_expand: - self.module.fail_json( - msg=f"The configured private IPv4 network size ({self.private_ipv4_prefix_size}-bit prefix) for " - f"the VLAN differs from its current network size ({vlan.private_ipv4_range_size}-bit prefix) " - "and needs to be expanded. Use allow_expand=true if this is what you want." 
- ) - - if self.module.check_mode: - self.module.exit_json( - msg=f'VLAN "{self.name}" is present in network domain "{self.network_domain_selector}" (changes detected).', - vlan=vlan_to_dict(vlan), - changed=True, - ) - - if diff.needs_edit(): - vlan.name = self.name - vlan.description = self.description - - self.driver.ex_update_vlan(vlan) - - if diff.needs_expand(): - vlan.private_ipv4_range_size = self.private_ipv4_prefix_size - self.driver.ex_expand_vlan(vlan) - - self.module.exit_json( - msg=f'Updated VLAN "{self.name}" in network domain "{self.network_domain_selector}".', - vlan=vlan_to_dict(vlan), - changed=True, - ) - - def state_readonly(self): - """ - Read the target VLAN's state. - """ - - network_domain = self._get_network_domain() - - vlan = self._get_vlan(network_domain) - if vlan: - self.module.exit_json(vlan=vlan_to_dict(vlan), changed=False) - else: - self.module.fail_json( - msg=f'VLAN "{self.name}" does not exist in network domain "{self.network_domain_selector}".' - ) - - def state_absent(self): - """ - Ensure that the target VLAN is not present. - """ - - network_domain = self._get_network_domain() - - vlan = self._get_vlan(network_domain) - if not vlan: - self.module.exit_json( - msg=f'VLAN "{self.name}" is absent from network domain "{self.network_domain_selector}".', changed=False - ) - - return - - if self.module.check_mode: - self.module.exit_json( - msg=f'VLAN "{self.name}" is present in network domain "{self.network_domain_selector}" (should be absent).', - vlan=vlan_to_dict(vlan), - changed=True, - ) - - self._delete_vlan(vlan) - - self.module.exit_json( - msg=f'Deleted VLAN "{self.name}" from network domain "{self.network_domain_selector}".', changed=True - ) - - def _get_vlan(self, network_domain): - """ - Retrieve the target VLAN details from CloudControl. - - :param network_domain: The target network domain. - :return: The VLAN, or None if the target VLAN was not found. 
- :rtype: DimensionDataVlan - """ - - vlans = self.driver.ex_list_vlans(location=self.location, network_domain=network_domain) - matching_vlans = [vlan for vlan in vlans if vlan.name == self.name] - if matching_vlans: - return matching_vlans[0] - - return None - - def _create_vlan(self, network_domain): - vlan = self.driver.ex_create_vlan( - network_domain, self.name, self.private_ipv4_base_address, self.description, self.private_ipv4_prefix_size - ) - - if self.wait: - vlan = self._wait_for_vlan_state(vlan.id, "NORMAL") - - return vlan - - def _delete_vlan(self, vlan): - try: - self.driver.ex_delete_vlan(vlan) - - # Not currently supported for deletes due to a bug in libcloud (module will error out if "wait" is specified when "state" is not "present"). - if self.wait: - self._wait_for_vlan_state(vlan, "NOT_FOUND") - - except DimensionDataAPIException as api_exception: - self.module.fail_json( - msg=f'Failed to delete VLAN "{vlan.id}" due to unexpected error from the CloudControl API: {api_exception.msg}' - ) - - def _wait_for_vlan_state(self, vlan, state_to_wait_for): - network_domain = self._get_network_domain() - - wait_poll_interval = self.module.params["wait_poll_interval"] - wait_time = self.module.params["wait_time"] - - # Bizarre bug in libcloud when checking status after delete; socket.error is too generic to catch in this context so for now we don't even try. 
- - try: - return self.driver.connection.wait_for_state( - state_to_wait_for, self.driver.ex_get_vlan, wait_poll_interval, wait_time, vlan - ) - - except DimensionDataAPIException as api_exception: - if api_exception.code != "RESOURCE_NOT_FOUND": - raise - - return DimensionDataVlan( - id=vlan.id, - status="NOT_FOUND", - name="", - description="", - private_ipv4_range_address="", - private_ipv4_range_size=0, - ipv4_gateway="", - ipv6_range_address="", - ipv6_range_size=0, - ipv6_gateway="", - location=self.location, - network_domain=network_domain, - ) - - def _get_network_domain(self): - """ - Retrieve the target network domain from the Cloud Control API. - - :return: The network domain. - """ - - try: - return self.get_network_domain(self.network_domain_selector, self.location) - except UnknownNetworkError: - self.module.fail_json( - msg=f'Cannot find network domain "{self.network_domain_selector}" in datacenter "{self.location}".' - ) - - return None - - -class InvalidVlanChangeError(Exception): - """ - Error raised when an illegal change to VLAN state is attempted. - """ - - pass - - -class VlanDiff: - """ - Represents differences between VLAN information (from CloudControl) and module parameters. - """ - - def __init__(self, vlan, module_params): - """ - - :param vlan: The VLAN information from CloudControl. - :type vlan: DimensionDataVlan - :param module_params: The module parameters. - :type module_params: dict - """ - - self.vlan = vlan - self.module_params = module_params - - self.name_changed = module_params["name"] != vlan.name - self.description_changed = module_params["description"] != vlan.description - self.private_ipv4_base_address_changed = ( - module_params["private_ipv4_base_address"] != vlan.private_ipv4_range_address - ) - self.private_ipv4_prefix_size_changed = ( - module_params["private_ipv4_prefix_size"] != vlan.private_ipv4_range_size - ) - - # Is configured prefix size greater than or less than the actual prefix size? 
- private_ipv4_prefix_size_difference = module_params["private_ipv4_prefix_size"] - vlan.private_ipv4_range_size - self.private_ipv4_prefix_size_increased = private_ipv4_prefix_size_difference > 0 - self.private_ipv4_prefix_size_decreased = private_ipv4_prefix_size_difference < 0 - - def has_changes(self): - """ - Does the VlanDiff represent any changes between the VLAN and module configuration? - - :return: True, if there are change changes; otherwise, False. - """ - - return self.needs_edit() or self.needs_expand() - - def ensure_legal_change(self): - """ - Ensure the change (if any) represented by the VlanDiff represents a legal change to VLAN state. - - - private_ipv4_base_address cannot be changed - - private_ipv4_prefix_size must be greater than or equal to the VLAN's existing private_ipv4_range_size - - :raise InvalidVlanChangeError: The VlanDiff does not represent a legal change to VLAN state. - """ - - # Cannot change base address for private IPv4 network. - if self.private_ipv4_base_address_changed: - raise InvalidVlanChangeError("Cannot change the private IPV4 base address for an existing VLAN.") - - # Cannot shrink private IPv4 network (by increasing prefix size). - if self.private_ipv4_prefix_size_increased: - raise InvalidVlanChangeError( - "Cannot shrink the private IPV4 network for an existing VLAN (only expand is supported)." - ) - - def needs_edit(self): - """ - Is an Edit operation required to resolve the differences between the VLAN information and the module parameters? - - :return: True, if an Edit operation is required; otherwise, False. - """ - - return self.name_changed or self.description_changed - - def needs_expand(self): - """ - Is an Expand operation required to resolve the differences between the VLAN information and the module parameters? - - The VLAN's network is expanded by reducing the size of its network prefix. - - :return: True, if an Expand operation is required; otherwise, False. 
- """ - - return self.private_ipv4_prefix_size_decreased - - -def vlan_to_dict(vlan): - return { - "id": vlan.id, - "name": vlan.name, - "description": vlan.description, - "location": vlan.location.id, - "private_ipv4_base_address": vlan.private_ipv4_range_address, - "private_ipv4_prefix_size": vlan.private_ipv4_range_size, - "private_ipv4_gateway_address": vlan.ipv4_gateway, - "ipv6_base_address": vlan.ipv6_range_address, - "ipv6_prefix_size": vlan.ipv6_range_size, - "ipv6_gateway_address": vlan.ipv6_gateway, - "status": vlan.status, - } - - -def main(): - module = DimensionDataVlanModule() - - if module.state == "present": - module.state_present() - elif module.state == "readonly": - module.state_readonly() - elif module.state == "absent": - module.state_absent() - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/github_repo.py b/plugins/modules/github_repo.py index 196dbc0c0d..32367a80cc 100644 --- a/plugins/modules/github_repo.py +++ b/plugins/modules/github_repo.py @@ -75,8 +75,9 @@ options: force_defaults: description: - If V(true), overwrite current O(description) and O(private) attributes with defaults. - - V(true) is deprecated for this option and will not be allowed starting in community.general 13.0.0. V(false) will be the default value then. + - The default value changed from V(true) to V(false) in community.general 13.0.0. 
type: bool + default: false version_added: 4.1.0 requirements: - PyGithub>=1.54 @@ -239,7 +240,7 @@ def main(): private=dict(type="bool"), description=dict(type="str"), api_url=dict(type="str", default="https://api.github.com"), - force_defaults=dict(type="bool"), + force_defaults=dict(type="bool", default=False), ) module = AnsibleModule( argument_spec=module_args, @@ -249,14 +250,6 @@ def main(): mutually_exclusive=[("username", "access_token")], ) - if module.params["force_defaults"] is None: - module.deprecate( - "'force_defaults=true' is deprecated and will not be allowed in community.general 13.0.0, use 'force_defaults=false' instead", - version="13.0.0", - collection_name="community.general", - ) - module.params["force_defaults"] = True - if not HAS_GITHUB_PACKAGE: module.fail_json(msg=missing_required_lib("PyGithub"), exception=GITHUB_IMP_ERR) diff --git a/plugins/modules/locale_gen.py b/plugins/modules/locale_gen.py index 5b306eb440..d58f3fde7d 100644 --- a/plugins/modules/locale_gen.py +++ b/plugins/modules/locale_gen.py @@ -38,16 +38,11 @@ options: notes: - Currently the module is B(only supported for Debian, Ubuntu, and Arch Linux) systems. - This module requires the package C(locales) installed in Debian and Ubuntu systems. - - If C(/etc/locale.gen) exists, the module assumes to be using the B(glibc) mechanism, else if C(/var/lib/locales/supported.d/) - exists it assumes to be using the B(ubuntu_legacy) mechanism, else it raises an error. + - If C(/etc/locale.gen) exists, the module assumes to be using the B(glibc) mechanism, else it raises an error. + Support for C(/var/lib/locales/supported.d/) (the V(ubuntu_legacy) mechanism) has been removed in community.general 13.0.0. - When using V(glibc) mechanism, it manages locales by editing C(/etc/locale.gen) and running C(locale-gen). - - When using V(ubuntu_legacy) mechanism, it manages locales by editing C(/var/lib/locales/supported.d/local) and then running - C(locale-gen). 
- Please note that the module asserts the availability of the locale by checking the files C(/usr/share/i18n/SUPPORTED) and C(/usr/local/share/i18n/SUPPORTED), but the C(/usr/local) one is not supported by Archlinux. - - Please note that the code path that uses V(ubuntu_legacy) mechanism has not been tested for a while, because recent versions of - Ubuntu is already using the V(glibc) mechanism. There is no support for V(ubuntu_legacy), given our inability to test it. - Therefore, that mechanism is B(deprecated) and will be removed in community.general 13.0.0. """ EXAMPLES = r""" @@ -70,7 +65,6 @@ mechanism: type: str choices: - glibc - - ubuntu_legacy returned: success sample: glibc version_added: 10.2.0 @@ -114,10 +108,6 @@ class LocaleGen(StateModuleHelper): def __init_module__(self): self.mechanisms = dict( - ubuntu_legacy=dict( - available=SUPPORTED_LOCALES, - apply_change=self.apply_change_ubuntu_legacy, - ), glibc=dict( available=SUPPORTED_LOCALES, apply_change=self.apply_change_glibc, @@ -127,18 +117,8 @@ class LocaleGen(StateModuleHelper): if os.path.exists(ETC_LOCALE_GEN): self.vars.ubuntu_mode = False self.vars.mechanism = "glibc" - elif os.path.exists(VAR_LIB_LOCALES): - self.vars.ubuntu_mode = True - self.vars.mechanism = "ubuntu_legacy" - self.module.deprecate( - "On this machine mechanism=ubuntu_legacy is used. This mechanism is deprecated and will be removed from" - " in community.general 13.0.0. If you see this message on a modern Debian or Ubuntu version," - " please create an issue in the community.general repository", - version="13.0.0", - collection_name="community.general", - ) else: - self.do_raise(f'{VAR_LIB_LOCALES} and {ETC_LOCALE_GEN} are missing. Is the package "locales" installed?') + self.do_raise(f'{ETC_LOCALE_GEN} is missing. 
Is the package "locales" installed?') self.runner = locale_runner(self.module) @@ -269,34 +249,6 @@ class LocaleGen(StateModuleHelper): with runner() as ctx: ctx.run() - def apply_change_ubuntu_legacy(self, target_state, names): - """Create or remove locale. - - Keyword arguments: - target_state -- Desired state, either present or absent. - names -- Name list including encoding such as de_CH.UTF-8. - """ - runner = locale_gen_runner(self.module) - - if target_state == "present": - # Create locale. - # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local - with runner() as ctx: - ctx.run() - else: - # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales. - with open(VAR_LIB_LOCALES_LOCAL) as fr: - content = fr.readlines() - with open(VAR_LIB_LOCALES_LOCAL, "w") as fw: - for line in content: - locale, charset = line.split(" ") - if locale not in names: - fw.write(line) - # Purge locales and regenerate. - # Please provide a patch if you know how to avoid regenerating the locales to keep! - with runner("purge") as ctx: - ctx.run() - @check_mode_skip def __state_fallback__(self): if self.vars.state_tracking == self.vars.state: diff --git a/plugins/modules/oci_vcn.py b/plugins/modules/oci_vcn.py deleted file mode 100644 index 84b0fbaf08..0000000000 --- a/plugins/modules/oci_vcn.py +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/python -# Copyright (c) 2017, 2018, Oracle and/or its affiliates. -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -DOCUMENTATION = r""" -module: oci_vcn -short_description: Manage Virtual Cloud Networks(VCN) in OCI -deprecated: - removed_in: 13.0.0 - why: Superseded by official Oracle collection. - alternative: Use module C(oci_network_vcn) from the C(oracle.oci) collection. 
-description: - - This module allows the user to create, delete and update virtual cloud networks(VCNs) in OCI. The complete Oracle Cloud - Infrastructure Ansible Modules can be downloaded from U(https://github.com/oracle/oci-ansible-modules/releases). -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - cidr_block: - description: The CIDR IP address block of the VCN. Required when creating a VCN with O(state=present). - type: str - compartment_id: - description: The OCID of the compartment to contain the VCN. Required when creating a VCN with O(state=present). This - option is mutually exclusive with O(vcn_id). - type: str - display_name: - description: A user-friendly name. Does not have to be unique, and it is changeable. - type: str - aliases: ['name'] - dns_label: - description: A DNS label for the VCN, used in conjunction with the VNIC's hostname and subnet's DNS label to form a fully - qualified domain name (FQDN) for each VNIC within this subnet (for example, V(bminstance-1.subnet123.vcn1.oraclevcn.com)). - Not required to be unique, but it is a best practice to set unique DNS labels for VCNs in your tenancy. Must be an alphanumeric - string that begins with a letter. The value cannot be changed. - type: str - state: - description: Create or update a VCN with O(state=present). Use O(state=absent) to delete a VCN. - type: str - default: present - choices: ['present', 'absent'] - vcn_id: - description: The OCID of the VCN. Required when deleting a VCN with O(state=absent) or updating a VCN with O(state=present). - This option is mutually exclusive with O(compartment_id). 
- type: str - aliases: ['id'] -author: "Rohit Chaware (@rohitChaware)" -extends_documentation_fragment: - - community.general.oracle - - community.general.oracle_creatable_resource - - community.general.oracle_wait_options - - community.general.oracle_tags - - community.general.attributes -""" - -EXAMPLES = r""" -- name: Create a VCN - community.general.oci_vcn: - cidr_block: '10.0.0.0/16' - compartment_id: 'ocid1.compartment.oc1..xxxxxEXAMPLExxxxx' - display_name: my_vcn - dns_label: ansiblevcn - -- name: Updates the specified VCN's display name - community.general.oci_vcn: - vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx - display_name: ansible_vcn - -- name: Delete the specified VCN - community.general.oci_vcn: - vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx - state: absent -""" - -RETURN = r""" -vcn: - description: Information about the VCN. - returned: On successful create and update operation - type: dict - sample: - { - "cidr_block": "10.0.0.0/16", - "compartment_id\"": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx", - "default_dhcp_options_id": "ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx", - "default_route_table_id": "ocid1.routetable.oc1.phx.xxxxxEXAMPLExxxxx", - "default_security_list_id": "ocid1.securitylist.oc1.phx.xxxxxEXAMPLExxxxx", - "display_name": "ansible_vcn", - "dns_label": "ansiblevcn", - "id": "ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx", - "lifecycle_state": "AVAILABLE", - "time_created": "2017-11-13T20:22:40.626000+00:00", - "vcn_domain_name": "ansiblevcn.oraclevcn.com" - } -""" - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - -from ansible_collections.community.general.plugins.module_utils.oracle import oci_utils - -try: - from oci.core.models import CreateVcnDetails, UpdateVcnDetails - from oci.core.virtual_network_client import VirtualNetworkClient - - HAS_OCI_PY_SDK = True -except ImportError: - HAS_OCI_PY_SDK = False - - -def delete_vcn(virtual_network_client, module): - result = oci_utils.delete_and_wait( - 
resource_type="vcn", - client=virtual_network_client, - get_fn=virtual_network_client.get_vcn, - kwargs_get={"vcn_id": module.params["vcn_id"]}, - delete_fn=virtual_network_client.delete_vcn, - kwargs_delete={"vcn_id": module.params["vcn_id"]}, - module=module, - ) - return result - - -def update_vcn(virtual_network_client, module): - result = oci_utils.check_and_update_resource( - resource_type="vcn", - client=virtual_network_client, - get_fn=virtual_network_client.get_vcn, - kwargs_get={"vcn_id": module.params["vcn_id"]}, - update_fn=virtual_network_client.update_vcn, - primitive_params_update=["vcn_id"], - kwargs_non_primitive_update={UpdateVcnDetails: "update_vcn_details"}, - module=module, - update_attributes=list(UpdateVcnDetails().attribute_map.keys()), - ) - return result - - -def create_vcn(virtual_network_client, module): - create_vcn_details = CreateVcnDetails() - for attribute in create_vcn_details.attribute_map.keys(): - if attribute in module.params: - setattr(create_vcn_details, attribute, module.params[attribute]) - - result = oci_utils.create_and_wait( - resource_type="vcn", - create_fn=virtual_network_client.create_vcn, - kwargs_create={"create_vcn_details": create_vcn_details}, - client=virtual_network_client, - get_fn=virtual_network_client.get_vcn, - get_param="vcn_id", - module=module, - ) - return result - - -def main(): - module_args = oci_utils.get_taggable_arg_spec(supports_create=True, supports_wait=True) - module_args.update( - dict( - cidr_block=dict(type="str"), - compartment_id=dict(type="str"), - display_name=dict(type="str", aliases=["name"]), - dns_label=dict(type="str"), - state=dict(type="str", default="present", choices=["absent", "present"]), - vcn_id=dict(type="str", aliases=["id"]), - ) - ) - - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=False, - mutually_exclusive=[["compartment_id", "vcn_id"]], - ) - - if not HAS_OCI_PY_SDK: - module.fail_json(msg=missing_required_lib("oci")) - - 
virtual_network_client = oci_utils.create_service_client(module, VirtualNetworkClient) - - exclude_attributes = {"display_name": True, "dns_label": True} - state = module.params["state"] - vcn_id = module.params["vcn_id"] - - if state == "absent": - if vcn_id is not None: - result = delete_vcn(virtual_network_client, module) - else: - module.fail_json(msg="Specify vcn_id with state as 'absent' to delete a VCN.") - - else: - if vcn_id is not None: - result = update_vcn(virtual_network_client, module) - else: - result = oci_utils.check_and_create_resource( - resource_type="vcn", - create_fn=create_vcn, - kwargs_create={ - "virtual_network_client": virtual_network_client, - "module": module, - }, - list_fn=virtual_network_client.list_vcns, - kwargs_list={"compartment_id": module.params["compartment_id"]}, - module=module, - model=CreateVcnDetails(), - exclude_attributes=exclude_attributes, - ) - - module.exit_json(**result) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/oneandone_firewall_policy.py b/plugins/modules/oneandone_firewall_policy.py deleted file mode 100644 index 55a24818d4..0000000000 --- a/plugins/modules/oneandone_firewall_policy.py +++ /dev/null @@ -1,503 +0,0 @@ -#!/usr/bin/python -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -DOCUMENTATION = r""" -module: oneandone_firewall_policy -short_description: Configure 1&1 firewall policy -description: - - Create, remove, reconfigure, update firewall policies. This module has a dependency on 1and1 >= 1.0. -deprecated: - removed_in: 13.0.0 - why: DNS fails to resolve the API endpoint used by the module. - alternative: There is none. 
-extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - state: - description: - - Define a firewall policy state to create, remove, or update. - type: str - default: 'present' - choices: ["present", "absent", "update"] - auth_token: - description: - - Authenticating API token provided by 1&1. - type: str - api_url: - description: - - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable. - type: str - name: - description: - - Firewall policy name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128. - type: str - firewall_policy: - description: - - The identifier (id or name) of the firewall policy used with update state. - type: str - rules: - description: - - List of rules that are set for the firewall policy. Each rule must contain protocol parameter, in addition to three - optional parameters (port_from, port_to, and source). - type: list - elements: dict - default: [] - add_server_ips: - description: - - A list of server identifiers (ID or name) to be assigned to a firewall policy. Used in combination with update state. - type: list - elements: str - default: [] - remove_server_ips: - description: - - A list of server IP IDs to be unassigned from a firewall policy. Used in combination with update state. - type: list - elements: str - default: [] - add_rules: - description: - - List of rules that are added to an existing firewall policy. It is syntax is the same as the one used for rules parameter. - Used in combination with update state. - type: list - elements: dict - default: [] - remove_rules: - description: - - List of rule IDs that are removed from an existing firewall policy. Used in combination with update state. - type: list - elements: str - default: [] - description: - description: - - Firewall policy description. maxLength=256. 
- type: str - wait: - description: - - Wait for the instance to be in state 'running' before returning. - default: true - type: bool - wait_timeout: - description: - - How long before wait gives up, in seconds. - type: int - default: 600 - wait_interval: - description: - - Defines the number of seconds to wait when using the _wait_for methods. - type: int - default: 5 - -requirements: - - "1and1" - -author: - - "Amel Ajdinovic (@aajdinov)" - - "Ethan Devenport (@edevenport)" -""" - -EXAMPLES = r""" -- name: Create a firewall policy - community.general.oneandone_firewall_policy: - auth_token: oneandone_private_api_key - name: ansible-firewall-policy - description: Testing creation of firewall policies with ansible - rules: - - protocol: TCP - port_from: 80 - port_to: 80 - source: 0.0.0.0 - wait: true - wait_timeout: 500 - -- name: Destroy a firewall policy - community.general.oneandone_firewall_policy: - auth_token: oneandone_private_api_key - state: absent - name: ansible-firewall-policy - -- name: Update a firewall policy - community.general.oneandone_firewall_policy: - auth_token: oneandone_private_api_key - state: update - firewall_policy: ansible-firewall-policy - name: ansible-firewall-policy-updated - description: Testing creation of firewall policies with ansible - updated - -- name: Add server to a firewall policy - community.general.oneandone_firewall_policy: - auth_token: oneandone_private_api_key - firewall_policy: ansible-firewall-policy-updated - add_server_ips: - - server_identifier (id or name) - - "server_identifier #2 (id or name)" - wait: true - wait_timeout: 500 - state: update - -- name: Remove server from a firewall policy - community.general.oneandone_firewall_policy: - auth_token: oneandone_private_api_key - firewall_policy: ansible-firewall-policy-updated - remove_server_ips: - - B2504878540DBC5F7634EB00A07C1EBD (server's IP id) - wait: true - wait_timeout: 500 - state: update - -- name: Add rules to a firewall policy - 
community.general.oneandone_firewall_policy: - auth_token: oneandone_private_api_key - firewall_policy: ansible-firewall-policy-updated - description: Adding rules to an existing firewall policy - add_rules: - - protocol: TCP - port_from: 70 - port_to: 70 - source: 0.0.0.0 - - protocol: TCP - port_from: 60 - port_to: 60 - source: 0.0.0.0 - wait: true - wait_timeout: 500 - state: update - -- name: Remove rules from a firewall policy - community.general.oneandone_firewall_policy: - auth_token: oneandone_private_api_key - firewall_policy: ansible-firewall-policy-updated - remove_rules: - - "rule_id #1" - - "rule_id #2" - - '...' - wait: true - wait_timeout: 500 - state: update -""" - -RETURN = r""" -firewall_policy: - description: Information about the firewall policy that was processed. - type: dict - sample: {"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"} - returned: always -""" - -import os - -from ansible.module_utils.basic import AnsibleModule - -from ansible_collections.community.general.plugins.module_utils.oneandone import ( - OneAndOneResources, - get_firewall_policy, - get_server, - wait_for_resource_creation_completion, -) - -HAS_ONEANDONE_SDK = True - -try: - import oneandone.client -except ImportError: - HAS_ONEANDONE_SDK = False - - -def _check_mode(module, result): - if module.check_mode: - module.exit_json(changed=result) - - -def _add_server_ips(module, oneandone_conn, firewall_id, server_ids): - """ - Assigns servers to a firewall policy. 
- """ - try: - attach_servers = [] - - for _server_id in server_ids: - server = get_server(oneandone_conn, _server_id, True) - attach_server = oneandone.client.AttachServer( - server_id=server["id"], server_ip_id=next(iter(server["ips"] or []), None)["id"] - ) - attach_servers.append(attach_server) - - if module.check_mode: - return bool(attach_servers) - - firewall_policy = oneandone_conn.attach_server_firewall_policy( - firewall_id=firewall_id, server_ips=attach_servers - ) - return firewall_policy - except Exception as e: - module.fail_json(msg=str(e)) - - -def _remove_firewall_server(module, oneandone_conn, firewall_id, server_ip_id): - """ - Unassigns a server/IP from a firewall policy. - """ - try: - if module.check_mode: - firewall_server = oneandone_conn.get_firewall_server(firewall_id=firewall_id, server_ip_id=server_ip_id) - return bool(firewall_server) - - firewall_policy = oneandone_conn.remove_firewall_server(firewall_id=firewall_id, server_ip_id=server_ip_id) - return firewall_policy - except Exception as e: - module.fail_json(msg=str(e)) - - -def _add_firewall_rules(module, oneandone_conn, firewall_id, rules): - """ - Adds new rules to a firewall policy. - """ - try: - firewall_rules = [] - - for rule in rules: - firewall_rule = oneandone.client.FirewallPolicyRule( - protocol=rule["protocol"], port_from=rule["port_from"], port_to=rule["port_to"], source=rule["source"] - ) - firewall_rules.append(firewall_rule) - - if module.check_mode: - firewall_policy_id = get_firewall_policy(oneandone_conn, firewall_id) - return bool(firewall_rules and firewall_policy_id) - - firewall_policy = oneandone_conn.add_firewall_policy_rule( - firewall_id=firewall_id, firewall_policy_rules=firewall_rules - ) - return firewall_policy - except Exception as e: - module.fail_json(msg=str(e)) - - -def _remove_firewall_rule(module, oneandone_conn, firewall_id, rule_id): - """ - Removes a rule from a firewall policy. 
- """ - try: - if module.check_mode: - rule = oneandone_conn.get_firewall_policy_rule(firewall_id=firewall_id, rule_id=rule_id) - return bool(rule) - - firewall_policy = oneandone_conn.remove_firewall_rule(firewall_id=firewall_id, rule_id=rule_id) - return firewall_policy - except Exception as e: - module.fail_json(msg=str(e)) - - -def update_firewall_policy(module, oneandone_conn): - """ - Updates a firewall policy based on input arguments. - Firewall rules and server ips can be added/removed to/from - firewall policy. Firewall policy name and description can be - updated as well. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - firewall_policy_id = module.params.get("firewall_policy") - name = module.params.get("name") - description = module.params.get("description") - add_server_ips = module.params.get("add_server_ips") - remove_server_ips = module.params.get("remove_server_ips") - add_rules = module.params.get("add_rules") - remove_rules = module.params.get("remove_rules") - - changed = False - - firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy_id, True) - if firewall_policy is None: - _check_mode(module, False) - - if name or description: - _check_mode(module, True) - firewall_policy = oneandone_conn.modify_firewall( - firewall_id=firewall_policy["id"], name=name, description=description - ) - changed = True - - if add_server_ips: - if module.check_mode: - _check_mode(module, _add_server_ips(module, oneandone_conn, firewall_policy["id"], add_server_ips)) - - firewall_policy = _add_server_ips(module, oneandone_conn, firewall_policy["id"], add_server_ips) - changed = True - - if remove_server_ips: - chk_changed = False - for server_ip_id in remove_server_ips: - if module.check_mode: - chk_changed |= _remove_firewall_server(module, oneandone_conn, firewall_policy["id"], server_ip_id) - - _remove_firewall_server(module, oneandone_conn, firewall_policy["id"], server_ip_id) - _check_mode(module, 
chk_changed) - firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy["id"], True) - changed = True - - if add_rules: - firewall_policy = _add_firewall_rules(module, oneandone_conn, firewall_policy["id"], add_rules) - _check_mode(module, firewall_policy) - changed = True - - if remove_rules: - chk_changed = False - for rule_id in remove_rules: - if module.check_mode: - chk_changed |= _remove_firewall_rule(module, oneandone_conn, firewall_policy["id"], rule_id) - - _remove_firewall_rule(module, oneandone_conn, firewall_policy["id"], rule_id) - _check_mode(module, chk_changed) - firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy["id"], True) - changed = True - - return (changed, firewall_policy) - except Exception as e: - module.fail_json(msg=str(e)) - - -def create_firewall_policy(module, oneandone_conn): - """ - Create a new firewall policy. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - name = module.params.get("name") - description = module.params.get("description") - rules = module.params.get("rules") - wait = module.params.get("wait") - wait_timeout = module.params.get("wait_timeout") - wait_interval = module.params.get("wait_interval") - - firewall_rules = [] - - for rule in rules: - firewall_rule = oneandone.client.FirewallPolicyRule( - protocol=rule["protocol"], port_from=rule["port_from"], port_to=rule["port_to"], source=rule["source"] - ) - firewall_rules.append(firewall_rule) - - firewall_policy_obj = oneandone.client.FirewallPolicy(name=name, description=description) - - _check_mode(module, True) - firewall_policy = oneandone_conn.create_firewall_policy( - firewall_policy=firewall_policy_obj, firewall_policy_rules=firewall_rules - ) - - if wait: - wait_for_resource_creation_completion( - oneandone_conn, OneAndOneResources.firewall_policy, firewall_policy["id"], wait_timeout, wait_interval - ) - - firewall_policy = get_firewall_policy(oneandone_conn, 
firewall_policy["id"], True) # refresh - changed = True if firewall_policy else False - - _check_mode(module, False) - - return (changed, firewall_policy) - except Exception as e: - module.fail_json(msg=str(e)) - - -def remove_firewall_policy(module, oneandone_conn): - """ - Removes a firewall policy. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - fp_id = module.params.get("name") - firewall_policy_id = get_firewall_policy(oneandone_conn, fp_id) - if module.check_mode: - if firewall_policy_id is None: - _check_mode(module, False) - _check_mode(module, True) - firewall_policy = oneandone_conn.delete_firewall(firewall_policy_id) - - changed = True if firewall_policy else False - - return (changed, {"id": firewall_policy["id"], "name": firewall_policy["name"]}) - except Exception as e: - module.fail_json(msg=str(e)) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict(type="str", no_log=True, default=os.environ.get("ONEANDONE_AUTH_TOKEN")), - api_url=dict(type="str", default=os.environ.get("ONEANDONE_API_URL")), - name=dict(type="str"), - firewall_policy=dict(type="str"), - description=dict(type="str"), - rules=dict(type="list", elements="dict", default=[]), - add_server_ips=dict(type="list", elements="str", default=[]), - remove_server_ips=dict(type="list", elements="str", default=[]), - add_rules=dict(type="list", elements="dict", default=[]), - remove_rules=dict(type="list", elements="str", default=[]), - wait=dict(type="bool", default=True), - wait_timeout=dict(type="int", default=600), - wait_interval=dict(type="int", default=5), - state=dict(type="str", default="present", choices=["present", "absent", "update"]), - ), - supports_check_mode=True, - ) - - if not HAS_ONEANDONE_SDK: - module.fail_json(msg="1and1 required for this module") - - if not module.params.get("auth_token"): - module.fail_json(msg='The "auth_token" parameter or ONEANDONE_AUTH_TOKEN environment variable is 
required.') - - if not module.params.get("api_url"): - oneandone_conn = oneandone.client.OneAndOneService(api_token=module.params.get("auth_token")) - else: - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get("auth_token"), api_url=module.params.get("api_url") - ) - - state = module.params.get("state") - - if state == "absent": - if not module.params.get("name"): - module.fail_json(msg="'name' parameter is required to delete a firewall policy.") - try: - (changed, firewall_policy) = remove_firewall_policy(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - - elif state == "update": - if not module.params.get("firewall_policy"): - module.fail_json(msg="'firewall_policy' parameter is required to update a firewall policy.") - try: - (changed, firewall_policy) = update_firewall_policy(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - - elif state == "present": - for param in ("name", "rules"): - if not module.params.get(param): - module.fail_json(msg=f"{param} parameter is required for new firewall policies.") - try: - (changed, firewall_policy) = create_firewall_policy(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - - module.exit_json(changed=changed, firewall_policy=firewall_policy) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/oneandone_load_balancer.py b/plugins/modules/oneandone_load_balancer.py deleted file mode 100644 index a4737d1e5a..0000000000 --- a/plugins/modules/oneandone_load_balancer.py +++ /dev/null @@ -1,634 +0,0 @@ -#!/usr/bin/python -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -DOCUMENTATION = r""" -module: oneandone_load_balancer -short_description: Configure 1&1 load balancer -description: - - Create, remove, 
update load balancers. This module has a dependency on 1and1 >= 1.0. -deprecated: - removed_in: 13.0.0 - why: DNS fails to resolve the API endpoint used by the module. - alternative: There is none. -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - state: - description: - - Define a load balancer state to create, remove, or update. - type: str - default: 'present' - choices: ["present", "absent", "update"] - auth_token: - description: - - Authenticating API token provided by 1&1. - type: str - load_balancer: - description: - - The identifier (id or name) of the load balancer used with update state. - type: str - api_url: - description: - - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable. - type: str - name: - description: - - Load balancer name used with present state. Used as identifier (ID or name) when used with absent state. maxLength=128. - type: str - health_check_test: - description: - - Type of the health check. At the moment, HTTP is not allowed. - type: str - choices: ["NONE", "TCP", "HTTP", "ICMP"] - health_check_interval: - description: - - Health check period in seconds. minimum=5, maximum=300, multipleOf=1. - type: str - health_check_path: - description: - - URL to call for checking. Required for HTTP health check. maxLength=1000. - type: str - health_check_parse: - description: - - Regular expression to check. Required for HTTP health check. maxLength=64. - type: str - persistence: - description: - - Persistence. - type: bool - persistence_time: - description: - - Persistence time in seconds. Required if persistence is enabled. minimum=30, maximum=1200, multipleOf=1. - type: str - method: - description: - - Balancing procedure. - type: str - choices: ["ROUND_ROBIN", "LEAST_CONNECTIONS"] - datacenter: - description: - - ID or country code of the datacenter where the load balancer is created. 
- - If not specified, it defaults to V(US). - type: str - choices: ["US", "ES", "DE", "GB"] - rules: - description: - - A list of rule objects that are set for the load balancer. Each rule must contain protocol, port_balancer, and port_server - parameters, in addition to source parameter, which is optional. - type: list - elements: dict - default: [] - description: - description: - - Description of the load balancer. maxLength=256. - type: str - add_server_ips: - description: - - A list of server identifiers (id or name) to be assigned to a load balancer. Used in combination with O(state=update). - type: list - elements: str - default: [] - remove_server_ips: - description: - - A list of server IP IDs to be unassigned from a load balancer. Used in combination with O(state=update). - type: list - elements: str - default: [] - add_rules: - description: - - A list of rules that are added to an existing load balancer. It is syntax is the same as the one used for rules parameter. - Used in combination with O(state=update). - type: list - elements: dict - default: [] - remove_rules: - description: - - A list of rule IDs that are removed from an existing load balancer. Used in combination with O(state=update). - type: list - elements: str - default: [] - wait: - description: - - Wait for the instance to be in state 'running' before returning. - default: true - type: bool - wait_timeout: - description: - - How long before wait gives up, in seconds. - type: int - default: 600 - wait_interval: - description: - - Defines the number of seconds to wait when using the _wait_for methods. 
- type: int - default: 5 - -requirements: - - "1and1" - -author: - - Amel Ajdinovic (@aajdinov) - - Ethan Devenport (@edevenport) -""" - -EXAMPLES = r""" -- name: Create a load balancer - community.general.oneandone_load_balancer: - auth_token: oneandone_private_api_key - name: ansible load balancer - description: Testing creation of load balancer with ansible - health_check_test: TCP - health_check_interval: 40 - persistence: true - persistence_time: 1200 - method: ROUND_ROBIN - datacenter: US - rules: - - protocol: TCP - port_balancer: 80 - port_server: 80 - source: 0.0.0.0 - wait: true - wait_timeout: 500 - -- name: Destroy a load balancer - community.general.oneandone_load_balancer: - auth_token: oneandone_private_api_key - name: ansible load balancer - wait: true - wait_timeout: 500 - state: absent - -- name: Update a load balancer - community.general.oneandone_load_balancer: - auth_token: oneandone_private_api_key - load_balancer: ansible load balancer - name: ansible load balancer updated - description: Testing the update of a load balancer with ansible - wait: true - wait_timeout: 500 - state: update - -- name: Add server to a load balancer - community.general.oneandone_load_balancer: - auth_token: oneandone_private_api_key - load_balancer: ansible load balancer updated - description: Adding server to a load balancer with ansible - add_server_ips: - - server identifier (id or name) - wait: true - wait_timeout: 500 - state: update - -- name: Remove server from a load balancer - community.general.oneandone_load_balancer: - auth_token: oneandone_private_api_key - load_balancer: ansible load balancer updated - description: Removing server from a load balancer with ansible - remove_server_ips: - - B2504878540DBC5F7634EB00A07C1EBD (server's ip id) - wait: true - wait_timeout: 500 - state: update - -- name: Add rules to a load balancer - community.general.oneandone_load_balancer: - auth_token: oneandone_private_api_key - load_balancer: ansible load balancer 
updated - description: Adding rules to a load balancer with ansible - add_rules: - - protocol: TCP - port_balancer: 70 - port_server: 70 - source: 0.0.0.0 - - protocol: TCP - port_balancer: 60 - port_server: 60 - source: 0.0.0.0 - wait: true - wait_timeout: 500 - state: update - -- name: Remove rules from a load balancer - community.general.oneandone_load_balancer: - auth_token: oneandone_private_api_key - load_balancer: ansible load balancer updated - description: Adding rules to a load balancer with ansible - remove_rules: - - "rule_id #1" - - "rule_id #2" - - '...' - wait: true - wait_timeout: 500 - state: update -""" - -RETURN = r""" -load_balancer: - description: Information about the load balancer that was processed. - type: dict - sample: {"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"} - returned: always -""" - -import os - -from ansible.module_utils.basic import AnsibleModule - -from ansible_collections.community.general.plugins.module_utils.oneandone import ( - OneAndOneResources, - get_datacenter, - get_load_balancer, - get_server, - wait_for_resource_creation_completion, -) - -HAS_ONEANDONE_SDK = True - -try: - import oneandone.client -except ImportError: - HAS_ONEANDONE_SDK = False - -DATACENTERS = ["US", "ES", "DE", "GB"] -HEALTH_CHECK_TESTS = ["NONE", "TCP", "HTTP", "ICMP"] -METHODS = ["ROUND_ROBIN", "LEAST_CONNECTIONS"] - - -def _check_mode(module, result): - if module.check_mode: - module.exit_json(changed=result) - - -def _add_server_ips(module, oneandone_conn, load_balancer_id, server_ids): - """ - Assigns servers to a load balancer. 
- """ - try: - attach_servers = [] - - for server_id in server_ids: - server = get_server(oneandone_conn, server_id, True) - attach_server = oneandone.client.AttachServer( - server_id=server["id"], server_ip_id=next(iter(server["ips"] or []), None)["id"] - ) - attach_servers.append(attach_server) - - if module.check_mode: - return bool(attach_servers) - - load_balancer = oneandone_conn.attach_load_balancer_server( - load_balancer_id=load_balancer_id, server_ips=attach_servers - ) - return load_balancer - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _remove_load_balancer_server(module, oneandone_conn, load_balancer_id, server_ip_id): - """ - Unassigns a server/IP from a load balancer. - """ - try: - if module.check_mode: - lb_server = oneandone_conn.get_load_balancer_server( - load_balancer_id=load_balancer_id, server_ip_id=server_ip_id - ) - return bool(lb_server) - - load_balancer = oneandone_conn.remove_load_balancer_server( - load_balancer_id=load_balancer_id, server_ip_id=server_ip_id - ) - return load_balancer - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _add_load_balancer_rules(module, oneandone_conn, load_balancer_id, rules): - """ - Adds new rules to a load_balancer. 
- """ - try: - load_balancer_rules = [] - - for rule in rules: - load_balancer_rule = oneandone.client.LoadBalancerRule( - protocol=rule["protocol"], - port_balancer=rule["port_balancer"], - port_server=rule["port_server"], - source=rule["source"], - ) - load_balancer_rules.append(load_balancer_rule) - - if module.check_mode: - lb_id = get_load_balancer(oneandone_conn, load_balancer_id) - return bool(load_balancer_rules and lb_id) - - load_balancer = oneandone_conn.add_load_balancer_rule( - load_balancer_id=load_balancer_id, load_balancer_rules=load_balancer_rules - ) - - return load_balancer - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _remove_load_balancer_rule(module, oneandone_conn, load_balancer_id, rule_id): - """ - Removes a rule from a load_balancer. - """ - try: - if module.check_mode: - rule = oneandone_conn.get_load_balancer_rule(load_balancer_id=load_balancer_id, rule_id=rule_id) - return bool(rule) - - load_balancer = oneandone_conn.remove_load_balancer_rule(load_balancer_id=load_balancer_id, rule_id=rule_id) - return load_balancer - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def update_load_balancer(module, oneandone_conn): - """ - Updates a load_balancer based on input arguments. - Load balancer rules and server ips can be added/removed to/from - load balancer. Load balancer name, description, health_check_test, - health_check_interval, persistence, persistence_time, and method - can be updated as well. 
- - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - load_balancer_id = module.params.get("load_balancer") - name = module.params.get("name") - description = module.params.get("description") - health_check_test = module.params.get("health_check_test") - health_check_interval = module.params.get("health_check_interval") - health_check_path = module.params.get("health_check_path") - health_check_parse = module.params.get("health_check_parse") - persistence = module.params.get("persistence") - persistence_time = module.params.get("persistence_time") - method = module.params.get("method") - add_server_ips = module.params.get("add_server_ips") - remove_server_ips = module.params.get("remove_server_ips") - add_rules = module.params.get("add_rules") - remove_rules = module.params.get("remove_rules") - - changed = False - - load_balancer = get_load_balancer(oneandone_conn, load_balancer_id, True) - if load_balancer is None: - _check_mode(module, False) - - if ( - name - or description - or health_check_test - or health_check_interval - or health_check_path - or health_check_parse - or persistence - or persistence_time - or method - ): - _check_mode(module, True) - load_balancer = oneandone_conn.modify_load_balancer( - load_balancer_id=load_balancer["id"], - name=name, - description=description, - health_check_test=health_check_test, - health_check_interval=health_check_interval, - health_check_path=health_check_path, - health_check_parse=health_check_parse, - persistence=persistence, - persistence_time=persistence_time, - method=method, - ) - changed = True - - if add_server_ips: - if module.check_mode: - _check_mode(module, _add_server_ips(module, oneandone_conn, load_balancer["id"], add_server_ips)) - - load_balancer = _add_server_ips(module, oneandone_conn, load_balancer["id"], add_server_ips) - changed = True - - if remove_server_ips: - chk_changed = False - for server_ip_id in remove_server_ips: - if module.check_mode: - 
chk_changed |= _remove_load_balancer_server(module, oneandone_conn, load_balancer["id"], server_ip_id) - - _remove_load_balancer_server(module, oneandone_conn, load_balancer["id"], server_ip_id) - _check_mode(module, chk_changed) - load_balancer = get_load_balancer(oneandone_conn, load_balancer["id"], True) - changed = True - - if add_rules: - load_balancer = _add_load_balancer_rules(module, oneandone_conn, load_balancer["id"], add_rules) - _check_mode(module, load_balancer) - changed = True - - if remove_rules: - chk_changed = False - for rule_id in remove_rules: - if module.check_mode: - chk_changed |= _remove_load_balancer_rule(module, oneandone_conn, load_balancer["id"], rule_id) - - _remove_load_balancer_rule(module, oneandone_conn, load_balancer["id"], rule_id) - _check_mode(module, chk_changed) - load_balancer = get_load_balancer(oneandone_conn, load_balancer["id"], True) - changed = True - - try: - return (changed, load_balancer) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def create_load_balancer(module, oneandone_conn): - """ - Create a new load_balancer. 
- - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - name = module.params.get("name") - description = module.params.get("description") - health_check_test = module.params.get("health_check_test") - health_check_interval = module.params.get("health_check_interval") - health_check_path = module.params.get("health_check_path") - health_check_parse = module.params.get("health_check_parse") - persistence = module.params.get("persistence") - persistence_time = module.params.get("persistence_time") - method = module.params.get("method") - datacenter = module.params.get("datacenter") - rules = module.params.get("rules") - wait = module.params.get("wait") - wait_timeout = module.params.get("wait_timeout") - wait_interval = module.params.get("wait_interval") - - load_balancer_rules = [] - - datacenter_id = None - if datacenter is not None: - datacenter_id = get_datacenter(oneandone_conn, datacenter) - if datacenter_id is None: - module.fail_json(msg=f"datacenter {datacenter} not found.") - - for rule in rules: - load_balancer_rule = oneandone.client.LoadBalancerRule( - protocol=rule["protocol"], - port_balancer=rule["port_balancer"], - port_server=rule["port_server"], - source=rule["source"], - ) - load_balancer_rules.append(load_balancer_rule) - - _check_mode(module, True) - load_balancer_obj = oneandone.client.LoadBalancer( - health_check_path=health_check_path, - health_check_parse=health_check_parse, - name=name, - description=description, - health_check_test=health_check_test, - health_check_interval=health_check_interval, - persistence=persistence, - persistence_time=persistence_time, - method=method, - datacenter_id=datacenter_id, - ) - - load_balancer = oneandone_conn.create_load_balancer( - load_balancer=load_balancer_obj, load_balancer_rules=load_balancer_rules - ) - - if wait: - wait_for_resource_creation_completion( - oneandone_conn, OneAndOneResources.load_balancer, load_balancer["id"], wait_timeout, wait_interval - ) 
- - load_balancer = get_load_balancer(oneandone_conn, load_balancer["id"], True) # refresh - changed = True if load_balancer else False - - _check_mode(module, False) - - return (changed, load_balancer) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def remove_load_balancer(module, oneandone_conn): - """ - Removes a load_balancer. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - lb_id = module.params.get("name") - load_balancer_id = get_load_balancer(oneandone_conn, lb_id) - if module.check_mode: - if load_balancer_id is None: - _check_mode(module, False) - _check_mode(module, True) - load_balancer = oneandone_conn.delete_load_balancer(load_balancer_id) - - changed = True if load_balancer else False - - return (changed, {"id": load_balancer["id"], "name": load_balancer["name"]}) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict(type="str", no_log=True, default=os.environ.get("ONEANDONE_AUTH_TOKEN")), - api_url=dict(type="str", default=os.environ.get("ONEANDONE_API_URL")), - load_balancer=dict(type="str"), - name=dict(type="str"), - description=dict(type="str"), - health_check_test=dict(choices=HEALTH_CHECK_TESTS), - health_check_interval=dict(type="str"), - health_check_path=dict(type="str"), - health_check_parse=dict(type="str"), - persistence=dict(type="bool"), - persistence_time=dict(type="str"), - method=dict(choices=METHODS), - datacenter=dict(choices=DATACENTERS), - rules=dict(type="list", elements="dict", default=[]), - add_server_ips=dict(type="list", elements="str", default=[]), - remove_server_ips=dict(type="list", elements="str", default=[]), - add_rules=dict(type="list", elements="dict", default=[]), - remove_rules=dict(type="list", elements="str", default=[]), - wait=dict(type="bool", default=True), - wait_timeout=dict(type="int", default=600), - wait_interval=dict(type="int", default=5), - 
state=dict(type="str", default="present", choices=["present", "absent", "update"]), - ), - supports_check_mode=True, - ) - - if not HAS_ONEANDONE_SDK: - module.fail_json(msg="1and1 required for this module") - - if not module.params.get("auth_token"): - module.fail_json(msg="auth_token parameter is required.") - - if not module.params.get("api_url"): - oneandone_conn = oneandone.client.OneAndOneService(api_token=module.params.get("auth_token")) - else: - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get("auth_token"), api_url=module.params.get("api_url") - ) - - state = module.params.get("state") - - if state == "absent": - if not module.params.get("name"): - module.fail_json(msg="'name' parameter is required for deleting a load balancer.") - try: - (changed, load_balancer) = remove_load_balancer(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - elif state == "update": - if not module.params.get("load_balancer"): - module.fail_json(msg="'load_balancer' parameter is required for updating a load balancer.") - try: - (changed, load_balancer) = update_load_balancer(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - - elif state == "present": - for param in ( - "name", - "health_check_test", - "health_check_interval", - "persistence", - "persistence_time", - "method", - "rules", - ): - if not module.params.get(param): - module.fail_json(msg=f"{param} parameter is required for new load balancers.") - try: - (changed, load_balancer) = create_load_balancer(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - - module.exit_json(changed=changed, load_balancer=load_balancer) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/oneandone_monitoring_policy.py b/plugins/modules/oneandone_monitoring_policy.py deleted file mode 100644 index 53ec39ae7f..0000000000 --- a/plugins/modules/oneandone_monitoring_policy.py +++ /dev/null @@ 
-1,948 +0,0 @@ -#!/usr/bin/python -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -DOCUMENTATION = r""" -module: oneandone_monitoring_policy -short_description: Configure 1&1 monitoring policy -description: - - Create, remove, update monitoring policies (and add/remove ports, processes, and servers). This module has a dependency - on 1and1 >= 1.0. -deprecated: - removed_in: 13.0.0 - why: DNS fails to resolve the API endpoint used by the module. - alternative: There is none. -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - state: - description: - - Define a monitoring policy's state to create, remove, update. - type: str - default: present - choices: ["present", "absent", "update"] - auth_token: - description: - - Authenticating API token provided by 1&1. - type: str - api_url: - description: - - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable. - type: str - name: - description: - - Monitoring policy name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128. - type: str - monitoring_policy: - description: - - The identifier (id or name) of the monitoring policy used with update state. - type: str - agent: - description: - - Set true for using agent. - type: str - email: - description: - - User's email. maxLength=128. - type: str - description: - description: - - Monitoring policy description. maxLength=256. - type: str - thresholds: - description: - - Monitoring policy thresholds. Each of the suboptions have warning and critical, which both have alert and value suboptions. - Warning is used to set limits for warning alerts, critical is used to set critical alerts. 
alert enables alert, and - value is used to advise when the value is exceeded. - type: list - elements: dict - default: [] - suboptions: - cpu: - description: - - Consumption limits of CPU. - required: true - ram: - description: - - Consumption limits of RAM. - required: true - disk: - description: - - Consumption limits of hard disk. - required: true - internal_ping: - description: - - Response limits of internal ping. - required: true - transfer: - description: - - Consumption limits for transfer. - required: true - ports: - description: - - Array of ports that are to be monitored. - type: list - elements: dict - default: [] - suboptions: - protocol: - description: - - Internet protocol. - choices: ["TCP", "UDP"] - required: true - port: - description: - - Port number. minimum=1, maximum=65535. - required: true - alert_if: - description: - - Case of alert. - choices: ["RESPONDING", "NOT_RESPONDING"] - required: true - email_notification: - description: - - Set true for sending e-mail notifications. - required: true - processes: - description: - - Array of processes that are to be monitored. - type: list - elements: dict - default: [] - suboptions: - process: - description: - - Name of the process. maxLength=50. - required: true - alert_if: - description: - - Case of alert. - choices: ["RUNNING", "NOT_RUNNING"] - required: true - add_ports: - description: - - Ports to add to the monitoring policy. - type: list - elements: dict - default: [] - add_processes: - description: - - Processes to add to the monitoring policy. - type: list - elements: dict - default: [] - add_servers: - description: - - Servers to add to the monitoring policy. - type: list - elements: str - default: [] - remove_ports: - description: - - Ports to remove from the monitoring policy. - type: list - elements: str - default: [] - remove_processes: - description: - - Processes to remove from the monitoring policy. 
- type: list - elements: str - default: [] - remove_servers: - description: - - Servers to remove from the monitoring policy. - type: list - elements: str - default: [] - update_ports: - description: - - Ports to be updated on the monitoring policy. - type: list - elements: dict - default: [] - update_processes: - description: - - Processes to be updated on the monitoring policy. - type: list - elements: dict - default: [] - wait: - description: - - Wait for the instance to be in state 'running' before returning. - default: true - type: bool - wait_timeout: - description: - - How long before wait gives up, in seconds. - type: int - default: 600 - wait_interval: - description: - - Defines the number of seconds to wait when using the _wait_for methods. - type: int - default: 5 - -requirements: - - "1and1" - -author: - - "Amel Ajdinovic (@aajdinov)" - - "Ethan Devenport (@edevenport)" -""" - -EXAMPLES = r""" -- name: Create a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - name: ansible monitoring policy - description: Testing creation of a monitoring policy with ansible - email: your@emailaddress.com - agent: true - thresholds: - - cpu: - warning: - value: 80 - alert: false - critical: - value: 92 - alert: false - - ram: - warning: - value: 80 - alert: false - critical: - value: 90 - alert: false - - disk: - warning: - value: 80 - alert: false - critical: - value: 90 - alert: false - - internal_ping: - warning: - value: 50 - alert: false - critical: - value: 100 - alert: false - - transfer: - warning: - value: 1000 - alert: false - critical: - value: 2000 - alert: false - ports: - - protocol: TCP - port: 22 - alert_if: RESPONDING - email_notification: false - processes: - - process: test - alert_if: NOT_RUNNING - email_notification: false - wait: true - -- name: Destroy a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - state: absent - name: 
ansible monitoring policy - -- name: Update a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy - name: ansible monitoring policy updated - description: Testing creation of a monitoring policy with ansible updated - email: another@emailaddress.com - thresholds: - - cpu: - warning: - value: 70 - alert: false - critical: - value: 90 - alert: false - - ram: - warning: - value: 70 - alert: false - critical: - value: 80 - alert: false - - disk: - warning: - value: 70 - alert: false - critical: - value: 80 - alert: false - - internal_ping: - warning: - value: 60 - alert: false - critical: - value: 90 - alert: false - - transfer: - warning: - value: 900 - alert: false - critical: - value: 1900 - alert: false - wait: true - state: update - -- name: Add a port to a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - add_ports: - - protocol: TCP - port: 33 - alert_if: RESPONDING - email_notification: false - wait: true - state: update - -- name: Update existing ports of a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - update_ports: - - id: existing_port_id - protocol: TCP - port: 34 - alert_if: RESPONDING - email_notification: false - - id: existing_port_id - protocol: TCP - port: 23 - alert_if: RESPONDING - email_notification: false - wait: true - state: update - -- name: Remove a port from a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - remove_ports: - - port_id - state: update - -- name: Add a process to a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - 
monitoring_policy: ansible monitoring policy updated - add_processes: - - process: test_2 - alert_if: NOT_RUNNING - email_notification: false - wait: true - state: update - -- name: Update existing processes of a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - update_processes: - - id: process_id - process: test_1 - alert_if: NOT_RUNNING - email_notification: false - - id: process_id - process: test_3 - alert_if: NOT_RUNNING - email_notification: false - wait: true - state: update - -- name: Remove a process from a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - remove_processes: - - process_id - wait: true - state: update - -- name: Add server to a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - add_servers: - - server id or name - wait: true - state: update - -- name: Remove server from a monitoring policy - community.general.oneandone_monitoring_policy: - auth_token: oneandone_private_api_key - monitoring_policy: ansible monitoring policy updated - remove_servers: - - server01 - wait: true - state: update -""" - -RETURN = r""" -monitoring_policy: - description: Information about the monitoring policy that was processed. 
- type: dict - sample: {"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"} - returned: always -""" - -import os - -from ansible.module_utils.basic import AnsibleModule - -from ansible_collections.community.general.plugins.module_utils.oneandone import ( - OneAndOneResources, - get_monitoring_policy, - get_server, - wait_for_resource_creation_completion, -) - -HAS_ONEANDONE_SDK = True - -try: - import oneandone.client -except ImportError: - HAS_ONEANDONE_SDK = False - - -def _check_mode(module, result): - if module.check_mode: - module.exit_json(changed=result) - - -def _add_ports(module, oneandone_conn, monitoring_policy_id, ports): - """ - Adds new ports to a monitoring policy. - """ - try: - monitoring_policy_ports = [] - - for _port in ports: - monitoring_policy_port = oneandone.client.Port( - protocol=_port["protocol"], - port=_port["port"], - alert_if=_port["alert_if"], - email_notification=_port["email_notification"], - ) - monitoring_policy_ports.append(monitoring_policy_port) - - if module.check_mode: - return bool(monitoring_policy_ports) - - monitoring_policy = oneandone_conn.add_port( - monitoring_policy_id=monitoring_policy_id, ports=monitoring_policy_ports - ) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _delete_monitoring_policy_port(module, oneandone_conn, monitoring_policy_id, port_id): - """ - Removes a port from a monitoring policy. - """ - try: - if module.check_mode: - monitoring_policy = oneandone_conn.delete_monitoring_policy_port( - monitoring_policy_id=monitoring_policy_id, port_id=port_id - ) - return bool(monitoring_policy) - - monitoring_policy = oneandone_conn.delete_monitoring_policy_port( - monitoring_policy_id=monitoring_policy_id, port_id=port_id - ) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _modify_port(module, oneandone_conn, monitoring_policy_id, port_id, port): - """ - Modifies a monitoring policy port. 
- """ - try: - if module.check_mode: - cm_port = oneandone_conn.get_monitoring_policy_port( - monitoring_policy_id=monitoring_policy_id, port_id=port_id - ) - return bool(cm_port) - - monitoring_policy_port = oneandone.client.Port( - protocol=port["protocol"], - port=port["port"], - alert_if=port["alert_if"], - email_notification=port["email_notification"], - ) - - monitoring_policy = oneandone_conn.modify_port( - monitoring_policy_id=monitoring_policy_id, port_id=port_id, port=monitoring_policy_port - ) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _add_processes(module, oneandone_conn, monitoring_policy_id, processes): - """ - Adds new processes to a monitoring policy. - """ - try: - monitoring_policy_processes = [] - - for _process in processes: - monitoring_policy_process = oneandone.client.Process( - process=_process["process"], - alert_if=_process["alert_if"], - email_notification=_process["email_notification"], - ) - monitoring_policy_processes.append(monitoring_policy_process) - - if module.check_mode: - mp_id = get_monitoring_policy(oneandone_conn, monitoring_policy_id) - return bool(monitoring_policy_processes and mp_id) - - monitoring_policy = oneandone_conn.add_process( - monitoring_policy_id=monitoring_policy_id, processes=monitoring_policy_processes - ) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _delete_monitoring_policy_process(module, oneandone_conn, monitoring_policy_id, process_id): - """ - Removes a process from a monitoring policy. 
- """ - try: - if module.check_mode: - process = oneandone_conn.get_monitoring_policy_process( - monitoring_policy_id=monitoring_policy_id, process_id=process_id - ) - return bool(process) - - monitoring_policy = oneandone_conn.delete_monitoring_policy_process( - monitoring_policy_id=monitoring_policy_id, process_id=process_id - ) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _modify_process(module, oneandone_conn, monitoring_policy_id, process_id, process): - """ - Modifies a monitoring policy process. - """ - try: - if module.check_mode: - cm_process = oneandone_conn.get_monitoring_policy_process( - monitoring_policy_id=monitoring_policy_id, process_id=process_id - ) - return bool(cm_process) - - monitoring_policy_process = oneandone.client.Process( - process=process["process"], alert_if=process["alert_if"], email_notification=process["email_notification"] - ) - - monitoring_policy = oneandone_conn.modify_process( - monitoring_policy_id=monitoring_policy_id, process_id=process_id, process=monitoring_policy_process - ) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _attach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, servers): - """ - Attaches servers to a monitoring policy. - """ - try: - attach_servers = [] - - for _server_id in servers: - server_id = get_server(oneandone_conn, _server_id) - attach_server = oneandone.client.AttachServer(server_id=server_id) - attach_servers.append(attach_server) - - if module.check_mode: - return bool(attach_servers) - - monitoring_policy = oneandone_conn.attach_monitoring_policy_server( - monitoring_policy_id=monitoring_policy_id, servers=attach_servers - ) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _detach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, server_id): - """ - Detaches a server from a monitoring policy. 
- """ - try: - if module.check_mode: - mp_server = oneandone_conn.get_monitoring_policy_server( - monitoring_policy_id=monitoring_policy_id, server_id=server_id - ) - return bool(mp_server) - - monitoring_policy = oneandone_conn.detach_monitoring_policy_server( - monitoring_policy_id=monitoring_policy_id, server_id=server_id - ) - return monitoring_policy - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def update_monitoring_policy(module, oneandone_conn): - """ - Updates a monitoring_policy based on input arguments. - Monitoring policy ports, processes and servers can be added/removed to/from - a monitoring policy. Monitoring policy name, description, email, - thresholds for cpu, ram, disk, transfer and internal_ping - can be updated as well. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - monitoring_policy_id = module.params.get("monitoring_policy") - name = module.params.get("name") - description = module.params.get("description") - email = module.params.get("email") - thresholds = module.params.get("thresholds") - add_ports = module.params.get("add_ports") - update_ports = module.params.get("update_ports") - remove_ports = module.params.get("remove_ports") - add_processes = module.params.get("add_processes") - update_processes = module.params.get("update_processes") - remove_processes = module.params.get("remove_processes") - add_servers = module.params.get("add_servers") - remove_servers = module.params.get("remove_servers") - - changed = False - - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy_id, True) - if monitoring_policy is None: - _check_mode(module, False) - - _monitoring_policy = oneandone.client.MonitoringPolicy(name=name, description=description, email=email) - - _thresholds = None - - if thresholds: - threshold_entities = ["cpu", "ram", "disk", "internal_ping", "transfer"] - - _thresholds = [] - for threshold in thresholds: - key = list(threshold.keys())[0] 
- if key in threshold_entities: - _threshold = oneandone.client.Threshold( - entity=key, - warning_value=threshold[key]["warning"]["value"], - warning_alert=str(threshold[key]["warning"]["alert"]).lower(), - critical_value=threshold[key]["critical"]["value"], - critical_alert=str(threshold[key]["critical"]["alert"]).lower(), - ) - _thresholds.append(_threshold) - - if name or description or email or thresholds: - _check_mode(module, True) - monitoring_policy = oneandone_conn.modify_monitoring_policy( - monitoring_policy_id=monitoring_policy["id"], - monitoring_policy=_monitoring_policy, - thresholds=_thresholds, - ) - changed = True - - if add_ports: - if module.check_mode: - _check_mode(module, _add_ports(module, oneandone_conn, monitoring_policy["id"], add_ports)) - - monitoring_policy = _add_ports(module, oneandone_conn, monitoring_policy["id"], add_ports) - changed = True - - if update_ports: - chk_changed = False - for update_port in update_ports: - if module.check_mode: - chk_changed |= _modify_port( - module, oneandone_conn, monitoring_policy["id"], update_port["id"], update_port - ) - - _modify_port(module, oneandone_conn, monitoring_policy["id"], update_port["id"], update_port) - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy["id"], True) - changed = True - - if remove_ports: - chk_changed = False - for port_id in remove_ports: - if module.check_mode: - chk_changed |= _delete_monitoring_policy_port( - module, oneandone_conn, monitoring_policy["id"], port_id - ) - - _delete_monitoring_policy_port(module, oneandone_conn, monitoring_policy["id"], port_id) - _check_mode(module, chk_changed) - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy["id"], True) - changed = True - - if add_processes: - monitoring_policy = _add_processes(module, oneandone_conn, monitoring_policy["id"], add_processes) - _check_mode(module, monitoring_policy) - changed = True - - if update_processes: - chk_changed = False - for 
update_process in update_processes: - if module.check_mode: - chk_changed |= _modify_process( - module, oneandone_conn, monitoring_policy["id"], update_process["id"], update_process - ) - - _modify_process(module, oneandone_conn, monitoring_policy["id"], update_process["id"], update_process) - _check_mode(module, chk_changed) - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy["id"], True) - changed = True - - if remove_processes: - chk_changed = False - for process_id in remove_processes: - if module.check_mode: - chk_changed |= _delete_monitoring_policy_process( - module, oneandone_conn, monitoring_policy["id"], process_id - ) - - _delete_monitoring_policy_process(module, oneandone_conn, monitoring_policy["id"], process_id) - _check_mode(module, chk_changed) - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy["id"], True) - changed = True - - if add_servers: - monitoring_policy = _attach_monitoring_policy_server( - module, oneandone_conn, monitoring_policy["id"], add_servers - ) - _check_mode(module, monitoring_policy) - changed = True - - if remove_servers: - chk_changed = False - for _server_id in remove_servers: - server_id = get_server(oneandone_conn, _server_id) - - if module.check_mode: - chk_changed |= _detach_monitoring_policy_server( - module, oneandone_conn, monitoring_policy["id"], server_id - ) - - _detach_monitoring_policy_server(module, oneandone_conn, monitoring_policy["id"], server_id) - _check_mode(module, chk_changed) - monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy["id"], True) - changed = True - - return (changed, monitoring_policy) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def create_monitoring_policy(module, oneandone_conn): - """ - Creates a new monitoring policy. 
- - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - name = module.params.get("name") - description = module.params.get("description") - email = module.params.get("email") - agent = module.params.get("agent") - thresholds = module.params.get("thresholds") - ports = module.params.get("ports") - processes = module.params.get("processes") - wait = module.params.get("wait") - wait_timeout = module.params.get("wait_timeout") - wait_interval = module.params.get("wait_interval") - - _monitoring_policy = oneandone.client.MonitoringPolicy( - name, - description, - email, - agent, - ) - - _monitoring_policy.specs["agent"] = str(_monitoring_policy.specs["agent"]).lower() - - threshold_entities = ["cpu", "ram", "disk", "internal_ping", "transfer"] - - _thresholds = [] - for threshold in thresholds: - key = list(threshold.keys())[0] - if key in threshold_entities: - _threshold = oneandone.client.Threshold( - entity=key, - warning_value=threshold[key]["warning"]["value"], - warning_alert=str(threshold[key]["warning"]["alert"]).lower(), - critical_value=threshold[key]["critical"]["value"], - critical_alert=str(threshold[key]["critical"]["alert"]).lower(), - ) - _thresholds.append(_threshold) - - _ports = [] - for port in ports: - _port = oneandone.client.Port( - protocol=port["protocol"], - port=port["port"], - alert_if=port["alert_if"], - email_notification=str(port["email_notification"]).lower(), - ) - _ports.append(_port) - - _processes = [] - for process in processes: - _process = oneandone.client.Process( - process=process["process"], - alert_if=process["alert_if"], - email_notification=str(process["email_notification"]).lower(), - ) - _processes.append(_process) - - _check_mode(module, True) - monitoring_policy = oneandone_conn.create_monitoring_policy( - monitoring_policy=_monitoring_policy, thresholds=_thresholds, ports=_ports, processes=_processes - ) - - if wait: - wait_for_resource_creation_completion( - oneandone_conn, - 
OneAndOneResources.monitoring_policy, - monitoring_policy["id"], - wait_timeout, - wait_interval, - ) - - changed = True if monitoring_policy else False - - _check_mode(module, False) - - return (changed, monitoring_policy) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def remove_monitoring_policy(module, oneandone_conn): - """ - Removes a monitoring policy. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - mp_id = module.params.get("name") - monitoring_policy_id = get_monitoring_policy(oneandone_conn, mp_id) - if module.check_mode: - if monitoring_policy_id is None: - _check_mode(module, False) - _check_mode(module, True) - monitoring_policy = oneandone_conn.delete_monitoring_policy(monitoring_policy_id) - - changed = True if monitoring_policy else False - - return (changed, {"id": monitoring_policy["id"], "name": monitoring_policy["name"]}) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict(type="str", no_log=True, default=os.environ.get("ONEANDONE_AUTH_TOKEN")), - api_url=dict(type="str", default=os.environ.get("ONEANDONE_API_URL")), - name=dict(type="str"), - monitoring_policy=dict(type="str"), - agent=dict(type="str"), - email=dict(type="str"), - description=dict(type="str"), - thresholds=dict(type="list", elements="dict", default=[]), - ports=dict(type="list", elements="dict", default=[]), - processes=dict(type="list", elements="dict", default=[]), - add_ports=dict(type="list", elements="dict", default=[]), - update_ports=dict(type="list", elements="dict", default=[]), - remove_ports=dict(type="list", elements="str", default=[]), - add_processes=dict(type="list", elements="dict", default=[]), - update_processes=dict(type="list", elements="dict", default=[]), - remove_processes=dict(type="list", elements="str", default=[]), - add_servers=dict(type="list", elements="str", default=[]), - 
remove_servers=dict(type="list", elements="str", default=[]), - wait=dict(type="bool", default=True), - wait_timeout=dict(type="int", default=600), - wait_interval=dict(type="int", default=5), - state=dict(type="str", default="present", choices=["present", "absent", "update"]), - ), - supports_check_mode=True, - ) - - if not HAS_ONEANDONE_SDK: - module.fail_json(msg="1and1 required for this module") - - if not module.params.get("auth_token"): - module.fail_json(msg="auth_token parameter is required.") - - if not module.params.get("api_url"): - oneandone_conn = oneandone.client.OneAndOneService(api_token=module.params.get("auth_token")) - else: - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get("auth_token"), api_url=module.params.get("api_url") - ) - - state = module.params.get("state") - - if state == "absent": - if not module.params.get("name"): - module.fail_json(msg="'name' parameter is required to delete a monitoring policy.") - try: - (changed, monitoring_policy) = remove_monitoring_policy(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - elif state == "update": - if not module.params.get("monitoring_policy"): - module.fail_json(msg="'monitoring_policy' parameter is required to update a monitoring policy.") - try: - (changed, monitoring_policy) = update_monitoring_policy(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - - elif state == "present": - for param in ("name", "agent", "email", "thresholds", "ports", "processes"): - if not module.params.get(param): - module.fail_json(msg=f"{param} parameter is required for a new monitoring policy.") - try: - (changed, monitoring_policy) = create_monitoring_policy(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - - module.exit_json(changed=changed, monitoring_policy=monitoring_policy) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/oneandone_private_network.py 
b/plugins/modules/oneandone_private_network.py deleted file mode 100644 index 1e873845b2..0000000000 --- a/plugins/modules/oneandone_private_network.py +++ /dev/null @@ -1,418 +0,0 @@ -#!/usr/bin/python -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -DOCUMENTATION = r""" -module: oneandone_private_network -short_description: Configure 1&1 private networking -description: - - Create, remove, reconfigure, update a private network. This module has a dependency on 1and1 >= 1.0. -deprecated: - removed_in: 13.0.0 - why: DNS fails to resolve the API endpoint used by the module. - alternative: There is none. -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - state: - description: - - Define a network's state to create, remove, or update. - type: str - default: 'present' - choices: ["present", "absent", "update"] - auth_token: - description: - - Authenticating API token provided by 1&1. - type: str - private_network: - description: - - The identifier (id or name) of the network used with update state. - type: str - api_url: - description: - - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable. - type: str - name: - description: - - Private network name used with present state. Used as identifier (id or name) when used with absent state. - type: str - description: - description: - - Set a description for the network. - type: str - datacenter: - description: - - The identifier of the datacenter where the private network is created. - type: str - choices: [US, ES, DE, GB] - network_address: - description: - - Set a private network space, for example V(192.168.1.0). 
- type: str - subnet_mask: - description: - - Set the netmask for the private network, for example V(255.255.255.0). - type: str - add_members: - description: - - List of server identifiers (name or id) to be added to the private network. - type: list - elements: str - default: [] - remove_members: - description: - - List of server identifiers (name or id) to be removed from the private network. - type: list - elements: str - default: [] - wait: - description: - - Wait for the instance to be in state 'running' before returning. - default: true - type: bool - wait_timeout: - description: - - How long before wait gives up, in seconds. - type: int - default: 600 - wait_interval: - description: - - Defines the number of seconds to wait when using the _wait_for methods. - type: int - default: 5 - -requirements: - - "1and1" - -author: - - Amel Ajdinovic (@aajdinov) - - Ethan Devenport (@edevenport) -""" - -EXAMPLES = r""" -- name: Create a private network - community.general.oneandone_private_network: - auth_token: oneandone_private_api_key - name: backup_network - description: Testing creation of a private network with ansible - network_address: 70.35.193.100 - subnet_mask: 255.0.0.0 - datacenter: US - -- name: Destroy a private network - community.general.oneandone_private_network: - auth_token: oneandone_private_api_key - state: absent - name: backup_network - -- name: Modify the private network - community.general.oneandone_private_network: - auth_token: oneandone_private_api_key - state: update - private_network: backup_network - network_address: 192.168.2.0 - subnet_mask: 255.255.255.0 - -- name: Add members to the private network - community.general.oneandone_private_network: - auth_token: oneandone_private_api_key - state: update - private_network: backup_network - add_members: - - server identifier (id or name) - -- name: Remove members from the private network - community.general.oneandone_private_network: - auth_token: oneandone_private_api_key - state: update 
- private_network: backup_network - remove_members: - - server identifier (id or name) -""" - -RETURN = r""" -private_network: - description: Information about the private network. - type: dict - sample: {"name": "backup_network", "id": "55726DEDA20C99CF6F2AF8F18CAC9963"} - returned: always -""" - -import os - -from ansible.module_utils.basic import AnsibleModule - -from ansible_collections.community.general.plugins.module_utils.oneandone import ( - OneAndOneResources, - get_datacenter, - get_private_network, - get_server, - wait_for_resource_creation_completion, - wait_for_resource_deletion_completion, -) - -HAS_ONEANDONE_SDK = True - -try: - import oneandone.client -except ImportError: - HAS_ONEANDONE_SDK = False - -DATACENTERS = ["US", "ES", "DE", "GB"] - - -def _check_mode(module, result): - if module.check_mode: - module.exit_json(changed=result) - - -def _add_servers(module, oneandone_conn, name, members): - try: - private_network_id = get_private_network(oneandone_conn, name) - - if module.check_mode: - return bool(private_network_id and members) - - network = oneandone_conn.attach_private_network_servers( - private_network_id=private_network_id, server_ids=members - ) - - return network - except Exception as e: - module.fail_json(msg=str(e)) - - -def _remove_member(module, oneandone_conn, name, member_id): - try: - private_network_id = get_private_network(oneandone_conn, name) - - if module.check_mode: - if private_network_id: - network_member = oneandone_conn.get_private_network_server( - private_network_id=private_network_id, server_id=member_id - ) - if network_member: - return True - return False - - network = oneandone_conn.remove_private_network_server(private_network_id=name, server_id=member_id) - - return network - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def create_network(module, oneandone_conn): - """ - Create new private network - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - - Returns a 
dictionary containing a 'changed' attribute indicating whether - any network was added. - """ - name = module.params.get("name") - description = module.params.get("description") - network_address = module.params.get("network_address") - subnet_mask = module.params.get("subnet_mask") - datacenter = module.params.get("datacenter") - wait = module.params.get("wait") - wait_timeout = module.params.get("wait_timeout") - wait_interval = module.params.get("wait_interval") - - if datacenter is not None: - datacenter_id = get_datacenter(oneandone_conn, datacenter) - if datacenter_id is None: - module.fail_json(msg=f"datacenter {datacenter} not found.") - - try: - _check_mode(module, True) - network = oneandone_conn.create_private_network( - private_network=oneandone.client.PrivateNetwork( - name=name, - description=description, - network_address=network_address, - subnet_mask=subnet_mask, - datacenter_id=datacenter_id, - ) - ) - - if wait: - wait_for_resource_creation_completion( - oneandone_conn, OneAndOneResources.private_network, network["id"], wait_timeout, wait_interval - ) - network = get_private_network(oneandone_conn, network["id"], True) - - changed = True if network else False - - _check_mode(module, False) - - return (changed, network) - except Exception as e: - module.fail_json(msg=str(e)) - - -def update_network(module, oneandone_conn): - """ - Modifies a private network. 
- - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - """ - try: - _private_network_id = module.params.get("private_network") - _name = module.params.get("name") - _description = module.params.get("description") - _network_address = module.params.get("network_address") - _subnet_mask = module.params.get("subnet_mask") - _add_members = module.params.get("add_members") - _remove_members = module.params.get("remove_members") - - changed = False - - private_network = get_private_network(oneandone_conn, _private_network_id, True) - if private_network is None: - _check_mode(module, False) - - if _name or _description or _network_address or _subnet_mask: - _check_mode(module, True) - private_network = oneandone_conn.modify_private_network( - private_network_id=private_network["id"], - name=_name, - description=_description, - network_address=_network_address, - subnet_mask=_subnet_mask, - ) - changed = True - - if _add_members: - instances = [] - - for member in _add_members: - instance_id = get_server(oneandone_conn, member) - instance_obj = oneandone.client.AttachServer(server_id=instance_id) - - instances.extend([instance_obj]) - private_network = _add_servers(module, oneandone_conn, private_network["id"], instances) - _check_mode(module, private_network) - changed = True - - if _remove_members: - chk_changed = False - for member in _remove_members: - instance = get_server(oneandone_conn, member, True) - - if module.check_mode: - chk_changed |= _remove_member(module, oneandone_conn, private_network["id"], instance["id"]) - _check_mode(module, instance and chk_changed) - - _remove_member(module, oneandone_conn, private_network["id"], instance["id"]) - private_network = get_private_network(oneandone_conn, private_network["id"], True) - changed = True - - return (changed, private_network) - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def remove_network(module, oneandone_conn): - """ - Removes a private network. 
- - module : AnsibleModule object - oneandone_conn: authenticated oneandone object. - """ - try: - pn_id = module.params.get("name") - wait_timeout = module.params.get("wait_timeout") - wait_interval = module.params.get("wait_interval") - - private_network_id = get_private_network(oneandone_conn, pn_id) - if module.check_mode: - if private_network_id is None: - _check_mode(module, False) - _check_mode(module, True) - private_network = oneandone_conn.delete_private_network(private_network_id) - wait_for_resource_deletion_completion( - oneandone_conn, OneAndOneResources.private_network, private_network["id"], wait_timeout, wait_interval - ) - - changed = True if private_network else False - - return (changed, {"id": private_network["id"], "name": private_network["name"]}) - except Exception as e: - module.fail_json(msg=str(e)) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict(type="str", no_log=True, default=os.environ.get("ONEANDONE_AUTH_TOKEN")), - api_url=dict(type="str", default=os.environ.get("ONEANDONE_API_URL")), - private_network=dict(type="str"), - name=dict(type="str"), - description=dict(type="str"), - network_address=dict(type="str"), - subnet_mask=dict(type="str"), - add_members=dict(type="list", elements="str", default=[]), - remove_members=dict(type="list", elements="str", default=[]), - datacenter=dict(choices=DATACENTERS), - wait=dict(type="bool", default=True), - wait_timeout=dict(type="int", default=600), - wait_interval=dict(type="int", default=5), - state=dict(type="str", default="present", choices=["present", "absent", "update"]), - ), - supports_check_mode=True, - ) - - if not HAS_ONEANDONE_SDK: - module.fail_json(msg="1and1 required for this module") - - if not module.params.get("auth_token"): - module.fail_json(msg="auth_token parameter is required.") - - if not module.params.get("api_url"): - oneandone_conn = oneandone.client.OneAndOneService(api_token=module.params.get("auth_token")) - else: - oneandone_conn 
= oneandone.client.OneAndOneService( - api_token=module.params.get("auth_token"), api_url=module.params.get("api_url") - ) - - state = module.params.get("state") - - if state == "absent": - if not module.params.get("name"): - module.fail_json(msg="'name' parameter is required for deleting a network.") - try: - (changed, private_network) = remove_network(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - elif state == "update": - if not module.params.get("private_network"): - module.fail_json(msg="'private_network' parameter is required for updating a network.") - try: - (changed, private_network) = update_network(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - elif state == "present": - if not module.params.get("name"): - module.fail_json(msg="'name' parameter is required for new networks.") - try: - (changed, private_network) = create_network(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - - module.exit_json(changed=changed, private_network=private_network) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/oneandone_public_ip.py b/plugins/modules/oneandone_public_ip.py deleted file mode 100644 index 67d6505f44..0000000000 --- a/plugins/modules/oneandone_public_ip.py +++ /dev/null @@ -1,306 +0,0 @@ -#!/usr/bin/python -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -DOCUMENTATION = r""" -module: oneandone_public_ip -short_description: Configure 1&1 public IPs -description: - - Create, update, and remove public IPs. This module has a dependency on 1and1 >= 1.0. -deprecated: - removed_in: 13.0.0 - why: DNS fails to resolve the API endpoint used by the module. - alternative: There is none. 
-extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - state: - description: - - Define a public IP state to create, remove, or update. - type: str - default: 'present' - choices: ["present", "absent", "update"] - auth_token: - description: - - Authenticating API token provided by 1&1. - type: str - api_url: - description: - - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable. - type: str - reverse_dns: - description: - - Reverse DNS name. maxLength=256. - type: str - datacenter: - description: - - ID of the datacenter where the IP is created (only for unassigned IPs). - type: str - choices: [US, ES, DE, GB] - default: US - type: - description: - - Type of IP. Currently, only IPV4 is available. - type: str - choices: ["IPV4", "IPV6"] - default: 'IPV4' - public_ip_id: - description: - - The ID of the public IP used with update and delete states. - type: str - wait: - description: - - Wait for the instance to be in state 'running' before returning. - default: true - type: bool - wait_timeout: - description: - - How long before wait gives up, in seconds. - type: int - default: 600 - wait_interval: - description: - - Defines the number of seconds to wait when using the _wait_for methods. 
- type: int - default: 5 - -requirements: - - "1and1" - -author: - - Amel Ajdinovic (@aajdinov) - - Ethan Devenport (@edevenport) -""" - -EXAMPLES = r""" -- name: Create a public IP - community.general.oneandone_public_ip: - auth_token: oneandone_private_api_key - reverse_dns: example.com - datacenter: US - type: IPV4 - -- name: Update a public IP - community.general.oneandone_public_ip: - auth_token: oneandone_private_api_key - public_ip_id: public ip id - reverse_dns: secondexample.com - state: update - -- name: Delete a public IP - community.general.oneandone_public_ip: - auth_token: oneandone_private_api_key - public_ip_id: public ip id - state: absent -""" - -RETURN = r""" -public_ip: - description: Information about the public IP that was processed. - type: dict - sample: {"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"} - returned: always -""" - -import os - -from ansible.module_utils.basic import AnsibleModule - -from ansible_collections.community.general.plugins.module_utils.oneandone import ( - OneAndOneResources, - get_datacenter, - get_public_ip, - wait_for_resource_creation_completion, -) - -HAS_ONEANDONE_SDK = True - -try: - import oneandone.client -except ImportError: - HAS_ONEANDONE_SDK = False - -DATACENTERS = ["US", "ES", "DE", "GB"] - -TYPES = ["IPV4", "IPV6"] - - -def _check_mode(module, result): - if module.check_mode: - module.exit_json(changed=result) - - -def create_public_ip(module, oneandone_conn): - """ - Create new public IP - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - - Returns a dictionary containing a 'changed' attribute indicating whether - any public IP was added. 
- """ - reverse_dns = module.params.get("reverse_dns") - datacenter = module.params.get("datacenter") - ip_type = module.params.get("type") - wait = module.params.get("wait") - wait_timeout = module.params.get("wait_timeout") - wait_interval = module.params.get("wait_interval") - - if datacenter is not None: - datacenter_id = get_datacenter(oneandone_conn, datacenter) - if datacenter_id is None: - _check_mode(module, False) - module.fail_json(msg=f"datacenter {datacenter} not found.") - - try: - _check_mode(module, True) - public_ip = oneandone_conn.create_public_ip( - reverse_dns=reverse_dns, ip_type=ip_type, datacenter_id=datacenter_id - ) - - if wait: - wait_for_resource_creation_completion( - oneandone_conn, OneAndOneResources.public_ip, public_ip["id"], wait_timeout, wait_interval - ) - public_ip = oneandone_conn.get_public_ip(public_ip["id"]) - - changed = True if public_ip else False - - return (changed, public_ip) - except Exception as e: - module.fail_json(msg=str(e)) - - -def update_public_ip(module, oneandone_conn): - """ - Update a public IP - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - - Returns a dictionary containing a 'changed' attribute indicating whether - any public IP was changed. 
- """ - reverse_dns = module.params.get("reverse_dns") - public_ip_id = module.params.get("public_ip_id") - wait = module.params.get("wait") - wait_timeout = module.params.get("wait_timeout") - wait_interval = module.params.get("wait_interval") - - public_ip = get_public_ip(oneandone_conn, public_ip_id, True) - if public_ip is None: - _check_mode(module, False) - module.fail_json(msg=f"public IP {public_ip_id} not found.") - - try: - _check_mode(module, True) - public_ip = oneandone_conn.modify_public_ip(ip_id=public_ip["id"], reverse_dns=reverse_dns) - - if wait: - wait_for_resource_creation_completion( - oneandone_conn, OneAndOneResources.public_ip, public_ip["id"], wait_timeout, wait_interval - ) - public_ip = oneandone_conn.get_public_ip(public_ip["id"]) - - changed = True if public_ip else False - - return (changed, public_ip) - except Exception as e: - module.fail_json(msg=str(e)) - - -def delete_public_ip(module, oneandone_conn): - """ - Delete a public IP - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - - Returns a dictionary containing a 'changed' attribute indicating whether - any public IP was deleted. 
- """ - public_ip_id = module.params.get("public_ip_id") - - public_ip = get_public_ip(oneandone_conn, public_ip_id, True) - if public_ip is None: - _check_mode(module, False) - module.fail_json(msg=f"public IP {public_ip_id} not found.") - - try: - _check_mode(module, True) - deleted_public_ip = oneandone_conn.delete_public_ip(ip_id=public_ip["id"]) - - changed = True if deleted_public_ip else False - - return (changed, {"id": public_ip["id"]}) - except Exception as e: - module.fail_json(msg=str(e)) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict(type="str", no_log=True, default=os.environ.get("ONEANDONE_AUTH_TOKEN")), - api_url=dict(type="str", default=os.environ.get("ONEANDONE_API_URL")), - public_ip_id=dict(type="str"), - reverse_dns=dict(type="str"), - datacenter=dict(choices=DATACENTERS, default="US"), - type=dict(choices=TYPES, default="IPV4"), - wait=dict(type="bool", default=True), - wait_timeout=dict(type="int", default=600), - wait_interval=dict(type="int", default=5), - state=dict(type="str", default="present", choices=["present", "absent", "update"]), - ), - supports_check_mode=True, - ) - - if not HAS_ONEANDONE_SDK: - module.fail_json(msg="1and1 required for this module") - - if not module.params.get("auth_token"): - module.fail_json(msg="auth_token parameter is required.") - - if not module.params.get("api_url"): - oneandone_conn = oneandone.client.OneAndOneService(api_token=module.params.get("auth_token")) - else: - oneandone_conn = oneandone.client.OneAndOneService( - api_token=module.params.get("auth_token"), api_url=module.params.get("api_url") - ) - - state = module.params.get("state") - - if state == "absent": - if not module.params.get("public_ip_id"): - module.fail_json(msg="'public_ip_id' parameter is required to delete a public ip.") - try: - (changed, public_ip) = delete_public_ip(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - elif state == "update": - if not 
module.params.get("public_ip_id"): - module.fail_json(msg="'public_ip_id' parameter is required to update a public ip.") - try: - (changed, public_ip) = update_public_ip(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - - elif state == "present": - try: - (changed, public_ip) = create_public_ip(module, oneandone_conn) - except Exception as e: - module.fail_json(msg=str(e)) - - module.exit_json(changed=changed, public_ip=public_ip) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/oneandone_server.py b/plugins/modules/oneandone_server.py deleted file mode 100644 index 57ca25ea4a..0000000000 --- a/plugins/modules/oneandone_server.py +++ /dev/null @@ -1,656 +0,0 @@ -#!/usr/bin/python -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -DOCUMENTATION = r""" -module: oneandone_server -short_description: Create, destroy, start, stop, and reboot a 1&1 Host server -description: - - Create, destroy, update, start, stop, and reboot a 1&1 Host server. When the server is created it can optionally wait - for it to be 'running' before returning. -deprecated: - removed_in: 13.0.0 - why: DNS fails to resolve the API endpoint used by the module. - alternative: There is none. -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - state: - description: - - Define a server's state to create, remove, start or stop it. - type: str - default: present - choices: ["present", "absent", "running", "stopped"] - auth_token: - description: - - Authenticating API token provided by 1&1. Overrides the E(ONEANDONE_AUTH_TOKEN) environment variable. - type: str - api_url: - description: - - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable. 
- type: str - datacenter: - description: - - The datacenter location. - type: str - default: US - choices: ["US", "ES", "DE", "GB"] - hostname: - description: - - The hostname or ID of the server. Only used when state is 'present'. - type: str - description: - description: - - The description of the server. - type: str - appliance: - description: - - The operating system name or ID for the server. It is required only for 'present' state. - type: str - fixed_instance_size: - description: - - The instance size name or ID of the server. It is required only for 'present' state, and it is mutually exclusive - with vcore, cores_per_processor, ram, and hdds parameters. - - 'The available choices are: V(S), V(M), V(L), V(XL), V(XXL), V(3XL), V(4XL), V(5XL).' - type: str - vcore: - description: - - The total number of processors. It must be provided with O(cores_per_processor), O(ram), and O(hdds) parameters. - type: int - cores_per_processor: - description: - - The number of cores per processor. It must be provided with O(vcore), O(ram), and O(hdds) parameters. - type: int - ram: - description: - - The amount of RAM memory. It must be provided with with O(vcore), O(cores_per_processor), and O(hdds) parameters. - type: float - hdds: - description: - - A list of hard disks with nested O(ignore:hdds[].size) and O(ignore:hdds[].is_main) properties. It must be provided - with O(vcore), O(cores_per_processor), and O(ram) parameters. - type: list - elements: dict - private_network: - description: - - The private network name or ID. - type: str - firewall_policy: - description: - - The firewall policy name or ID. - type: str - load_balancer: - description: - - The load balancer name or ID. - type: str - monitoring_policy: - description: - - The monitoring policy name or ID. - type: str - server: - description: - - Server identifier (ID or hostname). It is required for all states except 'running' and 'present'. 
- type: str - count: - description: - - The number of servers to create. - type: int - default: 1 - ssh_key: - description: - - User's public SSH key (contents, not path). - type: raw - server_type: - description: - - The type of server to be built. - type: str - default: "cloud" - choices: ["cloud", "baremetal", "k8s_node"] - wait: - description: - - Wait for the server to be in state 'running' before returning. Also used for delete operation (set to V(false) if - you do not want to wait for each individual server to be deleted before moving on with other tasks). - type: bool - default: true - wait_timeout: - description: - - How long before wait gives up, in seconds. - type: int - default: 600 - wait_interval: - description: - - Defines the number of seconds to wait when using the wait_for methods. - type: int - default: 5 - auto_increment: - description: - - When creating multiple servers at once, whether to differentiate hostnames by appending a count after them or substituting - the count where there is a %02d or %03d in the hostname string. 
- type: bool - default: true - -requirements: - - "1and1" - -author: - - "Amel Ajdinovic (@aajdinov)" - - "Ethan Devenport (@edevenport)" -""" - -EXAMPLES = r""" -- name: Create three servers and enumerate their names - community.general.oneandone_server: - auth_token: oneandone_private_api_key - hostname: node%02d - fixed_instance_size: XL - datacenter: US - appliance: C5A349786169F140BCBC335675014C08 - auto_increment: true - count: 3 - -- name: Create three servers, passing in an ssh_key - community.general.oneandone_server: - auth_token: oneandone_private_api_key - hostname: node%02d - vcore: 2 - cores_per_processor: 4 - ram: 8.0 - hdds: - - size: 50 - is_main: false - datacenter: ES - appliance: C5A349786169F140BCBC335675014C08 - count: 3 - wait: true - wait_timeout: 600 - wait_interval: 10 - ssh_key: SSH_PUBLIC_KEY - -- name: Removing server - community.general.oneandone_server: - auth_token: oneandone_private_api_key - state: absent - server: 'node01' - -- name: Starting server - community.general.oneandone_server: - auth_token: oneandone_private_api_key - state: running - server: 'node01' - -- name: Stopping server - community.general.oneandone_server: - auth_token: oneandone_private_api_key - state: stopped - server: 'node01' -""" - -RETURN = r""" -servers: - description: Information about each server that was processed. 
- type: list - sample: - - {"hostname": "my-server", "id": "server-id"} - returned: always -""" - -import os -import time - -from ansible.module_utils.basic import AnsibleModule - -from ansible_collections.community.general.plugins.module_utils.oneandone import ( - OneAndOneResources, - get_appliance, - get_datacenter, - get_firewall_policy, - get_fixed_instance_size, - get_load_balancer, - get_monitoring_policy, - get_private_network, - get_server, - wait_for_resource_creation_completion, - wait_for_resource_deletion_completion, -) - -HAS_ONEANDONE_SDK = True - -try: - import oneandone.client -except ImportError: - HAS_ONEANDONE_SDK = False - -DATACENTERS = ["US", "ES", "DE", "GB"] - -ONEANDONE_SERVER_STATES = ( - "DEPLOYING", - "POWERED_OFF", - "POWERED_ON", - "POWERING_ON", - "POWERING_OFF", -) - - -def _check_mode(module, result): - if module.check_mode: - module.exit_json(changed=result) - - -def _create_server( - module, - oneandone_conn, - hostname, - description, - fixed_instance_size_id, - vcore, - cores_per_processor, - ram, - hdds, - datacenter_id, - appliance_id, - ssh_key, - private_network_id, - firewall_policy_id, - load_balancer_id, - monitoring_policy_id, - server_type, - wait, - wait_timeout, - wait_interval, -): - try: - existing_server = get_server(oneandone_conn, hostname) - - if existing_server: - if module.check_mode: - return False - return None - - if module.check_mode: - return True - - server = oneandone_conn.create_server( - oneandone.client.Server( - name=hostname, - description=description, - fixed_instance_size_id=fixed_instance_size_id, - vcore=vcore, - cores_per_processor=cores_per_processor, - ram=ram, - appliance_id=appliance_id, - datacenter_id=datacenter_id, - rsa_key=ssh_key, - private_network_id=private_network_id, - firewall_policy_id=firewall_policy_id, - load_balancer_id=load_balancer_id, - monitoring_policy_id=monitoring_policy_id, - server_type=server_type, - ), - hdds, - ) - - if wait: - 
wait_for_resource_creation_completion( - oneandone_conn, OneAndOneResources.server, server["id"], wait_timeout, wait_interval - ) - server = oneandone_conn.get_server(server["id"]) # refresh - - return server - except Exception as ex: - module.fail_json(msg=str(ex)) - - -def _insert_network_data(server): - for addr_data in server["ips"]: - if addr_data["type"] == "IPV6": - server["public_ipv6"] = addr_data["ip"] - elif addr_data["type"] == "IPV4": - server["public_ipv4"] = addr_data["ip"] - return server - - -def create_server(module, oneandone_conn): - """ - Create new server - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object - - Returns a dictionary containing a 'changed' attribute indicating whether - any server was added, and a 'servers' attribute with the list of the - created servers' hostname, id and ip addresses. - """ - hostname = module.params.get("hostname") - description = module.params.get("description") - auto_increment = module.params.get("auto_increment") - count = module.params.get("count") - fixed_instance_size = module.params.get("fixed_instance_size") - vcore = module.params.get("vcore") - cores_per_processor = module.params.get("cores_per_processor") - ram = module.params.get("ram") - hdds = module.params.get("hdds") - datacenter = module.params.get("datacenter") - appliance = module.params.get("appliance") - ssh_key = module.params.get("ssh_key") - private_network = module.params.get("private_network") - monitoring_policy = module.params.get("monitoring_policy") - firewall_policy = module.params.get("firewall_policy") - load_balancer = module.params.get("load_balancer") - server_type = module.params.get("server_type") - wait = module.params.get("wait") - wait_timeout = module.params.get("wait_timeout") - wait_interval = module.params.get("wait_interval") - - datacenter_id = get_datacenter(oneandone_conn, datacenter) - if datacenter_id is None: - _check_mode(module, False) - module.fail_json(msg=f"datacenter 
{datacenter} not found.") - - fixed_instance_size_id = None - if fixed_instance_size: - fixed_instance_size_id = get_fixed_instance_size(oneandone_conn, fixed_instance_size) - if fixed_instance_size_id is None: - _check_mode(module, False) - module.fail_json(msg=f"fixed_instance_size {fixed_instance_size} not found.") - - appliance_id = get_appliance(oneandone_conn, appliance) - if appliance_id is None: - _check_mode(module, False) - module.fail_json(msg=f"appliance {appliance} not found.") - - private_network_id = None - if private_network: - private_network_id = get_private_network(oneandone_conn, private_network) - if private_network_id is None: - _check_mode(module, False) - module.fail_json(msg=f"private network {private_network} not found.") - - monitoring_policy_id = None - if monitoring_policy: - monitoring_policy_id = get_monitoring_policy(oneandone_conn, monitoring_policy) - if monitoring_policy_id is None: - _check_mode(module, False) - module.fail_json(msg=f"monitoring policy {monitoring_policy} not found.") - - firewall_policy_id = None - if firewall_policy: - firewall_policy_id = get_firewall_policy(oneandone_conn, firewall_policy) - if firewall_policy_id is None: - _check_mode(module, False) - module.fail_json(msg=f"firewall policy {firewall_policy} not found.") - - load_balancer_id = None - if load_balancer: - load_balancer_id = get_load_balancer(oneandone_conn, load_balancer) - if load_balancer_id is None: - _check_mode(module, False) - module.fail_json(msg=f"load balancer {load_balancer} not found.") - - if auto_increment: - hostnames = _auto_increment_hostname(count, hostname) - descriptions = _auto_increment_description(count, description) - else: - hostnames = [hostname] * count - descriptions = [description] * count - - hdd_objs = [] - if hdds: - for hdd in hdds: - hdd_objs.append(oneandone.client.Hdd(size=hdd["size"], is_main=hdd["is_main"])) - - servers = [] - for index, name in enumerate(hostnames): - server = _create_server( - 
module=module, - oneandone_conn=oneandone_conn, - hostname=name, - description=descriptions[index], - fixed_instance_size_id=fixed_instance_size_id, - vcore=vcore, - cores_per_processor=cores_per_processor, - ram=ram, - hdds=hdd_objs, - datacenter_id=datacenter_id, - appliance_id=appliance_id, - ssh_key=ssh_key, - private_network_id=private_network_id, - monitoring_policy_id=monitoring_policy_id, - firewall_policy_id=firewall_policy_id, - load_balancer_id=load_balancer_id, - server_type=server_type, - wait=wait, - wait_timeout=wait_timeout, - wait_interval=wait_interval, - ) - if server: - servers.append(server) - - changed = False - - if servers: - for server in servers: - if server: - _check_mode(module, True) - _check_mode(module, False) - servers = [_insert_network_data(_server) for _server in servers] - changed = True - - _check_mode(module, False) - - return (changed, servers) - - -def remove_server(module, oneandone_conn): - """ - Removes a server. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object. - - Returns a dictionary containing a 'changed' attribute indicating whether - the server was removed, and a 'removed_server' attribute with - the removed server's hostname and id. 
- """ - server_id = module.params.get("server") - wait = module.params.get("wait") - wait_timeout = module.params.get("wait_timeout") - wait_interval = module.params.get("wait_interval") - - changed = False - removed_server = None - - server = get_server(oneandone_conn, server_id, True) - if server: - _check_mode(module, True) - try: - oneandone_conn.delete_server(server_id=server["id"]) - if wait: - wait_for_resource_deletion_completion( - oneandone_conn, OneAndOneResources.server, server["id"], wait_timeout, wait_interval - ) - changed = True - except Exception as ex: - module.fail_json(msg=f"failed to terminate the server: {ex}") - - removed_server = {"id": server["id"], "hostname": server["name"]} - _check_mode(module, False) - - return (changed, removed_server) - - -def startstop_server(module, oneandone_conn): - """ - Starts or Stops a server. - - module : AnsibleModule object - oneandone_conn: authenticated oneandone object. - - Returns a dictionary with a 'changed' attribute indicating whether - anything has changed for the server as a result of this function - being run, and a 'server' attribute with basic information for - the server. - """ - state = module.params.get("state") - server_id = module.params.get("server") - wait = module.params.get("wait") - wait_timeout = module.params.get("wait_timeout") - wait_interval = module.params.get("wait_interval") - - changed = False - - # Resolve server - server = get_server(oneandone_conn, server_id, True) - if server: - # Attempt to change the server state, only if it is not already there - # or on its way. 
- try: - if state == "stopped" and server["status"]["state"] == "POWERED_ON": - _check_mode(module, True) - oneandone_conn.modify_server_status(server_id=server["id"], action="POWER_OFF", method="SOFTWARE") - elif state == "running" and server["status"]["state"] == "POWERED_OFF": - _check_mode(module, True) - oneandone_conn.modify_server_status(server_id=server["id"], action="POWER_ON", method="SOFTWARE") - except Exception as ex: - module.fail_json(msg=f"failed to set server {server_id} to state {state}: {ex}") - - _check_mode(module, False) - - # Make sure the server has reached the desired state - if wait: - operation_completed = False - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(wait_interval) - server = oneandone_conn.get_server(server["id"]) # refresh - server_state = server["status"]["state"] - if state == "stopped" and server_state == "POWERED_OFF": - operation_completed = True - break - if state == "running" and server_state == "POWERED_ON": - operation_completed = True - break - if not operation_completed: - module.fail_json(msg=f"Timeout waiting for server {server_id} to get to state {state}") - - changed = True - server = _insert_network_data(server) - - _check_mode(module, False) - - return (changed, server) - - -def _auto_increment_hostname(count, hostname): - """ - Allow a custom incremental count in the hostname when defined with the - string formatting (%) operator. Otherwise, increment using name-01, - name-02, name-03, and so forth. - """ - if "%" not in hostname: - hostname = f"{hostname}-%01d" - - return [hostname % i for i in range(1, count + 1)] - - -def _auto_increment_description(count, description): - """ - Allow the incremental count in the description when defined with the - string formatting (%) operator. Otherwise, repeat the same description. 
- """ - if "%" in description: - return [description % i for i in range(1, count + 1)] - else: - return [description] * count - - -def main(): - module = AnsibleModule( - argument_spec=dict( - auth_token=dict(type="str", default=os.environ.get("ONEANDONE_AUTH_TOKEN"), no_log=True), - api_url=dict(type="str", default=os.environ.get("ONEANDONE_API_URL")), - hostname=dict(type="str"), - description=dict(type="str"), - appliance=dict(type="str"), - fixed_instance_size=dict(type="str"), - vcore=dict(type="int"), - cores_per_processor=dict(type="int"), - ram=dict(type="float"), - hdds=dict(type="list", elements="dict"), - count=dict(type="int", default=1), - ssh_key=dict(type="raw", no_log=False), - auto_increment=dict(type="bool", default=True), - server=dict(type="str"), - datacenter=dict(choices=DATACENTERS, default="US"), - private_network=dict(type="str"), - firewall_policy=dict(type="str"), - load_balancer=dict(type="str"), - monitoring_policy=dict(type="str"), - server_type=dict(type="str", default="cloud", choices=["cloud", "baremetal", "k8s_node"]), - wait=dict(type="bool", default=True), - wait_timeout=dict(type="int", default=600), - wait_interval=dict(type="int", default=5), - state=dict(type="str", default="present", choices=["present", "absent", "running", "stopped"]), - ), - supports_check_mode=True, - mutually_exclusive=( - ["fixed_instance_size", "vcore"], - ["fixed_instance_size", "cores_per_processor"], - ["fixed_instance_size", "ram"], - ["fixed_instance_size", "hdds"], - ), - required_together=(["vcore", "cores_per_processor", "ram", "hdds"],), - ) - - if not HAS_ONEANDONE_SDK: - module.fail_json(msg="1and1 required for this module") - - if not module.params.get("auth_token"): - module.fail_json(msg='The "auth_token" parameter or ONEANDONE_AUTH_TOKEN environment variable is required.') - - if not module.params.get("api_url"): - oneandone_conn = oneandone.client.OneAndOneService(api_token=module.params.get("auth_token")) - else: - oneandone_conn = 
oneandone.client.OneAndOneService( - api_token=module.params.get("auth_token"), api_url=module.params.get("api_url") - ) - - state = module.params.get("state") - - if state == "absent": - if not module.params.get("server"): - module.fail_json(msg="'server' parameter is required for deleting a server.") - try: - (changed, servers) = remove_server(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - - elif state in ("running", "stopped"): - if not module.params.get("server"): - module.fail_json(msg="'server' parameter is required for starting/stopping a server.") - try: - (changed, servers) = startstop_server(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - - elif state == "present": - for param in ("hostname", "appliance", "datacenter"): - if not module.params.get(param): - module.fail_json(msg=f"{param} parameter is required for new server.") - try: - (changed, servers) = create_server(module, oneandone_conn) - except Exception as ex: - module.fail_json(msg=str(ex)) - - module.exit_json(changed=changed, servers=servers) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/pushbullet.py b/plugins/modules/pushbullet.py deleted file mode 100644 index e8ca14958d..0000000000 --- a/plugins/modules/pushbullet.py +++ /dev/null @@ -1,193 +0,0 @@ -#!/usr/bin/python -# -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -DOCUMENTATION = r""" -author: "Willy Barro (@willybarro)" -requirements: [pushbullet.py] -module: pushbullet -short_description: Sends notifications to Pushbullet -description: - - This module sends push notifications through Pushbullet to channels or devices. 
-deprecated: - removed_in: 13.0.0 - why: Module relies on Python package pushbullet.py which is not maintained and supports only up to Python 3.2. - alternative: There is none. -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - api_key: - type: str - description: - - Push bullet API token. - required: true - channel: - type: str - description: - - The channel TAG you wish to broadcast a push notification, as seen on the "My Channels" > "Edit your channel" at Pushbullet - page. - device: - type: str - description: - - The device NAME you wish to send a push notification, as seen on the Pushbullet main page. - push_type: - type: str - description: - - Thing you wish to push. - default: note - choices: ["note", "link"] - title: - type: str - description: - - Title of the notification. - required: true - body: - type: str - description: - - Body of the notification, for example details of the fault you are alerting. - url: - type: str - description: - - URL field, used when O(push_type=link). -notes: - - Requires C(pushbullet.py) Python package on the remote host. You can install it through C(pip) with C(pip install pushbullet.py). - - See U(https://github.com/randomchars/pushbullet.py). 
-""" - -EXAMPLES = r""" -- name: Sends a push notification to a device - community.general.pushbullet: - api_key: "ABC123abc123ABC123abc123ABC123ab" - device: "Chrome" - title: "You may see this on Google Chrome" - -- name: Sends a link to a device - community.general.pushbullet: - api_key: ABC123abc123ABC123abc123ABC123ab - device: Chrome - push_type: link - title: Ansible Documentation - body: https://docs.ansible.com/ - -- name: Sends a push notification to a channel - community.general.pushbullet: - api_key: ABC123abc123ABC123abc123ABC123ab - channel: my-awesome-channel - title: "Broadcasting a message to the #my-awesome-channel folks" - -- name: Sends a push notification with title and body to a channel - community.general.pushbullet: - api_key: ABC123abc123ABC123abc123ABC123ab - channel: my-awesome-channel - title: ALERT! Signup service is down - body: Error rate on signup service is over 90% for more than 2 minutes -""" - -import traceback - -PUSHBULLET_IMP_ERR = None -try: - from pushbullet import PushBullet - from pushbullet.errors import InvalidKeyError, PushError -except ImportError: - PUSHBULLET_IMP_ERR = traceback.format_exc() - pushbullet_found = False -else: - pushbullet_found = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - -# =========================================== -# Main -# - - -def main(): - module = AnsibleModule( - argument_spec=dict( - api_key=dict(type="str", required=True, no_log=True), - channel=dict(type="str"), - device=dict(type="str"), - push_type=dict(type="str", default="note", choices=["note", "link"]), - title=dict(type="str", required=True), - body=dict(type="str"), - url=dict(type="str"), - ), - mutually_exclusive=(["channel", "device"],), - supports_check_mode=True, - ) - - api_key = module.params["api_key"] - channel = module.params["channel"] - device = module.params["device"] - push_type = module.params["push_type"] - title = module.params["title"] - body = module.params["body"] - url 
= module.params["url"] - - if not pushbullet_found: - module.fail_json(msg=missing_required_lib("pushbullet.py"), exception=PUSHBULLET_IMP_ERR) - - # Init pushbullet - try: - pb = PushBullet(api_key) - target = None - except InvalidKeyError: - module.fail_json(msg="Invalid api_key") - - # Checks for channel/device - if device is None and channel is None: - module.fail_json(msg="You need to provide a channel or a device.") - - # Search for given device - if device is not None: - devices_by_nickname = {} - for d in pb.devices: - devices_by_nickname[d.nickname] = d - - if device in devices_by_nickname: - target = devices_by_nickname[device] - else: - str_devices_by_nickname = "', '".join(devices_by_nickname) - module.fail_json(msg=f"Device '{device}' not found. Available devices: '{str_devices_by_nickname}'") - - # Search for given channel - if channel is not None: - channels_by_tag = {} - for c in pb.channels: - channels_by_tag[c.channel_tag] = c - - if channel in channels_by_tag: - target = channels_by_tag[channel] - else: - str_channels_by_tag = "', '".join(channels_by_tag) - module.fail_json(msg=f"Channel '{channel}' not found. Available channels: '{str_channels_by_tag}'") - - # If in check mode, exit saying that we succeeded - if module.check_mode: - module.exit_json(changed=False, msg="OK") - - # Send push notification - try: - if push_type == "link": - target.push_link(title, url, body) - else: - target.push_note(title, body) - module.exit_json(changed=False, msg="OK") - except PushError as e: - module.fail_json(msg=f"An error occurred, Pushbullet's response: {e}") - - module.fail_json(msg="An unknown error has occurred") - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/rocketchat.py b/plugins/modules/rocketchat.py index eda70c202b..4652dde888 100644 --- a/plugins/modules/rocketchat.py +++ b/plugins/modules/rocketchat.py @@ -101,9 +101,10 @@ options: description: - If V(true), the payload matches Rocket.Chat prior to 7.4.0 format. 
This format has been used by the module since its inception, but is no longer supported by Rocket.Chat 7.4.0. - - The default value of the option, V(true), is B(deprecated) since community.general 11.2.0 and will change to V(false) in community.general 13.0.0. + - The default value changed from V(true) to V(false) in community.general 13.0.0. - This parameter is going to be removed in a future release when Rocket.Chat 7.4.0 becomes the minimum supported version. type: bool + default: false version_added: 10.5.0 """ @@ -229,7 +230,7 @@ def main(): validate_certs=dict(default=True, type="bool"), color=dict(type="str", default="normal", choices=["normal", "good", "warning", "danger"]), attachments=dict(type="list", elements="dict"), - is_pre740=dict(type="bool"), + is_pre740=dict(type="bool", default=False), ) ) @@ -246,15 +247,6 @@ def main(): attachments = module.params["attachments"] is_pre740 = module.params["is_pre740"] - if is_pre740 is None: - module.deprecate( - "The default value 'true' for 'is_pre740' is deprecated and will change to 'false' in community.general 13.0.0." 
- " You can explicitly set 'is_pre740' in your task to avoid this deprecation warning", - version="13.0.0", - collection_name="community.general", - ) - is_pre740 = True - payload = build_payload_for_rocketchat( module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments, is_pre740 ) diff --git a/plugins/modules/sensu_check.py b/plugins/modules/sensu_check.py deleted file mode 100644 index f6f03aeaad..0000000000 --- a/plugins/modules/sensu_check.py +++ /dev/null @@ -1,379 +0,0 @@ -#!/usr/bin/python - -# Copyright (c) 2014, Anders Ingemann -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -DOCUMENTATION = r""" -module: sensu_check -short_description: Manage Sensu checks -description: - - Manage the checks that should be run on a machine by I(Sensu). - - Most options do not have a default and are not added to the check definition unless specified. - - All defaults except O(path), O(state), O(backup) and O(metric) are not managed by this module, they are simply specified - for your convenience. -deprecated: - removed_in: 13.0.0 - why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. - alternative: Use Sensu Go and its accompanying collection C(sensu.sensu_go). -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - name: - type: str - description: - - The name of the check. - - This is the key that is used to determine whether a check exists. - required: true - state: - type: str - description: - - Whether the check should be present or not. - choices: ['present', 'absent'] - default: present - path: - type: str - description: - - Path to the JSON file of the check to be added/removed. - - It is created if it does not exist (unless O(state=absent)). 
- - The parent folders need to exist when O(state=present), otherwise an error is thrown. - default: /etc/sensu/conf.d/checks.json - backup: - description: - - Create a backup file (if yes), including the timestamp information so you can get the original file back if you somehow - clobbered it incorrectly. - type: bool - default: false - command: - type: str - description: - - Path to the sensu check to run (not required when O(state=absent)). - handlers: - type: list - elements: str - description: - - List of handlers to notify when the check fails. - subscribers: - type: list - elements: str - description: - - List of subscribers/channels this check should run for. - - See sensu_subscribers to subscribe a machine to a channel. - interval: - type: int - description: - - Check interval in seconds. - timeout: - type: int - description: - - Timeout for the check. - - If not specified, it defaults to 10. - ttl: - type: int - description: - - Time to live in seconds until the check is considered stale. - handle: - description: - - Whether the check should be handled or not. - - Default is V(false). - type: bool - subdue_begin: - type: str - description: - - When to disable handling of check failures. - subdue_end: - type: str - description: - - When to enable handling of check failures. - dependencies: - type: list - elements: str - description: - - Other checks this one depends on. - - If dependencies fail handling of this check is disabled. - metric: - description: - - Whether the check is a metric. - type: bool - default: false - standalone: - description: - - Whether the check should be scheduled by the sensu client or server. - - This option obviates the need for specifying the O(subscribers) option. - - Default is V(false). - type: bool - publish: - description: - - Whether the check should be scheduled at all. - - You can still issue it using the sensu API. - - Default is V(false). 
- type: bool - occurrences: - type: int - description: - - Number of event occurrences before the handler should take action. - - If not specified, defaults to 1. - refresh: - type: int - description: - - Number of seconds handlers should wait before taking second action. - aggregate: - description: - - Classifies the check as an aggregate check, making it available using the aggregate API. - - Default is V(false). - type: bool - low_flap_threshold: - type: int - description: - - The low threshold for flap detection. - high_flap_threshold: - type: int - description: - - The high threshold for flap detection. - custom: - type: dict - description: - - A hash/dictionary of custom parameters for mixing to the configuration. - - You cannot rewrite other module parameters using this. - source: - type: str - description: - - The check source, used to create a JIT Sensu client for an external resource (for example a network switch). -author: "Anders Ingemann (@andsens)" -""" - -EXAMPLES = r""" -# Fetch metrics about the CPU load every 60 seconds, -# the sensu server has a handler called 'relay' which forwards stats to graphite -- name: Get cpu metrics - community.general.sensu_check: - name: cpu_load - command: /etc/sensu/plugins/system/cpu-mpstat-metrics.rb - metric: true - handlers: relay - subscribers: common - interval: 60 - -# Check whether nginx is running -- name: Check nginx process - community.general.sensu_check: - name: nginx_running - command: /etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid - handlers: default - subscribers: nginx - interval: 60 - -# Stop monitoring the disk capacity. -# Note that the check will still show up in the sensu dashboard, -# to remove it completely you need to issue a DELETE request to the sensu api. 
-- name: Check disk - community.general.sensu_check: - name: check_disk_capacity - state: absent -""" - -import json -import traceback - -from ansible.module_utils.basic import AnsibleModule - - -def sensu_check(module, path, name, state="present", backup=False): - changed = False - reasons = [] - - stream = None - try: - try: - stream = open(path) - config = json.load(stream) - except OSError as e: - if e.errno == 2: # File not found, non-fatal - if state == "absent": - reasons.append("file did not exist and state is `absent'") - return changed, reasons - config = {} - else: - module.fail_json(msg=f"{e}", exception=traceback.format_exc()) - except ValueError: - msg = f"{path} contains invalid JSON" - module.fail_json(msg=msg) - finally: - if stream: - stream.close() - - if "checks" not in config: - if state == "absent": - reasons.append("`checks' section did not exist and state is `absent'") - return changed, reasons - config["checks"] = {} - changed = True - reasons.append("`checks' section did not exist") - - if state == "absent": - if name in config["checks"]: - del config["checks"][name] - changed = True - reasons.append("check was present and state is `absent'") - - if state == "present": - if name not in config["checks"]: - check = {} - config["checks"][name] = check - changed = True - reasons.append("check was absent and state is `present'") - else: - check = config["checks"][name] - simple_opts = [ - "command", - "handlers", - "subscribers", - "interval", - "timeout", - "ttl", - "handle", - "dependencies", - "standalone", - "publish", - "occurrences", - "refresh", - "aggregate", - "low_flap_threshold", - "high_flap_threshold", - "source", - ] - for opt in simple_opts: - if module.params[opt] is not None: - if opt not in check or check[opt] != module.params[opt]: - check[opt] = module.params[opt] - changed = True - reasons.append(f"`{opt}' did not exist or was different") - else: - if opt in check: - del check[opt] - changed = True - 
reasons.append(f"`{opt}' was removed") - - if module.params["custom"]: - # Convert to json - custom_params = module.params["custom"] - overwrited_fields = set(custom_params.keys()) & set( - simple_opts + ["type", "subdue", "subdue_begin", "subdue_end"] - ) - if overwrited_fields: - msg = f'You can\'t overwriting standard module parameters via "custom". You are trying overwrite: {list(overwrited_fields)}' - module.fail_json(msg=msg) - - for k, v in custom_params.items(): - if k in config["checks"][name]: - if config["checks"][name][k] != v: - changed = True - reasons.append(f"`custom param {k}' was changed") - else: - changed = True - reasons.append(f"`custom param {k}' was added") - check[k] = v - simple_opts += custom_params.keys() - - # Remove obsolete custom params - for opt in set(config["checks"][name].keys()) - set( - simple_opts + ["type", "subdue", "subdue_begin", "subdue_end"] - ): - changed = True - reasons.append(f"`custom param {opt}' was deleted") - del check[opt] - - if module.params["metric"]: - if "type" not in check or check["type"] != "metric": - check["type"] = "metric" - changed = True - reasons.append("`type' was not defined or not `metric'") - if not module.params["metric"] and "type" in check: - del check["type"] - changed = True - reasons.append("`type' was defined") - - if module.params["subdue_begin"] is not None and module.params["subdue_end"] is not None: - subdue = { - "begin": module.params["subdue_begin"], - "end": module.params["subdue_end"], - } - if "subdue" not in check or check["subdue"] != subdue: - check["subdue"] = subdue - changed = True - reasons.append("`subdue' did not exist or was different") - else: - if "subdue" in check: - del check["subdue"] - changed = True - reasons.append("`subdue' was removed") - - if changed and not module.check_mode: - if backup: - module.backup_local(path) - try: - try: - stream = open(path, "w") - stream.write(json.dumps(config, indent=2) + "\n") - except OSError as e: - 
module.fail_json(msg=f"{e}", exception=traceback.format_exc()) - finally: - if stream: - stream.close() - - return changed, reasons - - -def main(): - arg_spec = { - "name": {"type": "str", "required": True}, - "path": {"type": "str", "default": "/etc/sensu/conf.d/checks.json"}, - "state": {"type": "str", "default": "present", "choices": ["present", "absent"]}, - "backup": {"type": "bool", "default": False}, - "command": {"type": "str"}, - "handlers": {"type": "list", "elements": "str"}, - "subscribers": {"type": "list", "elements": "str"}, - "interval": {"type": "int"}, - "timeout": {"type": "int"}, - "ttl": {"type": "int"}, - "handle": {"type": "bool"}, - "subdue_begin": {"type": "str"}, - "subdue_end": {"type": "str"}, - "dependencies": {"type": "list", "elements": "str"}, - "metric": {"type": "bool", "default": False}, - "standalone": {"type": "bool"}, - "publish": {"type": "bool"}, - "occurrences": {"type": "int"}, - "refresh": {"type": "int"}, - "aggregate": {"type": "bool"}, - "low_flap_threshold": {"type": "int"}, - "high_flap_threshold": {"type": "int"}, - "custom": {"type": "dict"}, - "source": {"type": "str"}, - } - - required_together = [["subdue_begin", "subdue_end"]] - - module = AnsibleModule(argument_spec=arg_spec, required_together=required_together, supports_check_mode=True) - if module.params["state"] != "absent" and module.params["command"] is None: - module.fail_json(msg="missing required arguments: command") - - path = module.params["path"] - name = module.params["name"] - state = module.params["state"] - backup = module.params["backup"] - - changed, reasons = sensu_check(module, path, name, state, backup) - - module.exit_json(path=path, changed=changed, msg="OK", name=name, reasons=reasons) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/sensu_client.py b/plugins/modules/sensu_client.py deleted file mode 100644 index 110818dcb9..0000000000 --- a/plugins/modules/sensu_client.py +++ /dev/null @@ -1,285 +0,0 @@ 
-#!/usr/bin/python - -# Copyright (c) 2017, Red Hat Inc. -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -DOCUMENTATION = r""" -module: sensu_client -author: "David Moreau Simard (@dmsimard)" -short_description: Manages Sensu client configuration -description: - - Manages Sensu client configuration. - - For more information, refer to the L(Sensu documentation, https://sensuapp.org/docs/latest/reference/clients.html). -deprecated: - removed_in: 13.0.0 - why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. - alternative: Use Sensu Go and its accompanying collection C(sensu.sensu_go). -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - state: - type: str - description: - - Whether the client should be present or not. - choices: ['present', 'absent'] - default: present - name: - type: str - description: - - A unique name for the client. The name cannot contain special characters or spaces. - - If not specified, it defaults to the system hostname as determined by Ruby Socket.gethostname (provided by Sensu). - address: - type: str - description: - - An address to help identify and reach the client. This is only informational, usually an IP address or hostname. - - If not specified it defaults to non-loopback IPv4 address as determined by Ruby C(Socket.ip_address_list) (provided - by Sensu). - subscriptions: - type: list - elements: str - description: - - An array of client subscriptions, a list of roles and/or responsibilities assigned to the system (for example V(webserver)). - - These subscriptions determine which monitoring checks are executed by the client, as check requests are sent to subscriptions. - - The subscriptions array items must be strings. 
- safe_mode: - description: - - If safe mode is enabled for the client. Safe mode requires local check definitions in order to accept a check request - and execute the check. - type: bool - default: false - redact: - type: list - elements: str - description: - - Client definition attributes to redact (values) when logging and sending client keepalives. - socket: - type: dict - description: - - The socket definition scope, used to configure the Sensu client socket. - keepalives: - description: - - If Sensu should monitor keepalives for this client. - type: bool - default: true - keepalive: - type: dict - description: - - The keepalive definition scope, used to configure Sensu client keepalives behavior (for example keepalive thresholds - and so). - registration: - type: dict - description: - - The registration definition scope, used to configure Sensu registration event handlers. - deregister: - description: - - If a deregistration event should be created upon Sensu client process stop. - - Default is V(false). - type: bool - deregistration: - type: dict - description: - - The deregistration definition scope, used to configure automated Sensu client de-registration. - ec2: - type: dict - description: - - The ec2 definition scope, used to configure the Sensu Enterprise AWS EC2 integration (Sensu Enterprise users only). - chef: - type: dict - description: - - The chef definition scope, used to configure the Sensu Enterprise Chef integration (Sensu Enterprise users only). - puppet: - type: dict - description: - - The puppet definition scope, used to configure the Sensu Enterprise Puppet integration (Sensu Enterprise users only). - servicenow: - type: dict - description: - - The servicenow definition scope, used to configure the Sensu Enterprise ServiceNow integration (Sensu Enterprise users - only). 
-""" - -EXAMPLES = r""" -# Minimum possible configuration -- name: Configure Sensu client - community.general.sensu_client: - subscriptions: - - default - -# With customization -- name: Configure Sensu client - community.general.sensu_client: - name: "{{ ansible_fqdn }}" - address: "{{ ansible_default_ipv4['address'] }}" - subscriptions: - - default - - webserver - redact: - - password - socket: - bind: 127.0.0.1 - port: 3030 - keepalive: - thresholds: - warning: 180 - critical: 300 - handlers: - - email - custom: - - broadcast: irc - occurrences: 3 - register: client - notify: - - Restart sensu-client - -- name: Secure Sensu client configuration file - ansible.builtin.file: - path: "{{ client['file'] }}" - owner: "sensu" - group: "sensu" - mode: "0600" - -- name: Delete the Sensu client configuration - community.general.sensu_client: - state: "absent" -""" - -RETURN = r""" -config: - description: Effective client configuration, when state is present. - returned: success - type: dict - sample: - { - "name": "client", - "subscriptions": [ - "default" - ] - } -file: - description: Path to the client configuration file. 
- returned: success - type: str - sample: "/etc/sensu/conf.d/client.json" -""" - -import json -import os - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - module = AnsibleModule( - supports_check_mode=True, - argument_spec=dict( - state=dict(type="str", choices=["present", "absent"], default="present"), - name=dict( - type="str", - ), - address=dict( - type="str", - ), - subscriptions=dict(type="list", elements="str"), - safe_mode=dict(type="bool", default=False), - redact=dict(type="list", elements="str"), - socket=dict(type="dict"), - keepalives=dict(type="bool", default=True), - keepalive=dict(type="dict"), - registration=dict(type="dict"), - deregister=dict(type="bool"), - deregistration=dict(type="dict"), - ec2=dict(type="dict"), - chef=dict(type="dict"), - puppet=dict(type="dict"), - servicenow=dict(type="dict"), - ), - required_if=[["state", "present", ["subscriptions"]]], - ) - - state = module.params["state"] - path = "/etc/sensu/conf.d/client.json" - - if state == "absent": - if os.path.exists(path): - if module.check_mode: - msg = f"{path} would have been deleted" - module.exit_json(msg=msg, changed=True) - else: - try: - os.remove(path) - msg = f"{path} deleted successfully" - module.exit_json(msg=msg, changed=True) - except OSError as e: - msg = "Exception when trying to delete {path}: {exception}" - module.fail_json(msg=msg.format(path=path, exception=str(e))) - else: - # Idempotency: it is okay if the file doesn't exist - msg = f"{path} already does not exist" - module.exit_json(msg=msg) - - # Build client configuration from module arguments - config = {"client": {}} - args = [ - "name", - "address", - "subscriptions", - "safe_mode", - "redact", - "socket", - "keepalives", - "keepalive", - "registration", - "deregister", - "deregistration", - "ec2", - "chef", - "puppet", - "servicenow", - ] - - for arg in args: - if arg in module.params and module.params[arg] is not None: - config["client"][arg] = module.params[arg] - - # 
Load the current config, if there is one, so we can compare - current_config = None - try: - current_config = json.load(open(path)) - except (OSError, ValueError): - # File either doesn't exist or it is invalid JSON - pass - - if current_config is not None and current_config == config: - # Config is the same, let's not change anything - module.exit_json(msg="Client configuration is already up to date", config=config["client"], file=path) - - # Validate that directory exists before trying to write to it - if not module.check_mode and not os.path.exists(os.path.dirname(path)): - try: - os.makedirs(os.path.dirname(path)) - except OSError as e: - module.fail_json(msg=f"Unable to create {os.path.dirname(path)}: {e}") - - if module.check_mode: - module.exit_json( - msg="Client configuration would have been updated", changed=True, config=config["client"], file=path - ) - - try: - with open(path, "w") as client: - client.write(json.dumps(config, indent=4)) - module.exit_json(msg="Client configuration updated", changed=True, config=config["client"], file=path) - except OSError as e: - module.fail_json(msg=f"Unable to write file {path}: {e}") - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/sensu_handler.py b/plugins/modules/sensu_handler.py deleted file mode 100644 index 8f7a702747..0000000000 --- a/plugins/modules/sensu_handler.py +++ /dev/null @@ -1,293 +0,0 @@ -#!/usr/bin/python - -# Copyright (c) 2017, Red Hat Inc. -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -DOCUMENTATION = r""" -module: sensu_handler -author: "David Moreau Simard (@dmsimard)" -short_description: Manages Sensu handler configuration -description: - - Manages Sensu handler configuration. - - For more information, refer to the L(Sensu documentation, https://sensuapp.org/docs/latest/reference/handlers.html). 
-deprecated: - removed_in: 13.0.0 - why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. - alternative: Use Sensu Go and its accompanying collection C(sensu.sensu_go). -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - state: - type: str - description: - - Whether the handler should be present or not. - choices: ['present', 'absent'] - default: present - name: - type: str - description: - - A unique name for the handler. The name cannot contain special characters or spaces. - required: true - type: - type: str - description: - - The handler type. - choices: ['pipe', 'tcp', 'udp', 'transport', 'set'] - filter: - type: str - description: - - The Sensu event filter (name) to use when filtering events for the handler. - filters: - type: list - elements: str - description: - - An array of Sensu event filters (names) to use when filtering events for the handler. - - Each array item must be a string. - severities: - type: list - elements: str - description: - - An array of check result severities the handler handles. - - 'NOTE: event resolution bypasses this filtering.' - - "Example: [ 'warning', 'critical', 'unknown' ]." - mutator: - type: str - description: - - The Sensu event mutator (name) to use to mutate event data for the handler. - timeout: - type: int - description: - - The handler execution duration timeout in seconds (hard stop). - - Only used by pipe and tcp handler types. - default: 10 - handle_silenced: - description: - - If events matching one or more silence entries should be handled. - type: bool - default: false - handle_flapping: - description: - - If events in the flapping state should be handled. - type: bool - default: false - command: - type: str - description: - - The handler command to be executed. - - The event data is passed to the process using STDIN. 
- - 'NOTE: the O(command) attribute is only required for Pipe handlers (that is, handlers configured with O(type=pipe)).' - socket: - type: dict - description: - - The socket definition scope, used to configure the TCP/UDP handler socket. - - 'NOTE: the O(socket) attribute is only required for TCP/UDP handlers (that is, handlers configured with O(type=tcp) - or O(type=udp)).' - pipe: - type: dict - description: - - The pipe definition scope, used to configure the Sensu transport pipe. - - 'NOTE: the O(pipe) attribute is only required for Transport handlers (that is, handlers configured with O(type=transport)).' - handlers: - type: list - elements: str - description: - - An array of Sensu event handlers (names) to use for events using the handler set. - - 'NOTE: the O(handlers) attribute is only required for handler sets (that is, handlers configured with O(type=set)).' -""" - -EXAMPLES = r""" -# Configure a handler that sends event data as STDIN (pipe) -- name: Configure IRC Sensu handler - community.general.sensu_handler: - name: "irc_handler" - type: "pipe" - command: "/usr/local/bin/notify-irc.sh" - severities: - - "ok" - - "critical" - - "warning" - - "unknown" - timeout: 15 - notify: - - Restart sensu-client - - Restart sensu-server - -# Delete a handler -- name: Delete IRC Sensu handler - community.general.sensu_handler: - name: "irc_handler" - state: "absent" - -# Example of a TCP handler -- name: Configure TCP Sensu handler - community.general.sensu_handler: - name: "tcp_handler" - type: "tcp" - timeout: 30 - socket: - host: "10.0.1.99" - port: 4444 - register: handler - notify: - - Restart sensu-client - - Restart sensu-server - -- name: Secure Sensu handler configuration file - ansible.builtin.file: - path: "{{ handler['file'] }}" - owner: "sensu" - group: "sensu" - mode: "0600" -""" - -RETURN = r""" -config: - description: Effective handler configuration, when state is present. 
- returned: success - type: dict - sample: - { - "name": "irc", - "type": "pipe", - "command": "/usr/local/bin/notify-irc.sh" - } -file: - description: Path to the handler configuration file. - returned: success - type: str - sample: "/etc/sensu/conf.d/handlers/irc.json" -name: - description: Name of the handler. - returned: success - type: str - sample: "irc" -""" - -import json -import os - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - module = AnsibleModule( - supports_check_mode=True, - argument_spec=dict( - state=dict(type="str", choices=["present", "absent"], default="present"), - name=dict(type="str", required=True), - type=dict(type="str", choices=["pipe", "tcp", "udp", "transport", "set"]), - filter=dict(type="str"), - filters=dict(type="list", elements="str"), - severities=dict(type="list", elements="str"), - mutator=dict(type="str"), - timeout=dict(type="int", default=10), - handle_silenced=dict(type="bool", default=False), - handle_flapping=dict(type="bool", default=False), - command=dict(type="str"), - socket=dict(type="dict"), - pipe=dict(type="dict"), - handlers=dict(type="list", elements="str"), - ), - required_if=[ - ["state", "present", ["type"]], - ["type", "pipe", ["command"]], - ["type", "tcp", ["socket"]], - ["type", "udp", ["socket"]], - ["type", "transport", ["pipe"]], - ["type", "set", ["handlers"]], - ], - ) - - state = module.params["state"] - name = module.params["name"] - path = f"/etc/sensu/conf.d/handlers/{name}.json" - - if state == "absent": - if os.path.exists(path): - if module.check_mode: - msg = f"{path} would have been deleted" - module.exit_json(msg=msg, changed=True) - else: - try: - os.remove(path) - msg = f"{path} deleted successfully" - module.exit_json(msg=msg, changed=True) - except OSError as e: - msg = "Exception when trying to delete {path}: {exception}" - module.fail_json(msg=msg.format(path=path, exception=str(e))) - else: - # Idempotency: it is okay if the file doesn't exist - msg = 
f"{path} already does not exist" - module.exit_json(msg=msg) - - # Build handler configuration from module arguments - config = {"handlers": {name: {}}} - args = [ - "type", - "filter", - "filters", - "severities", - "mutator", - "timeout", - "handle_silenced", - "handle_flapping", - "command", - "socket", - "pipe", - "handlers", - ] - - for arg in args: - if arg in module.params and module.params[arg] is not None: - config["handlers"][name][arg] = module.params[arg] - - # Load the current config, if there is one, so we can compare - current_config = None - try: - current_config = json.load(open(path)) - except (OSError, ValueError): - # File either doesn't exist or it is invalid JSON - pass - - if current_config is not None and current_config == config: - # Config is the same, let's not change anything - module.exit_json( - msg="Handler configuration is already up to date", config=config["handlers"][name], file=path, name=name - ) - - # Validate that directory exists before trying to write to it - if not module.check_mode and not os.path.exists(os.path.dirname(path)): - try: - os.makedirs(os.path.dirname(path)) - except OSError as e: - module.fail_json(msg=f"Unable to create {os.path.dirname(path)}: {e}") - - if module.check_mode: - module.exit_json( - msg="Handler configuration would have been updated", - changed=True, - config=config["handlers"][name], - file=path, - name=name, - ) - - try: - with open(path, "w") as handler: - handler.write(json.dumps(config, indent=4)) - module.exit_json( - msg="Handler configuration updated", changed=True, config=config["handlers"][name], file=path, name=name - ) - except OSError as e: - module.fail_json(msg=f"Unable to write file {path}: {e}") - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/sensu_silence.py b/plugins/modules/sensu_silence.py deleted file mode 100644 index c5e876f805..0000000000 --- a/plugins/modules/sensu_silence.py +++ /dev/null @@ -1,270 +0,0 @@ -#!/usr/bin/python - -# Copyright (c) 
2017, Steven Bambling -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -DOCUMENTATION = r""" -module: sensu_silence -author: Steven Bambling (@smbambling) -short_description: Manage Sensu silence entries -description: - - Create and clear (delete) a silence entries using the Sensu API for subscriptions and checks. -deprecated: - removed_in: 13.0.0 - why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. - alternative: Use Sensu Go and its accompanying collection C(sensu.sensu_go). -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - check: - type: str - description: - - Specifies the check which the silence entry applies to. - creator: - type: str - description: - - Specifies the entity responsible for this entry. - expire: - type: int - description: - - If specified, the silence entry is automatically cleared after this number of seconds. - expire_on_resolve: - description: - - If specified as true, the silence entry is automatically cleared once the condition it is silencing is resolved. - type: bool - reason: - type: str - description: - - If specified, this free-form string is used to provide context or rationale for the reason this silence entry was - created. - state: - type: str - description: - - Specifies to create or clear (delete) a silence entry using the Sensu API. - default: present - choices: ['present', 'absent'] - subscription: - type: str - description: - - Specifies the subscription which the silence entry applies to. - - To create a silence entry for a client prepend C(client:) to client name. Example - C(client:server1.example.dev). - required: true - url: - type: str - description: - - Specifies the URL of the Sensu monitoring host server. 
- default: http://127.0.01:4567 -""" - -EXAMPLES = r""" -# Silence ALL checks for a given client -- name: Silence server1.example.dev - community.general.sensu_silence: - subscription: client:server1.example.dev - creator: "{{ ansible_user_id }}" - reason: Performing maintenance - -# Silence specific check for a client -- name: Silence CPU_Usage check for server1.example.dev - community.general.sensu_silence: - subscription: client:server1.example.dev - check: CPU_Usage - creator: "{{ ansible_user_id }}" - reason: Investigation alert issue - -# Silence multiple clients from a dict - silence: - server1.example.dev: - reason: 'Deployment in progress' - server2.example.dev: - reason: 'Deployment in progress' - -- name: Silence several clients from a dict - community.general.sensu_silence: - subscription: "client:{{ item.key }}" - reason: "{{ item.value.reason }}" - creator: "{{ ansible_user_id }}" - with_dict: "{{ silence }}" -""" - -RETURN = r""" -""" - -import json - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def query(module, url, check, subscription): - headers = { - "Content-Type": "application/json", - } - - url = url + "/silenced" - - request_data = { - "check": check, - "subscription": subscription, - } - - # Remove keys with None value - for k, v in dict(request_data).items(): - if v is None: - del request_data[k] - - response, info = fetch_url(module, url, method="GET", headers=headers, data=json.dumps(request_data)) - - if info["status"] == 500: - module.fail_json(msg=f"Failed to query silence {subscription}. 
Reason: {info}") - - try: - json_out = json.loads(response.read()) - except Exception: - json_out = "" - - return False, json_out, False - - -def clear(module, url, check, subscription): - # Test if silence exists before clearing - (rc, out, changed) = query(module, url, check, subscription) - - d = {i["subscription"]: i["check"] for i in out} - subscription_exists = subscription in d - if check and subscription_exists: - exists = check == d[subscription] - else: - exists = subscription_exists - - # If check/subscription doesn't exist - # exit with changed state of False - if not exists: - return False, out, changed - - # module.check_mode is inherited from the AnsibleMOdule class - if not module.check_mode: - headers = { - "Content-Type": "application/json", - } - - url = url + "/silenced/clear" - - request_data = { - "check": check, - "subscription": subscription, - } - - # Remove keys with None value - for k, v in dict(request_data).items(): - if v is None: - del request_data[k] - - response, info = fetch_url(module, url, method="POST", headers=headers, data=json.dumps(request_data)) - - if info["status"] != 204: - module.fail_json(msg=f"Failed to silence {subscription}. 
Reason: {info}") - - try: - json_out = json.loads(response.read()) - except Exception: - json_out = "" - - return False, json_out, True - return False, out, True - - -def create(module, url, check, creator, expire, expire_on_resolve, reason, subscription): - (rc, out, changed) = query(module, url, check, subscription) - for i in out: - if i["subscription"] == subscription: - if ( - (check is None or check == i["check"]) - and (creator == "" or creator == i["creator"]) - and (reason == "" or reason == i["reason"]) - and (expire is None or expire == i["expire"]) - and (expire_on_resolve is None or expire_on_resolve == i["expire_on_resolve"]) - ): - return False, out, False - - # module.check_mode is inherited from the AnsibleMOdule class - if not module.check_mode: - headers = { - "Content-Type": "application/json", - } - - url = url + "/silenced" - - request_data = { - "check": check, - "creator": creator, - "expire": expire, - "expire_on_resolve": expire_on_resolve, - "reason": reason, - "subscription": subscription, - } - - # Remove keys with None value - for k, v in dict(request_data).items(): - if v is None: - del request_data[k] - - response, info = fetch_url(module, url, method="POST", headers=headers, data=json.dumps(request_data)) - - if info["status"] != 201: - module.fail_json(msg=f"Failed to silence {subscription}. 
Reason: {info['msg']}") - - try: - json_out = json.loads(response.read()) - except Exception: - json_out = "" - - return False, json_out, True - return False, out, True - - -def main(): - module = AnsibleModule( - argument_spec=dict( - check=dict(), - creator=dict(), - expire=dict(type="int"), - expire_on_resolve=dict(type="bool"), - reason=dict(), - state=dict(default="present", choices=["present", "absent"]), - subscription=dict(required=True), - url=dict(default="http://127.0.01:4567"), - ), - supports_check_mode=True, - ) - - url = module.params["url"] - check = module.params["check"] - creator = module.params["creator"] - expire = module.params["expire"] - expire_on_resolve = module.params["expire_on_resolve"] - reason = module.params["reason"] - subscription = module.params["subscription"] - state = module.params["state"] - - if state == "present": - (rc, out, changed) = create(module, url, check, creator, expire, expire_on_resolve, reason, subscription) - - if state == "absent": - (rc, out, changed) = clear(module, url, check, subscription) - - if rc != 0: - module.fail_json(msg="failed", result=out) - module.exit_json(msg="success", result=out, changed=changed) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/sensu_subscription.py b/plugins/modules/sensu_subscription.py deleted file mode 100644 index 1d79001fc5..0000000000 --- a/plugins/modules/sensu_subscription.py +++ /dev/null @@ -1,155 +0,0 @@ -#!/usr/bin/python - -# Copyright (c) 2014, Anders Ingemann -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -DOCUMENTATION = r""" -module: sensu_subscription -short_description: Manage Sensu subscriptions -description: - - Manage which I(sensu channels) a machine should subscribe to. 
-deprecated: - removed_in: 13.0.0 - why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. - alternative: Use Sensu Go and its accompanying collection C(sensu.sensu_go). -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: full - diff_mode: - support: none -options: - name: - type: str - description: - - The name of the channel. - required: true - state: - type: str - description: - - Whether the machine should subscribe or unsubscribe from the channel. - choices: ['present', 'absent'] - default: present - path: - type: str - description: - - Path to the subscriptions JSON file. - default: /etc/sensu/conf.d/subscriptions.json - backup: - description: - - Create a backup file (if yes), including the timestamp information so you can get the original file back if you somehow - clobbered it incorrectly. - type: bool - default: false -requirements: [] -author: Anders Ingemann (@andsens) -""" - -RETURN = r""" -reasons: - description: The reasons why the module changed or did not change something. 
- returned: success - type: list - sample: ["channel subscription was absent and state is 'present'"] -""" - -EXAMPLES = r""" -# Subscribe to the nginx channel -- name: Subscribe to nginx checks - community.general.sensu_subscription: name=nginx - -# Unsubscribe from the common checks channel -- name: Unsubscribe from common checks - community.general.sensu_subscription: name=common state=absent -""" - -import json -import traceback - -from ansible.module_utils.basic import AnsibleModule - - -def sensu_subscription(module, path, name, state="present", backup=False): - changed = False - reasons = [] - - try: - config = json.load(open(path)) - except OSError as e: - if e.errno == 2: # File not found, non-fatal - if state == "absent": - reasons.append("file did not exist and state is 'absent'") - return changed, reasons - config = {} - else: - module.fail_json(msg=f"{e}", exception=traceback.format_exc()) - except ValueError: - msg = f"{path} contains invalid JSON" - module.fail_json(msg=msg) - - if "client" not in config: - if state == "absent": - reasons.append("'client' did not exist and state is 'absent'") - return changed, reasons - config["client"] = {} - changed = True - reasons.append("'client' did not exist") - - if "subscriptions" not in config["client"]: - if state == "absent": - reasons.append("'client.subscriptions' did not exist and state is 'absent'") - return changed, reasons - config["client"]["subscriptions"] = [] - changed = True - reasons.append("'client.subscriptions' did not exist") - - if name not in config["client"]["subscriptions"]: - if state == "absent": - reasons.append("channel subscription was absent") - return changed, reasons - config["client"]["subscriptions"].append(name) - changed = True - reasons.append("channel subscription was absent and state is 'present'") - else: - if state == "absent": - config["client"]["subscriptions"].remove(name) - changed = True - reasons.append("channel subscription was present and state is 'absent'") - 
- if changed and not module.check_mode: - if backup: - module.backup_local(path) - try: - open(path, "w").write(json.dumps(config, indent=2) + "\n") - except OSError as e: - module.fail_json(msg=f"Failed to write to file {path}: {e}", exception=traceback.format_exc()) - - return changed, reasons - - -def main(): - arg_spec = { - "name": {"type": "str", "required": True}, - "path": {"type": "str", "default": "/etc/sensu/conf.d/subscriptions.json"}, - "state": {"type": "str", "default": "present", "choices": ["present", "absent"]}, - "backup": {"type": "bool", "default": False}, - } - - module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) - - path = module.params["path"] - name = module.params["name"] - state = module.params["state"] - backup = module.params["backup"] - - changed, reasons = sensu_subscription(module, path, name, state, backup) - - module.exit_json(path=path, name=name, changed=changed, msg="OK", reasons=reasons) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/spotinst_aws_elastigroup.py b/plugins/modules/spotinst_aws_elastigroup.py deleted file mode 100644 index 410a9d7430..0000000000 --- a/plugins/modules/spotinst_aws_elastigroup.py +++ /dev/null @@ -1,1473 +0,0 @@ -#!/usr/bin/python -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import annotations - -DOCUMENTATION = r""" -module: spotinst_aws_elastigroup -short_description: Create, update or delete Spotinst AWS Elastigroups -author: Spotinst (@talzur) -description: - - Can create, update, or delete Spotinst AWS Elastigroups Launch configuration is part of the elastigroup configuration, - so no additional modules are necessary for handling the launch configuration. You must have a credentials file in this - location - C($HOME/.spotinst/credentials). 
The credentials file must contain a row that looks like this C(token = ). - - Full documentation available at U(https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-). -deprecated: - removed_in: 13.0.0 - why: Module relies on unsupported Python package. - alternative: Use the module M(spot.cloud_modules.aws_elastigroup) instead. -requirements: - - spotinst_sdk >= 1.0.38 -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - - credentials_path: - description: - - Optional parameter that allows to set a non-default credentials path. - default: ~/.spotinst/credentials - type: path - - account_id: - description: - - Optional parameter that allows to set an account-id inside the module configuration. - - By default this is retrieved from the credentials path. - type: str - - token: - description: - - A Personal API Access Token issued by Spotinst. - - When not specified, the module tries to obtain it, in that order, from environment variable E(SPOTINST_TOKEN), or - from the credentials path. - type: str - - availability_vs_cost: - description: - - The strategy orientation. - - 'The choices available are: V(availabilityOriented), V(costOriented), V(balanced).' - required: true - type: str - - availability_zones: - description: - - A list of hash/dictionaries of Availability Zones that are configured in the elastigroup; '[{"key":"value", "key":"value"}]'; - keys allowed are name (String), subnet_id (String), placement_group_name (String),. 
- required: true - type: list - elements: dict - - block_device_mappings: - description: - - A list of hash/dictionaries of Block Device Mappings for elastigroup instances; You can specify virtual devices and - EBS volumes.; '[{"key":"value", "key":"value"}]'; keys allowed are device_name (List of Strings), virtual_name (String), - no_device (String), ebs (Object, expects the following keys- delete_on_termination(Boolean), encrypted(Boolean), iops - (Integer), snapshot_id(Integer), volume_type(String), volume_size(Integer)). - type: list - elements: dict - - chef: - description: - - The Chef integration configuration.; Expects the following keys - chef_server (String), organization (String), user - (String), pem_key (String), chef_version (String). - type: dict - - draining_timeout: - description: - - Time for instance to be drained from incoming requests and deregistered from ELB before termination. - type: int - - ebs_optimized: - description: - - Enable EBS optimization for supported instances which are not enabled by default. Note - additional charges are applied. - type: bool - - ebs_volume_pool: - description: - - A list of hash/dictionaries of EBS devices to reattach to the elastigroup when available; '[{"key":"value", "key":"value"}]'; - keys allowed are - volume_ids (List of Strings), device_name (String). - type: list - elements: dict - - ecs: - description: - - The ECS integration configuration.; Expects the following key - cluster_name (String). - type: dict - - elastic_ips: - description: - - List of ElasticIps Allocation IDs (example V(eipalloc-9d4e16f8)) to associate to the group instances. - type: list - elements: str - - fallback_to_od: - description: - - In case of no spots available, Elastigroup launches an On-demand instance instead. - type: bool - - health_check_grace_period: - description: - - The amount of time, in seconds, after the instance has launched to start and check its health. - - If not specified, it defaults to V(300). 
- type: int - - health_check_unhealthy_duration_before_replacement: - description: - - Minimal mount of time instance should be unhealthy for us to consider it unhealthy. - type: int - - health_check_type: - description: - - The service to use for the health check. - - 'The choices available are: V(ELB), V(HCS), V(TARGET_GROUP), V(MLB), V(EC2).' - type: str - - iam_role_name: - description: - - The instance profile iamRole name. - - Only use O(iam_role_arn) or O(iam_role_name). - type: str - - iam_role_arn: - description: - - The instance profile iamRole arn. - - Only use O(iam_role_arn) or O(iam_role_name). - type: str - - id: - description: - - The group ID if it already exists and you want to update, or delete it. This does not work unless the O(uniqueness_by) - field is set to ID. When this is set, and the O(uniqueness_by) field is set, the group is either updated or deleted, - but not created. - type: str - - image_id: - description: - - The image ID used to launch the instance.; In case of conflict between Instance type and image type, an error is be - returned. - required: true - type: str - - key_pair: - description: - - Specify a Key Pair to attach to the instances. - type: str - - kubernetes: - description: - - The Kubernetes integration configuration. Expects the following keys - api_server (String), token (String). - type: dict - - lifetime_period: - description: - - Lifetime period. - type: int - - load_balancers: - description: - - List of classic ELB names. - type: list - elements: str - - max_size: - description: - - The upper limit number of instances that you can scale up to. - required: true - type: int - - mesosphere: - description: - - The Mesosphere integration configuration. Expects the following key - api_server (String). - type: dict - - min_size: - description: - - The lower limit number of instances that you can scale down to. 
- required: true - type: int - - monitoring: - description: - - Describes whether instance Enhanced Monitoring is enabled. - type: str - - name: - description: - - Unique name for elastigroup to be created, updated or deleted. - required: true - type: str - - network_interfaces: - description: - - A list of hash/dictionaries of network interfaces to add to the elastigroup; '[{"key":"value", "key":"value"}]'; keys - allowed are - description (String), device_index (Integer), secondary_private_ip_address_count (Integer), associate_public_ip_address - (Boolean), delete_on_termination (Boolean), groups (List of Strings), network_interface_id (String), private_ip_address - (String), subnet_id (String), associate_ipv6_address (Boolean), private_ip_addresses (List of Objects, Keys are privateIpAddress - (String, required) and primary (Boolean)). - type: list - elements: dict - - on_demand_count: - description: - - Required if risk is not set. - - Number of on demand instances to launch. All other instances are spot instances.; Either set this parameter or the - O(risk) parameter. - type: int - - on_demand_instance_type: - description: - - On-demand instance type that is provisioned. - type: str - - opsworks: - description: - - The elastigroup OpsWorks integration configuration.; Expects the following key - layer_id (String). - type: dict - - persistence: - description: - - The Stateful elastigroup configuration.; Accepts the following keys - should_persist_root_device (Boolean), should_persist_block_devices - (Boolean), should_persist_private_ip (Boolean). - type: dict - - product: - description: - - Operation system type. - - 'Available choices are: V(Linux/UNIX), V(SUSE Linux), V(Windows), V(Linux/UNIX (Amazon VPC)), V(SUSE Linux (Amazon - VPC)).' - required: true - type: str - - rancher: - description: - - The Rancher integration configuration.; Expects the following keys - version (String), access_key (String), secret_key - (String), master_host (String). 
- type: dict - - right_scale: - description: - - The Rightscale integration configuration.; Expects the following keys - account_id (String), refresh_token (String). - type: dict - - risk: - description: - - Required if on demand is not set. The percentage of Spot instances to launch (0 - 100). - type: int - - roll_config: - description: - - Roll configuration. - - If you would like the group to roll after updating, please use this feature. - - Accepts the following keys - batch_size_percentage(Integer, Required), grace_period - (Integer, Required), health_check_type(String, - Optional). - type: dict - - scheduled_tasks: - description: - - A list of hash/dictionaries of scheduled tasks to configure in the elastigroup, as in V([{"key":"value", "key":"value"}]). - - 'Keys allowed are: adjustment (Integer), scale_target_capacity (Integer), scale_min_capacity (Integer), scale_max_capacity - (Integer), adjustment_percentage (Integer), batch_size_percentage (Integer), cron_expression (String), frequency (String), - grace_period (Integer), task_type (String, required), is_enabled (Boolean).' - type: list - elements: dict - - security_group_ids: - description: - - One or more security group IDs. - - In case of update it overrides the existing Security Group with the new given array. - required: true - type: list - elements: str - - shutdown_script: - description: - - The Base64-encoded shutdown script that executes prior to instance termination. Encode before setting. - type: str - - signals: - description: - - A list of hash/dictionaries of signals to configure in the elastigroup; keys allowed are - name (String, required), - timeout (Integer). - type: list - elements: dict - - spin_up_time: - description: - - Spin up time, in seconds, for the instance. - type: int - - spot_instance_types: - description: - - Spot instance type that is provisioned. 
- required: true - type: list - elements: str - - state: - choices: - - present - - absent - description: - - Create or delete the elastigroup. - default: present - type: str - - tags: - description: - - A list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value). - type: list - elements: dict - - target: - description: - - The number of instances to launch. - required: true - type: int - - target_group_arns: - description: - - List of target group arns instances should be registered to. - type: list - elements: str - - tenancy: - description: - - Dedicated or shared tenancy. - - 'The available choices are: V(default), V(dedicated).' - type: str - - terminate_at_end_of_billing_hour: - description: - - Terminate at the end of billing hour. - type: bool - - unit: - description: - - The capacity unit to launch instances by. - - 'The available choices are: V(instance), V(weight).' - type: str - - up_scaling_policies: - description: - - A list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; - keys allowed are - policy_name (String, required), namespace (String, required), metric_name (String, required), dimensions - (List of Objects, Keys allowed are name (String, required) and value (String)), statistic (String, required) evaluation_periods - (String, required), period (String, required), threshold (String, required), cooldown (String, required), unit (String, - required), operator (String, required), action_type (String, required), adjustment (String), min_target_capacity (String), - target (String), maximum (String), minimum (String). 
- type: list - elements: dict - - down_scaling_policies: - description: - - A list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; - keys allowed are - policy_name (String, required), namespace (String, required), metric_name (String, required), dimensions - ((List of Objects), Keys allowed are name (String, required) and value (String)), statistic (String, required), evaluation_periods - (String, required), period (String, required), threshold (String, required), cooldown (String, required), unit (String, - required), operator (String, required), action_type (String, required), adjustment (String), max_target_capacity (String), - target (String), maximum (String), minimum (String). - type: list - elements: dict - - target_tracking_policies: - description: - - A list of hash/dictionaries of target tracking policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; - keys allowed are - policy_name (String, required), namespace (String, required), source (String, required), metric_name - (String, required), statistic (String, required), unit (String, required), cooldown (String, required), target (String, - required). - type: list - elements: dict - - uniqueness_by: - choices: - - id - - name - description: - - If your group names are not unique, you may use this feature to update or delete a specific group. Whenever this property - is set, you must set a group_id in order to update or delete a group, otherwise a group is created. - default: name - type: str - - user_data: - description: - - Base64-encoded MIME user data. Encode before setting the value. - type: str - - utilize_reserved_instances: - description: - - In case of any available Reserved Instances, Elastigroup utilizes your reservations before purchasing Spot instances. - type: bool - - wait_for_instances: - description: - - Whether or not the elastigroup creation / update actions should wait for the instances to spin. 
- type: bool - default: false - - wait_timeout: - description: - - How long the module should wait for instances before failing the action. - - Only works if O(wait_for_instances=true). - type: int - - do_not_update: - description: - - TODO document. - type: list - elements: str - default: [] - - multai_token: - description: - - Token used for Multai configuration. - type: str - - multai_load_balancers: - description: - - Configuration parameters for Multai load balancers. - type: list - elements: dict - - elastic_beanstalk: - description: - - Placeholder parameter for future implementation of Elastic Beanstalk configurations. - type: dict -""" -EXAMPLES = r""" -# Basic configuration YAML example - -- hosts: localhost - tasks: - - name: Create elastigroup - community.general.spotinst_aws_elastigroup: - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 15 - min_size: 0 - target: 0 - unit: instance - monitoring: true - name: ansible-group - on_demand_instance_type: c3.large - product: Linux/UNIX - load_balancers: - - test-lb-1 - security_group_ids: - - sg-8f4b8fe9 - spot_instance_types: - - c3.large - do_not_update: - - image_id - - target - register: result - - ansible.builtin.debug: var=result - -# In this example, we create an elastigroup and wait 600 seconds to retrieve the instances, and use their private ips - -- hosts: localhost - tasks: - - name: Create elastigroup - community.general.spotinst_aws_elastigroup: - state: present - account_id: act-1a9dd2b - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - tags: - - Environment: someEnvValue - - OtherTagKey: otherValue - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 5 - min_size: 0 - target: 0 - unit: instance - monitoring: true - name: ansible-group-tal - on_demand_instance_type: 
c3.large - product: Linux/UNIX - security_group_ids: - - sg-8f4b8fe9 - block_device_mappings: - - device_name: '/dev/sda1' - ebs: - volume_size: 100 - volume_type: gp2 - spot_instance_types: - - c3.large - do_not_update: - - image_id - wait_for_instances: true - wait_timeout: 600 - register: result - - - name: Store private ips to file - ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips - with_items: "{{ result.instances }}" - - ansible.builtin.debug: var=result - -# In this example, we create an elastigroup with multiple block device mappings, tags, and also an account id -# In organizations with more than one account, it is required to specify an account_id - -- hosts: localhost - tasks: - - name: Create elastigroup - community.general.spotinst_aws_elastigroup: - state: present - account_id: act-1a9dd2b - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - tags: - - Environment: someEnvValue - - OtherTagKey: otherValue - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 5 - min_size: 0 - target: 0 - unit: instance - monitoring: true - name: ansible-group-tal - on_demand_instance_type: c3.large - product: Linux/UNIX - security_group_ids: - - sg-8f4b8fe9 - block_device_mappings: - - device_name: '/dev/xvda' - ebs: - volume_size: 60 - volume_type: gp2 - - device_name: '/dev/xvdb' - ebs: - volume_size: 120 - volume_type: gp2 - spot_instance_types: - - c3.large - do_not_update: - - image_id - wait_for_instances: true - wait_timeout: 600 - register: result - - - name: Store private ips to file - ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips - with_items: "{{ result.instances }}" - - ansible.builtin.debug: var=result - -# In this example we have set up block device mapping with ephemeral devices - -- hosts: localhost - tasks: - - name: Create elastigroup - community.general.spotinst_aws_elastigroup: - state: present - risk: 100 - 
availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 15 - min_size: 0 - target: 0 - unit: instance - block_device_mappings: - - device_name: '/dev/xvda' - virtual_name: ephemeral0 - - device_name: '/dev/xvdb/' - virtual_name: ephemeral1 - monitoring: true - name: ansible-group - on_demand_instance_type: c3.large - product: Linux/UNIX - load_balancers: - - test-lb-1 - security_group_ids: - - sg-8f4b8fe9 - spot_instance_types: - - c3.large - do_not_update: - - image_id - - target - register: result - - ansible.builtin.debug: var=result - -# In this example we create a basic group configuration with a network interface defined. -# Each network interface must have a device index - -- hosts: localhost - tasks: - - name: Create elastigroup - community.general.spotinst_aws_elastigroup: - state: present - risk: 100 - availability_vs_cost: balanced - network_interfaces: - - associate_public_ip_address: true - device_index: 0 - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 15 - min_size: 0 - target: 0 - unit: instance - monitoring: true - name: ansible-group - on_demand_instance_type: c3.large - product: Linux/UNIX - load_balancers: - - test-lb-1 - security_group_ids: - - sg-8f4b8fe9 - spot_instance_types: - - c3.large - do_not_update: - - image_id - - target - register: result - - ansible.builtin.debug: var=result - - -# In this example we create a basic group configuration with a target tracking scaling policy defined - -- hosts: localhost - tasks: - - name: Create elastigroup - community.general.spotinst_aws_elastigroup: - account_id: act-92d45673 - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-79da021e - image_id: ami-f173cc91 - fallback_to_od: true - tags: - - Creator: ValueOfCreatorTag - - 
Environment: ValueOfEnvironmentTag - key_pair: spotinst-labs-oregon - max_size: 10 - min_size: 0 - target: 2 - unit: instance - monitoring: true - name: ansible-group-1 - on_demand_instance_type: c3.large - product: Linux/UNIX - security_group_ids: - - sg-46cdc13d - spot_instance_types: - - c3.large - target_tracking_policies: - - policy_name: target-tracking-1 - namespace: AWS/EC2 - metric_name: CPUUtilization - statistic: average - unit: percent - target: 50 - cooldown: 120 - do_not_update: - - image_id - register: result - - ansible.builtin.debug: var=result -""" - -RETURN = r""" -instances: - description: List of active elastigroup instances and their details. - returned: success - type: dict - sample: - - "spotInstanceRequestId": "sir-regs25zp" - "instanceId": "i-09640ad8678234c" - "instanceType": "m4.large" - "product": "Linux/UNIX" - "availabilityZone": "us-west-2b" - "privateIp": "180.0.2.244" - "createdAt": "2017-07-17T12:46:18.000Z" - "status": "fulfilled" -group_id: - description: Created / Updated group's ID. 
- returned: success - type: str - sample: "sig-12345" -""" - -HAS_SPOTINST_SDK = False - -import os -import time - -from ansible.module_utils.basic import AnsibleModule - -try: - import spotinst_sdk as spotinst - from spotinst_sdk import SpotinstClientException - - HAS_SPOTINST_SDK = True - -except ImportError: - pass - -eni_fields = ( - "description", - "device_index", - "secondary_private_ip_address_count", - "associate_public_ip_address", - "delete_on_termination", - "groups", - "network_interface_id", - "private_ip_address", - "subnet_id", - "associate_ipv6_address", -) - -private_ip_fields = ("private_ip_address", "primary") - -capacity_fields = ( - dict(ansible_field_name="min_size", spotinst_field_name="minimum"), - dict(ansible_field_name="max_size", spotinst_field_name="maximum"), - "target", - "unit", -) - -lspec_fields = ( - "user_data", - "key_pair", - "tenancy", - "shutdown_script", - "monitoring", - "ebs_optimized", - "image_id", - "health_check_type", - "health_check_grace_period", - "health_check_unhealthy_duration_before_replacement", - "security_group_ids", -) - -iam_fields = ( - dict(ansible_field_name="iam_role_name", spotinst_field_name="name"), - dict(ansible_field_name="iam_role_arn", spotinst_field_name="arn"), -) - -scheduled_task_fields = ( - "adjustment", - "adjustment_percentage", - "batch_size_percentage", - "cron_expression", - "frequency", - "grace_period", - "task_type", - "is_enabled", - "scale_target_capacity", - "scale_min_capacity", - "scale_max_capacity", -) - -scaling_policy_fields = ( - "policy_name", - "namespace", - "metric_name", - "dimensions", - "statistic", - "evaluation_periods", - "period", - "threshold", - "cooldown", - "unit", - "operator", -) - -tracking_policy_fields = ( - "policy_name", - "namespace", - "source", - "metric_name", - "statistic", - "unit", - "cooldown", - "target", - "threshold", -) - -action_fields = ( - dict(ansible_field_name="action_type", spotinst_field_name="type"), - "adjustment", - 
"min_target_capacity", - "max_target_capacity", - "target", - "minimum", - "maximum", -) - -signal_fields = ("name", "timeout") - -multai_lb_fields = ("balancer_id", "project_id", "target_set_id", "az_awareness", "auto_weight") - -persistence_fields = ("should_persist_root_device", "should_persist_block_devices", "should_persist_private_ip") - -strategy_fields = ( - "risk", - "utilize_reserved_instances", - "fallback_to_od", - "on_demand_count", - "availability_vs_cost", - "draining_timeout", - "spin_up_time", - "lifetime_period", -) - -ebs_fields = ("delete_on_termination", "encrypted", "iops", "snapshot_id", "volume_type", "volume_size") - -bdm_fields = ("device_name", "virtual_name", "no_device") - -kubernetes_fields = ("api_server", "token") - -right_scale_fields = ("account_id", "refresh_token") - -rancher_fields = ("access_key", "secret_key", "master_host", "version") - -chef_fields = ("chef_server", "organization", "user", "pem_key", "chef_version") - -az_fields = ("name", "subnet_id", "placement_group_name") - -opsworks_fields = ("layer_id",) - -scaling_strategy_fields = ("terminate_at_end_of_billing_hour",) - -mesosphere_fields = ("api_server",) - -ecs_fields = ("cluster_name",) - -multai_fields = ("multai_token",) - - -def handle_elastigroup(client, module): - has_changed = False - group_id = None - message = "None" - - name = module.params.get("name") - state = module.params.get("state") - uniqueness_by = module.params.get("uniqueness_by") - external_group_id = module.params.get("id") - - if uniqueness_by == "id": - if external_group_id is None: - should_create = True - else: - should_create = False - group_id = external_group_id - else: - groups = client.get_elastigroups() - should_create, group_id = find_group_with_same_name(groups, name) - - if should_create is True: - if state == "present": - eg = expand_elastigroup(module, is_update=False) - module.debug(f" [INFO] {message}\n") - group = client.create_elastigroup(group=eg) - group_id = group["id"] - 
message = "Created group Successfully." - has_changed = True - - elif state == "absent": - message = "Cannot delete non-existent group." - has_changed = False - else: - eg = expand_elastigroup(module, is_update=True) - - if state == "present": - group = client.update_elastigroup(group_update=eg, group_id=group_id) - message = "Updated group successfully." - - try: - roll_config = module.params.get("roll_config") - if roll_config: - eg_roll = spotinst.aws_elastigroup.Roll( - batch_size_percentage=roll_config.get("batch_size_percentage"), - grace_period=roll_config.get("grace_period"), - health_check_type=roll_config.get("health_check_type"), - ) - client.roll_group(group_roll=eg_roll, group_id=group_id) - message = "Updated and started rolling the group successfully." - - except SpotinstClientException as exc: - message = f"Updated group successfully, but failed to perform roll. Error:{exc}" - has_changed = True - - elif state == "absent": - try: - client.delete_elastigroup(group_id=group_id) - except SpotinstClientException as exc: - if "GROUP_DOESNT_EXIST" in exc.message: - pass - else: - module.fail_json(msg=f"Error while attempting to delete group : {exc.message}") - - message = "Deleted group successfully." 
- has_changed = True - - return group_id, message, has_changed - - -def retrieve_group_instances(client, module, group_id): - wait_timeout = module.params.get("wait_timeout") - wait_for_instances = module.params.get("wait_for_instances") - - health_check_type = module.params.get("health_check_type") - - if wait_timeout is None: - wait_timeout = 300 - - wait_timeout = time.time() + wait_timeout - target = module.params.get("target") - state = module.params.get("state") - instances = list() - - if state == "present" and group_id is not None and wait_for_instances is True: - is_amount_fulfilled = False - while is_amount_fulfilled is False and wait_timeout > time.time(): - instances = list() - amount_of_fulfilled_instances = 0 - - if health_check_type is not None: - healthy_instances = client.get_instance_healthiness(group_id=group_id) - - for healthy_instance in healthy_instances: - if healthy_instance.get("healthStatus") == "HEALTHY": - amount_of_fulfilled_instances += 1 - instances.append(healthy_instance) - - else: - active_instances = client.get_elastigroup_active_instances(group_id=group_id) - - for active_instance in active_instances: - if active_instance.get("private_ip") is not None: - amount_of_fulfilled_instances += 1 - instances.append(active_instance) - - if amount_of_fulfilled_instances >= target: - is_amount_fulfilled = True - - time.sleep(10) - - return instances - - -def find_group_with_same_name(groups, name): - for group in groups: - if group["name"] == name: - return False, group.get("id") - - return True, None - - -def expand_elastigroup(module, is_update): - do_not_update = module.params["do_not_update"] - name = module.params.get("name") - - eg = spotinst.aws_elastigroup.Elastigroup() - description = module.params.get("description") - - if name is not None: - eg.name = name - if description is not None: - eg.description = description - - # Capacity - expand_capacity(eg, module, is_update, do_not_update) - # Strategy - expand_strategy(eg, module) 
- # Scaling - expand_scaling(eg, module) - # Third party integrations - expand_integrations(eg, module) - # Compute - expand_compute(eg, module, is_update, do_not_update) - # Multai - expand_multai(eg, module) - # Scheduling - expand_scheduled_tasks(eg, module) - - return eg - - -def expand_compute(eg, module, is_update, do_not_update): - elastic_ips = module.params["elastic_ips"] - on_demand_instance_type = module.params.get("on_demand_instance_type") - spot_instance_types = module.params["spot_instance_types"] - ebs_volume_pool = module.params["ebs_volume_pool"] - availability_zones_list = module.params["availability_zones"] - product = module.params.get("product") - - eg_compute = spotinst.aws_elastigroup.Compute() - - if product is not None: - # Only put product on group creation - if is_update is not True: - eg_compute.product = product - - if elastic_ips is not None: - eg_compute.elastic_ips = elastic_ips - - if on_demand_instance_type or spot_instance_types is not None: - eg_instance_types = spotinst.aws_elastigroup.InstanceTypes() - - if on_demand_instance_type is not None: - eg_instance_types.spot = spot_instance_types - if spot_instance_types is not None: - eg_instance_types.ondemand = on_demand_instance_type - - if eg_instance_types.spot is not None or eg_instance_types.ondemand is not None: - eg_compute.instance_types = eg_instance_types - - expand_ebs_volume_pool(eg_compute, ebs_volume_pool) - - eg_compute.availability_zones = expand_list(availability_zones_list, az_fields, "AvailabilityZone") - - expand_launch_spec(eg_compute, module, is_update, do_not_update) - - eg.compute = eg_compute - - -def expand_ebs_volume_pool(eg_compute, ebs_volumes_list): - if ebs_volumes_list is not None: - eg_volumes = [] - - for volume in ebs_volumes_list: - eg_volume = spotinst.aws_elastigroup.EbsVolume() - - if volume.get("device_name") is not None: - eg_volume.device_name = volume.get("device_name") - if volume.get("volume_ids") is not None: - eg_volume.volume_ids = 
volume.get("volume_ids") - - if eg_volume.device_name is not None: - eg_volumes.append(eg_volume) - - if len(eg_volumes) > 0: - eg_compute.ebs_volume_pool = eg_volumes - - -def expand_launch_spec(eg_compute, module, is_update, do_not_update): - eg_launch_spec = expand_fields(lspec_fields, module.params, "LaunchSpecification") - - if module.params["iam_role_arn"] is not None or module.params["iam_role_name"] is not None: - eg_launch_spec.iam_role = expand_fields(iam_fields, module.params, "IamRole") - - tags = module.params["tags"] - load_balancers = module.params["load_balancers"] - target_group_arns = module.params["target_group_arns"] - block_device_mappings = module.params["block_device_mappings"] - network_interfaces = module.params["network_interfaces"] - - if is_update is True: - if "image_id" in do_not_update: - delattr(eg_launch_spec, "image_id") - - expand_tags(eg_launch_spec, tags) - - expand_load_balancers(eg_launch_spec, load_balancers, target_group_arns) - - expand_block_device_mappings(eg_launch_spec, block_device_mappings) - - expand_network_interfaces(eg_launch_spec, network_interfaces) - - eg_compute.launch_specification = eg_launch_spec - - -def expand_integrations(eg, module): - rancher = module.params.get("rancher") - mesosphere = module.params.get("mesosphere") - ecs = module.params.get("ecs") - kubernetes = module.params.get("kubernetes") - right_scale = module.params.get("right_scale") - opsworks = module.params.get("opsworks") - chef = module.params.get("chef") - - integration_exists = False - - eg_integrations = spotinst.aws_elastigroup.ThirdPartyIntegrations() - - if mesosphere is not None: - eg_integrations.mesosphere = expand_fields(mesosphere_fields, mesosphere, "Mesosphere") - integration_exists = True - - if ecs is not None: - eg_integrations.ecs = expand_fields(ecs_fields, ecs, "EcsConfiguration") - integration_exists = True - - if kubernetes is not None: - eg_integrations.kubernetes = expand_fields(kubernetes_fields, kubernetes, 
"KubernetesConfiguration") - integration_exists = True - - if right_scale is not None: - eg_integrations.right_scale = expand_fields(right_scale_fields, right_scale, "RightScaleConfiguration") - integration_exists = True - - if opsworks is not None: - eg_integrations.opsworks = expand_fields(opsworks_fields, opsworks, "OpsWorksConfiguration") - integration_exists = True - - if rancher is not None: - eg_integrations.rancher = expand_fields(rancher_fields, rancher, "Rancher") - integration_exists = True - - if chef is not None: - eg_integrations.chef = expand_fields(chef_fields, chef, "ChefConfiguration") - integration_exists = True - - if integration_exists: - eg.third_parties_integration = eg_integrations - - -def expand_capacity(eg, module, is_update, do_not_update): - eg_capacity = expand_fields(capacity_fields, module.params, "Capacity") - - if is_update is True: - delattr(eg_capacity, "unit") - - if "target" in do_not_update: - delattr(eg_capacity, "target") - - eg.capacity = eg_capacity - - -def expand_strategy(eg, module): - persistence = module.params.get("persistence") - signals = module.params.get("signals") - - eg_strategy = expand_fields(strategy_fields, module.params, "Strategy") - - terminate_at_end_of_billing_hour = module.params.get("terminate_at_end_of_billing_hour") - - if terminate_at_end_of_billing_hour is not None: - eg_strategy.eg_scaling_strategy = expand_fields(scaling_strategy_fields, module.params, "ScalingStrategy") - - if persistence is not None: - eg_strategy.persistence = expand_fields(persistence_fields, persistence, "Persistence") - - if signals is not None: - eg_signals = expand_list(signals, signal_fields, "Signal") - - if len(eg_signals) > 0: - eg_strategy.signals = eg_signals - - eg.strategy = eg_strategy - - -def expand_multai(eg, module): - multai_load_balancers = module.params.get("multai_load_balancers") - - eg_multai = expand_fields(multai_fields, module.params, "Multai") - - if multai_load_balancers is not None: - 
eg_multai_load_balancers = expand_list(multai_load_balancers, multai_lb_fields, "MultaiLoadBalancer") - - if len(eg_multai_load_balancers) > 0: - eg_multai.balancers = eg_multai_load_balancers - eg.multai = eg_multai - - -def expand_scheduled_tasks(eg, module): - scheduled_tasks = module.params.get("scheduled_tasks") - - if scheduled_tasks is not None: - eg_scheduling = spotinst.aws_elastigroup.Scheduling() - - eg_tasks = expand_list(scheduled_tasks, scheduled_task_fields, "ScheduledTask") - - if len(eg_tasks) > 0: - eg_scheduling.tasks = eg_tasks - eg.scheduling = eg_scheduling - - -def expand_load_balancers(eg_launchspec, load_balancers, target_group_arns): - if load_balancers is not None or target_group_arns is not None: - eg_load_balancers_config = spotinst.aws_elastigroup.LoadBalancersConfig() - eg_total_lbs = [] - - if load_balancers is not None: - for elb_name in load_balancers: - eg_elb = spotinst.aws_elastigroup.LoadBalancer() - if elb_name is not None: - eg_elb.name = elb_name - eg_elb.type = "CLASSIC" - eg_total_lbs.append(eg_elb) - - if target_group_arns is not None: - for target_arn in target_group_arns: - eg_elb = spotinst.aws_elastigroup.LoadBalancer() - if target_arn is not None: - eg_elb.arn = target_arn - eg_elb.type = "TARGET_GROUP" - eg_total_lbs.append(eg_elb) - - if len(eg_total_lbs) > 0: - eg_load_balancers_config.load_balancers = eg_total_lbs - eg_launchspec.load_balancers_config = eg_load_balancers_config - - -def expand_tags(eg_launchspec, tags): - if tags is not None: - eg_tags = [] - - for tag in tags: - eg_tag = spotinst.aws_elastigroup.Tag() - if tag: - eg_tag.tag_key, eg_tag.tag_value = list(tag.items())[0] - - eg_tags.append(eg_tag) - - if len(eg_tags) > 0: - eg_launchspec.tags = eg_tags - - -def expand_block_device_mappings(eg_launchspec, bdms): - if bdms is not None: - eg_bdms = [] - - for bdm in bdms: - eg_bdm = expand_fields(bdm_fields, bdm, "BlockDeviceMapping") - - if bdm.get("ebs") is not None: - eg_bdm.ebs = 
expand_fields(ebs_fields, bdm.get("ebs"), "EBS") - - eg_bdms.append(eg_bdm) - - if len(eg_bdms) > 0: - eg_launchspec.block_device_mappings = eg_bdms - - -def expand_network_interfaces(eg_launchspec, enis): - if enis is not None: - eg_enis = [] - - for eni in enis: - eg_eni = expand_fields(eni_fields, eni, "NetworkInterface") - - eg_pias = expand_list(eni.get("private_ip_addresses"), private_ip_fields, "PrivateIpAddress") - - if eg_pias is not None: - eg_eni.private_ip_addresses = eg_pias - - eg_enis.append(eg_eni) - - if len(eg_enis) > 0: - eg_launchspec.network_interfaces = eg_enis - - -def expand_scaling(eg, module): - up_scaling_policies = module.params["up_scaling_policies"] - down_scaling_policies = module.params["down_scaling_policies"] - target_tracking_policies = module.params["target_tracking_policies"] - - eg_scaling = spotinst.aws_elastigroup.Scaling() - - if up_scaling_policies is not None: - eg_up_scaling_policies = expand_scaling_policies(up_scaling_policies) - if len(eg_up_scaling_policies) > 0: - eg_scaling.up = eg_up_scaling_policies - - if down_scaling_policies is not None: - eg_down_scaling_policies = expand_scaling_policies(down_scaling_policies) - if len(eg_down_scaling_policies) > 0: - eg_scaling.down = eg_down_scaling_policies - - if target_tracking_policies is not None: - eg_target_tracking_policies = expand_target_tracking_policies(target_tracking_policies) - if len(eg_target_tracking_policies) > 0: - eg_scaling.target = eg_target_tracking_policies - - if eg_scaling.down is not None or eg_scaling.up is not None or eg_scaling.target is not None: - eg.scaling = eg_scaling - - -def expand_list(items, fields, class_name): - if items is not None: - new_objects_list = [] - for item in items: - new_obj = expand_fields(fields, item, class_name) - new_objects_list.append(new_obj) - - return new_objects_list - - -def expand_fields(fields, item, class_name): - class_ = getattr(spotinst.aws_elastigroup, class_name) - new_obj = class_() - - # Handle 
primitive fields - if item is not None: - for field in fields: - if isinstance(field, dict): - ansible_field_name = field["ansible_field_name"] - spotinst_field_name = field["spotinst_field_name"] - else: - ansible_field_name = field - spotinst_field_name = field - if item.get(ansible_field_name) is not None: - setattr(new_obj, spotinst_field_name, item.get(ansible_field_name)) - - return new_obj - - -def expand_scaling_policies(scaling_policies): - eg_scaling_policies = [] - - for policy in scaling_policies: - eg_policy = expand_fields(scaling_policy_fields, policy, "ScalingPolicy") - eg_policy.action = expand_fields(action_fields, policy, "ScalingPolicyAction") - eg_scaling_policies.append(eg_policy) - - return eg_scaling_policies - - -def expand_target_tracking_policies(tracking_policies): - eg_tracking_policies = [] - - for policy in tracking_policies: - eg_policy = expand_fields(tracking_policy_fields, policy, "TargetTrackingPolicy") - eg_tracking_policies.append(eg_policy) - - return eg_tracking_policies - - -def main(): - fields = dict( - account_id=dict(type="str"), - availability_vs_cost=dict(type="str", required=True), - availability_zones=dict(type="list", elements="dict", required=True), - block_device_mappings=dict(type="list", elements="dict"), - chef=dict(type="dict"), - credentials_path=dict(type="path", default="~/.spotinst/credentials"), - do_not_update=dict(default=[], type="list", elements="str"), - down_scaling_policies=dict(type="list", elements="dict"), - draining_timeout=dict(type="int"), - ebs_optimized=dict(type="bool"), - ebs_volume_pool=dict(type="list", elements="dict"), - ecs=dict(type="dict"), - elastic_beanstalk=dict(type="dict"), - elastic_ips=dict(type="list", elements="str"), - fallback_to_od=dict(type="bool"), - id=dict(type="str"), - health_check_grace_period=dict(type="int"), - health_check_type=dict(type="str"), - health_check_unhealthy_duration_before_replacement=dict(type="int"), - iam_role_arn=dict(type="str"), - 
iam_role_name=dict(type="str"), - image_id=dict(type="str", required=True), - key_pair=dict(type="str", no_log=False), - kubernetes=dict(type="dict"), - lifetime_period=dict(type="int"), - load_balancers=dict(type="list", elements="str"), - max_size=dict(type="int", required=True), - mesosphere=dict(type="dict"), - min_size=dict(type="int", required=True), - monitoring=dict(type="str"), - multai_load_balancers=dict(type="list", elements="dict"), - multai_token=dict(type="str", no_log=True), - name=dict(type="str", required=True), - network_interfaces=dict(type="list", elements="dict"), - on_demand_count=dict(type="int"), - on_demand_instance_type=dict(type="str"), - opsworks=dict(type="dict"), - persistence=dict(type="dict"), - product=dict(type="str", required=True), - rancher=dict(type="dict"), - right_scale=dict(type="dict"), - risk=dict(type="int"), - roll_config=dict(type="dict"), - scheduled_tasks=dict(type="list", elements="dict"), - security_group_ids=dict(type="list", elements="str", required=True), - shutdown_script=dict(type="str"), - signals=dict(type="list", elements="dict"), - spin_up_time=dict(type="int"), - spot_instance_types=dict(type="list", elements="str", required=True), - state=dict(default="present", choices=["present", "absent"]), - tags=dict(type="list", elements="dict"), - target=dict(type="int", required=True), - target_group_arns=dict(type="list", elements="str"), - tenancy=dict(type="str"), - terminate_at_end_of_billing_hour=dict(type="bool"), - token=dict(type="str", no_log=True), - unit=dict(type="str"), - user_data=dict(type="str"), - utilize_reserved_instances=dict(type="bool"), - uniqueness_by=dict(default="name", choices=["name", "id"]), - up_scaling_policies=dict(type="list", elements="dict"), - target_tracking_policies=dict(type="list", elements="dict"), - wait_for_instances=dict(type="bool", default=False), - wait_timeout=dict(type="int"), - ) - - module = AnsibleModule(argument_spec=fields) - - if not HAS_SPOTINST_SDK: - 
module.fail_json(msg="the Spotinst SDK library is required. (pip install spotinst_sdk)") - - # Retrieve creds file variables - creds_file_loaded_vars = dict() - - credentials_path = module.params.get("credentials_path") - - try: - with open(credentials_path) as creds: - for line in creds: - eq_index = line.find("=") - var_name = line[:eq_index].strip() - string_value = line[eq_index + 1 :].strip() - creds_file_loaded_vars[var_name] = string_value - except OSError: - pass - # End of creds file retrieval - - token = module.params.get("token") - if not token: - token = os.environ.get("SPOTINST_TOKEN") - if not token: - token = creds_file_loaded_vars.get("token") - - account = module.params.get("account_id") - if not account: - account = os.environ.get("SPOTINST_ACCOUNT_ID") or os.environ.get("ACCOUNT") - if not account: - account = creds_file_loaded_vars.get("account") - - client = spotinst.SpotinstClient(auth_token=token, print_output=False) - - if account is not None: - client = spotinst.SpotinstClient(auth_token=token, print_output=False, account_id=account) - - group_id, message, has_changed = handle_elastigroup(client=client, module=module) - - instances = retrieve_group_instances(client=client, module=module, group_id=group_id) - - module.exit_json(changed=has_changed, group_id=group_id, message=message, instances=instances) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/typetalk.py b/plugins/modules/typetalk.py deleted file mode 100644 index baf2f94fde..0000000000 --- a/plugins/modules/typetalk.py +++ /dev/null @@ -1,138 +0,0 @@ -#!/usr/bin/python -# -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -DOCUMENTATION = r""" -module: typetalk -short_description: Send a message to typetalk -description: - - Send a message to typetalk using typetalk API. 
-deprecated: - removed_in: 13.0.0 - why: The typetalk service will be discontinued on Dec 2025. See U(https://nulab.com/blog/company-news/typetalk-sunsetting/). - alternative: There is none. -extends_documentation_fragment: - - community.general.attributes -attributes: - check_mode: - support: none - diff_mode: - support: none -options: - client_id: - type: str - description: - - OAuth2 client ID. - required: true - client_secret: - type: str - description: - - OAuth2 client secret. - required: true - topic: - type: int - description: - - Topic ID to post message. - required: true - msg: - type: str - description: - - Message body. - required: true -requirements: [json] -author: "Takashi Someda (@tksmd)" -""" - -EXAMPLES = r""" -- name: Send a message to typetalk - community.general.typetalk: - client_id: 12345 - client_secret: 12345 - topic: 1 - msg: install completed -""" - -import json -from urllib.parse import urlencode - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import ConnectionError, fetch_url - - -def do_request(module, url, params, headers=None): - data = urlencode(params) - if headers is None: - headers = dict() - headers = dict( - headers, - **{ - "User-Agent": "Ansible/typetalk module", - }, - ) - r, info = fetch_url(module, url, data=data, headers=headers) - if info["status"] != 200: - exc = ConnectionError(info["msg"]) - exc.code = info["status"] - raise exc - return r - - -def get_access_token(module, client_id, client_secret): - params = { - "client_id": client_id, - "client_secret": client_secret, - "grant_type": "client_credentials", - "scope": "topic.post", - } - res = do_request(module, "https://typetalk.com/oauth2/access_token", params) - return json.load(res)["access_token"] - - -def send_message(module, client_id, client_secret, topic, msg): - """ - send message to typetalk - """ - try: - access_token = get_access_token(module, client_id, client_secret) - url = 
f"https://typetalk.com/api/v1/topics/{topic}" - headers = { - "Authorization": f"Bearer {access_token}", - } - do_request(module, url, {"message": msg}, headers) - return True, {"access_token": access_token} - except ConnectionError as e: - return False, e - - -def main(): - module = AnsibleModule( - argument_spec=dict( - client_id=dict(required=True), - client_secret=dict(required=True, no_log=True), - topic=dict(required=True, type="int"), - msg=dict(required=True), - ), - supports_check_mode=False, - ) - - if not json: - module.fail_json(msg="json module is required") - - client_id = module.params["client_id"] - client_secret = module.params["client_secret"] - topic = module.params["topic"] - msg = module.params["msg"] - - res, error = send_message(module, client_id, client_secret, topic, msg) - if not res: - module.fail_json(msg=f"fail to send message with response code {error.code}") - - module.exit_json(changed=True, topic=topic, msg=msg) - - -if __name__ == "__main__": - main() diff --git a/tests/integration/targets/sensu_client/aliases b/tests/integration/targets/sensu_client/aliases deleted file mode 100644 index bca9905ba6..0000000000 --- a/tests/integration/targets/sensu_client/aliases +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -azp/posix/1 -needs/root diff --git a/tests/integration/targets/sensu_client/tasks/main.yml b/tests/integration/targets/sensu_client/tasks/main.yml deleted file mode 100644 index 61e49cda02..0000000000 --- a/tests/integration/targets/sensu_client/tasks/main.yml +++ /dev/null @@ -1,179 +0,0 @@ ---- -#################################################################### -# WARNING: These are designed specifically for Ansible tests # -# and should not be used as examples of how to write Ansible roles # 
-#################################################################### - -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- name: Creating a client if the directory doesn't exist should work - sensu_client: - subscriptions: - - default - -- name: Set variable for client file - set_fact: - client_file: "/etc/sensu/conf.d/client.json" - -- name: Insert invalid JSON in the client file - lineinfile: - state: "present" - create: "yes" - path: "{{ client_file }}" - line: "{'foo' = bar}" - -- name: Configure Sensu client with an existing invalid file - sensu_client: - name: "client" - state: "present" - subscriptions: - - default - register: client - -- name: Retrieve configuration file - slurp: - src: "{{ client_file }}" - register: client_config - -- name: Assert that client data was set successfully and properly - assert: - that: - - "client is successful" - - "client is changed" - - "client['config']['name'] == 'client'" - - "'default' in client['config']['subscriptions']" - - "client['file'] == client_file" - -- name: Assert that the client configuration file is actually configured properly - vars: - config: "{{ client_config.content | b64decode | from_json }}" - assert: - that: - - "config['client']['keepalives'] == true" - - "config['client']['name'] == 'client'" - - "config['client']['safe_mode'] == false" - - "'default' in config['client']['subscriptions']" - -- name: Delete Sensu client configuration - sensu_client: - state: "absent" - register: client_delete - -- name: Delete Sensu client configuration (again) - sensu_client: - state: "absent" - register: client_delete_twice - -- name: Retrieve configuration file stat - stat: - path: "{{ client_file }}" - register: client_stat - -- name: Assert that client deletion was successful - assert: - that: - - "client_delete is successful" - - "client_delete is changed" - - 
"client_delete_twice is successful" - - "client_delete_twice is not changed" - - "client_stat.stat.exists == false" - -- name: Configuring a client without subscriptions should fail - sensu_client: - name: "failure" - register: failure - ignore_errors: true - -- name: Assert failure to create client - assert: - that: - - failure is failed - - "'the following are missing: subscriptions' in failure['msg']" - -- name: Configure a new client from scratch with custom parameters - sensu_client: - name: "custom" - address: "host.fqdn" - subscriptions: - - "default" - - "webserver" - redact: - - "password" - socket: - bind: "127.0.0.1" - port: "3030" - keepalive: - thresholds: - warning: "180" - critical: "300" - handlers: - - "email" - custom: - - broadcast: "irc" - occurrences: "3" - register: client - -- name: Configure a new client from scratch with custom parameters (twice) - sensu_client: - name: "custom" - address: "host.fqdn" - subscriptions: - - "default" - - "webserver" - redact: - - "password" - socket: - bind: "127.0.0.1" - port: "3030" - keepalive: - thresholds: - warning: "180" - critical: "300" - handlers: - - "email" - custom: - - broadcast: "irc" - occurrences: "3" - register: client_twice - -- name: Retrieve configuration file - slurp: - src: "{{ client_file }}" - register: client_config - -- name: Assert that client data was set successfully and properly - assert: - that: - - "client is successful" - - "client is changed" - - "client_twice is successful" - - "client_twice is not changed" - - "client['config']['name'] == 'custom'" - - "client['config']['address'] == 'host.fqdn'" - - "'default' in client['config']['subscriptions']" - - "'webserver' in client['config']['subscriptions']" - - "'password' in client['config']['redact']" - - "client['config']['keepalive']['thresholds']['warning'] == '180'" - - "client['config']['keepalive']['thresholds']['critical'] == '300'" - - "'email' in client['config']['keepalive']['handlers']" - - 
"client['config']['keepalive']['occurrences'] == '3'" - - "client['file'] == client_file" - -- name: Assert that the client configuration file is actually configured properly - vars: - config: "{{ client_config.content | b64decode | from_json }}" - assert: - that: - - "config['client']['name'] == 'custom'" - - "config['client']['address'] == 'host.fqdn'" - - "config['client']['keepalives'] == true" - - "config['client']['safe_mode'] == false" - - "'default' in config['client']['subscriptions']" - - "'webserver' in config['client']['subscriptions']" - - "'password' in config['client']['redact']" - - "config['client']['keepalive']['thresholds']['warning'] == '180'" - - "config['client']['keepalive']['thresholds']['critical'] == '300'" - - "'email' in config['client']['keepalive']['handlers']" - - "config['client']['keepalive']['occurrences'] == '3'" diff --git a/tests/integration/targets/sensu_handler/aliases b/tests/integration/targets/sensu_handler/aliases deleted file mode 100644 index bca9905ba6..0000000000 --- a/tests/integration/targets/sensu_handler/aliases +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -azp/posix/1 -needs/root diff --git a/tests/integration/targets/sensu_handler/tasks/main.yml b/tests/integration/targets/sensu_handler/tasks/main.yml deleted file mode 100644 index ec73a14c44..0000000000 --- a/tests/integration/targets/sensu_handler/tasks/main.yml +++ /dev/null @@ -1,129 +0,0 @@ ---- -#################################################################### -# WARNING: These are designed specifically for Ansible tests # -# and should not be used as examples of how to write Ansible roles # -#################################################################### - -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- name: Creating a handler if the directory doesn't exist should work - sensu_handler: - name: "handler" - type: "pipe" - command: "/bin/bash" - state: "present" - -- name: Insert junk JSON in a handlers file - lineinfile: - state: "present" - create: "yes" - path: "/etc/sensu/conf.d/handlers/handler.json" - line: "{'foo' = bar}" - -- name: Configure a handler with an existing invalid file - sensu_handler: - name: "handler" - type: "pipe" - command: "/bin/bash" - state: "present" - register: handler - -- name: Configure a handler (again) - sensu_handler: - name: "handler" - type: "pipe" - command: "/bin/bash" - state: "present" - register: handler_twice - -- name: Retrieve configuration file - slurp: - src: "{{ handler['file'] }}" - register: handler_config - -- name: Assert that handler data was set successfully and properly - assert: - that: - - "handler is successful" - - "handler is changed" - - "handler_twice is successful" - - "handler_twice is not changed" - - "handler['name'] == 'handler'" - - "handler['file'] == '/etc/sensu/conf.d/handlers/handler.json'" - - "handler['config']['type'] == 'pipe'" - - "handler['config']['command'] == '/bin/bash'" - - "handler['config']['timeout'] == 10" - - "handler['config']['handle_flapping'] == false" - - "handler['config']['handle_silenced'] == false" - -- name: Assert that the handler configuration file is actually configured properly - vars: - config: "{{ handler_config.content | b64decode | from_json }}" - assert: - that: - - "'handler' in config['handlers']" - - "config['handlers']['handler']['type'] == 'pipe'" - - "config['handlers']['handler']['command'] == '/bin/bash'" - - "config['handlers']['handler']['timeout'] == 10" - - "config['handlers']['handler']['handle_flapping'] == false" - - "config['handlers']['handler']['handle_silenced'] == false" - -- name: Delete Sensu handler configuration - sensu_handler: - name: "handler" 
- state: "absent" - register: handler_delete - -- name: Delete Sensu handler configuration (again) - sensu_handler: - name: "handler" - state: "absent" - register: handler_delete_twice - -- name: Retrieve configuration file stat - stat: - path: "{{ handler['file'] }}" - register: handler_stat - -- name: Assert that handler deletion was successful - assert: - that: - - "handler_delete is successful" - - "handler_delete is changed" - - "handler_delete_twice is successful" - - "handler_delete_twice is not changed" - - "handler_stat.stat.exists == false" - -- name: Configuring a handler without a name should fail - sensu_handler: - type: "pipe" - command: "/bin/bash" - register: failure - ignore_errors: true - -- name: Assert that configuring a handler without a name fails - assert: - that: - - failure is failed - - "'required arguments: name' in failure['msg']" - -- name: Configuring a handler without a type should fail - sensu_handler: - name: "pipe" - command: "/bin/bash" - register: failure - ignore_errors: true - -- name: Assert that configuring a handler without a type fails - assert: - that: - - failure is failed - - "'the following are missing: type' in failure['msg']" - -- include_tasks: pipe.yml -- include_tasks: tcp.yml -- include_tasks: udp.yml -- include_tasks: set.yml -- include_tasks: transport.yml diff --git a/tests/integration/targets/sensu_handler/tasks/pipe.yml b/tests/integration/targets/sensu_handler/tasks/pipe.yml deleted file mode 100644 index 46fe240808..0000000000 --- a/tests/integration/targets/sensu_handler/tasks/pipe.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -# Note: Pipe handlers are also tested and used as part of basic main.yml coverage -- name: Configuring a handler with missing pipe parameters should fail - sensu_handler: - name: "pipe" - type: 
"pipe" - register: failure - ignore_errors: true - -- name: Assert that configuring a handler with missing pipe parameters fails - assert: - that: - - failure is failed - - "'the following are missing: command' in failure['msg']" - -- name: Configure a handler with pipe parameters - sensu_handler: - name: "pipe" - type: "pipe" - command: "/bin/bash" - register: handler diff --git a/tests/integration/targets/sensu_handler/tasks/set.yml b/tests/integration/targets/sensu_handler/tasks/set.yml deleted file mode 100644 index e9a86057c2..0000000000 --- a/tests/integration/targets/sensu_handler/tasks/set.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- name: Configuring a handler with missing set parameters should fail - sensu_handler: - name: "set" - type: "set" - register: failure - ignore_errors: true - -- name: Assert that configuring a handler with missing set parameters fails - assert: - that: - - failure is failed - - "'the following are missing: handlers' in failure['msg']" - -- name: Configure a set handler - sensu_handler: - name: "set" - type: "set" - handlers: - - anotherhandler - register: handler - -- name: Retrieve configuration file - slurp: - src: "{{ handler['file'] }}" - register: handler_config - -- name: Validate set handler return data - assert: - that: - - "handler is successful" - - "handler is changed" - - "handler['name'] == 'set'" - - "handler['file'] == '/etc/sensu/conf.d/handlers/set.json'" - - "handler['config']['type'] == 'set'" - - "'anotherhandler' in handler['config']['handlers']" - - "handler['config']['handle_flapping'] == false" - - "handler['config']['handle_silenced'] == false" - -- name: Assert that the handler configuration file is actually configured properly - vars: - config: "{{ handler_config.content | b64decode | from_json }}" - assert: - 
that: - - "'set' in config['handlers']" - - "config['handlers']['set']['type'] == 'set'" - - "'anotherhandler' in config['handlers']['set']['handlers']" - - "config['handlers']['set']['handle_flapping'] == false" - - "config['handlers']['set']['handle_silenced'] == false" diff --git a/tests/integration/targets/sensu_handler/tasks/tcp.yml b/tests/integration/targets/sensu_handler/tasks/tcp.yml deleted file mode 100644 index a5db1d3973..0000000000 --- a/tests/integration/targets/sensu_handler/tasks/tcp.yml +++ /dev/null @@ -1,56 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- name: Configuring a handler with missing tcp parameters should fail - sensu_handler: - name: "tcp" - type: "tcp" - register: failure - ignore_errors: true - -- name: Assert that configuring a handler with missing tcp parameters fails - assert: - that: - - failure is failed - - "'the following are missing: socket' in failure['msg']" - -- name: Configure a tcp handler - sensu_handler: - name: "tcp" - type: "tcp" - socket: - host: 127.0.0.1 - port: 8000 - register: handler - -- name: Retrieve configuration file - slurp: - src: "{{ handler['file'] }}" - register: handler_config - -- name: Validate tcp handler return data - assert: - that: - - "handler is successful" - - "handler is changed" - - "handler['name'] == 'tcp'" - - "handler['file'] == '/etc/sensu/conf.d/handlers/tcp.json'" - - "handler['config']['type'] == 'tcp'" - - "handler['config']['socket']['host'] == '127.0.0.1'" - - "handler['config']['socket']['port'] == 8000" - - "handler['config']['handle_flapping'] == false" - - "handler['config']['handle_silenced'] == false" - -- name: Assert that the handler configuration file is actually configured properly - vars: - config: "{{ handler_config.content | b64decode | from_json }}" - assert: - that: - - "'tcp' in config['handlers']" - 
- "config['handlers']['tcp']['type'] == 'tcp'" - - "config['handlers']['tcp']['socket']['host'] == '127.0.0.1'" - - "config['handlers']['tcp']['socket']['port'] == 8000" - - "config['handlers']['tcp']['handle_flapping'] == false" - - "config['handlers']['tcp']['handle_silenced'] == false" diff --git a/tests/integration/targets/sensu_handler/tasks/transport.yml b/tests/integration/targets/sensu_handler/tasks/transport.yml deleted file mode 100644 index fa2563fa9b..0000000000 --- a/tests/integration/targets/sensu_handler/tasks/transport.yml +++ /dev/null @@ -1,56 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- name: Configuring a handler with missing transport parameters should fail - sensu_handler: - name: "transport" - type: "transport" - register: failure - ignore_errors: true - -- name: Assert that configuring a handler with missing transport parameters fails - assert: - that: - - failure is failed - - "'the following are missing: pipe' in failure['msg']" - -- name: Configure a transport handler - sensu_handler: - name: "transport" - type: "transport" - pipe: - type: "topic" - name: "transport_handler" - register: handler - -- name: Retrieve configuration file - slurp: - src: "{{ handler['file'] }}" - register: handler_config - -- name: Validate transport handler return data - assert: - that: - - "handler is successful" - - "handler is changed" - - "handler['name'] == 'transport'" - - "handler['file'] == '/etc/sensu/conf.d/handlers/transport.json'" - - "handler['config']['type'] == 'transport'" - - "handler['config']['pipe']['type'] == 'topic'" - - "handler['config']['pipe']['name'] == 'transport_handler'" - - "handler['config']['handle_flapping'] == false" - - "handler['config']['handle_silenced'] == false" - -- name: Assert that the handler configuration file is actually configured properly - vars: - 
config: "{{ handler_config.content | b64decode | from_json }}" - assert: - that: - - "'transport' in config['handlers']" - - "config['handlers']['transport']['type'] == 'transport'" - - "config['handlers']['transport']['pipe']['type'] == 'topic'" - - "config['handlers']['transport']['pipe']['name'] == 'transport_handler'" - - "config['handlers']['transport']['handle_flapping'] == false" - - "config['handlers']['transport']['handle_silenced'] == false" diff --git a/tests/integration/targets/sensu_handler/tasks/udp.yml b/tests/integration/targets/sensu_handler/tasks/udp.yml deleted file mode 100644 index 60e88bb986..0000000000 --- a/tests/integration/targets/sensu_handler/tasks/udp.yml +++ /dev/null @@ -1,56 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- name: Configuring a handler with missing udp parameters should fail - sensu_handler: - name: "udp" - type: "udp" - register: failure - ignore_errors: true - -- name: Assert that configuring a handler with missing udp parameters fails - assert: - that: - - failure is failed - - "'the following are missing: socket' in failure['msg']" - -- name: Configure a udp handler - sensu_handler: - name: "udp" - type: "udp" - socket: - host: 127.0.0.1 - port: 8000 - register: handler - -- name: Retrieve configuration file - slurp: - src: "{{ handler['file'] }}" - register: handler_config - -- name: Validate udp handler return data - assert: - that: - - "handler is successful" - - "handler is changed" - - "handler['name'] == 'udp'" - - "handler['file'] == '/etc/sensu/conf.d/handlers/udp.json'" - - "handler['config']['type'] == 'udp'" - - "handler['config']['socket']['host'] == '127.0.0.1'" - - "handler['config']['socket']['port'] == 8000" - - "handler['config']['handle_flapping'] == false" - - "handler['config']['handle_silenced'] == false" - -- name: Assert that the 
handler configuration file is actually configured properly - vars: - config: "{{ handler_config.content | b64decode | from_json }}" - assert: - that: - - "'udp' in config['handlers']" - - "config['handlers']['udp']['type'] == 'udp'" - - "config['handlers']['udp']['socket']['host'] == '127.0.0.1'" - - "config['handlers']['udp']['socket']['port'] == 8000" - - "config['handlers']['udp']['handle_flapping'] == false" - - "config['handlers']['udp']['handle_silenced'] == false" diff --git a/tests/unit/plugins/module_utils/cloud/test_backoff.py b/tests/unit/plugins/module_utils/cloud/test_backoff.py deleted file mode 100644 index 5e5f41aa9f..0000000000 --- a/tests/unit/plugins/module_utils/cloud/test_backoff.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) Ansible project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -import random -import unittest - -from ansible_collections.community.general.plugins.module_utils.cloud import _exponential_backoff, _full_jitter_backoff - - -class ExponentialBackoffStrategyTestCase(unittest.TestCase): - def test_no_retries(self): - strategy = _exponential_backoff(retries=0) - result = list(strategy()) - self.assertEqual(result, [], "list should be empty") - - def test_exponential_backoff(self): - strategy = _exponential_backoff(retries=5, delay=1, backoff=2) - result = list(strategy()) - self.assertEqual(result, [1, 2, 4, 8, 16]) - - def test_max_delay(self): - strategy = _exponential_backoff(retries=7, delay=1, backoff=2, max_delay=60) - result = list(strategy()) - self.assertEqual(result, [1, 2, 4, 8, 16, 32, 60]) - - def test_max_delay_none(self): - strategy = _exponential_backoff(retries=7, delay=1, backoff=2, max_delay=None) - result = list(strategy()) - self.assertEqual(result, [1, 2, 4, 8, 16, 32, 64]) - - -class FullJitterBackoffStrategyTestCase(unittest.TestCase): - def 
test_no_retries(self): - strategy = _full_jitter_backoff(retries=0) - result = list(strategy()) - self.assertEqual(result, [], "list should be empty") - - def test_full_jitter(self): - retries = 5 - seed = 1 - - r = random.Random(seed) - expected = [r.randint(0, 2**i) for i in range(0, retries)] - - strategy = _full_jitter_backoff(retries=retries, delay=1, _random=random.Random(seed)) - result = list(strategy()) - - self.assertEqual(result, expected) diff --git a/tests/unit/plugins/module_utils/test_database.py b/tests/unit/plugins/module_utils/test_database.py deleted file mode 100644 index 2099aebaba..0000000000 --- a/tests/unit/plugins/module_utils/test_database.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright (c) Ansible project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -import pytest - -from ansible_collections.community.general.plugins.module_utils.database import ( - SQLParseError, - is_input_dangerous, - pg_quote_identifier, -) - -# These are all valid strings -# The results are based on interpreting the identifier as a table name -VALID = { - # User quoted - '"public.table"': '"public.table"', - '"public"."table"': '"public"."table"', - '"schema test"."table test"': '"schema test"."table test"', - # We quote part - "public.table": '"public"."table"', - '"public".table': '"public"."table"', - 'public."table"': '"public"."table"', - "schema test.table test": '"schema test"."table test"', - '"schema test".table test': '"schema test"."table test"', - 'schema test."table test"': '"schema test"."table test"', - # Embedded double quotes - 'table "test"': '"table ""test"""', - 'public."table ""test"""': '"public"."table ""test"""', - 'public.table "test"': '"public"."table ""test"""', - 'schema "test".table': '"schema ""test"""."table"', - '"schema ""test""".table': '"schema ""test"""."table"', - 
'"""wat"""."""test"""': '"""wat"""."""test"""', - # Sigh, handle these as well: - '"no end quote': '"""no end quote"', - 'schema."table': '"schema"."""table"', - '"schema.table': '"""schema"."table"', - 'schema."table.something': '"schema"."""table"."something"', - # Embedded dots - '"schema.test"."table.test"': '"schema.test"."table.test"', - '"schema.".table': '"schema."."table"', - '"schema."."table"': '"schema."."table"', - 'schema.".table"': '"schema".".table"', - '"schema".".table"': '"schema".".table"', - '"schema.".".table"': '"schema.".".table"', - # These are valid but maybe not what the user intended - '."table"': '".""table"""', - "table.": '"table."', -} - -INVALID = { - ("test.too.many.dots", "table"): "PostgreSQL does not support table with more than 3 dots", - ('"test.too".many.dots', "database"): "PostgreSQL does not support database with more than 1 dots", - ('test.too."many.dots"', "database"): "PostgreSQL does not support database with more than 1 dots", - ('"test"."too"."many"."dots"', "database"): "PostgreSQL does not support database with more than 1 dots", - ('"test"."too"."many"."dots"', "schema"): "PostgreSQL does not support schema with more than 2 dots", - ('"test"."too"."many"."dots"', "table"): "PostgreSQL does not support table with more than 3 dots", - ('"test"."too"."many"."dots"."for"."column"', "column"): "PostgreSQL does not support column with more than 4 dots", - ('"table "invalid" double quote"', "table"): "User escaped identifiers must escape extra quotes", - ('"schema "invalid"""."table "invalid"', "table"): "User escaped identifiers must escape extra quotes", - ('"schema."table"', "table"): "User escaped identifiers must escape extra quotes", - ('"schema".', "table"): "Identifier name unspecified or unquoted trailing dot", -} - -HOW_MANY_DOTS = ( - ("role", "role", '"role"', "PostgreSQL does not support role with more than 1 dots"), - ("db", "database", '"db"', "PostgreSQL does not support database with more than 1 dots"), 
- ("db.schema", "schema", '"db"."schema"', "PostgreSQL does not support schema with more than 2 dots"), - ("db.schema.table", "table", '"db"."schema"."table"', "PostgreSQL does not support table with more than 3 dots"), - ( - "db.schema.table.column", - "column", - '"db"."schema"."table"."column"', - "PostgreSQL does not support column with more than 4 dots", - ), -) - -VALID_QUOTES = ((test, VALID[test]) for test in sorted(VALID)) -INVALID_QUOTES = ((test[0], test[1], INVALID[test]) for test in sorted(INVALID)) - -IS_STRINGS_DANGEROUS = ( - ("", False), - (" ", False), - ("alternative database", False), - ("backup of TRUNCATED table", False), - ("bob.dropper", False), - ("d'artagnan", False), - ("user_with_select_update_truncate_right", False), - (";DROP DATABASE fluffy_pets_photos", True), - (";drop DATABASE fluffy_pets_photos", True), - ("; TRUNCATE TABLE his_valuable_table", True), - ("; truncate TABLE his_valuable_table", True), - ("'--", True), - ('"--', True), - ("' union select username, password from admin_credentials", True), - ("' UNION SELECT username, password from admin_credentials", True), - ("' intersect select", True), - ("' INTERSECT select", True), - ("' except select", True), - ("' EXCEPT select", True), - (";ALTER TABLE prices", True), - (";alter table prices", True), - ("; UPDATE products SET price = '0'", True), - (";update products SET price = '0'", True), - ("; DELETE FROM products", True), - ("; delete FROM products", True), - ("; SELECT * FROM products", True), - (" ; select * from products", True), -) - - -@pytest.mark.parametrize("identifier, quoted_identifier", VALID_QUOTES) -def test_valid_quotes(identifier, quoted_identifier): - assert pg_quote_identifier(identifier, "table") == quoted_identifier - - -@pytest.mark.parametrize("identifier, id_type, msg", INVALID_QUOTES) -def test_invalid_quotes(identifier, id_type, msg): - with pytest.raises(SQLParseError) as ex: - pg_quote_identifier(identifier, id_type) - - ex.match(msg) - - 
-@pytest.mark.parametrize("identifier, id_type, quoted_identifier, msg", HOW_MANY_DOTS) -def test_how_many_dots(identifier, id_type, quoted_identifier, msg): - assert pg_quote_identifier(identifier, id_type) == quoted_identifier - - with pytest.raises(SQLParseError) as ex: - pg_quote_identifier(f"{identifier}.more", id_type) - - ex.match(msg) - - -@pytest.mark.parametrize("string, result", IS_STRINGS_DANGEROUS) -def test_is_input_dangerous(string, result): - assert is_input_dangerous(string) == result diff --git a/tests/unit/plugins/module_utils/test_known_hosts.py b/tests/unit/plugins/module_utils/test_known_hosts.py deleted file mode 100644 index 66e47e8adf..0000000000 --- a/tests/unit/plugins/module_utils/test_known_hosts.py +++ /dev/null @@ -1,120 +0,0 @@ -# (c) 2015, Michael Scherer -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -import pytest - -from ansible_collections.community.general.plugins.module_utils import known_hosts - -URLS = { - "ssh://one.example.org/example.git": { - "is_ssh_url": True, - "get_fqdn": "one.example.org", - "add_host_key_cmd": " -t rsa one.example.org", - "port": None, - }, - "ssh+git://two.example.org/example.git": { - "is_ssh_url": True, - "get_fqdn": "two.example.org", - "add_host_key_cmd": " -t rsa two.example.org", - "port": None, - }, - "rsync://three.example.org/user/example.git": { - "is_ssh_url": False, - "get_fqdn": "three.example.org", - "add_host_key_cmd": None, # not called for non-ssh urls - "port": None, - }, - "git@four.example.org:user/example.git": { - "is_ssh_url": True, - "get_fqdn": "four.example.org", - "add_host_key_cmd": " -t rsa four.example.org", - "port": None, - }, - "git+ssh://five.example.org/example.git": { - "is_ssh_url": True, - "get_fqdn": "five.example.org", - "add_host_key_cmd": " -t rsa five.example.org", - 
"port": None, - }, - "ssh://six.example.org:21/example.org": { - # ssh on FTP Port? - "is_ssh_url": True, - "get_fqdn": "six.example.org", - "add_host_key_cmd": " -t rsa -p 21 six.example.org", - "port": "21", - }, - "ssh://[2001:DB8::abcd:abcd]/example.git": { - "is_ssh_url": True, - "get_fqdn": "[2001:DB8::abcd:abcd]", - "add_host_key_cmd": " -t rsa [2001:DB8::abcd:abcd]", - "port": None, - }, - "ssh://[2001:DB8::abcd:abcd]:22/example.git": { - "is_ssh_url": True, - "get_fqdn": "[2001:DB8::abcd:abcd]", - "add_host_key_cmd": " -t rsa -p 22 [2001:DB8::abcd:abcd]", - "port": "22", - }, - "username@[2001:DB8::abcd:abcd]/example.git": { - "is_ssh_url": True, - "get_fqdn": "[2001:DB8::abcd:abcd]", - "add_host_key_cmd": " -t rsa [2001:DB8::abcd:abcd]", - "port": None, - }, - "username@[2001:DB8::abcd:abcd]:path/example.git": { - "is_ssh_url": True, - "get_fqdn": "[2001:DB8::abcd:abcd]", - "add_host_key_cmd": " -t rsa [2001:DB8::abcd:abcd]", - "port": None, - }, - "ssh://internal.git.server:7999/repos/repo.git": { - "is_ssh_url": True, - "get_fqdn": "internal.git.server", - "add_host_key_cmd": " -t rsa -p 7999 internal.git.server", - "port": "7999", - }, -} - - -@pytest.mark.parametrize("url, is_ssh_url", ((k, URLS[k]["is_ssh_url"]) for k in sorted(URLS))) -def test_is_ssh_url(url, is_ssh_url): - assert known_hosts.is_ssh_url(url) == is_ssh_url - - -@pytest.mark.parametrize("url, fqdn, port", ((k, URLS[k]["get_fqdn"], URLS[k]["port"]) for k in sorted(URLS))) -def test_get_fqdn_and_port(url, fqdn, port): - assert known_hosts.get_fqdn_and_port(url) == (fqdn, port) - - -@pytest.mark.parametrize( - "fqdn, port, add_host_key_cmd", - ( - (URLS[k]["get_fqdn"], URLS[k]["port"], URLS[k]["add_host_key_cmd"]) - for k in sorted(URLS) - if URLS[k]["is_ssh_url"] - ), -) -def test_add_host_key(mocker, fqdn, port, add_host_key_cmd): - am = mocker.MagicMock() - - get_bin_path = mocker.MagicMock() - get_bin_path.return_value = keyscan_cmd = "/custom/path/ssh-keyscan" - am.get_bin_path = 
get_bin_path - - run_command = mocker.MagicMock() - run_command.return_value = (0, "Needs output, otherwise thinks ssh-keyscan timed out'", "") - am.run_command = run_command - - append_to_file = mocker.MagicMock() - append_to_file.return_value = (None,) - am.append_to_file = append_to_file - - mocker.patch("os.path.isdir", return_value=True) - mocker.patch("os.path.exists", return_value=True) - - known_hosts.add_host_key(am, fqdn, port=port) - run_command.assert_called_with(keyscan_cmd + add_host_key_cmd, environ_update={"LANGUAGE": "C", "LC_ALL": "C"}) diff --git a/tests/unit/plugins/module_utils/test_saslprep.py b/tests/unit/plugins/module_utils/test_saslprep.py deleted file mode 100644 index 682bff916c..0000000000 --- a/tests/unit/plugins/module_utils/test_saslprep.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (c) 2019, Andrey Tuzhilin -# Copyright (c) 2020, Andrew Klychkov (@Andersson007) -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import annotations - -import pytest - -from ansible_collections.community.general.plugins.module_utils.saslprep import saslprep - -VALID = [ - ("", ""), - ("\u00a0", " "), - ("a", "a"), - ("й", "й"), - ("\u30de\u30c8\u30ea\u30c3\u30af\u30b9", "\u30de\u30c8\u30ea\u30c3\u30af\u30b9"), - ("The\u00adM\u00aatr\u2168", "TheMatrIX"), - ("I\u00adX", "IX"), - ("user", "user"), - ("USER", "USER"), - ("\u00aa", "a"), - ("\u2168", "IX"), - ("\u05be\u00a0\u05be", "\u05be\u0020\u05be"), -] - -INVALID = [ - (None, TypeError), - (b"", TypeError), - ("\u0221", ValueError), - ("\u0007", ValueError), - ("\u0627\u0031", ValueError), - ("\ue0001", ValueError), - ("\ue0020", ValueError), - ("\ufff9", ValueError), - ("\ufdd0", ValueError), - ("\u0000", ValueError), - ("\u06dd", ValueError), - ("\uffffD", ValueError), - ("\ud800", ValueError), - ("\u200e", ValueError), - ("\u05be\u00aa\u05be", ValueError), -] - - 
-@pytest.mark.parametrize("source,target", VALID) -def test_saslprep_conversions(source, target): - assert saslprep(source) == target - - -@pytest.mark.parametrize("source,exception", INVALID) -def test_saslprep_exceptions(source, exception): - with pytest.raises(exception): - saslprep(source) diff --git a/tests/unit/plugins/modules/test_cpanm.yaml b/tests/unit/plugins/modules/test_cpanm.yaml index 66f0358d11..a1a0b89c60 100644 --- a/tests/unit/plugins/modules/test_cpanm.yaml +++ b/tests/unit/plugins/modules/test_cpanm.yaml @@ -7,52 +7,6 @@ anchors: environ_true: &env-def-true {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true} environ_false: &env-def-false {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} test_cases: - - id: install_dancer_compatibility - input: - name: Dancer - mode: compatibility - output: - changed: true - cpanm_version: '1.7047' - mocks: - run_command: - - command: [/testbin/cpanm, --version] - environ: *env-def-true - rc: 0 - out: | - cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) - perl version 5.041005 (/usr/local/bin/perl) - err: '' - - command: [/testbin/perl, -le, use Dancer;] - environ: *env-def-false - rc: 2 - out: '' - err: error, not installed - - command: [/testbin/cpanm, Dancer] - environ: *env-def-true - rc: 0 - out: '' - err: '' - - id: install_dancer_already_installed_compatibility - input: - name: Dancer - mode: compatibility - output: - changed: false - mocks: - run_command: - - command: [/testbin/cpanm, --version] - environ: *env-def-true - rc: 0 - out: | - cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) - perl version 5.041005 (/usr/local/bin/perl) - err: '' - - command: [/testbin/perl, -le, use Dancer;] - environ: *env-def-false - rc: 0 - out: '' - err: '' - id: install_dancer input: name: Dancer @@ -72,26 +26,6 @@ test_cases: rc: 0 out: '' err: '' - - id: install_distribution_file_compatibility - input: - name: MIYAGAWA/Plack-0.99_05.tar.gz - mode: compatibility - output: - 
changed: true - mocks: - run_command: - - command: [/testbin/cpanm, --version] - environ: *env-def-true - rc: 0 - out: | - cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm) - perl version 5.041005 (/usr/local/bin/perl) - err: '' - - command: [/testbin/cpanm, MIYAGAWA/Plack-0.99_05.tar.gz] - environ: *env-def-true - rc: 0 - out: '' - err: '' - id: install_distribution_file input: name: MIYAGAWA/Plack-0.99_05.tar.gz