Prepare main for 13.0.0 (#11834)

* Bump version to 13.0.0.

* Remove deprecated modules and plugins.

* Remove deprecated module utils.

* Remove leftovers.

* Remove mode=compatibility.

* Change default of is_pre740 from true to false.

* Change default of force_defaults from true to false.

* Remove support for ubuntu_legacy mechanism.

* Remove cpanm compatibility tests.
This commit is contained in:
Felix Fontein
2026-04-20 12:35:43 +02:00
committed by GitHub
parent 7ce198f0e7
commit 72c13c85ad
62 changed files with 104 additions and 13052 deletions

35
.github/BOTMETA.yml vendored
View File

@@ -305,8 +305,6 @@ files:
$lookups/flattened.py: {}
$lookups/github_app_access_token.py:
maintainers: weisheng-p blavoie
$lookups/hiera.py:
maintainers: jparrill
$lookups/keyring.py: {}
$lookups/lastpass.py: {}
$lookups/lmdb_kv.py:
@@ -399,9 +397,6 @@ files:
maintainers: russoz
$module_utils/net_tools/pritunl/:
maintainers: Lowess
$module_utils/oracle/oci_utils.py:
labels: cloud
maintainers: $team_oracle
$module_utils/pacemaker.py:
maintainers: munchtoast
$module_utils/pipx.py:
@@ -487,10 +482,6 @@ files:
maintainers: evgkrsk
$modules/archive.py:
maintainers: bendoh
$modules/atomic_:
maintainers: krsacme
$modules/atomic_container.py:
maintainers: giuseppe krsacme
$modules/awall.py:
maintainers: tdtrask
$modules/beadm.py:
@@ -517,8 +508,6 @@ files:
maintainers: natefoo
$modules/cargo.py:
maintainers: radek-sprta
$modules/catapult.py:
maintainers: Jmainguy
$modules/circonus_annotation.py:
maintainers: NickatEpic
$modules/cisco_webex.py:
@@ -558,11 +547,6 @@ files:
maintainers: shamilovstas
$modules/deploy_helper.py:
maintainers: ramondelafuente
$modules/dimensiondata_network.py:
labels: dimensiondata_network
maintainers: aimonb tintoy
$modules/dimensiondata_vlan.py:
maintainers: tintoy
$modules/discord.py:
maintainers: cwollinger
$modules/django_check.py:
@@ -1054,8 +1038,6 @@ files:
maintainers: $team_wdc
$modules/ocapi_info.py:
maintainers: $team_wdc
$modules/oci_vcn.py:
maintainers: $team_oracle rohitChaware
$modules/odbc.py:
maintainers: john-westcott-iv
$modules/office_365_connector_card.py:
@@ -1072,8 +1054,6 @@ files:
maintainers: rvalle
$modules/one_vnet.py:
maintainers: abakanovskii
$modules/oneandone_:
maintainers: aajdinov edevenport
$modules/onepassword_info.py:
maintainers: Rylon
$modules/oneview_:
@@ -1198,8 +1178,6 @@ files:
$modules/puppet.py:
labels: puppet
maintainers: emonty
$modules/pushbullet.py:
maintainers: willybarro
$modules/pushover.py:
maintainers: weaselkeeper wopfel
$modules/python_requirements_info.py:
@@ -1313,14 +1291,6 @@ files:
maintainers: bachradsusi dankeder jamescassell
$modules/sendgrid.py:
maintainers: makaimc
$modules/sensu_:
maintainers: dmsimard
$modules/sensu_check.py:
maintainers: andsens
$modules/sensu_silence.py:
maintainers: smbambling
$modules/sensu_subscription.py:
maintainers: andsens
$modules/seport.py:
maintainers: dankeder
$modules/serverless.py:
@@ -1360,8 +1330,6 @@ files:
maintainers: orgito
$modules/spectrum_model_attrs.py:
maintainers: tgates81
$modules/spotinst_aws_elastigroup.py:
maintainers: talzur
$modules/ss_3par_cpg.py:
maintainers: farhan7500 gautamphegde
$modules/ssh_config.py:
@@ -1416,8 +1384,6 @@ files:
maintainers: indrajitr jasperla tmshn
$modules/twilio.py:
maintainers: makaimc
$modules/typetalk.py:
maintainers: tksmd
$modules/udm_:
maintainers: keachi
$modules/ufw.py:
@@ -1656,7 +1622,6 @@ macros:
team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder
team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip
team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding
team_oracle: manojmeda mross22 nalsaber
team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06 jyundt
team_rhsm: cnsnyder ptoscano
team_scaleway: remyleone abarbare

View File

@@ -0,0 +1,46 @@
removed_features:
- "dimensiondata - the doc fragment has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "dimensiondata_wait - the doc fragment has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "oracle - the doc fragment has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "oracle_creatable_resource - the doc fragment has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "oracle_display_name_option - the doc fragment has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "oracle_name_option - the doc fragment has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "oracle_tags - the doc fragment has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "oracle_wait_options - the doc fragment has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "hiera lookup plugin - the lookup has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "cloud module utils - the module utils has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "database module utils - the module utils has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "dimensiondata module utils - the module utils has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "known_hosts module utils - the module utils has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "oneandone module utils - the module utils has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "oracle.oci_utils module utils - the module utils has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "saslprep module utils - the module utils has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "atomic_container - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "atomic_host - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "atomic_image - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "catapult - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "dimensiondata_network - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "dimensiondata_vlan - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "oci_vcn - the module has been removed. Use ``oracle.oci.oci_network_vcn`` instead (https://github.com/ansible-collections/community.general/pull/11834)."
- "oneandone_firewall_policy - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "oneandone_load_balancer - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "oneandone_monitoring_policy - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "oneandone_private_network - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "oneandone_public_ip - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "oneandone_server - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "pushbullet - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "sensu_check - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "sensu_client - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "sensu_handler - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "sensu_silence - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "sensu_subscription - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "spotinst_aws_elastigroup - the module has been removed. Use ``spot.cloud_modules.aws_elastigroup`` instead (https://github.com/ansible-collections/community.general/pull/11834)."
- "typetalk - the module has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "keycloak module utils - the deprecated ``KeycloakAPI.add_user_in_group()`` method has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "django module utils - the deprecated ``database``, ``noinput``, ``dry_run``, and ``check`` parameters for the Django runner have been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "pipx module utils - the deprecated ``make_process_list()`` function has been removed (https://github.com/ansible-collections/community.general/pull/11834)."
- "cpanm - the ``mode=compatibility`` is no longer available. Migrate to ``mode=new`` (https://github.com/ansible-collections/community.general/pull/11834)."
- "locale_gen - support for the ``ubuntu_legacy`` mechanism has been removed. Only the ``glibc`` mechanism is supported by the module anymore (https://github.com/ansible-collections/community.general/pull/11834)."
breaking_changes:
- "rocketchat - the default for the ``is_pre740`` option changed from ``true`` to ``false`` (https://github.com/ansible-collections/community.general/pull/11834)."
- "github_repo - the default for the ``force_defaults`` option changed from ``true`` to ``false`` (https://github.com/ansible-collections/community.general/pull/11834)."

View File

@@ -5,7 +5,7 @@
namespace: community
name: general
version: 12.6.0
version: 13.0.0
readme: README.md
authors:
- Ansible (https://github.com/ansible)

View File

@@ -127,11 +127,9 @@ plugin_routing:
hashi_vault:
redirect: community.hashi_vault.hashi_vault
hiera:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: >-
Hiera has been deprecated a long time ago.
If you disagree with this deprecation, please create an issue in the community.general repository.
warning_text: Hiera has been deprecated a long time ago.
manifold:
tombstone:
removal_version: 11.0.0
@@ -148,15 +146,15 @@ plugin_routing:
removal_version: 3.0.0
warning_text: Use community.general.ali_instance_info instead.
atomic_container:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Project Atomic was sunset by the end of 2019.
atomic_host:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Project Atomic was sunset by the end of 2019.
atomic_image:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Project Atomic was sunset by the end of 2019.
bearychat:
@@ -164,7 +162,7 @@ plugin_routing:
removal_version: 12.0.0
warning_text: Chat service is no longer available.
catapult:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: DNS fails to resolve the API endpoint used by the module since Oct 2024. See https://github.com/ansible-collections/community.general/issues/10318 for details.
cisco_spark:
@@ -210,11 +208,11 @@ plugin_routing:
removal_version: 10.0.0
warning_text: Use community.general.consul_token and/or community.general.consul_policy instead.
dimensiondata_network:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Service and its endpoints are no longer available.
dimensiondata_vlan:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Service and its endpoints are no longer available.
docker_compose:
@@ -527,7 +525,7 @@ plugin_routing:
nios_zone:
redirect: infoblox.nios_modules.nios_zone
oci_vcn:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Use oracle.oci.oci_network_vcn instead.
ome_device_info:
@@ -537,27 +535,27 @@ plugin_routing:
removal_version: 3.0.0
warning_text: Use community.general.one_image_info instead.
oneandone_firewall_policy:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: DNS fails to resolve the API endpoint used by the module.
oneandone_load_balancer:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: DNS fails to resolve the API endpoint used by the module.
oneandone_monitoring_policy:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: DNS fails to resolve the API endpoint used by the module.
oneandone_private_network:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: DNS fails to resolve the API endpoint used by the module.
oneandone_public_ip:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: DNS fails to resolve the API endpoint used by the module.
oneandone_server:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: DNS fails to resolve the API endpoint used by the module.
onepassword_facts:
@@ -868,7 +866,7 @@ plugin_routing:
removal_version: 3.0.0
warning_text: Use purestorage.flashblade.purefb_info instead.
pushbullet:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Module relies on Python package pushbullet.py which is not maintained and supports only up to Python 3.2.
python_requirements_facts:
@@ -1024,23 +1022,23 @@ plugin_routing:
removal_version: 3.0.0
warning_text: Use community.general.scaleway_volume_info instead.
sensu_check:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
sensu_client:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
sensu_handler:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
sensu_silence:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
sensu_subscription:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
sf_account_manager:
@@ -1068,7 +1066,7 @@ plugin_routing:
removal_version: 3.0.0
warning_text: Use community.general.smartos_image_info instead.
spotinst_aws_elastigroup:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Module relies on unsupported Python package. Use the module spot.cloud_modules.aws_elastigroup instead.
stackdriver:
@@ -1082,7 +1080,7 @@ plugin_routing:
removal_version: 15.0.0
warning_text: ClearLinux was made EOL in July 2025. If you think the module is still useful for another distribution, please create an issue in the community.general repository.
typetalk:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: The typetalk service will be discontinued on Dec 2025.
vertica_facts:
@@ -1122,11 +1120,11 @@ plugin_routing:
_gcp:
redirect: community.google._gcp
dimensiondata:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Service and its endpoints are no longer available.
dimensiondata_wait:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Service and its endpoints are no longer available.
docker:
@@ -1140,27 +1138,27 @@ plugin_routing:
nios:
redirect: infoblox.nios_modules.nios
oracle:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Code is unmaintained here and official Oracle collection is available for a number of years.
oracle_creatable_resource:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Code is unmaintained here and official Oracle collection is available for a number of years.
oracle_display_name_option:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Code is unmaintained here and official Oracle collection is available for a number of years.
oracle_name_option:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Code is unmaintained here and official Oracle collection is available for a number of years.
oracle_tags:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Code is unmaintained here and official Oracle collection is available for a number of years.
oracle_wait_options:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Code is unmaintained here and official Oracle collection is available for a number of years.
postgresql:
@@ -1181,15 +1179,15 @@ plugin_routing:
package pyrax.
module_utils:
cloud:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: This code is not used by community.general. If you want to use it in another collection, please copy it over.
database:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: This code is not used by community.general. If you want to use it in another collection, please copy it over.
dimensiondata:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: Service and its endpoints are no longer available.
docker.common:
@@ -1205,19 +1203,19 @@ plugin_routing:
hetzner:
redirect: community.hrobot.robot
known_hosts:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: This code is not used by community.general. If you want to use it in another collection, please copy it over.
kubevirt:
redirect: community.kubevirt.kubevirt
net_tools.nios.api:
redirect: infoblox.nios_modules.api
oci_utils:
deprecation:
oracle.oci_utils:
tombstone:
removal_version: 13.0.0
warning_text: Code is unmaintained here and official Oracle collection is available for a number of years.
oneandone:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: DNS fails to resolve the API endpoint used by the module.
postgresql:
@@ -1240,7 +1238,7 @@ plugin_routing:
remote_management.dellemc.ome:
redirect: dellemc.openmanage.ome
saslprep:
deprecation:
tombstone:
removal_version: 13.0.0
warning_text: This code is not used by community.general. If you want to use it in another collection, please copy it over.
inventory:

View File

@@ -1,53 +0,0 @@
#
# Copyright (c) 2016, Dimension Data
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
# Authors:
# - Adam Friedman <tintoy@tintoy.io>
#
# DEPRECATED
#
# This doc fragment is deprecated and will be removed in community.general 13.0.0
#
class ModuleDocFragment:
# Dimension Data doc fragment
DOCUMENTATION = r"""
options:
region:
description:
- The target region.
- Regions are defined in Apache libcloud project [libcloud/common/dimensiondata.py].
- They are also listed in U(https://libcloud.readthedocs.io/en/latest/compute/drivers/dimensiondata.html).
- Note that the default value C(na) stands for "North America".
- The module prepends C(dd-) to the region choice.
type: str
default: na
mcp_user:
description:
- The username used to authenticate to the CloudControl API.
- If not specified, falls back to E(MCP_USER) from environment variable or C(~/.dimensiondata).
type: str
mcp_password:
description:
- The password used to authenticate to the CloudControl API.
- If not specified, falls back to E(MCP_PASSWORD) from environment variable or C(~/.dimensiondata).
- Required if O(mcp_user) is specified.
type: str
location:
description:
- The target datacenter.
type: str
required: true
validate_certs:
description:
- If V(false), SSL certificates are not validated.
- This should only be used on private instances of the CloudControl API that use self-signed certificates.
type: bool
default: true
"""

View File

@@ -1,39 +0,0 @@
#
# Copyright (c) 2016, Dimension Data
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
# Authors:
# - Adam Friedman <tintoy@tintoy.io>
#
# DEPRECATED
#
# This doc fragment is deprecated and will be removed in community.general 13.0.0
#
class ModuleDocFragment:
# Dimension Data ("wait-for-completion" parameters) doc fragment
DOCUMENTATION = r"""
options:
wait:
description:
- Should we wait for the task to complete before moving onto the next.
type: bool
default: false
wait_time:
description:
- The maximum amount of time (in seconds) to wait for the task to complete.
- Only applicable if O(wait=true).
type: int
default: 600
wait_poll_interval:
description:
- The amount of time (in seconds) to wait between checks for task completion.
- Only applicable if O(wait=true).
type: int
default: 2
"""

View File

@@ -1,80 +0,0 @@
# Copyright (c) 2018, Oracle and/or its affiliates.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
#
# DEPRECATED
#
# This fragment is deprecated and will be removed in community.general 13.0.0
#
class ModuleDocFragment:
DOCUMENTATION = r"""
requirements:
- Python SDK for Oracle Cloud Infrastructure U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io)
notes:
- For OCI Python SDK configuration, please refer to U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/configuration.html).
options:
config_file_location:
description:
- Path to configuration file. If not set then the value of the E(OCI_CONFIG_FILE) environment variable, if any, is used.
Otherwise, defaults to C(~/.oci/config).
type: str
config_profile_name:
description:
- The profile to load from the config file referenced by O(config_file_location). If not set, then the value of the
E(OCI_CONFIG_PROFILE) environment variable, if any, is used. Otherwise, defaults to the C(DEFAULT) profile in O(config_file_location).
default: "DEFAULT"
type: str
api_user:
description:
- The OCID of the user, on whose behalf, OCI APIs are invoked. If not set, then the value of the E(OCI_USER_OCID) environment
variable, if any, is used. This option is required if the user is not specified through a configuration file (See
O(config_file_location)). To get the user's OCID, please refer U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
type: str
api_user_fingerprint:
description:
- Fingerprint for the key pair being used. If not set, then the value of the E(OCI_USER_FINGERPRINT) environment variable,
if any, is used. This option is required if the key fingerprint is not specified through a configuration file (See
O(config_file_location)). To get the key pair's fingerprint value please refer to
U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
type: str
api_user_key_file:
description:
- Full path and filename of the private key (in PEM format). If not set, then the value of the E(OCI_USER_KEY_FILE)
variable, if any, is used. This option is required if the private key is not specified through a configuration file
(See O(config_file_location)). If the key is encrypted with a pass-phrase, the O(api_user_key_pass_phrase) option
must also be provided.
type: path
api_user_key_pass_phrase:
description:
- Passphrase used by the key referenced in O(api_user_key_file), if it is encrypted. If not set, then the value of the
E(OCI_USER_KEY_PASS_PHRASE) variable, if any, is used. This option is required if the key passphrase is not specified
through a configuration file (See O(config_file_location)).
type: str
auth_type:
description:
- The type of authentication to use for making API requests. By default O(auth_type=api_key) based authentication is
performed and the API key (see O(api_user_key_file)) in your config file is used. If O(auth_type) is not specified,
the value of the E(OCI_ANSIBLE_AUTH_TYPE), if any, is used. Use O(auth_type=instance_principal) to use instance principal
based authentication when running ansible playbooks within an OCI compute instance.
choices: ['api_key', 'instance_principal']
default: 'api_key'
type: str
tenancy:
description:
- OCID of your tenancy. If not set, then the value of the E(OCI_TENANCY) variable, if any, is used. This option is required
if the tenancy OCID is not specified through a configuration file (See O(config_file_location)). To get the tenancy
OCID, please refer to U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
type: str
region:
description:
- The Oracle Cloud Infrastructure region to use for all OCI API requests. If not set, then the value of the E(OCI_REGION)
variable, if any, is used. This option is required if the region is not specified through a configuration file (See
O(config_file_location)). Please refer to U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/regions.htm)
for more information on OCI regions.
type: str
"""

View File

@@ -1,29 +0,0 @@
# Copyright (c) 2018, Oracle and/or its affiliates.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
#
# DEPRECATED
#
# This fragment is deprecated and will be removed in community.general 13.0.0
#
class ModuleDocFragment:
DOCUMENTATION = r"""
options:
force_create:
description: Whether to attempt non-idempotent creation of a resource. By default, create resource is an idempotent operation,
and does not create the resource if it already exists. Setting this option to V(true), forcefully creates a copy of
the resource, even if it already exists. This option is mutually exclusive with O(key_by).
default: false
type: bool
key_by:
description: The list of comma-separated attributes of this resource which should be used to uniquely identify an instance
of the resource. By default, all the attributes of a resource except O(freeform_tags) are used to uniquely identify
a resource.
type: list
elements: str
"""

View File

@@ -1,21 +0,0 @@
# Copyright (c) 2018, Oracle and/or its affiliates.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
#
# DEPRECATED
#
# This fragment is deprecated and will be removed in community.general 13.0.0
#
class ModuleDocFragment:
DOCUMENTATION = r"""
options:
display_name:
description: Use O(display_name) along with the other options to return only resources that match the given display name
exactly.
type: str
"""

View File

@@ -1,20 +0,0 @@
# Copyright (c) 2018, Oracle and/or its affiliates.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
#
# DEPRECATED
#
# This fragment is deprecated and will be removed in community.general 13.0.0
#
class ModuleDocFragment:
DOCUMENTATION = r"""
options:
name:
description: Use O(name) along with the other options to return only resources that match the given name exactly.
type: str
"""

View File

@@ -1,25 +0,0 @@
# Copyright (c) 2018, Oracle and/or its affiliates.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
#
# DEPRECATED
#
# This fragment is deprecated and will be removed in community.general 13.0.0
#
class ModuleDocFragment:
DOCUMENTATION = r"""
options:
defined_tags:
description: Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see
U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm).
type: dict
freeform_tags:
description: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm).
type: dict
"""

View File

@@ -1,30 +0,0 @@
# Copyright (c) 2018, Oracle and/or its affiliates.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
#
# DEPRECATED
#
# This fragment is deprecated and will be removed in community.general 13.0.0
#
class ModuleDocFragment:
DOCUMENTATION = r"""
options:
wait:
description: Whether to wait for create or delete operation to complete.
default: true
type: bool
wait_timeout:
description: Time, in seconds, to wait when O(wait=true).
default: 1200
type: int
wait_until:
description: The lifecycle state to wait for the resource to transition into when O(wait=true). By default, when O(wait=true),
we wait for the resource to get into ACTIVE/ATTACHED/AVAILABLE/PROVISIONED/ RUNNING applicable lifecycle state during
create operation and to get into DELETED/DETACHED/ TERMINATED lifecycle state during delete operation.
type: str
"""

View File

@@ -1,97 +0,0 @@
# Copyright (c) 2017, Juan Manuel Parrilla <jparrill@redhat.com>
# Copyright (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
DOCUMENTATION = r"""
author:
- Juan Manuel Parrilla (@jparrill)
name: hiera
short_description: Get info from hiera data
requirements:
- hiera (command line utility)
description:
- Retrieves data from an Puppetmaster node using Hiera as ENC.
deprecated:
removed_in: 13.0.0
why: >-
Hiera has been deprecated a long time ago.
If you disagree with this deprecation, please create an issue in the community.general repository.
alternative: Unknown.
options:
_terms:
description:
- The list of keys to lookup on the Puppetmaster.
type: list
elements: string
required: true
executable:
description:
- Binary file to execute Hiera.
type: string
default: '/usr/bin/hiera'
env:
- name: ANSIBLE_HIERA_BIN
config_file:
description:
- File that describes the hierarchy of Hiera.
type: string
default: '/etc/hiera.yaml'
env:
- name: ANSIBLE_HIERA_CFG
# FIXME: incomplete options .. _terms? environment/fqdn?
"""
EXAMPLES = r"""
# All this examples depends on hiera.yml that describes the hierarchy
- name: "a value from Hiera 'DB'"
ansible.builtin.debug:
msg: "{{ lookup('community.general.hiera', 'foo') }}"
- name: "a value from a Hiera 'DB' on other environment"
ansible.builtin.debug:
msg: "{{ lookup('community.general.hiera', 'foo environment=production') }}"
- name: "a value from a Hiera 'DB' for a concrete node"
ansible.builtin.debug:
msg: "{{ lookup('community.general.hiera', 'foo fqdn=puppet01.localdomain') }}"
"""
RETURN = r"""
_raw:
description:
- A value associated with input key.
type: list
elements: str
"""
from ansible.module_utils.common.text.converters import to_text
from ansible.plugins.lookup import LookupBase
from ansible.utils.cmd_functions import run_cmd
class Hiera:
def __init__(self, hiera_cfg, hiera_bin):
self.hiera_cfg = hiera_cfg
self.hiera_bin = hiera_bin
def get(self, hiera_key):
pargs = [self.hiera_bin]
pargs.extend(["-c", self.hiera_cfg])
pargs.extend(hiera_key)
rc, output, err = run_cmd(f"{self.hiera_bin} -c {self.hiera_cfg} {hiera_key[0]}")
return to_text(output.strip())
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
self.set_options(var_options=variables, direct=kwargs)
hiera = Hiera(self.get_option("config_file"), self.get_option("executable"))
ret = [hiera.get(terms)]
return ret

View File

@@ -1,222 +0,0 @@
#
# Copyright (c) 2016 Allen Sanabria, <asanabria@linuxdynasty.org>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# This module utils is deprecated and will be removed in community.general 13.0.0
from __future__ import annotations
"""
This module adds shared support for generic cloud modules
In order to use this module, include it as part of a custom
module as shown below.
from ansible.module_utils.cloud import CloudRetry
The 'cloud' module provides the following common classes:
* CloudRetry
- The base class to be used by other cloud providers, in order to
provide a backoff/retry decorator based on status codes.
- Example using the AWSRetry class which inherits from CloudRetry.
@AWSRetry.exponential_backoff(retries=10, delay=3)
get_ec2_security_group_ids_from_names()
@AWSRetry.jittered_backoff()
get_ec2_security_group_ids_from_names()
"""
import random
import syslog
import time
from functools import wraps
def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
"""Customizable exponential backoff strategy.
Args:
retries (int): Maximum number of times to retry a request.
delay (float): Initial (base) delay.
backoff (float): base of the exponent to use for exponential
backoff.
max_delay (int): Optional. If provided each delay generated is capped
at this amount. Defaults to 60 seconds.
Returns:
Callable that returns a generator. This generator yields durations in
seconds to be used as delays for an exponential backoff strategy.
Usage:
>>> backoff = _exponential_backoff()
>>> backoff
<function backoff_backoff at 0x7f0d939facf8>
>>> list(backoff())
[2, 4, 8, 16, 32, 60, 60, 60, 60, 60]
"""
def backoff_gen():
for retry in range(0, retries):
sleep = delay * backoff**retry
yield sleep if max_delay is None else min(sleep, max_delay)
return backoff_gen
def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
"""Implements the "Full Jitter" backoff strategy described here
https://www.awsarchitectureblog.com/2015/03/backoff.html
Args:
retries (int): Maximum number of times to retry a request.
delay (float): Approximate number of seconds to sleep for the first
retry.
max_delay (int): The maximum number of seconds to sleep for any retry.
_random (random.Random or None): Makes this generator testable by
allowing developers to explicitly pass in the a seeded Random.
Returns:
Callable that returns a generator. This generator yields durations in
seconds to be used as delays for a full jitter backoff strategy.
Usage:
>>> backoff = _full_jitter_backoff(retries=5)
>>> backoff
<function backoff_backoff at 0x7f0d939facf8>
>>> list(backoff())
[3, 6, 5, 23, 38]
>>> list(backoff())
[2, 1, 6, 6, 31]
"""
def backoff_gen():
for retry in range(0, retries):
yield _random.randint(0, min(max_delay, delay * 2**retry))
return backoff_gen
class CloudRetry:
    """CloudRetry can be used by any cloud provider, in order to implement a
    backoff algorithm/retry effect based on Status Code from Exceptions.

    Subclasses must set ``base_class`` and override the two static hooks
    below; the classmethods then produce retry decorators.
    """

    # This is the base class of the exception.
    # AWS Example botocore.exceptions.ClientError
    base_class = None

    @staticmethod
    def status_code_from_exception(error):
        """Return the status code from the exception object

        Args:
            error (object): The exception itself.
        """
        # Hook: subclasses must override; the base implementation returns None.
        pass

    @staticmethod
    def found(response_code, catch_extra_error_codes=None):
        """Return True if the Response Code to retry on was found.

        Args:
            response_code (str): This is the Response Code that is being matched against.
        """
        # Hook: subclasses must override; the base implementation returns None
        # (falsy, i.e. "never retry").
        pass

    @classmethod
    def _backoff(cls, backoff_strategy, catch_extra_error_codes=None):
        """Retry calling the Cloud decorated function using the provided
        backoff strategy.

        Args:
            backoff_strategy (callable): Callable that returns a generator. The
            generator should yield sleep times for each retry of the decorated
            function.
        """

        def deco(f):
            @wraps(f)
            def retry_func(*args, **kwargs):
                # One attempt per delay yielded by the strategy; a retry is
                # consumed only for exceptions of base_class whose status code
                # matches found().  Anything else is re-raised immediately.
                for delay in backoff_strategy():
                    try:
                        return f(*args, **kwargs)
                    except Exception as e:
                        if isinstance(e, cls.base_class):  # pylint: disable=isinstance-second-argument-not-valid-type
                            response_code = cls.status_code_from_exception(e)
                            if cls.found(response_code, catch_extra_error_codes):
                                msg = f"{e}: Retrying in {delay} seconds..."
                                syslog.syslog(syslog.LOG_INFO, msg)
                                time.sleep(delay)
                            else:
                                # Return original exception if exception is not a ClientError
                                raise e
                        else:
                            # Return original exception if exception is not a ClientError
                            raise e
                # Strategy exhausted: one final attempt with no retry handling.
                return f(*args, **kwargs)

            return retry_func  # true decorator

        return deco

    @classmethod
    def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None):
        """
        Retry calling the Cloud decorated function using an exponential backoff.

        Kwargs:
            retries (int): Number of times to retry a failed request before giving up
                default=10
            delay (int or float): Initial delay between retries in seconds
                default=3
            backoff (int or float): backoff multiplier e.g. value of 2 will
                double the delay each retry
                default=2
            max_delay (int or None): maximum amount of time to wait between retries.
                default=60
        """
        return cls._backoff(
            _exponential_backoff(retries=retries, delay=delay, backoff=backoff, max_delay=max_delay),
            catch_extra_error_codes,
        )

    @classmethod
    def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_codes=None):
        """
        Retry calling the Cloud decorated function using a jittered backoff
        strategy. More on this strategy here:

        https://www.awsarchitectureblog.com/2015/03/backoff.html

        Kwargs:
            retries (int): Number of times to retry a failed request before giving up
                default=10
            delay (int): Initial delay between retries in seconds
                default=3
            max_delay (int): maximum amount of time to wait between retries.
                default=60
        """
        return cls._backoff(
            _full_jitter_backoff(retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes
        )

    @classmethod
    def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
        """
        Retry calling the Cloud decorated function using an exponential backoff.

        Compatibility for the original implementation of CloudRetry.backoff that
        did not provide configurable backoff strategies. Developers should use
        CloudRetry.exponential_backoff instead.

        Kwargs:
            tries (int): Number of times to try (not retry) before giving up
                default=10
            delay (int or float): Initial delay between retries in seconds
                default=3
            backoff (int or float): backoff multiplier e.g. value of 2 will
                double the delay each retry
                default=1.1
        """
        # ``tries`` counts attempts, exponential_backoff counts retries,
        # hence the -1; max_delay=None preserves the legacy uncapped behavior.
        return cls.exponential_backoff(
            retries=tries - 1,
            delay=delay,
            backoff=backoff,
            max_delay=None,
            catch_extra_error_codes=catch_extra_error_codes,
        )

View File

@@ -1,194 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
#
# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
# SPDX-License-Identifier: BSD-2-Clause
# This module utils is deprecated and will be removed in community.general 13.0.0
from __future__ import annotations
import re
import typing as t
if t.TYPE_CHECKING:
from ansible.module_utils.basic import AnsibleModule
# Input patterns for the is_input_dangerous() function below.
#
# 1. A quote character (single or double) followed later by "--"
#    (classic comment-out-the-rest-of-the-query injection).
PATTERN_1 = re.compile(r"(\'|\").*--")

# 2. UNION / INTERSECT / EXCEPT combined with a trailing SELECT.
PATTERN_2 = re.compile(r"(UNION|INTERSECT|EXCEPT).*SELECT", re.IGNORECASE)

# 3. A ';' statement separator followed by any data-touching keyword.
PATTERN_3 = re.compile(r";.*(SELECT|UPDATE|INSERT|DELETE|DROP|TRUNCATE|ALTER)", re.IGNORECASE)
class SQLParseError(Exception):
    # Raised when an SQL identifier cannot be parsed/quoted safely.
    pass
class UnclosedQuoteError(SQLParseError):
    # Raised when a quoted identifier has no matching closing quote.
    pass
# Maps a type of identifier to the maximum number of dot levels that are
# allowed to specify that identifier. For example, a database column can be
# specified by up to 4 levels: database.schema.table.column
_PG_IDENTIFIER_TO_DOT_LEVEL = dict(
    database=1,
    schema=2,
    table=3,
    column=4,
    role=1,
    tablespace=1,
    sequence=3,
    publication=1,
)
# MySQL has no schema level: database.table.column is the deepest form.
_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1)
def _find_end_quote(identifier, quote_char):
accumulate = 0
while True:
try:
quote = identifier.index(quote_char)
except ValueError as e:
raise UnclosedQuoteError from e
accumulate = accumulate + quote
try:
next_char = identifier[quote + 1]
except IndexError:
return accumulate
if next_char == quote_char:
try:
identifier = identifier[quote + 2 :]
accumulate = accumulate + 2
except IndexError as e:
raise UnclosedQuoteError from e
else:
return accumulate
def _identifier_parse(identifier, quote_char):
    """Split a (possibly dotted, possibly quoted) identifier into a list of
    quoted fragments.

    Walks the identifier left to right, recursing on the remainder after
    each dot.  Every fragment in the result is wrapped in ``quote_char``
    with embedded quote characters doubled, so the fragments are safe to
    join with '.' and embed in SQL.

    Raises SQLParseError on empty input or malformed user quoting.
    """
    if not identifier:
        raise SQLParseError("Identifier name unspecified or unquoted trailing dot")

    already_quoted = False
    if identifier.startswith(quote_char):
        already_quoted = True
        try:
            # +1 compensates for the opening quote we skipped.
            end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1
        except UnclosedQuoteError:
            # Leading quote never closed: fall through and treat the value
            # as an unquoted identifier below.
            already_quoted = False
        else:
            if end_quote < len(identifier) - 1:
                if identifier[end_quote + 1] == ".":
                    # Quoted fragment followed by a dot: keep it verbatim and
                    # recurse on the rest.
                    dot = end_quote + 1
                    first_identifier = identifier[:dot]
                    next_identifier = identifier[dot + 1 :]
                    further_identifiers = _identifier_parse(next_identifier, quote_char)
                    further_identifiers.insert(0, first_identifier)
                else:
                    raise SQLParseError("User escaped identifiers must escape extra quotes")
            else:
                # Quoted fragment spans the whole identifier.
                further_identifiers = [identifier]

    if not already_quoted:
        try:
            dot = identifier.index(".")
        except ValueError:
            # No dot at all: quote the whole identifier.
            identifier = identifier.replace(quote_char, quote_char * 2)
            identifier = f"{quote_char}{identifier}{quote_char}"
            further_identifiers = [identifier]
        else:
            if dot == 0 or dot >= len(identifier) - 1:
                # Leading or trailing dot is part of the name, not a separator.
                identifier = identifier.replace(quote_char, quote_char * 2)
                identifier = f"{quote_char}{identifier}{quote_char}"
                further_identifiers = [identifier]
            else:
                # Quote the head fragment and recurse on the tail.
                first_identifier = identifier[:dot]
                next_identifier = identifier[dot + 1 :]
                further_identifiers = _identifier_parse(next_identifier, quote_char)
                first_identifier = first_identifier.replace(quote_char, quote_char * 2)
                first_identifier = f"{quote_char}{first_identifier}{quote_char}"
                further_identifiers.insert(0, first_identifier)

    return further_identifiers
def pg_quote_identifier(identifier, id_type):
    """Quote ``identifier`` for PostgreSQL, validating its dot depth."""
    fragments = _identifier_parse(identifier, quote_char='"')
    max_depth = _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]
    if len(fragments) > max_depth:
        raise SQLParseError(f"PostgreSQL does not support {id_type} with more than {max_depth} dots")
    return ".".join(fragments)
def mysql_quote_identifier(identifier, id_type):
    """Quote ``identifier`` for MySQL, validating its dot depth."""
    fragments = _identifier_parse(identifier, quote_char="`")
    max_depth = _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]
    if len(fragments) - 1 > max_depth:
        raise SQLParseError(f"MySQL does not support {id_type} with more than {max_depth} dots")
    # MySQL uses a bare * for "all columns"; unquote that special case.
    return ".".join("*" if fragment == "`*`" else fragment for fragment in fragments)
def is_input_dangerous(string):
    """Check whether ``string`` looks like a SQL injection attempt.

    Use this only when a parametrized cursor.execute() is not possible
    (typically with DDL queries).
    """
    if not string:
        return False
    for pattern in (PATTERN_1, PATTERN_2, PATTERN_3):
        if pattern.search(string):
            return True
    return False
def check_input(module: AnsibleModule, *args) -> None:
    """Fail the module when any argument looks like a SQL injection attempt.

    Wrapper around is_input_dangerous(); strings and lists of strings are
    checked directly, other values are stringified first, and None/bool
    are always considered safe.
    """
    dangerous_elements = []

    def _scan(value):
        # None and booleans can never be dangerous; skip them outright.
        if value is None or isinstance(value, bool):
            return
        if isinstance(value, list):
            for item in value:
                if is_input_dangerous(item):
                    dangerous_elements.append(item)
            return
        text = value if isinstance(value, str) else str(value)
        if is_input_dangerous(text):
            dangerous_elements.append(text)

    for elem in args:
        _scan(elem)

    if dangerous_elements:
        module.fail_json(msg=f"Passed input '{', '.join(dangerous_elements)}' is potentially dangerous")

View File

@@ -1,331 +0,0 @@
#
# Copyright (c) 2016 Dimension Data
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
#
# Authors:
# - Aimon Bustardo <aimon.bustardo@dimensiondata.com>
# - Mark Maglana <mmaglana@gmail.com>
# - Adam Friedman <tintoy@tintoy.io>
#
# Common functionality to be used by various module components
from __future__ import annotations
#
# DEPRECATED
#
# This module utils is deprecated and will be removed in community.general 13.0.0
#
import configparser
import os
import re
import traceback
from os.path import expanduser
from uuid import UUID
from ansible.module_utils.basic import AnsibleModule, missing_required_lib # noqa: F401, pylint: disable=unused-import
LIBCLOUD_IMP_ERR = None
try:
import libcloud.security
from libcloud.common.dimensiondata import ( # noqa: F401, pylint: disable=unused-import
API_ENDPOINTS,
DimensionDataAPIException,
DimensionDataStatus,
)
from libcloud.compute.base import Node, NodeLocation # noqa: F401, pylint: disable=unused-import
from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider
HAS_LIBCLOUD = True
except ImportError:
LIBCLOUD_IMP_ERR = traceback.format_exc()
HAS_LIBCLOUD = False
# MCP 2.x version pattern for location (datacenter) names.
#
# Note that this is not a totally reliable way of determining MCP version.
# Unfortunately, libcloud's NodeLocation currently makes no provision for extended properties.
# At some point we may therefore want to either enhance libcloud or enable overriding mcp_version
# by specifying it in the module parameters.
MCP_2_LOCATION_NAME_PATTERN = re.compile(r".*MCP\s?2.*")
class DimensionDataModule:
    """
    The base class containing common functionality used by Dimension Data modules for Ansible.
    """

    def __init__(self, module: AnsibleModule) -> None:
        """
        Create a new DimensionDataModule.

        Will fail if Apache libcloud is not present.

        :param module: The underlying Ansible module.
        :type module: AnsibleModule
        """
        self.module = module

        if not HAS_LIBCLOUD:
            self.module.fail_json(msg=missing_required_lib("libcloud"), exception=LIBCLOUD_IMP_ERR)

        # Credentials are common to all Dimension Data modules.
        credentials = self.get_credentials()
        self.user_id = credentials["user_id"]
        self.key = credentials["key"]

        # Region and location are common to all Dimension Data modules.
        # Regions are stored internally with the CloudControl "dd-" prefix.
        region = self.module.params["region"]
        self.region = f"dd-{region}"
        self.location = self.module.params["location"]

        libcloud.security.VERIFY_SSL_CERT = self.module.params["validate_certs"]

        self.driver = get_driver(Provider.DIMENSIONDATA)(self.user_id, self.key, region=self.region)

        # Determine the MCP API version (this depends on the target datacenter).
        self.mcp_version = self.get_mcp_version(self.location)

        # Optional "wait-for-completion" arguments
        if "wait" in self.module.params:
            self.wait = self.module.params["wait"]
            self.wait_time = self.module.params["wait_time"]
            self.wait_poll_interval = self.module.params["wait_poll_interval"]
        else:
            self.wait = False
            self.wait_time = 0
            self.wait_poll_interval = 0

    def get_credentials(self):
        """
        Get user_id and key from module configuration, environment, or dotfile.
        Order of priority is module, environment, dotfile.

        To set in environment:

            export MCP_USER='myusername'
            export MCP_PASSWORD='mypassword'

        To set in dot file place a file at ~/.dimensiondata with
        the following contents:

            [dimensiondatacloud]
            MCP_USER: myusername
            MCP_PASSWORD: mypassword

        :raises MissingCredentialsError: when no source provided both values.
        """
        if not HAS_LIBCLOUD:
            self.module.fail_json(msg="libcloud is required for this module.")

        user_id = None
        key = None

        # First, try the module configuration
        if "mcp_user" in self.module.params:
            if "mcp_password" not in self.module.params:
                self.module.fail_json(
                    msg='"mcp_user" parameter was specified, but not "mcp_password" (either both must be specified, or neither).'
                )

            user_id = self.module.params["mcp_user"]
            key = self.module.params["mcp_password"]

        # Fall back to environment
        if not user_id or not key:
            user_id = os.environ.get("MCP_USER", None)
            key = os.environ.get("MCP_PASSWORD", None)

        # Finally, try dotfile (~/.dimensiondata)
        if not user_id or not key:
            home = expanduser("~")
            config = configparser.RawConfigParser()
            config.read(f"{home}/.dimensiondata")

            try:
                user_id = config.get("dimensiondatacloud", "MCP_USER")
                key = config.get("dimensiondatacloud", "MCP_PASSWORD")
            except (configparser.NoSectionError, configparser.NoOptionError):
                pass

        # One or more credentials not found. Function can't recover from this
        # so it has to raise an error instead of fail silently.
        if not user_id:
            raise MissingCredentialsError("Dimension Data user id not found")
        elif not key:
            raise MissingCredentialsError("Dimension Data key not found")

        # Both found, return data
        return dict(user_id=user_id, key=key)

    def get_mcp_version(self, location):
        """
        Get the MCP version for the specified location.

        Inferred from the location (datacenter) display name; see the note
        on MCP_2_LOCATION_NAME_PATTERN above about reliability.
        """
        location = self.driver.ex_get_location_by_id(location)
        if MCP_2_LOCATION_NAME_PATTERN.match(location.name):
            return "2.0"
        return "1.0"

    def get_network_domain(self, locator, location):
        """
        Retrieve a network domain by its name or Id.

        :raises UnknownNetworkError: when no matching network domain exists.
        """
        if is_uuid(locator):
            network_domain = self.driver.ex_get_network_domain(locator)
        else:
            # Not a UUID: resolve by display name within the location.
            matching_network_domains = [
                network_domain
                for network_domain in self.driver.ex_list_network_domains(location=location)
                if network_domain.name == locator
            ]

            if matching_network_domains:
                network_domain = matching_network_domains[0]
            else:
                network_domain = None

        if network_domain:
            return network_domain

        raise UnknownNetworkError(f"Network '{locator}' could not be found")

    def get_vlan(self, locator, location, network_domain):
        """
        Get a VLAN object by its name or id

        :raises UnknownVLANError: when no matching VLAN exists.
        """
        if is_uuid(locator):
            vlan = self.driver.ex_get_vlan(locator)
        else:
            # Not a UUID: resolve by display name within the network domain.
            matching_vlans = [
                vlan for vlan in self.driver.ex_list_vlans(location, network_domain) if vlan.name == locator
            ]

            if matching_vlans:
                vlan = matching_vlans[0]
            else:
                vlan = None

        if vlan:
            return vlan

        raise UnknownVLANError(f"VLAN '{locator}' could not be found")

    @staticmethod
    def argument_spec(**additional_argument_spec):
        """
        Build an argument specification for a Dimension Data module.

        :param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any).
        :return: A dict containing the argument specification.
        """
        spec = dict(
            region=dict(type="str", default="na"),
            mcp_user=dict(type="str", required=False),
            mcp_password=dict(type="str", required=False, no_log=True),
            location=dict(type="str", required=True),
            validate_certs=dict(type="bool", required=False, default=True),
        )

        if additional_argument_spec:
            spec.update(additional_argument_spec)

        return spec

    @staticmethod
    def argument_spec_with_wait(**additional_argument_spec):
        """
        Build an argument specification for a Dimension Data module that includes "wait for completion" arguments.

        :param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any).
        :return: A dict containing the argument specification.
        """
        spec = DimensionDataModule.argument_spec(
            wait=dict(type="bool", required=False, default=False),
            wait_time=dict(type="int", required=False, default=600),
            wait_poll_interval=dict(type="int", required=False, default=2),
        )

        if additional_argument_spec:
            spec.update(additional_argument_spec)

        return spec

    @staticmethod
    def required_together(*additional_required_together):
        """
        Get the basic argument specification for Dimension Data modules indicating which arguments are must be specified together.

        :param additional_required_together: An optional list representing the specification for additional module arguments that must be specified together.
        :return: An array containing the argument specifications.
        """
        required_together = [["mcp_user", "mcp_password"]]

        if additional_required_together:
            required_together.extend(additional_required_together)

        return required_together
class LibcloudNotFound(Exception):
    """
    Exception raised when Apache libcloud cannot be found.
    """

    # Marker exception only; carries no extra state.
    pass
class MissingCredentialsError(Exception):
    """
    Exception raised when credentials for Dimension Data CloudControl cannot be found.
    """

    # Raised by DimensionDataModule.get_credentials(); message names the missing value.
    pass
class UnknownNetworkError(Exception):
    """
    Exception raised when a network or network domain cannot be found.
    """

    # Raised by DimensionDataModule.get_network_domain().
    pass
class UnknownVLANError(Exception):
    """
    Exception raised when a VLAN cannot be found.
    """

    # Raised by DimensionDataModule.get_vlan().
    pass
def get_dd_regions():
    """
    Get the list of available regions whose vendor is Dimension Data.
    """
    # Dimension Data endpoints carry a "dd-" prefix; strip it off.
    return [name[3:] for name in API_ENDPOINTS.keys() if name.startswith("dd-")]
def is_uuid(u, version=4):
"""
Test if valid v4 UUID
"""
try:
uuid_obj = UUID(u, version=version)
return str(uuid_obj) == u
except ValueError:
return False

View File

@@ -74,10 +74,6 @@ _django_std_arg_fmts: dict[str, ArgFormatter] = dict(
# keys can be used in _django_args
_args_menu = dict(
std=(django_std_args, _django_std_arg_fmts),
database=(_database_dash, {"database": _django_std_arg_fmts["database_dash"]}), # deprecate, remove in 13.0.0
noinput=({}, {"noinput": cmd_runner_fmt.as_fixed("--noinput")}), # deprecate, remove in 13.0.0
dry_run=({}, {"dry_run": cmd_runner_fmt.as_bool("--dry-run")}), # deprecate, remove in 13.0.0
check=({}, {"check": cmd_runner_fmt.as_bool("--check")}), # deprecate, remove in 13.0.0
database_dash=(_database_dash, {}),
data=(_data, {}),
)

View File

@@ -3019,10 +3019,6 @@ class KeycloakAPI:
except Exception as e:
self.fail_request(e, msg=f"Could not get groups for user {user_id} in realm {realm}: {e}")
def add_user_in_group(self, user_id, group_id, realm: str = "master"):
"""DEPRECATED: Call add_user_to_group(...) instead. This method is scheduled for removal in community.general 13.0.0."""
return self.add_user_to_group(user_id, group_id, realm)
def add_user_to_group(self, user_id, group_id, realm: str = "master"):
"""
Add a user to a group.

View File

@@ -1,171 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
#
# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
# SPDX-License-Identifier: BSD-2-Clause
# This module utils is deprecated and will be removed in community.general 13.0.0
from __future__ import annotations
import hmac
import os
import re
from urllib.parse import urlparse
try:
from hashlib import sha1
except ImportError:
import sha as sha1 # type: ignore[no-redef]
HASHED_KEY_MAGIC = "|1|"
def is_ssh_url(url):
    """Return True if ``url`` looks like an SSH URL (scp-style or scheme-prefixed)."""
    # scp-style syntax: user@host:path (no explicit scheme).
    if "@" in url and "://" not in url:
        return True
    for scheme in ("ssh://", "git+ssh://", "ssh+git://"):
        if url.startswith(scheme):
            return True
    return False
def get_fqdn_and_port(repo_url):
    """Chop the hostname and port out of a url.

    Returns a ``(fqdn, port)`` tuple; either element is None when it cannot
    be determined.  For scp-style URLs (user@host:path) the port is always
    None, because the colon separates the path there.
    """
    fqdn = None
    port = None
    # Matches a bracketed IPv6 literal with an optional :port suffix.
    ipv6_re = re.compile(r"(\[[^]]*\])(?::([0-9]+))?")
    if "@" in repo_url and "://" not in repo_url:
        # most likely an user@host:path or user@host/path type URL
        repo_url = repo_url.split("@", 1)[1]
        match = ipv6_re.match(repo_url)
        # For this type of URL, colon specifies the path, not the port
        if match:
            # NOTE(review): the second regex group is the port group, but in
            # this branch it is deliberately discarded (bound to ``path``).
            fqdn, path = match.groups()
        elif ":" in repo_url:
            fqdn = repo_url.split(":")[0]
        elif "/" in repo_url:
            fqdn = repo_url.split("/")[0]
    elif "://" in repo_url:
        # this should be something we can parse with urlparse
        parts = urlparse(repo_url)
        # parts[1] is the netloc, possibly "user@host:port".
        fqdn = parts[1]
        if "@" in fqdn:
            fqdn = fqdn.split("@", 1)[1]
        match = ipv6_re.match(fqdn)
        if match:
            fqdn, port = match.groups()
        elif ":" in fqdn:
            fqdn, port = fqdn.split(":")[0:2]
    return fqdn, port
def check_hostkey(module, fqdn):
    # True when fqdn already appears in a known_hosts file.
    return not not_in_host_file(module, fqdn)
# this is a variant of code found in connection_plugins/paramiko.py and we should modify
# the paramiko code to import and use this.
def not_in_host_file(self, host):
    """Return True when ``host`` is not present in any known_hosts file.

    Scans the user's known_hosts plus the usual system-wide locations,
    handling both plain and hashed (``|1|salt|hash``) entries.
    """
    import base64

    if "USER" in os.environ:
        user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
    else:
        user_host_file = "~/.ssh/known_hosts"
    user_host_file = os.path.expanduser(user_host_file)

    host_file_list = [
        user_host_file,
        "/etc/ssh/ssh_known_hosts",
        "/etc/ssh/ssh_known_hosts2",
        "/etc/openssh/ssh_known_hosts",
    ]

    hfiles_not_found = 0
    for hf in host_file_list:
        if not os.path.exists(hf):
            hfiles_not_found += 1
            continue

        try:
            with open(hf) as host_fh:
                data = host_fh.read()
        except OSError:
            hfiles_not_found += 1
            continue

        for line in data.split("\n"):
            if line is None or " " not in line:
                continue
            tokens = line.split()
            if tokens[0].find(HASHED_KEY_MAGIC) == 0:
                # Hashed entry: |1|base64(salt)|base64(hmac-sha1(salt, host)).
                try:
                    (kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|", 2)
                    # BUGFIX: str.decode("base64") does not exist on Python 3
                    # and hmac requires bytes, so hashed entries could never
                    # match (the failure was silently swallowed below). Use
                    # base64.b64decode and encode the host explicitly.
                    hashed = hmac.new(base64.b64decode(kn_salt), digestmod=sha1)
                    hashed.update(host.encode("utf-8") if isinstance(host, str) else host)
                    if hashed.digest() == base64.b64decode(kn_host):
                        return False
                except Exception:
                    # invalid hashed host key, skip it
                    continue
            else:
                # standard host file entry
                if host in tokens[0]:
                    return False

    return True
def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False):
    """Use ssh-keyscan to fetch the host key for ``fqdn`` and append it to
    the user's known_hosts file.

    Fails the module when the ~/.ssh directory is missing (unless
    ``create_dir`` is set) or when ssh-keyscan produces no output.
    Returns the (rc, stdout, stderr) of the ssh-keyscan invocation.
    """
    keyscan_cmd = module.get_bin_path("ssh-keyscan", True)

    if "USER" in os.environ:
        user_ssh_dir = os.path.expandvars("~${USER}/.ssh/")
        user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
    else:
        user_ssh_dir = "~/.ssh/"
        user_host_file = "~/.ssh/known_hosts"
    user_ssh_dir = os.path.expanduser(user_ssh_dir)

    if not os.path.exists(user_ssh_dir):
        if create_dir:
            try:
                # 0700 permissions, matching OpenSSH expectations.
                os.makedirs(user_ssh_dir, int("700", 8))
            except Exception:
                module.fail_json(msg=f"failed to create host key directory: {user_ssh_dir}")
        else:
            module.fail_json(msg=f"{user_ssh_dir} does not exist")
    elif not os.path.isdir(user_ssh_dir):
        module.fail_json(msg=f"{user_ssh_dir} is not a directory")

    if port:
        this_cmd = f"{keyscan_cmd} -t {key_type} -p {port} {fqdn}"
    else:
        this_cmd = f"{keyscan_cmd} -t {key_type} {fqdn}"

    # Force a C locale so output parsing is not locale-dependent.
    rc, out, err = module.run_command(this_cmd, environ_update={"LANGUAGE": "C", "LC_ALL": "C"})

    # ssh-keyscan gives a 0 exit code and prints nothing on timeout
    if rc != 0 or not out:
        msg = "failed to retrieve hostkey"
        if not out:
            msg += f'. "{this_cmd}" returned no matches.'
        else:
            msg += f' using command "{this_cmd}". [stdout]: {out}'
        if err:
            msg += f" [stderr]: {err}"
        module.fail_json(msg=msg)

    module.append_to_file(user_host_file, out)

    return rc, out, err

View File

@@ -1,245 +0,0 @@
# Copyright (c) Ansible project
# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
# SPDX-License-Identifier: BSD-2-Clause
from __future__ import annotations
import time
#
# DEPRECATED
#
# This module utils is deprecated and will be removed in community.general 13.0.0
#
class OneAndOneResources:
    # Symbolic names for the 1&1 resource types handled by get_resource()
    # and the wait_for_* helpers below.
    firewall_policy = "firewall_policy"
    load_balancer = "load_balancer"
    monitoring_policy = "monitoring_policy"
    private_network = "private_network"
    public_ip = "public_ip"
    role = "role"
    server = "server"
    user = "user"
    vpn = "vpn"
def get_resource(oneandone_conn, resource_type, resource_id):
    """Fetch a resource by dispatching to the matching client getter."""
    getters = {
        "firewall_policy": oneandone_conn.get_firewall,
        "load_balancer": oneandone_conn.get_load_balancer,
        "monitoring_policy": oneandone_conn.get_monitoring_policy,
        "private_network": oneandone_conn.get_private_network,
        "public_ip": oneandone_conn.get_public_ip,
        "role": oneandone_conn.get_role,
        "server": oneandone_conn.get_server,
        "user": oneandone_conn.get_user,
        "vpn": oneandone_conn.get_vpn,
    }
    # Unknown resource_type yields None here and a TypeError on the call,
    # matching the original behavior.
    getter = getters.get(resource_type)
    return getter(resource_id)
def get_datacenter(oneandone_conn, datacenter, full_object=False):
    """
    Validates the datacenter exists by ID or country code.
    Returns the datacenter ID (or the full dict when full_object=True);
    None when no match is found.
    """
    for candidate in oneandone_conn.list_datacenters():
        if datacenter not in (candidate["id"], candidate["country_code"]):
            continue
        return candidate if full_object else candidate["id"]
def get_fixed_instance_size(oneandone_conn, fixed_instance_size, full_object=False):
    """
    Validates the fixed instance size exists by ID or name.
    Returns the instance size ID (or the full dict when full_object=True);
    None when no match is found.
    """
    for candidate in oneandone_conn.fixed_server_flavors():
        if fixed_instance_size not in (candidate["id"], candidate["name"]):
            continue
        return candidate if full_object else candidate["id"]
def get_appliance(oneandone_conn, appliance, full_object=False):
    """
    Validates the appliance exists by ID or name.
    Returns the appliance ID (or the full dict when full_object=True);
    None when no match is found.
    """
    # Only image appliances are considered (q="IMAGE").
    for candidate in oneandone_conn.list_appliances(q="IMAGE"):
        if appliance not in (candidate["id"], candidate["name"]):
            continue
        return candidate if full_object else candidate["id"]
def get_private_network(oneandone_conn, private_network, full_object=False):
    """
    Validates the private network exists by ID or name.
    Returns the private network ID (or the full dict when full_object=True);
    None when no match is found.
    """
    for candidate in oneandone_conn.list_private_networks():
        if private_network not in (candidate["name"], candidate["id"]):
            continue
        return candidate if full_object else candidate["id"]
def get_monitoring_policy(oneandone_conn, monitoring_policy, full_object=False):
    """
    Validates the monitoring policy exists by ID or name.
    Returns the monitoring policy ID (or the full dict when full_object=True);
    None when no match is found.
    """
    for candidate in oneandone_conn.list_monitoring_policies():
        if monitoring_policy not in (candidate["name"], candidate["id"]):
            continue
        return candidate if full_object else candidate["id"]
def get_firewall_policy(oneandone_conn, firewall_policy, full_object=False):
    """
    Validates the firewall policy exists by ID or name.
    Returns the firewall policy ID (or the full dict when full_object=True);
    None when no match is found.
    """
    for candidate in oneandone_conn.list_firewall_policies():
        if firewall_policy not in (candidate["name"], candidate["id"]):
            continue
        return candidate if full_object else candidate["id"]
def get_load_balancer(oneandone_conn, load_balancer, full_object=False):
    """
    Validates the load balancer exists by ID or name.
    Returns the load balancer ID (or the full dict when full_object=True);
    None when no match is found.
    """
    for candidate in oneandone_conn.list_load_balancers():
        if load_balancer not in (candidate["name"], candidate["id"]):
            continue
        return candidate if full_object else candidate["id"]
def get_server(oneandone_conn, instance, full_object=False):
    """
    Validates that the server exists, whether by ID or name.
    Returns the server ID (or the full dict when full_object=True);
    None when no match is found.
    """
    for server in oneandone_conn.list_servers(per_page=1000):
        if instance not in (server["id"], server["name"]):
            continue
        return server if full_object else server["id"]
def get_user(oneandone_conn, user, full_object=False):
    """
    Validates that the user exists by ID or name.
    Returns the user ID (or the full dict when full_object=True);
    None when no match is found.
    """
    for candidate in oneandone_conn.list_users(per_page=1000):
        if user not in (candidate["id"], candidate["name"]):
            continue
        return candidate if full_object else candidate["id"]
def get_role(oneandone_conn, role, full_object=False):
    """
    Given a name or ID, validates that the role exists.
    Returns the role ID (or the full dict when full_object=True);
    None when no match is found.
    """
    for candidate in oneandone_conn.list_roles(per_page=1000):
        if role not in (candidate["id"], candidate["name"]):
            continue
        return candidate if full_object else candidate["id"]
def get_vpn(oneandone_conn, vpn, full_object=False):
    """
    Validates that the vpn exists by ID or name.
    Returns the vpn ID (or the full dict when full_object=True);
    None when no match is found.
    """
    for candidate in oneandone_conn.list_vpns(per_page=1000):
        if vpn not in (candidate["id"], candidate["name"]):
            continue
        return candidate if full_object else candidate["id"]
def get_public_ip(oneandone_conn, public_ip, full_object=False):
    """
    Validates that the public ip exists by ID or address.
    Returns the public ip ID (or the full dict when full_object=True);
    None when no match is found.
    """
    for candidate in oneandone_conn.list_public_ips(per_page=1000):
        if public_ip not in (candidate["id"], candidate["ip"]):
            continue
        return candidate if full_object else candidate["id"]
def wait_for_resource_creation_completion(oneandone_conn, resource_type, resource_id, wait_timeout, wait_interval):
    """
    Waits for the resource create operation to complete based on the timeout period.

    Polls every ``wait_interval`` seconds until the resource reaches its
    success state ("powered_on" for servers, "active" for everything else),
    raising on failure, unknown states, or timeout.
    """
    wait_timeout = time.time() + wait_timeout
    while wait_timeout > time.time():
        time.sleep(wait_interval)

        # Refresh the resource info
        resource = get_resource(oneandone_conn, resource_type, resource_id)

        # Servers nest their state one level deeper than other resources.
        if resource_type == OneAndOneResources.server:
            resource_state = resource["status"]["state"]
        else:
            resource_state = resource["state"]

        if (resource_type == OneAndOneResources.server and resource_state.lower() == "powered_on") or (
            resource_type != OneAndOneResources.server and resource_state.lower() == "active"
        ):
            return
        elif resource_state.lower() == "failed":
            raise Exception(f"{resource_type} creation failed for {resource_id}")
        elif resource_state.lower() in ("active", "enabled", "deploying", "configuring"):
            # NOTE(review): "active" in this tuple is only reachable for
            # servers, since non-server resources return on "active" above.
            continue
        else:
            raise Exception(f"Unknown {resource_type} state {resource_state}")
    raise Exception(f"Timed out waiting for {resource_type} completion for {resource_id}")
def wait_for_resource_deletion_completion(oneandone_conn, resource_type, resource_id, wait_timeout, wait_interval):
    """
    Waits for the resource delete operation to complete based on the timeout period.

    Completion is detected by scanning the account's recent DELETE log entries
    rather than polling the resource itself.

    :param oneandone_conn: 1&1 API client used to read the operation logs.
    :param resource_type: A OneAndOneResources member; only server and
        private_network are supported here.
    :param resource_id: ID of the resource being deleted.
    :param wait_timeout: Maximum number of seconds to wait.
    :param wait_interval: Seconds to sleep between polls.
    :raises Exception: for unsupported resource types, or when the timeout expires.
    """
    # Convert the relative timeout into an absolute deadline.
    wait_timeout = time.time() + wait_timeout
    while wait_timeout > time.time():
        time.sleep(wait_interval)
        # Refresh the operation info
        logs = oneandone_conn.list_logs(q="DELETE", period="LAST_HOUR", sort="-start_date")
        # Map the resource kind to the log entry "type" used by the API.
        if resource_type == OneAndOneResources.server:
            _type = "VM"
        elif resource_type == OneAndOneResources.private_network:
            _type = "PRIVATENETWORK"
        else:
            raise Exception(f"Unsupported wait_for delete operation for {resource_type} resource")
        for log in logs:
            # A successfully completed DELETE entry for this resource means it is gone.
            if (
                log["resource"]["id"] == resource_id
                and log["action"] == "DELETE"
                and log["type"] == _type
                and log["status"]["state"] == "OK"
            ):
                return
    raise Exception(f"Timed out waiting for {resource_type} deletion for {resource_id}")

File diff suppressed because it is too large Load Diff

View File

@@ -104,22 +104,3 @@ def make_process_dict(include_injected, include_deps=False):
return results, raw_data
return process_dict
def make_process_list(mod_helper, **kwargs):
    #
    # ATTENTION!
    #
    # The function `make_process_list()` is deprecated and will be removed in community.general 13.0.0
    #
    # Adapter that wraps make_process_dict() and turns its dict result into a
    # list holding only the entry whose key equals kwargs["name"].
    # NOTE(review): the visible fragment of make_process_dict() declares
    # (include_injected, include_deps=False); passing mod_helper here looks
    # inconsistent — confirm against the full file before reuse.
    process_dict = make_process_dict(mod_helper, **kwargs)
    def process_list(rc, out, err):
        # Delegate parsing of the command output to the dict-based processor.
        res_dict, raw_data = process_dict(rc, out, err)
        if kwargs.get("include_raw"):
            # Expose the unparsed output on the module helper when requested.
            mod_helper.vars.raw_output = raw_data
        # Keep only the entry matching the requested name (yields zero or one item).
        return [entry for name, entry in res_dict.items() if name == kwargs.get("name")]
    return process_list

View File

@@ -1,171 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
# Copyright (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
#
# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
# SPDX-License-Identifier: BSD-2-Clause
# This module utils is deprecated and will be removed in community.general 13.0.0
from __future__ import annotations
from stringprep import (
in_table_a1,
in_table_b1,
in_table_c3,
in_table_c4,
in_table_c5,
in_table_c6,
in_table_c7,
in_table_c8,
in_table_c9,
in_table_c12,
in_table_c21_c22,
in_table_d1,
in_table_d2,
)
from unicodedata import normalize
def is_unicode_str(string):
    """Return ``True`` when ``string`` is a unicode ``str`` instance, else ``False``.

    The previous ``True if isinstance(...) else False`` form was redundant:
    ``isinstance`` already returns a bool.
    """
    return isinstance(string, str)
def mapping_profile(string):
    """RFC4013 Mapping profile implementation.

    Per RFC 4013, section 2.1, this profile specifies:
      - non-ASCII space characters [StringPrep, C.1.2] that can be mapped
        are mapped to SPACE (U+0020), and
      - the "commonly mapped to nothing" characters [StringPrep, B.1]
        are dropped from the string.
    """
    mapped = []
    for ch in string:
        if in_table_b1(ch):
            # "Commonly mapped to nothing": drop the character entirely.
            continue
        # Map non-ASCII space characters (that can be mapped) to U+0020.
        mapped.append(" " if in_table_c12(ch) else ch)
    return "".join(mapped)
def is_ral_string(string):
    """RFC3454 Check bidirectional category of the string.

    Per RFC 3454, table D.1 lists the characters in Unicode bidirectional
    categories "R" and "AL".  If a string contains any RandALCat character,
    a RandALCat character MUST be both the first and the last character.

    :param string: String to classify.
    :return: ``True`` when the string starts (and ends) with a RandALCat
        character, ``False`` otherwise.
    :raises ValueError: when the string starts with a RandALCat character
        but does not end with one.
    """
    if not string:
        # Robustness fix: an empty string has no first/last character; treat it
        # as non-RandALCat instead of raising IndexError on string[0].
        return False
    if in_table_d1(string[0]):
        if not in_table_d1(string[-1]):
            raise ValueError("RFC3454: incorrect bidirectional RandALCat string.")
        return True
    return False
def prohibited_output_profile(string):
    """RFC4013 Prohibited output profile implementation.

    Implements:
      - RFC4013 2.3 Prohibited Output (StringPrep tables C.1.2, C.2.1, C.2.2,
        C.3, C.4, C.5, C.6, C.7, C.8, C.9),
      - RFC4013 2.4 Bidirectional Characters (via RFC3454 tables D.1/D.2),
      - RFC4013 2.5 Unassigned Code Points (table A.1).

    Raises ValueError on the first prohibited character found; returns None
    when the string is clean.
    """
    # Determine how to handle bidirectional characters (RFC3454): a RandALCat
    # string must contain no LCat characters (D.2); an LCat string must contain
    # no RandALCat characters (D.1).
    if is_ral_string(string):
        is_prohibited_bidi_ch = in_table_d2
        bidi_table = "D.2"
    else:
        is_prohibited_bidi_ch = in_table_d1
        bidi_table = "D.1"
    RFC = "RFC4013"
    # Ordered (predicate, error message) pairs; order matches the original
    # check sequence so the same character triggers the same error.
    checks = (
        (in_table_c12, f"{RFC}: prohibited non-ASCII space characters that cannot be replaced (C.1.2)."),
        (in_table_c21_c22, f"{RFC}: prohibited control characters (C.2.1)."),
        (in_table_c3, f"{RFC}: prohibited private Use characters (C.3)."),
        (in_table_c4, f"{RFC}: prohibited non-character code points (C.4)."),
        (in_table_c5, f"{RFC}: prohibited surrogate code points (C.5)."),
        (in_table_c6, f"{RFC}: prohibited inappropriate for plain text characters (C.6)."),
        (in_table_c7, f"{RFC}: prohibited inappropriate for canonical representation characters (C.7)."),
        (in_table_c8, f"{RFC}: prohibited change display properties / deprecated characters (C.8)."),
        (in_table_c9, f"{RFC}: prohibited tagging characters (C.9)."),
        (is_prohibited_bidi_ch, f"{RFC}: prohibited bidi characters ({bidi_table})."),
        (in_table_a1, f"{RFC}: prohibited unassigned code points (A.1)."),
    )
    for c in string:
        for predicate, message in checks:
            if predicate(c):
                raise ValueError(message)
def saslprep(string):
    """RFC4013 implementation.

    Prepares a Unicode string for comparison by applying the "SASLprep"
    profile (RFC4013) of the "stringprep" algorithm (RFC3454).  SASLprep is
    intended for SASL mechanisms (PLAIN, CRAM-MD5, DIGEST-MD5) and other
    protocols exchanging simple user names and/or passwords.

    Args:
        string (unicode string): Unicode string to validate and prepare.
    Returns:
        Prepared unicode string.
    Raises:
        TypeError: when the input is not a ``str``.
        ValueError: when the string contains prohibited characters.
    """
    # RFC4013 assumes all strings are comprised of Unicode characters;
    # reject anything that is not a unicode str.
    if not is_unicode_str(string):
        raise TypeError(f"input must be of type str, not {type(string)}")
    # RFC4013 2.1 (Mapping) followed by 2.2 (Normalization, form KC).
    prepared = normalize("NFKC", mapping_profile(string))
    if not prepared:
        return ""
    # RFC4013 2.3 (Prohibited Output), 2.4 (Bidirectional Characters),
    # 2.5 (Unassigned Code Points) — raises on violation.
    prohibited_output_profile(prepared)
    return prepared

View File

@@ -1,232 +0,0 @@
#!/usr/bin/python
# Copyright Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
DOCUMENTATION = r"""
module: atomic_container
short_description: Manage the containers on the atomic host platform
description:
- Manage the containers on the atomic host platform.
- Allows to manage the lifecycle of a container on the atomic host platform.
deprecated:
removed_in: 13.0.0
why: Project Atomic was sunset by the end of 2019.
alternative: There is none.
author: "Giuseppe Scrivano (@giuseppe)"
requirements:
- atomic
notes:
- According to U(https://projectatomic.io/) the project has been sunset around 2019/2020, in favor of C(podman) and Fedora CoreOS.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
backend:
description:
- Define the backend to use for the container.
required: true
choices: ["docker", "ostree"]
type: str
name:
description:
- Name of the container.
required: true
type: str
image:
description:
- The image to use to install the container.
required: true
type: str
rootfs:
description:
- Define the rootfs of the image.
type: str
state:
description:
- State of the container.
choices: ["absent", "latest", "present", "rollback"]
default: "latest"
type: str
mode:
description:
- Define if it is an user or a system container.
choices: ["user", "system"]
type: str
values:
description:
- Values for the installation of the container.
- This option is permitted only with mode 'user' or 'system'.
- The values specified here will be used at installation time as --set arguments for atomic install.
type: list
elements: str
default: []
"""
EXAMPLES = r"""
- name: Install the etcd system container
community.general.atomic_container:
name: etcd
image: rhel/etcd
backend: ostree
state: latest
mode: system
values:
- ETCD_NAME=etcd.server
- name: Uninstall the etcd system container
community.general.atomic_container:
name: etcd
image: rhel/etcd
backend: ostree
state: absent
mode: system
"""
RETURN = r"""
msg:
description: The command standard output.
returned: always
type: str
sample: 'Using default tag: latest ...'
"""
# import module snippets
import traceback
from ansible.module_utils.basic import AnsibleModule
def do_install(module, mode, rootfs, container, image, values_list, backend):
    """Run ``atomic install`` for the container and exit the module.

    Builds the command from mode (--system/--user), optional rootfs,
    the --set values and the image, then reports changed=True when the
    output shows image layers being fetched.
    """
    atomic_bin = module.get_bin_path("atomic")
    args = [atomic_bin, "install", f"--storage={backend}", f"--name={container}"]
    if mode == "system":
        args.append("--system")
    elif mode == "user":
        args.append("--user")
    if rootfs:
        args.append(f"--rootfs={rootfs}")
    args.extend(values_list)
    args.append(image)
    rc, out, err = module.run_command(args, check_rc=False)
    if rc != 0:
        module.fail_json(rc=rc, msg=err)
    else:
        # Layer-fetch messages in the output indicate a real installation happened.
        module.exit_json(msg=out, changed=("Extracting" in out or "Copying blob" in out))
def do_update(module, container, image, values_list):
    """Run ``atomic containers update --rebase=<image>`` and exit the module.

    Reports changed=True when the output shows image layers being fetched.
    """
    atomic_bin = module.get_bin_path("atomic")
    cmd = [atomic_bin, "containers", "update", f"--rebase={image}"]
    cmd.extend(values_list)
    cmd.append(container)
    rc, out, err = module.run_command(cmd, check_rc=False)
    if rc != 0:
        module.fail_json(rc=rc, msg=err)
    else:
        module.exit_json(msg=out, changed=("Extracting" in out or "Copying blob" in out))
def do_uninstall(module, name, backend):
    """Run ``atomic uninstall`` for the named container and exit the module.

    Uninstalling is always reported as a change on success.
    """
    atomic_bin = module.get_bin_path("atomic")
    cmd = [atomic_bin, "uninstall", f"--storage={backend}", name]
    rc, out, err = module.run_command(cmd, check_rc=False)
    if rc != 0:
        module.fail_json(rc=rc, msg=err)
    module.exit_json(msg=out, changed=True)
def do_rollback(module, name):
    """Run ``atomic containers rollback`` for the named container and exit.

    Reports changed=True only when the output confirms a rollback happened.
    """
    atomic_bin = module.get_bin_path("atomic")
    rc, out, err = module.run_command([atomic_bin, "containers", "rollback", name], check_rc=False)
    if rc != 0:
        module.fail_json(rc=rc, msg=err)
    else:
        module.exit_json(msg=out, changed=("Rolling back" in out))
def core(module):
    """Determine the container's current state and dispatch the requested action.

    Lists containers for the selected backend, decides whether the named
    container is present, then installs, updates, uninstalls or rolls back
    via the do_* helpers.  All paths exit the module via exit_json/fail_json.
    """
    mode = module.params["mode"]
    name = module.params["name"]
    image = module.params["image"]
    rootfs = module.params["rootfs"]
    values = module.params["values"]
    backend = module.params["backend"]
    state = module.params["state"]
    atomic_bin = module.get_bin_path("atomic")
    # Force the C locale so output matched below is not localized.
    module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C")
    # Translate each value into a --set argument for 'atomic install'.
    values_list = [f"--set={x}" for x in values] if values else []
    # Query existing containers, filtered by backend and container name.
    args = [
        atomic_bin,
        "containers",
        "list",
        "--no-trunc",
        "-n",
        "--all",
        "-f",
        f"backend={backend}",
        "-f",
        f"container={name}",
    ]
    rc, out, err = module.run_command(args, check_rc=False)
    if rc != 0:
        module.fail_json(rc=rc, msg=err)
        return  # defensive; fail_json normally terminates the module
    # NOTE(review): substring match — a container whose name merely contains
    # `name` would also count as present; confirm this is acceptable.
    present = name in out
    if state == "present" and present:
        module.exit_json(msg=out, changed=False)
    elif (state in ["latest", "present"]) and not present:
        do_install(module, mode, rootfs, name, image, values_list, backend)
    elif state == "latest":
        # Present and latest requested: rebase onto the given image.
        do_update(module, name, image, values_list)
    elif state == "absent":
        if not present:
            module.exit_json(msg="The container is not present", changed=False)
        else:
            do_uninstall(module, name, backend)
    elif state == "rollback":
        do_rollback(module, name)
def main():
    """Entry point: declare the argument spec, validate, and run the module logic."""
    module = AnsibleModule(
        argument_spec=dict(
            mode=dict(choices=["user", "system"]),
            name=dict(required=True),
            image=dict(required=True),
            rootfs=dict(),
            state=dict(default="latest", choices=["present", "absent", "latest", "rollback"]),
            backend=dict(required=True, choices=["docker", "ostree"]),
            values=dict(type="list", default=[], elements="str"),
        ),
    )
    # The documented contract is that 'values' is only permitted with
    # mode=user or mode=system.  The previous check compared mode against
    # "default" — not one of the declared choices — so it could never match
    # and the constraint was silently never enforced.  'values' defaults to
    # [], so truthiness (not an 'is not None' check) is the right test.
    if module.params["values"] and module.params["mode"] not in ("user", "system"):
        module.fail_json(msg="values is supported only with user or system mode")
    # Verify that the platform supports the atomic command.
    dummy = module.get_bin_path("atomic", required=True)
    try:
        core(module)
    except Exception as e:
        module.fail_json(msg=f"Unanticipated error running atomic: {e}", exception=traceback.format_exc())
if __name__ == "__main__":
main()

View File

@@ -1,105 +0,0 @@
#!/usr/bin/python
# Copyright Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
DOCUMENTATION = r"""
module: atomic_host
short_description: Manage the atomic host platform
description:
- Manage the atomic host platform.
- Rebooting of Atomic host platform should be done outside this module.
deprecated:
removed_in: 13.0.0
why: Project Atomic was sunset by the end of 2019.
alternative: There is none.
author:
- Saravanan KR (@krsacme)
notes:
- Host should be an atomic platform (verified by existence of '/run/ostree-booted' file).
- According to U(https://projectatomic.io/) the project has been sunset around 2019/2020, in favor of C(podman) and Fedora CoreOS.
requirements:
- atomic
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
revision:
description:
- The version number of the atomic host to be deployed.
- Providing V(latest) will upgrade to the latest available version.
default: 'latest'
aliases: [version]
type: str
"""
EXAMPLES = r"""
- name: Upgrade the atomic host platform to the latest version (atomic host upgrade)
community.general.atomic_host:
revision: latest
- name: Deploy a specific revision as the atomic host (atomic host deploy 23.130)
community.general.atomic_host:
revision: 23.130
"""
RETURN = r"""
msg:
description: The command standard output.
returned: always
type: str
sample: 'Already on latest'
"""
import os
import traceback
from ansible.module_utils.basic import AnsibleModule
def core(module):
    """Upgrade to latest, or deploy a specific revision, of the atomic host.

    Runs 'atomic host upgrade' when revision is "latest", otherwise
    'atomic host deploy <revision>'.  Exit code 77 from upgrade means
    there was nothing to do.
    """
    revision = module.params["revision"]
    atomic_bin = module.get_bin_path("atomic", required=True)
    # Force a stable C locale so command output is predictable.
    module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C")
    command = (
        [atomic_bin, "host", "upgrade"]
        if revision == "latest"
        else [atomic_bin, "host", "deploy", revision]
    )
    rc, out, err = module.run_command(command, check_rc=False)
    if rc == 77 and revision == "latest":
        module.exit_json(msg="Already on latest", changed=False)
    elif rc != 0:
        module.fail_json(rc=rc, msg=err)
    else:
        module.exit_json(msg=out, changed=True)
def main():
    """Entry point: verify the host is an Atomic platform, then upgrade/deploy."""
    module = AnsibleModule(
        argument_spec=dict(
            revision=dict(type="str", default="latest", aliases=["version"]),
        ),
    )
    # Verify that the platform is atomic host
    # (/run/ostree-booted exists only on ostree-based systems).
    if not os.path.exists("/run/ostree-booted"):
        module.fail_json(msg="Module atomic_host is applicable for Atomic Host Platforms only")
    try:
        core(module)
    except Exception as e:
        # Surface any unexpected error with a traceback for debugging.
        module.fail_json(msg=f"{e}", exception=traceback.format_exc())
if __name__ == "__main__":
main()

View File

@@ -1,177 +0,0 @@
#!/usr/bin/python
# Copyright Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
DOCUMENTATION = r"""
module: atomic_image
short_description: Manage the container images on the atomic host platform
description:
- Manage the container images on the atomic host platform.
- Allows to execute the commands specified by the RUN label in the container image when present.
deprecated:
removed_in: 13.0.0
why: Project Atomic was sunset by the end of 2019.
alternative: There is none.
author:
- Saravanan KR (@krsacme)
notes:
- According to U(https://projectatomic.io/) the project has been sunset around 2019/2020, in favor of C(podman) and Fedora CoreOS.
requirements:
- atomic
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
backend:
description:
- Define the backend where the image is pulled.
choices: ['docker', 'ostree']
type: str
name:
description:
- Name of the container image.
required: true
type: str
state:
description:
- The state of the container image.
- The state V(latest) will ensure container image is upgraded to the latest version and forcefully restart container,
if running.
choices: ['absent', 'latest', 'present']
default: 'latest'
type: str
started:
description:
- Start or stop the container.
type: bool
default: true
"""
EXAMPLES = r"""
- name: Execute the run command on rsyslog container image (atomic run rhel7/rsyslog)
community.general.atomic_image:
name: rhel7/rsyslog
state: latest
- name: Pull busybox to the OSTree backend
community.general.atomic_image:
name: busybox
state: latest
backend: ostree
"""
RETURN = r"""
msg:
description: The command standard output.
returned: always
type: str
sample: 'Using default tag: latest ...'
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
def do_upgrade(module, image):
    """Force-update the image via 'atomic update --force'.

    Returns True when an update was performed, False when the image was
    already up to date; fails the module on a non-zero exit code.
    """
    atomic_bin = module.get_bin_path("atomic")
    rc, out, err = module.run_command([atomic_bin, "update", "--force", image], check_rc=False)
    if rc != 0:  # something went wrong emit the msg
        module.fail_json(rc=rc, msg=err)
    return "Image is up to date" not in out
def core(module):
    """Reconcile the container image state.

    When 'backend' is set, operates via 'atomic pull' / 'atomic run' /
    'atomic images delete' with '--storage=<backend>'.  Otherwise it uses
    the classic 'atomic run' / 'atomic install' / 'atomic uninstall'
    commands, with an optional forced upgrade when state=latest.
    All paths exit the module via exit_json/fail_json.
    """
    image = module.params["name"]
    state = module.params["state"]
    started = module.params["started"]
    backend = module.params["backend"]
    is_upgraded = False

    # Force the C locale so the English output substrings matched below are stable.
    module.run_command_environ_update = dict(LANG="C", LC_ALL="C", LC_MESSAGES="C")
    atomic_bin = module.get_bin_path("atomic")
    out = {}
    err = {}
    rc = 0

    if backend:
        if state == "present" or state == "latest":
            args = [atomic_bin, "pull", f"--storage={backend}", image]
            rc, out, err = module.run_command(args, check_rc=False)
            # NOTE(review): 'rc < 0' only catches termination by signal; a plain
            # non-zero exit (rc > 0) falls through silently.  Preserved as-is to
            # avoid changing the behavior of this deprecated module.
            if rc < 0:
                module.fail_json(rc=rc, msg=err)
            else:
                out_run = ""
                if started:
                    args = [atomic_bin, "run", f"--storage={backend}", image]
                    rc, out_run, err = module.run_command(args, check_rc=False)
                    if rc < 0:
                        module.fail_json(rc=rc, msg=err)
                # Layer-fetch messages indicate something new was pulled.
                changed = "Extracting" in out or "Copying blob" in out
                module.exit_json(msg=(out + out_run), changed=changed)
        elif state == "absent":
            args = [atomic_bin, "images", "delete", f"--storage={backend}", image]
            rc, out, err = module.run_command(args, check_rc=False)
            if rc < 0:
                module.fail_json(rc=rc, msg=err)
            else:
                # "Unable to find" means the image was already gone.
                changed = "Unable to find" not in out
                module.exit_json(msg=out, changed=changed)
        return

    if state == "present" or state == "latest":
        if state == "latest":
            is_upgraded = do_upgrade(module, image)

        if started:
            args = [atomic_bin, "run", image]
        else:
            args = [atomic_bin, "install", image]
    elif state == "absent":
        args = [atomic_bin, "uninstall", image]

    rc, out, err = module.run_command(args, check_rc=False)

    if rc < 0:
        module.fail_json(rc=rc, msg=err)
    elif rc == 1 and "already present" in err:
        # Bug fix: this result key was misspelled "restult"; use "result"
        # to match the sibling branch below.
        module.exit_json(result=err, changed=is_upgraded)
    elif started and "Container is running" in out:
        module.exit_json(result=out, changed=is_upgraded)
    else:
        module.exit_json(msg=out, changed=True)
def main():
    """Entry point: declare arguments, check for the atomic binary, run core()."""
    module = AnsibleModule(
        argument_spec=dict(
            backend=dict(type="str", choices=["docker", "ostree"]),
            name=dict(type="str", required=True),
            state=dict(type="str", default="latest", choices=["absent", "latest", "present"]),
            started=dict(type="bool", default=True),
        ),
    )
    # Verify that the platform supports atomic command
    dummy = module.get_bin_path("atomic", required=True)
    try:
        core(module)
    except Exception as e:
        # Surface any unexpected error with a traceback for debugging.
        module.fail_json(msg=f"{e}", exception=traceback.format_exc())
if __name__ == "__main__":
main()

View File

@@ -1,154 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2016, Jonathan Mainguy <jon@soh.re>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
#
# basis of code taken from the ansible twillio and nexmo modules
from __future__ import annotations
DOCUMENTATION = r"""
module: catapult
short_description: Send a sms / mms using the catapult bandwidth API
description:
- Allows notifications to be sent using SMS / MMS using the catapult bandwidth API.
deprecated:
removed_in: 13.0.0
why: >-
DNS fails to resolve the API endpoint used by the module since Oct 2024.
See L(the associated issue, https://github.com/ansible-collections/community.general/issues/10318) for details.
alternative: There is none.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
src:
type: str
description:
- One of your catapult telephone numbers the message should come from (must be in E.164 format, like V(+19195551212)).
required: true
dest:
type: list
elements: str
description:
- The phone number or numbers the message should be sent to (must be in E.164 format, like V(+19195551212)).
required: true
msg:
type: str
description:
- The contents of the text message (must be 2048 characters or less).
required: true
media:
type: str
description:
- For MMS messages, a media URL to the location of the media to be sent with the message.
user_id:
type: str
description:
- User ID from API account page.
required: true
api_token:
type: str
description:
- API Token from API account page.
required: true
api_secret:
type: str
description:
- API Secret from API account page.
required: true
author: "Jonathan Mainguy (@Jmainguy)"
notes:
- Will return changed even if the media URL is wrong.
- Will return changed if the destination number is invalid.
"""
EXAMPLES = r"""
- name: Send a mms to multiple users
community.general.catapult:
src: "+15035555555"
dest:
- "+12525089000"
- "+12018994225"
media: "http://example.com/foobar.jpg"
msg: "Task is complete"
user_id: "{{ user_id }}"
api_token: "{{ api_token }}"
api_secret: "{{ api_secret }}"
- name: Send a sms to a single user
community.general.catapult:
src: "+15035555555"
dest: "+12018994225"
msg: "Consider yourself notified"
user_id: "{{ user_id }}"
api_token: "{{ api_token }}"
api_secret: "{{ api_secret }}"
"""
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
def send(module, src, dest, msg, media, user_id, api_token, api_secret):
    """
    Send the message

    POSTs a single SMS (or MMS, when a media URL is given) to the Catapult
    v1 messages endpoint and returns the (response, info) tuple produced by
    fetch_url.

    :param module: AnsibleModule instance (its params are mutated for auth).
    :param src: Source phone number (E.164).
    :param dest: A single destination phone number (E.164).
    :param msg: Message text.
    :param media: Optional media URL for MMS; omitted from the payload when falsy.
    :param user_id: Catapult user ID used to build the endpoint URL.
    :param api_token: API token (becomes the Basic-auth username).
    :param api_secret: API secret (becomes the Basic-auth password).
    """
    AGENT = "Ansible"
    URI = f"https://api.catapult.inetwork.com/v1/users/{user_id}/messages"
    data = {"from": src, "to": dest, "text": msg}
    if media:
        data["media"] = media
    headers = {"User-Agent": AGENT, "Content-type": "application/json"}
    # Hack module params to have the Basic auth params that fetch_url expects
    # (stray newlines are stripped in case the credentials came from files/lookups).
    module.params["url_username"] = api_token.replace("\n", "")
    module.params["url_password"] = api_secret.replace("\n", "")
    return fetch_url(module, URI, data=json.dumps(data), headers=headers, method="post")
def main():
    """Entry point: send the message to every destination number in turn."""
    module = AnsibleModule(
        argument_spec=dict(
            src=dict(required=True),
            dest=dict(required=True, type="list", elements="str"),
            msg=dict(required=True),
            user_id=dict(required=True),
            api_token=dict(required=True, no_log=True),
            api_secret=dict(required=True, no_log=True),
            media=dict(),
        ),
    )
    src = module.params["src"]
    dest = module.params["dest"]
    msg = module.params["msg"]
    media = module.params["media"]
    user_id = module.params["user_id"]
    api_token = module.params["api_token"]
    api_secret = module.params["api_secret"]
    # Send one message per destination; abort on the first non-201 response.
    for number in dest:
        rc, info = send(module, src, number, msg, media, user_id, api_token, api_secret)
        if info["status"] != 201:
            body = json.loads(info["body"])
            fail_msg = body["message"]
            module.fail_json(msg=fail_msg)
    # As documented in the module notes, success always reports changed=True,
    # even for an invalid destination number or media URL.
    changed = True
    module.exit_json(changed=changed)
if __name__ == "__main__":
main()

View File

@@ -83,16 +83,12 @@ options:
description:
- Controls the module behavior. See notes below for more details.
- The default changed from V(compatibility) to V(new) in community.general 9.0.0.
V(compatibility) was removed from community.general 13.0.0.
- 'O(mode=new): The O(name) parameter may refer to a module name, a distribution file, a HTTP URL or a git repository
URL as described in C(cpanminus) documentation. C(cpanm) version specifiers are recognized. This is the default mode
from community.general 9.0.0 onwards.'
- 'O(mode=compatibility): This was the default mode before community.general 9.0.0. O(name) must be either a module
name or a distribution file. If the perl module given by O(name) is installed (at the exact O(version) when specified),
then nothing happens. Otherwise, it is installed using the C(cpanm) executable. O(name) cannot be an URL, or a git
URL. C(cpanm) version specifiers do not work in this mode.'
- 'B(ATTENTION): V(compatibility) mode is deprecated and will be removed in community.general 13.0.0.'
type: str
choices: [compatibility, new]
choices: [new]
default: new
version_added: 3.0.0
name_check:
@@ -184,7 +180,7 @@ class CPANMinus(ModuleHelper):
install_recommendations=dict(type="bool"),
install_suggestions=dict(type="bool"),
executable=dict(type="path"),
mode=dict(type="str", default="new", choices=["compatibility", "new"]),
mode=dict(type="str", default="new", choices=["new"]),
name_check=dict(type="str"),
),
required_one_of=[("name", "from_path")],
@@ -204,17 +200,8 @@ class CPANMinus(ModuleHelper):
def __init_module__(self):
v = self.vars
if v.mode == "compatibility":
if v.name_check:
self.do_raise("Parameter name_check can only be used with mode=new")
self.deprecate(
"'mode=compatibility' is deprecated, use 'mode=new' instead",
version="13.0.0",
collection_name="community.general",
)
else:
if v.name and v.from_path:
self.do_raise("Parameters 'name' and 'from_path' are mutually exclusive when 'mode=new'")
if v.name and v.from_path:
self.do_raise("Parameters 'name' and 'from_path' are mutually exclusive when 'mode=new'")
self.command = v.executable if v.executable else self.command
self.runner = CmdRunner(self.module, self.command, self.command_args_formats, check_rc=True)
@@ -260,22 +247,15 @@ class CPANMinus(ModuleHelper):
def __run__(self):
def process(rc, out, err):
if self.vars.mode == "compatibility" and rc != 0:
self.do_raise(msg=err, cmd=self.vars.cmd_args)
return "is up to date" not in err and "is up to date" not in out
v = self.vars
pkg_param = "from_path" if v.from_path else "name"
if v.mode == "compatibility":
if self._is_package_installed(v.name, v.locallib, v.version):
return
pkg_spec = v[pkg_param]
else:
installed = self._is_package_installed(v.name_check, v.locallib, v.version) if v.name_check else False
if installed:
return
pkg_spec = self.sanitize_pkg_spec_version(v[pkg_param], v.version)
installed = self._is_package_installed(v.name_check, v.locallib, v.version) if v.name_check else False
if installed:
return
pkg_spec = self.sanitize_pkg_spec_version(v[pkg_param], v.version)
with self.runner(
[

View File

@@ -1,272 +0,0 @@
#!/usr/bin/python
#
# Copyright (c) 2016 Dimension Data
# Authors:
# - Aimon Bustardo <aimon.bustardo@dimensiondata.com>
# - Bert Diwa <Lamberto.Diwa@dimensiondata.com>
# - Adam Friedman <tintoy@tintoy.io>
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
DOCUMENTATION = r"""
module: dimensiondata_network
short_description: Create, update, and delete MCP 1.0 & 2.0 networks
extends_documentation_fragment:
- community.general.dimensiondata
- community.general.dimensiondata_wait
- community.general.attributes
description:
- Create, update, and delete MCP 1.0 & 2.0 networks.
deprecated:
removed_in: 13.0.0
why: Service and its endpoints are no longer available.
alternative: There is none.
author: 'Aimon Bustardo (@aimonb)'
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
name:
description:
- The name of the network domain to create.
required: true
type: str
description:
description:
- Additional description of the network domain.
type: str
service_plan:
description:
- The service plan, either "ESSENTIALS" or "ADVANCED".
- MCP 2.0 Only.
choices: [ESSENTIALS, ADVANCED]
default: ESSENTIALS
type: str
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
type: str
"""
EXAMPLES = r"""
- name: Create an MCP 1.0 network
community.general.dimensiondata_network:
region: na
location: NA5
name: mynet
- name: Create an MCP 2.0 network
community.general.dimensiondata_network:
region: na
mcp_user: my_user
mcp_password: my_password
location: NA9
name: mynet
service_plan: ADVANCED
- name: Delete a network
community.general.dimensiondata_network:
region: na
location: NA1
name: mynet
state: absent
"""
RETURN = r"""
network:
description: Dictionary describing the network.
returned: On success when O(state=present).
type: complex
contains:
id:
description: Network ID.
type: str
sample: "8c787000-a000-4050-a215-280893411a7d"
name:
description: Network name.
type: str
sample: "My network"
description:
description: Network description.
type: str
sample: "My network description"
location:
description: Datacenter location.
type: str
sample: NA3
status:
description: Network status. (MCP 2.0 only).
type: str
sample: NORMAL
private_net:
description: Private network subnet. (MCP 1.0 only).
type: str
sample: "10.2.3.0"
multicast:
description: Multicast enabled? (MCP 1.0 only).
type: bool
sample: false
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.dimensiondata import HAS_LIBCLOUD, DimensionDataModule
if HAS_LIBCLOUD:
from libcloud.common.dimensiondata import DimensionDataAPIException
from libcloud.compute.base import NodeLocation
class DimensionDataNetworkModule(DimensionDataModule):
"""
The dimensiondata_network module for Ansible.
"""
    def __init__(self):
        """
        Create a new Dimension Data network module.

        Builds the AnsibleModule from the shared Dimension Data argument spec
        (extended with wait options) plus this module's own options, then
        caches the frequently-used parameters as instance attributes.
        """
        super().__init__(
            module=AnsibleModule(
                argument_spec=DimensionDataModule.argument_spec_with_wait(
                    name=dict(type="str", required=True),
                    description=dict(type="str"),
                    service_plan=dict(default="ESSENTIALS", choices=["ADVANCED", "ESSENTIALS"]),
                    state=dict(default="present", choices=["present", "absent"]),
                ),
                required_together=DimensionDataModule.required_together(),
            )
        )
        # Cache module parameters as attributes for convenient access.
        self.name = self.module.params["name"]
        self.description = self.module.params["description"]
        self.service_plan = self.module.params["service_plan"]
        self.state = self.module.params["state"]
    def state_present(self):
        """Ensure the network exists; always exits the module.

        Reports changed=False when a network with the configured name already
        exists in the location, otherwise creates it and reports changed=True.
        """
        network = self._get_network()
        if network:
            self.module.exit_json(changed=False, msg="Network already exists", network=self._network_to_dict(network))
        network = self._create_network()
        self.module.exit_json(
            changed=True,
            msg=f'Created network "{self.name}" in datacenter "{self.location}".',
            network=self._network_to_dict(network),
        )
def state_absent(self):
network = self._get_network()
if not network:
self.module.exit_json(
changed=False, msg=f'Network "{self.name}" does not exist', network=self._network_to_dict(network)
)
self._delete_network(network)
def _get_network(self):
if self.mcp_version == "1.0":
networks = self.driver.list_networks(location=self.location)
else:
networks = self.driver.ex_list_network_domains(location=self.location)
matched_network = [network for network in networks if network.name == self.name]
if matched_network:
return matched_network[0]
return None
def _network_to_dict(self, network):
network_dict = dict(id=network.id, name=network.name, description=network.description)
if isinstance(network.location, NodeLocation):
network_dict["location"] = network.location.id
else:
network_dict["location"] = network.location
if self.mcp_version == "1.0":
network_dict["private_net"] = network.private_net
network_dict["multicast"] = network.multicast
network_dict["status"] = None
else:
network_dict["private_net"] = None
network_dict["multicast"] = None
network_dict["status"] = network.status
return network_dict
def _create_network(self):
# Make sure service_plan argument is defined
if self.mcp_version == "2.0" and "service_plan" not in self.module.params:
self.module.fail_json(msg="service_plan required when creating network and location is MCP 2.0")
# Create network
try:
if self.mcp_version == "1.0":
network = self.driver.ex_create_network(self.location, self.name, description=self.description)
else:
network = self.driver.ex_create_network_domain(
self.location, self.name, self.module.params["service_plan"], description=self.description
)
except DimensionDataAPIException as e:
self.module.fail_json(msg=f"Failed to create new network: {e}", exception=traceback.format_exc())
if self.module.params["wait"] is True:
network = self._wait_for_network_state(network.id, "NORMAL")
return network
def _delete_network(self, network):
try:
if self.mcp_version == "1.0":
deleted = self.driver.ex_delete_network(network)
else:
deleted = self.driver.ex_delete_network_domain(network)
if deleted:
self.module.exit_json(changed=True, msg=f"Deleted network with id {network.id}")
self.module.fail_json(f"Unexpected failure deleting network with id {network.id}")
except DimensionDataAPIException as e:
self.module.fail_json(msg=f"Failed to delete network: {e}", exception=traceback.format_exc())
def _wait_for_network_state(self, net_id, state_to_wait_for):
try:
return self.driver.connection.wait_for_state(
state_to_wait_for,
self.driver.ex_get_network_domain,
self.module.params["wait_poll_interval"],
self.module.params["wait_time"],
net_id,
)
except DimensionDataAPIException as e:
self.module.fail_json(
msg=f"Network did not reach {state_to_wait_for} state in time: {e}",
exception=traceback.format_exc(),
)
def main():
    """Module entry point: dispatch to the handler for the requested state."""
    module = DimensionDataNetworkModule()
    handlers = {
        "present": module.state_present,
        "absent": module.state_absent,
    }
    handler = handlers.get(module.state)
    if handler is not None:
        handler()
if __name__ == "__main__":
main()

View File

@@ -1,530 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2016 Dimension Data
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
#
# Authors:
# - Adam Friedman <tintoy@tintoy.io>
from __future__ import annotations
DOCUMENTATION = r"""
module: dimensiondata_vlan
short_description: Manage a VLAN in a Cloud Control network domain
extends_documentation_fragment:
- community.general.dimensiondata
- community.general.dimensiondata_wait
- community.general.attributes
description:
- Manage VLANs in Cloud Control network domains.
deprecated:
removed_in: 13.0.0
why: Service and its endpoints are no longer available.
alternative: There is none.
author: 'Adam Friedman (@tintoy)'
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
name:
description:
- The name of the target VLAN.
type: str
required: true
description:
description:
- A description of the VLAN.
type: str
default: ''
network_domain:
description:
- The ID or name of the target network domain.
required: true
type: str
private_ipv4_base_address:
description:
- The base address for the VLAN's IPv4 network (for example V(192.168.1.0)).
type: str
default: ''
private_ipv4_prefix_size:
description:
- The size of the IPv4 address space, for example V(24).
- Required, if O(private_ipv4_base_address) is specified.
type: int
default: 0
state:
description:
- The desired state for the target VLAN.
- V(readonly) ensures that the state is only ever read, not modified (the module fails if the resource does not exist).
choices: [present, absent, readonly]
default: present
type: str
allow_expand:
description:
- Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently
possesses.
- If V(false), the module fails under these conditions.
- This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible).
type: bool
default: false
"""
EXAMPLES = r"""
- name: Add or update VLAN
community.general.dimensiondata_vlan:
region: na
location: NA5
network_domain: test_network
name: my_vlan1
description: A test VLAN
private_ipv4_base_address: 192.168.23.0
private_ipv4_prefix_size: 24
state: present
wait: true
- name: Read / get VLAN details
community.general.dimensiondata_vlan:
region: na
location: NA5
network_domain: test_network
name: my_vlan1
state: readonly
wait: true
- name: Delete a VLAN
community.general.dimensiondata_vlan:
region: na
location: NA5
network_domain: test_network
name: my_vlan_1
state: absent
wait: true
"""
RETURN = r"""
vlan:
description: Dictionary describing the VLAN.
returned: On success when O(state=present)
type: complex
contains:
id:
description: VLAN ID.
type: str
sample: "aaaaa000-a000-4050-a215-2808934ccccc"
name:
description: VLAN name.
type: str
sample: "My VLAN"
description:
description: VLAN description.
type: str
sample: "My VLAN description"
location:
description: Datacenter location.
type: str
sample: NA3
private_ipv4_base_address:
description: The base address for the VLAN's private IPV4 network.
type: str
sample: 192.168.23.0
private_ipv4_prefix_size:
description: The prefix size for the VLAN's private IPV4 network.
type: int
sample: 24
private_ipv4_gateway_address:
description: The gateway address for the VLAN's private IPV4 network.
type: str
sample: 192.168.23.1
private_ipv6_base_address:
description: The base address for the VLAN's IPV6 network.
type: str
sample: 2402:9900:111:1195:0:0:0:0
private_ipv6_prefix_size:
description: The prefix size for the VLAN's IPV6 network.
type: int
sample: 64
private_ipv6_gateway_address:
description: The gateway address for the VLAN's IPV6 network.
type: str
sample: 2402:9900:111:1195:0:0:0:1
status:
description: VLAN status.
type: str
sample: NORMAL
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.dimensiondata import (
DimensionDataModule,
UnknownNetworkError,
)
try:
from libcloud.common.dimensiondata import DimensionDataAPIException, DimensionDataVlan
HAS_LIBCLOUD = True
except ImportError:
DimensionDataVlan = None
HAS_LIBCLOUD = False
class DimensionDataVlanModule(DimensionDataModule):
    """
    The dimensiondata_vlan module for Ansible.
    """
    def __init__(self):
        """
        Create a new Dimension Data VLAN module.
        """
        super().__init__(
            module=AnsibleModule(
                argument_spec=DimensionDataModule.argument_spec_with_wait(
                    name=dict(required=True, type="str"),
                    description=dict(default="", type="str"),
                    network_domain=dict(required=True, type="str"),
                    private_ipv4_base_address=dict(default="", type="str"),
                    private_ipv4_prefix_size=dict(default=0, type="int"),
                    allow_expand=dict(default=False, type="bool"),
                    state=dict(default="present", choices=["present", "absent", "readonly"]),
                ),
                required_together=DimensionDataModule.required_together(),
            )
        )
        self.name = self.module.params["name"]
        self.description = self.module.params["description"]
        self.network_domain_selector = self.module.params["network_domain"]
        self.private_ipv4_base_address = self.module.params["private_ipv4_base_address"]
        self.private_ipv4_prefix_size = self.module.params["private_ipv4_prefix_size"]
        self.state = self.module.params["state"]
        self.allow_expand = self.module.params["allow_expand"]
        # Waiting is only implemented for state=present (see the comments in
        # _delete_vlan for why delete cannot wait reliably).
        if self.wait and self.state != "present":
            self.module.fail_json(msg='The wait parameter is only supported when state is "present".')
    def state_present(self):
        """
        Ensure that the target VLAN is present.
        """
        network_domain = self._get_network_domain()
        vlan = self._get_vlan(network_domain)
        if not vlan:
            # VLAN missing: in check mode just report the pending creation.
            if self.module.check_mode:
                self.module.exit_json(
                    msg=f'VLAN "{self.name}" is absent from network domain "{self.network_domain_selector}" (should be present).',
                    changed=True,
                )
            vlan = self._create_vlan(network_domain)
            self.module.exit_json(
                msg=f'Created VLAN "{self.name}" in network domain "{self.network_domain_selector}".',
                vlan=vlan_to_dict(vlan),
                changed=True,
            )
        else:
            # VLAN exists: diff it against the module parameters.
            diff = VlanDiff(vlan, self.module.params)
            if not diff.has_changes():
                self.module.exit_json(
                    msg=f'VLAN "{self.name}" is present in network domain "{self.network_domain_selector}" (no changes detected).',
                    vlan=vlan_to_dict(vlan),
                    changed=False,
                )
                # NOTE(review): exit_json() does not return, so this is unreachable.
                return
            try:
                diff.ensure_legal_change()
            except InvalidVlanChangeError as invalid_vlan_change:
                self.module.fail_json(
                    msg=f'Unable to update VLAN "{self.name}" in network domain "{self.network_domain_selector}": {invalid_vlan_change}'
                )
            # Network expansion is irreversible, so it requires explicit opt-in.
            if diff.needs_expand() and not self.allow_expand:
                self.module.fail_json(
                    msg=f"The configured private IPv4 network size ({self.private_ipv4_prefix_size}-bit prefix) for "
                    f"the VLAN differs from its current network size ({vlan.private_ipv4_range_size}-bit prefix) "
                    "and needs to be expanded. Use allow_expand=true if this is what you want."
                )
            if self.module.check_mode:
                self.module.exit_json(
                    msg=f'VLAN "{self.name}" is present in network domain "{self.network_domain_selector}" (changes detected).',
                    vlan=vlan_to_dict(vlan),
                    changed=True,
                )
            # Apply the edit (name/description) before the expand, mirroring
            # the order in which the diff was computed.
            if diff.needs_edit():
                vlan.name = self.name
                vlan.description = self.description
                self.driver.ex_update_vlan(vlan)
            if diff.needs_expand():
                vlan.private_ipv4_range_size = self.private_ipv4_prefix_size
                self.driver.ex_expand_vlan(vlan)
            self.module.exit_json(
                msg=f'Updated VLAN "{self.name}" in network domain "{self.network_domain_selector}".',
                vlan=vlan_to_dict(vlan),
                changed=True,
            )
    def state_readonly(self):
        """
        Read the target VLAN's state.
        """
        network_domain = self._get_network_domain()
        vlan = self._get_vlan(network_domain)
        if vlan:
            self.module.exit_json(vlan=vlan_to_dict(vlan), changed=False)
        else:
            # readonly requires the VLAN to exist.
            self.module.fail_json(
                msg=f'VLAN "{self.name}" does not exist in network domain "{self.network_domain_selector}".'
            )
    def state_absent(self):
        """
        Ensure that the target VLAN is not present.
        """
        network_domain = self._get_network_domain()
        vlan = self._get_vlan(network_domain)
        if not vlan:
            self.module.exit_json(
                msg=f'VLAN "{self.name}" is absent from network domain "{self.network_domain_selector}".', changed=False
            )
            # NOTE(review): exit_json() does not return, so this is unreachable.
            return
        if self.module.check_mode:
            self.module.exit_json(
                msg=f'VLAN "{self.name}" is present in network domain "{self.network_domain_selector}" (should be absent).',
                vlan=vlan_to_dict(vlan),
                changed=True,
            )
        self._delete_vlan(vlan)
        self.module.exit_json(
            msg=f'Deleted VLAN "{self.name}" from network domain "{self.network_domain_selector}".', changed=True
        )
    def _get_vlan(self, network_domain):
        """
        Retrieve the target VLAN details from CloudControl.
        :param network_domain: The target network domain.
        :return: The VLAN, or None if the target VLAN was not found.
        :rtype: DimensionDataVlan
        """
        vlans = self.driver.ex_list_vlans(location=self.location, network_domain=network_domain)
        matching_vlans = [vlan for vlan in vlans if vlan.name == self.name]
        if matching_vlans:
            return matching_vlans[0]
        return None
    def _create_vlan(self, network_domain):
        # Create the VLAN, optionally waiting until it reaches NORMAL state.
        vlan = self.driver.ex_create_vlan(
            network_domain, self.name, self.private_ipv4_base_address, self.description, self.private_ipv4_prefix_size
        )
        if self.wait:
            vlan = self._wait_for_vlan_state(vlan.id, "NORMAL")
        return vlan
    def _delete_vlan(self, vlan):
        # Delete the VLAN; failures are reported via fail_json.
        try:
            self.driver.ex_delete_vlan(vlan)
            # Not currently supported for deletes due to a bug in libcloud (module will error out if "wait" is specified when "state" is not "present").
            if self.wait:
                self._wait_for_vlan_state(vlan, "NOT_FOUND")
        except DimensionDataAPIException as api_exception:
            self.module.fail_json(
                msg=f'Failed to delete VLAN "{vlan.id}" due to unexpected error from the CloudControl API: {api_exception.msg}'
            )
    def _wait_for_vlan_state(self, vlan, state_to_wait_for):
        # Poll until the VLAN reaches the requested state. A RESOURCE_NOT_FOUND
        # response is translated into a synthetic NOT_FOUND VLAN object so that
        # waiting for deletion can succeed; any other API error propagates.
        network_domain = self._get_network_domain()
        wait_poll_interval = self.module.params["wait_poll_interval"]
        wait_time = self.module.params["wait_time"]
        # Bizarre bug in libcloud when checking status after delete; socket.error is too generic to catch in this context so for now we don't even try.
        try:
            return self.driver.connection.wait_for_state(
                state_to_wait_for, self.driver.ex_get_vlan, wait_poll_interval, wait_time, vlan
            )
        except DimensionDataAPIException as api_exception:
            if api_exception.code != "RESOURCE_NOT_FOUND":
                raise
            return DimensionDataVlan(
                id=vlan.id,
                status="NOT_FOUND",
                name="",
                description="",
                private_ipv4_range_address="",
                private_ipv4_range_size=0,
                ipv4_gateway="",
                ipv6_range_address="",
                ipv6_range_size=0,
                ipv6_gateway="",
                location=self.location,
                network_domain=network_domain,
            )
    def _get_network_domain(self):
        """
        Retrieve the target network domain from the Cloud Control API.
        :return: The network domain.
        """
        try:
            return self.get_network_domain(self.network_domain_selector, self.location)
        except UnknownNetworkError:
            self.module.fail_json(
                msg=f'Cannot find network domain "{self.network_domain_selector}" in datacenter "{self.location}".'
            )
            # NOTE(review): fail_json() does not return; kept for clarity.
            return None
class InvalidVlanChangeError(Exception):
    """Raised when an illegal change to VLAN state is attempted."""
class VlanDiff:
    """
    Captures the differences between a VLAN (as reported by CloudControl)
    and the desired configuration given in the module parameters.
    """
    def __init__(self, vlan, module_params):
        """
        :param vlan: The VLAN information from CloudControl.
        :type vlan: DimensionDataVlan
        :param module_params: The module parameters.
        :type module_params: dict
        """
        self.vlan = vlan
        self.module_params = module_params
        self.name_changed = vlan.name != module_params["name"]
        self.description_changed = vlan.description != module_params["description"]
        self.private_ipv4_base_address_changed = (
            vlan.private_ipv4_range_address != module_params["private_ipv4_base_address"]
        )
        self.private_ipv4_prefix_size_changed = (
            vlan.private_ipv4_range_size != module_params["private_ipv4_prefix_size"]
        )
        # Positive delta: configured prefix is longer (network would shrink).
        # Negative delta: configured prefix is shorter (network would expand).
        prefix_delta = module_params["private_ipv4_prefix_size"] - vlan.private_ipv4_range_size
        self.private_ipv4_prefix_size_increased = prefix_delta > 0
        self.private_ipv4_prefix_size_decreased = prefix_delta < 0
    def has_changes(self):
        """
        Does this diff represent any changes between the VLAN and the module configuration?
        :return: True if there are changes; otherwise, False.
        """
        return self.needs_edit() or self.needs_expand()
    def ensure_legal_change(self):
        """
        Verify that the change (if any) represented by this diff is a legal change to VLAN state.
        - private_ipv4_base_address cannot be changed
        - private_ipv4_prefix_size must be greater than or equal to the VLAN's existing private_ipv4_range_size
        :raise InvalidVlanChangeError: The diff does not represent a legal change to VLAN state.
        """
        if self.private_ipv4_base_address_changed:
            # The private IPv4 base address is immutable once the VLAN exists.
            raise InvalidVlanChangeError("Cannot change the private IPV4 base address for an existing VLAN.")
        if self.private_ipv4_prefix_size_increased:
            # A longer prefix would shrink the network, which is unsupported.
            raise InvalidVlanChangeError(
                "Cannot shrink the private IPV4 network for an existing VLAN (only expand is supported)."
            )
    def needs_edit(self):
        """
        Is an Edit operation required to reconcile the VLAN with the module parameters?
        :return: True if an Edit operation is required; otherwise, False.
        """
        return self.name_changed or self.description_changed
    def needs_expand(self):
        """
        Is an Expand operation required to reconcile the VLAN with the module parameters?
        The VLAN's network is expanded by reducing the size of its network prefix.
        :return: True if an Expand operation is required; otherwise, False.
        """
        return self.private_ipv4_prefix_size_decreased
def vlan_to_dict(vlan):
    """Serialize a libcloud DimensionDataVlan into the module's return structure."""
    return dict(
        id=vlan.id,
        name=vlan.name,
        description=vlan.description,
        location=vlan.location.id,
        private_ipv4_base_address=vlan.private_ipv4_range_address,
        private_ipv4_prefix_size=vlan.private_ipv4_range_size,
        private_ipv4_gateway_address=vlan.ipv4_gateway,
        ipv6_base_address=vlan.ipv6_range_address,
        ipv6_prefix_size=vlan.ipv6_range_size,
        ipv6_gateway_address=vlan.ipv6_gateway,
        status=vlan.status,
    )
def main():
    """Module entry point: dispatch to the handler for the requested state."""
    module = DimensionDataVlanModule()
    dispatch = {
        "present": module.state_present,
        "readonly": module.state_readonly,
        "absent": module.state_absent,
    }
    action = dispatch.get(module.state)
    if action is not None:
        action()
if __name__ == "__main__":
main()

View File

@@ -75,8 +75,9 @@ options:
force_defaults:
description:
- If V(true), overwrite current O(description) and O(private) attributes with defaults.
- V(true) is deprecated for this option and will not be allowed starting in community.general 13.0.0. V(false) will be the default value then.
- The default value changed from V(true) to V(false) in community.general 13.0.0.
type: bool
default: false
version_added: 4.1.0
requirements:
- PyGithub>=1.54
@@ -239,7 +240,7 @@ def main():
private=dict(type="bool"),
description=dict(type="str"),
api_url=dict(type="str", default="https://api.github.com"),
force_defaults=dict(type="bool"),
force_defaults=dict(type="bool", default=False),
)
module = AnsibleModule(
argument_spec=module_args,
@@ -249,14 +250,6 @@ def main():
mutually_exclusive=[("username", "access_token")],
)
if module.params["force_defaults"] is None:
module.deprecate(
"'force_defaults=true' is deprecated and will not be allowed in community.general 13.0.0, use 'force_defaults=false' instead",
version="13.0.0",
collection_name="community.general",
)
module.params["force_defaults"] = True
if not HAS_GITHUB_PACKAGE:
module.fail_json(msg=missing_required_lib("PyGithub"), exception=GITHUB_IMP_ERR)

View File

@@ -38,16 +38,11 @@ options:
notes:
- Currently the module is B(only supported for Debian, Ubuntu, and Arch Linux) systems.
- This module requires the package C(locales) installed in Debian and Ubuntu systems.
- If C(/etc/locale.gen) exists, the module assumes to be using the B(glibc) mechanism, else if C(/var/lib/locales/supported.d/)
exists it assumes to be using the B(ubuntu_legacy) mechanism, else it raises an error.
- If C(/etc/locale.gen) exists, the module assumes to be using the B(glibc) mechanism, else it raises an error.
Support for C(/var/lib/locales/supported.d/) (the V(ubuntu_legacy) mechanism) has been removed in community.general 13.0.0.
- When using V(glibc) mechanism, it manages locales by editing C(/etc/locale.gen) and running C(locale-gen).
- When using V(ubuntu_legacy) mechanism, it manages locales by editing C(/var/lib/locales/supported.d/local) and then running
C(locale-gen).
- Please note that the module asserts the availability of the locale by checking the files C(/usr/share/i18n/SUPPORTED) and
C(/usr/local/share/i18n/SUPPORTED), but the C(/usr/local) one is not supported by Archlinux.
- Please note that the code path that uses V(ubuntu_legacy) mechanism has not been tested for a while, because recent versions of
Ubuntu is already using the V(glibc) mechanism. There is no support for V(ubuntu_legacy), given our inability to test it.
Therefore, that mechanism is B(deprecated) and will be removed in community.general 13.0.0.
"""
EXAMPLES = r"""
@@ -70,7 +65,6 @@ mechanism:
type: str
choices:
- glibc
- ubuntu_legacy
returned: success
sample: glibc
version_added: 10.2.0
@@ -114,10 +108,6 @@ class LocaleGen(StateModuleHelper):
def __init_module__(self):
self.mechanisms = dict(
ubuntu_legacy=dict(
available=SUPPORTED_LOCALES,
apply_change=self.apply_change_ubuntu_legacy,
),
glibc=dict(
available=SUPPORTED_LOCALES,
apply_change=self.apply_change_glibc,
@@ -127,18 +117,8 @@ class LocaleGen(StateModuleHelper):
if os.path.exists(ETC_LOCALE_GEN):
self.vars.ubuntu_mode = False
self.vars.mechanism = "glibc"
elif os.path.exists(VAR_LIB_LOCALES):
self.vars.ubuntu_mode = True
self.vars.mechanism = "ubuntu_legacy"
self.module.deprecate(
"On this machine mechanism=ubuntu_legacy is used. This mechanism is deprecated and will be removed from"
" in community.general 13.0.0. If you see this message on a modern Debian or Ubuntu version,"
" please create an issue in the community.general repository",
version="13.0.0",
collection_name="community.general",
)
else:
self.do_raise(f'{VAR_LIB_LOCALES} and {ETC_LOCALE_GEN} are missing. Is the package "locales" installed?')
self.do_raise(f'{ETC_LOCALE_GEN} is missing. Is the package "locales" installed?')
self.runner = locale_runner(self.module)
@@ -269,34 +249,6 @@ class LocaleGen(StateModuleHelper):
with runner() as ctx:
ctx.run()
def apply_change_ubuntu_legacy(self, target_state, names):
"""Create or remove locale.
Keyword arguments:
target_state -- Desired state, either present or absent.
names -- Name list including encoding such as de_CH.UTF-8.
"""
runner = locale_gen_runner(self.module)
if target_state == "present":
# Create locale.
# Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
with runner() as ctx:
ctx.run()
else:
# Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales.
with open(VAR_LIB_LOCALES_LOCAL) as fr:
content = fr.readlines()
with open(VAR_LIB_LOCALES_LOCAL, "w") as fw:
for line in content:
locale, charset = line.split(" ")
if locale not in names:
fw.write(line)
# Purge locales and regenerate.
# Please provide a patch if you know how to avoid regenerating the locales to keep!
with runner("purge") as ctx:
ctx.run()
@check_mode_skip
def __state_fallback__(self):
if self.vars.state_tracking == self.vars.state:

View File

@@ -1,215 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017, 2018, Oracle and/or its affiliates.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
DOCUMENTATION = r"""
module: oci_vcn
short_description: Manage Virtual Cloud Networks(VCN) in OCI
deprecated:
removed_in: 13.0.0
why: Superseded by official Oracle collection.
alternative: Use module C(oci_network_vcn) from the C(oracle.oci) collection.
description:
- This module allows the user to create, delete and update virtual cloud networks(VCNs) in OCI. The complete Oracle Cloud
Infrastructure Ansible Modules can be downloaded from U(https://github.com/oracle/oci-ansible-modules/releases).
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
cidr_block:
description: The CIDR IP address block of the VCN. Required when creating a VCN with O(state=present).
type: str
compartment_id:
description: The OCID of the compartment to contain the VCN. Required when creating a VCN with O(state=present). This
option is mutually exclusive with O(vcn_id).
type: str
display_name:
description: A user-friendly name. Does not have to be unique, and it is changeable.
type: str
aliases: ['name']
dns_label:
description: A DNS label for the VCN, used in conjunction with the VNIC's hostname and subnet's DNS label to form a fully
qualified domain name (FQDN) for each VNIC within this subnet (for example, V(bminstance-1.subnet123.vcn1.oraclevcn.com)).
Not required to be unique, but it is a best practice to set unique DNS labels for VCNs in your tenancy. Must be an alphanumeric
string that begins with a letter. The value cannot be changed.
type: str
state:
description: Create or update a VCN with O(state=present). Use O(state=absent) to delete a VCN.
type: str
default: present
choices: ['present', 'absent']
vcn_id:
description: The OCID of the VCN. Required when deleting a VCN with O(state=absent) or updating a VCN with O(state=present).
This option is mutually exclusive with O(compartment_id).
type: str
aliases: ['id']
author: "Rohit Chaware (@rohitChaware)"
extends_documentation_fragment:
- community.general.oracle
- community.general.oracle_creatable_resource
- community.general.oracle_wait_options
- community.general.oracle_tags
- community.general.attributes
"""
EXAMPLES = r"""
- name: Create a VCN
community.general.oci_vcn:
cidr_block: '10.0.0.0/16'
compartment_id: 'ocid1.compartment.oc1..xxxxxEXAMPLExxxxx'
display_name: my_vcn
dns_label: ansiblevcn
- name: Updates the specified VCN's display name
community.general.oci_vcn:
vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx
display_name: ansible_vcn
- name: Delete the specified VCN
community.general.oci_vcn:
vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx
state: absent
"""
RETURN = r"""
vcn:
description: Information about the VCN.
returned: On successful create and update operation
type: dict
sample:
{
"cidr_block": "10.0.0.0/16",
"compartment_id\"": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
"default_dhcp_options_id": "ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx",
"default_route_table_id": "ocid1.routetable.oc1.phx.xxxxxEXAMPLExxxxx",
"default_security_list_id": "ocid1.securitylist.oc1.phx.xxxxxEXAMPLExxxxx",
"display_name": "ansible_vcn",
"dns_label": "ansiblevcn",
"id": "ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx",
"lifecycle_state": "AVAILABLE",
"time_created": "2017-11-13T20:22:40.626000+00:00",
"vcn_domain_name": "ansiblevcn.oraclevcn.com"
}
"""
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible_collections.community.general.plugins.module_utils.oracle import oci_utils
try:
from oci.core.models import CreateVcnDetails, UpdateVcnDetails
from oci.core.virtual_network_client import VirtualNetworkClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
def delete_vcn(virtual_network_client, module):
    """Delete the VCN identified by module.params['vcn_id'], waiting per the module's wait options."""
    vcn_id = module.params["vcn_id"]
    return oci_utils.delete_and_wait(
        resource_type="vcn",
        client=virtual_network_client,
        get_fn=virtual_network_client.get_vcn,
        kwargs_get={"vcn_id": vcn_id},
        delete_fn=virtual_network_client.delete_vcn,
        kwargs_delete={"vcn_id": vcn_id},
        module=module,
    )
def update_vcn(virtual_network_client, module):
    """Update an existing VCN in place; only attributes declared by UpdateVcnDetails are considered."""
    vcn_id = module.params["vcn_id"]
    updatable_attributes = list(UpdateVcnDetails().attribute_map.keys())
    return oci_utils.check_and_update_resource(
        resource_type="vcn",
        client=virtual_network_client,
        get_fn=virtual_network_client.get_vcn,
        kwargs_get={"vcn_id": vcn_id},
        update_fn=virtual_network_client.update_vcn,
        primitive_params_update=["vcn_id"],
        kwargs_non_primitive_update={UpdateVcnDetails: "update_vcn_details"},
        module=module,
        update_attributes=updatable_attributes,
    )
def create_vcn(virtual_network_client, module):
    """Create a new VCN from the module parameters and wait for it to become available."""
    create_vcn_details = CreateVcnDetails()
    # Copy every CreateVcnDetails attribute that has a matching module parameter.
    for attribute in create_vcn_details.attribute_map:
        if attribute in module.params:
            setattr(create_vcn_details, attribute, module.params[attribute])
    return oci_utils.create_and_wait(
        resource_type="vcn",
        create_fn=virtual_network_client.create_vcn,
        kwargs_create={"create_vcn_details": create_vcn_details},
        client=virtual_network_client,
        get_fn=virtual_network_client.get_vcn,
        get_param="vcn_id",
        module=module,
    )
def main():
    """Module entry point: create, update, or delete a VCN depending on state/vcn_id."""
    module_args = oci_utils.get_taggable_arg_spec(supports_create=True, supports_wait=True)
    module_args.update(
        dict(
            cidr_block=dict(type="str"),
            compartment_id=dict(type="str"),
            display_name=dict(type="str", aliases=["name"]),
            dns_label=dict(type="str"),
            state=dict(type="str", default="present", choices=["absent", "present"]),
            vcn_id=dict(type="str", aliases=["id"]),
        )
    )
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=False,
        mutually_exclusive=[["compartment_id", "vcn_id"]],
    )
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg=missing_required_lib("oci"))
    virtual_network_client = oci_utils.create_service_client(module, VirtualNetworkClient)
    state = module.params["state"]
    vcn_id = module.params["vcn_id"]
    if state == "absent":
        # Deleting requires an explicit VCN id.
        if vcn_id is None:
            module.fail_json(msg="Specify vcn_id with state as 'absent' to delete a VCN.")
        result = delete_vcn(virtual_network_client, module)
    elif vcn_id is not None:
        # state=present with an id means update in place.
        result = update_vcn(virtual_network_client, module)
    else:
        # state=present without an id: create, unless a matching VCN exists.
        # display_name and dns_label are ignored when matching existing VCNs.
        result = oci_utils.check_and_create_resource(
            resource_type="vcn",
            create_fn=create_vcn,
            kwargs_create={
                "virtual_network_client": virtual_network_client,
                "module": module,
            },
            list_fn=virtual_network_client.list_vcns,
            kwargs_list={"compartment_id": module.params["compartment_id"]},
            module=module,
            model=CreateVcnDetails(),
            exclude_attributes={"display_name": True, "dns_label": True},
        )
    module.exit_json(**result)
if __name__ == "__main__":
main()

View File

@@ -1,503 +0,0 @@
#!/usr/bin/python
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
DOCUMENTATION = r"""
module: oneandone_firewall_policy
short_description: Configure 1&1 firewall policy
description:
- Create, remove, reconfigure, update firewall policies. This module has a dependency on 1and1 >= 1.0.
deprecated:
removed_in: 13.0.0
why: DNS fails to resolve the API endpoint used by the module.
alternative: There is none.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
state:
description:
- Define a firewall policy state to create, remove, or update.
type: str
default: 'present'
choices: ["present", "absent", "update"]
auth_token:
description:
- Authenticating API token provided by 1&1.
type: str
api_url:
description:
- Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable.
type: str
name:
description:
- Firewall policy name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128.
type: str
firewall_policy:
description:
- The identifier (id or name) of the firewall policy used with update state.
type: str
rules:
description:
- List of rules that are set for the firewall policy. Each rule must contain protocol parameter, in addition to three
optional parameters (port_from, port_to, and source).
type: list
elements: dict
default: []
add_server_ips:
description:
- A list of server identifiers (ID or name) to be assigned to a firewall policy. Used in combination with update state.
type: list
elements: str
default: []
remove_server_ips:
description:
- A list of server IP IDs to be unassigned from a firewall policy. Used in combination with update state.
type: list
elements: str
default: []
add_rules:
description:
      - List of rules that are added to an existing firewall policy. Its syntax is the same as the one used for rules parameter.
Used in combination with update state.
type: list
elements: dict
default: []
remove_rules:
description:
- List of rule IDs that are removed from an existing firewall policy. Used in combination with update state.
type: list
elements: str
default: []
description:
description:
- Firewall policy description. maxLength=256.
type: str
wait:
description:
- Wait for the instance to be in state 'running' before returning.
default: true
type: bool
wait_timeout:
description:
- How long before wait gives up, in seconds.
type: int
default: 600
wait_interval:
description:
- Defines the number of seconds to wait when using the _wait_for methods.
type: int
default: 5
requirements:
- "1and1"
author:
- "Amel Ajdinovic (@aajdinov)"
- "Ethan Devenport (@edevenport)"
"""
EXAMPLES = r"""
- name: Create a firewall policy
community.general.oneandone_firewall_policy:
auth_token: oneandone_private_api_key
name: ansible-firewall-policy
description: Testing creation of firewall policies with ansible
rules:
- protocol: TCP
port_from: 80
port_to: 80
source: 0.0.0.0
wait: true
wait_timeout: 500
- name: Destroy a firewall policy
community.general.oneandone_firewall_policy:
auth_token: oneandone_private_api_key
state: absent
name: ansible-firewall-policy
- name: Update a firewall policy
community.general.oneandone_firewall_policy:
auth_token: oneandone_private_api_key
state: update
firewall_policy: ansible-firewall-policy
name: ansible-firewall-policy-updated
description: Testing creation of firewall policies with ansible - updated
- name: Add server to a firewall policy
community.general.oneandone_firewall_policy:
auth_token: oneandone_private_api_key
firewall_policy: ansible-firewall-policy-updated
add_server_ips:
- server_identifier (id or name)
- "server_identifier #2 (id or name)"
wait: true
wait_timeout: 500
state: update
- name: Remove server from a firewall policy
community.general.oneandone_firewall_policy:
auth_token: oneandone_private_api_key
firewall_policy: ansible-firewall-policy-updated
remove_server_ips:
- B2504878540DBC5F7634EB00A07C1EBD (server's IP id)
wait: true
wait_timeout: 500
state: update
- name: Add rules to a firewall policy
community.general.oneandone_firewall_policy:
auth_token: oneandone_private_api_key
firewall_policy: ansible-firewall-policy-updated
description: Adding rules to an existing firewall policy
add_rules:
- protocol: TCP
port_from: 70
port_to: 70
source: 0.0.0.0
- protocol: TCP
port_from: 60
port_to: 60
source: 0.0.0.0
wait: true
wait_timeout: 500
state: update
- name: Remove rules from a firewall policy
community.general.oneandone_firewall_policy:
auth_token: oneandone_private_api_key
firewall_policy: ansible-firewall-policy-updated
remove_rules:
- "rule_id #1"
- "rule_id #2"
- '...'
wait: true
wait_timeout: 500
state: update
"""
RETURN = r"""
firewall_policy:
description: Information about the firewall policy that was processed.
type: dict
sample: {"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}
returned: always
"""
import os
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.oneandone import (
OneAndOneResources,
get_firewall_policy,
get_server,
wait_for_resource_creation_completion,
)
HAS_ONEANDONE_SDK = True
try:
import oneandone.client
except ImportError:
HAS_ONEANDONE_SDK = False
def _check_mode(module, result):
if module.check_mode:
module.exit_json(changed=result)
def _add_server_ips(module, oneandone_conn, firewall_id, server_ids):
    """
    Assigns servers to a firewall policy.

    Builds one AttachServer per server identifier (using the server's first
    IP). In check mode, returns whether any attachment would be made;
    otherwise performs the attachment and returns the updated policy.
    """
    try:
        attachments = []
        for identifier in server_ids:
            srv = get_server(oneandone_conn, identifier, True)
            # First IP of the server; a server without IPs raises here and is
            # reported through fail_json below.
            first_ip = next(iter(srv["ips"] or []), None)
            attachments.append(
                oneandone.client.AttachServer(server_id=srv["id"], server_ip_id=first_ip["id"])
            )
        if module.check_mode:
            return bool(attachments)
        return oneandone_conn.attach_server_firewall_policy(
            firewall_id=firewall_id, server_ips=attachments
        )
    except Exception as e:
        module.fail_json(msg=str(e))
def _remove_firewall_server(module, oneandone_conn, firewall_id, server_ip_id):
    """
    Unassigns a server/IP from a firewall policy.

    In check mode, only looks the attachment up (read-only) and reports
    whether it exists; otherwise performs the actual removal.
    """
    try:
        if not module.check_mode:
            return oneandone_conn.remove_firewall_server(firewall_id=firewall_id, server_ip_id=server_ip_id)
        existing = oneandone_conn.get_firewall_server(firewall_id=firewall_id, server_ip_id=server_ip_id)
        return bool(existing)
    except Exception as e:
        module.fail_json(msg=str(e))
def _add_firewall_rules(module, oneandone_conn, firewall_id, rules):
    """
    Adds new rules to a firewall policy.

    In check mode, returns whether rules would be added to an existing
    policy; otherwise performs the addition and returns the updated policy.
    """
    try:
        rule_objects = [
            oneandone.client.FirewallPolicyRule(
                protocol=entry["protocol"],
                port_from=entry["port_from"],
                port_to=entry["port_to"],
                source=entry["source"],
            )
            for entry in rules
        ]
        if module.check_mode:
            policy = get_firewall_policy(oneandone_conn, firewall_id)
            return bool(rule_objects and policy)
        return oneandone_conn.add_firewall_policy_rule(
            firewall_id=firewall_id, firewall_policy_rules=rule_objects
        )
    except Exception as e:
        module.fail_json(msg=str(e))
def _remove_firewall_rule(module, oneandone_conn, firewall_id, rule_id):
    """
    Removes a rule from a firewall policy.

    In check mode, only fetches the rule (read-only) and reports whether it
    exists; otherwise performs the actual removal.
    """
    try:
        if not module.check_mode:
            return oneandone_conn.remove_firewall_rule(firewall_id=firewall_id, rule_id=rule_id)
        existing = oneandone_conn.get_firewall_policy_rule(firewall_id=firewall_id, rule_id=rule_id)
        return bool(existing)
    except Exception as e:
        module.fail_json(msg=str(e))
def update_firewall_policy(module, oneandone_conn):
    """
    Updates a firewall policy based on input arguments.
    Firewall rules and server ips can be added/removed to/from
    firewall policy. Firewall policy name and description can be
    updated as well.
    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object
    Returns a tuple (changed, firewall_policy). In check mode this
    function exits via _check_mode()/module.exit_json() and does not
    return.
    """
    try:
        firewall_policy_id = module.params.get("firewall_policy")
        name = module.params.get("name")
        description = module.params.get("description")
        add_server_ips = module.params.get("add_server_ips")
        remove_server_ips = module.params.get("remove_server_ips")
        add_rules = module.params.get("add_rules")
        remove_rules = module.params.get("remove_rules")
        changed = False
        firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy_id, True)
        if firewall_policy is None:
            # Policy not found: in check mode report "no change" and exit.
            # NOTE(review): outside check mode execution falls through and the
            # firewall_policy["id"] accesses below raise TypeError, which is
            # then surfaced via fail_json in the except clause — confirm this
            # error message is acceptable for the "not found" case.
            _check_mode(module, False)
        if name or description:
            # A rename/re-description is always treated as a change.
            _check_mode(module, True)
            firewall_policy = oneandone_conn.modify_firewall(
                firewall_id=firewall_policy["id"], name=name, description=description
            )
            changed = True
        if add_server_ips:
            if module.check_mode:
                _check_mode(module, _add_server_ips(module, oneandone_conn, firewall_policy["id"], add_server_ips))
            firewall_policy = _add_server_ips(module, oneandone_conn, firewall_policy["id"], add_server_ips)
            changed = True
        if remove_server_ips:
            chk_changed = False
            for server_ip_id in remove_server_ips:
                if module.check_mode:
                    chk_changed |= _remove_firewall_server(module, oneandone_conn, firewall_policy["id"], server_ip_id)
                # In check mode the helper is read-only, so this second call is
                # harmless; outside check mode it performs the removal.
                _remove_firewall_server(module, oneandone_conn, firewall_policy["id"], server_ip_id)
            _check_mode(module, chk_changed)
            # Re-read the policy to reflect the removals.
            firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy["id"], True)
            changed = True
        if add_rules:
            firewall_policy = _add_firewall_rules(module, oneandone_conn, firewall_policy["id"], add_rules)
            _check_mode(module, firewall_policy)
            changed = True
        if remove_rules:
            chk_changed = False
            for rule_id in remove_rules:
                if module.check_mode:
                    chk_changed |= _remove_firewall_rule(module, oneandone_conn, firewall_policy["id"], rule_id)
                # Read-only in check mode; destructive otherwise (see helper).
                _remove_firewall_rule(module, oneandone_conn, firewall_policy["id"], rule_id)
            _check_mode(module, chk_changed)
            firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy["id"], True)
            changed = True
        return (changed, firewall_policy)
    except Exception as e:
        module.fail_json(msg=str(e))
def create_firewall_policy(module, oneandone_conn):
    """
    Create a new firewall policy.

    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object

    In check mode this exits early (creation always counts as a change);
    otherwise returns a tuple (changed, firewall_policy).
    """
    try:
        params = module.params
        rule_objects = [
            oneandone.client.FirewallPolicyRule(
                protocol=entry["protocol"],
                port_from=entry["port_from"],
                port_to=entry["port_to"],
                source=entry["source"],
            )
            for entry in params.get("rules")
        ]
        policy_spec = oneandone.client.FirewallPolicy(
            name=params.get("name"), description=params.get("description")
        )
        # Creating a policy is always a change; in check mode exit here
        # before touching the API.
        _check_mode(module, True)
        firewall_policy = oneandone_conn.create_firewall_policy(
            firewall_policy=policy_spec, firewall_policy_rules=rule_objects
        )
        if params.get("wait"):
            wait_for_resource_creation_completion(
                oneandone_conn,
                OneAndOneResources.firewall_policy,
                firewall_policy["id"],
                params.get("wait_timeout"),
                params.get("wait_interval"),
            )
            # Refresh to pick up the post-creation state.
            firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy["id"], True)
        changed = bool(firewall_policy)
        _check_mode(module, False)
        return (changed, firewall_policy)
    except Exception as e:
        module.fail_json(msg=str(e))
def remove_firewall_policy(module, oneandone_conn):
    """
    Removes a firewall policy.

    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object

    In check mode, exits reporting whether a deletion would take place;
    otherwise deletes the policy and returns (changed, {id, name}).
    """
    try:
        fp_id = module.params.get("name")
        firewall_policy_id = get_firewall_policy(oneandone_conn, fp_id)
        if module.check_mode:
            # Exits with changed=False when the policy does not exist,
            # changed=True when it does — same outcome as the two-step check.
            _check_mode(module, firewall_policy_id is not None)
        firewall_policy = oneandone_conn.delete_firewall(firewall_policy_id)
        changed = bool(firewall_policy)
        return (changed, {"id": firewall_policy["id"], "name": firewall_policy["name"]})
    except Exception as e:
        module.fail_json(msg=str(e))
def main():
    """
    Module entry point: builds the argument spec, authenticates against the
    1&1 API, and dispatches to the create/update/remove handlers based on
    the requested state.
    """
    module = AnsibleModule(
        argument_spec=dict(
            auth_token=dict(type="str", no_log=True, default=os.environ.get("ONEANDONE_AUTH_TOKEN")),
            api_url=dict(type="str", default=os.environ.get("ONEANDONE_API_URL")),
            name=dict(type="str"),
            firewall_policy=dict(type="str"),
            description=dict(type="str"),
            rules=dict(type="list", elements="dict", default=[]),
            add_server_ips=dict(type="list", elements="str", default=[]),
            remove_server_ips=dict(type="list", elements="str", default=[]),
            add_rules=dict(type="list", elements="dict", default=[]),
            remove_rules=dict(type="list", elements="str", default=[]),
            wait=dict(type="bool", default=True),
            wait_timeout=dict(type="int", default=600),
            wait_interval=dict(type="int", default=5),
            state=dict(type="str", default="present", choices=["present", "absent", "update"]),
        ),
        supports_check_mode=True,
    )
    if not HAS_ONEANDONE_SDK:
        module.fail_json(msg="1and1 required for this module")
    auth_token = module.params.get("auth_token")
    if not auth_token:
        module.fail_json(msg='The "auth_token" parameter or ONEANDONE_AUTH_TOKEN environment variable is required.')
    # Build the API client, honouring a custom endpoint when one is set.
    api_url = module.params.get("api_url")
    if api_url:
        oneandone_conn = oneandone.client.OneAndOneService(api_token=auth_token, api_url=api_url)
    else:
        oneandone_conn = oneandone.client.OneAndOneService(api_token=auth_token)
    state = module.params.get("state")
    if state == "absent":
        if not module.params.get("name"):
            module.fail_json(msg="'name' parameter is required to delete a firewall policy.")
        try:
            (changed, firewall_policy) = remove_firewall_policy(module, oneandone_conn)
        except Exception as e:
            module.fail_json(msg=str(e))
    elif state == "update":
        if not module.params.get("firewall_policy"):
            module.fail_json(msg="'firewall_policy' parameter is required to update a firewall policy.")
        try:
            (changed, firewall_policy) = update_firewall_policy(module, oneandone_conn)
        except Exception as e:
            module.fail_json(msg=str(e))
    elif state == "present":
        for param in ("name", "rules"):
            if not module.params.get(param):
                module.fail_json(msg=f"{param} parameter is required for new firewall policies.")
        try:
            (changed, firewall_policy) = create_firewall_policy(module, oneandone_conn)
        except Exception as e:
            module.fail_json(msg=str(e))
    module.exit_json(changed=changed, firewall_policy=firewall_policy)
if __name__ == "__main__":
main()

View File

@@ -1,634 +0,0 @@
#!/usr/bin/python
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
DOCUMENTATION = r"""
module: oneandone_load_balancer
short_description: Configure 1&1 load balancer
description:
- Create, remove, update load balancers. This module has a dependency on 1and1 >= 1.0.
deprecated:
removed_in: 13.0.0
why: DNS fails to resolve the API endpoint used by the module.
alternative: There is none.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
state:
description:
- Define a load balancer state to create, remove, or update.
type: str
default: 'present'
choices: ["present", "absent", "update"]
auth_token:
description:
- Authenticating API token provided by 1&1.
type: str
load_balancer:
description:
- The identifier (id or name) of the load balancer used with update state.
type: str
api_url:
description:
- Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable.
type: str
name:
description:
- Load balancer name used with present state. Used as identifier (ID or name) when used with absent state. maxLength=128.
type: str
health_check_test:
description:
- Type of the health check. At the moment, HTTP is not allowed.
type: str
choices: ["NONE", "TCP", "HTTP", "ICMP"]
health_check_interval:
description:
- Health check period in seconds. minimum=5, maximum=300, multipleOf=1.
type: str
health_check_path:
description:
- URL to call for checking. Required for HTTP health check. maxLength=1000.
type: str
health_check_parse:
description:
- Regular expression to check. Required for HTTP health check. maxLength=64.
type: str
persistence:
description:
- Persistence.
type: bool
persistence_time:
description:
- Persistence time in seconds. Required if persistence is enabled. minimum=30, maximum=1200, multipleOf=1.
type: str
method:
description:
- Balancing procedure.
type: str
choices: ["ROUND_ROBIN", "LEAST_CONNECTIONS"]
datacenter:
description:
- ID or country code of the datacenter where the load balancer is created.
- If not specified, it defaults to V(US).
type: str
choices: ["US", "ES", "DE", "GB"]
rules:
description:
- A list of rule objects that are set for the load balancer. Each rule must contain protocol, port_balancer, and port_server
parameters, in addition to source parameter, which is optional.
type: list
elements: dict
default: []
description:
description:
- Description of the load balancer. maxLength=256.
type: str
add_server_ips:
description:
- A list of server identifiers (id or name) to be assigned to a load balancer. Used in combination with O(state=update).
type: list
elements: str
default: []
remove_server_ips:
description:
- A list of server IP IDs to be unassigned from a load balancer. Used in combination with O(state=update).
type: list
elements: str
default: []
add_rules:
description:
      - A list of rules that are added to an existing load balancer. Its syntax is the same as the one used for rules parameter.
Used in combination with O(state=update).
type: list
elements: dict
default: []
remove_rules:
description:
- A list of rule IDs that are removed from an existing load balancer. Used in combination with O(state=update).
type: list
elements: str
default: []
wait:
description:
- Wait for the instance to be in state 'running' before returning.
default: true
type: bool
wait_timeout:
description:
- How long before wait gives up, in seconds.
type: int
default: 600
wait_interval:
description:
- Defines the number of seconds to wait when using the _wait_for methods.
type: int
default: 5
requirements:
- "1and1"
author:
- Amel Ajdinovic (@aajdinov)
- Ethan Devenport (@edevenport)
"""
EXAMPLES = r"""
- name: Create a load balancer
community.general.oneandone_load_balancer:
auth_token: oneandone_private_api_key
name: ansible load balancer
description: Testing creation of load balancer with ansible
health_check_test: TCP
health_check_interval: 40
persistence: true
persistence_time: 1200
method: ROUND_ROBIN
datacenter: US
rules:
- protocol: TCP
port_balancer: 80
port_server: 80
source: 0.0.0.0
wait: true
wait_timeout: 500
- name: Destroy a load balancer
community.general.oneandone_load_balancer:
auth_token: oneandone_private_api_key
name: ansible load balancer
wait: true
wait_timeout: 500
state: absent
- name: Update a load balancer
community.general.oneandone_load_balancer:
auth_token: oneandone_private_api_key
load_balancer: ansible load balancer
name: ansible load balancer updated
description: Testing the update of a load balancer with ansible
wait: true
wait_timeout: 500
state: update
- name: Add server to a load balancer
community.general.oneandone_load_balancer:
auth_token: oneandone_private_api_key
load_balancer: ansible load balancer updated
description: Adding server to a load balancer with ansible
add_server_ips:
- server identifier (id or name)
wait: true
wait_timeout: 500
state: update
- name: Remove server from a load balancer
community.general.oneandone_load_balancer:
auth_token: oneandone_private_api_key
load_balancer: ansible load balancer updated
description: Removing server from a load balancer with ansible
remove_server_ips:
- B2504878540DBC5F7634EB00A07C1EBD (server's ip id)
wait: true
wait_timeout: 500
state: update
- name: Add rules to a load balancer
community.general.oneandone_load_balancer:
auth_token: oneandone_private_api_key
load_balancer: ansible load balancer updated
description: Adding rules to a load balancer with ansible
add_rules:
- protocol: TCP
port_balancer: 70
port_server: 70
source: 0.0.0.0
- protocol: TCP
port_balancer: 60
port_server: 60
source: 0.0.0.0
wait: true
wait_timeout: 500
state: update
- name: Remove rules from a load balancer
community.general.oneandone_load_balancer:
auth_token: oneandone_private_api_key
load_balancer: ansible load balancer updated
description: Adding rules to a load balancer with ansible
remove_rules:
- "rule_id #1"
- "rule_id #2"
- '...'
wait: true
wait_timeout: 500
state: update
"""
RETURN = r"""
load_balancer:
description: Information about the load balancer that was processed.
type: dict
sample: {"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"}
returned: always
"""
import os
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.oneandone import (
OneAndOneResources,
get_datacenter,
get_load_balancer,
get_server,
wait_for_resource_creation_completion,
)
HAS_ONEANDONE_SDK = True
try:
import oneandone.client
except ImportError:
HAS_ONEANDONE_SDK = False
# Valid values for the module's datacenter, health_check_test, and method
# options (mirrored by the `choices` entries in main()'s argument spec).
DATACENTERS = ["US", "ES", "DE", "GB"]
HEALTH_CHECK_TESTS = ["NONE", "TCP", "HTTP", "ICMP"]
METHODS = ["ROUND_ROBIN", "LEAST_CONNECTIONS"]
def _check_mode(module, result):
if module.check_mode:
module.exit_json(changed=result)
def _add_server_ips(module, oneandone_conn, load_balancer_id, server_ids):
    """
    Assigns servers to a load balancer.

    Builds one AttachServer per server identifier (using the server's first
    IP). In check mode, returns whether any attachment would be made;
    otherwise performs the attachment and returns the updated balancer.
    """
    try:
        attachments = []
        for identifier in server_ids:
            srv = get_server(oneandone_conn, identifier, True)
            # First IP of the server; a server without IPs raises here and is
            # reported through fail_json below.
            first_ip = next(iter(srv["ips"] or []), None)
            attachments.append(
                oneandone.client.AttachServer(server_id=srv["id"], server_ip_id=first_ip["id"])
            )
        if module.check_mode:
            return bool(attachments)
        return oneandone_conn.attach_load_balancer_server(
            load_balancer_id=load_balancer_id, server_ips=attachments
        )
    except Exception as ex:
        module.fail_json(msg=str(ex))
def _remove_load_balancer_server(module, oneandone_conn, load_balancer_id, server_ip_id):
    """
    Unassigns a server/IP from a load balancer.

    In check mode, only looks the assignment up (read-only) and reports
    whether it exists; otherwise performs the actual removal.
    """
    try:
        if not module.check_mode:
            return oneandone_conn.remove_load_balancer_server(
                load_balancer_id=load_balancer_id, server_ip_id=server_ip_id
            )
        existing = oneandone_conn.get_load_balancer_server(
            load_balancer_id=load_balancer_id, server_ip_id=server_ip_id
        )
        return bool(existing)
    except Exception as ex:
        module.fail_json(msg=str(ex))
def _add_load_balancer_rules(module, oneandone_conn, load_balancer_id, rules):
    """
    Adds new rules to a load_balancer.

    In check mode, returns whether rules would be added to an existing
    balancer; otherwise performs the addition and returns the updated
    balancer.
    """
    try:
        rule_objects = [
            oneandone.client.LoadBalancerRule(
                protocol=entry["protocol"],
                port_balancer=entry["port_balancer"],
                port_server=entry["port_server"],
                source=entry["source"],
            )
            for entry in rules
        ]
        if module.check_mode:
            balancer = get_load_balancer(oneandone_conn, load_balancer_id)
            return bool(rule_objects and balancer)
        return oneandone_conn.add_load_balancer_rule(
            load_balancer_id=load_balancer_id, load_balancer_rules=rule_objects
        )
    except Exception as ex:
        module.fail_json(msg=str(ex))
def _remove_load_balancer_rule(module, oneandone_conn, load_balancer_id, rule_id):
    """
    Removes a rule from a load_balancer.

    In check mode, only fetches the rule (read-only) and reports whether it
    exists; otherwise performs the actual removal.
    """
    try:
        if not module.check_mode:
            return oneandone_conn.remove_load_balancer_rule(load_balancer_id=load_balancer_id, rule_id=rule_id)
        existing = oneandone_conn.get_load_balancer_rule(load_balancer_id=load_balancer_id, rule_id=rule_id)
        return bool(existing)
    except Exception as ex:
        module.fail_json(msg=str(ex))
def update_load_balancer(module, oneandone_conn):
    """
    Updates a load_balancer based on input arguments.

    Load balancer rules and server ips can be added/removed to/from
    load balancer. Load balancer name, description, health_check_test,
    health_check_interval, persistence, persistence_time, and method
    can be updated as well.

    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object

    Returns a tuple (changed, load_balancer). In check mode this function
    exits via _check_mode()/module.exit_json() and does not return.
    """
    # Bug fix: the try/except previously covered only the final return
    # statement, so any API error raised while updating escaped as a raw
    # traceback. The whole update sequence is now covered, matching
    # update_firewall_policy in the firewall-policy module.
    try:
        load_balancer_id = module.params.get("load_balancer")
        name = module.params.get("name")
        description = module.params.get("description")
        health_check_test = module.params.get("health_check_test")
        health_check_interval = module.params.get("health_check_interval")
        health_check_path = module.params.get("health_check_path")
        health_check_parse = module.params.get("health_check_parse")
        persistence = module.params.get("persistence")
        persistence_time = module.params.get("persistence_time")
        method = module.params.get("method")
        add_server_ips = module.params.get("add_server_ips")
        remove_server_ips = module.params.get("remove_server_ips")
        add_rules = module.params.get("add_rules")
        remove_rules = module.params.get("remove_rules")
        changed = False
        load_balancer = get_load_balancer(oneandone_conn, load_balancer_id, True)
        if load_balancer is None:
            # Not found: in check mode report "no change" and exit. Outside
            # check mode the load_balancer["id"] accesses below raise and are
            # reported via fail_json.
            _check_mode(module, False)
        if (
            name
            or description
            or health_check_test
            or health_check_interval
            or health_check_path
            or health_check_parse
            or persistence
            or persistence_time
            or method
        ):
            # Any settable attribute being supplied counts as a change.
            _check_mode(module, True)
            load_balancer = oneandone_conn.modify_load_balancer(
                load_balancer_id=load_balancer["id"],
                name=name,
                description=description,
                health_check_test=health_check_test,
                health_check_interval=health_check_interval,
                health_check_path=health_check_path,
                health_check_parse=health_check_parse,
                persistence=persistence,
                persistence_time=persistence_time,
                method=method,
            )
            changed = True
        if add_server_ips:
            if module.check_mode:
                _check_mode(module, _add_server_ips(module, oneandone_conn, load_balancer["id"], add_server_ips))
            load_balancer = _add_server_ips(module, oneandone_conn, load_balancer["id"], add_server_ips)
            changed = True
        if remove_server_ips:
            chk_changed = False
            for server_ip_id in remove_server_ips:
                if module.check_mode:
                    chk_changed |= _remove_load_balancer_server(module, oneandone_conn, load_balancer["id"], server_ip_id)
                # In check mode the helper is read-only, so this second call is
                # harmless; outside check mode it performs the removal.
                _remove_load_balancer_server(module, oneandone_conn, load_balancer["id"], server_ip_id)
            _check_mode(module, chk_changed)
            # Re-read the balancer to reflect the removals.
            load_balancer = get_load_balancer(oneandone_conn, load_balancer["id"], True)
            changed = True
        if add_rules:
            load_balancer = _add_load_balancer_rules(module, oneandone_conn, load_balancer["id"], add_rules)
            _check_mode(module, load_balancer)
            changed = True
        if remove_rules:
            chk_changed = False
            for rule_id in remove_rules:
                if module.check_mode:
                    chk_changed |= _remove_load_balancer_rule(module, oneandone_conn, load_balancer["id"], rule_id)
                _remove_load_balancer_rule(module, oneandone_conn, load_balancer["id"], rule_id)
            _check_mode(module, chk_changed)
            load_balancer = get_load_balancer(oneandone_conn, load_balancer["id"], True)
            changed = True
        return (changed, load_balancer)
    except Exception as ex:
        module.fail_json(msg=str(ex))
def create_load_balancer(module, oneandone_conn):
    """
    Create a new load_balancer.
    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object
    In check mode this exits early (creation always counts as a change);
    otherwise returns a tuple (changed, load_balancer).
    """
    try:
        name = module.params.get("name")
        description = module.params.get("description")
        health_check_test = module.params.get("health_check_test")
        health_check_interval = module.params.get("health_check_interval")
        health_check_path = module.params.get("health_check_path")
        health_check_parse = module.params.get("health_check_parse")
        persistence = module.params.get("persistence")
        persistence_time = module.params.get("persistence_time")
        method = module.params.get("method")
        datacenter = module.params.get("datacenter")
        rules = module.params.get("rules")
        wait = module.params.get("wait")
        wait_timeout = module.params.get("wait_timeout")
        wait_interval = module.params.get("wait_interval")
        load_balancer_rules = []
        datacenter_id = None
        if datacenter is not None:
            # Resolve the datacenter id/country code before creating anything.
            datacenter_id = get_datacenter(oneandone_conn, datacenter)
            if datacenter_id is None:
                module.fail_json(msg=f"datacenter {datacenter} not found.")
        for rule in rules:
            load_balancer_rule = oneandone.client.LoadBalancerRule(
                protocol=rule["protocol"],
                port_balancer=rule["port_balancer"],
                port_server=rule["port_server"],
                source=rule["source"],
            )
            load_balancer_rules.append(load_balancer_rule)
        # Creating a balancer is always a change; in check mode exit here
        # before touching the API.
        _check_mode(module, True)
        load_balancer_obj = oneandone.client.LoadBalancer(
            health_check_path=health_check_path,
            health_check_parse=health_check_parse,
            name=name,
            description=description,
            health_check_test=health_check_test,
            health_check_interval=health_check_interval,
            persistence=persistence,
            persistence_time=persistence_time,
            method=method,
            datacenter_id=datacenter_id,
        )
        load_balancer = oneandone_conn.create_load_balancer(
            load_balancer=load_balancer_obj, load_balancer_rules=load_balancer_rules
        )
        if wait:
            wait_for_resource_creation_completion(
                oneandone_conn, OneAndOneResources.load_balancer, load_balancer["id"], wait_timeout, wait_interval
            )
            load_balancer = get_load_balancer(oneandone_conn, load_balancer["id"], True)  # refresh
        changed = True if load_balancer else False
        _check_mode(module, False)
        return (changed, load_balancer)
    except Exception as ex:
        module.fail_json(msg=str(ex))
def remove_load_balancer(module, oneandone_conn):
    """
    Removes a load_balancer.

    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object

    In check mode, exits reporting whether a deletion would take place;
    otherwise deletes the balancer and returns (changed, {id, name}).
    """
    try:
        lb_id = module.params.get("name")
        load_balancer_id = get_load_balancer(oneandone_conn, lb_id)
        if module.check_mode:
            # Exits with changed=False when the balancer does not exist,
            # changed=True when it does — same outcome as the two-step check.
            _check_mode(module, load_balancer_id is not None)
        load_balancer = oneandone_conn.delete_load_balancer(load_balancer_id)
        changed = bool(load_balancer)
        return (changed, {"id": load_balancer["id"], "name": load_balancer["name"]})
    except Exception as ex:
        module.fail_json(msg=str(ex))
def main():
    """
    Module entry point: builds the argument spec, authenticates against the
    1&1 API, and dispatches to the create/update/remove handlers based on
    the requested state.
    """
    module = AnsibleModule(
        argument_spec=dict(
            auth_token=dict(type="str", no_log=True, default=os.environ.get("ONEANDONE_AUTH_TOKEN")),
            api_url=dict(type="str", default=os.environ.get("ONEANDONE_API_URL")),
            load_balancer=dict(type="str"),
            name=dict(type="str"),
            description=dict(type="str"),
            health_check_test=dict(choices=HEALTH_CHECK_TESTS),
            health_check_interval=dict(type="str"),
            health_check_path=dict(type="str"),
            health_check_parse=dict(type="str"),
            persistence=dict(type="bool"),
            persistence_time=dict(type="str"),
            method=dict(choices=METHODS),
            datacenter=dict(choices=DATACENTERS),
            rules=dict(type="list", elements="dict", default=[]),
            add_server_ips=dict(type="list", elements="str", default=[]),
            remove_server_ips=dict(type="list", elements="str", default=[]),
            add_rules=dict(type="list", elements="dict", default=[]),
            remove_rules=dict(type="list", elements="str", default=[]),
            wait=dict(type="bool", default=True),
            wait_timeout=dict(type="int", default=600),
            wait_interval=dict(type="int", default=5),
            state=dict(type="str", default="present", choices=["present", "absent", "update"]),
        ),
        supports_check_mode=True,
    )
    if not HAS_ONEANDONE_SDK:
        module.fail_json(msg="1and1 required for this module")
    if not module.params.get("auth_token"):
        module.fail_json(msg="auth_token parameter is required.")
    # Build the API client, honouring a custom endpoint when one is set.
    if not module.params.get("api_url"):
        oneandone_conn = oneandone.client.OneAndOneService(api_token=module.params.get("auth_token"))
    else:
        oneandone_conn = oneandone.client.OneAndOneService(
            api_token=module.params.get("auth_token"), api_url=module.params.get("api_url")
        )
    state = module.params.get("state")
    if state == "absent":
        if not module.params.get("name"):
            module.fail_json(msg="'name' parameter is required for deleting a load balancer.")
        try:
            (changed, load_balancer) = remove_load_balancer(module, oneandone_conn)
        except Exception as ex:
            module.fail_json(msg=str(ex))
    elif state == "update":
        if not module.params.get("load_balancer"):
            module.fail_json(msg="'load_balancer' parameter is required for updating a load balancer.")
        try:
            (changed, load_balancer) = update_load_balancer(module, oneandone_conn)
        except Exception as ex:
            module.fail_json(msg=str(ex))
    elif state == "present":
        # NOTE(review): this falsy check rejects persistence=False even though
        # False is a valid boolean value for the option — confirm whether that
        # is intended before reusing this pattern.
        for param in (
            "name",
            "health_check_test",
            "health_check_interval",
            "persistence",
            "persistence_time",
            "method",
            "rules",
        ):
            if not module.params.get(param):
                module.fail_json(msg=f"{param} parameter is required for new load balancers.")
        try:
            (changed, load_balancer) = create_load_balancer(module, oneandone_conn)
        except Exception as ex:
            module.fail_json(msg=str(ex))
    module.exit_json(changed=changed, load_balancer=load_balancer)
if __name__ == "__main__":
main()

View File

@@ -1,948 +0,0 @@
#!/usr/bin/python
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
DOCUMENTATION = r"""
module: oneandone_monitoring_policy
short_description: Configure 1&1 monitoring policy
description:
- Create, remove, update monitoring policies (and add/remove ports, processes, and servers). This module has a dependency
on 1and1 >= 1.0.
deprecated:
removed_in: 13.0.0
why: DNS fails to resolve the API endpoint used by the module.
alternative: There is none.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
state:
description:
- Define a monitoring policy's state to create, remove, update.
type: str
default: present
choices: ["present", "absent", "update"]
auth_token:
description:
- Authenticating API token provided by 1&1.
type: str
api_url:
description:
- Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable.
type: str
name:
description:
- Monitoring policy name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128.
type: str
monitoring_policy:
description:
- The identifier (id or name) of the monitoring policy used with update state.
type: str
agent:
description:
- Set true for using agent.
type: str
email:
description:
- User's email. maxLength=128.
type: str
description:
description:
- Monitoring policy description. maxLength=256.
type: str
thresholds:
description:
- Monitoring policy thresholds. Each of the suboptions have warning and critical, which both have alert and value suboptions.
Warning is used to set limits for warning alerts, critical is used to set critical alerts. alert enables alert, and
value is used to advise when the value is exceeded.
type: list
elements: dict
default: []
suboptions:
cpu:
description:
- Consumption limits of CPU.
required: true
ram:
description:
- Consumption limits of RAM.
required: true
disk:
description:
- Consumption limits of hard disk.
required: true
internal_ping:
description:
- Response limits of internal ping.
required: true
transfer:
description:
- Consumption limits for transfer.
required: true
ports:
description:
- Array of ports that are to be monitored.
type: list
elements: dict
default: []
suboptions:
protocol:
description:
- Internet protocol.
choices: ["TCP", "UDP"]
required: true
port:
description:
- Port number. minimum=1, maximum=65535.
required: true
alert_if:
description:
- Case of alert.
choices: ["RESPONDING", "NOT_RESPONDING"]
required: true
email_notification:
description:
- Set true for sending e-mail notifications.
required: true
processes:
description:
- Array of processes that are to be monitored.
type: list
elements: dict
default: []
suboptions:
process:
description:
- Name of the process. maxLength=50.
required: true
alert_if:
description:
- Case of alert.
choices: ["RUNNING", "NOT_RUNNING"]
required: true
add_ports:
description:
- Ports to add to the monitoring policy.
type: list
elements: dict
default: []
add_processes:
description:
- Processes to add to the monitoring policy.
type: list
elements: dict
default: []
add_servers:
description:
- Servers to add to the monitoring policy.
type: list
elements: str
default: []
remove_ports:
description:
- Ports to remove from the monitoring policy.
type: list
elements: str
default: []
remove_processes:
description:
- Processes to remove from the monitoring policy.
type: list
elements: str
default: []
remove_servers:
description:
- Servers to remove from the monitoring policy.
type: list
elements: str
default: []
update_ports:
description:
- Ports to be updated on the monitoring policy.
type: list
elements: dict
default: []
update_processes:
description:
- Processes to be updated on the monitoring policy.
type: list
elements: dict
default: []
wait:
description:
- Wait for the instance to be in state 'running' before returning.
default: true
type: bool
wait_timeout:
description:
- How long before wait gives up, in seconds.
type: int
default: 600
wait_interval:
description:
- Defines the number of seconds to wait when using the _wait_for methods.
type: int
default: 5
requirements:
- "1and1"
author:
- "Amel Ajdinovic (@aajdinov)"
- "Ethan Devenport (@edevenport)"
"""
EXAMPLES = r"""
- name: Create a monitoring policy
community.general.oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
name: ansible monitoring policy
description: Testing creation of a monitoring policy with ansible
email: your@emailaddress.com
agent: true
thresholds:
- cpu:
warning:
value: 80
alert: false
critical:
value: 92
alert: false
- ram:
warning:
value: 80
alert: false
critical:
value: 90
alert: false
- disk:
warning:
value: 80
alert: false
critical:
value: 90
alert: false
- internal_ping:
warning:
value: 50
alert: false
critical:
value: 100
alert: false
- transfer:
warning:
value: 1000
alert: false
critical:
value: 2000
alert: false
ports:
- protocol: TCP
port: 22
alert_if: RESPONDING
email_notification: false
processes:
- process: test
alert_if: NOT_RUNNING
email_notification: false
wait: true
- name: Destroy a monitoring policy
community.general.oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
state: absent
name: ansible monitoring policy
- name: Update a monitoring policy
community.general.oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy
name: ansible monitoring policy updated
description: Testing creation of a monitoring policy with ansible updated
email: another@emailaddress.com
thresholds:
- cpu:
warning:
value: 70
alert: false
critical:
value: 90
alert: false
- ram:
warning:
value: 70
alert: false
critical:
value: 80
alert: false
- disk:
warning:
value: 70
alert: false
critical:
value: 80
alert: false
- internal_ping:
warning:
value: 60
alert: false
critical:
value: 90
alert: false
- transfer:
warning:
value: 900
alert: false
critical:
value: 1900
alert: false
wait: true
state: update
- name: Add a port to a monitoring policy
community.general.oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
add_ports:
- protocol: TCP
port: 33
alert_if: RESPONDING
email_notification: false
wait: true
state: update
- name: Update existing ports of a monitoring policy
community.general.oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
update_ports:
- id: existing_port_id
protocol: TCP
port: 34
alert_if: RESPONDING
email_notification: false
- id: existing_port_id
protocol: TCP
port: 23
alert_if: RESPONDING
email_notification: false
wait: true
state: update
- name: Remove a port from a monitoring policy
community.general.oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
remove_ports:
- port_id
state: update
- name: Add a process to a monitoring policy
community.general.oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
add_processes:
- process: test_2
alert_if: NOT_RUNNING
email_notification: false
wait: true
state: update
- name: Update existing processes of a monitoring policy
community.general.oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
update_processes:
- id: process_id
process: test_1
alert_if: NOT_RUNNING
email_notification: false
- id: process_id
process: test_3
alert_if: NOT_RUNNING
email_notification: false
wait: true
state: update
- name: Remove a process from a monitoring policy
community.general.oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
remove_processes:
- process_id
wait: true
state: update
- name: Add server to a monitoring policy
community.general.oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
add_servers:
- server id or name
wait: true
state: update
- name: Remove server from a monitoring policy
community.general.oneandone_monitoring_policy:
auth_token: oneandone_private_api_key
monitoring_policy: ansible monitoring policy updated
remove_servers:
- server01
wait: true
state: update
"""
RETURN = r"""
monitoring_policy:
description: Information about the monitoring policy that was processed.
type: dict
sample: {"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}
returned: always
"""
import os
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.oneandone import (
OneAndOneResources,
get_monitoring_policy,
get_server,
wait_for_resource_creation_completion,
)
HAS_ONEANDONE_SDK = True
try:
import oneandone.client
except ImportError:
HAS_ONEANDONE_SDK = False
def _check_mode(module, result):
if module.check_mode:
module.exit_json(changed=result)
def _add_ports(module, oneandone_conn, monitoring_policy_id, ports):
"""
Adds new ports to a monitoring policy.
"""
try:
monitoring_policy_ports = []
for _port in ports:
monitoring_policy_port = oneandone.client.Port(
protocol=_port["protocol"],
port=_port["port"],
alert_if=_port["alert_if"],
email_notification=_port["email_notification"],
)
monitoring_policy_ports.append(monitoring_policy_port)
if module.check_mode:
return bool(monitoring_policy_ports)
monitoring_policy = oneandone_conn.add_port(
monitoring_policy_id=monitoring_policy_id, ports=monitoring_policy_ports
)
return monitoring_policy
except Exception as ex:
module.fail_json(msg=str(ex))
def _delete_monitoring_policy_port(module, oneandone_conn, monitoring_policy_id, port_id):
"""
Removes a port from a monitoring policy.
"""
try:
if module.check_mode:
monitoring_policy = oneandone_conn.delete_monitoring_policy_port(
monitoring_policy_id=monitoring_policy_id, port_id=port_id
)
return bool(monitoring_policy)
monitoring_policy = oneandone_conn.delete_monitoring_policy_port(
monitoring_policy_id=monitoring_policy_id, port_id=port_id
)
return monitoring_policy
except Exception as ex:
module.fail_json(msg=str(ex))
def _modify_port(module, oneandone_conn, monitoring_policy_id, port_id, port):
"""
Modifies a monitoring policy port.
"""
try:
if module.check_mode:
cm_port = oneandone_conn.get_monitoring_policy_port(
monitoring_policy_id=monitoring_policy_id, port_id=port_id
)
return bool(cm_port)
monitoring_policy_port = oneandone.client.Port(
protocol=port["protocol"],
port=port["port"],
alert_if=port["alert_if"],
email_notification=port["email_notification"],
)
monitoring_policy = oneandone_conn.modify_port(
monitoring_policy_id=monitoring_policy_id, port_id=port_id, port=monitoring_policy_port
)
return monitoring_policy
except Exception as ex:
module.fail_json(msg=str(ex))
def _add_processes(module, oneandone_conn, monitoring_policy_id, processes):
"""
Adds new processes to a monitoring policy.
"""
try:
monitoring_policy_processes = []
for _process in processes:
monitoring_policy_process = oneandone.client.Process(
process=_process["process"],
alert_if=_process["alert_if"],
email_notification=_process["email_notification"],
)
monitoring_policy_processes.append(monitoring_policy_process)
if module.check_mode:
mp_id = get_monitoring_policy(oneandone_conn, monitoring_policy_id)
return bool(monitoring_policy_processes and mp_id)
monitoring_policy = oneandone_conn.add_process(
monitoring_policy_id=monitoring_policy_id, processes=monitoring_policy_processes
)
return monitoring_policy
except Exception as ex:
module.fail_json(msg=str(ex))
def _delete_monitoring_policy_process(module, oneandone_conn, monitoring_policy_id, process_id):
"""
Removes a process from a monitoring policy.
"""
try:
if module.check_mode:
process = oneandone_conn.get_monitoring_policy_process(
monitoring_policy_id=monitoring_policy_id, process_id=process_id
)
return bool(process)
monitoring_policy = oneandone_conn.delete_monitoring_policy_process(
monitoring_policy_id=monitoring_policy_id, process_id=process_id
)
return monitoring_policy
except Exception as ex:
module.fail_json(msg=str(ex))
def _modify_process(module, oneandone_conn, monitoring_policy_id, process_id, process):
"""
Modifies a monitoring policy process.
"""
try:
if module.check_mode:
cm_process = oneandone_conn.get_monitoring_policy_process(
monitoring_policy_id=monitoring_policy_id, process_id=process_id
)
return bool(cm_process)
monitoring_policy_process = oneandone.client.Process(
process=process["process"], alert_if=process["alert_if"], email_notification=process["email_notification"]
)
monitoring_policy = oneandone_conn.modify_process(
monitoring_policy_id=monitoring_policy_id, process_id=process_id, process=monitoring_policy_process
)
return monitoring_policy
except Exception as ex:
module.fail_json(msg=str(ex))
def _attach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, servers):
"""
Attaches servers to a monitoring policy.
"""
try:
attach_servers = []
for _server_id in servers:
server_id = get_server(oneandone_conn, _server_id)
attach_server = oneandone.client.AttachServer(server_id=server_id)
attach_servers.append(attach_server)
if module.check_mode:
return bool(attach_servers)
monitoring_policy = oneandone_conn.attach_monitoring_policy_server(
monitoring_policy_id=monitoring_policy_id, servers=attach_servers
)
return monitoring_policy
except Exception as ex:
module.fail_json(msg=str(ex))
def _detach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, server_id):
"""
Detaches a server from a monitoring policy.
"""
try:
if module.check_mode:
mp_server = oneandone_conn.get_monitoring_policy_server(
monitoring_policy_id=monitoring_policy_id, server_id=server_id
)
return bool(mp_server)
monitoring_policy = oneandone_conn.detach_monitoring_policy_server(
monitoring_policy_id=monitoring_policy_id, server_id=server_id
)
return monitoring_policy
except Exception as ex:
module.fail_json(msg=str(ex))
def update_monitoring_policy(module, oneandone_conn):
    """
    Updates a monitoring_policy based on input arguments.
    Monitoring policy ports, processes and servers can be added/removed to/from
    a monitoring policy. Monitoring policy name, description, email,
    thresholds for cpu, ram, disk, transfer and internal_ping
    can be updated as well.
    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object

    Returns (changed, monitoring_policy). In check mode, the helper
    _check_mode() may exit the module early from within any branch.
    """
    try:
        monitoring_policy_id = module.params.get("monitoring_policy")
        name = module.params.get("name")
        description = module.params.get("description")
        email = module.params.get("email")
        thresholds = module.params.get("thresholds")
        add_ports = module.params.get("add_ports")
        update_ports = module.params.get("update_ports")
        remove_ports = module.params.get("remove_ports")
        add_processes = module.params.get("add_processes")
        update_processes = module.params.get("update_processes")
        remove_processes = module.params.get("remove_processes")
        add_servers = module.params.get("add_servers")
        remove_servers = module.params.get("remove_servers")
        changed = False
        # Resolve the policy (by id or name); third argument requests full details.
        monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy_id, True)
        if monitoring_policy is None:
            # In check mode, exit reporting "not changed".
            # NOTE(review): outside check mode execution continues with
            # monitoring_policy=None, and the subscriptions below raise and
            # surface via fail_json — confirm this is the intended behavior.
            _check_mode(module, False)
        _monitoring_policy = oneandone.client.MonitoringPolicy(name=name, description=description, email=email)
        _thresholds = None
        if thresholds:
            # Only these entities are accepted; unrecognized keys are ignored.
            threshold_entities = ["cpu", "ram", "disk", "internal_ping", "transfer"]
            _thresholds = []
            for threshold in thresholds:
                key = list(threshold.keys())[0]
                if key in threshold_entities:
                    # The API expects lowercase string booleans for the alert flags.
                    _threshold = oneandone.client.Threshold(
                        entity=key,
                        warning_value=threshold[key]["warning"]["value"],
                        warning_alert=str(threshold[key]["warning"]["alert"]).lower(),
                        critical_value=threshold[key]["critical"]["value"],
                        critical_alert=str(threshold[key]["critical"]["alert"]).lower(),
                    )
                    _thresholds.append(_threshold)
        if name or description or email or thresholds:
            # Basic attribute change requested: check mode exits here as "changed".
            _check_mode(module, True)
            monitoring_policy = oneandone_conn.modify_monitoring_policy(
                monitoring_policy_id=monitoring_policy["id"],
                monitoring_policy=_monitoring_policy,
                thresholds=_thresholds,
            )
            changed = True
        if add_ports:
            if module.check_mode:
                # _add_ports returns a boolean in check mode; _check_mode exits.
                _check_mode(module, _add_ports(module, oneandone_conn, monitoring_policy["id"], add_ports))
            monitoring_policy = _add_ports(module, oneandone_conn, monitoring_policy["id"], add_ports)
            changed = True
        if update_ports:
            chk_changed = False
            for update_port in update_ports:
                if module.check_mode:
                    chk_changed |= _modify_port(
                        module, oneandone_conn, monitoring_policy["id"], update_port["id"], update_port
                    )
                # _modify_port itself short-circuits in check mode, so this
                # second call does not mutate anything under --check.
                _modify_port(module, oneandone_conn, monitoring_policy["id"], update_port["id"], update_port)
            # NOTE(review): unlike the remove_ports/update_processes branches,
            # chk_changed is never passed to _check_mode here — presumably an
            # oversight; confirm intended check-mode behavior.
            monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy["id"], True)
            changed = True
        if remove_ports:
            chk_changed = False
            for port_id in remove_ports:
                if module.check_mode:
                    chk_changed |= _delete_monitoring_policy_port(
                        module, oneandone_conn, monitoring_policy["id"], port_id
                    )
                _delete_monitoring_policy_port(module, oneandone_conn, monitoring_policy["id"], port_id)
            # Exits in check mode with the accumulated would-change flag.
            _check_mode(module, chk_changed)
            monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy["id"], True)
            changed = True
        if add_processes:
            monitoring_policy = _add_processes(module, oneandone_conn, monitoring_policy["id"], add_processes)
            # In check mode _add_processes returned a boolean; report it and exit.
            _check_mode(module, monitoring_policy)
            changed = True
        if update_processes:
            chk_changed = False
            for update_process in update_processes:
                if module.check_mode:
                    chk_changed |= _modify_process(
                        module, oneandone_conn, monitoring_policy["id"], update_process["id"], update_process
                    )
                _modify_process(module, oneandone_conn, monitoring_policy["id"], update_process["id"], update_process)
            _check_mode(module, chk_changed)
            monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy["id"], True)
            changed = True
        if remove_processes:
            chk_changed = False
            for process_id in remove_processes:
                if module.check_mode:
                    chk_changed |= _delete_monitoring_policy_process(
                        module, oneandone_conn, monitoring_policy["id"], process_id
                    )
                _delete_monitoring_policy_process(module, oneandone_conn, monitoring_policy["id"], process_id)
            _check_mode(module, chk_changed)
            monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy["id"], True)
            changed = True
        if add_servers:
            monitoring_policy = _attach_monitoring_policy_server(
                module, oneandone_conn, monitoring_policy["id"], add_servers
            )
            _check_mode(module, monitoring_policy)
            changed = True
        if remove_servers:
            chk_changed = False
            for _server_id in remove_servers:
                # Resolve each server identifier (id or name) to an API id.
                server_id = get_server(oneandone_conn, _server_id)
                if module.check_mode:
                    chk_changed |= _detach_monitoring_policy_server(
                        module, oneandone_conn, monitoring_policy["id"], server_id
                    )
                _detach_monitoring_policy_server(module, oneandone_conn, monitoring_policy["id"], server_id)
            _check_mode(module, chk_changed)
            monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy["id"], True)
            changed = True
        return (changed, monitoring_policy)
    except Exception as ex:
        module.fail_json(msg=str(ex))
def create_monitoring_policy(module, oneandone_conn):
    """
    Creates a new monitoring policy.
    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object

    Returns (changed, monitoring_policy). In check mode, exits early via
    _check_mode() before any API call is made.
    """
    try:
        name = module.params.get("name")
        description = module.params.get("description")
        email = module.params.get("email")
        agent = module.params.get("agent")
        thresholds = module.params.get("thresholds")
        ports = module.params.get("ports")
        processes = module.params.get("processes")
        wait = module.params.get("wait")
        wait_timeout = module.params.get("wait_timeout")
        wait_interval = module.params.get("wait_interval")
        _monitoring_policy = oneandone.client.MonitoringPolicy(
            name,
            description,
            email,
            agent,
        )
        # The API expects the agent flag as a lowercase string boolean.
        _monitoring_policy.specs["agent"] = str(_monitoring_policy.specs["agent"]).lower()
        # Only these entities are accepted; unrecognized threshold keys are ignored.
        threshold_entities = ["cpu", "ram", "disk", "internal_ping", "transfer"]
        _thresholds = []
        for threshold in thresholds:
            key = list(threshold.keys())[0]
            if key in threshold_entities:
                _threshold = oneandone.client.Threshold(
                    entity=key,
                    warning_value=threshold[key]["warning"]["value"],
                    warning_alert=str(threshold[key]["warning"]["alert"]).lower(),
                    critical_value=threshold[key]["critical"]["value"],
                    critical_alert=str(threshold[key]["critical"]["alert"]).lower(),
                )
                _thresholds.append(_threshold)
        _ports = []
        for port in ports:
            _port = oneandone.client.Port(
                protocol=port["protocol"],
                port=port["port"],
                alert_if=port["alert_if"],
                email_notification=str(port["email_notification"]).lower(),
            )
            _ports.append(_port)
        _processes = []
        for process in processes:
            _process = oneandone.client.Process(
                process=process["process"],
                alert_if=process["alert_if"],
                email_notification=str(process["email_notification"]).lower(),
            )
            _processes.append(_process)
        # Check mode exits here reporting "changed"; nothing is created.
        _check_mode(module, True)
        monitoring_policy = oneandone_conn.create_monitoring_policy(
            monitoring_policy=_monitoring_policy, thresholds=_thresholds, ports=_ports, processes=_processes
        )
        if wait:
            # Block until the new policy reaches its ready state.
            wait_for_resource_creation_completion(
                oneandone_conn,
                OneAndOneResources.monitoring_policy,
                monitoring_policy["id"],
                wait_timeout,
                wait_interval,
            )
        changed = True if monitoring_policy else False
        # NOTE(review): in check mode the function already exited at the
        # _check_mode(module, True) call above, so this call is effectively dead.
        _check_mode(module, False)
        return (changed, monitoring_policy)
    except Exception as ex:
        module.fail_json(msg=str(ex))
def remove_monitoring_policy(module, oneandone_conn):
    """
    Removes a monitoring policy.
    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object

    Returns (changed, {"id": ..., "name": ...}) on success.
    """
    try:
        mp_id = module.params.get("name")
        # Resolve the policy identifier (id or name) to an API id.
        monitoring_policy_id = get_monitoring_policy(oneandone_conn, mp_id)
        if module.check_mode:
            if monitoring_policy_id is None:
                # Nothing to delete: exit reporting "not changed".
                _check_mode(module, False)
            _check_mode(module, True)
        # NOTE(review): outside check mode a missing policy is not handled;
        # the delete is attempted with monitoring_policy_id=None and the
        # resulting error surfaces via fail_json below — confirm intended.
        monitoring_policy = oneandone_conn.delete_monitoring_policy(monitoring_policy_id)
        changed = True if monitoring_policy else False
        return (changed, {"id": monitoring_policy["id"], "name": monitoring_policy["name"]})
    except Exception as ex:
        module.fail_json(msg=str(ex))
def main():
    """Entry point: build the argument spec, authenticate, and dispatch by state."""
    module = AnsibleModule(
        argument_spec=dict(
            # NOTE(review): reading the environment at spec-build time (rather
            # than using Ansible's fallback mechanism) means the env vars are
            # captured when the module process starts — confirm intended.
            auth_token=dict(type="str", no_log=True, default=os.environ.get("ONEANDONE_AUTH_TOKEN")),
            api_url=dict(type="str", default=os.environ.get("ONEANDONE_API_URL")),
            name=dict(type="str"),
            monitoring_policy=dict(type="str"),
            agent=dict(type="str"),
            email=dict(type="str"),
            description=dict(type="str"),
            thresholds=dict(type="list", elements="dict", default=[]),
            ports=dict(type="list", elements="dict", default=[]),
            processes=dict(type="list", elements="dict", default=[]),
            add_ports=dict(type="list", elements="dict", default=[]),
            update_ports=dict(type="list", elements="dict", default=[]),
            remove_ports=dict(type="list", elements="str", default=[]),
            add_processes=dict(type="list", elements="dict", default=[]),
            update_processes=dict(type="list", elements="dict", default=[]),
            remove_processes=dict(type="list", elements="str", default=[]),
            add_servers=dict(type="list", elements="str", default=[]),
            remove_servers=dict(type="list", elements="str", default=[]),
            wait=dict(type="bool", default=True),
            wait_timeout=dict(type="int", default=600),
            wait_interval=dict(type="int", default=5),
            state=dict(type="str", default="present", choices=["present", "absent", "update"]),
        ),
        supports_check_mode=True,
    )
    if not HAS_ONEANDONE_SDK:
        module.fail_json(msg="1and1 required for this module")
    if not module.params.get("auth_token"):
        module.fail_json(msg="auth_token parameter is required.")
    # Build the API client, honoring a custom endpoint when provided.
    if not module.params.get("api_url"):
        oneandone_conn = oneandone.client.OneAndOneService(api_token=module.params.get("auth_token"))
    else:
        oneandone_conn = oneandone.client.OneAndOneService(
            api_token=module.params.get("auth_token"), api_url=module.params.get("api_url")
        )
    state = module.params.get("state")
    # Dispatch: each state validates its own required parameters first.
    if state == "absent":
        if not module.params.get("name"):
            module.fail_json(msg="'name' parameter is required to delete a monitoring policy.")
        try:
            (changed, monitoring_policy) = remove_monitoring_policy(module, oneandone_conn)
        except Exception as ex:
            module.fail_json(msg=str(ex))
    elif state == "update":
        if not module.params.get("monitoring_policy"):
            module.fail_json(msg="'monitoring_policy' parameter is required to update a monitoring policy.")
        try:
            (changed, monitoring_policy) = update_monitoring_policy(module, oneandone_conn)
        except Exception as ex:
            module.fail_json(msg=str(ex))
    elif state == "present":
        for param in ("name", "agent", "email", "thresholds", "ports", "processes"):
            if not module.params.get(param):
                module.fail_json(msg=f"{param} parameter is required for a new monitoring policy.")
        try:
            (changed, monitoring_policy) = create_monitoring_policy(module, oneandone_conn)
        except Exception as ex:
            module.fail_json(msg=str(ex))
    module.exit_json(changed=changed, monitoring_policy=monitoring_policy)
if __name__ == "__main__":
    main()

View File

@@ -1,418 +0,0 @@
#!/usr/bin/python
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
DOCUMENTATION = r"""
module: oneandone_private_network
short_description: Configure 1&1 private networking
description:
- Create, remove, reconfigure, update a private network. This module has a dependency on 1and1 >= 1.0.
deprecated:
removed_in: 13.0.0
why: DNS fails to resolve the API endpoint used by the module.
alternative: There is none.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
state:
description:
- Define a network's state to create, remove, or update.
type: str
default: 'present'
choices: ["present", "absent", "update"]
auth_token:
description:
- Authenticating API token provided by 1&1.
type: str
private_network:
description:
- The identifier (id or name) of the network used with update state.
type: str
api_url:
description:
- Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable.
type: str
name:
description:
- Private network name used with present state. Used as identifier (id or name) when used with absent state.
type: str
description:
description:
- Set a description for the network.
type: str
datacenter:
description:
- The identifier of the datacenter where the private network is created.
type: str
choices: [US, ES, DE, GB]
network_address:
description:
- Set a private network space, for example V(192.168.1.0).
type: str
subnet_mask:
description:
- Set the netmask for the private network, for example V(255.255.255.0).
type: str
add_members:
description:
- List of server identifiers (name or id) to be added to the private network.
type: list
elements: str
default: []
remove_members:
description:
- List of server identifiers (name or id) to be removed from the private network.
type: list
elements: str
default: []
wait:
description:
- Wait for the instance to be in state 'running' before returning.
default: true
type: bool
wait_timeout:
description:
- How long before wait gives up, in seconds.
type: int
default: 600
wait_interval:
description:
- Defines the number of seconds to wait when using the _wait_for methods.
type: int
default: 5
requirements:
- "1and1"
author:
- Amel Ajdinovic (@aajdinov)
- Ethan Devenport (@edevenport)
"""
EXAMPLES = r"""
- name: Create a private network
community.general.oneandone_private_network:
auth_token: oneandone_private_api_key
name: backup_network
description: Testing creation of a private network with ansible
network_address: 70.35.193.100
subnet_mask: 255.0.0.0
datacenter: US
- name: Destroy a private network
community.general.oneandone_private_network:
auth_token: oneandone_private_api_key
state: absent
name: backup_network
- name: Modify the private network
community.general.oneandone_private_network:
auth_token: oneandone_private_api_key
state: update
private_network: backup_network
network_address: 192.168.2.0
subnet_mask: 255.255.255.0
- name: Add members to the private network
community.general.oneandone_private_network:
auth_token: oneandone_private_api_key
state: update
private_network: backup_network
add_members:
- server identifier (id or name)
- name: Remove members from the private network
community.general.oneandone_private_network:
auth_token: oneandone_private_api_key
state: update
private_network: backup_network
remove_members:
- server identifier (id or name)
"""
RETURN = r"""
private_network:
description: Information about the private network.
type: dict
sample: {"name": "backup_network", "id": "55726DEDA20C99CF6F2AF8F18CAC9963"}
returned: always
"""
import os
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.oneandone import (
OneAndOneResources,
get_datacenter,
get_private_network,
get_server,
wait_for_resource_creation_completion,
wait_for_resource_deletion_completion,
)
HAS_ONEANDONE_SDK = True
try:
import oneandone.client
except ImportError:
HAS_ONEANDONE_SDK = False
DATACENTERS = ["US", "ES", "DE", "GB"]
def _check_mode(module, result):
if module.check_mode:
module.exit_json(changed=result)
def _add_servers(module, oneandone_conn, name, members):
    """Attach the given servers to the private network identified by ``name``.

    In check mode, returns whether an attach would occur; otherwise returns
    the API response. Errors are reported through module.fail_json.
    """
    try:
        network_id = get_private_network(oneandone_conn, name)
        if module.check_mode:
            return bool(network_id and members)
        return oneandone_conn.attach_private_network_servers(
            private_network_id=network_id, server_ids=members
        )
    except Exception as e:
        module.fail_json(msg=str(e))
def _remove_member(module, oneandone_conn, name, member_id):
    """Detach a single server from the private network identified by ``name``.

    In check mode, only reports whether the network exists and currently
    contains the server. Errors are reported through module.fail_json.
    """
    try:
        private_network_id = get_private_network(oneandone_conn, name)
        if module.check_mode:
            if private_network_id:
                network_member = oneandone_conn.get_private_network_server(
                    private_network_id=private_network_id, server_id=member_id
                )
                if network_member:
                    return True
            return False
        # Bug fix: the removal call previously passed the raw 'name' argument
        # as private_network_id instead of the resolved id from
        # get_private_network() above.
        network = oneandone_conn.remove_private_network_server(
            private_network_id=private_network_id, server_id=member_id
        )
        return network
    except Exception as ex:
        module.fail_json(msg=str(ex))
def create_network(module, oneandone_conn):
    """
    Create new private network
    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object
    Returns a dictionary containing a 'changed' attribute indicating whether
    any network was added.
    """
    name = module.params.get("name")
    description = module.params.get("description")
    network_address = module.params.get("network_address")
    subnet_mask = module.params.get("subnet_mask")
    datacenter = module.params.get("datacenter")
    wait = module.params.get("wait")
    wait_timeout = module.params.get("wait_timeout")
    wait_interval = module.params.get("wait_interval")
    # Bug fix: 'datacenter' is optional (no default in argument_spec), so
    # datacenter_id must be initialized; previously omitting the parameter
    # raised UnboundLocalError at the create_private_network() call below.
    datacenter_id = None
    if datacenter is not None:
        datacenter_id = get_datacenter(oneandone_conn, datacenter)
        if datacenter_id is None:
            module.fail_json(msg=f"datacenter {datacenter} not found.")
    try:
        # Check mode exits here reporting "changed"; nothing is created.
        _check_mode(module, True)
        network = oneandone_conn.create_private_network(
            private_network=oneandone.client.PrivateNetwork(
                name=name,
                description=description,
                network_address=network_address,
                subnet_mask=subnet_mask,
                datacenter_id=datacenter_id,
            )
        )
        if wait:
            # Block until the network reaches its ready state, then re-read it.
            wait_for_resource_creation_completion(
                oneandone_conn, OneAndOneResources.private_network, network["id"], wait_timeout, wait_interval
            )
            network = get_private_network(oneandone_conn, network["id"], True)
        changed = True if network else False
        _check_mode(module, False)
        return (changed, network)
    except Exception as e:
        module.fail_json(msg=str(e))
def update_network(module, oneandone_conn):
    """
    Modifies a private network.
    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object

    Returns (changed, private_network). In check mode, _check_mode() may exit
    the module early from within any branch.
    """
    try:
        _private_network_id = module.params.get("private_network")
        _name = module.params.get("name")
        _description = module.params.get("description")
        _network_address = module.params.get("network_address")
        _subnet_mask = module.params.get("subnet_mask")
        _add_members = module.params.get("add_members")
        _remove_members = module.params.get("remove_members")
        changed = False
        # Resolve the network (by id or name); third argument requests full details.
        private_network = get_private_network(oneandone_conn, _private_network_id, True)
        if private_network is None:
            # In check mode, exit reporting "not changed".
            # NOTE(review): outside check mode execution continues with
            # private_network=None and the subscriptions below raise, surfacing
            # via fail_json — confirm this is the intended behavior.
            _check_mode(module, False)
        if _name or _description or _network_address or _subnet_mask:
            # Basic attribute change requested: check mode exits here as "changed".
            _check_mode(module, True)
            private_network = oneandone_conn.modify_private_network(
                private_network_id=private_network["id"],
                name=_name,
                description=_description,
                network_address=_network_address,
                subnet_mask=_subnet_mask,
            )
            changed = True
        if _add_members:
            instances = []
            for member in _add_members:
                # Resolve each server identifier (id or name) to an API id.
                instance_id = get_server(oneandone_conn, member)
                instance_obj = oneandone.client.AttachServer(server_id=instance_id)
                instances.extend([instance_obj])
            private_network = _add_servers(module, oneandone_conn, private_network["id"], instances)
            # In check mode _add_servers returned a boolean; report it and exit.
            _check_mode(module, private_network)
            changed = True
        if _remove_members:
            chk_changed = False
            for member in _remove_members:
                instance = get_server(oneandone_conn, member, True)
                if module.check_mode:
                    chk_changed |= _remove_member(module, oneandone_conn, private_network["id"], instance["id"])
                    # Exits in check mode with the accumulated would-change flag.
                    _check_mode(module, instance and chk_changed)
                _remove_member(module, oneandone_conn, private_network["id"], instance["id"])
            private_network = get_private_network(oneandone_conn, private_network["id"], True)
            changed = True
        return (changed, private_network)
    except Exception as ex:
        module.fail_json(msg=str(ex))
def remove_network(module, oneandone_conn):
    """
    Removes a private network.
    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object.

    Returns (changed, {"id": ..., "name": ...}) on success and blocks until
    the deletion completes.
    """
    try:
        pn_id = module.params.get("name")
        wait_timeout = module.params.get("wait_timeout")
        wait_interval = module.params.get("wait_interval")
        # Resolve the network identifier (id or name) to an API id.
        private_network_id = get_private_network(oneandone_conn, pn_id)
        if module.check_mode:
            if private_network_id is None:
                # Nothing to delete: exit reporting "not changed".
                _check_mode(module, False)
            _check_mode(module, True)
        # NOTE(review): outside check mode a missing network is not handled;
        # deletion is attempted with private_network_id=None and the error
        # surfaces via fail_json below — confirm intended.
        private_network = oneandone_conn.delete_private_network(private_network_id)
        wait_for_resource_deletion_completion(
            oneandone_conn, OneAndOneResources.private_network, private_network["id"], wait_timeout, wait_interval
        )
        changed = True if private_network else False
        return (changed, {"id": private_network["id"], "name": private_network["name"]})
    except Exception as e:
        module.fail_json(msg=str(e))
def main():
    """Entry point: build the argument spec, authenticate, and dispatch by state."""
    module = AnsibleModule(
        argument_spec=dict(
            # NOTE(review): env vars are read at spec-build time rather than
            # via Ansible's fallback mechanism — confirm intended.
            auth_token=dict(type="str", no_log=True, default=os.environ.get("ONEANDONE_AUTH_TOKEN")),
            api_url=dict(type="str", default=os.environ.get("ONEANDONE_API_URL")),
            private_network=dict(type="str"),
            name=dict(type="str"),
            description=dict(type="str"),
            network_address=dict(type="str"),
            subnet_mask=dict(type="str"),
            add_members=dict(type="list", elements="str", default=[]),
            remove_members=dict(type="list", elements="str", default=[]),
            # No default: the datacenter is optional for network creation.
            datacenter=dict(choices=DATACENTERS),
            wait=dict(type="bool", default=True),
            wait_timeout=dict(type="int", default=600),
            wait_interval=dict(type="int", default=5),
            state=dict(type="str", default="present", choices=["present", "absent", "update"]),
        ),
        supports_check_mode=True,
    )
    if not HAS_ONEANDONE_SDK:
        module.fail_json(msg="1and1 required for this module")
    if not module.params.get("auth_token"):
        module.fail_json(msg="auth_token parameter is required.")
    # Build the API client, honoring a custom endpoint when provided.
    if not module.params.get("api_url"):
        oneandone_conn = oneandone.client.OneAndOneService(api_token=module.params.get("auth_token"))
    else:
        oneandone_conn = oneandone.client.OneAndOneService(
            api_token=module.params.get("auth_token"), api_url=module.params.get("api_url")
        )
    state = module.params.get("state")
    # Dispatch: each state validates its own required parameters first.
    if state == "absent":
        if not module.params.get("name"):
            module.fail_json(msg="'name' parameter is required for deleting a network.")
        try:
            (changed, private_network) = remove_network(module, oneandone_conn)
        except Exception as e:
            module.fail_json(msg=str(e))
    elif state == "update":
        if not module.params.get("private_network"):
            module.fail_json(msg="'private_network' parameter is required for updating a network.")
        try:
            (changed, private_network) = update_network(module, oneandone_conn)
        except Exception as e:
            module.fail_json(msg=str(e))
    elif state == "present":
        if not module.params.get("name"):
            module.fail_json(msg="'name' parameter is required for new networks.")
        try:
            (changed, private_network) = create_network(module, oneandone_conn)
        except Exception as e:
            module.fail_json(msg=str(e))
    module.exit_json(changed=changed, private_network=private_network)
if __name__ == "__main__":
    main()

View File

@@ -1,306 +0,0 @@
#!/usr/bin/python
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
DOCUMENTATION = r"""
module: oneandone_public_ip
short_description: Configure 1&1 public IPs
description:
- Create, update, and remove public IPs. This module has a dependency on 1and1 >= 1.0.
deprecated:
removed_in: 13.0.0
why: DNS fails to resolve the API endpoint used by the module.
alternative: There is none.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
state:
description:
- Define a public IP state to create, remove, or update.
type: str
default: 'present'
choices: ["present", "absent", "update"]
auth_token:
description:
- Authenticating API token provided by 1&1.
type: str
api_url:
description:
- Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable.
type: str
reverse_dns:
description:
- Reverse DNS name. maxLength=256.
type: str
datacenter:
description:
- ID of the datacenter where the IP is created (only for unassigned IPs).
type: str
choices: [US, ES, DE, GB]
default: US
type:
description:
- Type of IP. Currently, only IPV4 is available.
type: str
choices: ["IPV4", "IPV6"]
default: 'IPV4'
public_ip_id:
description:
- The ID of the public IP used with update and delete states.
type: str
wait:
description:
- Wait for the instance to be in state 'running' before returning.
default: true
type: bool
wait_timeout:
description:
- How long before wait gives up, in seconds.
type: int
default: 600
wait_interval:
description:
- Defines the number of seconds to wait when using the _wait_for methods.
type: int
default: 5
requirements:
- "1and1"
author:
- Amel Ajdinovic (@aajdinov)
- Ethan Devenport (@edevenport)
"""
EXAMPLES = r"""
- name: Create a public IP
community.general.oneandone_public_ip:
auth_token: oneandone_private_api_key
reverse_dns: example.com
datacenter: US
type: IPV4
- name: Update a public IP
community.general.oneandone_public_ip:
auth_token: oneandone_private_api_key
public_ip_id: public ip id
reverse_dns: secondexample.com
state: update
- name: Delete a public IP
community.general.oneandone_public_ip:
auth_token: oneandone_private_api_key
public_ip_id: public ip id
state: absent
"""
RETURN = r"""
public_ip:
description: Information about the public IP that was processed.
type: dict
sample: {"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"}
returned: always
"""
import os
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.oneandone import (
OneAndOneResources,
get_datacenter,
get_public_ip,
wait_for_resource_creation_completion,
)
HAS_ONEANDONE_SDK = True
try:
import oneandone.client
except ImportError:
HAS_ONEANDONE_SDK = False
DATACENTERS = ["US", "ES", "DE", "GB"]
TYPES = ["IPV4", "IPV6"]
def _check_mode(module, result):
if module.check_mode:
module.exit_json(changed=result)
def create_public_ip(module, oneandone_conn):
    """
    Create new public IP
    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object
    Returns a dictionary containing a 'changed' attribute indicating whether
    any public IP was added.
    """
    reverse_dns = module.params.get("reverse_dns")
    datacenter = module.params.get("datacenter")
    ip_type = module.params.get("type")
    wait = module.params.get("wait")
    wait_timeout = module.params.get("wait_timeout")
    wait_interval = module.params.get("wait_interval")
    if datacenter is not None:
        # Resolve the datacenter code (US/ES/DE/GB) to an API id.
        datacenter_id = get_datacenter(oneandone_conn, datacenter)
        if datacenter_id is None:
            _check_mode(module, False)
            module.fail_json(msg=f"datacenter {datacenter} not found.")
    # NOTE(review): datacenter_id is only bound inside the branch above;
    # per DOCUMENTATION the 'datacenter' option defaults to "US", so it is
    # presumably never None here — confirm against the argument_spec.
    try:
        # Check mode exits here reporting "changed"; nothing is created.
        _check_mode(module, True)
        public_ip = oneandone_conn.create_public_ip(
            reverse_dns=reverse_dns, ip_type=ip_type, datacenter_id=datacenter_id
        )
        if wait:
            # Block until the new IP reaches its ready state, then re-read it.
            wait_for_resource_creation_completion(
                oneandone_conn, OneAndOneResources.public_ip, public_ip["id"], wait_timeout, wait_interval
            )
            public_ip = oneandone_conn.get_public_ip(public_ip["id"])
        changed = True if public_ip else False
        return (changed, public_ip)
    except Exception as e:
        module.fail_json(msg=str(e))
def update_public_ip(module, oneandone_conn):
    """
    Update a public IP
    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object
    Returns a dictionary containing a 'changed' attribute indicating whether
    any public IP was changed.
    """
    reverse_dns = module.params.get("reverse_dns")
    public_ip_id = module.params.get("public_ip_id")
    wait = module.params.get("wait")
    wait_timeout = module.params.get("wait_timeout")
    wait_interval = module.params.get("wait_interval")
    # Resolve the IP (by id); third argument requests full details.
    public_ip = get_public_ip(oneandone_conn, public_ip_id, True)
    if public_ip is None:
        # Unknown IP: report "not changed" in check mode, otherwise fail.
        _check_mode(module, False)
        module.fail_json(msg=f"public IP {public_ip_id} not found.")
    try:
        # Check mode exits here reporting "changed"; nothing is modified.
        _check_mode(module, True)
        public_ip = oneandone_conn.modify_public_ip(ip_id=public_ip["id"], reverse_dns=reverse_dns)
        if wait:
            # Block until the modification completes, then re-read the IP.
            wait_for_resource_creation_completion(
                oneandone_conn, OneAndOneResources.public_ip, public_ip["id"], wait_timeout, wait_interval
            )
            public_ip = oneandone_conn.get_public_ip(public_ip["id"])
        changed = True if public_ip else False
        return (changed, public_ip)
    except Exception as e:
        module.fail_json(msg=str(e))
def delete_public_ip(module, oneandone_conn):
    """
    Delete a public IP

    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object

    Returns a tuple (changed, info) where 'info' carries the ID of the
    deleted public IP.
    """
    public_ip_id = module.params.get("public_ip_id")

    # Resolve the IP first; a missing resource is a hard failure.
    existing = get_public_ip(oneandone_conn, public_ip_id, True)
    if existing is None:
        _check_mode(module, False)
        module.fail_json(msg=f"public IP {public_ip_id} not found.")
    try:
        # Check mode: a deletion would take place.
        _check_mode(module, True)
        response = oneandone_conn.delete_public_ip(ip_id=existing["id"])
        return (bool(response), {"id": existing["id"]})
    except Exception as e:
        module.fail_json(msg=str(e))
def main():
    """Module entry point: parse parameters, build the 1&1 client and
    dispatch to the create/update/delete handlers based on 'state'."""
    module = AnsibleModule(
        argument_spec=dict(
            auth_token=dict(type="str", no_log=True, default=os.environ.get("ONEANDONE_AUTH_TOKEN")),
            api_url=dict(type="str", default=os.environ.get("ONEANDONE_API_URL")),
            public_ip_id=dict(type="str"),
            reverse_dns=dict(type="str"),
            datacenter=dict(choices=DATACENTERS, default="US"),
            type=dict(choices=TYPES, default="IPV4"),
            wait=dict(type="bool", default=True),
            wait_timeout=dict(type="int", default=600),
            wait_interval=dict(type="int", default=5),
            state=dict(type="str", default="present", choices=["present", "absent", "update"]),
        ),
        supports_check_mode=True,
    )

    if not HAS_ONEANDONE_SDK:
        module.fail_json(msg="1and1 required for this module")

    auth_token = module.params.get("auth_token")
    if not auth_token:
        module.fail_json(msg="auth_token parameter is required.")

    # Build the SDK client, honoring a custom API endpoint when provided.
    api_url = module.params.get("api_url")
    if api_url:
        oneandone_conn = oneandone.client.OneAndOneService(api_token=auth_token, api_url=api_url)
    else:
        oneandone_conn = oneandone.client.OneAndOneService(api_token=auth_token)

    state = module.params.get("state")
    if state == "absent":
        # Deleting and updating both require an explicit public IP ID.
        if not module.params.get("public_ip_id"):
            module.fail_json(msg="'public_ip_id' parameter is required to delete a public ip.")
        try:
            (changed, public_ip) = delete_public_ip(module, oneandone_conn)
        except Exception as e:
            module.fail_json(msg=str(e))
    elif state == "update":
        if not module.params.get("public_ip_id"):
            module.fail_json(msg="'public_ip_id' parameter is required to update a public ip.")
        try:
            (changed, public_ip) = update_public_ip(module, oneandone_conn)
        except Exception as e:
            module.fail_json(msg=str(e))
    elif state == "present":
        try:
            (changed, public_ip) = create_public_ip(module, oneandone_conn)
        except Exception as e:
            module.fail_json(msg=str(e))

    module.exit_json(changed=changed, public_ip=public_ip)


if __name__ == "__main__":
    main()

View File

@@ -1,656 +0,0 @@
#!/usr/bin/python
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
DOCUMENTATION = r"""
module: oneandone_server
short_description: Create, destroy, start, stop, and reboot a 1&1 Host server
description:
- Create, destroy, update, start, stop, and reboot a 1&1 Host server. When the server is created it can optionally wait
for it to be 'running' before returning.
deprecated:
removed_in: 13.0.0
why: DNS fails to resolve the API endpoint used by the module.
alternative: There is none.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
state:
description:
- Define a server's state to create, remove, start or stop it.
type: str
default: present
choices: ["present", "absent", "running", "stopped"]
auth_token:
description:
- Authenticating API token provided by 1&1. Overrides the E(ONEANDONE_AUTH_TOKEN) environment variable.
type: str
api_url:
description:
- Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable.
type: str
datacenter:
description:
- The datacenter location.
type: str
default: US
choices: ["US", "ES", "DE", "GB"]
hostname:
description:
- The hostname or ID of the server. Only used when state is 'present'.
type: str
description:
description:
- The description of the server.
type: str
appliance:
description:
- The operating system name or ID for the server. It is required only for 'present' state.
type: str
fixed_instance_size:
description:
- The instance size name or ID of the server. It is required only for 'present' state, and it is mutually exclusive
with vcore, cores_per_processor, ram, and hdds parameters.
- 'The available choices are: V(S), V(M), V(L), V(XL), V(XXL), V(3XL), V(4XL), V(5XL).'
type: str
vcore:
description:
- The total number of processors. It must be provided with O(cores_per_processor), O(ram), and O(hdds) parameters.
type: int
cores_per_processor:
description:
- The number of cores per processor. It must be provided with O(vcore), O(ram), and O(hdds) parameters.
type: int
ram:
description:
- The amount of RAM memory. It must be provided with with O(vcore), O(cores_per_processor), and O(hdds) parameters.
type: float
hdds:
description:
- A list of hard disks with nested O(ignore:hdds[].size) and O(ignore:hdds[].is_main) properties. It must be provided
with O(vcore), O(cores_per_processor), and O(ram) parameters.
type: list
elements: dict
private_network:
description:
- The private network name or ID.
type: str
firewall_policy:
description:
- The firewall policy name or ID.
type: str
load_balancer:
description:
- The load balancer name or ID.
type: str
monitoring_policy:
description:
- The monitoring policy name or ID.
type: str
server:
description:
- Server identifier (ID or hostname). It is required for all states except 'running' and 'present'.
type: str
count:
description:
- The number of servers to create.
type: int
default: 1
ssh_key:
description:
- User's public SSH key (contents, not path).
type: raw
server_type:
description:
- The type of server to be built.
type: str
default: "cloud"
choices: ["cloud", "baremetal", "k8s_node"]
wait:
description:
- Wait for the server to be in state 'running' before returning. Also used for delete operation (set to V(false) if
you do not want to wait for each individual server to be deleted before moving on with other tasks).
type: bool
default: true
wait_timeout:
description:
- How long before wait gives up, in seconds.
type: int
default: 600
wait_interval:
description:
- Defines the number of seconds to wait when using the wait_for methods.
type: int
default: 5
auto_increment:
description:
- When creating multiple servers at once, whether to differentiate hostnames by appending a count after them or substituting
the count where there is a %02d or %03d in the hostname string.
type: bool
default: true
requirements:
- "1and1"
author:
- "Amel Ajdinovic (@aajdinov)"
- "Ethan Devenport (@edevenport)"
"""
EXAMPLES = r"""
- name: Create three servers and enumerate their names
community.general.oneandone_server:
auth_token: oneandone_private_api_key
hostname: node%02d
fixed_instance_size: XL
datacenter: US
appliance: C5A349786169F140BCBC335675014C08
auto_increment: true
count: 3
- name: Create three servers, passing in an ssh_key
community.general.oneandone_server:
auth_token: oneandone_private_api_key
hostname: node%02d
vcore: 2
cores_per_processor: 4
ram: 8.0
hdds:
- size: 50
is_main: false
datacenter: ES
appliance: C5A349786169F140BCBC335675014C08
count: 3
wait: true
wait_timeout: 600
wait_interval: 10
ssh_key: SSH_PUBLIC_KEY
- name: Removing server
community.general.oneandone_server:
auth_token: oneandone_private_api_key
state: absent
server: 'node01'
- name: Starting server
community.general.oneandone_server:
auth_token: oneandone_private_api_key
state: running
server: 'node01'
- name: Stopping server
community.general.oneandone_server:
auth_token: oneandone_private_api_key
state: stopped
server: 'node01'
"""
RETURN = r"""
servers:
description: Information about each server that was processed.
type: list
sample:
- {"hostname": "my-server", "id": "server-id"}
returned: always
"""
import os
import time
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.oneandone import (
OneAndOneResources,
get_appliance,
get_datacenter,
get_firewall_policy,
get_fixed_instance_size,
get_load_balancer,
get_monitoring_policy,
get_private_network,
get_server,
wait_for_resource_creation_completion,
wait_for_resource_deletion_completion,
)
HAS_ONEANDONE_SDK = True
try:
import oneandone.client
except ImportError:
HAS_ONEANDONE_SDK = False
DATACENTERS = ["US", "ES", "DE", "GB"]
ONEANDONE_SERVER_STATES = (
"DEPLOYING",
"POWERED_OFF",
"POWERED_ON",
"POWERING_ON",
"POWERING_OFF",
)
def _check_mode(module, result):
if module.check_mode:
module.exit_json(changed=result)
def _create_server(
    module,
    oneandone_conn,
    hostname,
    description,
    fixed_instance_size_id,
    vcore,
    cores_per_processor,
    ram,
    hdds,
    datacenter_id,
    appliance_id,
    ssh_key,
    private_network_id,
    firewall_policy_id,
    load_balancer_id,
    monitoring_policy_id,
    server_type,
    wait,
    wait_timeout,
    wait_interval,
):
    """Create a single server via the 1&1 SDK.

    Returns the server dict on success, or None when a server with the same
    hostname already exists. In check mode it returns booleans instead:
    False when the server already exists (no change), True when a creation
    would take place. Calls module.fail_json() on any SDK error.
    """
    try:
        # Do not create a duplicate: an existing server with this hostname
        # means there is nothing to do.
        existing_server = get_server(oneandone_conn, hostname)
        if existing_server:
            if module.check_mode:
                # Check mode: existing server means no change would happen.
                return False
            return None
        if module.check_mode:
            # Check mode: a creation would happen; report as a change.
            return True
        server = oneandone_conn.create_server(
            oneandone.client.Server(
                name=hostname,
                description=description,
                fixed_instance_size_id=fixed_instance_size_id,
                vcore=vcore,
                cores_per_processor=cores_per_processor,
                ram=ram,
                appliance_id=appliance_id,
                datacenter_id=datacenter_id,
                rsa_key=ssh_key,
                private_network_id=private_network_id,
                firewall_policy_id=firewall_policy_id,
                load_balancer_id=load_balancer_id,
                monitoring_policy_id=monitoring_policy_id,
                server_type=server_type,
            ),
            hdds,
        )
        if wait:
            # Block until the server reaches its deployed state, then re-read
            # it so the returned data is current.
            wait_for_resource_creation_completion(
                oneandone_conn, OneAndOneResources.server, server["id"], wait_timeout, wait_interval
            )
            server = oneandone_conn.get_server(server["id"])  # refresh
        return server
    except Exception as ex:
        module.fail_json(msg=str(ex))
def _insert_network_data(server):
for addr_data in server["ips"]:
if addr_data["type"] == "IPV6":
server["public_ipv6"] = addr_data["ip"]
elif addr_data["type"] == "IPV4":
server["public_ipv4"] = addr_data["ip"]
return server
def create_server(module, oneandone_conn):
    """
    Create new server

    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object

    Returns a tuple (changed, servers): whether any server was added, plus
    the list of created servers (hostname, id and ip addresses).
    """
    hostname = module.params.get("hostname")
    description = module.params.get("description")
    auto_increment = module.params.get("auto_increment")
    count = module.params.get("count")
    fixed_instance_size = module.params.get("fixed_instance_size")
    vcore = module.params.get("vcore")
    cores_per_processor = module.params.get("cores_per_processor")
    ram = module.params.get("ram")
    hdds = module.params.get("hdds")
    datacenter = module.params.get("datacenter")
    appliance = module.params.get("appliance")
    ssh_key = module.params.get("ssh_key")
    private_network = module.params.get("private_network")
    monitoring_policy = module.params.get("monitoring_policy")
    firewall_policy = module.params.get("firewall_policy")
    load_balancer = module.params.get("load_balancer")
    server_type = module.params.get("server_type")
    wait = module.params.get("wait")
    wait_timeout = module.params.get("wait_timeout")
    wait_interval = module.params.get("wait_interval")
    # Resolve every name/ID parameter to its API ID up front; any failed
    # lookup aborts the module (reporting "no change" in check mode first).
    datacenter_id = get_datacenter(oneandone_conn, datacenter)
    if datacenter_id is None:
        _check_mode(module, False)
        module.fail_json(msg=f"datacenter {datacenter} not found.")
    fixed_instance_size_id = None
    if fixed_instance_size:
        fixed_instance_size_id = get_fixed_instance_size(oneandone_conn, fixed_instance_size)
        if fixed_instance_size_id is None:
            _check_mode(module, False)
            module.fail_json(msg=f"fixed_instance_size {fixed_instance_size} not found.")
    appliance_id = get_appliance(oneandone_conn, appliance)
    if appliance_id is None:
        _check_mode(module, False)
        module.fail_json(msg=f"appliance {appliance} not found.")
    private_network_id = None
    if private_network:
        private_network_id = get_private_network(oneandone_conn, private_network)
        if private_network_id is None:
            _check_mode(module, False)
            module.fail_json(msg=f"private network {private_network} not found.")
    monitoring_policy_id = None
    if monitoring_policy:
        monitoring_policy_id = get_monitoring_policy(oneandone_conn, monitoring_policy)
        if monitoring_policy_id is None:
            _check_mode(module, False)
            module.fail_json(msg=f"monitoring policy {monitoring_policy} not found.")
    firewall_policy_id = None
    if firewall_policy:
        firewall_policy_id = get_firewall_policy(oneandone_conn, firewall_policy)
        if firewall_policy_id is None:
            _check_mode(module, False)
            module.fail_json(msg=f"firewall policy {firewall_policy} not found.")
    load_balancer_id = None
    if load_balancer:
        load_balancer_id = get_load_balancer(oneandone_conn, load_balancer)
        if load_balancer_id is None:
            _check_mode(module, False)
            module.fail_json(msg=f"load balancer {load_balancer} not found.")
    # Build one hostname/description per requested server: either numbered
    # via _auto_increment_* or repeated verbatim 'count' times.
    if auto_increment:
        hostnames = _auto_increment_hostname(count, hostname)
        descriptions = _auto_increment_description(count, description)
    else:
        hostnames = [hostname] * count
        descriptions = [description] * count
    hdd_objs = []
    if hdds:
        for hdd in hdds:
            hdd_objs.append(oneandone.client.Hdd(size=hdd["size"], is_main=hdd["is_main"]))
    servers = []
    for index, name in enumerate(hostnames):
        server = _create_server(
            module=module,
            oneandone_conn=oneandone_conn,
            hostname=name,
            description=descriptions[index],
            fixed_instance_size_id=fixed_instance_size_id,
            vcore=vcore,
            cores_per_processor=cores_per_processor,
            ram=ram,
            hdds=hdd_objs,
            datacenter_id=datacenter_id,
            appliance_id=appliance_id,
            ssh_key=ssh_key,
            private_network_id=private_network_id,
            monitoring_policy_id=monitoring_policy_id,
            firewall_policy_id=firewall_policy_id,
            load_balancer_id=load_balancer_id,
            server_type=server_type,
            wait=wait,
            wait_timeout=wait_timeout,
            wait_interval=wait_interval,
        )
        # _create_server returns None for already-existing servers (and
        # booleans in check mode); keep only truthy results.
        if server:
            servers.append(server)
    changed = False
    # NOTE(review): in check mode _create_server returns True/False instead
    # of server dicts, so the _check_mode calls below exit early with the
    # aggregate changed status before network data is inserted.
    if servers:
        for server in servers:
            if server:
                _check_mode(module, True)
        _check_mode(module, False)
        servers = [_insert_network_data(_server) for _server in servers]
        changed = True
    _check_mode(module, False)
    return (changed, servers)
def remove_server(module, oneandone_conn):
    """
    Removes a server.

    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object.

    Returns a tuple (changed, removed_server) where 'removed_server' holds
    the removed server's id and hostname, or None when nothing was removed.
    """
    target = module.params.get("server")
    wait = module.params.get("wait")
    wait_timeout = module.params.get("wait_timeout")
    wait_interval = module.params.get("wait_interval")

    # Nothing to do when the server does not exist.
    server = get_server(oneandone_conn, target, True)
    if not server:
        _check_mode(module, False)
        return (False, None)

    # Check mode: a deletion would take place.
    _check_mode(module, True)
    try:
        oneandone_conn.delete_server(server_id=server["id"])
        if wait:
            wait_for_resource_deletion_completion(
                oneandone_conn, OneAndOneResources.server, server["id"], wait_timeout, wait_interval
            )
    except Exception as ex:
        module.fail_json(msg=f"failed to terminate the server: {ex}")

    _check_mode(module, False)
    return (True, {"id": server["id"], "hostname": server["name"]})
def startstop_server(module, oneandone_conn):
    """
    Starts or Stops a server.

    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object.

    Returns a tuple (changed, server): whether a state change was requested
    (or the server was simply found), plus basic information for the server
    with its public IP addresses inserted.
    """
    state = module.params.get("state")
    server_id = module.params.get("server")
    wait = module.params.get("wait")
    wait_timeout = module.params.get("wait_timeout")
    wait_interval = module.params.get("wait_interval")
    changed = False
    # Resolve server
    server = get_server(oneandone_conn, server_id, True)
    if server:
        # Attempt to change the server state, only if it is not already there
        # or on its way.
        try:
            if state == "stopped" and server["status"]["state"] == "POWERED_ON":
                _check_mode(module, True)
                oneandone_conn.modify_server_status(server_id=server["id"], action="POWER_OFF", method="SOFTWARE")
            elif state == "running" and server["status"]["state"] == "POWERED_OFF":
                _check_mode(module, True)
                oneandone_conn.modify_server_status(server_id=server["id"], action="POWER_ON", method="SOFTWARE")
        except Exception as ex:
            module.fail_json(msg=f"failed to set server {server_id} to state {state}: {ex}")
        # Check mode: if no power action applied above, report "no change".
        _check_mode(module, False)
        # Make sure the server has reached the desired state
        if wait:
            operation_completed = False
            # Poll every wait_interval seconds until the deadline passes.
            # Note: wait_timeout is rebound here from a duration to a deadline.
            wait_timeout = time.time() + wait_timeout
            while wait_timeout > time.time():
                time.sleep(wait_interval)
                server = oneandone_conn.get_server(server["id"])  # refresh
                server_state = server["status"]["state"]
                if state == "stopped" and server_state == "POWERED_OFF":
                    operation_completed = True
                    break
                if state == "running" and server_state == "POWERED_ON":
                    operation_completed = True
                    break
            if not operation_completed:
                module.fail_json(msg=f"Timeout waiting for server {server_id} to get to state {state}")
        # NOTE(review): 'changed' is set whenever the server was found, even
        # if it was already in the requested power state.
        changed = True
        server = _insert_network_data(server)
    _check_mode(module, False)
    return (changed, server)
def _auto_increment_hostname(count, hostname):
"""
Allow a custom incremental count in the hostname when defined with the
string formatting (%) operator. Otherwise, increment using name-01,
name-02, name-03, and so forth.
"""
if "%" not in hostname:
hostname = f"{hostname}-%01d"
return [hostname % i for i in range(1, count + 1)]
def _auto_increment_description(count, description):
"""
Allow the incremental count in the description when defined with the
string formatting (%) operator. Otherwise, repeat the same description.
"""
if "%" in description:
return [description % i for i in range(1, count + 1)]
else:
return [description] * count
def main():
    """Module entry point: validate parameters, build the 1&1 client and
    dispatch to the create/remove/start-stop handlers based on 'state'."""
    module = AnsibleModule(
        argument_spec=dict(
            # Credentials fall back to environment variables at load time.
            auth_token=dict(type="str", default=os.environ.get("ONEANDONE_AUTH_TOKEN"), no_log=True),
            api_url=dict(type="str", default=os.environ.get("ONEANDONE_API_URL")),
            hostname=dict(type="str"),
            description=dict(type="str"),
            appliance=dict(type="str"),
            fixed_instance_size=dict(type="str"),
            vcore=dict(type="int"),
            cores_per_processor=dict(type="int"),
            ram=dict(type="float"),
            hdds=dict(type="list", elements="dict"),
            count=dict(type="int", default=1),
            ssh_key=dict(type="raw", no_log=False),
            auto_increment=dict(type="bool", default=True),
            server=dict(type="str"),
            datacenter=dict(choices=DATACENTERS, default="US"),
            private_network=dict(type="str"),
            firewall_policy=dict(type="str"),
            load_balancer=dict(type="str"),
            monitoring_policy=dict(type="str"),
            server_type=dict(type="str", default="cloud", choices=["cloud", "baremetal", "k8s_node"]),
            wait=dict(type="bool", default=True),
            wait_timeout=dict(type="int", default=600),
            wait_interval=dict(type="int", default=5),
            state=dict(type="str", default="present", choices=["present", "absent", "running", "stopped"]),
        ),
        supports_check_mode=True,
        # A fixed instance size and explicit sizing parameters are mutually
        # exclusive; the explicit sizing parameters must be given together.
        mutually_exclusive=(
            ["fixed_instance_size", "vcore"],
            ["fixed_instance_size", "cores_per_processor"],
            ["fixed_instance_size", "ram"],
            ["fixed_instance_size", "hdds"],
        ),
        required_together=(["vcore", "cores_per_processor", "ram", "hdds"],),
    )
    if not HAS_ONEANDONE_SDK:
        module.fail_json(msg="1and1 required for this module")
    if not module.params.get("auth_token"):
        module.fail_json(msg='The "auth_token" parameter or ONEANDONE_AUTH_TOKEN environment variable is required.')
    # Build the SDK client, honoring a custom API endpoint when provided.
    if not module.params.get("api_url"):
        oneandone_conn = oneandone.client.OneAndOneService(api_token=module.params.get("auth_token"))
    else:
        oneandone_conn = oneandone.client.OneAndOneService(
            api_token=module.params.get("auth_token"), api_url=module.params.get("api_url")
        )
    state = module.params.get("state")
    if state == "absent":
        # Deleting requires an explicit server identifier.
        if not module.params.get("server"):
            module.fail_json(msg="'server' parameter is required for deleting a server.")
        try:
            (changed, servers) = remove_server(module, oneandone_conn)
        except Exception as ex:
            module.fail_json(msg=str(ex))
    elif state in ("running", "stopped"):
        if not module.params.get("server"):
            module.fail_json(msg="'server' parameter is required for starting/stopping a server.")
        try:
            (changed, servers) = startstop_server(module, oneandone_conn)
        except Exception as ex:
            module.fail_json(msg=str(ex))
    elif state == "present":
        # Creation needs a hostname, an appliance (image) and a datacenter.
        for param in ("hostname", "appliance", "datacenter"):
            if not module.params.get(param):
                module.fail_json(msg=f"{param} parameter is required for new server.")
        try:
            (changed, servers) = create_server(module, oneandone_conn)
        except Exception as ex:
            module.fail_json(msg=str(ex))
    module.exit_json(changed=changed, servers=servers)
if __name__ == "__main__":
    main()

View File

@@ -1,193 +0,0 @@
#!/usr/bin/python
#
# Copyright Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
DOCUMENTATION = r"""
author: "Willy Barro (@willybarro)"
requirements: [pushbullet.py]
module: pushbullet
short_description: Sends notifications to Pushbullet
description:
- This module sends push notifications through Pushbullet to channels or devices.
deprecated:
removed_in: 13.0.0
why: Module relies on Python package pushbullet.py which is not maintained and supports only up to Python 3.2.
alternative: There is none.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
api_key:
type: str
description:
- Push bullet API token.
required: true
channel:
type: str
description:
- The channel TAG you wish to broadcast a push notification, as seen on the "My Channels" > "Edit your channel" at Pushbullet
page.
device:
type: str
description:
- The device NAME you wish to send a push notification, as seen on the Pushbullet main page.
push_type:
type: str
description:
- Thing you wish to push.
default: note
choices: ["note", "link"]
title:
type: str
description:
- Title of the notification.
required: true
body:
type: str
description:
- Body of the notification, for example details of the fault you are alerting.
url:
type: str
description:
- URL field, used when O(push_type=link).
notes:
- Requires C(pushbullet.py) Python package on the remote host. You can install it through C(pip) with C(pip install pushbullet.py).
- See U(https://github.com/randomchars/pushbullet.py).
"""
EXAMPLES = r"""
- name: Sends a push notification to a device
community.general.pushbullet:
api_key: "ABC123abc123ABC123abc123ABC123ab"
device: "Chrome"
title: "You may see this on Google Chrome"
- name: Sends a link to a device
community.general.pushbullet:
api_key: ABC123abc123ABC123abc123ABC123ab
device: Chrome
push_type: link
title: Ansible Documentation
body: https://docs.ansible.com/
- name: Sends a push notification to a channel
community.general.pushbullet:
api_key: ABC123abc123ABC123abc123ABC123ab
channel: my-awesome-channel
title: "Broadcasting a message to the #my-awesome-channel folks"
- name: Sends a push notification with title and body to a channel
community.general.pushbullet:
api_key: ABC123abc123ABC123abc123ABC123ab
channel: my-awesome-channel
title: ALERT! Signup service is down
body: Error rate on signup service is over 90% for more than 2 minutes
"""
import traceback
PUSHBULLET_IMP_ERR = None
try:
from pushbullet import PushBullet
from pushbullet.errors import InvalidKeyError, PushError
except ImportError:
PUSHBULLET_IMP_ERR = traceback.format_exc()
pushbullet_found = False
else:
pushbullet_found = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
# ===========================================
# Main
#
def main():
    """Module entry point: resolve the push target (device or channel) and
    send a 'note' or 'link' push via the Pushbullet API."""
    module = AnsibleModule(
        argument_spec=dict(
            api_key=dict(type="str", required=True, no_log=True),
            channel=dict(type="str"),
            device=dict(type="str"),
            push_type=dict(type="str", default="note", choices=["note", "link"]),
            title=dict(type="str", required=True),
            body=dict(type="str"),
            url=dict(type="str"),
        ),
        mutually_exclusive=(["channel", "device"],),
        supports_check_mode=True,
    )
    api_key = module.params["api_key"]
    channel = module.params["channel"]
    device = module.params["device"]
    push_type = module.params["push_type"]
    title = module.params["title"]
    body = module.params["body"]
    url = module.params["url"]
    if not pushbullet_found:
        module.fail_json(msg=missing_required_lib("pushbullet.py"), exception=PUSHBULLET_IMP_ERR)
    # Init pushbullet
    try:
        pb = PushBullet(api_key)
        target = None
    except InvalidKeyError:
        module.fail_json(msg="Invalid api_key")
    # Checks for channel/device
    if device is None and channel is None:
        module.fail_json(msg="You need to provide a channel or a device.")
    # Search for given device
    if device is not None:
        devices_by_nickname = {}
        for d in pb.devices:
            devices_by_nickname[d.nickname] = d
        if device in devices_by_nickname:
            target = devices_by_nickname[device]
        else:
            # Unknown device: list the available nicknames in the error.
            str_devices_by_nickname = "', '".join(devices_by_nickname)
            module.fail_json(msg=f"Device '{device}' not found. Available devices: '{str_devices_by_nickname}'")
    # Search for given channel
    if channel is not None:
        channels_by_tag = {}
        for c in pb.channels:
            channels_by_tag[c.channel_tag] = c
        if channel in channels_by_tag:
            target = channels_by_tag[channel]
        else:
            # Unknown channel: list the available tags in the error.
            str_channels_by_tag = "', '".join(channels_by_tag)
            module.fail_json(msg=f"Channel '{channel}' not found. Available channels: '{str_channels_by_tag}'")
    # If in check mode, exit saying that we succeeded
    if module.check_mode:
        module.exit_json(changed=False, msg="OK")
    # Send push notification
    try:
        if push_type == "link":
            target.push_link(title, url, body)
        else:
            target.push_note(title, body)
        module.exit_json(changed=False, msg="OK")
    except PushError as e:
        module.fail_json(msg=f"An error occurred, Pushbullet's response: {e}")
    # Defensive fallback; normally unreachable since exit_json/fail_json
    # terminate module execution above.
    module.fail_json(msg="An unknown error has occurred")
if __name__ == "__main__":
    main()

View File

@@ -101,9 +101,10 @@ options:
description:
- If V(true), the payload matches Rocket.Chat prior to 7.4.0 format. This format has been used by the module since its
inception, but is no longer supported by Rocket.Chat 7.4.0.
- The default value of the option, V(true), is B(deprecated) since community.general 11.2.0 and will change to V(false) in community.general 13.0.0.
- The default value changed from V(true) to V(false) in community.general 13.0.0.
- This parameter is going to be removed in a future release when Rocket.Chat 7.4.0 becomes the minimum supported version.
type: bool
default: false
version_added: 10.5.0
"""
@@ -229,7 +230,7 @@ def main():
validate_certs=dict(default=True, type="bool"),
color=dict(type="str", default="normal", choices=["normal", "good", "warning", "danger"]),
attachments=dict(type="list", elements="dict"),
is_pre740=dict(type="bool"),
is_pre740=dict(type="bool", default=False),
)
)
@@ -246,15 +247,6 @@ def main():
attachments = module.params["attachments"]
is_pre740 = module.params["is_pre740"]
if is_pre740 is None:
module.deprecate(
"The default value 'true' for 'is_pre740' is deprecated and will change to 'false' in community.general 13.0.0."
" You can explicitly set 'is_pre740' in your task to avoid this deprecation warning",
version="13.0.0",
collection_name="community.general",
)
is_pre740 = True
payload = build_payload_for_rocketchat(
module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments, is_pre740
)

View File

@@ -1,379 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2014, Anders Ingemann <aim@secoya.dk>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
DOCUMENTATION = r"""
module: sensu_check
short_description: Manage Sensu checks
description:
- Manage the checks that should be run on a machine by I(Sensu).
- Most options do not have a default and are not added to the check definition unless specified.
- All defaults except O(path), O(state), O(backup) and O(metric) are not managed by this module, they are simply specified
for your convenience.
deprecated:
removed_in: 13.0.0
why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
alternative: Use Sensu Go and its accompanying collection C(sensu.sensu_go).
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
name:
type: str
description:
- The name of the check.
- This is the key that is used to determine whether a check exists.
required: true
state:
type: str
description:
- Whether the check should be present or not.
choices: ['present', 'absent']
default: present
path:
type: str
description:
- Path to the JSON file of the check to be added/removed.
- It is created if it does not exist (unless O(state=absent)).
- The parent folders need to exist when O(state=present), otherwise an error is thrown.
default: /etc/sensu/conf.d/checks.json
backup:
description:
- Create a backup file (if yes), including the timestamp information so you can get the original file back if you somehow
clobbered it incorrectly.
type: bool
default: false
command:
type: str
description:
- Path to the sensu check to run (not required when O(state=absent)).
handlers:
type: list
elements: str
description:
- List of handlers to notify when the check fails.
subscribers:
type: list
elements: str
description:
- List of subscribers/channels this check should run for.
- See sensu_subscribers to subscribe a machine to a channel.
interval:
type: int
description:
- Check interval in seconds.
timeout:
type: int
description:
- Timeout for the check.
- If not specified, it defaults to 10.
ttl:
type: int
description:
- Time to live in seconds until the check is considered stale.
handle:
description:
- Whether the check should be handled or not.
- Default is V(false).
type: bool
subdue_begin:
type: str
description:
- When to disable handling of check failures.
subdue_end:
type: str
description:
- When to enable handling of check failures.
dependencies:
type: list
elements: str
description:
- Other checks this one depends on.
- If dependencies fail handling of this check is disabled.
metric:
description:
- Whether the check is a metric.
type: bool
default: false
standalone:
description:
- Whether the check should be scheduled by the sensu client or server.
- This option obviates the need for specifying the O(subscribers) option.
- Default is V(false).
type: bool
publish:
description:
- Whether the check should be scheduled at all.
- You can still issue it using the sensu API.
- Default is V(false).
type: bool
occurrences:
type: int
description:
- Number of event occurrences before the handler should take action.
- If not specified, defaults to 1.
refresh:
type: int
description:
- Number of seconds handlers should wait before taking second action.
aggregate:
description:
- Classifies the check as an aggregate check, making it available using the aggregate API.
- Default is V(false).
type: bool
low_flap_threshold:
type: int
description:
- The low threshold for flap detection.
high_flap_threshold:
type: int
description:
- The high threshold for flap detection.
custom:
type: dict
description:
- A hash/dictionary of custom parameters for mixing to the configuration.
- You cannot rewrite other module parameters using this.
source:
type: str
description:
- The check source, used to create a JIT Sensu client for an external resource (for example a network switch).
author: "Anders Ingemann (@andsens)"
"""
EXAMPLES = r"""
# Fetch metrics about the CPU load every 60 seconds,
# the sensu server has a handler called 'relay' which forwards stats to graphite
- name: Get cpu metrics
community.general.sensu_check:
name: cpu_load
command: /etc/sensu/plugins/system/cpu-mpstat-metrics.rb
metric: true
handlers: relay
subscribers: common
interval: 60
# Check whether nginx is running
- name: Check nginx process
community.general.sensu_check:
name: nginx_running
command: /etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid
handlers: default
subscribers: nginx
interval: 60
# Stop monitoring the disk capacity.
# Note that the check will still show up in the sensu dashboard,
# to remove it completely you need to issue a DELETE request to the sensu api.
- name: Check disk
community.general.sensu_check:
name: check_disk_capacity
state: absent
"""
import json
import traceback
from ansible.module_utils.basic import AnsibleModule
def sensu_check(module, path, name, state="present", backup=False):
    """Ensure the Sensu check ``name`` in the JSON file at ``path`` matches the module parameters.

    :param module: AnsibleModule instance; provides ``params``, ``check_mode``,
        ``backup_local`` and ``fail_json``.
    :param path: Path of the JSON configuration file holding the checks.
    :param name: Name of the check to manage.
    :param state: Either ``"present"`` or ``"absent"``.
    :param backup: When true, back up the file before rewriting it.
    :return: Tuple ``(changed, reasons)`` where ``reasons`` lists every detected difference.
    """
    changed = False
    reasons = []

    # Load the existing configuration.  A missing file is only fatal when the
    # check must be created; use a context manager so the handle is always
    # closed (the original code juggled a manual try/finally around open()).
    try:
        with open(path) as stream:
            config = json.load(stream)
    except OSError as e:
        if e.errno == 2:  # File not found, non-fatal
            if state == "absent":
                reasons.append("file did not exist and state is `absent'")
                return changed, reasons
            config = {}
        else:
            module.fail_json(msg=f"{e}", exception=traceback.format_exc())
    except ValueError:
        module.fail_json(msg=f"{path} contains invalid JSON")

    if "checks" not in config:
        if state == "absent":
            reasons.append("`checks' section did not exist and state is `absent'")
            return changed, reasons
        config["checks"] = {}
        changed = True
        reasons.append("`checks' section did not exist")

    if state == "absent":
        if name in config["checks"]:
            del config["checks"][name]
            changed = True
            reasons.append("check was present and state is `absent'")

    if state == "present":
        if name not in config["checks"]:
            check = {}
            config["checks"][name] = check
            changed = True
            reasons.append("check was absent and state is `present'")
        else:
            check = config["checks"][name]

        # Options that map one-to-one from module parameters to check attributes.
        simple_opts = [
            "command",
            "handlers",
            "subscribers",
            "interval",
            "timeout",
            "ttl",
            "handle",
            "dependencies",
            "standalone",
            "publish",
            "occurrences",
            "refresh",
            "aggregate",
            "low_flap_threshold",
            "high_flap_threshold",
            "source",
        ]
        for opt in simple_opts:
            if module.params[opt] is not None:
                if opt not in check or check[opt] != module.params[opt]:
                    check[opt] = module.params[opt]
                    changed = True
                    reasons.append(f"`{opt}' did not exist or was different")
            else:
                if opt in check:
                    del check[opt]
                    changed = True
                    reasons.append(f"`{opt}' was removed")

        if module.params["custom"]:
            # Custom parameters must not shadow regular module parameters.
            custom_params = module.params["custom"]
            overwritten_fields = set(custom_params.keys()) & set(
                simple_opts + ["type", "subdue", "subdue_begin", "subdue_end"]
            )
            if overwritten_fields:
                # Fixed grammar of the original error message
                # ("You can't overwriting ... trying overwrite").
                msg = f'You cannot overwrite standard module parameters via "custom". You are trying to overwrite: {list(overwritten_fields)}'
                module.fail_json(msg=msg)

            for k, v in custom_params.items():
                if k in config["checks"][name]:
                    if config["checks"][name][k] != v:
                        changed = True
                        reasons.append(f"`custom param {k}' was changed")
                else:
                    changed = True
                    reasons.append(f"`custom param {k}' was added")
                check[k] = v
            simple_opts += custom_params.keys()

            # Remove custom params that are in the file but no longer requested.
            for opt in set(config["checks"][name].keys()) - set(
                simple_opts + ["type", "subdue", "subdue_begin", "subdue_end"]
            ):
                changed = True
                reasons.append(f"`custom param {opt}' was deleted")
                del check[opt]

        # `type: metric` is toggled by the boolean `metric` parameter.
        if module.params["metric"]:
            if "type" not in check or check["type"] != "metric":
                check["type"] = "metric"
                changed = True
                reasons.append("`type' was not defined or not `metric'")
        if not module.params["metric"] and "type" in check:
            del check["type"]
            changed = True
            reasons.append("`type' was defined")

        # Subdue window only applies when both boundaries are given
        # (enforced by required_together in main()).
        if module.params["subdue_begin"] is not None and module.params["subdue_end"] is not None:
            subdue = {
                "begin": module.params["subdue_begin"],
                "end": module.params["subdue_end"],
            }
            if "subdue" not in check or check["subdue"] != subdue:
                check["subdue"] = subdue
                changed = True
                reasons.append("`subdue' did not exist or was different")
        else:
            if "subdue" in check:
                del check["subdue"]
                changed = True
                reasons.append("`subdue' was removed")

    if changed and not module.check_mode:
        if backup:
            module.backup_local(path)
        try:
            with open(path, "w") as stream:
                stream.write(json.dumps(config, indent=2) + "\n")
        except OSError as e:
            module.fail_json(msg=f"{e}", exception=traceback.format_exc())
    return changed, reasons
def main():
    """Entry point for the sensu_check module: parse arguments and apply the desired state."""
    argument_spec = dict(
        name=dict(type="str", required=True),
        path=dict(type="str", default="/etc/sensu/conf.d/checks.json"),
        state=dict(type="str", default="present", choices=["present", "absent"]),
        backup=dict(type="bool", default=False),
        command=dict(type="str"),
        handlers=dict(type="list", elements="str"),
        subscribers=dict(type="list", elements="str"),
        interval=dict(type="int"),
        timeout=dict(type="int"),
        ttl=dict(type="int"),
        handle=dict(type="bool"),
        subdue_begin=dict(type="str"),
        subdue_end=dict(type="str"),
        dependencies=dict(type="list", elements="str"),
        metric=dict(type="bool", default=False),
        standalone=dict(type="bool"),
        publish=dict(type="bool"),
        occurrences=dict(type="int"),
        refresh=dict(type="int"),
        aggregate=dict(type="bool"),
        low_flap_threshold=dict(type="int"),
        high_flap_threshold=dict(type="int"),
        custom=dict(type="dict"),
        source=dict(type="str"),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=[["subdue_begin", "subdue_end"]],
        supports_check_mode=True,
    )

    # A command is mandatory unless the check is being removed.
    if module.params["state"] != "absent" and module.params["command"] is None:
        module.fail_json(msg="missing required arguments: command")

    changed, reasons = sensu_check(
        module,
        module.params["path"],
        module.params["name"],
        module.params["state"],
        module.params["backup"],
    )
    module.exit_json(
        path=module.params["path"],
        changed=changed,
        msg="OK",
        name=module.params["name"],
        reasons=reasons,
    )


if __name__ == "__main__":
    main()

View File

@@ -1,285 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017, Red Hat Inc.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
DOCUMENTATION = r"""
module: sensu_client
author: "David Moreau Simard (@dmsimard)"
short_description: Manages Sensu client configuration
description:
- Manages Sensu client configuration.
- For more information, refer to the L(Sensu documentation, https://sensuapp.org/docs/latest/reference/clients.html).
deprecated:
removed_in: 13.0.0
why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
alternative: Use Sensu Go and its accompanying collection C(sensu.sensu_go).
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
state:
type: str
description:
- Whether the client should be present or not.
choices: ['present', 'absent']
default: present
name:
type: str
description:
- A unique name for the client. The name cannot contain special characters or spaces.
- If not specified, it defaults to the system hostname as determined by Ruby Socket.gethostname (provided by Sensu).
address:
type: str
description:
- An address to help identify and reach the client. This is only informational, usually an IP address or hostname.
- If not specified it defaults to non-loopback IPv4 address as determined by Ruby C(Socket.ip_address_list) (provided
by Sensu).
subscriptions:
type: list
elements: str
description:
- An array of client subscriptions, a list of roles and/or responsibilities assigned to the system (for example V(webserver)).
- These subscriptions determine which monitoring checks are executed by the client, as check requests are sent to subscriptions.
- The subscriptions array items must be strings.
safe_mode:
description:
- If safe mode is enabled for the client. Safe mode requires local check definitions in order to accept a check request
and execute the check.
type: bool
default: false
redact:
type: list
elements: str
description:
- Client definition attributes to redact (values) when logging and sending client keepalives.
socket:
type: dict
description:
- The socket definition scope, used to configure the Sensu client socket.
keepalives:
description:
- If Sensu should monitor keepalives for this client.
type: bool
default: true
keepalive:
type: dict
description:
- The keepalive definition scope, used to configure Sensu client keepalives behavior (for example keepalive thresholds
and so).
registration:
type: dict
description:
- The registration definition scope, used to configure Sensu registration event handlers.
deregister:
description:
- If a deregistration event should be created upon Sensu client process stop.
- Default is V(false).
type: bool
deregistration:
type: dict
description:
- The deregistration definition scope, used to configure automated Sensu client de-registration.
ec2:
type: dict
description:
- The ec2 definition scope, used to configure the Sensu Enterprise AWS EC2 integration (Sensu Enterprise users only).
chef:
type: dict
description:
- The chef definition scope, used to configure the Sensu Enterprise Chef integration (Sensu Enterprise users only).
puppet:
type: dict
description:
- The puppet definition scope, used to configure the Sensu Enterprise Puppet integration (Sensu Enterprise users only).
servicenow:
type: dict
description:
- The servicenow definition scope, used to configure the Sensu Enterprise ServiceNow integration (Sensu Enterprise users
only).
"""
EXAMPLES = r"""
# Minimum possible configuration
- name: Configure Sensu client
community.general.sensu_client:
subscriptions:
- default
# With customization
- name: Configure Sensu client
community.general.sensu_client:
name: "{{ ansible_fqdn }}"
address: "{{ ansible_default_ipv4['address'] }}"
subscriptions:
- default
- webserver
redact:
- password
socket:
bind: 127.0.0.1
port: 3030
keepalive:
thresholds:
warning: 180
critical: 300
handlers:
- email
custom:
- broadcast: irc
occurrences: 3
register: client
notify:
- Restart sensu-client
- name: Secure Sensu client configuration file
ansible.builtin.file:
path: "{{ client['file'] }}"
owner: "sensu"
group: "sensu"
mode: "0600"
- name: Delete the Sensu client configuration
community.general.sensu_client:
state: "absent"
"""
RETURN = r"""
config:
description: Effective client configuration, when state is present.
returned: success
type: dict
sample:
{
"name": "client",
"subscriptions": [
"default"
]
}
file:
description: Path to the client configuration file.
returned: success
type: str
sample: "/etc/sensu/conf.d/client.json"
"""
import json
import os
from ansible.module_utils.basic import AnsibleModule
def main():
    """Manage the Sensu client configuration file at /etc/sensu/conf.d/client.json.

    Builds the desired ``client`` configuration from the module parameters,
    compares it with the file on disk, and only rewrites the file when the
    two differ.  With O(state=absent) the file is deleted instead.
    """
    module = AnsibleModule(
        supports_check_mode=True,
        argument_spec=dict(
            state=dict(type="str", choices=["present", "absent"], default="present"),
            name=dict(
                type="str",
            ),
            address=dict(
                type="str",
            ),
            subscriptions=dict(type="list", elements="str"),
            safe_mode=dict(type="bool", default=False),
            redact=dict(type="list", elements="str"),
            socket=dict(type="dict"),
            keepalives=dict(type="bool", default=True),
            keepalive=dict(type="dict"),
            registration=dict(type="dict"),
            deregister=dict(type="bool"),
            deregistration=dict(type="dict"),
            ec2=dict(type="dict"),
            chef=dict(type="dict"),
            puppet=dict(type="dict"),
            servicenow=dict(type="dict"),
        ),
        required_if=[["state", "present", ["subscriptions"]]],
    )

    state = module.params["state"]
    path = "/etc/sensu/conf.d/client.json"

    if state == "absent":
        if os.path.exists(path):
            if module.check_mode:
                module.exit_json(msg=f"{path} would have been deleted", changed=True)
            try:
                os.remove(path)
                module.exit_json(msg=f"{path} deleted successfully", changed=True)
            except OSError as e:
                module.fail_json(msg=f"Exception when trying to delete {path}: {e}")
        else:
            # Idempotency: it is okay if the file doesn't exist
            module.exit_json(msg=f"{path} already does not exist")

    # Build client configuration from module arguments
    config = {"client": {}}
    args = [
        "name",
        "address",
        "subscriptions",
        "safe_mode",
        "redact",
        "socket",
        "keepalives",
        "keepalive",
        "registration",
        "deregister",
        "deregistration",
        "ec2",
        "chef",
        "puppet",
        "servicenow",
    ]
    for arg in args:
        if arg in module.params and module.params[arg] is not None:
            config["client"][arg] = module.params[arg]

    # Load the current config, if there is one, so we can compare.
    # A context manager is used so the file handle is not leaked
    # (the previous json.load(open(path)) left it open).
    current_config = None
    try:
        with open(path) as stream:
            current_config = json.load(stream)
    except (OSError, ValueError):
        # File either doesn't exist or it is invalid JSON
        pass

    if current_config is not None and current_config == config:
        # Config is the same, let's not change anything
        module.exit_json(msg="Client configuration is already up to date", config=config["client"], file=path)

    # Validate that directory exists before trying to write to it
    if not module.check_mode and not os.path.exists(os.path.dirname(path)):
        try:
            os.makedirs(os.path.dirname(path))
        except OSError as e:
            module.fail_json(msg=f"Unable to create {os.path.dirname(path)}: {e}")

    if module.check_mode:
        module.exit_json(
            msg="Client configuration would have been updated", changed=True, config=config["client"], file=path
        )

    try:
        with open(path, "w") as client:
            client.write(json.dumps(config, indent=4))
        module.exit_json(msg="Client configuration updated", changed=True, config=config["client"], file=path)
    except OSError as e:
        module.fail_json(msg=f"Unable to write file {path}: {e}")


if __name__ == "__main__":
    main()

View File

@@ -1,293 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017, Red Hat Inc.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
DOCUMENTATION = r"""
module: sensu_handler
author: "David Moreau Simard (@dmsimard)"
short_description: Manages Sensu handler configuration
description:
- Manages Sensu handler configuration.
- For more information, refer to the L(Sensu documentation, https://sensuapp.org/docs/latest/reference/handlers.html).
deprecated:
removed_in: 13.0.0
why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
alternative: Use Sensu Go and its accompanying collection C(sensu.sensu_go).
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
state:
type: str
description:
- Whether the handler should be present or not.
choices: ['present', 'absent']
default: present
name:
type: str
description:
- A unique name for the handler. The name cannot contain special characters or spaces.
required: true
type:
type: str
description:
- The handler type.
choices: ['pipe', 'tcp', 'udp', 'transport', 'set']
filter:
type: str
description:
- The Sensu event filter (name) to use when filtering events for the handler.
filters:
type: list
elements: str
description:
- An array of Sensu event filters (names) to use when filtering events for the handler.
- Each array item must be a string.
severities:
type: list
elements: str
description:
- An array of check result severities the handler handles.
- 'NOTE: event resolution bypasses this filtering.'
- "Example: [ 'warning', 'critical', 'unknown' ]."
mutator:
type: str
description:
- The Sensu event mutator (name) to use to mutate event data for the handler.
timeout:
type: int
description:
- The handler execution duration timeout in seconds (hard stop).
- Only used by pipe and tcp handler types.
default: 10
handle_silenced:
description:
- If events matching one or more silence entries should be handled.
type: bool
default: false
handle_flapping:
description:
- If events in the flapping state should be handled.
type: bool
default: false
command:
type: str
description:
- The handler command to be executed.
- The event data is passed to the process using STDIN.
- 'NOTE: the O(command) attribute is only required for Pipe handlers (that is, handlers configured with O(type=pipe)).'
socket:
type: dict
description:
- The socket definition scope, used to configure the TCP/UDP handler socket.
- 'NOTE: the O(socket) attribute is only required for TCP/UDP handlers (that is, handlers configured with O(type=tcp)
or O(type=udp)).'
pipe:
type: dict
description:
- The pipe definition scope, used to configure the Sensu transport pipe.
- 'NOTE: the O(pipe) attribute is only required for Transport handlers (that is, handlers configured with O(type=transport)).'
handlers:
type: list
elements: str
description:
- An array of Sensu event handlers (names) to use for events using the handler set.
- 'NOTE: the O(handlers) attribute is only required for handler sets (that is, handlers configured with O(type=set)).'
"""
EXAMPLES = r"""
# Configure a handler that sends event data as STDIN (pipe)
- name: Configure IRC Sensu handler
community.general.sensu_handler:
name: "irc_handler"
type: "pipe"
command: "/usr/local/bin/notify-irc.sh"
severities:
- "ok"
- "critical"
- "warning"
- "unknown"
timeout: 15
notify:
- Restart sensu-client
- Restart sensu-server
# Delete a handler
- name: Delete IRC Sensu handler
community.general.sensu_handler:
name: "irc_handler"
state: "absent"
# Example of a TCP handler
- name: Configure TCP Sensu handler
community.general.sensu_handler:
name: "tcp_handler"
type: "tcp"
timeout: 30
socket:
host: "10.0.1.99"
port: 4444
register: handler
notify:
- Restart sensu-client
- Restart sensu-server
- name: Secure Sensu handler configuration file
ansible.builtin.file:
path: "{{ handler['file'] }}"
owner: "sensu"
group: "sensu"
mode: "0600"
"""
RETURN = r"""
config:
description: Effective handler configuration, when state is present.
returned: success
type: dict
sample:
{
"name": "irc",
"type": "pipe",
"command": "/usr/local/bin/notify-irc.sh"
}
file:
description: Path to the handler configuration file.
returned: success
type: str
sample: "/etc/sensu/conf.d/handlers/irc.json"
name:
description: Name of the handler.
returned: success
type: str
sample: "irc"
"""
import json
import os
from ansible.module_utils.basic import AnsibleModule
def main():
    """Manage a Sensu handler definition under /etc/sensu/conf.d/handlers/.

    Builds the desired handler configuration from the module parameters,
    compares it with the JSON file on disk, and only rewrites the file when
    the two differ.  With O(state=absent) the file is deleted instead.
    """
    module = AnsibleModule(
        supports_check_mode=True,
        argument_spec=dict(
            state=dict(type="str", choices=["present", "absent"], default="present"),
            name=dict(type="str", required=True),
            type=dict(type="str", choices=["pipe", "tcp", "udp", "transport", "set"]),
            filter=dict(type="str"),
            filters=dict(type="list", elements="str"),
            severities=dict(type="list", elements="str"),
            mutator=dict(type="str"),
            timeout=dict(type="int", default=10),
            handle_silenced=dict(type="bool", default=False),
            handle_flapping=dict(type="bool", default=False),
            command=dict(type="str"),
            socket=dict(type="dict"),
            pipe=dict(type="dict"),
            handlers=dict(type="list", elements="str"),
        ),
        # Each handler type has its own mandatory companion option.
        required_if=[
            ["state", "present", ["type"]],
            ["type", "pipe", ["command"]],
            ["type", "tcp", ["socket"]],
            ["type", "udp", ["socket"]],
            ["type", "transport", ["pipe"]],
            ["type", "set", ["handlers"]],
        ],
    )

    state = module.params["state"]
    name = module.params["name"]
    path = f"/etc/sensu/conf.d/handlers/{name}.json"

    if state == "absent":
        if os.path.exists(path):
            if module.check_mode:
                module.exit_json(msg=f"{path} would have been deleted", changed=True)
            try:
                os.remove(path)
                module.exit_json(msg=f"{path} deleted successfully", changed=True)
            except OSError as e:
                module.fail_json(msg=f"Exception when trying to delete {path}: {e}")
        else:
            # Idempotency: it is okay if the file doesn't exist
            module.exit_json(msg=f"{path} already does not exist")

    # Build handler configuration from module arguments
    config = {"handlers": {name: {}}}
    args = [
        "type",
        "filter",
        "filters",
        "severities",
        "mutator",
        "timeout",
        "handle_silenced",
        "handle_flapping",
        "command",
        "socket",
        "pipe",
        "handlers",
    ]
    for arg in args:
        if arg in module.params and module.params[arg] is not None:
            config["handlers"][name][arg] = module.params[arg]

    # Load the current config, if there is one, so we can compare.
    # A context manager is used so the file handle is not leaked
    # (the previous json.load(open(path)) left it open).
    current_config = None
    try:
        with open(path) as stream:
            current_config = json.load(stream)
    except (OSError, ValueError):
        # File either doesn't exist or it is invalid JSON
        pass

    if current_config is not None and current_config == config:
        # Config is the same, let's not change anything
        module.exit_json(
            msg="Handler configuration is already up to date", config=config["handlers"][name], file=path, name=name
        )

    # Validate that directory exists before trying to write to it
    if not module.check_mode and not os.path.exists(os.path.dirname(path)):
        try:
            os.makedirs(os.path.dirname(path))
        except OSError as e:
            module.fail_json(msg=f"Unable to create {os.path.dirname(path)}: {e}")

    if module.check_mode:
        module.exit_json(
            msg="Handler configuration would have been updated",
            changed=True,
            config=config["handlers"][name],
            file=path,
            name=name,
        )

    try:
        with open(path, "w") as handler:
            handler.write(json.dumps(config, indent=4))
        module.exit_json(
            msg="Handler configuration updated", changed=True, config=config["handlers"][name], file=path, name=name
        )
    except OSError as e:
        module.fail_json(msg=f"Unable to write file {path}: {e}")


if __name__ == "__main__":
    main()

View File

@@ -1,270 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017, Steven Bambling <smbambling@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
DOCUMENTATION = r"""
module: sensu_silence
author: Steven Bambling (@smbambling)
short_description: Manage Sensu silence entries
description:
- Create and clear (delete) a silence entries using the Sensu API for subscriptions and checks.
deprecated:
removed_in: 13.0.0
why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
alternative: Use Sensu Go and its accompanying collection C(sensu.sensu_go).
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
check:
type: str
description:
- Specifies the check which the silence entry applies to.
creator:
type: str
description:
- Specifies the entity responsible for this entry.
expire:
type: int
description:
- If specified, the silence entry is automatically cleared after this number of seconds.
expire_on_resolve:
description:
- If specified as true, the silence entry is automatically cleared once the condition it is silencing is resolved.
type: bool
reason:
type: str
description:
- If specified, this free-form string is used to provide context or rationale for the reason this silence entry was
created.
state:
type: str
description:
- Specifies to create or clear (delete) a silence entry using the Sensu API.
default: present
choices: ['present', 'absent']
subscription:
type: str
description:
- Specifies the subscription which the silence entry applies to.
- To create a silence entry for a client prepend C(client:) to client name. Example - C(client:server1.example.dev).
required: true
url:
type: str
description:
- Specifies the URL of the Sensu monitoring host server.
default: http://127.0.01:4567
"""
EXAMPLES = r"""
# Silence ALL checks for a given client
- name: Silence server1.example.dev
community.general.sensu_silence:
subscription: client:server1.example.dev
creator: "{{ ansible_user_id }}"
reason: Performing maintenance
# Silence specific check for a client
- name: Silence CPU_Usage check for server1.example.dev
community.general.sensu_silence:
subscription: client:server1.example.dev
check: CPU_Usage
creator: "{{ ansible_user_id }}"
reason: Investigation alert issue
# Silence multiple clients from a dict
silence:
server1.example.dev:
reason: 'Deployment in progress'
server2.example.dev:
reason: 'Deployment in progress'
- name: Silence several clients from a dict
community.general.sensu_silence:
subscription: "client:{{ item.key }}"
reason: "{{ item.value.reason }}"
creator: "{{ ansible_user_id }}"
with_dict: "{{ silence }}"
"""
RETURN = r"""
"""
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
def query(module, url, check, subscription):
    """Fetch existing silence entries for the given check/subscription.

    Returns a ``(rc, data, changed)`` triple where ``rc`` and ``changed`` are
    always ``False`` and ``data`` is the decoded API response (or ``""`` when
    the body cannot be parsed as JSON).
    """
    request_data = {"check": check, "subscription": subscription}
    # Drop unset filters so they are not sent to the API.
    request_data = {key: value for key, value in request_data.items() if value is not None}

    headers = {"Content-Type": "application/json"}
    response, info = fetch_url(
        module, url + "/silenced", method="GET", headers=headers, data=json.dumps(request_data)
    )
    if info["status"] == 500:
        module.fail_json(msg=f"Failed to query silence {subscription}. Reason: {info}")

    try:
        json_out = json.loads(response.read())
    except Exception:
        json_out = ""
    return False, json_out, False
def clear(module, url, check, subscription):
    """Clear (delete) a silence entry via the Sensu API if it exists.

    Returns a ``(rc, data, changed)`` triple; ``rc`` is always ``False``.
    """
    # Look up current entries so we only clear something that exists.
    rc, out, changed = query(module, url, check, subscription)
    existing = {entry["subscription"]: entry["check"] for entry in out}

    if subscription in existing:
        exists = existing[subscription] == check if check else True
    else:
        exists = False

    # Nothing matching: report no change.
    if not exists:
        return False, out, changed

    # In check mode, report the would-be change without touching the API.
    if module.check_mode:
        return False, out, True

    request_data = {"check": check, "subscription": subscription}
    # Drop unset filters so they are not sent to the API.
    request_data = {key: value for key, value in request_data.items() if value is not None}
    headers = {"Content-Type": "application/json"}
    response, info = fetch_url(
        module, url + "/silenced/clear", method="POST", headers=headers, data=json.dumps(request_data)
    )
    if info["status"] != 204:
        module.fail_json(msg=f"Failed to silence {subscription}. Reason: {info}")

    try:
        json_out = json.loads(response.read())
    except Exception:
        json_out = ""
    return False, json_out, True
def create(module, url, check, creator, expire, expire_on_resolve, reason, subscription):
    """Create a silence entry via the Sensu API unless an equivalent one already exists.

    Returns a ``(rc, data, changed)`` triple; ``rc`` is always ``False``.
    """
    rc, out, changed = query(module, url, check, subscription)

    def is_equivalent(entry):
        # Unset module options (None / empty string) match any existing value.
        return (
            entry["subscription"] == subscription
            and (check is None or check == entry["check"])
            and (creator == "" or creator == entry["creator"])
            and (reason == "" or reason == entry["reason"])
            and (expire is None or expire == entry["expire"])
            and (expire_on_resolve is None or expire_on_resolve == entry["expire_on_resolve"])
        )

    if any(is_equivalent(entry) for entry in out):
        return False, out, False

    # In check mode, report the would-be change without touching the API.
    if module.check_mode:
        return False, out, True

    request_data = {
        "check": check,
        "creator": creator,
        "expire": expire,
        "expire_on_resolve": expire_on_resolve,
        "reason": reason,
        "subscription": subscription,
    }
    # Drop unset options so they are not sent to the API.
    request_data = {key: value for key, value in request_data.items() if value is not None}
    headers = {"Content-Type": "application/json"}
    response, info = fetch_url(
        module, url + "/silenced", method="POST", headers=headers, data=json.dumps(request_data)
    )
    if info["status"] != 201:
        module.fail_json(msg=f"Failed to silence {subscription}. Reason: {info['msg']}")

    try:
        json_out = json.loads(response.read())
    except Exception:
        json_out = ""
    return False, json_out, True
def main():
    """Entry point for the sensu_silence module: create or clear a silence entry."""
    module = AnsibleModule(
        argument_spec={
            "check": {},
            "creator": {},
            "expire": {"type": "int"},
            "expire_on_resolve": {"type": "bool"},
            "reason": {},
            "state": {"default": "present", "choices": ["present", "absent"]},
            "subscription": {"required": True},
            "url": {"default": "http://127.0.01:4567"},
        },
        supports_check_mode=True,
    )

    params = module.params
    url = params["url"]
    check = params["check"]
    subscription = params["subscription"]

    # `state` is restricted to exactly these two choices by the spec above.
    if params["state"] == "present":
        rc, out, changed = create(
            module,
            url,
            check,
            params["creator"],
            params["expire"],
            params["expire_on_resolve"],
            params["reason"],
            subscription,
        )
    else:
        rc, out, changed = clear(module, url, check, subscription)

    if rc != 0:
        module.fail_json(msg="failed", result=out)
    module.exit_json(msg="success", result=out, changed=changed)


if __name__ == "__main__":
    main()

View File

@@ -1,155 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2014, Anders Ingemann <aim@secoya.dk>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
DOCUMENTATION = r"""
module: sensu_subscription
short_description: Manage Sensu subscriptions
description:
- Manage which I(sensu channels) a machine should subscribe to.
deprecated:
removed_in: 13.0.0
why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
alternative: Use Sensu Go and its accompanying collection C(sensu.sensu_go).
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
name:
type: str
description:
- The name of the channel.
required: true
state:
type: str
description:
- Whether the machine should subscribe or unsubscribe from the channel.
choices: ['present', 'absent']
default: present
path:
type: str
description:
- Path to the subscriptions JSON file.
default: /etc/sensu/conf.d/subscriptions.json
backup:
description:
- Create a backup file (if yes), including the timestamp information so you can get the original file back if you somehow
clobbered it incorrectly.
type: bool
default: false
requirements: []
author: Anders Ingemann (@andsens)
"""
RETURN = r"""
reasons:
description: The reasons why the module changed or did not change something.
returned: success
type: list
sample: ["channel subscription was absent and state is 'present'"]
"""
EXAMPLES = r"""
# Subscribe to the nginx channel
- name: Subscribe to nginx checks
community.general.sensu_subscription: name=nginx
# Unsubscribe from the common checks channel
- name: Unsubscribe from common checks
community.general.sensu_subscription: name=common state=absent
"""
import json
import traceback
from ansible.module_utils.basic import AnsibleModule
def sensu_subscription(module, path, name, state="present", backup=False):
    """Ensure the subscription ``name`` is present in (or absent from) the JSON file at ``path``.

    :param module: AnsibleModule instance; provides ``check_mode``,
        ``backup_local`` and ``fail_json``.
    :param path: Path to the Sensu client subscriptions JSON file.
    :param name: Channel name to subscribe to or unsubscribe from.
    :param state: Either ``"present"`` or ``"absent"``.
    :param backup: When true, back up the file before rewriting it.
    :return: Tuple ``(changed, reasons)``.
    """
    changed = False
    reasons = []

    # Read the current configuration.  A missing file is only fatal when a
    # subscription must be added; use a context manager so the handle is
    # closed (the previous json.load(open(path)) leaked it).
    try:
        with open(path) as stream:
            config = json.load(stream)
    except OSError as e:
        if e.errno == 2:  # File not found, non-fatal
            if state == "absent":
                reasons.append("file did not exist and state is 'absent'")
                return changed, reasons
            config = {}
        else:
            module.fail_json(msg=f"{e}", exception=traceback.format_exc())
    except ValueError:
        module.fail_json(msg=f"{path} contains invalid JSON")

    if "client" not in config:
        if state == "absent":
            reasons.append("'client' did not exist and state is 'absent'")
            return changed, reasons
        config["client"] = {}
        changed = True
        reasons.append("'client' did not exist")

    if "subscriptions" not in config["client"]:
        if state == "absent":
            reasons.append("'client.subscriptions' did not exist and state is 'absent'")
            return changed, reasons
        config["client"]["subscriptions"] = []
        changed = True
        reasons.append("'client.subscriptions' did not exist")

    if name not in config["client"]["subscriptions"]:
        if state == "absent":
            reasons.append("channel subscription was absent")
            return changed, reasons
        config["client"]["subscriptions"].append(name)
        changed = True
        reasons.append("channel subscription was absent and state is 'present'")
    else:
        if state == "absent":
            config["client"]["subscriptions"].remove(name)
            changed = True
            reasons.append("channel subscription was present and state is 'absent'")

    if changed and not module.check_mode:
        if backup:
            module.backup_local(path)
        try:
            # Write via a context manager instead of the previous unclosed
            # open(path, "w").write(...) one-liner.
            with open(path, "w") as stream:
                stream.write(json.dumps(config, indent=2) + "\n")
        except OSError as e:
            module.fail_json(msg=f"Failed to write to file {path}: {e}", exception=traceback.format_exc())
    return changed, reasons
def main():
    """Entry point for the sensu_subscription module: parse arguments and reconcile the subscription."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type="str", required=True),
            path=dict(type="str", default="/etc/sensu/conf.d/subscriptions.json"),
            state=dict(type="str", default="present", choices=["present", "absent"]),
            backup=dict(type="bool", default=False),
        ),
        supports_check_mode=True,
    )

    changed, reasons = sensu_subscription(
        module,
        module.params["path"],
        module.params["name"],
        module.params["state"],
        module.params["backup"],
    )
    module.exit_json(
        path=module.params["path"],
        name=module.params["name"],
        changed=changed,
        msg="OK",
        reasons=reasons,
    )


if __name__ == "__main__":
    main()

File diff suppressed because it is too large Load Diff

View File

@@ -1,138 +0,0 @@
#!/usr/bin/python
#
# Copyright Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
DOCUMENTATION = r"""
module: typetalk
short_description: Send a message to typetalk
description:
- Send a message to typetalk using typetalk API.
deprecated:
removed_in: 13.0.0
why: The typetalk service will be discontinued on Dec 2025. See U(https://nulab.com/blog/company-news/typetalk-sunsetting/).
alternative: There is none.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
client_id:
type: str
description:
- OAuth2 client ID.
required: true
client_secret:
type: str
description:
- OAuth2 client secret.
required: true
topic:
type: int
description:
- Topic ID to post message.
required: true
msg:
type: str
description:
- Message body.
required: true
requirements: [json]
author: "Takashi Someda (@tksmd)"
"""
EXAMPLES = r"""
- name: Send a message to typetalk
community.general.typetalk:
client_id: 12345
client_secret: 12345
topic: 1
msg: install completed
"""
import json
from urllib.parse import urlencode
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import ConnectionError, fetch_url
def do_request(module, url, params, headers=None):
    """POST url-encoded params to url and return the response object.

    Raises ConnectionError (with the HTTP status stored on its .code
    attribute) whenever the response status is not 200.
    """
    # Build the final header set. The module's User-Agent always overrides a
    # caller-supplied value, matching the original dict-merge order.
    merged_headers = dict(headers or {})
    merged_headers["User-Agent"] = "Ansible/typetalk module"

    response, info = fetch_url(module, url, data=urlencode(params), headers=merged_headers)
    if info["status"] != 200:
        failure = ConnectionError(info["msg"])
        failure.code = info["status"]
        raise failure
    return response
def get_access_token(module, client_id, client_secret):
    """Fetch an OAuth2 access token via the client-credentials grant.

    Requests the "topic.post" scope and returns the token string from the
    JSON response body.
    """
    token_response = do_request(
        module,
        "https://typetalk.com/oauth2/access_token",
        {
            "client_id": client_id,
            "client_secret": client_secret,
            "grant_type": "client_credentials",
            "scope": "topic.post",
        },
    )
    # The endpoint answers with a JSON document containing "access_token".
    return json.load(token_response)["access_token"]
def send_message(module, client_id, client_secret, topic, msg):
    """Post msg to the given typetalk topic.

    Returns (True, {"access_token": token}) on success, or
    (False, ConnectionError) when either HTTP request fails.
    """
    try:
        access_token = get_access_token(module, client_id, client_secret)
        do_request(
            module,
            f"https://typetalk.com/api/v1/topics/{topic}",
            {"message": msg},
            {"Authorization": f"Bearer {access_token}"},
        )
    except ConnectionError as exc:
        return False, exc
    return True, {"access_token": access_token}
def main():
    """Module entry point: parse arguments and post the message to typetalk."""
    module = AnsibleModule(
        argument_spec=dict(
            client_id=dict(required=True),
            client_secret=dict(required=True, no_log=True),
            topic=dict(required=True, type="int"),
            msg=dict(required=True),
        ),
        supports_check_mode=False,
    )

    # The historical `if not json: module.fail_json(...)` guard was dead code:
    # `json` is part of the standard library, so the module-level import either
    # succeeds (yielding a truthy module object) or raises ImportError long
    # before main() runs. It has been removed.

    client_id = module.params["client_id"]
    client_secret = module.params["client_secret"]
    topic = module.params["topic"]
    msg = module.params["msg"]

    res, error = send_message(module, client_id, client_secret, topic, msg)
    if not res:
        # `error` is the ConnectionError raised by do_request(); its `code`
        # attribute carries the HTTP status of the failed request.
        module.fail_json(msg=f"fail to send message with response code {error.code}")
    module.exit_json(changed=True, topic=topic, msg=msg)


if __name__ == "__main__":
    main()

View File

@@ -1,6 +0,0 @@
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
azp/posix/1
needs/root

View File

@@ -1,179 +0,0 @@
---
####################################################################
# WARNING: These are designed specifically for Ansible tests #
# and should not be used as examples of how to write Ansible roles #
####################################################################

# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

- name: Creating a client if the directory doesn't exist should work
  sensu_client:
    subscriptions:
      - default

- name: Set variable for client file
  set_fact:
    client_file: "/etc/sensu/conf.d/client.json"

# The module must recover gracefully from a pre-existing file that does not
# contain valid JSON.
- name: Insert invalid JSON in the client file
  lineinfile:
    state: "present"
    create: true  # boolean instead of the non-idiomatic string "yes"
    path: "{{ client_file }}"
    line: "{'foo' = bar}"

- name: Configure Sensu client with an existing invalid file
  sensu_client:
    name: "client"
    state: "present"
    subscriptions:
      - default
  register: client

- name: Retrieve configuration file
  slurp:
    src: "{{ client_file }}"
  register: client_config

- name: Assert that client data was set successfully and properly
  assert:
    that:
      - "client is successful"
      - "client is changed"
      - "client['config']['name'] == 'client'"
      - "'default' in client['config']['subscriptions']"
      - "client['file'] == client_file"

- name: Assert that the client configuration file is actually configured properly
  vars:
    config: "{{ client_config.content | b64decode | from_json }}"
  assert:
    that:
      - "config['client']['keepalives'] == true"
      - "config['client']['name'] == 'client'"
      - "config['client']['safe_mode'] == false"
      - "'default' in config['client']['subscriptions']"

- name: Delete Sensu client configuration
  sensu_client:
    state: "absent"
  register: client_delete

# Deleting a second time must be idempotent (no change reported).
- name: Delete Sensu client configuration (again)
  sensu_client:
    state: "absent"
  register: client_delete_twice

- name: Retrieve configuration file stat
  stat:
    path: "{{ client_file }}"
  register: client_stat

- name: Assert that client deletion was successful
  assert:
    that:
      - "client_delete is successful"
      - "client_delete is changed"
      - "client_delete_twice is successful"
      - "client_delete_twice is not changed"
      - "client_stat.stat.exists == false"

- name: Configuring a client without subscriptions should fail
  sensu_client:
    name: "failure"
  register: failure
  ignore_errors: true

- name: Assert failure to create client
  assert:
    that:
      - failure is failed
      - "'the following are missing: subscriptions' in failure['msg']"

# NOTE(review): "custom" and "occurrences" are nested under "keepalive" — the
# assertions below read client.config.keepalive.occurrences, which fixes the
# nesting of "occurrences"; "custom" is assumed to sit alongside it.
- name: Configure a new client from scratch with custom parameters
  sensu_client:
    name: "custom"
    address: "host.fqdn"
    subscriptions:
      - "default"
      - "webserver"
    redact:
      - "password"
    socket:
      bind: "127.0.0.1"
      port: "3030"
    keepalive:
      thresholds:
        warning: "180"
        critical: "300"
      handlers:
        - "email"
      custom:
        - broadcast: "irc"
      occurrences: "3"
  register: client

- name: Configure a new client from scratch with custom parameters (twice)
  sensu_client:
    name: "custom"
    address: "host.fqdn"
    subscriptions:
      - "default"
      - "webserver"
    redact:
      - "password"
    socket:
      bind: "127.0.0.1"
      port: "3030"
    keepalive:
      thresholds:
        warning: "180"
        critical: "300"
      handlers:
        - "email"
      custom:
        - broadcast: "irc"
      occurrences: "3"
  register: client_twice

- name: Retrieve configuration file
  slurp:
    src: "{{ client_file }}"
  register: client_config

- name: Assert that client data was set successfully and properly
  assert:
    that:
      - "client is successful"
      - "client is changed"
      - "client_twice is successful"
      - "client_twice is not changed"
      - "client['config']['name'] == 'custom'"
      - "client['config']['address'] == 'host.fqdn'"
      - "'default' in client['config']['subscriptions']"
      - "'webserver' in client['config']['subscriptions']"
      - "'password' in client['config']['redact']"
      - "client['config']['keepalive']['thresholds']['warning'] == '180'"
      - "client['config']['keepalive']['thresholds']['critical'] == '300'"
      - "'email' in client['config']['keepalive']['handlers']"
      - "client['config']['keepalive']['occurrences'] == '3'"
      - "client['file'] == client_file"

- name: Assert that the client configuration file is actually configured properly
  vars:
    config: "{{ client_config.content | b64decode | from_json }}"
  assert:
    that:
      - "config['client']['name'] == 'custom'"
      - "config['client']['address'] == 'host.fqdn'"
      - "config['client']['keepalives'] == true"
      - "config['client']['safe_mode'] == false"
      - "'default' in config['client']['subscriptions']"
      - "'webserver' in config['client']['subscriptions']"
      - "'password' in config['client']['redact']"
      - "config['client']['keepalive']['thresholds']['warning'] == '180'"
      - "config['client']['keepalive']['thresholds']['critical'] == '300'"
      - "'email' in config['client']['keepalive']['handlers']"
      - "config['client']['keepalive']['occurrences'] == '3'"

View File

@@ -1,6 +0,0 @@
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
azp/posix/1
needs/root

View File

@@ -1,129 +0,0 @@
---
####################################################################
# WARNING: These are designed specifically for Ansible tests #
# and should not be used as examples of how to write Ansible roles #
####################################################################

# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

- name: Creating a handler if the directory doesn't exist should work
  sensu_handler:
    name: "handler"
    type: "pipe"
    command: "/bin/bash"
    state: "present"

# The module must recover gracefully from a pre-existing file that does not
# contain valid JSON.
- name: Insert junk JSON in a handlers file
  lineinfile:
    state: "present"
    create: true  # boolean instead of the non-idiomatic string "yes"
    path: "/etc/sensu/conf.d/handlers/handler.json"
    line: "{'foo' = bar}"

- name: Configure a handler with an existing invalid file
  sensu_handler:
    name: "handler"
    type: "pipe"
    command: "/bin/bash"
    state: "present"
  register: handler

# Re-running with identical parameters must be idempotent.
- name: Configure a handler (again)
  sensu_handler:
    name: "handler"
    type: "pipe"
    command: "/bin/bash"
    state: "present"
  register: handler_twice

- name: Retrieve configuration file
  slurp:
    src: "{{ handler['file'] }}"
  register: handler_config

- name: Assert that handler data was set successfully and properly
  assert:
    that:
      - "handler is successful"
      - "handler is changed"
      - "handler_twice is successful"
      - "handler_twice is not changed"
      - "handler['name'] == 'handler'"
      - "handler['file'] == '/etc/sensu/conf.d/handlers/handler.json'"
      - "handler['config']['type'] == 'pipe'"
      - "handler['config']['command'] == '/bin/bash'"
      - "handler['config']['timeout'] == 10"
      - "handler['config']['handle_flapping'] == false"
      - "handler['config']['handle_silenced'] == false"

- name: Assert that the handler configuration file is actually configured properly
  vars:
    config: "{{ handler_config.content | b64decode | from_json }}"
  assert:
    that:
      - "'handler' in config['handlers']"
      - "config['handlers']['handler']['type'] == 'pipe'"
      - "config['handlers']['handler']['command'] == '/bin/bash'"
      - "config['handlers']['handler']['timeout'] == 10"
      - "config['handlers']['handler']['handle_flapping'] == false"
      - "config['handlers']['handler']['handle_silenced'] == false"

- name: Delete Sensu handler configuration
  sensu_handler:
    name: "handler"
    state: "absent"
  register: handler_delete

- name: Delete Sensu handler configuration (again)
  sensu_handler:
    name: "handler"
    state: "absent"
  register: handler_delete_twice

- name: Retrieve configuration file stat
  stat:
    path: "{{ handler['file'] }}"
  register: handler_stat

- name: Assert that handler deletion was successful
  assert:
    that:
      - "handler_delete is successful"
      - "handler_delete is changed"
      - "handler_delete_twice is successful"
      - "handler_delete_twice is not changed"
      - "handler_stat.stat.exists == false"

- name: Configuring a handler without a name should fail
  sensu_handler:
    type: "pipe"
    command: "/bin/bash"
  register: failure
  ignore_errors: true

- name: Assert that configuring a handler without a name fails
  assert:
    that:
      - failure is failed
      - "'required arguments: name' in failure['msg']"

- name: Configuring a handler without a type should fail
  sensu_handler:
    name: "pipe"
    command: "/bin/bash"
  register: failure
  ignore_errors: true

- name: Assert that configuring a handler without a type fails
  assert:
    that:
      - failure is failed
      - "'the following are missing: type' in failure['msg']"

# Per-handler-type coverage lives in dedicated task files.
- include_tasks: pipe.yml
- include_tasks: tcp.yml
- include_tasks: udp.yml
- include_tasks: set.yml
- include_tasks: transport.yml

View File

@@ -1,25 +0,0 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note: Pipe handlers are also tested and used as part of basic main.yml coverage

# Pipe handlers execute a command, so "command" is mandatory for this type.
- name: Configuring a handler with missing pipe parameters should fail
  sensu_handler:
    name: "pipe"
    type: "pipe"
  register: failure
  ignore_errors: true

- name: Assert that configuring a handler with missing pipe parameters fails
  assert:
    that:
      - failure is failed
      - "'the following are missing: command' in failure['msg']"

- name: Configure a handler with pipe parameters
  sensu_handler:
    name: "pipe"
    type: "pipe"
    command: "/bin/bash"
  register: handler

View File

@@ -1,53 +0,0 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# A "set" handler groups other handlers under one name, so the "handlers"
# parameter is mandatory for this type.
- name: Configuring a handler with missing set parameters should fail
  sensu_handler:
    name: "set"
    type: "set"
  register: failure
  ignore_errors: true

- name: Assert that configuring a handler with missing set parameters fails
  assert:
    that:
      - failure is failed
      - "'the following are missing: handlers' in failure['msg']"

- name: Configure a set handler
  sensu_handler:
    name: "set"
    type: "set"
    handlers:
      - anotherhandler
  register: handler

- name: Retrieve configuration file
  slurp:
    src: "{{ handler['file'] }}"
  register: handler_config

- name: Validate set handler return data
  assert:
    that:
      - "handler is successful"
      - "handler is changed"
      - "handler['name'] == 'set'"
      - "handler['file'] == '/etc/sensu/conf.d/handlers/set.json'"
      - "handler['config']['type'] == 'set'"
      - "'anotherhandler' in handler['config']['handlers']"
      - "handler['config']['handle_flapping'] == false"
      - "handler['config']['handle_silenced'] == false"

- name: Assert that the handler configuration file is actually configured properly
  vars:
    config: "{{ handler_config.content | b64decode | from_json }}"
  assert:
    that:
      - "'set' in config['handlers']"
      - "config['handlers']['set']['type'] == 'set'"
      - "'anotherhandler' in config['handlers']['set']['handlers']"
      - "config['handlers']['set']['handle_flapping'] == false"
      - "config['handlers']['set']['handle_silenced'] == false"

View File

@@ -1,56 +0,0 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# TCP handlers relay events to a network socket, so "socket" is mandatory
# for this type.
- name: Configuring a handler with missing tcp parameters should fail
  sensu_handler:
    name: "tcp"
    type: "tcp"
  register: failure
  ignore_errors: true

- name: Assert that configuring a handler with missing tcp parameters fails
  assert:
    that:
      - failure is failed
      - "'the following are missing: socket' in failure['msg']"

- name: Configure a tcp handler
  sensu_handler:
    name: "tcp"
    type: "tcp"
    socket:
      host: 127.0.0.1
      port: 8000
  register: handler

- name: Retrieve configuration file
  slurp:
    src: "{{ handler['file'] }}"
  register: handler_config

- name: Validate tcp handler return data
  assert:
    that:
      - "handler is successful"
      - "handler is changed"
      - "handler['name'] == 'tcp'"
      - "handler['file'] == '/etc/sensu/conf.d/handlers/tcp.json'"
      - "handler['config']['type'] == 'tcp'"
      - "handler['config']['socket']['host'] == '127.0.0.1'"
      - "handler['config']['socket']['port'] == 8000"
      - "handler['config']['handle_flapping'] == false"
      - "handler['config']['handle_silenced'] == false"

- name: Assert that the handler configuration file is actually configured properly
  vars:
    config: "{{ handler_config.content | b64decode | from_json }}"
  assert:
    that:
      - "'tcp' in config['handlers']"
      - "config['handlers']['tcp']['type'] == 'tcp'"
      - "config['handlers']['tcp']['socket']['host'] == '127.0.0.1'"
      - "config['handlers']['tcp']['socket']['port'] == 8000"
      - "config['handlers']['tcp']['handle_flapping'] == false"
      - "config['handlers']['tcp']['handle_silenced'] == false"

View File

@@ -1,56 +0,0 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Transport handlers publish events to the Sensu transport, so "pipe" is
# mandatory for this type.
- name: Configuring a handler with missing transport parameters should fail
  sensu_handler:
    name: "transport"
    type: "transport"
  register: failure
  ignore_errors: true

- name: Assert that configuring a handler with missing transport parameters fails
  assert:
    that:
      - failure is failed
      - "'the following are missing: pipe' in failure['msg']"

- name: Configure a transport handler
  sensu_handler:
    name: "transport"
    type: "transport"
    pipe:
      type: "topic"
      name: "transport_handler"
  register: handler

- name: Retrieve configuration file
  slurp:
    src: "{{ handler['file'] }}"
  register: handler_config

- name: Validate transport handler return data
  assert:
    that:
      - "handler is successful"
      - "handler is changed"
      - "handler['name'] == 'transport'"
      - "handler['file'] == '/etc/sensu/conf.d/handlers/transport.json'"
      - "handler['config']['type'] == 'transport'"
      - "handler['config']['pipe']['type'] == 'topic'"
      - "handler['config']['pipe']['name'] == 'transport_handler'"
      - "handler['config']['handle_flapping'] == false"
      - "handler['config']['handle_silenced'] == false"

- name: Assert that the handler configuration file is actually configured properly
  vars:
    config: "{{ handler_config.content | b64decode | from_json }}"
  assert:
    that:
      - "'transport' in config['handlers']"
      - "config['handlers']['transport']['type'] == 'transport'"
      - "config['handlers']['transport']['pipe']['type'] == 'topic'"
      - "config['handlers']['transport']['pipe']['name'] == 'transport_handler'"
      - "config['handlers']['transport']['handle_flapping'] == false"
      - "config['handlers']['transport']['handle_silenced'] == false"

View File

@@ -1,56 +0,0 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# UDP handlers relay events to a network socket, so "socket" is mandatory
# for this type.
- name: Configuring a handler with missing udp parameters should fail
  sensu_handler:
    name: "udp"
    type: "udp"
  register: failure
  ignore_errors: true

- name: Assert that configuring a handler with missing udp parameters fails
  assert:
    that:
      - failure is failed
      - "'the following are missing: socket' in failure['msg']"

- name: Configure a udp handler
  sensu_handler:
    name: "udp"
    type: "udp"
    socket:
      host: 127.0.0.1
      port: 8000
  register: handler

- name: Retrieve configuration file
  slurp:
    src: "{{ handler['file'] }}"
  register: handler_config

- name: Validate udp handler return data
  assert:
    that:
      - "handler is successful"
      - "handler is changed"
      - "handler['name'] == 'udp'"
      - "handler['file'] == '/etc/sensu/conf.d/handlers/udp.json'"
      - "handler['config']['type'] == 'udp'"
      - "handler['config']['socket']['host'] == '127.0.0.1'"
      - "handler['config']['socket']['port'] == 8000"
      - "handler['config']['handle_flapping'] == false"
      - "handler['config']['handle_silenced'] == false"

- name: Assert that the handler configuration file is actually configured properly
  vars:
    config: "{{ handler_config.content | b64decode | from_json }}"
  assert:
    that:
      - "'udp' in config['handlers']"
      - "config['handlers']['udp']['type'] == 'udp'"
      - "config['handlers']['udp']['socket']['host'] == '127.0.0.1'"
      - "config['handlers']['udp']['socket']['port'] == 8000"
      - "config['handlers']['udp']['handle_flapping'] == false"
      - "config['handlers']['udp']['handle_silenced'] == false"

View File

@@ -1,51 +0,0 @@
# Copyright (c) Ansible project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
import random
import unittest
from ansible_collections.community.general.plugins.module_utils.cloud import _exponential_backoff, _full_jitter_backoff
class ExponentialBackoffStrategyTestCase(unittest.TestCase):
    """Verify the delay sequences produced by _exponential_backoff."""

    @staticmethod
    def _delays(**kwargs):
        # Helper: materialize the generator returned by the strategy factory.
        return list(_exponential_backoff(**kwargs)())

    def test_no_retries(self):
        # Zero retries must yield no delays at all.
        self.assertEqual(self._delays(retries=0), [], "list should be empty")

    def test_exponential_backoff(self):
        # Delays follow delay * backoff**attempt for each attempt.
        self.assertEqual(self._delays(retries=5, delay=1, backoff=2), [1, 2, 4, 8, 16])

    def test_max_delay(self):
        # Growth is capped at max_delay once the exponential curve exceeds it.
        self.assertEqual(
            self._delays(retries=7, delay=1, backoff=2, max_delay=60),
            [1, 2, 4, 8, 16, 32, 60],
        )

    def test_max_delay_none(self):
        # Without a cap the sequence keeps doubling unbounded.
        self.assertEqual(
            self._delays(retries=7, delay=1, backoff=2, max_delay=None),
            [1, 2, 4, 8, 16, 32, 64],
        )
class FullJitterBackoffStrategyTestCase(unittest.TestCase):
    """Verify the delay sequences produced by _full_jitter_backoff."""

    def test_no_retries(self):
        # Zero retries must yield no delays at all.
        observed = list(_full_jitter_backoff(retries=0)())
        self.assertEqual(observed, [], "list should be empty")

    def test_full_jitter(self):
        # With a fixed seed the jittered delays must match an identically
        # seeded reference stream: randint(0, 2**attempt) per attempt.
        retry_count = 5
        seed = 1
        reference = random.Random(seed)
        expected = [reference.randint(0, 2**attempt) for attempt in range(retry_count)]
        strategy = _full_jitter_backoff(retries=retry_count, delay=1, _random=random.Random(seed))
        self.assertEqual(list(strategy()), expected)

View File

@@ -1,139 +0,0 @@
# Copyright (c) Ansible project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
import pytest
from ansible_collections.community.general.plugins.module_utils.database import (
SQLParseError,
is_input_dangerous,
pg_quote_identifier,
)
# These are all valid strings
# The results are based on interpreting the identifier as a table name
VALID = {
# User quoted
'"public.table"': '"public.table"',
'"public"."table"': '"public"."table"',
'"schema test"."table test"': '"schema test"."table test"',
# We quote part
"public.table": '"public"."table"',
'"public".table': '"public"."table"',
'public."table"': '"public"."table"',
"schema test.table test": '"schema test"."table test"',
'"schema test".table test': '"schema test"."table test"',
'schema test."table test"': '"schema test"."table test"',
# Embedded double quotes
'table "test"': '"table ""test"""',
'public."table ""test"""': '"public"."table ""test"""',
'public.table "test"': '"public"."table ""test"""',
'schema "test".table': '"schema ""test"""."table"',
'"schema ""test""".table': '"schema ""test"""."table"',
'"""wat"""."""test"""': '"""wat"""."""test"""',
# Sigh, handle these as well:
'"no end quote': '"""no end quote"',
'schema."table': '"schema"."""table"',
'"schema.table': '"""schema"."table"',
'schema."table.something': '"schema"."""table"."something"',
# Embedded dots
'"schema.test"."table.test"': '"schema.test"."table.test"',
'"schema.".table': '"schema."."table"',
'"schema."."table"': '"schema."."table"',
'schema.".table"': '"schema".".table"',
'"schema".".table"': '"schema".".table"',
'"schema.".".table"': '"schema.".".table"',
# These are valid but maybe not what the user intended
'."table"': '".""table"""',
"table.": '"table."',
}
INVALID = {
("test.too.many.dots", "table"): "PostgreSQL does not support table with more than 3 dots",
('"test.too".many.dots', "database"): "PostgreSQL does not support database with more than 1 dots",
('test.too."many.dots"', "database"): "PostgreSQL does not support database with more than 1 dots",
('"test"."too"."many"."dots"', "database"): "PostgreSQL does not support database with more than 1 dots",
('"test"."too"."many"."dots"', "schema"): "PostgreSQL does not support schema with more than 2 dots",
('"test"."too"."many"."dots"', "table"): "PostgreSQL does not support table with more than 3 dots",
('"test"."too"."many"."dots"."for"."column"', "column"): "PostgreSQL does not support column with more than 4 dots",
('"table "invalid" double quote"', "table"): "User escaped identifiers must escape extra quotes",
('"schema "invalid"""."table "invalid"', "table"): "User escaped identifiers must escape extra quotes",
('"schema."table"', "table"): "User escaped identifiers must escape extra quotes",
('"schema".', "table"): "Identifier name unspecified or unquoted trailing dot",
}
HOW_MANY_DOTS = (
("role", "role", '"role"', "PostgreSQL does not support role with more than 1 dots"),
("db", "database", '"db"', "PostgreSQL does not support database with more than 1 dots"),
("db.schema", "schema", '"db"."schema"', "PostgreSQL does not support schema with more than 2 dots"),
("db.schema.table", "table", '"db"."schema"."table"', "PostgreSQL does not support table with more than 3 dots"),
(
"db.schema.table.column",
"column",
'"db"."schema"."table"."column"',
"PostgreSQL does not support column with more than 4 dots",
),
)
VALID_QUOTES = ((test, VALID[test]) for test in sorted(VALID))
INVALID_QUOTES = ((test[0], test[1], INVALID[test]) for test in sorted(INVALID))
IS_STRINGS_DANGEROUS = (
("", False),
(" ", False),
("alternative database", False),
("backup of TRUNCATED table", False),
("bob.dropper", False),
("d'artagnan", False),
("user_with_select_update_truncate_right", False),
(";DROP DATABASE fluffy_pets_photos", True),
(";drop DATABASE fluffy_pets_photos", True),
("; TRUNCATE TABLE his_valuable_table", True),
("; truncate TABLE his_valuable_table", True),
("'--", True),
('"--', True),
("' union select username, password from admin_credentials", True),
("' UNION SELECT username, password from admin_credentials", True),
("' intersect select", True),
("' INTERSECT select", True),
("' except select", True),
("' EXCEPT select", True),
(";ALTER TABLE prices", True),
(";alter table prices", True),
("; UPDATE products SET price = '0'", True),
(";update products SET price = '0'", True),
("; DELETE FROM products", True),
("; delete FROM products", True),
("; SELECT * FROM products", True),
(" ; select * from products", True),
)
@pytest.mark.parametrize("identifier, quoted_identifier", VALID_QUOTES)
def test_valid_quotes(identifier, quoted_identifier):
assert pg_quote_identifier(identifier, "table") == quoted_identifier
@pytest.mark.parametrize("identifier, id_type, msg", INVALID_QUOTES)
def test_invalid_quotes(identifier, id_type, msg):
with pytest.raises(SQLParseError) as ex:
pg_quote_identifier(identifier, id_type)
ex.match(msg)
@pytest.mark.parametrize("identifier, id_type, quoted_identifier, msg", HOW_MANY_DOTS)
def test_how_many_dots(identifier, id_type, quoted_identifier, msg):
assert pg_quote_identifier(identifier, id_type) == quoted_identifier
with pytest.raises(SQLParseError) as ex:
pg_quote_identifier(f"{identifier}.more", id_type)
ex.match(msg)
@pytest.mark.parametrize("string, result", IS_STRINGS_DANGEROUS)
def test_is_input_dangerous(string, result):
assert is_input_dangerous(string) == result

View File

@@ -1,120 +0,0 @@
# (c) 2015, Michael Scherer <mscherer@redhat.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
import pytest
from ansible_collections.community.general.plugins.module_utils import known_hosts
URLS = {
"ssh://one.example.org/example.git": {
"is_ssh_url": True,
"get_fqdn": "one.example.org",
"add_host_key_cmd": " -t rsa one.example.org",
"port": None,
},
"ssh+git://two.example.org/example.git": {
"is_ssh_url": True,
"get_fqdn": "two.example.org",
"add_host_key_cmd": " -t rsa two.example.org",
"port": None,
},
"rsync://three.example.org/user/example.git": {
"is_ssh_url": False,
"get_fqdn": "three.example.org",
"add_host_key_cmd": None, # not called for non-ssh urls
"port": None,
},
"git@four.example.org:user/example.git": {
"is_ssh_url": True,
"get_fqdn": "four.example.org",
"add_host_key_cmd": " -t rsa four.example.org",
"port": None,
},
"git+ssh://five.example.org/example.git": {
"is_ssh_url": True,
"get_fqdn": "five.example.org",
"add_host_key_cmd": " -t rsa five.example.org",
"port": None,
},
"ssh://six.example.org:21/example.org": {
# ssh on FTP Port?
"is_ssh_url": True,
"get_fqdn": "six.example.org",
"add_host_key_cmd": " -t rsa -p 21 six.example.org",
"port": "21",
},
"ssh://[2001:DB8::abcd:abcd]/example.git": {
"is_ssh_url": True,
"get_fqdn": "[2001:DB8::abcd:abcd]",
"add_host_key_cmd": " -t rsa [2001:DB8::abcd:abcd]",
"port": None,
},
"ssh://[2001:DB8::abcd:abcd]:22/example.git": {
"is_ssh_url": True,
"get_fqdn": "[2001:DB8::abcd:abcd]",
"add_host_key_cmd": " -t rsa -p 22 [2001:DB8::abcd:abcd]",
"port": "22",
},
"username@[2001:DB8::abcd:abcd]/example.git": {
"is_ssh_url": True,
"get_fqdn": "[2001:DB8::abcd:abcd]",
"add_host_key_cmd": " -t rsa [2001:DB8::abcd:abcd]",
"port": None,
},
"username@[2001:DB8::abcd:abcd]:path/example.git": {
"is_ssh_url": True,
"get_fqdn": "[2001:DB8::abcd:abcd]",
"add_host_key_cmd": " -t rsa [2001:DB8::abcd:abcd]",
"port": None,
},
"ssh://internal.git.server:7999/repos/repo.git": {
"is_ssh_url": True,
"get_fqdn": "internal.git.server",
"add_host_key_cmd": " -t rsa -p 7999 internal.git.server",
"port": "7999",
},
}
@pytest.mark.parametrize("url, is_ssh_url", ((k, URLS[k]["is_ssh_url"]) for k in sorted(URLS)))
def test_is_ssh_url(url, is_ssh_url):
assert known_hosts.is_ssh_url(url) == is_ssh_url
@pytest.mark.parametrize("url, fqdn, port", ((k, URLS[k]["get_fqdn"], URLS[k]["port"]) for k in sorted(URLS)))
def test_get_fqdn_and_port(url, fqdn, port):
assert known_hosts.get_fqdn_and_port(url) == (fqdn, port)
@pytest.mark.parametrize(
    "fqdn, port, add_host_key_cmd",
    (
        (URLS[k]["get_fqdn"], URLS[k]["port"], URLS[k]["add_host_key_cmd"])
        for k in sorted(URLS)
        if URLS[k]["is_ssh_url"]
    ),
)
def test_add_host_key(mocker, fqdn, port, add_host_key_cmd):
    """add_host_key must invoke ssh-keyscan with the expected arguments."""
    keyscan_cmd = "/custom/path/ssh-keyscan"

    # Fake AnsibleModule providing only the methods add_host_key uses.
    fake_module = mocker.MagicMock()
    fake_module.get_bin_path = mocker.MagicMock(return_value=keyscan_cmd)
    fake_module.run_command = mocker.MagicMock(
        return_value=(0, "Needs output, otherwise thinks ssh-keyscan timed out'", "")
    )
    fake_module.append_to_file = mocker.MagicMock(return_value=(None,))

    # Pretend the target directory and known_hosts file already exist.
    mocker.patch("os.path.isdir", return_value=True)
    mocker.patch("os.path.exists", return_value=True)

    known_hosts.add_host_key(fake_module, fqdn, port=port)

    fake_module.run_command.assert_called_with(
        keyscan_cmd + add_host_key_cmd, environ_update={"LANGUAGE": "C", "LC_ALL": "C"}
    )

View File

@@ -1,54 +0,0 @@
# Copyright (c) 2019, Andrey Tuzhilin <andrei.tuzhilin@gmail.com>
# Copyright (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
import pytest
from ansible_collections.community.general.plugins.module_utils.saslprep import saslprep
VALID = [
("", ""),
("\u00a0", " "),
("a", "a"),
("й", "й"),
("\u30de\u30c8\u30ea\u30c3\u30af\u30b9", "\u30de\u30c8\u30ea\u30c3\u30af\u30b9"),
("The\u00adM\u00aatr\u2168", "TheMatrIX"),
("I\u00adX", "IX"),
("user", "user"),
("USER", "USER"),
("\u00aa", "a"),
("\u2168", "IX"),
("\u05be\u00a0\u05be", "\u05be\u0020\u05be"),
]
INVALID = [
(None, TypeError),
(b"", TypeError),
("\u0221", ValueError),
("\u0007", ValueError),
("\u0627\u0031", ValueError),
("\ue0001", ValueError),
("\ue0020", ValueError),
("\ufff9", ValueError),
("\ufdd0", ValueError),
("\u0000", ValueError),
("\u06dd", ValueError),
("\uffffD", ValueError),
("\ud800", ValueError),
("\u200e", ValueError),
("\u05be\u00aa\u05be", ValueError),
]
@pytest.mark.parametrize("source,target", VALID)
def test_saslprep_conversions(source, target):
assert saslprep(source) == target
@pytest.mark.parametrize("source,exception", INVALID)
def test_saslprep_exceptions(source, exception):
with pytest.raises(exception):
saslprep(source)

View File

@@ -7,52 +7,6 @@ anchors:
environ_true: &env-def-true {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true}
environ_false: &env-def-false {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false}
test_cases:
- id: install_dancer_compatibility
input:
name: Dancer
mode: compatibility
output:
changed: true
cpanm_version: '1.7047'
mocks:
run_command:
- command: [/testbin/cpanm, --version]
environ: *env-def-true
rc: 0
out: |
cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm)
perl version 5.041005 (/usr/local/bin/perl)
err: ''
- command: [/testbin/perl, -le, use Dancer;]
environ: *env-def-false
rc: 2
out: ''
err: error, not installed
- command: [/testbin/cpanm, Dancer]
environ: *env-def-true
rc: 0
out: ''
err: ''
- id: install_dancer_already_installed_compatibility
input:
name: Dancer
mode: compatibility
output:
changed: false
mocks:
run_command:
- command: [/testbin/cpanm, --version]
environ: *env-def-true
rc: 0
out: |
cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm)
perl version 5.041005 (/usr/local/bin/perl)
err: ''
- command: [/testbin/perl, -le, use Dancer;]
environ: *env-def-false
rc: 0
out: ''
err: ''
- id: install_dancer
input:
name: Dancer
@@ -72,26 +26,6 @@ test_cases:
rc: 0
out: ''
err: ''
- id: install_distribution_file_compatibility
input:
name: MIYAGAWA/Plack-0.99_05.tar.gz
mode: compatibility
output:
changed: true
mocks:
run_command:
- command: [/testbin/cpanm, --version]
environ: *env-def-true
rc: 0
out: |
cpanm (App::cpanminus) version 1.7047 (/usr/local/bin/cpanm)
perl version 5.041005 (/usr/local/bin/perl)
err: ''
- command: [/testbin/cpanm, MIYAGAWA/Plack-0.99_05.tar.gz]
environ: *env-def-true
rc: 0
out: ''
err: ''
- id: install_distribution_file
input:
name: MIYAGAWA/Plack-0.99_05.tar.gz