Reformat everything.

Felix Fontein
2025-11-01 12:08:41 +01:00
parent 3f2213791a
commit 340ff8586d
1008 changed files with 61301 additions and 58309 deletions

View File

@@ -21,20 +21,21 @@ class LockTimeout(Exception):
class FileLock:
'''
"""
Currently FileLock is implemented via fcntl.flock on a lock file, however this
behaviour may change in the future. Avoid mixing lock types fcntl.flock,
fcntl.lockf and module_utils.common.file.FileLock as it will certainly cause
unwanted and/or unexpected behaviour
'''
"""
def __init__(self):
self.lockfd = None
@contextmanager
def lock_file(self, path, tmpdir, lock_timeout=None):
'''
"""
Context for lock acquisition
'''
"""
try:
self.set_lock(path, tmpdir, lock_timeout)
yield
@@ -42,7 +43,7 @@ class FileLock:
self.unlock()
def set_lock(self, path, tmpdir, lock_timeout=None):
'''
"""
Create a lock file based on path with flock to prevent other processes
using given path.
Please note that currently file locking only works when it is executed by
@@ -55,14 +56,14 @@ class FileLock:
0 = Do not wait, fail if lock cannot be acquired immediately,
Default is None, wait indefinitely until lock is released.
:returns: True
'''
lock_path = os.path.join(tmpdir, f'ansible-{os.path.basename(path)}.lock')
"""
lock_path = os.path.join(tmpdir, f"ansible-{os.path.basename(path)}.lock")
l_wait = 0.1
r_exception = IOError
if sys.version_info[0] == 3:
r_exception = BlockingIOError
self.lockfd = open(lock_path, 'w')
self.lockfd = open(lock_path, "w")
if lock_timeout <= 0:
fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
@@ -82,7 +83,7 @@ class FileLock:
continue
self.lockfd.close()
raise LockTimeout(f'{lock_timeout} sec')
raise LockTimeout(f"{lock_timeout} sec")
fcntl.flock(self.lockfd, fcntl.LOCK_EX)
os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD)
@@ -90,12 +91,12 @@ class FileLock:
return True
def unlock(self):
'''
"""
Make sure lock file is available for everyone and Unlock the file descriptor
locked by set_lock
:returns: True
'''
"""
if not self.lockfd:
return True
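
For reference, a minimal standalone sketch of the flock-with-retry pattern the class above wraps; the function name, timeout handling, and poll interval here are illustrative assumptions, not this module's API:

import fcntl
import time

def try_lock(fd, timeout=10.0, poll=0.1):
    # Poll a non-blocking exclusive flock until it succeeds or the timeout expires.
    deadline = time.monotonic() + timeout
    while True:
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            return True
        except BlockingIOError:
            if time.monotonic() >= deadline:
                return False
            time.sleep(poll)

Unlocking is the mirror image: fcntl.flock(fd, fcntl.LOCK_UN) followed by closing the descriptor.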

View File

@@ -32,9 +32,9 @@ def ismount(path):
return False
if isinstance(path, bytes):
parent = os.path.join(path, b'..')
parent = os.path.join(path, b"..")
else:
parent = os.path.join(path, '..')
parent = os.path.join(path, "..")
parent = os.path.realpath(parent)
try:
s2 = os.lstat(parent)
@@ -44,9 +44,9 @@ def ismount(path):
dev1 = s1.st_dev
dev2 = s2.st_dev
if dev1 != dev2:
return True # path/.. on a different device as path
return True # path/.. on a different device as path
ino1 = s1.st_ino
ino2 = s2.st_ino
if ino1 == ino2:
return True # path/.. is the same i-node as path
return True # path/.. is the same i-node as path
return False
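
This is the classic device/inode test (the same idea behind the stdlib's os.path.ismount): a directory is a mount point if its parent sits on a different device, or if path/.. resolves to the very same inode, as it does at a filesystem root. A compact sketch of the check, minus the error handling above:

import os

def is_mount_point(path):
    s1 = os.lstat(path)
    s2 = os.lstat(os.path.realpath(os.path.join(path, "..")))
    # Different device: the parent lives on another filesystem.
    # Same inode: path/.. loops back to path itself (filesystem root).
    return s1.st_dev != s2.st_dev or s1.st_ino == s2.st_ino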

View File

@@ -31,37 +31,44 @@ class StormConfig(SSHConfig):
@type file_obj: file
"""
order = 1
host = {"host": ['*'], "config": {}, }
host = {
"host": ["*"],
"config": {},
}
for line in file_obj:
line = line.rstrip('\n').lstrip()
if line == '':
self._config.append({
'type': 'empty_line',
'value': line,
'host': '',
'order': order,
})
line = line.rstrip("\n").lstrip()
if line == "":
self._config.append(
{
"type": "empty_line",
"value": line,
"host": "",
"order": order,
}
)
order += 1
continue
if line.startswith('#'):
self._config.append({
'type': 'comment',
'value': line,
'host': '',
'order': order,
})
if line.startswith("#"):
self._config.append(
{
"type": "comment",
"value": line,
"host": "",
"order": order,
}
)
order += 1
continue
if '=' in line:
if "=" in line:
# Ensure ProxyCommand gets properly split
if line.lower().strip().startswith('proxycommand'):
if line.lower().strip().startswith("proxycommand"):
proxy_re = re.compile(r"^(proxycommand)\s*=*\s*(.*)", re.I)
match = proxy_re.match(line)
key, value = match.group(1).lower(), match.group(2)
else:
key, value = line.split('=', 1)
key, value = line.split("=", 1)
key = key.strip().lower()
else:
# find first whitespace, and split there
@@ -69,26 +76,21 @@ class StormConfig(SSHConfig):
while (i < len(line)) and not line[i].isspace():
i += 1
if i == len(line):
raise Exception(f'Unparsable line: {line!r}')
raise Exception(f"Unparsable line: {line!r}")
key = line[:i].lower()
value = line[i:].lstrip()
if key == 'host':
if key == "host":
self._config.append(host)
value = value.split()
host = {
key: value,
'config': {},
'type': 'entry',
'order': order
}
host = {key: value, "config": {}, "type": "entry", "order": order}
order += 1
elif key in ['identityfile', 'localforward', 'remoteforward']:
if key in host['config']:
host['config'][key].append(value)
elif key in ["identityfile", "localforward", "remoteforward"]:
if key in host["config"]:
host["config"][key].append(value)
else:
host['config'][key] = [value]
elif key not in host['config']:
host['config'].update({key: value})
host["config"][key] = [value]
elif key not in host["config"]:
host["config"].update({key: value})
self._config.append(host)
@@ -108,7 +110,7 @@ class ConfigParser:
if not os.path.exists(self.ssh_config_file):
if not os.path.exists(os.path.dirname(self.ssh_config_file)):
os.makedirs(os.path.dirname(self.ssh_config_file))
open(self.ssh_config_file, 'w+').close()
open(self.ssh_config_file, "w+").close()
os.chmod(self.ssh_config_file, 0o600)
self.config_data = []
@@ -131,16 +133,18 @@ class ConfigParser:
continue
host_item = {
'host': entry["host"][0],
'options': entry.get("config"),
'type': 'entry',
'order': entry.get("order", 0),
"host": entry["host"][0],
"options": entry.get("config"),
"type": "entry",
"order": entry.get("order", 0),
}
if len(entry["host"]) > 1:
host_item.update({
'host': " ".join(entry["host"]),
})
host_item.update(
{
"host": " ".join(entry["host"]),
}
)
# minor bug in paramiko.SSHConfig that duplicates
# "Host *" entries.
if entry.get("config") and len(entry.get("config")) > 0:
@@ -149,20 +153,20 @@ class ConfigParser:
return self.config_data
def add_host(self, host, options):
self.config_data.append({
'host': host,
'options': options,
'order': self.get_last_index(),
})
self.config_data.append(
{
"host": host,
"options": options,
"order": self.get_last_index(),
}
)
return self
def update_host(self, host, options, use_regex=False):
for index, host_entry in enumerate(self.config_data):
if host_entry.get("host") == host or \
(use_regex and re.match(host, host_entry.get("host"))):
if 'deleted_fields' in options:
if host_entry.get("host") == host or (use_regex and re.match(host, host_entry.get("host"))):
if "deleted_fields" in options:
deleted_fields = options.pop("deleted_fields")
for deleted_field in deleted_fields:
del self.config_data[index]["options"][deleted_field]
@@ -174,7 +178,7 @@ class ConfigParser:
def search_host(self, search_string):
results = []
for host_entry in self.config_data:
if host_entry.get("type") != 'entry':
if host_entry.get("type") != "entry":
continue
if host_entry.get("host") == "*":
continue
@@ -201,7 +205,7 @@ class ConfigParser:
found += 1
if found == 0:
raise ValueError('No host found')
raise ValueError("No host found")
return self
def delete_all_hosts(self):
@@ -218,7 +222,7 @@ class ConfigParser:
self.config_data = sorted(self.config_data, key=itemgetter("order"))
for host_item in self.config_data:
if host_item.get("type") in ['comment', 'empty_line']:
if host_item.get("type") in ["comment", "empty_line"]:
file_content += f"{host_item.get('value')}\n"
continue
host_item_content = f"Host {host_item.get('host')}\n"
@@ -235,7 +239,7 @@ class ConfigParser:
return file_content
def write_to_ssh_config(self):
with open(self.ssh_config_file, 'w+') as f:
with open(self.ssh_config_file, "w+") as f:
data = self.dump()
if data:
f.write(data)
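
The ProxyCommand special case in the parser above exists because the command body may itself contain '=' (embedded ssh options, for example), which would fool the generic key=value split. A quick demonstration with an assumed input line:

import re

proxy_re = re.compile(r"^(proxycommand)\s*=*\s*(.*)", re.I)
line = "ProxyCommand ssh -o ServerAliveInterval=30 -W %h:%p jumphost"
# Naive line.split("=", 1) would cut at the option's "=" and mangle the key.
match = proxy_re.match(line)
key, value = match.group(1).lower(), match.group(2)
print(key)    # proxycommand
print(value)  # ssh -o ServerAliveInterval=30 -W %h:%p jumphost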

View File

@@ -41,13 +41,20 @@ class AnsibleACSError(Exception):
def acs_common_argument_spec():
return dict(
alicloud_access_key=dict(aliases=['access_key_id', 'access_key'], no_log=True,
fallback=(env_fallback, ['ALICLOUD_ACCESS_KEY', 'ALICLOUD_ACCESS_KEY_ID'])),
alicloud_secret_key=dict(aliases=['secret_access_key', 'secret_key'], no_log=True,
fallback=(env_fallback, ['ALICLOUD_SECRET_KEY', 'ALICLOUD_SECRET_ACCESS_KEY'])),
alicloud_security_token=dict(aliases=['security_token'], no_log=True,
fallback=(env_fallback, ['ALICLOUD_SECURITY_TOKEN'])),
ecs_role_name=dict(aliases=['role_name'], fallback=(env_fallback, ['ALICLOUD_ECS_ROLE_NAME']))
alicloud_access_key=dict(
aliases=["access_key_id", "access_key"],
no_log=True,
fallback=(env_fallback, ["ALICLOUD_ACCESS_KEY", "ALICLOUD_ACCESS_KEY_ID"]),
),
alicloud_secret_key=dict(
aliases=["secret_access_key", "secret_key"],
no_log=True,
fallback=(env_fallback, ["ALICLOUD_SECRET_KEY", "ALICLOUD_SECRET_ACCESS_KEY"]),
),
alicloud_security_token=dict(
aliases=["security_token"], no_log=True, fallback=(env_fallback, ["ALICLOUD_SECURITY_TOKEN"])
),
ecs_role_name=dict(aliases=["role_name"], fallback=(env_fallback, ["ALICLOUD_ECS_ROLE_NAME"])),
)
@@ -55,31 +62,38 @@ def ecs_argument_spec():
spec = acs_common_argument_spec()
spec.update(
dict(
alicloud_region=dict(required=True, aliases=['region', 'region_id'],
fallback=(env_fallback, ['ALICLOUD_REGION', 'ALICLOUD_REGION_ID'])),
alicloud_assume_role_arn=dict(fallback=(env_fallback, ['ALICLOUD_ASSUME_ROLE_ARN']),
aliases=['assume_role_arn']),
alicloud_assume_role_session_name=dict(fallback=(env_fallback, ['ALICLOUD_ASSUME_ROLE_SESSION_NAME']),
aliases=['assume_role_session_name']),
alicloud_assume_role_session_expiration=dict(type='int',
fallback=(env_fallback,
['ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION']),
aliases=['assume_role_session_expiration']),
alicloud_assume_role=dict(type='dict', aliases=['assume_role']),
profile=dict(fallback=(env_fallback, ['ALICLOUD_PROFILE'])),
shared_credentials_file=dict(fallback=(env_fallback, ['ALICLOUD_SHARED_CREDENTIALS_FILE']))
alicloud_region=dict(
required=True,
aliases=["region", "region_id"],
fallback=(env_fallback, ["ALICLOUD_REGION", "ALICLOUD_REGION_ID"]),
),
alicloud_assume_role_arn=dict(
fallback=(env_fallback, ["ALICLOUD_ASSUME_ROLE_ARN"]), aliases=["assume_role_arn"]
),
alicloud_assume_role_session_name=dict(
fallback=(env_fallback, ["ALICLOUD_ASSUME_ROLE_SESSION_NAME"]), aliases=["assume_role_session_name"]
),
alicloud_assume_role_session_expiration=dict(
type="int",
fallback=(env_fallback, ["ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION"]),
aliases=["assume_role_session_expiration"],
),
alicloud_assume_role=dict(type="dict", aliases=["assume_role"]),
profile=dict(fallback=(env_fallback, ["ALICLOUD_PROFILE"])),
shared_credentials_file=dict(fallback=(env_fallback, ["ALICLOUD_SHARED_CREDENTIALS_FILE"])),
)
)
return spec
def get_acs_connection_info(params):
ecs_params = dict(acs_access_key_id=params.get('alicloud_access_key'),
acs_secret_access_key=params.get('alicloud_secret_key'),
security_token=params.get('alicloud_security_token'),
ecs_role_name=params.get('ecs_role_name'),
user_agent='Ansible-Provider-Alicloud')
ecs_params = dict(
acs_access_key_id=params.get("alicloud_access_key"),
acs_secret_access_key=params.get("alicloud_secret_key"),
security_token=params.get("alicloud_security_token"),
ecs_role_name=params.get("ecs_role_name"),
user_agent="Ansible-Provider-Alicloud",
)
return ecs_params
@@ -88,76 +102,98 @@ def connect_to_acs(acs_module, region, **params):
if not conn:
if region not in [acs_module_region.id for acs_module_region in acs_module.regions()]:
raise AnsibleACSError(
f"Region {region} does not seem to be available for acs module {acs_module.__name__}.")
f"Region {region} does not seem to be available for acs module {acs_module.__name__}."
)
else:
raise AnsibleACSError(
f"Unknown problem connecting to region {region} for acs module {acs_module.__name__}.")
f"Unknown problem connecting to region {region} for acs module {acs_module.__name__}."
)
return conn
def get_assume_role(params):
""" Return new params """
"""Return new params"""
sts_params = get_acs_connection_info(params)
assume_role = {}
if params.get('assume_role'):
assume_role['alicloud_assume_role_arn'] = params['assume_role'].get('role_arn')
assume_role['alicloud_assume_role_session_name'] = params['assume_role'].get('session_name')
assume_role['alicloud_assume_role_session_expiration'] = params['assume_role'].get('session_expiration')
assume_role['alicloud_assume_role_policy'] = params['assume_role'].get('policy')
if params.get("assume_role"):
assume_role["alicloud_assume_role_arn"] = params["assume_role"].get("role_arn")
assume_role["alicloud_assume_role_session_name"] = params["assume_role"].get("session_name")
assume_role["alicloud_assume_role_session_expiration"] = params["assume_role"].get("session_expiration")
assume_role["alicloud_assume_role_policy"] = params["assume_role"].get("policy")
assume_role_params = {
'role_arn': params.get('alicloud_assume_role_arn') if params.get('alicloud_assume_role_arn') else assume_role.get('alicloud_assume_role_arn'),
'role_session_name': params.get('alicloud_assume_role_session_name') if params.get('alicloud_assume_role_session_name')
else assume_role.get('alicloud_assume_role_session_name'),
'duration_seconds': params.get('alicloud_assume_role_session_expiration') if params.get('alicloud_assume_role_session_expiration')
else assume_role.get('alicloud_assume_role_session_expiration', 3600),
'policy': assume_role.get('alicloud_assume_role_policy', {})
"role_arn": params.get("alicloud_assume_role_arn")
if params.get("alicloud_assume_role_arn")
else assume_role.get("alicloud_assume_role_arn"),
"role_session_name": params.get("alicloud_assume_role_session_name")
if params.get("alicloud_assume_role_session_name")
else assume_role.get("alicloud_assume_role_session_name"),
"duration_seconds": params.get("alicloud_assume_role_session_expiration")
if params.get("alicloud_assume_role_session_expiration")
else assume_role.get("alicloud_assume_role_session_expiration", 3600),
"policy": assume_role.get("alicloud_assume_role_policy", {}),
}
try:
sts = connect_to_acs(footmark.sts, params.get('alicloud_region'), **sts_params).assume_role(**assume_role_params).read()
sts_params['acs_access_key_id'], sts_params['acs_secret_access_key'], sts_params['security_token'] \
= sts['access_key_id'], sts['access_key_secret'], sts['security_token']
sts = (
connect_to_acs(footmark.sts, params.get("alicloud_region"), **sts_params)
.assume_role(**assume_role_params)
.read()
)
sts_params["acs_access_key_id"], sts_params["acs_secret_access_key"], sts_params["security_token"] = (
sts["access_key_id"],
sts["access_key_secret"],
sts["security_token"],
)
except AnsibleACSError as e:
params.fail_json(msg=str(e))
return sts_params
def get_profile(params):
if not params['alicloud_access_key'] and not params['ecs_role_name'] and params['profile']:
path = params['shared_credentials_file'] if params['shared_credentials_file'] else f"{os.getenv('HOME')}/.aliyun/config.json"
if not params["alicloud_access_key"] and not params["ecs_role_name"] and params["profile"]:
path = (
params["shared_credentials_file"]
if params["shared_credentials_file"]
else f"{os.getenv('HOME')}/.aliyun/config.json"
)
auth = {}
with open(path, 'r') as f:
for pro in json.load(f)['profiles']:
if params['profile'] == pro['name']:
with open(path, "r") as f:
for pro in json.load(f)["profiles"]:
if params["profile"] == pro["name"]:
auth = pro
if auth:
if auth['mode'] == 'AK' and auth.get('access_key_id') and auth.get('access_key_secret'):
params['alicloud_access_key'] = auth.get('access_key_id')
params['alicloud_secret_key'] = auth.get('access_key_secret')
params['alicloud_region'] = auth.get('region_id')
if auth["mode"] == "AK" and auth.get("access_key_id") and auth.get("access_key_secret"):
params["alicloud_access_key"] = auth.get("access_key_id")
params["alicloud_secret_key"] = auth.get("access_key_secret")
params["alicloud_region"] = auth.get("region_id")
params = get_acs_connection_info(params)
elif auth['mode'] == 'StsToken' and auth.get('access_key_id') and auth.get('access_key_secret') and auth.get('sts_token'):
params['alicloud_access_key'] = auth.get('access_key_id')
params['alicloud_secret_key'] = auth.get('access_key_secret')
params['security_token'] = auth.get('sts_token')
params['alicloud_region'] = auth.get('region_id')
elif (
auth["mode"] == "StsToken"
and auth.get("access_key_id")
and auth.get("access_key_secret")
and auth.get("sts_token")
):
params["alicloud_access_key"] = auth.get("access_key_id")
params["alicloud_secret_key"] = auth.get("access_key_secret")
params["security_token"] = auth.get("sts_token")
params["alicloud_region"] = auth.get("region_id")
params = get_acs_connection_info(params)
elif auth['mode'] == 'EcsRamRole':
params['ecs_role_name'] = auth.get('ram_role_name')
params['alicloud_region'] = auth.get('region_id')
elif auth["mode"] == "EcsRamRole":
params["ecs_role_name"] = auth.get("ram_role_name")
params["alicloud_region"] = auth.get("region_id")
params = get_acs_connection_info(params)
elif auth['mode'] == 'RamRoleArn' and auth.get('ram_role_arn'):
params['alicloud_access_key'] = auth.get('access_key_id')
params['alicloud_secret_key'] = auth.get('access_key_secret')
params['security_token'] = auth.get('sts_token')
params['ecs_role_name'] = auth.get('ram_role_name')
params['alicloud_assume_role_arn'] = auth.get('ram_role_arn')
params['alicloud_assume_role_session_name'] = auth.get('ram_session_name')
params['alicloud_assume_role_session_expiration'] = auth.get('expired_seconds')
params['alicloud_region'] = auth.get('region_id')
elif auth["mode"] == "RamRoleArn" and auth.get("ram_role_arn"):
params["alicloud_access_key"] = auth.get("access_key_id")
params["alicloud_secret_key"] = auth.get("access_key_secret")
params["security_token"] = auth.get("sts_token")
params["ecs_role_name"] = auth.get("ram_role_name")
params["alicloud_assume_role_arn"] = auth.get("ram_role_arn")
params["alicloud_assume_role_session_name"] = auth.get("ram_session_name")
params["alicloud_assume_role_session_expiration"] = auth.get("expired_seconds")
params["alicloud_region"] = auth.get("region_id")
params = get_assume_role(params)
elif params.get('alicloud_assume_role_arn') or params.get('assume_role'):
elif params.get("alicloud_assume_role_arn") or params.get("assume_role"):
params = get_assume_role(params)
else:
params = get_acs_connection_info(params)
@@ -165,10 +201,10 @@ def get_profile(params):
def ecs_connect(module):
""" Return an ecs connection"""
"""Return an ecs connection"""
ecs_params = get_profile(module.params)
# If we have a region specified, connect to its endpoint.
region = module.params.get('alicloud_region')
region = module.params.get("alicloud_region")
if region:
try:
ecs = connect_to_acs(footmark.ecs, region, **ecs_params)
@@ -179,10 +215,10 @@ def ecs_connect(module):
def slb_connect(module):
""" Return an slb connection"""
"""Return an slb connection"""
slb_params = get_profile(module.params)
# If we have a region specified, connect to its endpoint.
region = module.params.get('alicloud_region')
region = module.params.get("alicloud_region")
if region:
try:
slb = connect_to_acs(footmark.slb, region, **slb_params)
@@ -193,10 +229,10 @@ def slb_connect(module):
def dns_connect(module):
""" Return an dns connection"""
"""Return an dns connection"""
dns_params = get_profile(module.params)
# If we have a region specified, connect to its endpoint.
region = module.params.get('alicloud_region')
region = module.params.get("alicloud_region")
if region:
try:
dns = connect_to_acs(footmark.dns, region, **dns_params)
@@ -207,10 +243,10 @@ def dns_connect(module):
def vpc_connect(module):
""" Return an vpc connection"""
"""Return an vpc connection"""
vpc_params = get_profile(module.params)
# If we have a region specified, connect to its endpoint.
region = module.params.get('alicloud_region')
region = module.params.get("alicloud_region")
if region:
try:
vpc = connect_to_acs(footmark.vpc, region, **vpc_params)
@@ -221,10 +257,10 @@ def vpc_connect(module):
def rds_connect(module):
""" Return an rds connection"""
"""Return an rds connection"""
rds_params = get_profile(module.params)
# If we have a region specified, connect to its endpoint.
region = module.params.get('alicloud_region')
region = module.params.get("alicloud_region")
if region:
try:
rds = connect_to_acs(footmark.rds, region, **rds_params)
@@ -235,10 +271,10 @@ def rds_connect(module):
def ess_connect(module):
""" Return an ess connection"""
"""Return an ess connection"""
ess_params = get_profile(module.params)
# If we have a region specified, connect to its endpoint.
region = module.params.get('alicloud_region')
region = module.params.get("alicloud_region")
if region:
try:
ess = connect_to_acs(footmark.ess, region, **ess_params)
@@ -249,10 +285,10 @@ def ess_connect(module):
def sts_connect(module):
""" Return an sts connection"""
"""Return an sts connection"""
sts_params = get_profile(module.params)
# If we have a region specified, connect to its endpoint.
region = module.params.get('alicloud_region')
region = module.params.get("alicloud_region")
if region:
try:
sts = connect_to_acs(footmark.sts, region, **sts_params)
@@ -263,10 +299,10 @@ def sts_connect(module):
def ram_connect(module):
""" Return an ram connection"""
"""Return an ram connection"""
ram_params = get_profile(module.params)
# If we have a region specified, connect to its endpoint.
region = module.params.get('alicloud_region')
region = module.params.get("alicloud_region")
if region:
try:
ram = connect_to_acs(footmark.ram, region, **ram_params)
@@ -277,10 +313,10 @@ def ram_connect(module):
def market_connect(module):
""" Return an market connection"""
"""Return an market connection"""
market_params = get_profile(module.params)
# If we have a region specified, connect to its endpoint.
region = module.params.get('alicloud_region')
region = module.params.get("alicloud_region")
if region:
try:
market = connect_to_acs(footmark.market, region, **market_params)
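
For orientation, get_profile above reads the Aliyun CLI credentials file (~/.aliyun/config.json by default). The shape below is inferred from the keys the code accesses; values are placeholders, not real credentials:

# What json.load(f) is expected to return, judging by the keys read above:
config = {
    "profiles": [
        {"name": "default", "mode": "AK",
         "access_key_id": "<id>", "access_key_secret": "<secret>",
         "region_id": "cn-hangzhou"},
        {"name": "assume", "mode": "RamRoleArn",
         "access_key_id": "<id>", "access_key_secret": "<secret>", "sts_token": "<token>",
         "ram_role_arn": "acs:ram::<account>:role/<name>", "ram_session_name": "ansible",
         "expired_seconds": 3600, "region_id": "cn-hangzhou"},
    ]
}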

View File

@@ -1,4 +1,3 @@
# Copyright (c) 2024, Stanislav Shamilov <shamilovstas@protonmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
@@ -10,18 +9,10 @@ import re
from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
__state_map = {
"present": "--install",
"absent": "--uninstall"
}
__state_map = {"present": "--install", "absent": "--uninstall"}
# sdkmanager --help 2>&1 | grep -A 2 -- --channel
__channel_map = {
"stable": 0,
"beta": 1,
"dev": 2,
"canary": 3
}
__channel_map = {"stable": 0, "beta": 1, "dev": 2, "canary": 3}
def __map_channel(channel_name):
@@ -33,18 +24,18 @@ def __map_channel(channel_name):
def sdkmanager_runner(module, **kwargs):
return CmdRunner(
module,
command='sdkmanager',
command="sdkmanager",
arg_formats=dict(
state=cmd_runner_fmt.as_map(__state_map),
name=cmd_runner_fmt.as_list(),
installed=cmd_runner_fmt.as_fixed("--list_installed"),
list=cmd_runner_fmt.as_fixed('--list'),
list=cmd_runner_fmt.as_fixed("--list"),
newer=cmd_runner_fmt.as_fixed("--newer"),
sdk_root=cmd_runner_fmt.as_opt_eq_val("--sdk_root"),
channel=cmd_runner_fmt.as_func(lambda x: [f"--channel={__map_channel(x)}"])
channel=cmd_runner_fmt.as_func(lambda x: [f"--channel={__map_channel(x)}"]),
),
force_lang="C.UTF-8", # Without this, sdkmanager binary crashes
**kwargs
**kwargs,
)
@@ -72,43 +63,45 @@ class SdkManagerException(Exception):
class AndroidSdkManager:
_RE_INSTALLED_PACKAGES_HEADER = re.compile(r'^Installed packages:$')
_RE_UPDATABLE_PACKAGES_HEADER = re.compile(r'^Available Updates:$')
_RE_INSTALLED_PACKAGES_HEADER = re.compile(r"^Installed packages:$")
_RE_UPDATABLE_PACKAGES_HEADER = re.compile(r"^Available Updates:$")
# Example: ' platform-tools | 27.0.0 | Android SDK Platform-Tools 27 | platform-tools '
_RE_INSTALLED_PACKAGE = re.compile(r'^\s*(?P<name>\S+)\s*\|\s*[0-9][^|]*\b\s*\|\s*.+\s*\|\s*(\S+)\s*$')
_RE_INSTALLED_PACKAGE = re.compile(r"^\s*(?P<name>\S+)\s*\|\s*[0-9][^|]*\b\s*\|\s*.+\s*\|\s*(\S+)\s*$")
# Example: ' platform-tools | 27.0.0 | 35.0.2'
_RE_UPDATABLE_PACKAGE = re.compile(r'^\s*(?P<name>\S+)\s*\|\s*[0-9][^|]*\b\s*\|\s*[0-9].*\b\s*$')
_RE_UPDATABLE_PACKAGE = re.compile(r"^\s*(?P<name>\S+)\s*\|\s*[0-9][^|]*\b\s*\|\s*[0-9].*\b\s*$")
_RE_UNKNOWN_PACKAGE = re.compile(r'^Warning: Failed to find package \'(?P<package>\S+)\'\s*$')
_RE_ACCEPT_LICENSE = re.compile(r'^The following packages can not be installed since their licenses or those of '
r'the packages they depend on were not accepted')
_RE_UNKNOWN_PACKAGE = re.compile(r"^Warning: Failed to find package \'(?P<package>\S+)\'\s*$")
_RE_ACCEPT_LICENSE = re.compile(
r"^The following packages can not be installed since their licenses or those of "
r"the packages they depend on were not accepted"
)
def __init__(self, module):
self.runner = sdkmanager_runner(module)
def get_installed_packages(self):
with self.runner('installed sdk_root channel') as ctx:
with self.runner("installed sdk_root channel") as ctx:
rc, stdout, stderr = ctx.run()
return self._parse_packages(stdout, self._RE_INSTALLED_PACKAGES_HEADER, self._RE_INSTALLED_PACKAGE)
def get_updatable_packages(self):
with self.runner('list newer sdk_root channel') as ctx:
with self.runner("list newer sdk_root channel") as ctx:
rc, stdout, stderr = ctx.run()
return self._parse_packages(stdout, self._RE_UPDATABLE_PACKAGES_HEADER, self._RE_UPDATABLE_PACKAGE)
def apply_packages_changes(self, packages, accept_licenses=False):
""" Install or delete packages, depending on the `module.vars.state` parameter """
"""Install or delete packages, depending on the `module.vars.state` parameter"""
if len(packages) == 0:
return 0, '', ''
return 0, "", ""
if accept_licenses:
license_prompt_answer = 'y'
license_prompt_answer = "y"
else:
license_prompt_answer = 'N'
license_prompt_answer = "N"
for package in packages:
with self.runner('state name sdk_root channel', data=license_prompt_answer) as ctx:
with self.runner("state name sdk_root channel", data=license_prompt_answer) as ctx:
rc, stdout, stderr = ctx.run(name=package.name)
for line in stdout.splitlines():
@@ -118,14 +111,14 @@ class AndroidSdkManager:
if rc != 0:
self._try_parse_stderr(stderr)
return rc, stdout, stderr
return 0, '', ''
return 0, "", ""
def _try_parse_stderr(self, stderr):
data = stderr.splitlines()
for line in data:
unknown_package_regex = self._RE_UNKNOWN_PACKAGE.match(line)
if unknown_package_regex:
package = unknown_package_regex.group('package')
package = unknown_package_regex.group("package")
raise SdkManagerException(f"Unknown package {package}")
@staticmethod
@@ -142,5 +135,5 @@ class AndroidSdkManager:
else:
p = row_regexp.match(line)
if p:
packages.add(Package(p.group('name')))
packages.add(Package(p.group("name")))
return packages
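
The table-row regexes above can be exercised directly against the sample line quoted in the code comment (a quick sanity check, not part of the module):

import re

row = " platform-tools | 27.0.0 | Android SDK Platform-Tools 27 | platform-tools "
pattern = re.compile(r"^\s*(?P<name>\S+)\s*\|\s*[0-9][^|]*\b\s*\|\s*.+\s*\|\s*(\S+)\s*$")
match = pattern.match(row)
print(match.group("name"))  # platform-tools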

View File

@@ -14,9 +14,9 @@ def normalize_subvolume_path(path):
Normalizes btrfs subvolume paths to ensure exactly one leading slash, no trailing slashes and no consecutive slashes.
In addition, if the path is prefixed with a leading <FS_TREE>, this value is removed.
"""
fstree_stripped = re.sub(r'^<FS_TREE>', '', path)
result = re.sub(r'/+$', '', re.sub(r'/+', '/', f"/{fstree_stripped}"))
return result if len(result) > 0 else '/'
fstree_stripped = re.sub(r"^<FS_TREE>", "", path)
result = re.sub(r"/+$", "", re.sub(r"/+", "/", f"/{fstree_stripped}"))
return result if len(result) > 0 else "/"
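# Worked examples of the normalization above (illustrative inputs):
#   normalize_subvolume_path("<FS_TREE>/@/snapshots//daily/") -> "/@/snapshots/daily"
#   normalize_subvolume_path("home")                          -> "/home"
#   normalize_subvolume_path("")                              -> "/"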
class BtrfsModuleException(Exception):
@@ -24,7 +24,6 @@ class BtrfsModuleException(Exception):
class BtrfsCommands:
"""
Provides access to a subset of the Btrfs command line
"""
@@ -40,43 +39,43 @@ class BtrfsCommands:
filesystems = []
current = None
for line in stdout:
if line.startswith('Label'):
if line.startswith("Label"):
current = self.__parse_filesystem(line)
filesystems.append(current)
elif line.startswith('devid'):
current['devices'].append(self.__parse_filesystem_device(line))
elif line.startswith("devid"):
current["devices"].append(self.__parse_filesystem_device(line))
return filesystems
def __parse_filesystem(self, line):
label = re.sub(r'\s*uuid:.*$', '', re.sub(r'^Label:\s*', '', line))
id = re.sub(r'^.*uuid:\s*', '', line)
label = re.sub(r"\s*uuid:.*$", "", re.sub(r"^Label:\s*", "", line))
id = re.sub(r"^.*uuid:\s*", "", line)
filesystem = {}
filesystem['label'] = label.strip("'") if label != 'none' else None
filesystem['uuid'] = id
filesystem['devices'] = []
filesystem['mountpoints'] = []
filesystem['subvolumes'] = []
filesystem['default_subvolid'] = None
filesystem["label"] = label.strip("'") if label != "none" else None
filesystem["uuid"] = id
filesystem["devices"] = []
filesystem["mountpoints"] = []
filesystem["subvolumes"] = []
filesystem["default_subvolid"] = None
return filesystem
def __parse_filesystem_device(self, line):
return re.sub(r'^.*path\s', '', line)
return re.sub(r"^.*path\s", "", line)
def subvolumes_list(self, filesystem_path):
command = f"{self.__btrfs} subvolume list -tap {filesystem_path}"
result = self.__module.run_command(command, check_rc=True)
stdout = [x.split('\t') for x in result[1].splitlines()]
subvolumes = [{'id': 5, 'parent': None, 'path': '/'}]
stdout = [x.split("\t") for x in result[1].splitlines()]
subvolumes = [{"id": 5, "parent": None, "path": "/"}]
if len(stdout) > 2:
subvolumes.extend([self.__parse_subvolume_list_record(x) for x in stdout[2:]])
return subvolumes
def __parse_subvolume_list_record(self, item):
return {
'id': int(item[0]),
'parent': int(item[2]),
'path': normalize_subvolume_path(item[5]),
"id": int(item[0]),
"parent": int(item[2]),
"path": normalize_subvolume_path(item[5]),
}
def subvolume_get_default(self, filesystem_path):
@@ -103,7 +102,6 @@ class BtrfsCommands:
class BtrfsInfoProvider:
"""
Utility providing details of the currently available btrfs filesystems
"""
@@ -117,15 +115,14 @@ class BtrfsInfoProvider:
filesystems = self.__btrfs_api.filesystem_show()
mountpoints = self.__find_mountpoints()
for filesystem in filesystems:
device_mountpoints = self.__filter_mountpoints_for_devices(mountpoints, filesystem['devices'])
filesystem['mountpoints'] = device_mountpoints
device_mountpoints = self.__filter_mountpoints_for_devices(mountpoints, filesystem["devices"])
filesystem["mountpoints"] = device_mountpoints
if len(device_mountpoints) > 0:
# any path within the filesystem can be used to query metadata
mountpoint = device_mountpoints[0]['mountpoint']
filesystem['subvolumes'] = self.get_subvolumes(mountpoint)
filesystem['default_subvolid'] = self.get_default_subvolume_id(mountpoint)
mountpoint = device_mountpoints[0]["mountpoint"]
filesystem["subvolumes"] = self.get_subvolumes(mountpoint)
filesystem["default_subvolid"] = self.get_default_subvolume_id(mountpoint)
return filesystems
@@ -140,7 +137,7 @@ class BtrfsInfoProvider:
return self.__btrfs_api.subvolume_get_default(filesystem_path)
def __filter_mountpoints_for_devices(self, mountpoints, devices):
return [m for m in mountpoints if (m['device'] in devices)]
return [m for m in mountpoints if (m["device"] in devices)]
def __find_mountpoints(self):
command = f"{self.__findmnt_path} -t btrfs -nvP"
@@ -154,28 +151,29 @@ class BtrfsInfoProvider:
return mountpoints
def __parse_mountpoint_pairs(self, line):
pattern = re.compile(r'^TARGET="(?P<target>.*)"\s+SOURCE="(?P<source>.*)"\s+FSTYPE="(?P<fstype>.*)"\s+OPTIONS="(?P<options>.*)"\s*$')
pattern = re.compile(
r'^TARGET="(?P<target>.*)"\s+SOURCE="(?P<source>.*)"\s+FSTYPE="(?P<fstype>.*)"\s+OPTIONS="(?P<options>.*)"\s*$'
)
match = pattern.search(line)
if match is not None:
groups = match.groupdict()
return {
'mountpoint': groups['target'],
'device': groups['source'],
'subvolid': self.__extract_mount_subvolid(groups['options']),
"mountpoint": groups["target"],
"device": groups["source"],
"subvolid": self.__extract_mount_subvolid(groups["options"]),
}
else:
raise BtrfsModuleException(f"Failed to parse findmnt result for line: '{line}'")
def __extract_mount_subvolid(self, mount_options):
for option in mount_options.split(','):
if option.startswith('subvolid='):
return int(option[len('subvolid='):])
for option in mount_options.split(","):
if option.startswith("subvolid="):
return int(option[len("subvolid=") :])
raise BtrfsModuleException(f"Failed to find subvolid for mountpoint in options '{mount_options}'")
class BtrfsSubvolume:
"""
Wrapper class providing convenience methods for inspection of a btrfs subvolume
"""
@@ -219,8 +217,8 @@ class BtrfsSubvolume:
"""
path = self.path
if absolute_child_path.startswith(path):
relative = absolute_child_path[len(path):]
return re.sub(r'^/*', '', relative)
relative = absolute_child_path[len(path) :]
return re.sub(r"^/*", "", relative)
else:
raise BtrfsModuleException(f"Path '{absolute_child_path}' doesn't start with '{path}'")
@@ -241,19 +239,18 @@ class BtrfsSubvolume:
@property
def name(self):
return self.path.split('/').pop()
return self.path.split("/").pop()
@property
def path(self):
return self.__info['path']
return self.__info["path"]
@property
def parent(self):
return self.__info['parent']
return self.__info["parent"]
class BtrfsFilesystem:
"""
Wrapper class providing convenience methods for inspection of a btrfs filesystem
"""
@@ -262,14 +259,14 @@ class BtrfsFilesystem:
self.__provider = provider
# constant for module execution
self.__uuid = info['uuid']
self.__label = info['label']
self.__devices = info['devices']
self.__uuid = info["uuid"]
self.__label = info["label"]
self.__devices = info["devices"]
# refreshable
self.__default_subvolid = info['default_subvolid'] if 'default_subvolid' in info else None
self.__update_mountpoints(info['mountpoints'] if 'mountpoints' in info else [])
self.__update_subvolumes(info['subvolumes'] if 'subvolumes' in info else [])
self.__default_subvolid = info["default_subvolid"] if "default_subvolid" in info else None
self.__update_mountpoints(info["mountpoints"] if "mountpoints" in info else [])
self.__update_subvolumes(info["subvolumes"] if "subvolumes" in info else [])
@property
def uuid(self):
@@ -299,8 +296,8 @@ class BtrfsFilesystem:
def __update_mountpoints(self, mountpoints):
self.__mountpoints = dict()
for i in mountpoints:
subvolid = i['subvolid']
mountpoint = i['mountpoint']
subvolid = i["subvolid"]
mountpoint = i["mountpoint"]
if subvolid not in self.__mountpoints:
self.__mountpoints[subvolid] = []
self.__mountpoints[subvolid].append(mountpoint)
@@ -315,7 +312,7 @@ class BtrfsFilesystem:
# TODO strategy for retaining information on deleted subvolumes?
self.__subvolumes = dict()
for subvolume in subvolumes:
self.__subvolumes[subvolume['id']] = subvolume
self.__subvolumes[subvolume["id"]] = subvolume
def refresh_default_subvolume(self):
filesystem_path = self.get_any_mountpoint()
@@ -336,8 +333,8 @@ class BtrfsFilesystem:
def get_subvolume_by_name(self, subvolume):
for subvolume_info in self.__subvolumes.values():
if subvolume_info['path'] == subvolume:
return BtrfsSubvolume(self, subvolume_info['id'])
if subvolume_info["path"] == subvolume:
return BtrfsSubvolume(self, subvolume_info["id"])
return None
def get_any_mountpoint(self):
@@ -361,9 +358,9 @@ class BtrfsFilesystem:
subvolumes_by_path = self.__get_subvolumes_by_path()
while len(subvolume) > 1:
if subvolume in subvolumes_by_path:
return BtrfsSubvolume(self, subvolumes_by_path[subvolume]['id'])
return BtrfsSubvolume(self, subvolumes_by_path[subvolume]["id"])
else:
subvolume = re.sub(r'/[^/]+$', '', subvolume)
subvolume = re.sub(r"/[^/]+$", "", subvolume)
return BtrfsSubvolume(self, 5)
@@ -378,12 +375,12 @@ class BtrfsFilesystem:
return nearest.get_mounted_path() + os.path.sep + nearest.get_child_relative_path(subvolume_name)
def get_subvolume_children(self, subvolume_id):
return [BtrfsSubvolume(self, x['id']) for x in self.__subvolumes.values() if x['parent'] == subvolume_id]
return [BtrfsSubvolume(self, x["id"]) for x in self.__subvolumes.values() if x["parent"] == subvolume_id]
def __get_subvolumes_by_path(self):
result = {}
for s in self.__subvolumes.values():
path = s['path']
path = s["path"]
result[path] = s
return result
@@ -394,25 +391,26 @@ class BtrfsFilesystem:
subvolumes = []
sources = self.__subvolumes.values() if self.__subvolumes is not None else []
for subvolume in sources:
id = subvolume['id']
subvolumes.append({
'id': id,
'path': subvolume['path'],
'parent': subvolume['parent'],
'mountpoints': self.get_mountpoints_by_subvolume_id(id),
})
id = subvolume["id"]
subvolumes.append(
{
"id": id,
"path": subvolume["path"],
"parent": subvolume["parent"],
"mountpoints": self.get_mountpoints_by_subvolume_id(id),
}
)
return {
'default_subvolume': self.__default_subvolid,
'devices': self.__devices,
'label': self.__label,
'uuid': self.__uuid,
'subvolumes': subvolumes,
"default_subvolume": self.__default_subvolid,
"devices": self.__devices,
"label": self.__label,
"uuid": self.__uuid,
"subvolumes": subvolumes,
}
class BtrfsFilesystemsProvider:
"""
Provides methods to query available btrfs filesystems
"""
@@ -423,8 +421,8 @@ class BtrfsFilesystemsProvider:
self.__filesystems = None
def get_matching_filesystem(self, criteria):
if criteria['device'] is not None:
criteria['device'] = os.path.realpath(criteria['device'])
if criteria["device"] is not None:
criteria["device"] = os.path.realpath(criteria["device"])
self.__check_init()
matching = [f for f in self.__filesystems.values() if self.__filesystem_matches_criteria(f, criteria)]
@@ -436,9 +434,11 @@ class BtrfsFilesystemsProvider:
)
def __filesystem_matches_criteria(self, filesystem, criteria):
return ((criteria['uuid'] is None or filesystem.uuid == criteria['uuid']) and
(criteria['label'] is None or filesystem.label == criteria['label']) and
(criteria['device'] is None or filesystem.contains_device(criteria['device'])))
return (
(criteria["uuid"] is None or filesystem.uuid == criteria["uuid"])
and (criteria["label"] is None or filesystem.label == criteria["label"])
and (criteria["device"] is None or filesystem.contains_device(criteria["device"]))
)
def get_filesystem_for_device(self, device):
real_device = os.path.realpath(device)
@@ -456,5 +456,5 @@ class BtrfsFilesystemsProvider:
if self.__filesystems is None:
self.__filesystems = dict()
for f in self.__provider.get_filesystems():
uuid = f['uuid']
uuid = f["uuid"]
self.__filesystems[uuid] = BtrfsFilesystem(f, self.__provider, self.__module)
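
Earlier in this file, BtrfsInfoProvider discovers mountpoints by parsing findmnt -t btrfs -nvP output. A condensed, self-contained sketch of that round trip on an assumed sample line:

import re

line = 'TARGET="/mnt/data" SOURCE="/dev/sda1" FSTYPE="btrfs" OPTIONS="rw,relatime,subvolid=256,subvol=/@data"'
pattern = re.compile(
    r'^TARGET="(?P<target>.*)"\s+SOURCE="(?P<source>.*)"\s+FSTYPE="(?P<fstype>.*)"\s+OPTIONS="(?P<options>.*)"\s*$'
)
groups = pattern.search(line).groupdict()
# Extract subvolid=N from the comma-separated mount options, as the module does.
subvolid = next(
    int(option[len("subvolid="):])
    for option in groups["options"].split(",")
    if option.startswith("subvolid=")
)
print(groups["target"], groups["source"], subvolid)  # /mnt/data /dev/sda1 256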

View File

@@ -36,7 +36,7 @@ import time
def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
""" Customizable exponential backoff strategy.
"""Customizable exponential backoff strategy.
Args:
retries (int): Maximum number of times to retry a request.
delay (float): Initial (base) delay.
@@ -54,15 +54,17 @@ def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
>>> list(backoff())
[2, 4, 8, 16, 32, 60, 60, 60, 60, 60]
"""
def backoff_gen():
for retry in range(0, retries):
sleep = delay * backoff ** retry
sleep = delay * backoff**retry
yield sleep if max_delay is None else min(sleep, max_delay)
return backoff_gen
def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
""" Implements the "Full Jitter" backoff strategy described here
"""Implements the "Full Jitter" backoff strategy described here
https://www.awsarchitectureblog.com/2015/03/backoff.html
Args:
retries (int): Maximum number of times to retry a request.
@@ -83,23 +85,26 @@ def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
>>> list(backoff())
[2, 1, 6, 6, 31]
"""
def backoff_gen():
for retry in range(0, retries):
yield _random.randint(0, min(max_delay, delay * 2 ** retry))
yield _random.randint(0, min(max_delay, delay * 2**retry))
return backoff_gen
class CloudRetry:
""" CloudRetry can be used by any cloud provider, in order to implement a
backoff algorithm/retry effect based on Status Code from Exceptions.
"""CloudRetry can be used by any cloud provider, in order to implement a
backoff algorithm/retry effect based on Status Code from Exceptions.
"""
# This is the base class of the exception.
# AWS Example botocore.exceptions.ClientError
base_class = None
@staticmethod
def status_code_from_exception(error):
""" Return the status code from the exception object
"""Return the status code from the exception object
Args:
error (object): The exception itself.
"""
@@ -107,7 +112,7 @@ class CloudRetry:
@staticmethod
def found(response_code, catch_extra_error_codes=None):
""" Return True if the Response Code to retry on was found.
"""Return True if the Response Code to retry on was found.
Args:
response_code (str): This is the Response Code that is being matched against.
"""
@@ -115,13 +120,14 @@ class CloudRetry:
@classmethod
def _backoff(cls, backoff_strategy, catch_extra_error_codes=None):
""" Retry calling the Cloud decorated function using the provided
"""Retry calling the Cloud decorated function using the provided
backoff strategy.
Args:
backoff_strategy (callable): Callable that returns a generator. The
generator should yield sleep times for each retry of the decorated
function.
"""
def deco(f):
@wraps(f)
def retry_func(*args, **kwargs):
@@ -163,8 +169,10 @@ class CloudRetry:
max_delay (int or None): maximum amount of time to wait between retries.
default=60
"""
return cls._backoff(_exponential_backoff(
retries=retries, delay=delay, backoff=backoff, max_delay=max_delay), catch_extra_error_codes)
return cls._backoff(
_exponential_backoff(retries=retries, delay=delay, backoff=backoff, max_delay=max_delay),
catch_extra_error_codes,
)
@classmethod
def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_codes=None):
@@ -182,8 +190,9 @@ class CloudRetry:
max_delay (int): maximum amount of time to wait between retries.
default=60
"""
return cls._backoff(_full_jitter_backoff(
retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes)
return cls._backoff(
_full_jitter_backoff(retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes
)
@classmethod
def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
@@ -204,4 +213,9 @@ class CloudRetry:
default=1.1
"""
return cls.exponential_backoff(
retries=tries - 1, delay=delay, backoff=backoff, max_delay=None, catch_extra_error_codes=catch_extra_error_codes)
retries=tries - 1,
delay=delay,
backoff=backoff,
max_delay=None,
catch_extra_error_codes=catch_extra_error_codes,
)
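
Putting CloudRetry together: a provider subclasses it, points base_class at its SDK's exception type, and implements status_code_from_exception and found; the returned decorator then retries matching failures with the chosen backoff. A minimal hypothetical subclass, with names that are illustrative rather than any real provider binding:

class DemoAPIError(Exception):
    def __init__(self, status):
        super().__init__(status)
        self.status = status

class DemoRetry(CloudRetry):
    base_class = DemoAPIError

    @staticmethod
    def status_code_from_exception(error):
        return error.status

    @staticmethod
    def found(response_code, catch_extra_error_codes=None):
        return response_code in (429, 503)  # throttling / temporarily unavailable

@DemoRetry.jittered_backoff(retries=5, delay=2)
def flaky_call():
    ...  # call the cloud API; raises DemoAPIError on failure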

View File

@@ -75,8 +75,17 @@ class CmdRunner:
def _prepare_args_order(order):
return tuple(order) if is_sequence(order) else tuple(order.split())
def __init__(self, module, command, arg_formats=None, default_args_order=(),
check_rc=False, force_lang="C", path_prefix=None, environ_update=None):
def __init__(
self,
module,
command,
arg_formats=None,
default_args_order=(),
check_rc=False,
force_lang="C",
path_prefix=None,
environ_update=None,
):
self.module = module
self.command = _ensure_list(command)
self.default_args_order = self._prepare_args_order(default_args_order)
@@ -101,7 +110,11 @@ class CmdRunner:
self.environ_update = environ_update
_cmd = self.command[0]
self.command[0] = _cmd if (os.path.isabs(_cmd) or '/' in _cmd) else module.get_bin_path(_cmd, opt_dirs=path_prefix, required=True)
self.command[0] = (
_cmd
if (os.path.isabs(_cmd) or "/" in _cmd)
else module.get_bin_path(_cmd, opt_dirs=path_prefix, required=True)
)
@property
def binary(self):
@@ -116,11 +129,14 @@ class CmdRunner:
for p in args_order:
if p not in self.arg_formats:
raise MissingArgumentFormat(p, args_order, tuple(self.arg_formats.keys()))
return _CmdRunnerContext(runner=self,
args_order=args_order,
output_process=output_process,
check_mode_skip=check_mode_skip,
check_mode_return=check_mode_return, **kwargs)
return _CmdRunnerContext(
runner=self,
args_order=args_order,
output_process=output_process,
check_mode_skip=check_mode_skip,
check_mode_return=check_mode_return,
**kwargs,
)
def has_arg_format(self, arg):
return arg in self.arg_formats
@@ -139,17 +155,19 @@ class _CmdRunnerContext:
self.run_command_args = dict(kwargs)
self.environ_update = runner.environ_update
self.environ_update.update(self.run_command_args.get('environ_update', {}))
self.environ_update.update(self.run_command_args.get("environ_update", {}))
if runner.force_lang:
self.environ_update.update({
'LANGUAGE': runner.force_lang,
'LC_ALL': runner.force_lang,
})
self.run_command_args['environ_update'] = self.environ_update
self.environ_update.update(
{
"LANGUAGE": runner.force_lang,
"LC_ALL": runner.force_lang,
}
)
self.run_command_args["environ_update"] = self.environ_update
if 'check_rc' not in self.run_command_args:
self.run_command_args['check_rc'] = runner.check_rc
self.check_rc = self.run_command_args['check_rc']
if "check_rc" not in self.run_command_args:
self.run_command_args["check_rc"] = runner.check_rc
self.check_rc = self.run_command_args["check_rc"]
self.cmd = None
self.results_rc = None
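
Usage-wise, a CmdRunner is constructed once with the argument formats and then invoked as a context manager per call, as the sdkmanager code earlier in this commit does. A hedged sketch, where module stands in for an AnsibleModule instance and the "-u" flag is hypothetical:

runner = CmdRunner(
    module,
    command="echo",
    arg_formats=dict(
        msg=cmd_runner_fmt.as_list(),
        upper=cmd_runner_fmt.as_bool("-u"),  # hypothetical flag, for illustration
    ),
)
with runner("upper msg") as ctx:
    rc, stdout, stderr = ctx.run(upper=False, msg="hello")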

View File

@@ -46,7 +46,9 @@ def as_bool(args_true, args_false=None, ignore_none=None):
ignore_none = False
else:
args_false = []
return _ArgFormat(lambda value: _ensure_list(args_true) if value else _ensure_list(args_false), ignore_none=ignore_none)
return _ArgFormat(
lambda value: _ensure_list(args_true) if value else _ensure_list(args_false), ignore_none=ignore_none
)
def as_bool_not(args):
@@ -73,6 +75,7 @@ def as_list(ignore_none=None, min_len=0, max_len=None):
if max_len is not None and len(value) > max_len:
raise ValueError(f"Parameter must have at most {max_len} element(s)")
return value
return _ArgFormat(func, ignore_none=ignore_none)
@@ -96,6 +99,7 @@ def unpack_args(func):
@wraps(func)
def wrapper(v):
return func(*v)
return wrapper
@@ -103,6 +107,7 @@ def unpack_kwargs(func):
@wraps(func)
def wrapper(v):
return func(**v)
return wrapper
@@ -115,7 +120,9 @@ def stack(fmt):
stack = [new_func(v) for v in value if v]
stack = [x for args in stack for x in args]
return stack
return _ArgFormat(stacking, ignore_none=True)
return wrapper
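
Conceptually, each of these helpers builds a formatter that turns one module parameter into a list of CLI tokens. Roughly, with assumed values:

# Illustrative expansions (not literal call syntax):
#   as_bool("--force"):          True -> ["--force"], False -> []
#   as_fixed("--list"):          any value -> ["--list"]
#   as_list():                   ["a", "b"] -> ["a", "b"]
#   as_opt_eq_val("--sdk_root"): "/opt/sdk" -> ["--sdk_root=/opt/sdk"]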

View File

@@ -1,4 +1,3 @@
# Copyright (c) 2022, Håkon Lerring
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
@@ -65,12 +64,12 @@ def camel_case_key(key):
def validate_check(check):
validate_duration_keys = ['Interval', 'Ttl', 'Timeout']
validate_duration_keys = ["Interval", "Ttl", "Timeout"]
validate_tcp_regex = r"(?P<host>.*):(?P<port>(?:[0-9]+))$"
if check.get('Tcp') is not None:
match = re.match(validate_tcp_regex, check['Tcp'])
if check.get("Tcp") is not None:
match = re.match(validate_tcp_regex, check["Tcp"])
if not match:
raise Exception('tcp check must be in host:port format')
raise Exception("tcp check must be in host:port format")
for duration in validate_duration_keys:
if duration in check and check[duration] is not None:
check[duration] = validate_duration(check[duration])
@@ -99,12 +98,7 @@ def _normalize_params(params, arg_spec):
if k not in arg_spec or v is None: # Alias
continue
spec = arg_spec[k]
if (
spec.get("type") == "list"
and spec.get("elements") == "dict"
and spec.get("options")
and v
):
if spec.get("type") == "list" and spec.get("elements") == "dict" and spec.get("options") and v:
v = [_normalize_params(d, spec["options"]) for d in v]
elif spec.get("type") == "dict" and spec.get("options") and v:
v = _normalize_params(v, spec["options"])
@@ -130,9 +124,7 @@ class _ConsulModule:
self._module = module
self.params = _normalize_params(module.params, module.argument_spec)
self.api_params = {
k: camel_case_key(k)
for k in self.params
if k not in STATE_PARAMETER and k not in AUTH_ARGUMENTS_SPEC
k: camel_case_key(k) for k in self.params if k not in STATE_PARAMETER and k not in AUTH_ARGUMENTS_SPEC
}
self.operational_attributes.update({"CreateIndex", "CreateTime", "Hash", "ModifyIndex"})
@@ -192,11 +184,9 @@ class _ConsulModule:
def needs_camel_case(k):
spec = self._module.argument_spec[k]
return (
spec.get("type") == "list"
and spec.get("elements") == "dict"
and spec.get("options")
) or (spec.get("type") == "dict" and spec.get("options"))
return (spec.get("type") == "list" and spec.get("elements") == "dict" and spec.get("options")) or (
spec.get("type") == "dict" and spec.get("options")
)
if k in self.api_params and v is not None:
if isinstance(v, dict) and needs_camel_case(k):
@@ -221,9 +211,7 @@ class _ConsulModule:
return False
def prepare_object(self, existing, obj):
existing = {
k: v for k, v in existing.items() if k not in self.operational_attributes
}
existing = {k: v for k, v in existing.items() if k not in self.operational_attributes}
for k, v in obj.items():
existing[k] = v
return existing
@@ -319,9 +307,7 @@ class _ConsulModule:
ca_path=ca_path,
)
response_data = response.read()
status = (
response.status if hasattr(response, "status") else response.getcode()
)
status = response.status if hasattr(response, "status") else response.getcode()
except urllib_error.URLError as e:
if isinstance(e, urllib_error.HTTPError):
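
The host:port validation in validate_check above anchors on the final colon (the greedy .* consumes any earlier ones), so hosts that themselves contain colons still parse. A quick demonstration with assumed values:

import re

validate_tcp_regex = r"(?P<host>.*):(?P<port>(?:[0-9]+))$"
match = re.match(validate_tcp_regex, "::1:8500")
print(match.group("host"), match.group("port"))  # ::1 8500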

View File

@@ -1,4 +1,3 @@
# Copyright (c) 2021, Andrew Pantuso (@ajpantuso) <ajpantuso@gmail.com>
# Copyright (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -27,11 +26,12 @@ def initialize_dialect(dialect, **kwargs):
# Add Unix dialect from Python 3
class unix_dialect(csv.Dialect):
"""Describe the usual properties of Unix-generated CSV files."""
delimiter = ','
delimiter = ","
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\n'
lineterminator = "\n"
quoting = csv.QUOTE_ALL
csv.register_dialect("unix", unix_dialect)
@@ -43,19 +43,19 @@ def initialize_dialect(dialect, **kwargs):
dialect_params = {k: v for k, v in kwargs.items() if v is not None}
if dialect_params:
try:
csv.register_dialect('custom', dialect, **dialect_params)
csv.register_dialect("custom", dialect, **dialect_params)
except TypeError as e:
raise CustomDialectFailureError(f"Unable to create custom dialect: {e}")
dialect = 'custom'
dialect = "custom"
return dialect
def read_csv(data, dialect, fieldnames=None):
BOM = to_native('\ufeff')
data = to_native(data, errors='surrogate_or_strict')
BOM = to_native("\ufeff")
data = to_native(data, errors="surrogate_or_strict")
if data.startswith(BOM):
data = data[len(BOM):]
data = data[len(BOM) :]
fake_fh = StringIO(data)
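
The BOM handling matters because UTF-8-sig exports (from Excel, for instance) prepend U+FEFF, which would otherwise end up glued to the first field name. A tiny sketch of the strip-then-parse flow, using assumed data:

import csv
from io import StringIO

data = "\ufeffname,age\nalice,30\n"
if data.startswith("\ufeff"):
    data = data[len("\ufeff"):]
reader = csv.DictReader(StringIO(data))
print(next(reader))  # {'name': 'alice', 'age': '30'}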

View File

@@ -18,13 +18,13 @@ import re
#
# 1. '"' in string and '--' in string or
# "'" in string and '--' in string
PATTERN_1 = re.compile(r'(\'|\").*--')
PATTERN_1 = re.compile(r"(\'|\").*--")
# 2. union \ intersect \ except + select
PATTERN_2 = re.compile(r'(UNION|INTERSECT|EXCEPT).*SELECT', re.IGNORECASE)
PATTERN_2 = re.compile(r"(UNION|INTERSECT|EXCEPT).*SELECT", re.IGNORECASE)
# 3. ';' and any KEY_WORDS
PATTERN_3 = re.compile(r';.*(SELECT|UPDATE|INSERT|DELETE|DROP|TRUNCATE|ALTER)', re.IGNORECASE)
PATTERN_3 = re.compile(r";.*(SELECT|UPDATE|INSERT|DELETE|DROP|TRUNCATE|ALTER)", re.IGNORECASE)
class SQLParseError(Exception):
@@ -65,7 +65,7 @@ def _find_end_quote(identifier, quote_char):
return accumulate
if next_char == quote_char:
try:
identifier = identifier[quote + 2:]
identifier = identifier[quote + 2 :]
accumulate = accumulate + 2
except IndexError:
raise UnclosedQuoteError
@@ -75,7 +75,7 @@ def _find_end_quote(identifier, quote_char):
def _identifier_parse(identifier, quote_char):
if not identifier:
raise SQLParseError('Identifier name unspecified or unquoted trailing dot')
raise SQLParseError("Identifier name unspecified or unquoted trailing dot")
already_quoted = False
if identifier.startswith(quote_char):
@@ -86,20 +86,20 @@ def _identifier_parse(identifier, quote_char):
already_quoted = False
else:
if end_quote < len(identifier) - 1:
if identifier[end_quote + 1] == '.':
if identifier[end_quote + 1] == ".":
dot = end_quote + 1
first_identifier = identifier[:dot]
next_identifier = identifier[dot + 1:]
next_identifier = identifier[dot + 1 :]
further_identifiers = _identifier_parse(next_identifier, quote_char)
further_identifiers.insert(0, first_identifier)
else:
raise SQLParseError('User escaped identifiers must escape extra quotes')
raise SQLParseError("User escaped identifiers must escape extra quotes")
else:
further_identifiers = [identifier]
if not already_quoted:
try:
dot = identifier.index('.')
dot = identifier.index(".")
except ValueError:
identifier = identifier.replace(quote_char, quote_char * 2)
identifier = f"{quote_char}{identifier}{quote_char}"
@@ -111,7 +111,7 @@ def _identifier_parse(identifier, quote_char):
further_identifiers = [identifier]
else:
first_identifier = identifier[:dot]
next_identifier = identifier[dot + 1:]
next_identifier = identifier[dot + 1 :]
further_identifiers = _identifier_parse(next_identifier, quote_char)
first_identifier = first_identifier.replace(quote_char, quote_char * 2)
first_identifier = f"{quote_char}{first_identifier}{quote_char}"
@@ -123,23 +123,27 @@ def _identifier_parse(identifier, quote_char):
def pg_quote_identifier(identifier, id_type):
identifier_fragments = _identifier_parse(identifier, quote_char='"')
if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]:
raise SQLParseError(f'PostgreSQL does not support {id_type} with more than {_PG_IDENTIFIER_TO_DOT_LEVEL[id_type]} dots')
return '.'.join(identifier_fragments)
raise SQLParseError(
f"PostgreSQL does not support {id_type} with more than {_PG_IDENTIFIER_TO_DOT_LEVEL[id_type]} dots"
)
return ".".join(identifier_fragments)
def mysql_quote_identifier(identifier, id_type):
identifier_fragments = _identifier_parse(identifier, quote_char='`')
identifier_fragments = _identifier_parse(identifier, quote_char="`")
if (len(identifier_fragments) - 1) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]:
raise SQLParseError(f'MySQL does not support {id_type} with more than {_MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]} dots')
raise SQLParseError(
f"MySQL does not support {id_type} with more than {_MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]} dots"
)
special_cased_fragments = []
for fragment in identifier_fragments:
if fragment == '`*`':
special_cased_fragments.append('*')
if fragment == "`*`":
special_cased_fragments.append("*")
else:
special_cased_fragments.append(fragment)
return '.'.join(special_cased_fragments)
return ".".join(special_cased_fragments)
def is_input_dangerous(string):

View File

@@ -70,34 +70,30 @@ class DimensionDataModule:
self.module = module
if not HAS_LIBCLOUD:
self.module.fail_json(msg=missing_required_lib('libcloud'), exception=LIBCLOUD_IMP_ERR)
self.module.fail_json(msg=missing_required_lib("libcloud"), exception=LIBCLOUD_IMP_ERR)
# Credentials are common to all Dimension Data modules.
credentials = self.get_credentials()
self.user_id = credentials['user_id']
self.key = credentials['key']
self.user_id = credentials["user_id"]
self.key = credentials["key"]
# Region and location are common to all Dimension Data modules.
region = self.module.params['region']
self.region = f'dd-{region}'
self.location = self.module.params['location']
region = self.module.params["region"]
self.region = f"dd-{region}"
self.location = self.module.params["location"]
libcloud.security.VERIFY_SSL_CERT = self.module.params['validate_certs']
libcloud.security.VERIFY_SSL_CERT = self.module.params["validate_certs"]
self.driver = get_driver(Provider.DIMENSIONDATA)(
self.user_id,
self.key,
region=self.region
)
self.driver = get_driver(Provider.DIMENSIONDATA)(self.user_id, self.key, region=self.region)
# Determine the MCP API version (this depends on the target datacenter).
self.mcp_version = self.get_mcp_version(self.location)
# Optional "wait-for-completion" arguments
if 'wait' in self.module.params:
self.wait = self.module.params['wait']
self.wait_time = self.module.params['wait_time']
self.wait_poll_interval = self.module.params['wait_poll_interval']
if "wait" in self.module.params:
self.wait = self.module.params["wait"]
self.wait_time = self.module.params["wait_time"]
self.wait_poll_interval = self.module.params["wait_poll_interval"]
else:
self.wait = False
self.wait_time = 0
@@ -122,29 +118,29 @@ class DimensionDataModule:
"""
if not HAS_LIBCLOUD:
self.module.fail_json(msg='libcloud is required for this module.')
self.module.fail_json(msg="libcloud is required for this module.")
user_id = None
key = None
# First, try the module configuration
if 'mcp_user' in self.module.params:
if 'mcp_password' not in self.module.params:
if "mcp_user" in self.module.params:
if "mcp_password" not in self.module.params:
self.module.fail_json(
msg='"mcp_user" parameter was specified, but not "mcp_password" (either both must be specified, or neither).'
)
user_id = self.module.params['mcp_user']
key = self.module.params['mcp_password']
user_id = self.module.params["mcp_user"]
key = self.module.params["mcp_password"]
# Fall back to environment
if not user_id or not key:
user_id = os.environ.get('MCP_USER', None)
key = os.environ.get('MCP_PASSWORD', None)
user_id = os.environ.get("MCP_USER", None)
key = os.environ.get("MCP_PASSWORD", None)
# Finally, try dotfile (~/.dimensiondata)
if not user_id or not key:
home = expanduser('~')
home = expanduser("~")
config = configparser.RawConfigParser()
config.read(f"{home}/.dimensiondata")
@@ -171,9 +167,9 @@ class DimensionDataModule:
location = self.driver.ex_get_location_by_id(location)
if MCP_2_LOCATION_NAME_PATTERN.match(location.name):
return '2.0'
return "2.0"
return '1.0'
return "1.0"
def get_network_domain(self, locator, location):
"""
@@ -184,7 +180,8 @@ class DimensionDataModule:
network_domain = self.driver.ex_get_network_domain(locator)
else:
matching_network_domains = [
network_domain for network_domain in self.driver.ex_list_network_domains(location=location)
network_domain
for network_domain in self.driver.ex_list_network_domains(location=location)
if network_domain.name == locator
]
@@ -206,8 +203,7 @@ class DimensionDataModule:
vlan = self.driver.ex_get_vlan(locator)
else:
matching_vlans = [
vlan for vlan in self.driver.ex_list_vlans(location, network_domain)
if vlan.name == locator
vlan for vlan in self.driver.ex_list_vlans(location, network_domain) if vlan.name == locator
]
if matching_vlans:
@@ -229,11 +225,11 @@ class DimensionDataModule:
"""
spec = dict(
region=dict(type='str', default='na'),
mcp_user=dict(type='str', required=False),
mcp_password=dict(type='str', required=False, no_log=True),
location=dict(type='str', required=True),
validate_certs=dict(type='bool', required=False, default=True)
region=dict(type="str", default="na"),
mcp_user=dict(type="str", required=False),
mcp_password=dict(type="str", required=False, no_log=True),
location=dict(type="str", required=True),
validate_certs=dict(type="bool", required=False, default=True),
)
if additional_argument_spec:
@@ -250,9 +246,9 @@ class DimensionDataModule:
"""
spec = DimensionDataModule.argument_spec(
wait=dict(type='bool', required=False, default=False),
wait_time=dict(type='int', required=False, default=600),
wait_poll_interval=dict(type='int', required=False, default=2)
wait=dict(type="bool", required=False, default=False),
wait_time=dict(type="int", required=False, default=600),
wait_poll_interval=dict(type="int", required=False, default=2),
)
if additional_argument_spec:
@@ -268,9 +264,7 @@ class DimensionDataModule:
:return: An array containing the argument specifications.
"""
required_together = [
['mcp_user', 'mcp_password']
]
required_together = [["mcp_user", "mcp_password"]]
if additional_required_together:
required_together.extend(additional_required_together)
@@ -319,7 +313,7 @@ def get_dd_regions():
all_regions = API_ENDPOINTS.keys()
# Only Dimension Data endpoints (no prefix)
regions = [region[3:] for region in all_regions if region.startswith('dd-')]
regions = [region[3:] for region in all_regions if region.startswith("dd-")]
return regions

View File

@@ -71,10 +71,10 @@ _django_std_arg_fmts: dict[str, ArgFormatType] = dict(
# keys can be used in _django_args
_args_menu = dict(
std=(django_std_args, _django_std_arg_fmts),
database=(_database_dash, {"database": _django_std_arg_fmts["database_dash"]}), # deprecate, remove in 13.0.0
database=(_database_dash, {"database": _django_std_arg_fmts["database_dash"]}), # deprecate, remove in 13.0.0
noinput=({}, {"noinput": cmd_runner_fmt.as_fixed("--noinput")}), # deprecate, remove in 13.0.0
dry_run=({}, {"dry_run": cmd_runner_fmt.as_bool("--dry-run")}), # deprecate, remove in 13.0.0
check=({}, {"check": cmd_runner_fmt.as_bool("--check")}), # deprecate, remove in 13.0.0
dry_run=({}, {"dry_run": cmd_runner_fmt.as_bool("--dry-run")}), # deprecate, remove in 13.0.0
check=({}, {"check": cmd_runner_fmt.as_bool("--check")}), # deprecate, remove in 13.0.0
database_dash=(_database_dash, {}),
data=(_data, {}),
)
@@ -89,9 +89,17 @@ class _DjangoRunner(PythonRunner):
def __call__(self, output_process=None, check_mode_skip=False, check_mode_return=None, **kwargs):
args_order = (
("command", "no_color", "settings", "pythonpath", "traceback", "verbosity", "skip_checks") + self._prepare_args_order(self.default_args_order)
"command",
"no_color",
"settings",
"pythonpath",
"traceback",
"verbosity",
"skip_checks",
) + self._prepare_args_order(self.default_args_order)
return super().__call__(
args_order, output_process, check_mode_skip=check_mode_skip, check_mode_return=check_mode_return, **kwargs
)
return super().__call__(args_order, output_process, check_mode_skip=check_mode_skip, check_mode_return=check_mode_return, **kwargs)
def bare_context(self, *args, **kwargs):
return super().__call__(*args, **kwargs)
@@ -106,9 +114,9 @@ class DjangoModuleHelper(ModuleHelper):
_check_mode_arg: str = ""
def __init__(self):
self.module["argument_spec"], self.arg_formats = self._build_args(self.module.get("argument_spec", {}),
self.arg_formats,
*(["std"] + self._django_args))
self.module["argument_spec"], self.arg_formats = self._build_args(
self.module.get("argument_spec", {}), self.arg_formats, *(["std"] + self._django_args)
)
super().__init__(self.module)
if self.django_admin_cmd is not None:
self.vars.command = self.django_admin_cmd
@@ -127,11 +135,13 @@ class DjangoModuleHelper(ModuleHelper):
return res_arg_spec, res_arg_fmts
def __run__(self):
runner = _DjangoRunner(self.module,
default_args_order=self.django_admin_arg_order,
arg_formats=self.arg_formats,
venv=self.vars.venv,
check_rc=True)
runner = _DjangoRunner(
self.module,
default_args_order=self.django_admin_arg_order,
arg_formats=self.arg_formats,
venv=self.vars.venv,
check_rc=True,
)
run_params = self.vars.as_dict()
if self._check_mode_arg:

View File

@@ -11,51 +11,42 @@ from ansible.module_utils.urls import fetch_url
class GandiLiveDNSAPI:
api_endpoint = 'https://api.gandi.net/v5/livedns'
api_endpoint = "https://api.gandi.net/v5/livedns"
changed = False
error_strings = {
400: 'Bad request',
401: 'Permission denied',
404: 'Resource not found',
400: "Bad request",
401: "Permission denied",
404: "Resource not found",
}
attribute_map = {
'record': 'rrset_name',
'type': 'rrset_type',
'ttl': 'rrset_ttl',
'values': 'rrset_values'
}
attribute_map = {"record": "rrset_name", "type": "rrset_type", "ttl": "rrset_ttl", "values": "rrset_values"}
def __init__(self, module):
self.module = module
self.api_key = module.params['api_key']
self.personal_access_token = module.params['personal_access_token']
self.api_key = module.params["api_key"]
self.personal_access_token = module.params["personal_access_token"]
def _build_error_message(self, module, info):
s = ''
body = info.get('body')
s = ""
body = info.get("body")
if body:
errors = module.from_json(body).get('errors')
errors = module.from_json(body).get("errors")
if errors:
error = errors[0]
name = error.get('name')
name = error.get("name")
if name:
s += f'{name} :'
description = error.get('description')
s += f"{name} :"
description = error.get("description")
if description:
s += description
return s
def _gandi_api_call(self, api_call, method='GET', payload=None, error_on_404=True):
def _gandi_api_call(self, api_call, method="GET", payload=None, error_on_404=True):
authorization_header = (
f'Bearer {self.personal_access_token}'
if self.personal_access_token
else f'Apikey {self.api_key}'
f"Bearer {self.personal_access_token}" if self.personal_access_token else f"Apikey {self.api_key}"
)
headers = {'Authorization': authorization_header,
'Content-Type': 'application/json'}
headers = {"Authorization": authorization_header, "Content-Type": "application/json"}
data = None
if payload:
try:
@@ -63,15 +54,11 @@ class GandiLiveDNSAPI:
except Exception as e:
self.module.fail_json(msg=f"Failed to encode payload as JSON: {e} ")
resp, info = fetch_url(self.module,
self.api_endpoint + api_call,
headers=headers,
data=data,
method=method)
resp, info = fetch_url(self.module, self.api_endpoint + api_call, headers=headers, data=data, method=method)
error_msg = ''
if info['status'] >= 400 and (info['status'] != 404 or error_on_404):
err_s = self.error_strings.get(info['status'], '')
error_msg = ""
if info["status"] >= 400 and (info["status"] != 404 or error_on_404):
err_s = self.error_strings.get(info["status"], "")
error_msg = f"API Error {err_s}: {self._build_error_message(self.module, info)}"
@@ -83,14 +70,14 @@ class GandiLiveDNSAPI:
if content:
try:
result = json.loads(to_text(content, errors='surrogate_or_strict'))
except (getattr(json, 'JSONDecodeError', ValueError)) as e:
result = json.loads(to_text(content, errors="surrogate_or_strict"))
except getattr(json, "JSONDecodeError", ValueError) as e:
error_msg += f"; Failed to parse API response with error {e}: {content}"
if error_msg:
self.module.fail_json(msg=error_msg)
return result, info['status']
return result, info["status"]
def build_result(self, result, domain):
if result is None:
@@ -100,11 +87,11 @@ class GandiLiveDNSAPI:
for k in self.attribute_map:
v = result.get(self.attribute_map[k], None)
if v is not None:
if k == 'record' and v == '@':
v = ''
if k == "record" and v == "@":
v = ""
res[k] = v
res['domain'] = domain
res["domain"] = domain
return res
@@ -114,11 +101,11 @@ class GandiLiveDNSAPI:
return [self.build_result(r, domain) for r in results]
def get_records(self, record, type, domain):
url = f'/domains/{domain}/records'
url = f"/domains/{domain}/records"
if record:
url += f'/{record}'
url += f"/{record}"
if type:
url += f'/{type}'
url += f"/{type}"
records, status = self._gandi_api_call(url, error_on_404=False)
@@ -130,44 +117,45 @@ class GandiLiveDNSAPI:
# filter by type if record is not set
if not record and type:
records = [r
for r in records
if r['rrset_type'] == type]
records = [r for r in records if r["rrset_type"] == type]
return records
def create_record(self, record, type, values, ttl, domain):
url = f'/domains/{domain}/records'
url = f"/domains/{domain}/records"
new_record = {
'rrset_name': record,
'rrset_type': type,
'rrset_values': values,
'rrset_ttl': ttl,
"rrset_name": record,
"rrset_type": type,
"rrset_values": values,
"rrset_ttl": ttl,
}
record, status = self._gandi_api_call(url, method='POST', payload=new_record)
record, status = self._gandi_api_call(url, method="POST", payload=new_record)
if status in (200, 201,):
if status in (
200,
201,
):
return new_record
return None
def update_record(self, record, type, values, ttl, domain):
url = f'/domains/{domain}/records/{record}/{type}'
url = f"/domains/{domain}/records/{record}/{type}"
new_record = {
'rrset_values': values,
'rrset_ttl': ttl,
"rrset_values": values,
"rrset_ttl": ttl,
}
record = self._gandi_api_call(url, method='PUT', payload=new_record)[0]
record = self._gandi_api_call(url, method="PUT", payload=new_record)[0]
return record
def delete_record(self, record, type, domain):
url = f'/domains/{domain}/records/{record}/{type}'
url = f"/domains/{domain}/records/{record}/{type}"
self._gandi_api_call(url, method='DELETE')
self._gandi_api_call(url, method="DELETE")
def delete_dns_record(self, record, type, values, domain):
if record == '':
record = '@'
if record == "":
record = "@"
records = self.get_records(record, type, domain)
@@ -176,11 +164,11 @@ class GandiLiveDNSAPI:
self.changed = True
if values is not None and set(cur_record['rrset_values']) != set(values):
new_values = set(cur_record['rrset_values']) - set(values)
if values is not None and set(cur_record["rrset_values"]) != set(values):
new_values = set(cur_record["rrset_values"]) - set(values)
if new_values:
# Removing one or more values from a record, we update the record with the remaining values
self.update_record(record, type, list(new_values), cur_record['rrset_ttl'], domain)
self.update_record(record, type, list(new_values), cur_record["rrset_ttl"], domain)
records = self.get_records(record, type, domain)
return records[0], self.changed
@@ -192,8 +180,8 @@ class GandiLiveDNSAPI:
return None, self.changed
def ensure_dns_record(self, record, type, ttl, values, domain):
if record == '':
record = '@'
if record == "":
record = "@"
records = self.get_records(record, type, domain)
@@ -201,19 +189,14 @@ class GandiLiveDNSAPI:
cur_record = records[0]
do_update = False
if ttl is not None and cur_record['rrset_ttl'] != ttl:
if ttl is not None and cur_record["rrset_ttl"] != ttl:
do_update = True
if values is not None and set(cur_record['rrset_values']) != set(values):
if values is not None and set(cur_record["rrset_values"]) != set(values):
do_update = True
if do_update:
if self.module.check_mode:
result = dict(
rrset_type=type,
rrset_name=record,
rrset_values=values,
rrset_ttl=ttl
)
result = dict(rrset_type=type, rrset_name=record, rrset_values=values, rrset_ttl=ttl)
else:
self.update_record(record, type, values, ttl, domain)
@@ -225,12 +208,7 @@ class GandiLiveDNSAPI:
return cur_record, self.changed
if self.module.check_mode:
new_record = dict(
rrset_type=type,
rrset_name=record,
rrset_values=values,
rrset_ttl=ttl
)
new_record = dict(rrset_type=type, rrset_name=record, rrset_values=values, rrset_ttl=ttl)
result = new_record
else:
result = self.create_record(record, type, values, ttl, domain)

View File

@@ -17,7 +17,7 @@ _state_map = {
def gconftool2_runner(module, **kwargs):
return CmdRunner(
module,
command='gconftool-2',
command="gconftool-2",
arg_formats=dict(
state=cmd_runner_fmt.as_map(_state_map),
key=cmd_runner_fmt.as_list(),
@@ -27,5 +27,5 @@ def gconftool2_runner(module, **kwargs):
config_source=cmd_runner_fmt.as_opt_val("--config-source"),
version=cmd_runner_fmt.as_fixed("--version"),
),
**kwargs
**kwargs,
)
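A hedged usage sketch following community.general's CmdRunner pattern (the module object is assumed, and the state mapping lives in _state_map, whose contents are outside this hunk):

runner = gconftool2_runner(module, check_rc=True)

# runs: gconftool-2 --version
with runner("version") as ctx:
    rc, out, err = ctx.run()

# runs: gconftool-2 <flag mapped from _state_map> <key>
with runner("state key") as ctx:
    rc, out, err = ctx.run(state="get", key="/apps/example/setting")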

View File

@@ -10,14 +10,14 @@ from ansible_collections.community.general.plugins.module_utils.cmd_runner impor
def gio_mime_runner(module, **kwargs):
return CmdRunner(
module,
command=['gio'],
command=["gio"],
arg_formats=dict(
mime=cmd_runner_fmt.as_fixed('mime'),
mime=cmd_runner_fmt.as_fixed("mime"),
mime_type=cmd_runner_fmt.as_list(),
handler=cmd_runner_fmt.as_list(),
version=cmd_runner_fmt.as_fixed('--version'),
version=cmd_runner_fmt.as_fixed("--version"),
),
**kwargs
**kwargs,
)

View File

@@ -1,4 +1,3 @@
# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
# Copyright (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -19,20 +18,21 @@ import traceback
def _determine_list_all_kwargs(version) -> dict[str, t.Any]:
gitlab_version = LooseVersion(version)
if gitlab_version >= LooseVersion('4.0.0'):
if gitlab_version >= LooseVersion("4.0.0"):
# 4.0.0 removed 'as_list'
return {'iterator': True, 'per_page': 100}
elif gitlab_version >= LooseVersion('3.7.0'):
return {"iterator": True, "per_page": 100}
elif gitlab_version >= LooseVersion("3.7.0"):
# 3.7.0 added 'get_all'
return {'as_list': False, 'get_all': True, 'per_page': 100}
return {"as_list": False, "get_all": True, "per_page": 100}
else:
return {'as_list': False, 'all': True, 'per_page': 100}
return {"as_list": False, "all": True, "per_page": 100}
GITLAB_IMP_ERR: str | None = None
try:
import gitlab
import requests
HAS_GITLAB_PACKAGE = True
list_all_kwargs = _determine_list_all_kwargs(gitlab.__version__)
except Exception:
@@ -43,12 +43,12 @@ except Exception:
def auth_argument_spec(spec=None):
arg_spec = (dict(
ca_path=dict(type='str'),
api_token=dict(type='str', no_log=True),
api_oauth_token=dict(type='str', no_log=True),
api_job_token=dict(type='str', no_log=True),
))
arg_spec = dict(
ca_path=dict(type="str"),
api_token=dict(type="str", no_log=True),
api_oauth_token=dict(type="str", no_log=True),
api_job_token=dict(type="str", no_log=True),
)
if spec:
arg_spec.update(spec)
return arg_spec
@@ -79,28 +79,30 @@ def find_group(gitlab_instance, identifier):
def ensure_gitlab_package(module, min_version=None):
if not HAS_GITLAB_PACKAGE:
module.fail_json(
msg=missing_required_lib("python-gitlab", url='https://python-gitlab.readthedocs.io/en/stable/'),
exception=GITLAB_IMP_ERR
msg=missing_required_lib("python-gitlab", url="https://python-gitlab.readthedocs.io/en/stable/"),
exception=GITLAB_IMP_ERR,
)
gitlab_version = gitlab.__version__
if min_version is not None and LooseVersion(gitlab_version) < LooseVersion(min_version):
module.fail_json(msg=(
f"This module requires python-gitlab Python module >= {min_version} (installed version: "
f"{gitlab_version}). Please upgrade python-gitlab to version {min_version} or above."
))
module.fail_json(
msg=(
f"This module requires python-gitlab Python module >= {min_version} (installed version: "
f"{gitlab_version}). Please upgrade python-gitlab to version {min_version} or above."
)
)
def gitlab_authentication(module, min_version=None):
ensure_gitlab_package(module, min_version=min_version)
gitlab_url = module.params['api_url']
validate_certs = module.params['validate_certs']
ca_path = module.params['ca_path']
gitlab_user = module.params['api_username']
gitlab_password = module.params['api_password']
gitlab_token = module.params['api_token']
gitlab_oauth_token = module.params['api_oauth_token']
gitlab_job_token = module.params['api_job_token']
gitlab_url = module.params["api_url"]
validate_certs = module.params["validate_certs"]
ca_path = module.params["ca_path"]
gitlab_user = module.params["api_username"]
gitlab_password = module.params["api_password"]
gitlab_token = module.params["api_token"]
gitlab_oauth_token = module.params["api_oauth_token"]
gitlab_job_token = module.params["api_job_token"]
verify = ca_path if validate_certs and ca_path else validate_certs
@@ -108,21 +110,29 @@ def gitlab_authentication(module, min_version=None):
# We can create an oauth_token using a username and password
# https://docs.gitlab.com/ee/api/oauth2.html#authorization-code-flow
if gitlab_user:
data = {'grant_type': 'password', 'username': gitlab_user, 'password': gitlab_password}
data = {"grant_type": "password", "username": gitlab_user, "password": gitlab_password}
resp = requests.post(urljoin(gitlab_url, "oauth/token"), data=data, verify=verify)
resp_data = resp.json()
gitlab_oauth_token = resp_data["access_token"]
gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=verify, private_token=gitlab_token,
oauth_token=gitlab_oauth_token, job_token=gitlab_job_token, api_version=4)
gitlab_instance = gitlab.Gitlab(
url=gitlab_url,
ssl_verify=verify,
private_token=gitlab_token,
oauth_token=gitlab_oauth_token,
job_token=gitlab_job_token,
api_version=4,
)
gitlab_instance.auth()
except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
module.fail_json(msg=f"Failed to connect to GitLab server: {e}")
except (gitlab.exceptions.GitlabHttpError) as e:
module.fail_json(msg=(
f"Failed to connect to GitLab server: {e}. GitLab remove Session API now "
"that private tokens are removed from user API endpoints since version 10.2."
))
except gitlab.exceptions.GitlabHttpError as e:
module.fail_json(
msg=(
f"Failed to connect to GitLab server: {e}. GitLab remove Session API now "
"that private tokens are removed from user API endpoints since version 10.2."
)
)
return gitlab_instance
@@ -130,7 +140,17 @@ def gitlab_authentication(module, min_version=None):
def filter_returned_variables(gitlab_variables):
# pop properties we don't know
existing_variables = [dict(x.attributes) for x in gitlab_variables]
KNOWN = ['key', 'value', 'description', 'masked', 'hidden', 'protected', 'variable_type', 'environment_scope', 'raw']
KNOWN = [
"key",
"value",
"description",
"masked",
"hidden",
"protected",
"variable_type",
"environment_scope",
"raw",
]
for item in existing_variables:
for key in list(item.keys()):
if key not in KNOWN:
@@ -159,17 +179,17 @@ def vars_to_variables(vars, module):
elif isinstance(value, dict):
new_item = {
"name": item,
"value": value.get('value'),
"description": value.get('description'),
"masked": value.get('masked'),
"hidden": value.get('hidden'),
"protected": value.get('protected'),
"raw": value.get('raw'),
"variable_type": value.get('variable_type'),
"value": value.get("value"),
"description": value.get("description"),
"masked": value.get("masked"),
"hidden": value.get("hidden"),
"protected": value.get("protected"),
"raw": value.get("raw"),
"variable_type": value.get("variable_type"),
}
if value.get('environment_scope'):
new_item['environment_scope'] = value.get('environment_scope')
if value.get("environment_scope"):
new_item["environment_scope"] = value.get("environment_scope")
variables.append(new_item)

View File

@@ -12,12 +12,13 @@ HAS_HEROKU = False
HEROKU_IMP_ERR = None
try:
import heroku3
HAS_HEROKU = True
except ImportError:
HEROKU_IMP_ERR = traceback.format_exc()
class HerokuHelper():
class HerokuHelper:
def __init__(self, module):
self.module = module
self.check_lib()
@@ -25,17 +26,18 @@ class HerokuHelper():
def check_lib(self):
if not HAS_HEROKU:
self.module.fail_json(msg=missing_required_lib('heroku3'), exception=HEROKU_IMP_ERR)
self.module.fail_json(msg=missing_required_lib("heroku3"), exception=HEROKU_IMP_ERR)
@staticmethod
def heroku_argument_spec():
return dict(
api_key=dict(fallback=(env_fallback, ['HEROKU_API_KEY', 'TF_VAR_HEROKU_API_KEY']), type='str', no_log=True))
api_key=dict(fallback=(env_fallback, ["HEROKU_API_KEY", "TF_VAR_HEROKU_API_KEY"]), type="str", no_log=True)
)
def get_heroku_client(self):
client = heroku3.from_key(self.api_key)
if not client.is_authenticated:
self.module.fail_json(msg='Heroku authentication failure, please check your API Key')
self.module.fail_json(msg="Heroku authentication failure, please check your API Key")
return client

View File

@@ -92,9 +92,7 @@ class HomebrewValidate:
if brew_path is None:
return True
return isinstance(
brew_path, str
) and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
return isinstance(brew_path, str) and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
@classmethod
def valid_package(cls, package):
@@ -103,9 +101,7 @@ class HomebrewValidate:
if package is None:
return True
return isinstance(
package, str
) and not cls.INVALID_PACKAGE_REGEX.search(package)
return isinstance(package, str) and not cls.INVALID_PACKAGE_REGEX.search(package)
def parse_brew_path(module):

View File

@@ -13,13 +13,13 @@ try:
from keystoneauth1.adapter import Adapter
from keystoneauth1.identity import v3
from keystoneauth1 import session
HAS_THIRD_LIBRARIES = True
except ImportError:
THIRD_LIBRARIES_IMP_ERR = traceback.format_exc()
HAS_THIRD_LIBRARIES = False
from ansible.module_utils.basic import (AnsibleModule, env_fallback,
missing_required_lib)
from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
from ansible.module_utils.common.text.converters import to_text
@@ -59,21 +59,19 @@ def session_method_wrapper(f):
url = self.endpoint + url
r = f(self, url, *args, **kwargs)
except Exception as ex:
raise HwcClientException(
0, f"Sending request failed, error={ex}")
raise HwcClientException(0, f"Sending request failed, error={ex}")
result = None
if r.content:
try:
result = r.json()
except Exception as ex:
raise HwcClientException(
0, f"Parsing response to json failed, error: {ex}")
raise HwcClientException(0, f"Parsing response to json failed, error: {ex}")
code = r.status_code
if code not in [200, 201, 202, 203, 204, 205, 206, 207, 208, 226]:
msg = ""
for i in ['message', 'error.message']:
for i in ["message", "error.message"]:
try:
msg = navigate_value(result, i)
break
@@ -97,8 +95,8 @@ class _ServiceClient:
self._client = client
self._endpoint = endpoint
self._default_header = {
'User-Agent': f"Huawei-Ansible-MM-{product}",
'Accept': 'application/json',
"User-Agent": f"Huawei-Ansible-MM-{product}",
"Accept": "application/json",
}
@property
@@ -111,23 +109,19 @@ class _ServiceClient:
@session_method_wrapper
def get(self, url, body=None, header=None, timeout=None):
return self._client.get(url, json=body, timeout=timeout,
headers=self._header(header))
return self._client.get(url, json=body, timeout=timeout, headers=self._header(header))
@session_method_wrapper
def post(self, url, body=None, header=None, timeout=None):
return self._client.post(url, json=body, timeout=timeout,
headers=self._header(header))
return self._client.post(url, json=body, timeout=timeout, headers=self._header(header))
@session_method_wrapper
def delete(self, url, body=None, header=None, timeout=None):
return self._client.delete(url, json=body, timeout=timeout,
headers=self._header(header))
return self._client.delete(url, json=body, timeout=timeout, headers=self._header(header))
@session_method_wrapper
def put(self, url, body=None, header=None, timeout=None):
return self._client.put(url, json=body, timeout=timeout,
headers=self._header(header))
return self._client.put(url, json=body, timeout=timeout, headers=self._header(header))
def _header(self, header):
if header and isinstance(header, dict):
@@ -167,22 +161,18 @@ class Config:
def _gen_provider_client(self):
m = self._module
p = {
"auth_url": m.params['identity_endpoint'],
"password": m.params['password'],
"username": m.params['user'],
"project_name": m.params['project'],
"user_domain_name": m.params['domain'],
"reauthenticate": True
"auth_url": m.params["identity_endpoint"],
"password": m.params["password"],
"username": m.params["user"],
"project_name": m.params["project"],
"user_domain_name": m.params["domain"],
"reauthenticate": True,
}
self._project_client = Adapter(
session.Session(auth=v3.Password(**p)),
raise_exc=False)
self._project_client = Adapter(session.Session(auth=v3.Password(**p)), raise_exc=False)
p.pop("project_name")
self._domain_client = Adapter(
session.Session(auth=v3.Password(**p)),
raise_exc=False)
self._domain_client = Adapter(session.Session(auth=v3.Password(**p)), raise_exc=False)
def _get_service_endpoint(self, client, service_type, region):
k = f"{service_type}.{region if region else ''}"
@@ -192,15 +182,12 @@ class Config:
url = None
try:
url = client.get_endpoint(service_type=service_type,
region_name=region, interface="public")
url = client.get_endpoint(service_type=service_type, region_name=region, interface="public")
except Exception as ex:
raise HwcClientException(
0, f"Getting endpoint failed, error={ex}")
raise HwcClientException(0, f"Getting endpoint failed, error={ex}")
if url == "":
raise HwcClientException(
0, f"Cannot find the endpoint for {service_type}")
raise HwcClientException(0, f"Cannot find the endpoint for {service_type}")
if url[-1] != "/":
url += "/"
@@ -210,42 +197,46 @@ class Config:
def _validate(self):
if not HAS_THIRD_LIBRARIES:
self.module.fail_json(
msg=missing_required_lib('keystoneauth1'),
exception=THIRD_LIBRARIES_IMP_ERR)
self.module.fail_json(msg=missing_required_lib("keystoneauth1"), exception=THIRD_LIBRARIES_IMP_ERR)
class HwcModule(AnsibleModule):
def __init__(self, *args, **kwargs):
arg_spec = kwargs.setdefault('argument_spec', {})
arg_spec = kwargs.setdefault("argument_spec", {})
arg_spec.update(
dict(
identity_endpoint=dict(
required=True, type='str',
fallback=(env_fallback, ['ANSIBLE_HWC_IDENTITY_ENDPOINT']),
required=True,
type="str",
fallback=(env_fallback, ["ANSIBLE_HWC_IDENTITY_ENDPOINT"]),
),
user=dict(
required=True, type='str',
fallback=(env_fallback, ['ANSIBLE_HWC_USER']),
required=True,
type="str",
fallback=(env_fallback, ["ANSIBLE_HWC_USER"]),
),
password=dict(
required=True, type='str', no_log=True,
fallback=(env_fallback, ['ANSIBLE_HWC_PASSWORD']),
required=True,
type="str",
no_log=True,
fallback=(env_fallback, ["ANSIBLE_HWC_PASSWORD"]),
),
domain=dict(
required=True, type='str',
fallback=(env_fallback, ['ANSIBLE_HWC_DOMAIN']),
required=True,
type="str",
fallback=(env_fallback, ["ANSIBLE_HWC_DOMAIN"]),
),
project=dict(
required=True, type='str',
fallback=(env_fallback, ['ANSIBLE_HWC_PROJECT']),
required=True,
type="str",
fallback=(env_fallback, ["ANSIBLE_HWC_PROJECT"]),
),
region=dict(
type='str',
fallback=(env_fallback, ['ANSIBLE_HWC_REGION']),
type="str",
fallback=(env_fallback, ["ANSIBLE_HWC_REGION"]),
),
id=dict(type='str')
id=dict(type="str"),
)
)
@@ -253,14 +244,14 @@ class HwcModule(AnsibleModule):
class _DictComparison:
''' This class takes in two dictionaries `a` and `b`.
These are dictionaries of arbitrary depth, but made up of standard
Python types only.
This differ will compare all values in `a` to those in `b`.
If a value in `a` is None, the comparison always returns True,
indicating that there is no need to compare this value.
Note: On all lists, order does matter.
'''
"""This class takes in two dictionaries `a` and `b`.
These are dictionaries of arbitrary depth, but made up of standard
Python types only.
This differ will compare all values in `a` to those in `b`.
If a value in `a` is None, the comparison always returns True,
indicating that there is no need to compare this value.
Note: On all lists, order does matter.
"""
def __init__(self, request):
self.request = request
@@ -316,8 +307,7 @@ class _DictComparison:
return self._compare_dicts(value1, value2)
# Always use to_text values to avoid unicode issues.
return (to_text(value1, errors='surrogate_or_strict') == to_text(
value2, errors='surrogate_or_strict'))
return to_text(value1, errors="surrogate_or_strict") == to_text(value2, errors="surrogate_or_strict")
def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3):
@@ -338,8 +328,7 @@ def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3):
not_found_times += 1
if not_found_times > 10:
raise HwcModuleException(
    f"the object was not found {not_found_times} times in a row")
raise HwcModuleException(f"the object was not found {not_found_times} times in a row")
else:
not_found_times = 0
@@ -347,8 +336,7 @@ def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3):
return obj
if pending and status not in pending:
raise HwcModuleException(
f"unexpected status({status}) occurred")
raise HwcModuleException(f"unexpected status({status}) occurred")
if not is_last_time:
wait *= 2
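As a usage sketch, the refresh callback is expected to return an (object, status) pair; everything named below is illustrative:

def refresh():
    obj = fetch_server()  # hypothetical lookup, may return None
    return obj, (obj or {}).get("status")

server = wait_to_finish(
    target=["ACTIVE"],             # statuses that end the wait successfully
    pending=["BUILD", "PENDING"],  # statuses tolerated while waiting
    refresh=refresh,
    timeout=600,                   # seconds before giving up
)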
@@ -372,13 +360,11 @@ def navigate_value(data, index, array_index=None):
return None
if not isinstance(d, dict):
raise HwcModuleException(
"can't navigate value from a non-dict object")
raise HwcModuleException("can't navigate value from a non-dict object")
i = index[n]
if i not in d:
raise HwcModuleException(
    f"navigate value failed: key({i}) does not exist in the dict")
raise HwcModuleException(f"navigate value failed: key({i}) does not exist in the dict")
d = d[i]
if not array_index:
@@ -392,13 +378,11 @@ def navigate_value(data, index, array_index=None):
return None
if not isinstance(d, list):
raise HwcModuleException(
"can't navigate value from a non-list object")
raise HwcModuleException("can't navigate value from a non-list object")
j = array_index.get(k)
if j >= len(d):
raise HwcModuleException(
"navigate value failed: the index is out of list")
raise HwcModuleException("navigate value failed: the index is out of list")
d = d[j]
return d
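A worked example of the navigation above; array_index keys are dotted prefixes of the index path:

data = {"server": {"addresses": [{"ip": "10.0.0.5"}, {"ip": "10.0.0.6"}]}}
ip = navigate_value(
    data,
    ["server", "addresses", "ip"],
    {"server.addresses": 1},  # pick element 1 of the addresses list
)
# ip == "10.0.0.6"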
@@ -425,14 +409,14 @@ def build_path(module, path, kv=None):
def get_region(module):
if module.params['region']:
return module.params['region']
if module.params["region"]:
return module.params["region"]
return module.params['project'].split("_")[0]
return module.params["project"].split("_")[0]
def is_empty_value(v):
return (not v)
return not v
def are_different_dicts(dict1, dict2):

View File

@@ -20,48 +20,61 @@ except ImportError:
PYXCLI_IMP_ERR = traceback.format_exc()
PYXCLI_INSTALLED = False
AVAILABLE_PYXCLI_FIELDS = ['pool', 'size', 'snapshot_size',
'domain', 'perf_class', 'vol',
'iscsi_chap_name', 'iscsi_chap_secret',
'cluster', 'host', 'lun', 'override',
'fcaddress', 'iscsi_name', 'max_dms',
'max_cgs', 'ldap_id', 'max_mirrors',
'max_pools', 'max_volumes', 'hard_capacity',
'soft_capacity']
AVAILABLE_PYXCLI_FIELDS = [
"pool",
"size",
"snapshot_size",
"domain",
"perf_class",
"vol",
"iscsi_chap_name",
"iscsi_chap_secret",
"cluster",
"host",
"lun",
"override",
"fcaddress",
"iscsi_name",
"max_dms",
"max_cgs",
"ldap_id",
"max_mirrors",
"max_pools",
"max_volumes",
"hard_capacity",
"soft_capacity",
]
def xcli_wrapper(func):
""" Catch xcli errors and return a proper message"""
"""Catch xcli errors and return a proper message"""
@wraps(func)
def wrapper(module, *args, **kwargs):
try:
return func(module, *args, **kwargs)
except errors.CommandExecutionError as e:
module.fail_json(msg=to_native(e))
return wrapper
@xcli_wrapper
def connect_ssl(module):
endpoints = module.params['endpoints']
username = module.params['username']
password = module.params['password']
endpoints = module.params["endpoints"]
username = module.params["username"]
password = module.params["password"]
if not (username and password and endpoints):
module.fail_json(
msg="Username, password or endpoints arguments "
"are missing from the module arguments")
module.fail_json(msg="Username, password or endpoints arguments are missing from the module arguments")
try:
return client.XCLIClient.connect_multiendpoint_ssl(username,
password,
endpoints)
return client.XCLIClient.connect_multiendpoint_ssl(username, password, endpoints)
except errors.CommandFailedConnectionError as e:
module.fail_json(
msg=f"Connection with Spectrum Accelerate system has failed: {e}.")
module.fail_json(msg=f"Connection with Spectrum Accelerate system has failed: {e}.")
def spectrum_accelerate_spec():
""" Return arguments spec for AnsibleModule """
"""Return arguments spec for AnsibleModule"""
return dict(
endpoints=dict(required=True),
username=dict(required=True),
@@ -77,17 +90,16 @@ def execute_pyxcli_command(module, xcli_command, xcli_client):
def build_pyxcli_command(fields):
""" Builds the args for pyxcli using the exact args from ansible"""
"""Builds the args for pyxcli using the exact args from ansible"""
pyxcli_args = {}
for field in fields:
if not fields[field]:
continue
if field in AVAILABLE_PYXCLI_FIELDS and fields[field] != '':
if field in AVAILABLE_PYXCLI_FIELDS and fields[field] != "":
pyxcli_args[field] = fields[field]
return pyxcli_args
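A quick illustration of the filtering above (field names come from AVAILABLE_PYXCLI_FIELDS; values are placeholders):

fields = {"pool": "pool1", "size": "17GB", "unknown": "x", "vol": ""}
print(build_pyxcli_command(fields))
# -> {'pool': 'pool1', 'size': '17GB'}  (unknown fields and empty values are dropped)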
def is_pyxcli_installed(module):
if not PYXCLI_INSTALLED:
module.fail_json(msg=missing_required_lib('pyxcli'),
exception=PYXCLI_IMP_ERR)
module.fail_json(msg=missing_required_lib("pyxcli"), exception=PYXCLI_IMP_ERR)

File diff suppressed because it is too large

View File

@@ -8,8 +8,7 @@ from __future__ import annotations
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import \
keycloak_argument_spec
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import keycloak_argument_spec
def keycloak_clientsecret_module():
@@ -22,9 +21,9 @@ def keycloak_clientsecret_module():
argument_spec = keycloak_argument_spec()
meta_args = dict(
realm=dict(default='master'),
id=dict(type='str'),
client_id=dict(type='str', aliases=['clientId']),
realm=dict(default="master"),
id=dict(type="str"),
client_id=dict(type="str", aliases=["clientId"]),
)
argument_spec.update(meta_args)
@@ -32,14 +31,15 @@ def keycloak_clientsecret_module():
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=([['id', 'client_id'],
['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
required_together=([['auth_username', 'auth_password']]),
mutually_exclusive=[
['token', 'auth_realm'],
['token', 'auth_username'],
['token', 'auth_password']
])
required_one_of=(
[
["id", "client_id"],
["token", "auth_realm", "auth_username", "auth_password", "auth_client_id", "auth_client_secret"],
]
),
required_together=([["auth_username", "auth_password"]]),
mutually_exclusive=[["token", "auth_realm"], ["token", "auth_username"], ["token", "auth_password"]],
)
return module
@@ -54,9 +54,9 @@ def keycloak_clientsecret_module_resolve_params(module, kc):
:return: tuple of id, realm
"""
realm = module.params.get('realm')
id = module.params.get('id')
client_id = module.params.get('client_id')
realm = module.params.get("realm")
id = module.params.get("id")
client_id = module.params.get("client_id")
# only lookup the client_id if id isn't provided.
# in the case that both are provided, prefer the ID, since it is one
@@ -66,10 +66,8 @@ def keycloak_clientsecret_module_resolve_params(module, kc):
client = kc.get_client_by_clientid(client_id, realm=realm)
if client is None:
module.fail_json(
msg=f'Client does not exist {client_id}'
)
module.fail_json(msg=f"Client does not exist {client_id}")
id = client['id']
id = client["id"]
return id, realm

View File

@@ -1,4 +1,3 @@
# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
@@ -10,38 +9,37 @@ import time
class iLORedfishUtils(RedfishUtils):
def get_ilo_sessions(self):
result = {}
# listing all sessions has always been slower than other operations, why?
session_list = []
sessions_results = []
# Get these entries, but do not fail if they are not found
properties = ['Description', 'Id', 'Name', 'UserName']
properties = ["Description", "Id", "Name", "UserName"]
# Changed self.sessions_uri to a hardcoded string.
response = self.get_request(f"{self.root_uri}{self.service_root}SessionService/Sessions/")
if not response['ret']:
if not response["ret"]:
return response
result['ret'] = True
data = response['data']
result["ret"] = True
data = response["data"]
current_session = None
if 'Oem' in data:
if "Oem" in data:
if data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]:
current_session = data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]
for sessions in data['Members']:
for sessions in data["Members"]:
# session_list[] are URIs
session_list.append(sessions['@odata.id'])
session_list.append(sessions["@odata.id"])
# for each session, get details
for uri in session_list:
session = {}
if uri != current_session:
response = self.get_request(self.root_uri + uri)
if not response['ret']:
if not response["ret"]:
return response
data = response['data']
data = response["data"]
for property in properties:
if property in data:
session[property] = data[property]
@@ -52,41 +50,37 @@ class iLORedfishUtils(RedfishUtils):
def set_ntp_server(self, mgr_attributes):
result = {}
setkey = mgr_attributes['mgr_attr_name']
setkey = mgr_attributes["mgr_attr_name"]
nic_info = self.get_manager_ethernet_uri()
ethuri = nic_info["nic_addr"]
response = self.get_request(self.root_uri + ethuri)
if not response['ret']:
if not response["ret"]:
return response
result['ret'] = True
data = response['data']
payload = {"DHCPv4": {
"UseNTPServers": ""
}}
result["ret"] = True
data = response["data"]
payload = {"DHCPv4": {"UseNTPServers": ""}}
if data["DHCPv4"]["UseNTPServers"]:
payload["DHCPv4"]["UseNTPServers"] = False
res_dhv4 = self.patch_request(self.root_uri + ethuri, payload)
if not res_dhv4['ret']:
if not res_dhv4["ret"]:
return res_dhv4
payload = {"DHCPv6": {
"UseNTPServers": ""
}}
payload = {"DHCPv6": {"UseNTPServers": ""}}
if data["DHCPv6"]["UseNTPServers"]:
payload["DHCPv6"]["UseNTPServers"] = False
res_dhv6 = self.patch_request(self.root_uri + ethuri, payload)
if not res_dhv6['ret']:
if not res_dhv6["ret"]:
return res_dhv6
datetime_uri = f"{self.manager_uri}DateTime"
listofips = mgr_attributes['mgr_attr_value'].split(" ")
listofips = mgr_attributes["mgr_attr_value"].split(" ")
if len(listofips) > 2:
return {'ret': False, 'changed': False, 'msg': "More than 2 NTP Servers mentioned"}
return {"ret": False, "changed": False, "msg": "More than 2 NTP Servers mentioned"}
ntp_list = []
for ips in listofips:
@@ -98,46 +92,46 @@ class iLORedfishUtils(RedfishUtils):
payload = {setkey: ntp_list}
response1 = self.patch_request(self.root_uri + datetime_uri, payload)
if not response1['ret']:
if not response1["ret"]:
return response1
return {'ret': True, 'changed': True, 'msg': f"Modified {mgr_attributes['mgr_attr_name']}"}
return {"ret": True, "changed": True, "msg": f"Modified {mgr_attributes['mgr_attr_name']}"}
def set_time_zone(self, attr):
key = attr['mgr_attr_name']
key = attr["mgr_attr_name"]
uri = f"{self.manager_uri}DateTime/"
response = self.get_request(self.root_uri + uri)
if not response['ret']:
if not response["ret"]:
return response
data = response["data"]
if key not in data:
return {'ret': False, 'changed': False, 'msg': f"Key {key} not found"}
return {"ret": False, "changed": False, "msg": f"Key {key} not found"}
timezones = data["TimeZoneList"]
index = ""
for tz in timezones:
if attr['mgr_attr_value'] in tz["Name"]:
if attr["mgr_attr_value"] in tz["Name"]:
index = tz["Index"]
break
payload = {key: {"Index": index}}
response = self.patch_request(self.root_uri + uri, payload)
if not response['ret']:
if not response["ret"]:
return response
return {'ret': True, 'changed': True, 'msg': f"Modified {attr['mgr_attr_name']}"}
return {"ret": True, "changed": True, "msg": f"Modified {attr['mgr_attr_name']}"}
def set_dns_server(self, attr):
key = attr['mgr_attr_name']
key = attr["mgr_attr_name"]
nic_info = self.get_manager_ethernet_uri()
uri = nic_info["nic_addr"]
listofips = attr['mgr_attr_value'].split(" ")
listofips = attr["mgr_attr_value"].split(" ")
if len(listofips) > 3:
return {'ret': False, 'changed': False, 'msg': "More than 3 DNS Servers mentioned"}
return {"ret": False, "changed": False, "msg": "More than 3 DNS Servers mentioned"}
dns_list = []
for ips in listofips:
@@ -146,87 +140,63 @@ class iLORedfishUtils(RedfishUtils):
while len(dns_list) < 3:
dns_list.append("0.0.0.0")
payload = {
"Oem": {
"Hpe": {
"IPv4": {
key: dns_list
}
}
}
}
payload = {"Oem": {"Hpe": {"IPv4": {key: dns_list}}}}
response = self.patch_request(self.root_uri + uri, payload)
if not response['ret']:
if not response["ret"]:
return response
return {'ret': True, 'changed': True, 'msg': f"Modified {attr['mgr_attr_name']}"}
return {"ret": True, "changed": True, "msg": f"Modified {attr['mgr_attr_name']}"}
def set_domain_name(self, attr):
key = attr['mgr_attr_name']
key = attr["mgr_attr_name"]
nic_info = self.get_manager_ethernet_uri()
ethuri = nic_info["nic_addr"]
response = self.get_request(self.root_uri + ethuri)
if not response['ret']:
if not response["ret"]:
return response
data = response['data']
data = response["data"]
payload = {"DHCPv4": {
"UseDomainName": ""
}}
payload = {"DHCPv4": {"UseDomainName": ""}}
if data["DHCPv4"]["UseDomainName"]:
payload["DHCPv4"]["UseDomainName"] = False
res_dhv4 = self.patch_request(self.root_uri + ethuri, payload)
if not res_dhv4['ret']:
if not res_dhv4["ret"]:
return res_dhv4
payload = {"DHCPv6": {
"UseDomainName": ""
}}
payload = {"DHCPv6": {"UseDomainName": ""}}
if data["DHCPv6"]["UseDomainName"]:
payload["DHCPv6"]["UseDomainName"] = False
res_dhv6 = self.patch_request(self.root_uri + ethuri, payload)
if not res_dhv6['ret']:
if not res_dhv6["ret"]:
return res_dhv6
domain_name = attr['mgr_attr_value']
domain_name = attr["mgr_attr_value"]
payload = {"Oem": {
"Hpe": {
key: domain_name
}
}}
payload = {"Oem": {"Hpe": {key: domain_name}}}
response = self.patch_request(self.root_uri + ethuri, payload)
if not response['ret']:
if not response["ret"]:
return response
return {'ret': True, 'changed': True, 'msg': f"Modified {attr['mgr_attr_name']}"}
return {"ret": True, "changed": True, "msg": f"Modified {attr['mgr_attr_name']}"}
def set_wins_registration(self, mgrattr):
Key = mgrattr['mgr_attr_name']
Key = mgrattr["mgr_attr_name"]
nic_info = self.get_manager_ethernet_uri()
ethuri = nic_info["nic_addr"]
payload = {
"Oem": {
"Hpe": {
"IPv4": {
Key: False
}
}
}
}
payload = {"Oem": {"Hpe": {"IPv4": {Key: False}}}}
response = self.patch_request(self.root_uri + ethuri, payload)
if not response['ret']:
if not response["ret"]:
return response
return {'ret': True, 'changed': True, 'msg': f"Modified {mgrattr['mgr_attr_name']}"}
return {"ret": True, "changed": True, "msg": f"Modified {mgrattr['mgr_attr_name']}"}
def get_server_poststate(self):
# Get server details
@@ -236,15 +206,9 @@ class iLORedfishUtils(RedfishUtils):
server_data = response["data"]
if "Hpe" in server_data["Oem"]:
return {
"ret": True,
"server_poststate": server_data["Oem"]["Hpe"]["PostState"]
}
return {"ret": True, "server_poststate": server_data["Oem"]["Hpe"]["PostState"]}
else:
return {
"ret": True,
"server_poststate": server_data["Oem"]["Hp"]["PostState"]
}
return {"ret": True, "server_poststate": server_data["Oem"]["Hp"]["PostState"]}
def wait_for_ilo_reboot_completion(self, polling_interval=60, max_polling_time=1800):
# This method checks if OOB controller reboot is completed
@@ -270,19 +234,11 @@ class iLORedfishUtils(RedfishUtils):
break
pcount = pcount + 1
if state["server_poststate"] in ["PowerOff", "Off"]:
return {
"ret": False,
"changed": False,
"msg": "Server is powered OFF"
}
return {"ret": False, "changed": False, "msg": "Server is powered OFF"}
# When server is not rebooting
if state["server_poststate"] in ["InPostDiscoveryComplete", "FinishedPost"]:
return {
"ret": True,
"changed": False,
"msg": "Server is not rebooting"
}
return {"ret": True, "changed": False, "msg": "Server is not rebooting"}
while state["server_poststate"] not in ["InPostDiscoveryComplete", "FinishedPost"] and count > times:
state = self.get_server_poststate()
@@ -290,16 +246,8 @@ class iLORedfishUtils(RedfishUtils):
return state
if state["server_poststate"] in ["InPostDiscoveryComplete", "FinishedPost"]:
return {
"ret": True,
"changed": True,
"msg": "Server reboot is completed"
}
return {"ret": True, "changed": True, "msg": "Server reboot is completed"}
time.sleep(polling_interval)
times = times + 1
return {
"ret": False,
"changed": False,
"msg": f"Server Reboot has failed, server state: {state} "
}
return {"ret": False, "changed": False, "msg": f"Server Reboot has failed, server state: {state} "}

View File

@@ -1,4 +1,3 @@
# Copyright (c) 2017, Ansible Project
# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
# SPDX-License-Identifier: BSD-2-Clause
@@ -14,6 +13,7 @@ from ansible_collections.community.general.plugins.module_utils.version import L
REQUESTS_IMP_ERR = None
try:
import requests.exceptions # noqa: F401, pylint: disable=unused-import
HAS_REQUESTS = True
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
@@ -24,46 +24,47 @@ try:
from influxdb import InfluxDBClient
from influxdb import __version__ as influxdb_version
from influxdb import exceptions # noqa: F401, pylint: disable=unused-import
HAS_INFLUXDB = True
except ImportError:
INFLUXDB_IMP_ERR = traceback.format_exc()
HAS_INFLUXDB = False
class InfluxDb():
class InfluxDb:
def __init__(self, module):
self.module = module
self.params = self.module.params
self.check_lib()
self.hostname = self.params['hostname']
self.port = self.params['port']
self.path = self.params['path']
self.username = self.params['username']
self.password = self.params['password']
self.database_name = self.params.get('database_name')
self.hostname = self.params["hostname"]
self.port = self.params["port"]
self.path = self.params["path"]
self.username = self.params["username"]
self.password = self.params["password"]
self.database_name = self.params.get("database_name")
def check_lib(self):
if not HAS_REQUESTS:
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
self.module.fail_json(msg=missing_required_lib("requests"), exception=REQUESTS_IMP_ERR)
if not HAS_INFLUXDB:
self.module.fail_json(msg=missing_required_lib('influxdb'), exception=INFLUXDB_IMP_ERR)
self.module.fail_json(msg=missing_required_lib("influxdb"), exception=INFLUXDB_IMP_ERR)
@staticmethod
def influxdb_argument_spec():
return dict(
hostname=dict(type='str', default='localhost'),
port=dict(type='int', default=8086),
path=dict(type='str', default=''),
username=dict(type='str', default='root', aliases=['login_username']),
password=dict(type='str', default='root', no_log=True, aliases=['login_password']),
ssl=dict(type='bool', default=False),
validate_certs=dict(type='bool', default=True),
timeout=dict(type='int'),
retries=dict(type='int', default=3),
proxies=dict(type='dict', default={}),
use_udp=dict(type='bool', default=False),
udp_port=dict(type='int', default=4444),
hostname=dict(type="str", default="localhost"),
port=dict(type="int", default=8086),
path=dict(type="str", default=""),
username=dict(type="str", default="root", aliases=["login_username"]),
password=dict(type="str", default="root", no_log=True, aliases=["login_password"]),
ssl=dict(type="bool", default=False),
validate_certs=dict(type="bool", default=True),
timeout=dict(type="int"),
retries=dict(type="int", default=3),
proxies=dict(type="dict", default={}),
use_udp=dict(type="bool", default=False),
udp_port=dict(type="int", default=4444),
)
def connect_to_influxdb(self):
@@ -73,19 +74,19 @@ class InfluxDb():
username=self.username,
password=self.password,
database=self.database_name,
ssl=self.params['ssl'],
verify_ssl=self.params['validate_certs'],
timeout=self.params['timeout'],
use_udp=self.params['use_udp'],
udp_port=self.params['udp_port'],
proxies=self.params['proxies'],
ssl=self.params["ssl"],
verify_ssl=self.params["validate_certs"],
timeout=self.params["timeout"],
use_udp=self.params["use_udp"],
udp_port=self.params["udp_port"],
proxies=self.params["proxies"],
)
influxdb_api_version = LooseVersion(influxdb_version)
if influxdb_api_version >= LooseVersion('4.1.0'):
if influxdb_api_version >= LooseVersion("4.1.0"):
# retries option is added in version 4.1.0
args.update(retries=self.params['retries'])
args.update(retries=self.params["retries"])
if influxdb_api_version >= LooseVersion('5.1.0'):
if influxdb_api_version >= LooseVersion("5.1.0"):
# path argument is added in version 5.1.0
args.update(path=self.path)
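The version gating above amounts to this sketch (assuming influxdb >= 5.1.0 is installed):

from influxdb import InfluxDBClient

args = dict(host="localhost", port=8086, username="root", password="root")
args.update(retries=3)  # only accepted by influxdb >= 4.1.0
args.update(path="")    # only accepted by influxdb >= 5.1.0
client = InfluxDBClient(**args)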

View File

@@ -24,10 +24,10 @@ from urllib.parse import quote
def _env_then_dns_fallback(*args, **kwargs):
''' Load value from environment or DNS in that order'''
"""Load value from environment or DNS in that order"""
try:
result = env_fallback(*args, **kwargs)
if result == '':
if result == "":
raise AnsibleFallbackNotFound
return result
except AnsibleFallbackNotFound:
@@ -35,7 +35,7 @@ def _env_then_dns_fallback(*args, **kwargs):
# The ipa-ca entry is a standard entry that IPA will have set for
# the CA.
try:
return socket.gethostbyaddr(socket.gethostbyname('ipa-ca'))[0]
return socket.gethostbyaddr(socket.gethostbyname("ipa-ca"))[0]
except Exception:
raise AnsibleFallbackNotFound
@@ -47,61 +47,67 @@ class IPAClient:
self.protocol = protocol
self.module = module
self.headers = None
self.timeout = module.params.get('ipa_timeout')
self.timeout = module.params.get("ipa_timeout")
self.use_gssapi = False
def get_base_url(self):
return f'{self.protocol}://{self.host}/ipa'
return f"{self.protocol}://{self.host}/ipa"
def get_json_url(self):
return f'{self.get_base_url()}/session/json'
return f"{self.get_base_url()}/session/json"
def login(self, username, password):
if 'KRB5CCNAME' in os.environ and HAS_GSSAPI:
if "KRB5CCNAME" in os.environ and HAS_GSSAPI:
self.use_gssapi = True
elif 'KRB5_CLIENT_KTNAME' in os.environ and HAS_GSSAPI:
elif "KRB5_CLIENT_KTNAME" in os.environ and HAS_GSSAPI:
ccache = f"MEMORY:{uuid.uuid4()!s}"
os.environ['KRB5CCNAME'] = ccache
os.environ["KRB5CCNAME"] = ccache
self.use_gssapi = True
else:
if not password:
if 'KRB5CCNAME' in os.environ or 'KRB5_CLIENT_KTNAME' in os.environ:
if "KRB5CCNAME" in os.environ or "KRB5_CLIENT_KTNAME" in os.environ:
self.module.warn("In order to use GSSAPI, you need to install 'urllib_gssapi'")
self._fail('login', 'Password is required if not using '
'GSSAPI. To use GSSAPI, please set the '
'KRB5_CLIENT_KTNAME or KRB5CCNAME (or both) '
'environment variables.')
url = f'{self.get_base_url()}/session/login_password'
self._fail(
"login",
"Password is required if not using "
"GSSAPI. To use GSSAPI, please set the "
"KRB5_CLIENT_KTNAME or KRB5CCNAME (or both) "
" environment variables.",
)
url = f"{self.get_base_url()}/session/login_password"
data = f"user={quote(username, safe='')}&password={quote(password, safe='')}"
headers = {'referer': self.get_base_url(),
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'text/plain'}
headers = {
"referer": self.get_base_url(),
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "text/plain",
}
try:
resp, info = fetch_url(module=self.module, url=url, data=to_bytes(data), headers=headers, timeout=self.timeout)
status_code = info['status']
resp, info = fetch_url(
module=self.module, url=url, data=to_bytes(data), headers=headers, timeout=self.timeout
)
status_code = info["status"]
if status_code not in [200, 201, 204]:
self._fail('login', info['msg'])
self._fail("login", info["msg"])
self.headers = {'Cookie': info.get('set-cookie')}
self.headers = {"Cookie": info.get("set-cookie")}
except Exception as e:
self._fail('login', to_native(e))
self._fail("login", to_native(e))
if not self.headers:
self.headers = dict()
self.headers.update({
'referer': self.get_base_url(),
'Content-Type': 'application/json',
'Accept': 'application/json'})
self.headers.update(
{"referer": self.get_base_url(), "Content-Type": "application/json", "Accept": "application/json"}
)
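Outside Ansible, the same login exchange looks roughly like this sketch (requests instead of fetch_url; host and credentials are placeholders):

import requests

base = "https://ipa.example.com/ipa"
resp = requests.post(
    f"{base}/session/login_password",
    data={"user": "admin", "password": "<password>"},
    headers={"referer": base, "Accept": "text/plain"},
)
cookie = resp.headers.get("Set-Cookie")  # carried on subsequent JSON calls, as above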
def _fail(self, msg, e):
if 'message' in e:
err_string = e.get('message')
if "message" in e:
err_string = e.get("message")
else:
err_string = e
self.module.fail_json(msg=f'{msg}: {err_string}')
self.module.fail_json(msg=f"{msg}: {err_string}")
def get_ipa_version(self):
response = self.ping()['summary']
ipa_ver_regex = re.compile(r'IPA server version (\d+\.\d+\.\d+).*')
response = self.ping()["summary"]
ipa_ver_regex = re.compile(r"IPA server version (\d+\.\d+\.\d+).*")
version_match = ipa_ver_regex.match(response)
ipa_version = None
if version_match:
@@ -109,41 +115,47 @@ class IPAClient:
return ipa_version
def ping(self):
return self._post_json(method='ping', name=None)
return self._post_json(method="ping", name=None)
def _post_json(self, method, name, item=None):
if item is None:
item = {}
url = f'{self.get_base_url()}/session/json'
url = f"{self.get_base_url()}/session/json"
data = dict(method=method)
# TODO: We should probably handle this a little better.
if method in ('ping', 'config_show', 'otpconfig_show'):
data['params'] = [[], {}]
elif method in ('config_mod', 'otpconfig_mod'):
data['params'] = [[], item]
if method in ("ping", "config_show", "otpconfig_show"):
data["params"] = [[], {}]
elif method in ("config_mod", "otpconfig_mod"):
data["params"] = [[], item]
else:
data['params'] = [[name], item]
data["params"] = [[name], item]
try:
resp, info = fetch_url(module=self.module, url=url, data=to_bytes(json.dumps(data)),
headers=self.headers, timeout=self.timeout, use_gssapi=self.use_gssapi)
status_code = info['status']
resp, info = fetch_url(
module=self.module,
url=url,
data=to_bytes(json.dumps(data)),
headers=self.headers,
timeout=self.timeout,
use_gssapi=self.use_gssapi,
)
status_code = info["status"]
if status_code not in [200, 201, 204]:
self._fail(method, info['msg'])
self._fail(method, info["msg"])
except Exception as e:
self._fail(f'post {method}', to_native(e))
self._fail(f"post {method}", to_native(e))
charset = resp.headers.get_content_charset('latin-1')
charset = resp.headers.get_content_charset("latin-1")
resp = json.loads(to_text(resp.read(), encoding=charset))
err = resp.get('error')
err = resp.get("error")
if err is not None:
self._fail(f'response {method}', err)
self._fail(f"response {method}", err)
if 'result' in resp:
result = resp.get('result')
if 'result' in result:
result = result.get('result')
if "result" in resp:
result = resp.get("result")
if "result" in result:
result = result.get("result")
if isinstance(result, list):
if len(result) > 0:
return result[0]
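For clarity, the three payload shapes built above look like this on the wire (method and argument names are illustrative):

ping_payload = {"method": "ping", "params": [[], {}]}               # ping / *_show style
mod_payload = {"method": "config_mod", "params": [[], {"x": "y"}]}  # *_mod style
named_payload = {"method": "user_show", "params": [["jdoe"], {}]}   # everything else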
@@ -195,11 +207,11 @@ class IPAClient:
def ipa_argument_spec():
return dict(
ipa_prot=dict(type='str', default='https', choices=['http', 'https'], fallback=(env_fallback, ['IPA_PROT'])),
ipa_host=dict(type='str', default='ipa.example.com', fallback=(_env_then_dns_fallback, ['IPA_HOST'])),
ipa_port=dict(type='int', default=443, fallback=(env_fallback, ['IPA_PORT'])),
ipa_user=dict(type='str', default='admin', fallback=(env_fallback, ['IPA_USER'])),
ipa_pass=dict(type='str', no_log=True, fallback=(env_fallback, ['IPA_PASS'])),
ipa_timeout=dict(type='int', default=10, fallback=(env_fallback, ['IPA_TIMEOUT'])),
validate_certs=dict(type='bool', default=True),
ipa_prot=dict(type="str", default="https", choices=["http", "https"], fallback=(env_fallback, ["IPA_PROT"])),
ipa_host=dict(type="str", default="ipa.example.com", fallback=(_env_then_dns_fallback, ["IPA_HOST"])),
ipa_port=dict(type="int", default=443, fallback=(env_fallback, ["IPA_PORT"])),
ipa_user=dict(type="str", default="admin", fallback=(env_fallback, ["IPA_USER"])),
ipa_pass=dict(type="str", no_log=True, fallback=(env_fallback, ["IPA_PASS"])),
ipa_timeout=dict(type="int", default=10, fallback=(env_fallback, ["IPA_TIMEOUT"])),
validate_certs=dict(type="bool", default=True),
)

View File

@@ -1,4 +1,3 @@
# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -12,8 +11,8 @@ import time
def download_updates_file(updates_expiration):
updates_filename = 'jenkins-plugin-cache.json'
updates_dir = os.path.expanduser('~/.ansible/tmp')
updates_filename = "jenkins-plugin-cache.json"
updates_dir = os.path.expanduser("~/.ansible/tmp")
updates_file = os.path.join(updates_dir, updates_filename)
download_updates = True

View File

@@ -26,8 +26,7 @@ HASHED_KEY_MAGIC = "|1|"
def is_ssh_url(url):
""" check if url is ssh """
"""check if url is ssh"""
if "@" in url and "://" not in url:
return True
@@ -38,12 +37,11 @@ def is_ssh_url(url):
def get_fqdn_and_port(repo_url):
""" chop the hostname and port out of a url """
"""chop the hostname and port out of a url"""
fqdn = None
port = None
ipv6_re = re.compile(r'(\[[^]]*\])(?::([0-9]+))?')
ipv6_re = re.compile(r"(\[[^]]*\])(?::([0-9]+))?")
if "@" in repo_url and "://" not in repo_url:
# most likely an user@host:path or user@host/path type URL
repo_url = repo_url.split("@", 1)[1]
@@ -76,9 +74,9 @@ def check_hostkey(module, fqdn):
# this is a variant of code found in connection_plugins/paramiko.py and we should modify
# the paramiko code to import and use this.
def not_in_host_file(self, host):
if 'USER' in os.environ:
def not_in_host_file(self, host):
if "USER" in os.environ:
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
else:
user_host_file = "~/.ssh/known_hosts"
@@ -111,10 +109,10 @@ def not_in_host_file(self, host):
if tokens[0].find(HASHED_KEY_MAGIC) == 0:
# this is a hashed known host entry
try:
(kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|", 2)
hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
(kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC) :].split("|", 2)
hash = hmac.new(kn_salt.decode("base64"), digestmod=sha1)
hash.update(host)
if hash.digest() == kn_host.decode('base64'):
if hash.digest() == kn_host.decode("base64"):
return False
except Exception:
# invalid hashed host key, skip it
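A hashed known_hosts entry has the form |1|<base64 salt>|<base64 HMAC-SHA1(salt, hostname)>. The snippet above still uses the Python 2 str.decode("base64") idiom; a self-contained sketch of the same check with base64.b64decode (function name invented):

import base64
import hmac
from hashlib import sha1

def host_matches_hashed_entry(host, hashed_field):
    # hashed_field looks like "|1|<b64 salt>|<b64 digest>"
    salt_b64, digest_b64 = hashed_field[len("|1|"):].split("|", 1)
    mac = hmac.new(base64.b64decode(salt_b64), host.encode("utf-8"), sha1)
    return hmac.compare_digest(mac.digest(), base64.b64decode(digest_b64))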
@@ -128,12 +126,11 @@ def not_in_host_file(self, host):
def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False):
"""use ssh-keyscan to add the hostkey"""
""" use ssh-keyscan to add the hostkey """
keyscan_cmd = module.get_bin_path("ssh-keyscan", True)
keyscan_cmd = module.get_bin_path('ssh-keyscan', True)
if 'USER' in os.environ:
if "USER" in os.environ:
user_ssh_dir = os.path.expandvars("~${USER}/.ssh/")
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
else:
@@ -144,7 +141,7 @@ def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False):
if not os.path.exists(user_ssh_dir):
if create_dir:
try:
os.makedirs(user_ssh_dir, int('700', 8))
os.makedirs(user_ssh_dir, int("700", 8))
except Exception:
module.fail_json(msg=f"failed to create host key directory: {user_ssh_dir}")
else:
@@ -160,14 +157,14 @@ def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False):
rc, out, err = module.run_command(this_cmd)
# ssh-keyscan gives a 0 exit code and prints nothing on timeout
if rc != 0 or not out:
msg = 'failed to retrieve hostkey'
msg = "failed to retrieve hostkey"
if not out:
msg += f'. "{this_cmd}" returned no matches.'
else:
msg += f' using command "{this_cmd}". [stdout]: {out}'
if err:
msg += f' [stderr]: {err}'
msg += f" [stderr]: {err}"
module.fail_json(msg=msg)

View File

@@ -1,4 +1,3 @@
# Copyright (c) 2016, Peter Sagerson <psagers@ignorare.net>
# Copyright (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
# Copyright (c) 2017-2018 Keller Fuchs (@KellerFuchs) <kellerfuchs@hashbang.sh>
@@ -21,51 +20,53 @@ try:
HAS_LDAP = True
SASCL_CLASS = {
'gssapi': ldap.sasl.gssapi,
'external': ldap.sasl.external,
"gssapi": ldap.sasl.gssapi,
"external": ldap.sasl.external,
}
except ImportError:
HAS_LDAP = False
def gen_specs(**specs):
specs.update({
'bind_dn': dict(),
'bind_pw': dict(default='', no_log=True),
'ca_path': dict(type='path'),
'dn': dict(required=True),
'referrals_chasing': dict(type='str', default='anonymous', choices=['disabled', 'anonymous']),
'server_uri': dict(default='ldapi:///'),
'start_tls': dict(default=False, type='bool'),
'validate_certs': dict(default=True, type='bool'),
'sasl_class': dict(choices=['external', 'gssapi'], default='external', type='str'),
'xorder_discovery': dict(choices=['enable', 'auto', 'disable'], default='auto', type='str'),
'client_cert': dict(default=None, type='path'),
'client_key': dict(default=None, type='path'),
})
specs.update(
{
"bind_dn": dict(),
"bind_pw": dict(default="", no_log=True),
"ca_path": dict(type="path"),
"dn": dict(required=True),
"referrals_chasing": dict(type="str", default="anonymous", choices=["disabled", "anonymous"]),
"server_uri": dict(default="ldapi:///"),
"start_tls": dict(default=False, type="bool"),
"validate_certs": dict(default=True, type="bool"),
"sasl_class": dict(choices=["external", "gssapi"], default="external", type="str"),
"xorder_discovery": dict(choices=["enable", "auto", "disable"], default="auto", type="str"),
"client_cert": dict(default=None, type="path"),
"client_key": dict(default=None, type="path"),
}
)
return specs
def ldap_required_together():
return [['client_cert', 'client_key']]
return [["client_cert", "client_key"]]
class LdapGeneric:
def __init__(self, module):
# Shortcuts
self.module = module
self.bind_dn = self.module.params['bind_dn']
self.bind_pw = self.module.params['bind_pw']
self.ca_path = self.module.params['ca_path']
self.referrals_chasing = self.module.params['referrals_chasing']
self.server_uri = self.module.params['server_uri']
self.start_tls = self.module.params['start_tls']
self.verify_cert = self.module.params['validate_certs']
self.sasl_class = self.module.params['sasl_class']
self.xorder_discovery = self.module.params['xorder_discovery']
self.client_cert = self.module.params['client_cert']
self.client_key = self.module.params['client_key']
self.bind_dn = self.module.params["bind_dn"]
self.bind_pw = self.module.params["bind_pw"]
self.ca_path = self.module.params["ca_path"]
self.referrals_chasing = self.module.params["referrals_chasing"]
self.server_uri = self.module.params["server_uri"]
self.start_tls = self.module.params["start_tls"]
self.verify_cert = self.module.params["validate_certs"]
self.sasl_class = self.module.params["sasl_class"]
self.xorder_discovery = self.module.params["xorder_discovery"]
self.client_cert = self.module.params["client_cert"]
self.client_key = self.module.params["client_key"]
# Establish connection
self.connection = self._connect_to_ldap()
@@ -74,17 +75,13 @@ class LdapGeneric:
# Try to find the X_ORDERed version of the DN
self.dn = self._find_dn()
else:
self.dn = self.module.params['dn']
self.dn = self.module.params["dn"]
def fail(self, msg, exn):
self.module.fail_json(
msg=msg,
details=to_native(exn),
exception=traceback.format_exc()
)
self.module.fail_json(msg=msg, details=to_native(exn), exception=traceback.format_exc())
def _find_dn(self):
dn = self.module.params['dn']
dn = self.module.params["dn"]
explode_dn = ldap.dn.explode_dn(dn)
@@ -92,8 +89,7 @@ class LdapGeneric:
try:
escaped_value = ldap.filter.escape_filter_chars(explode_dn[0])
filterstr = f"({escaped_value})"
dns = self.connection.search_s(','.join(explode_dn[1:]),
ldap.SCOPE_ONELEVEL, filterstr)
dns = self.connection.search_s(",".join(explode_dn[1:]), ldap.SCOPE_ONELEVEL, filterstr)
if len(dns) == 1:
dn, dummy = dns[0]
except Exception:
@@ -114,7 +110,7 @@ class LdapGeneric:
connection = ldap.initialize(self.server_uri)
if self.referrals_chasing == 'disabled':
if self.referrals_chasing == "disabled":
# Switch off chasing of referrals (https://github.com/ansible-collections/community.general/issues/1067)
connection.set_option(ldap.OPT_REFERRALS, 0)
@@ -129,7 +125,7 @@ class LdapGeneric:
connection.simple_bind_s(self.bind_dn, self.bind_pw)
else:
klass = SASCL_CLASS.get(self.sasl_class, ldap.sasl.external)
connection.sasl_interactive_bind_s('', klass())
connection.sasl_interactive_bind_s("", klass())
except ldap.LDAPError as e:
self.fail("Cannot bind to the server.", e)
@@ -138,6 +134,6 @@ class LdapGeneric:
def _xorder_dn(self):
# match X_ORDERed DNs
regex = r".+\{\d+\}.+"
explode_dn = ldap.dn.explode_dn(self.module.params['dn'])
explode_dn = ldap.dn.explode_dn(self.module.params["dn"])
return re.match(regex, explode_dn[0]) is not None
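A minimal sketch of a module built on these helpers; the attributes option is illustrative and the instantiation assumes a reachable LDAP server:

from ansible.module_utils.basic import AnsibleModule

module = AnsibleModule(
    argument_spec=gen_specs(attributes=dict(type="dict", required=True)),
    required_together=ldap_required_together(),
    supports_check_mode=True,
)
ldap_helper = LdapGeneric(module)  # connects, binds, and resolves X_ORDERed DNs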

View File

@@ -16,4 +16,4 @@ from ansible.module_utils.ansible_release import __version__ as ansible_version
def get_user_agent(module):
"""Retrieve a user-agent to send with LinodeClient requests."""
return f'Ansible-{module}/{ansible_version}'
return f"Ansible-{module}/{ansible_version}"

View File

@@ -22,7 +22,7 @@ def locale_gen_runner(module):
command="locale-gen",
arg_formats=dict(
name=cmd_runner_fmt.as_list(),
purge=cmd_runner_fmt.as_fixed('--purge'),
purge=cmd_runner_fmt.as_fixed("--purge"),
),
check_rc=True,
)
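A rough usage sketch, assuming CmdRunner's usual context-manager protocol; the locale value is illustrative, and as_fixed emits --purge regardless of the value passed for purge:

runner = locale_gen_runner(module)
with runner("purge name") as ctx:
    # builds and runs: locale-gen --purge de_DE.UTF-8
    ctx.run(purge=True, name="de_DE.UTF-8")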

View File

@@ -1,4 +1,3 @@
# Copyright (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
# SPDX-License-Identifier: BSD-2-Clause
@@ -23,7 +22,7 @@ HTTPSConnection = http_client.HTTPSConnection
class UnixHTTPConnection(HTTPConnection):
def __init__(self, path):
HTTPConnection.__init__(self, 'localhost')
HTTPConnection.__init__(self, "localhost")
self.path = path
def connect(self):
@@ -39,7 +38,9 @@ class LXDClientException(Exception):
class LXDClient:
def __init__(self, url, key_file=None, cert_file=None, debug=False, server_cert_file=None, server_check_hostname=True):
def __init__(
self, url, key_file=None, cert_file=None, debug=False, server_cert_file=None, server_check_hostname=True
):
"""LXD Client.
:param url: The URL of the LXD server. (e.g. unix:/var/lib/lxd/unix.socket or https://127.0.0.1)
@@ -58,7 +59,7 @@ class LXDClient:
self.url = url
self.debug = debug
self.logs = []
if url.startswith('https:'):
if url.startswith("https:"):
self.cert_file = cert_file
self.key_file = key_file
parts = generic_urlparse(urlparse(self.url))
@@ -68,28 +69,28 @@ class LXDClient:
ctx.load_verify_locations(cafile=server_cert_file)
ctx.check_hostname = server_check_hostname
ctx.load_cert_chain(cert_file, keyfile=key_file)
self.connection = HTTPSConnection(parts.get('netloc'), context=ctx)
elif url.startswith('unix:'):
unix_socket_path = url[len('unix:'):]
self.connection = HTTPSConnection(parts.get("netloc"), context=ctx)
elif url.startswith("unix:"):
unix_socket_path = url[len("unix:") :]
self.connection = UnixHTTPConnection(unix_socket_path)
else:
raise LXDClientException('URL scheme must be unix: or https:')
raise LXDClientException("URL scheme must be unix: or https:")
def do(self, method, url, body_json=None, ok_error_codes=None, timeout=None, wait_for_container=None):
resp_json = self._send_request(method, url, body_json=body_json, ok_error_codes=ok_error_codes, timeout=timeout)
if resp_json['type'] == 'async':
if resp_json["type"] == "async":
url = f"{resp_json['operation']}/wait"
resp_json = self._send_request('GET', url)
resp_json = self._send_request("GET", url)
if wait_for_container:
while resp_json['metadata']['status'] == 'Running':
resp_json = self._send_request('GET', url)
if resp_json['metadata']['status'] != 'Success':
while resp_json["metadata"]["status"] == "Running":
resp_json = self._send_request("GET", url)
if resp_json["metadata"]["status"] != "Success":
self._raise_err_from_json(resp_json)
return resp_json
def authenticate(self, trust_password):
body_json = {'type': 'client', 'password': trust_password}
return self._send_request('POST', '/1.0/certificates', body_json=body_json)
body_json = {"type": "client", "password": trust_password}
return self._send_request("POST", "/1.0/certificates", body_json=body_json)
def _send_request(self, method, url, body_json=None, ok_error_codes=None, timeout=None):
try:
@@ -97,44 +98,46 @@ class LXDClient:
self.connection.request(method, url, body=body)
resp = self.connection.getresponse()
resp_data = resp.read()
resp_data = to_text(resp_data, errors='surrogate_or_strict')
resp_data = to_text(resp_data, errors="surrogate_or_strict")
resp_json = json.loads(resp_data)
self.logs.append({
'type': 'sent request',
'request': {'method': method, 'url': url, 'json': body_json, 'timeout': timeout},
'response': {'json': resp_json}
})
resp_type = resp_json.get('type', None)
if resp_type == 'error':
if ok_error_codes is not None and resp_json['error_code'] in ok_error_codes:
self.logs.append(
{
"type": "sent request",
"request": {"method": method, "url": url, "json": body_json, "timeout": timeout},
"response": {"json": resp_json},
}
)
resp_type = resp_json.get("type", None)
if resp_type == "error":
if ok_error_codes is not None and resp_json["error_code"] in ok_error_codes:
return resp_json
if resp_json['error'] == "Certificate already in trust store":
if resp_json["error"] == "Certificate already in trust store":
return resp_json
self._raise_err_from_json(resp_json)
return resp_json
except socket.error as e:
raise LXDClientException('cannot connect to the LXD server', err=e)
raise LXDClientException("cannot connect to the LXD server", err=e)
def _raise_err_from_json(self, resp_json):
err_params = {}
if self.debug:
err_params['logs'] = self.logs
err_params["logs"] = self.logs
raise LXDClientException(self._get_err_from_resp_json(resp_json), **err_params)
@staticmethod
def _get_err_from_resp_json(resp_json):
err = None
metadata = resp_json.get('metadata', None)
metadata = resp_json.get("metadata", None)
if metadata is not None:
err = metadata.get('err', None)
err = metadata.get("err", None)
if err is None:
err = resp_json.get('error', None)
err = resp_json.get("error", None)
return err
def default_key_file():
return os.path.expanduser('~/.config/lxc/client.key')
return os.path.expanduser("~/.config/lxc/client.key")
def default_cert_file():
return os.path.expanduser('~/.config/lxc/client.crt')
return os.path.expanduser("~/.config/lxc/client.crt")
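A minimal sketch of talking to a local LXD daemon over its unix socket (socket path illustrative):

client = LXDClient("unix:/var/lib/lxd/unix.socket")
resp = client.do("GET", "/1.0")  # sync response, returned as parsed JSON
server_info = resp["metadata"]   # e.g. API version, server environment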

View File

@@ -21,6 +21,7 @@ from ansible.module_utils.basic import missing_required_lib
CLIENT_IMP_ERR = None
try:
from manageiq_client.api import ManageIQClient
HAS_CLIENT = True
except ImportError:
CLIENT_IMP_ERR = traceback.format_exc()
@@ -29,54 +30,61 @@ except ImportError:
def manageiq_argument_spec():
options = dict(
url=dict(default=os.environ.get('MIQ_URL', None)),
username=dict(default=os.environ.get('MIQ_USERNAME', None)),
password=dict(default=os.environ.get('MIQ_PASSWORD', None), no_log=True),
token=dict(default=os.environ.get('MIQ_TOKEN', None), no_log=True),
validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']),
ca_cert=dict(required=False, default=None, aliases=['ca_bundle_path']),
url=dict(default=os.environ.get("MIQ_URL", None)),
username=dict(default=os.environ.get("MIQ_USERNAME", None)),
password=dict(default=os.environ.get("MIQ_PASSWORD", None), no_log=True),
token=dict(default=os.environ.get("MIQ_TOKEN", None), no_log=True),
validate_certs=dict(default=True, type="bool", aliases=["verify_ssl"]),
ca_cert=dict(required=False, default=None, aliases=["ca_bundle_path"]),
)
return dict(
manageiq_connection=dict(type='dict',
apply_defaults=True,
options=options),
manageiq_connection=dict(type="dict", apply_defaults=True, options=options),
)
def check_client(module):
if not HAS_CLIENT:
module.fail_json(msg=missing_required_lib('manageiq-client'), exception=CLIENT_IMP_ERR)
module.fail_json(msg=missing_required_lib("manageiq-client"), exception=CLIENT_IMP_ERR)
def validate_connection_params(module):
params = module.params['manageiq_connection']
params = module.params["manageiq_connection"]
error_str = "missing required argument: manageiq_connection[{}]"
url = params['url']
token = params['token']
username = params['username']
password = params['password']
url = params["url"]
token = params["token"]
username = params["username"]
password = params["password"]
if (url and username and password) or (url and token):
return params
for arg in ['url', 'username', 'password']:
if params[arg] in (None, ''):
for arg in ["url", "username", "password"]:
if params[arg] in (None, ""):
module.fail_json(msg=error_str.format(arg))
def manageiq_entities():
return {
'provider': 'providers', 'host': 'hosts', 'vm': 'vms',
'category': 'categories', 'cluster': 'clusters', 'data store': 'data_stores',
'group': 'groups', 'resource pool': 'resource_pools', 'service': 'services',
'service template': 'service_templates', 'template': 'templates',
'tenant': 'tenants', 'user': 'users', 'blueprint': 'blueprints'
"provider": "providers",
"host": "hosts",
"vm": "vms",
"category": "categories",
"cluster": "clusters",
"data store": "data_stores",
"group": "groups",
"resource pool": "resource_pools",
"service": "services",
"service template": "service_templates",
"template": "templates",
"tenant": "tenants",
"user": "users",
"blueprint": "blueprints",
}
class ManageIQ:
"""
class encapsulating ManageIQ API client.
class encapsulating ManageIQ API client.
"""
def __init__(self, module):
@@ -85,24 +93,26 @@ class ManageIQ:
params = validate_connection_params(module)
url = params['url']
username = params['username']
password = params['password']
token = params['token']
verify_ssl = params['validate_certs']
ca_bundle_path = params['ca_cert']
url = params["url"]
username = params["username"]
password = params["password"]
token = params["token"]
verify_ssl = params["validate_certs"]
ca_bundle_path = params["ca_cert"]
self._module = module
self._api_url = f"{url}/api"
self._auth = dict(user=username, password=password, token=token)
try:
self._client = ManageIQClient(self._api_url, self._auth, verify_ssl=verify_ssl, ca_bundle_path=ca_bundle_path)
self._client = ManageIQClient(
self._api_url, self._auth, verify_ssl=verify_ssl, ca_bundle_path=ca_bundle_path
)
except Exception as e:
self.module.fail_json(msg=f"failed to open connection ({url}): {e}")
@property
def module(self):
""" Ansible module module
"""Ansible module module
Returns:
the ansible module
@@ -111,7 +121,7 @@ class ManageIQ:
@property
def api_url(self):
""" Base ManageIQ API
"""Base ManageIQ API
Returns:
the base ManageIQ API
@@ -120,7 +130,7 @@ class ManageIQ:
@property
def client(self):
""" ManageIQ client
"""ManageIQ client
Returns:
the ManageIQ client
@@ -128,7 +138,7 @@ class ManageIQ:
return self._client
def find_collection_resource_by(self, collection_name, **params):
""" Searches the collection resource by the collection name and the param passed.
"""Searches the collection resource by the collection name and the param passed.
Returns:
the resource as an object if it exists in manageiq, None otherwise.
@@ -142,7 +152,7 @@ class ManageIQ:
return vars(entity)
def find_collection_resource_or_fail(self, collection_name, **params):
""" Searches the collection resource by the collection name and the param passed.
"""Searches the collection resource by the collection name and the param passed.
Returns:
the resource as an object if it exists in manageiq, Fail otherwise.
@@ -159,12 +169,12 @@ class ManageIQ:
# query resource id, fail if resource does not exist
if resource_id is None:
resource_id = manageiq.find_collection_resource_or_fail(resource_type, name=resource_name)['id']
resource_id = manageiq.find_collection_resource_or_fail(resource_type, name=resource_name)["id"]
return ManageIQPolicies(manageiq, resource_type, resource_id)
def query_resource_id(self, resource_type, resource_name):
""" Query the resource name in ManageIQ.
"""Query the resource name in ManageIQ.
Returns:
the resource ID if it exists in ManageIQ, Fail otherwise.
@@ -179,7 +189,7 @@ class ManageIQ:
class ManageIQPolicies:
"""
Object to execute policies management operations of manageiq resources.
Object to execute policies management operations of manageiq resources.
"""
def __init__(self, manageiq, resource_type, resource_id):
@@ -191,29 +201,27 @@ class ManageIQPolicies:
self.resource_type = resource_type
self.resource_id = resource_id
self.resource_url = f'{self.api_url}/{resource_type}/{resource_id}'
self.resource_url = f"{self.api_url}/{resource_type}/{resource_id}"
def query_profile_href(self, profile):
""" Add or Update the policy_profile href field
"""Add or Update the policy_profile href field
Example:
{name: STR, ...} => {name: STR, href: STR}
"""
resource = self.manageiq.find_collection_resource_or_fail(
"policy_profiles", **profile)
return dict(name=profile['name'], href=resource['href'])
resource = self.manageiq.find_collection_resource_or_fail("policy_profiles", **profile)
return dict(name=profile["name"], href=resource["href"])
def query_resource_profiles(self):
""" Returns a set of the profile objects objects assigned to the resource
"""
url = '{resource_url}/policy_profiles?expand=resources'
"""Returns a set of the profile objects objects assigned to the resource"""
url = "{resource_url}/policy_profiles?expand=resources"
try:
response = self.client.get(url.format(resource_url=self.resource_url))
except Exception as e:
msg = f"Failed to query {self.resource_type} policies: {e}"
self.module.fail_json(msg=msg)
resources = response.get('resources', [])
resources = response.get("resources", [])
# clean the returned rest api profile object to look like:
# {profile_name: STR, profile_description: STR, policies: ARR<POLICIES>}
@@ -222,16 +230,15 @@ class ManageIQPolicies:
return profiles
def query_profile_policies(self, profile_id):
""" Returns a set of the policy objects assigned to the resource
"""
url = '{api_url}/policy_profiles/{profile_id}?expand=policies'
"""Returns a set of the policy objects assigned to the resource"""
url = "{api_url}/policy_profiles/{profile_id}?expand=policies"
try:
response = self.client.get(url.format(api_url=self.api_url, profile_id=profile_id))
except Exception as e:
msg = f"Failed to query {self.resource_type} policies: {e}"
self.module.fail_json(msg=msg)
resources = response.get('policies', [])
resources = response.get("policies", [])
# clean the returned rest api policy object to look like:
# {name: STR, description: STR, active: BOOL}
@@ -240,42 +247,36 @@ class ManageIQPolicies:
return policies
def clean_policy_object(self, policy):
""" Clean a policy object to have human readable form of:
"""Clean a policy object to have human readable form of:
{
name: STR,
description: STR,
active: BOOL
}
"""
name = policy.get('name')
description = policy.get('description')
active = policy.get('active')
name = policy.get("name")
description = policy.get("description")
active = policy.get("active")
return dict(
name=name,
description=description,
active=active)
return dict(name=name, description=description, active=active)
def clean_profile_object(self, profile):
""" Clean a profile object to have human readable form of:
"""Clean a profile object to have human readable form of:
{
profile_name: STR,
profile_description: STR,
policies: ARR<POLICIES>
}
"""
profile_id = profile['id']
name = profile.get('name')
description = profile.get('description')
profile_id = profile["id"]
name = profile.get("name")
description = profile.get("description")
policies = self.query_profile_policies(profile_id)
return dict(
profile_name=name,
profile_description=description,
policies=policies)
return dict(profile_name=name, profile_description=description, policies=policies)
def profiles_to_update(self, profiles, action):
""" Create a list of policies we need to update in ManageIQ.
"""Create a list of policies we need to update in ManageIQ.
Returns:
Whether or not a change took place and a message describing the
@@ -286,12 +287,12 @@ class ManageIQPolicies:
# make a list of assigned full profile names strings
# e.g. ['openscap profile', ...]
assigned_profiles_set = set(profile['profile_name'] for profile in assigned_profiles)
assigned_profiles_set = set(profile["profile_name"] for profile in assigned_profiles)
for profile in profiles:
assigned = profile.get('name') in assigned_profiles_set
assigned = profile.get("name") in assigned_profiles_set
if (action == 'unassign' and assigned) or (action == 'assign' and not assigned):
if (action == "unassign" and assigned) or (action == "assign" and not assigned):
# add/update the policy profile href field
# {name: STR, ...} => {name: STR, href: STR}
profile = self.query_profile_href(profile)
@@ -300,17 +301,14 @@ class ManageIQPolicies:
return profiles_to_post
def assign_or_unassign_profiles(self, profiles, action):
""" Perform assign/unassign action
"""
"""Perform assign/unassign action"""
# get a list of profiles needed to be changed
profiles_to_post = self.profiles_to_update(profiles, action)
if not profiles_to_post:
return dict(
changed=False,
msg=f"Profiles {profiles} already {action}ed, nothing to do")
return dict(changed=False, msg=f"Profiles {profiles} already {action}ed, nothing to do")
# try to assign or unassign profiles to resource
url = f'{self.resource_url}/policy_profiles'
url = f"{self.resource_url}/policy_profiles"
try:
response = self.client.post(url, action=action, resources=profiles_to_post)
except Exception as e:
@@ -318,20 +316,18 @@ class ManageIQPolicies:
self.module.fail_json(msg=msg)
# check all entities in result to be successful
for result in response['results']:
if not result['success']:
for result in response["results"]:
if not result["success"]:
msg = f"Failed to {action}: {result['message']}"
self.module.fail_json(msg=msg)
# successfully changed all needed profiles
return dict(
changed=True,
msg=f"Successfully {action}ed profiles: {profiles}")
return dict(changed=True, msg=f"Successfully {action}ed profiles: {profiles}")
class ManageIQTags:
"""
Object to execute tags management operations of manageiq resources.
Object to execute tags management operations of manageiq resources.
"""
def __init__(self, manageiq, resource_type, resource_id):
@@ -343,15 +339,14 @@ class ManageIQTags:
self.resource_type = resource_type
self.resource_id = resource_id
self.resource_url = f'{self.api_url}/{resource_type}/{resource_id}'
self.resource_url = f"{self.api_url}/{resource_type}/{resource_id}"
def full_tag_name(self, tag):
""" Returns the full tag name in manageiq
"""
"""Returns the full tag name in manageiq"""
return f"/managed/{tag['category']}/{tag['name']}"
def clean_tag_object(self, tag):
""" Clean a tag object to have human readable form of:
"""Clean a tag object to have human readable form of:
{
full_name: STR,
name: STR,
@@ -359,26 +354,26 @@ class ManageIQTags:
category: STR
}
"""
full_name = tag.get('name')
categorization = tag.get('categorization', {})
full_name = tag.get("name")
categorization = tag.get("categorization", {})
return dict(
full_name=full_name,
name=categorization.get('name'),
display_name=categorization.get('display_name'),
category=categorization.get('category', {}).get('name'))
name=categorization.get("name"),
display_name=categorization.get("display_name"),
category=categorization.get("category", {}).get("name"),
)
def query_resource_tags(self):
""" Returns a set of the tag objects assigned to the resource
"""
url = '{resource_url}/tags?expand=resources&attributes=categorization'
"""Returns a set of the tag objects assigned to the resource"""
url = "{resource_url}/tags?expand=resources&attributes=categorization"
try:
response = self.client.get(url.format(resource_url=self.resource_url))
except Exception as e:
msg = f"Failed to query {self.resource_type} tags: {e}"
self.module.fail_json(msg=msg)
resources = response.get('resources', [])
resources = response.get("resources", [])
# clean the returned rest api tag object to look like:
# {full_name: STR, name: STR, display_name: STR, category: STR}
@@ -387,7 +382,7 @@ class ManageIQTags:
return tags
def tags_to_update(self, tags, action):
""" Create a list of tags we need to update in ManageIQ.
"""Create a list of tags we need to update in ManageIQ.
Returns:
Whether or not a change took place and a message describing the
@@ -398,30 +393,27 @@ class ManageIQTags:
# make a list of assigned full tag names strings
# e.g. ['/managed/environment/prod', ...]
assigned_tags_set = set(tag['full_name'] for tag in assigned_tags)
assigned_tags_set = set(tag["full_name"] for tag in assigned_tags)
for tag in tags:
assigned = self.full_tag_name(tag) in assigned_tags_set
if assigned and action == 'unassign':
if assigned and action == "unassign":
tags_to_post.append(tag)
elif (not assigned) and action == 'assign':
elif (not assigned) and action == "assign":
tags_to_post.append(tag)
return tags_to_post
def assign_or_unassign_tags(self, tags, action):
""" Perform assign/unassign action
"""
"""Perform assign/unassign action"""
# get a list of tags needed to be changed
tags_to_post = self.tags_to_update(tags, action)
if not tags_to_post:
return dict(
changed=False,
msg=f"Tags already {action}ed, nothing to do")
return dict(changed=False, msg=f"Tags already {action}ed, nothing to do")
# try to assign or unassign tags to resource
url = f'{self.resource_url}/tags'
url = f"{self.resource_url}/tags"
try:
response = self.client.post(url, action=action, resources=tags)
except Exception as e:
@@ -429,12 +421,10 @@ class ManageIQTags:
self.module.fail_json(msg=msg)
# check all entities in result to be successful
for result in response['results']:
if not result['success']:
for result in response["results"]:
if not result["success"]:
msg = f"Failed to {action}: {result['message']}"
self.module.fail_json(msg=msg)
# successfully changed all needed tags
return dict(
changed=True,
msg=f"Successfully {action}ed tags")
return dict(changed=True, msg=f"Successfully {action}ed tags")
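The assign/unassign decision above reduces to a set-membership test on full tag names. A standalone sketch with illustrative data:

assigned = {"/managed/environment/prod"}
tags = [
    {"category": "environment", "name": "prod"},
    {"category": "lifecycle", "name": "retire"},
]

def full_name(tag):
    return f"/managed/{tag['category']}/{tag['name']}"

to_assign = [t for t in tags if full_name(t) not in assigned]
# -> only the lifecycle/retire tag still needs to be assigned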

View File

@@ -18,9 +18,9 @@ import urllib.error as urllib_error
class Response:
'''
"""
Create a response object to mimic that of requests.
'''
"""
def __init__(self):
self.content = None
@@ -32,12 +32,12 @@ class Response:
def memset_api_call(api_key, api_method, payload=None):
'''
"""
Generic function which returns results back to calling function.
Requires an API key and an API method to assemble the API URL.
Returns response text to be analysed.
'''
"""
# instantiate a response object
response = Response()
@@ -53,13 +53,13 @@ def memset_api_call(api_key, api_method, payload=None):
msg = None
data = urlencode(payload)
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
api_uri_base = 'https://api.memset.com/v1/json/'
api_uri = f'{api_uri_base}{api_method}/'
headers = {"Content-Type": "application/x-www-form-urlencoded"}
api_uri_base = "https://api.memset.com/v1/json/"
api_uri = f"{api_uri_base}{api_method}/"
try:
resp = open_url(api_uri, data=data, headers=headers, method="POST", force_basic_auth=True, url_username=api_key)
response.content = resp.read().decode('utf-8')
response.content = resp.read().decode("utf-8")
response.status_code = resp.getcode()
except urllib_error.HTTPError as e:
try:
@@ -68,7 +68,7 @@ def memset_api_call(api_key, api_method, payload=None):
errorcode = None
has_failed = True
response.content = e.read().decode('utf8')
response.content = e.read().decode("utf8")
response.status_code = errorcode
if response.status_code is not None:
@@ -87,29 +87,29 @@ def memset_api_call(api_key, api_method, payload=None):
def check_zone_domain(data, domain):
'''
"""
Returns true if domain already exists, and false if not.
'''
"""
exists = False
if data.status_code in [201, 200]:
for zone_domain in data.json():
if zone_domain['domain'] == domain:
if zone_domain["domain"] == domain:
exists = True
return exists
def check_zone(data, name):
'''
"""
Returns true if zone already exists, and false if not.
'''
"""
counter = 0
exists = False
if data.status_code in [201, 200]:
for zone in data.json():
if zone['nickname'] == name:
if zone["nickname"] == name:
counter += 1
if counter == 1:
exists = True
@@ -118,26 +118,26 @@ def check_zone(data, name):
def get_zone_id(zone_name, current_zones):
'''
"""
Returns the zone's id if it exists and is unique
'''
"""
zone_exists = False
zone_id, msg = None, None
zone_list = []
for zone in current_zones:
if zone['nickname'] == zone_name:
zone_list.append(zone['id'])
if zone["nickname"] == zone_name:
zone_list.append(zone["id"])
counter = len(zone_list)
if counter == 0:
msg = 'No matching zone found'
msg = "No matching zone found"
elif counter == 1:
zone_id = zone_list[0]
zone_exists = True
elif counter > 1:
zone_id = None
msg = 'Zone ID could not be returned as duplicate zone names were detected'
msg = "Zone ID could not be returned as duplicate zone names were detected"
return zone_exists, msg, counter, zone_id
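For example, duplicate nicknames make the ID unresolvable (data illustrative):

zones = [{"nickname": "prod", "id": "a1"}, {"nickname": "prod", "id": "b2"}]
exists, msg, count, zone_id = get_zone_id("prod", zones)
# exists is False, count == 2, zone_id is None, and msg reports the duplicates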

View File

@@ -16,7 +16,11 @@ class ModuleHelperBase:
module: dict[str, t.Any] | None = None # TODO: better spec using t.TypedDict
ModuleHelperException = _MHE
_delegated_to_module: tuple[str, ...] = (
'check_mode', 'get_bin_path', 'warn', 'deprecate', 'debug',
"check_mode",
"get_bin_path",
"warn",
"deprecate",
"debug",
)
def __init__(self, module=None):
@@ -80,8 +84,8 @@ class ModuleHelperBase:
self.__run__()
self.__quit_module__()
output = self.output
if 'failed' not in output:
output['failed'] = False
if "failed" not in output:
output["failed"] = False
self.module.exit_json(changed=self.has_changed(), **output)
@classmethod

View File

@@ -33,7 +33,7 @@ def cause_changes(when=None):
def module_fails_on_exception(func):
conflict_list = ('msg', 'exception', 'output', 'vars', 'changed')
conflict_list = ("msg", "exception", "output", "vars", "changed")
@wraps(func)
def wrapper(self, *args, **kwargs):
@@ -51,14 +51,17 @@ def module_fails_on_exception(func):
self.update_output(e.update_output)
# patchy solution to resolve conflict with output variables
output = fix_var_conflicts(self.output)
self.module.fail_json(msg=e.msg, exception=traceback.format_exc(),
output=self.output, vars=self.vars.output(), **output)
self.module.fail_json(
msg=e.msg, exception=traceback.format_exc(), output=self.output, vars=self.vars.output(), **output
)
except Exception as e:
# patchy solution to resolve conflict with output variables
output = fix_var_conflicts(self.output)
msg = f"Module failed with exception: {str(e).strip()}"
self.module.fail_json(msg=msg, exception=traceback.format_exc(),
output=self.output, vars=self.vars.output(), **output)
self.module.fail_json(
msg=msg, exception=traceback.format_exc(), output=self.output, vars=self.vars.output(), **output
)
return wrapper
@@ -72,22 +75,25 @@ def check_mode_skip(func):
def check_mode_skip_returns(callable=None, value=None):
def deco(func):
if callable is not None:
@wraps(func)
def wrapper_callable(self, *args, **kwargs):
if self.module.check_mode:
return callable(self, *args, **kwargs)
return func(self, *args, **kwargs)
return wrapper_callable
else:
@wraps(func)
def wrapper_value(self, *args, **kwargs):
if self.module.check_mode:
return value
return func(self, *args, **kwargs)
return wrapper_value
return deco
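A hedged usage sketch of the value variant; the helper class and method are invented:

class MyHelper(ModuleHelperBase):
    @check_mode_skip_returns(value=dict(changed=False, msg="skipped in check mode"))
    def remove_everything(self):
        ...  # destructive work only runs outside check mode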

View File

@@ -10,7 +10,6 @@ from ansible.module_utils.basic import AnsibleModule
class DeprecateAttrsMixin:
def _deprecate_setup(self, attr, target, module):
if target is None:
target = self
@@ -22,7 +21,9 @@ class DeprecateAttrsMixin:
elif hasattr(target, "module") and isinstance(target.module, AnsibleModule):
module = target.module
else:
raise ValueError("Failed to automatically discover the AnsibleModule instance. Pass 'module' parameter explicitly.")
raise ValueError(
"Failed to automatically discover the AnsibleModule instance. Pass 'module' parameter explicitly."
)
# setup internal state dicts
value_attr = "__deprecated_attr_value"
@@ -35,7 +36,9 @@ class DeprecateAttrsMixin:
trigger_dict = getattr(target, trigger_attr)
return target, module, value_dict, trigger_dict
def _deprecate_attr(self, attr, msg, version=None, date=None, collection_name=None, target=None, value=None, module=None):
def _deprecate_attr(
self, attr, msg, version=None, date=None, collection_name=None, target=None, value=None, module=None
):
target, module, value_dict, trigger_dict = self._deprecate_setup(attr, target, module)
value_dict[attr] = getattr(target, attr, value)

View File

@@ -7,7 +7,7 @@ from __future__ import annotations
class StateMixin:
state_param: str = 'state'
state_param: str = "state"
default_state: str | None = None
def _state(self):
@@ -23,7 +23,7 @@ class StateMixin:
# resolve aliases
if state not in self.module.params:
aliased = [name for name, param in self.module.argument_spec.items() if state in param.get('aliases', [])]
aliased = [name for name, param in self.module.argument_spec.items() if state in param.get("aliases", [])]
if aliased:
state = aliased[0]
self.vars.effective_state = state
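In practice a StateModuleHelper subclass implements one method per state value and the mixin dispatches on the resolved state. A rough sketch (module spec illustrative):

class MyModule(StateModuleHelper):
    module = dict(
        argument_spec=dict(state=dict(type="str", default="present", choices=["present", "absent"])),
        supports_check_mode=True,
    )

    def state_present(self):
        ...

    def state_absent(self):
        ...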

View File

@@ -31,7 +31,8 @@ class ModuleHelper(DeprecateAttrsMixin, ModuleHelperBase):
self.vars = VarDict()
for name, value in self.module.params.items():
self.vars.set(
name, value,
name,
value,
diff=name in self.diff_params,
output=name in self.output_params,
change=None if not self.change_params else name in self.change_params,
@@ -62,11 +63,11 @@ class ModuleHelper(DeprecateAttrsMixin, ModuleHelperBase):
if self.facts_name:
facts = self.vars.facts()
if facts is not None:
result['ansible_facts'] = {self.facts_name: facts}
result["ansible_facts"] = {self.facts_name: facts}
if self.diff_mode:
diff = result.get('diff', {})
diff = result.get("diff", {})
vars_diff = self.vars.diff() or {}
result['diff'] = dict_merge(dict(diff), vars_diff)
result["diff"] = dict_merge(dict(diff), vars_diff)
return result

View File

@@ -8,9 +8,13 @@ from __future__ import annotations
# pylint: disable=unused-import
from ansible_collections.community.general.plugins.module_utils.mh.module_helper import (
ModuleHelper, StateModuleHelper,
ModuleHelper,
StateModuleHelper,
)
from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException # noqa: F401
from ansible_collections.community.general.plugins.module_utils.mh.deco import (
cause_changes, module_fails_on_exception, check_mode_skip, check_mode_skip_returns,
cause_changes,
module_fails_on_exception,
check_mode_skip,
check_mode_skip_returns,
)

View File

@@ -54,9 +54,7 @@ def _get_pritunl_organizations(api_token, api_secret, base_url, validate_certs=T
)
def _delete_pritunl_organization(
api_token, api_secret, base_url, organization_id, validate_certs=True
):
def _delete_pritunl_organization(api_token, api_secret, base_url, organization_id, validate_certs=True):
return pritunl_auth_request(
base_url=base_url,
api_token=api_token,
@@ -67,9 +65,7 @@ def _delete_pritunl_organization(
)
def _post_pritunl_organization(
api_token, api_secret, base_url, organization_data, validate_certs=True
):
def _post_pritunl_organization(api_token, api_secret, base_url, organization_data, validate_certs=True):
return pritunl_auth_request(
api_token=api_token,
api_secret=api_secret,
@@ -82,9 +78,7 @@ def _post_pritunl_organization(
)
def _get_pritunl_users(
api_token, api_secret, base_url, organization_id, validate_certs=True
):
def _get_pritunl_users(api_token, api_secret, base_url, organization_id, validate_certs=True):
return pritunl_auth_request(
api_token=api_token,
api_secret=api_secret,
@@ -95,9 +89,7 @@ def _get_pritunl_users(
)
def _delete_pritunl_user(
api_token, api_secret, base_url, organization_id, user_id, validate_certs=True
):
def _delete_pritunl_user(api_token, api_secret, base_url, organization_id, user_id, validate_certs=True):
return pritunl_auth_request(
api_token=api_token,
api_secret=api_secret,
@@ -108,9 +100,7 @@ def _delete_pritunl_user(
)
def _post_pritunl_user(
api_token, api_secret, base_url, organization_id, user_data, validate_certs=True
):
def _post_pritunl_user(api_token, api_secret, base_url, organization_id, user_data, validate_certs=True):
return pritunl_auth_request(
api_token=api_token,
api_secret=api_secret,
@@ -144,9 +134,7 @@ def _put_pritunl_user(
)
def list_pritunl_organizations(
api_token, api_secret, base_url, validate_certs=True, filters=None
):
def list_pritunl_organizations(api_token, api_secret, base_url, validate_certs=True, filters=None):
orgs = []
response = _get_pritunl_organizations(
@@ -164,18 +152,13 @@ def list_pritunl_organizations(
if filters is None:
orgs.append(org)
else:
if not any(
filter_val != org[filter_key]
for filter_key, filter_val in filters.items()
):
if not any(filter_val != org[filter_key] for filter_key, filter_val in filters.items()):
orgs.append(org)
return orgs
def list_pritunl_users(
api_token, api_secret, base_url, organization_id, validate_certs=True, filters=None
):
def list_pritunl_users(api_token, api_secret, base_url, organization_id, validate_certs=True, filters=None):
users = []
response = _get_pritunl_users(
@@ -195,10 +178,7 @@ def list_pritunl_users(
users.append(user)
else:
if not any(
filter_val != user[filter_key]
for filter_key, filter_val in filters.items()
):
if not any(filter_val != user[filter_key] for filter_key, filter_val in filters.items()):
users.append(user)
return users
@@ -220,9 +200,7 @@ def post_pritunl_organization(
)
if response.getcode() != 200:
raise PritunlException(
f"Could not add organization {organization_name} to Pritunl"
)
raise PritunlException(f"Could not add organization {organization_name} to Pritunl")
# The user PUT request returns the updated user object
return json.loads(response.read())
@@ -248,9 +226,7 @@ def post_pritunl_user(
)
if response.getcode() != 200:
raise PritunlException(
f"Could not remove user {user_id} from organization {organization_id} from Pritunl"
)
raise PritunlException(f"Could not remove user {user_id} from organization {organization_id} from Pritunl")
# user POST request returns an array of a single item,
# so return this item instead of the list
return json.loads(response.read())[0]
@@ -266,16 +242,12 @@ def post_pritunl_user(
)
if response.getcode() != 200:
raise PritunlException(
f"Could not update user {user_id} from organization {organization_id} from Pritunl"
)
raise PritunlException(f"Could not update user {user_id} from organization {organization_id} from Pritunl")
# The user PUT request returns the updated user object
return json.loads(response.read())
def delete_pritunl_organization(
api_token, api_secret, base_url, organization_id, validate_certs=True
):
def delete_pritunl_organization(api_token, api_secret, base_url, organization_id, validate_certs=True):
response = _delete_pritunl_organization(
api_token=api_token,
api_secret=api_secret,
@@ -285,16 +257,12 @@ def delete_pritunl_organization(
)
if response.getcode() != 200:
raise PritunlException(
f"Could not remove organization {organization_id} from Pritunl"
)
raise PritunlException(f"Could not remove organization {organization_id} from Pritunl")
return json.loads(response.read())
def delete_pritunl_user(
api_token, api_secret, base_url, organization_id, user_id, validate_certs=True
):
def delete_pritunl_user(api_token, api_secret, base_url, organization_id, user_id, validate_certs=True):
response = _delete_pritunl_user(
api_token=api_token,
api_secret=api_secret,
@@ -305,9 +273,7 @@ def delete_pritunl_user(
)
if response.getcode() != 200:
raise PritunlException(
f"Could not remove user {user_id} from organization {organization_id} from Pritunl"
)
raise PritunlException(f"Could not remove user {user_id} from organization {organization_id} from Pritunl")
return json.loads(response.read())
@@ -332,9 +298,7 @@ def pritunl_auth_request(
auth_string = f"{api_token}&{auth_timestamp}&{auth_nonce}&{method.upper()}&{path}"
auth_signature = base64.b64encode(
hmac.new(
api_secret.encode("utf-8"), auth_string.encode("utf-8"), hashlib.sha256
).digest()
hmac.new(api_secret.encode("utf-8"), auth_string.encode("utf-8"), hashlib.sha256).digest()
)
auth_headers = {
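A standalone sketch of the signature computed above; every value is illustrative:

import base64
import hashlib
import hmac

api_token, api_secret = "tok", "sec"
auth_timestamp, auth_nonce = "1700000000", "9f2a1c"
method, path = "get", "/organization"
auth_string = f"{api_token}&{auth_timestamp}&{auth_nonce}&{method.upper()}&{path}"
auth_signature = base64.b64encode(
    hmac.new(api_secret.encode("utf-8"), auth_string.encode("utf-8"), hashlib.sha256).digest()
)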

View File

@@ -15,16 +15,15 @@ from ansible.module_utils.urls import open_url
from ansible.module_utils.common.text.converters import to_native
GET_HEADERS = {'accept': 'application/json'}
PUT_HEADERS = {'content-type': 'application/json', 'accept': 'application/json'}
POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json'}
DELETE_HEADERS = {'accept': 'application/json'}
GET_HEADERS = {"accept": "application/json"}
PUT_HEADERS = {"content-type": "application/json", "accept": "application/json"}
POST_HEADERS = {"content-type": "application/json", "accept": "application/json"}
DELETE_HEADERS = {"accept": "application/json"}
HEALTH_OK = 5
class OcapiUtils:
def __init__(self, creds, base_uri, proxy_slot_number, timeout, module):
self.root_uri = base_uri
self.proxy_slot_number = proxy_slot_number
@@ -38,8 +37,8 @@ class OcapiUtils:
:return: tuple of username, password
"""
username = self.creds['user']
password = self.creds['pswd']
username = self.creds["user"]
password = self.creds["pswd"]
force_basic_auth = True
return username, password, force_basic_auth
@@ -47,77 +46,89 @@ class OcapiUtils:
req_headers = dict(GET_HEADERS)
username, password, basic_auth = self._auth_params()
try:
resp = open_url(uri, method="GET", headers=req_headers,
url_username=username, url_password=password,
force_basic_auth=basic_auth, validate_certs=False,
follow_redirects='all',
use_proxy=True, timeout=self.timeout)
resp = open_url(
uri,
method="GET",
headers=req_headers,
url_username=username,
url_password=password,
force_basic_auth=basic_auth,
validate_certs=False,
follow_redirects="all",
use_proxy=True,
timeout=self.timeout,
)
data = json.loads(to_native(resp.read()))
headers = {k.lower(): v for (k, v) in resp.info().items()}
except HTTPError as e:
return {'ret': False,
'msg': f"HTTP Error {e.code} on GET request to '{uri}'",
'status': e.code}
return {"ret": False, "msg": f"HTTP Error {e.code} on GET request to '{uri}'", "status": e.code}
except URLError as e:
return {'ret': False, 'msg': f"URL Error on GET request to '{uri}': '{e.reason}'"}
return {"ret": False, "msg": f"URL Error on GET request to '{uri}': '{e.reason}'"}
# Almost all errors should be caught above, but just in case
except Exception as e:
return {'ret': False,
'msg': f"Failed GET request to '{uri}': '{e}'"}
return {'ret': True, 'data': data, 'headers': headers}
return {"ret": False, "msg": f"Failed GET request to '{uri}': '{e}'"}
return {"ret": True, "data": data, "headers": headers}
def delete_request(self, uri, etag=None):
req_headers = dict(DELETE_HEADERS)
if etag is not None:
req_headers['If-Match'] = etag
req_headers["If-Match"] = etag
username, password, basic_auth = self._auth_params()
try:
resp = open_url(uri, method="DELETE", headers=req_headers,
url_username=username, url_password=password,
force_basic_auth=basic_auth, validate_certs=False,
follow_redirects='all',
use_proxy=True, timeout=self.timeout)
resp = open_url(
uri,
method="DELETE",
headers=req_headers,
url_username=username,
url_password=password,
force_basic_auth=basic_auth,
validate_certs=False,
follow_redirects="all",
use_proxy=True,
timeout=self.timeout,
)
if resp.status != 204:
data = json.loads(to_native(resp.read()))
else:
data = ""
headers = {k.lower(): v for (k, v) in resp.info().items()}
except HTTPError as e:
return {'ret': False,
'msg': f"HTTP Error {e.code} on DELETE request to '{uri}'",
'status': e.code}
return {"ret": False, "msg": f"HTTP Error {e.code} on DELETE request to '{uri}'", "status": e.code}
except URLError as e:
return {'ret': False, 'msg': f"URL Error on DELETE request to '{uri}': '{e.reason}'"}
return {"ret": False, "msg": f"URL Error on DELETE request to '{uri}': '{e.reason}'"}
# Almost all errors should be caught above, but just in case
except Exception as e:
return {'ret': False,
'msg': f"Failed DELETE request to '{uri}': '{e}'"}
return {'ret': True, 'data': data, 'headers': headers}
return {"ret": False, "msg": f"Failed DELETE request to '{uri}': '{e}'"}
return {"ret": True, "data": data, "headers": headers}
def put_request(self, uri, payload, etag=None):
req_headers = dict(PUT_HEADERS)
if etag is not None:
req_headers['If-Match'] = etag
req_headers["If-Match"] = etag
username, password, basic_auth = self._auth_params()
try:
resp = open_url(uri, data=json.dumps(payload),
headers=req_headers, method="PUT",
url_username=username, url_password=password,
force_basic_auth=basic_auth, validate_certs=False,
follow_redirects='all',
use_proxy=True, timeout=self.timeout)
resp = open_url(
uri,
data=json.dumps(payload),
headers=req_headers,
method="PUT",
url_username=username,
url_password=password,
force_basic_auth=basic_auth,
validate_certs=False,
follow_redirects="all",
use_proxy=True,
timeout=self.timeout,
)
headers = {k.lower(): v for (k, v) in resp.info().items()}
except HTTPError as e:
return {'ret': False,
'msg': f"HTTP Error {e.code} on PUT request to '{uri}'",
'status': e.code}
return {"ret": False, "msg": f"HTTP Error {e.code} on PUT request to '{uri}'", "status": e.code}
except URLError as e:
return {'ret': False, 'msg': f"URL Error on PUT request to '{uri}': '{e.reason}'"}
return {"ret": False, "msg": f"URL Error on PUT request to '{uri}': '{e.reason}'"}
# Almost all errors should be caught above, but just in case
except Exception as e:
return {'ret': False,
'msg': f"Failed PUT request to '{uri}': '{e}'"}
return {'ret': True, 'headers': headers, 'resp': resp}
return {"ret": False, "msg": f"Failed PUT request to '{uri}': '{e}'"}
return {"ret": True, "headers": headers, "resp": resp}
def post_request(self, uri, payload, content_type="application/json", timeout=None):
req_headers = dict(POST_HEADERS)
@@ -129,24 +140,28 @@ class OcapiUtils:
else:
request_data = payload
try:
resp = open_url(uri, data=request_data,
headers=req_headers, method="POST",
url_username=username, url_password=password,
force_basic_auth=basic_auth, validate_certs=False,
follow_redirects='all',
use_proxy=True, timeout=self.timeout if timeout is None else timeout)
resp = open_url(
uri,
data=request_data,
headers=req_headers,
method="POST",
url_username=username,
url_password=password,
force_basic_auth=basic_auth,
validate_certs=False,
follow_redirects="all",
use_proxy=True,
timeout=self.timeout if timeout is None else timeout,
)
headers = {k.lower(): v for (k, v) in resp.info().items()}
except HTTPError as e:
return {'ret': False,
'msg': f"HTTP Error {e.code} on POST request to '{uri}'",
'status': e.code}
return {"ret": False, "msg": f"HTTP Error {e.code} on POST request to '{uri}'", "status": e.code}
except URLError as e:
return {'ret': False, 'msg': f"URL Error on POST request to '{uri}': '{e.reason}'"}
return {"ret": False, "msg": f"URL Error on POST request to '{uri}': '{e.reason}'"}
# Almost all errors should be caught above, but just in case
except Exception as e:
return {'ret': False,
'msg': f"Failed POST request to '{uri}': '{e}'"}
return {'ret': True, 'headers': headers, 'resp': resp}
return {"ret": False, "msg": f"Failed POST request to '{uri}': '{e}'"}
return {"ret": True, "headers": headers, "resp": resp}
def get_uri_with_slot_number_query_param(self, uri):
"""Return the URI with proxy slot number added as a query param, if there is one.
@@ -172,29 +187,25 @@ class OcapiUtils:
# Get the resource so that we have the Etag
response = self.get_request(resource_uri)
if 'etag' not in response['headers']:
return {'ret': False, 'msg': 'Etag not found in response.'}
etag = response['headers']['etag']
if response['ret'] is False:
if "etag" not in response["headers"]:
return {"ret": False, "msg": "Etag not found in response."}
etag = response["headers"]["etag"]
if response["ret"] is False:
return response
# Issue the PUT to do the reboot (unless we are in check mode)
if self.module.check_mode:
return {
'ret': True,
'changed': True,
'msg': 'Update not performed in check mode.'
}
payload = {'Reboot': True}
return {"ret": True, "changed": True, "msg": "Update not performed in check mode."}
payload = {"Reboot": True}
response = self.put_request(resource_uri, payload, etag)
if response['ret'] is False:
if response["ret"] is False:
return response
elif command.startswith("PowerMode"):
return self.manage_power_mode(command)
else:
return {'ret': False, 'msg': f"Invalid command: {command}"}
return {"ret": False, "msg": f"Invalid command: {command}"}
return {'ret': True}
return {"ret": True}
def manage_chassis_indicator_led(self, command):
"""Process a command to manage the chassis indicator LED.
@@ -214,91 +225,73 @@ class OcapiUtils:
resource_uri = self.root_uri
resource_uri = self.get_uri_with_slot_number_query_param(resource_uri)
payloads = {
'IndicatorLedOn': {
'ID': 2
},
'IndicatorLedOff': {
'ID': 4
}
}
payloads = {"IndicatorLedOn": {"ID": 2}, "IndicatorLedOff": {"ID": 4}}
response = self.get_request(resource_uri)
if 'etag' not in response['headers']:
return {'ret': False, 'msg': 'Etag not found in response.'}
etag = response['headers']['etag']
if response['ret'] is False:
if "etag" not in response["headers"]:
return {"ret": False, "msg": "Etag not found in response."}
etag = response["headers"]["etag"]
if response["ret"] is False:
return response
data = response['data']
data = response["data"]
if key not in data:
return {'ret': False, 'msg': f"Key {key} not found"}
if 'ID' not in data[key]:
return {'ret': False, 'msg': 'IndicatorLED for resource has no ID.'}
return {"ret": False, "msg": f"Key {key} not found"}
if "ID" not in data[key]:
return {"ret": False, "msg": "IndicatorLED for resource has no ID."}
if command in payloads.keys():
# See if the LED is already set as requested.
current_led_status = data[key]['ID']
if current_led_status == payloads[command]['ID']:
return {'ret': True, 'changed': False}
current_led_status = data[key]["ID"]
if current_led_status == payloads[command]["ID"]:
return {"ret": True, "changed": False}
# Set the LED (unless we are in check mode)
if self.module.check_mode:
return {
'ret': True,
'changed': True,
'msg': 'Update not performed in check mode.'
}
payload = {'IndicatorLED': payloads[command]}
return {"ret": True, "changed": True, "msg": "Update not performed in check mode."}
payload = {"IndicatorLED": payloads[command]}
response = self.put_request(resource_uri, payload, etag)
if response['ret'] is False:
if response["ret"] is False:
return response
else:
return {'ret': False, 'msg': 'Invalid command'}
return {"ret": False, "msg": "Invalid command"}
return {'ret': True}
return {"ret": True}
def manage_power_mode(self, command):
key = "PowerState"
resource_uri = self.get_uri_with_slot_number_query_param(self.root_uri)
payloads = {
"PowerModeNormal": 2,
"PowerModeLow": 4
}
payloads = {"PowerModeNormal": 2, "PowerModeLow": 4}
response = self.get_request(resource_uri)
if 'etag' not in response['headers']:
return {'ret': False, 'msg': 'Etag not found in response.'}
etag = response['headers']['etag']
if response['ret'] is False:
if "etag" not in response["headers"]:
return {"ret": False, "msg": "Etag not found in response."}
etag = response["headers"]["etag"]
if response["ret"] is False:
return response
data = response['data']
data = response["data"]
if key not in data:
return {'ret': False, 'msg': f"Key {key} not found"}
if 'ID' not in data[key]:
return {'ret': False, 'msg': 'PowerState for resource has no ID.'}
return {"ret": False, "msg": f"Key {key} not found"}
if "ID" not in data[key]:
return {"ret": False, "msg": "PowerState for resource has no ID."}
if command in payloads.keys():
# See if the PowerState is already set as requested.
current_power_state = data[key]['ID']
current_power_state = data[key]["ID"]
if current_power_state == payloads[command]:
return {'ret': True, 'changed': False}
return {"ret": True, "changed": False}
# Set the Power State (unless we are in check mode)
if self.module.check_mode:
return {
'ret': True,
'changed': True,
'msg': 'Update not performed in check mode.'
}
payload = {'PowerState': {"ID": payloads[command]}}
return {"ret": True, "changed": True, "msg": "Update not performed in check mode."}
payload = {"PowerState": {"ID": payloads[command]}}
response = self.put_request(resource_uri, payload, etag)
if response['ret'] is False:
if response["ret"] is False:
return response
else:
return {'ret': False, 'msg': f"Invalid command: {command}"}
return {"ret": False, "msg": f"Invalid command: {command}"}
return {'ret': True}
return {"ret": True}
def prepare_multipart_firmware_upload(self, filename):
"""Prepare a multipart/form-data body for OCAPI firmware upload.
@@ -315,13 +308,12 @@ class OcapiUtils:
boundary = str(uuid.uuid4()) # Generate a random boundary
body = f"--{boundary}\r\n"
body += f'Content-Disposition: form-data; name="FirmwareFile"; filename="{to_native(os.path.basename(filename))}"\r\n'
body += 'Content-Type: application/octet-stream\r\n\r\n'
body_bytes = bytearray(body, 'utf-8')
with open(filename, 'rb') as f:
body += "Content-Type: application/octet-stream\r\n\r\n"
body_bytes = bytearray(body, "utf-8")
with open(filename, "rb") as f:
body_bytes += f.read()
body_bytes += bytearray(f"\r\n--{boundary}--", 'utf-8')
return (f"multipart/form-data; boundary={boundary}",
body_bytes)
body_bytes += bytearray(f"\r\n--{boundary}--", "utf-8")
return (f"multipart/form-data; boundary={boundary}", body_bytes)
def upload_firmware_image(self, update_image_path):
"""Perform Firmware Upload to the OCAPI storage device.
@@ -329,22 +321,18 @@ class OcapiUtils:
:param str update_image_path: The path/filename of the firmware image, on the local filesystem.
"""
if not (os.path.exists(update_image_path) and os.path.isfile(update_image_path)):
return {'ret': False, 'msg': 'File does not exist.'}
return {"ret": False, "msg": "File does not exist."}
url = f"{self.root_uri}OperatingSystem"
url = self.get_uri_with_slot_number_query_param(url)
content_type, b_form_data = self.prepare_multipart_firmware_upload(update_image_path)
# Post the firmware (unless we are in check mode)
if self.module.check_mode:
return {
'ret': True,
'changed': True,
'msg': 'Update not performed in check mode.'
}
return {"ret": True, "changed": True, "msg": "Update not performed in check mode."}
result = self.post_request(url, b_form_data, content_type=content_type, timeout=300)
if result['ret'] is False:
if result["ret"] is False:
return result
return {'ret': True}
return {"ret": True}
def update_firmware_image(self):
"""Perform a Firmware Update on the OCAPI storage device."""
@@ -352,25 +340,21 @@ class OcapiUtils:
resource_uri = self.get_uri_with_slot_number_query_param(resource_uri)
# We have to do a GET to obtain the Etag. It's required on the PUT.
response = self.get_request(resource_uri)
if response['ret'] is False:
if response["ret"] is False:
return response
if 'etag' not in response['headers']:
return {'ret': False, 'msg': 'Etag not found in response.'}
etag = response['headers']['etag']
if "etag" not in response["headers"]:
return {"ret": False, "msg": "Etag not found in response."}
etag = response["headers"]["etag"]
# Issue the PUT (unless we are in check mode)
if self.module.check_mode:
return {
'ret': True,
'changed': True,
'msg': 'Update not performed in check mode.'
}
payload = {'FirmwareUpdate': True}
return {"ret": True, "changed": True, "msg": "Update not performed in check mode."}
payload = {"FirmwareUpdate": True}
response = self.put_request(resource_uri, payload, etag)
if response['ret'] is False:
if response["ret"] is False:
return response
return {'ret': True, 'jobUri': response["headers"]["location"]}
return {"ret": True, "jobUri": response["headers"]["location"]}
def activate_firmware_image(self):
"""Perform a Firmware Activate on the OCAPI storage device."""
@@ -378,25 +362,21 @@ class OcapiUtils:
resource_uri = self.get_uri_with_slot_number_query_param(resource_uri)
# We have to do a GET to obtain the Etag. It's required on the PUT.
response = self.get_request(resource_uri)
if 'etag' not in response['headers']:
return {'ret': False, 'msg': 'Etag not found in response.'}
etag = response['headers']['etag']
if response['ret'] is False:
if "etag" not in response["headers"]:
return {"ret": False, "msg": "Etag not found in response."}
etag = response["headers"]["etag"]
if response["ret"] is False:
return response
# Issue the PUT (unless we are in check mode)
if self.module.check_mode:
return {
'ret': True,
'changed': True,
'msg': 'Update not performed in check mode.'
}
payload = {'FirmwareActivate': True}
return {"ret": True, "changed": True, "msg": "Update not performed in check mode."}
payload = {"FirmwareActivate": True}
response = self.put_request(resource_uri, payload, etag)
if response['ret'] is False:
if response["ret"] is False:
return response
return {'ret': True, 'jobUri': response["headers"]["location"]}
return {"ret": True, "jobUri": response["headers"]["location"]}
def get_job_status(self, job_uri):
"""Get the status of a job.
@@ -405,8 +385,8 @@ class OcapiUtils:
"""
job_uri = self.get_uri_with_slot_number_query_param(job_uri)
response = self.get_request(job_uri)
if response['ret'] is False:
if response.get('status') == 404:
if response["ret"] is False:
if response.get("status") == 404:
# Job not found -- assume 0%
return {
"ret": True,
@@ -416,7 +396,7 @@ class OcapiUtils:
"operationHealth": None,
"operationHealthId": None,
"details": "Job does not exist.",
"jobExists": False
"jobExists": False,
}
else:
return response
@@ -432,7 +412,7 @@ class OcapiUtils:
"operationHealth": health_list[0]["Name"] if len(health_list) > 0 else None,
"operationHealthId": health_list[0]["ID"] if len(health_list) > 0 else None,
"details": details,
"jobExists": True
"jobExists": True,
}
return return_value
@@ -442,50 +422,28 @@ class OcapiUtils:
# We have to do a GET to obtain the Etag. It's required on the DELETE.
response = self.get_request(job_uri)
if response['ret'] is True:
if 'etag' not in response['headers']:
return {'ret': False, 'msg': 'Etag not found in response.'}
if response["ret"] is True:
if "etag" not in response["headers"]:
return {"ret": False, "msg": "Etag not found in response."}
else:
etag = response['headers']['etag']
etag = response["headers"]["etag"]
if response['data']['PercentComplete'] != 100:
return {
'ret': False,
'changed': False,
'msg': 'Cannot delete job because it is in progress.'
}
if response["data"]["PercentComplete"] != 100:
return {"ret": False, "changed": False, "msg": "Cannot delete job because it is in progress."}
if response['ret'] is False:
if response['status'] == 404:
return {
'ret': True,
'changed': False,
'msg': 'Job already deleted.'
}
if response["ret"] is False:
if response["status"] == 404:
return {"ret": True, "changed": False, "msg": "Job already deleted."}
return response
if self.module.check_mode:
return {
'ret': True,
'changed': True,
'msg': 'Update not performed in check mode.'
}
return {"ret": True, "changed": True, "msg": "Update not performed in check mode."}
# Do the DELETE (unless we are in check mode)
response = self.delete_request(job_uri, etag)
if response['ret'] is False:
if response['status'] == 404:
return {
'ret': True,
'changed': False
}
elif response['status'] == 409:
return {
'ret': False,
'changed': False,
'msg': 'Cannot delete job because it is in progress.'
}
if response["ret"] is False:
if response["status"] == 404:
return {"ret": True, "changed": False}
elif response["status"] == 409:
return {"ret": False, "changed": False, "msg": "Cannot delete job because it is in progress."}
return response
return {
'ret': True,
'changed': True
}
return {"ret": True, "changed": True}

View File

@@ -14,28 +14,28 @@ import time
class OneAndOneResources:
firewall_policy = 'firewall_policy'
load_balancer = 'load_balancer'
monitoring_policy = 'monitoring_policy'
private_network = 'private_network'
public_ip = 'public_ip'
role = 'role'
server = 'server'
user = 'user'
vpn = 'vpn'
firewall_policy = "firewall_policy"
load_balancer = "load_balancer"
monitoring_policy = "monitoring_policy"
private_network = "private_network"
public_ip = "public_ip"
role = "role"
server = "server"
user = "user"
vpn = "vpn"
def get_resource(oneandone_conn, resource_type, resource_id):
switcher = {
'firewall_policy': oneandone_conn.get_firewall,
'load_balancer': oneandone_conn.get_load_balancer,
'monitoring_policy': oneandone_conn.get_monitoring_policy,
'private_network': oneandone_conn.get_private_network,
'public_ip': oneandone_conn.get_public_ip,
'role': oneandone_conn.get_role,
'server': oneandone_conn.get_server,
'user': oneandone_conn.get_user,
'vpn': oneandone_conn.get_vpn,
"firewall_policy": oneandone_conn.get_firewall,
"load_balancer": oneandone_conn.get_load_balancer,
"monitoring_policy": oneandone_conn.get_monitoring_policy,
"private_network": oneandone_conn.get_private_network,
"public_ip": oneandone_conn.get_public_ip,
"role": oneandone_conn.get_role,
"server": oneandone_conn.get_server,
"user": oneandone_conn.get_user,
"vpn": oneandone_conn.get_vpn,
}
return switcher.get(resource_type, None)(resource_id)
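Note that switcher.get(resource_type, None) returns None for an unknown resource_type, so the trailing call raises a bare TypeError. A slightly more defensive variant, as a sketch rather than what the module currently does:

getter = switcher.get(resource_type)
if getter is None:
    raise Exception(f"Unsupported resource type: {resource_type}")
return getter(resource_id)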
@@ -47,10 +47,10 @@ def get_datacenter(oneandone_conn, datacenter, full_object=False):
Returns the datacenter ID.
"""
for _datacenter in oneandone_conn.list_datacenters():
if datacenter in (_datacenter['id'], _datacenter['country_code']):
if datacenter in (_datacenter["id"], _datacenter["country_code"]):
if full_object:
return _datacenter
return _datacenter['id']
return _datacenter["id"]
def get_fixed_instance_size(oneandone_conn, fixed_instance_size, full_object=False):
@@ -59,11 +59,10 @@ def get_fixed_instance_size(oneandone_conn, fixed_instance_size, full_object=Fal
Return the instance size ID.
"""
for _fixed_instance_size in oneandone_conn.fixed_server_flavors():
if fixed_instance_size in (_fixed_instance_size['id'],
_fixed_instance_size['name']):
if fixed_instance_size in (_fixed_instance_size["id"], _fixed_instance_size["name"]):
if full_object:
return _fixed_instance_size
return _fixed_instance_size['id']
return _fixed_instance_size["id"]
def get_appliance(oneandone_conn, appliance, full_object=False):
@@ -71,11 +70,11 @@ def get_appliance(oneandone_conn, appliance, full_object=False):
Validates the appliance exists by ID or name.
Return the appliance ID.
"""
for _appliance in oneandone_conn.list_appliances(q='IMAGE'):
if appliance in (_appliance['id'], _appliance['name']):
for _appliance in oneandone_conn.list_appliances(q="IMAGE"):
if appliance in (_appliance["id"], _appliance["name"]):
if full_object:
return _appliance
return _appliance['id']
return _appliance["id"]
def get_private_network(oneandone_conn, private_network, full_object=False):
@@ -84,11 +83,10 @@ def get_private_network(oneandone_conn, private_network, full_object=False):
Return the private network ID.
"""
for _private_network in oneandone_conn.list_private_networks():
if private_network in (_private_network['name'],
_private_network['id']):
if private_network in (_private_network["name"], _private_network["id"]):
if full_object:
return _private_network
return _private_network['id']
return _private_network["id"]
def get_monitoring_policy(oneandone_conn, monitoring_policy, full_object=False):
@@ -97,11 +95,10 @@ def get_monitoring_policy(oneandone_conn, monitoring_policy, full_object=False):
Return the monitoring policy ID.
"""
for _monitoring_policy in oneandone_conn.list_monitoring_policies():
if monitoring_policy in (_monitoring_policy['name'],
_monitoring_policy['id']):
if monitoring_policy in (_monitoring_policy["name"], _monitoring_policy["id"]):
if full_object:
return _monitoring_policy
return _monitoring_policy['id']
return _monitoring_policy["id"]
def get_firewall_policy(oneandone_conn, firewall_policy, full_object=False):
@@ -110,11 +107,10 @@ def get_firewall_policy(oneandone_conn, firewall_policy, full_object=False):
Return the firewall policy ID.
"""
for _firewall_policy in oneandone_conn.list_firewall_policies():
if firewall_policy in (_firewall_policy['name'],
_firewall_policy['id']):
if firewall_policy in (_firewall_policy["name"], _firewall_policy["id"]):
if full_object:
return _firewall_policy
return _firewall_policy['id']
return _firewall_policy["id"]
def get_load_balancer(oneandone_conn, load_balancer, full_object=False):
@@ -123,11 +119,10 @@ def get_load_balancer(oneandone_conn, load_balancer, full_object=False):
Return the load balancer ID.
"""
for _load_balancer in oneandone_conn.list_load_balancers():
if load_balancer in (_load_balancer['name'],
_load_balancer['id']):
if load_balancer in (_load_balancer["name"], _load_balancer["id"]):
if full_object:
return _load_balancer
return _load_balancer['id']
return _load_balancer["id"]
def get_server(oneandone_conn, instance, full_object=False):
@@ -136,10 +131,10 @@ def get_server(oneandone_conn, instance, full_object=False):
Returns the server if one was found.
"""
for server in oneandone_conn.list_servers(per_page=1000):
if instance in (server['id'], server['name']):
if instance in (server["id"], server["name"]):
if full_object:
return server
return server['id']
return server["id"]
def get_user(oneandone_conn, user, full_object=False):
@@ -148,10 +143,10 @@ def get_user(oneandone_conn, user, full_object=False):
Returns the user if one was found.
"""
for _user in oneandone_conn.list_users(per_page=1000):
if user in (_user['id'], _user['name']):
if user in (_user["id"], _user["name"]):
if full_object:
return _user
return _user['id']
return _user["id"]
def get_role(oneandone_conn, role, full_object=False):
@@ -161,10 +156,10 @@ def get_role(oneandone_conn, role, full_object=False):
Returns the role if one was found, else None.
"""
for _role in oneandone_conn.list_roles(per_page=1000):
if role in (_role['id'], _role['name']):
if role in (_role["id"], _role["name"]):
if full_object:
return _role
return _role['id']
return _role["id"]
def get_vpn(oneandone_conn, vpn, full_object=False):
@@ -173,10 +168,10 @@ def get_vpn(oneandone_conn, vpn, full_object=False):
Returns the vpn if one was found.
"""
for _vpn in oneandone_conn.list_vpns(per_page=1000):
if vpn in (_vpn['id'], _vpn['name']):
if vpn in (_vpn["id"], _vpn["name"]):
if full_object:
return _vpn
return _vpn['id']
return _vpn["id"]
def get_public_ip(oneandone_conn, public_ip, full_object=False):
@@ -185,17 +180,13 @@ def get_public_ip(oneandone_conn, public_ip, full_object=False):
Returns the public ip if one was found.
"""
for _public_ip in oneandone_conn.list_public_ips(per_page=1000):
if public_ip in (_public_ip['id'], _public_ip['ip']):
if public_ip in (_public_ip["id"], _public_ip["ip"]):
if full_object:
return _public_ip
return _public_ip['id']
return _public_ip["id"]
def wait_for_resource_creation_completion(oneandone_conn,
resource_type,
resource_id,
wait_timeout,
wait_interval):
def wait_for_resource_creation_completion(oneandone_conn, resource_type, resource_id, wait_timeout, wait_interval):
"""
Waits for the resource create operation to complete based on the timeout period.
"""
@@ -207,33 +198,25 @@ def wait_for_resource_creation_completion(oneandone_conn,
resource = get_resource(oneandone_conn, resource_type, resource_id)
if resource_type == OneAndOneResources.server:
resource_state = resource['status']['state']
resource_state = resource["status"]["state"]
else:
resource_state = resource['state']
resource_state = resource["state"]
if ((resource_type == OneAndOneResources.server and resource_state.lower() == 'powered_on') or
(resource_type != OneAndOneResources.server and resource_state.lower() == 'active')):
if (resource_type == OneAndOneResources.server and resource_state.lower() == "powered_on") or (
resource_type != OneAndOneResources.server and resource_state.lower() == "active"
):
return
elif resource_state.lower() == 'failed':
raise Exception(f'{resource_type} creation failed for {resource_id}')
elif resource_state.lower() in ('active',
'enabled',
'deploying',
'configuring'):
elif resource_state.lower() == "failed":
raise Exception(f"{resource_type} creation failed for {resource_id}")
elif resource_state.lower() in ("active", "enabled", "deploying", "configuring"):
continue
else:
raise Exception(
f'Unknown {resource_type} state {resource_state}')
raise Exception(f"Unknown {resource_type} state {resource_state}")
raise Exception(
f'Timed out waiting for {resource_type} completion for {resource_id}')
raise Exception(f"Timed out waiting for {resource_type} completion for {resource_id}")
def wait_for_resource_deletion_completion(oneandone_conn,
resource_type,
resource_id,
wait_timeout,
wait_interval):
def wait_for_resource_deletion_completion(oneandone_conn, resource_type, resource_id, wait_timeout, wait_interval):
"""
Waits for the resource delete operation to complete based on the timeout period.
"""
@@ -242,23 +225,21 @@ def wait_for_resource_deletion_completion(oneandone_conn,
time.sleep(wait_interval)
# Refresh the operation info
logs = oneandone_conn.list_logs(q='DELETE',
period='LAST_HOUR',
sort='-start_date')
logs = oneandone_conn.list_logs(q="DELETE", period="LAST_HOUR", sort="-start_date")
if resource_type == OneAndOneResources.server:
_type = 'VM'
_type = "VM"
elif resource_type == OneAndOneResources.private_network:
_type = 'PRIVATENETWORK'
_type = "PRIVATENETWORK"
else:
raise Exception(
f'Unsupported wait_for delete operation for {resource_type} resource')
raise Exception(f"Unsupported wait_for delete operation for {resource_type} resource")
for log in logs:
if (log['resource']['id'] == resource_id and
log['action'] == 'DELETE' and
log['type'] == _type and
log['status']['state'] == 'OK'):
if (
log["resource"]["id"] == resource_id
and log["action"] == "DELETE"
and log["type"] == _type
and log["status"]["state"] == "OK"
):
return
raise Exception(
f'Timed out waiting for {resource_type} deletion for {resource_id}')
raise Exception(f"Timed out waiting for {resource_type} deletion for {resource_id}")

View File

@@ -20,6 +20,7 @@ from collections.abc import Mapping
HPE_ONEVIEW_IMP_ERR = None
try:
from hpOneView.oneview_client import OneViewClient
HAS_HPE_ONEVIEW = True
except ImportError:
HPE_ONEVIEW_IMP_ERR = traceback.format_exc()
@@ -46,7 +47,7 @@ def transform_list_to_dict(list_):
if isinstance(value, Mapping):
ret.update(value)
else:
ret[to_native(value, errors='surrogate_or_strict')] = True
ret[to_native(value, errors="surrogate_or_strict")] = True
return ret
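From the visible branches, plain entries become keys mapped to True while mapping entries are merged in, e.g.:

transform_list_to_dict(["ethernetNetworks", {"scopes": True}])
# -> {"ethernetNetworks": True, "scopes": True}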
@@ -121,7 +122,7 @@ class OneViewModuleException(Exception):
Attributes:
msg (str): Exception message.
oneview_response (dict): OneView rest response.
"""
"""
def __init__(self, data):
self.msg = None
@@ -133,7 +134,7 @@ class OneViewModuleException(Exception):
self.oneview_response = data
if data and isinstance(data, dict):
self.msg = data.get('message')
self.msg = data.get("message")
if self.oneview_response:
Exception.__init__(self, self.msg, self.oneview_response)
@@ -163,6 +164,7 @@ class OneViewModuleValueError(OneViewModuleException):
Attributes:
msg (str): Exception message.
"""
pass
@@ -174,27 +176,28 @@ class OneViewModuleResourceNotFound(OneViewModuleException):
Attributes:
msg (str): Exception message.
"""
pass
class OneViewModuleBase(metaclass=abc.ABCMeta):
MSG_CREATED = 'Resource created successfully.'
MSG_UPDATED = 'Resource updated successfully.'
MSG_DELETED = 'Resource deleted successfully.'
MSG_ALREADY_PRESENT = 'Resource is already present.'
MSG_ALREADY_ABSENT = 'Resource is already absent.'
MSG_DIFF_AT_KEY = 'Difference found at key \'{0}\'. '
MSG_CREATED = "Resource created successfully."
MSG_UPDATED = "Resource updated successfully."
MSG_DELETED = "Resource deleted successfully."
MSG_ALREADY_PRESENT = "Resource is already present."
MSG_ALREADY_ABSENT = "Resource is already absent."
MSG_DIFF_AT_KEY = "Difference found at key '{0}'. "
ONEVIEW_COMMON_ARGS = dict(
config=dict(type='path'),
hostname=dict(type='str'),
username=dict(type='str'),
password=dict(type='str', no_log=True),
api_version=dict(type='int'),
image_streamer_hostname=dict(type='str')
config=dict(type="path"),
hostname=dict(type="str"),
username=dict(type="str"),
password=dict(type="str", no_log=True),
api_version=dict(type="int"),
image_streamer_hostname=dict(type="str"),
)
ONEVIEW_VALIDATE_ETAG_ARGS = dict(validate_etag=dict(type='bool', default=True))
ONEVIEW_VALIDATE_ETAG_ARGS = dict(validate_etag=dict(type="bool", default=True))
resource_client = None
@@ -212,19 +215,18 @@ class OneViewModuleBase(metaclass=abc.ABCMeta):
self._check_hpe_oneview_sdk()
self._create_oneview_client()
self.state = self.module.params.get('state')
self.data = self.module.params.get('data')
self.state = self.module.params.get("state")
self.data = self.module.params.get("data")
# Preload params for get_all - used by facts
self.facts_params = self.module.params.get('params') or {}
self.facts_params = self.module.params.get("params") or {}
# Preload options as dict - used by facts
self.options = transform_list_to_dict(self.module.params.get('options'))
self.options = transform_list_to_dict(self.module.params.get("options"))
self.validate_etag_support = validate_etag_support
def _build_argument_spec(self, additional_arg_spec, validate_etag_support):
merged_arg_spec = dict()
merged_arg_spec.update(self.ONEVIEW_COMMON_ARGS)
@@ -238,19 +240,21 @@ class OneViewModuleBase(metaclass=abc.ABCMeta):
def _check_hpe_oneview_sdk(self):
if not HAS_HPE_ONEVIEW:
self.module.fail_json(msg=missing_required_lib('hpOneView'), exception=HPE_ONEVIEW_IMP_ERR)
self.module.fail_json(msg=missing_required_lib("hpOneView"), exception=HPE_ONEVIEW_IMP_ERR)
def _create_oneview_client(self):
if self.module.params.get('hostname'):
config = dict(ip=self.module.params['hostname'],
credentials=dict(userName=self.module.params['username'], password=self.module.params['password']),
api_version=self.module.params['api_version'],
image_streamer_ip=self.module.params['image_streamer_hostname'])
if self.module.params.get("hostname"):
config = dict(
ip=self.module.params["hostname"],
credentials=dict(userName=self.module.params["username"], password=self.module.params["password"]),
api_version=self.module.params["api_version"],
image_streamer_ip=self.module.params["image_streamer_hostname"],
)
self.oneview_client = OneViewClient(config)
elif not self.module.params['config']:
elif not self.module.params["config"]:
self.oneview_client = OneViewClient.from_environment_variables()
else:
self.oneview_client = OneViewClient.from_json_file(self.module.params['config'])
self.oneview_client = OneViewClient.from_json_file(self.module.params["config"])
@abc.abstractmethod
def execute_module(self):
@@ -275,21 +279,21 @@ class OneViewModuleBase(metaclass=abc.ABCMeta):
"""
try:
if self.validate_etag_support:
if not self.module.params.get('validate_etag'):
if not self.module.params.get("validate_etag"):
self.oneview_client.connection.disable_etag_validation()
result = self.execute_module()
if "changed" not in result:
result['changed'] = False
result["changed"] = False
self.module.exit_json(**result)
except OneViewModuleException as exception:
error_msg = '; '.join(to_native(e) for e in exception.args)
error_msg = "; ".join(to_native(e) for e in exception.args)
self.module.fail_json(msg=error_msg, exception=traceback.format_exc())
def resource_absent(self, resource, method='delete'):
def resource_absent(self, resource, method="delete"):
"""
Generic implementation of the absent state for the OneView resources.
@@ -315,10 +319,10 @@ class OneViewModuleBase(metaclass=abc.ABCMeta):
:return: The resource found or None.
"""
result = self.resource_client.get_by('name', name)
result = self.resource_client.get_by("name", name)
return result[0] if result else None
def resource_present(self, resource, fact_name, create_method='create'):
def resource_present(self, resource, fact_name, create_method="create"):
"""
Generic implementation of the present state for the OneView resources.
@@ -351,11 +355,7 @@ class OneViewModuleBase(metaclass=abc.ABCMeta):
changed = True
msg = self.MSG_UPDATED
return dict(
msg=msg,
changed=changed,
ansible_facts={fact_name: resource}
)
return dict(msg=msg, changed=changed, ansible_facts={fact_name: resource})
def resource_scopes_set(self, state, fact_name, scope_uris):
"""
@@ -370,13 +370,13 @@ class OneViewModuleBase(metaclass=abc.ABCMeta):
"""
if scope_uris is None:
scope_uris = []
resource = state['ansible_facts'][fact_name]
operation_data = dict(operation='replace', path='/scopeUris', value=scope_uris)
resource = state["ansible_facts"][fact_name]
operation_data = dict(operation="replace", path="/scopeUris", value=scope_uris)
if resource['scopeUris'] is None or set(resource['scopeUris']) != set(scope_uris):
state['ansible_facts'][fact_name] = self.resource_client.patch(resource['uri'], **operation_data)
state['changed'] = True
state['msg'] = self.MSG_UPDATED
if resource["scopeUris"] is None or set(resource["scopeUris"]) != set(scope_uris):
state["ansible_facts"][fact_name] = self.resource_client.patch(resource["uri"], **operation_data)
state["changed"] = True
state["msg"] = self.MSG_UPDATED
return state

View File

@@ -13,22 +13,26 @@ from ansible.module_utils.urls import fetch_url
def online_argument_spec():
return dict(
api_token=dict(required=True, fallback=(env_fallback, ['ONLINE_TOKEN', 'ONLINE_API_KEY', 'ONLINE_OAUTH_TOKEN', 'ONLINE_API_TOKEN']),
no_log=True, aliases=['oauth_token']),
api_url=dict(fallback=(env_fallback, ['ONLINE_API_URL']), default='https://api.online.net', aliases=['base_url']),
api_timeout=dict(type='int', default=30, aliases=['timeout']),
validate_certs=dict(default=True, type='bool'),
api_token=dict(
required=True,
fallback=(env_fallback, ["ONLINE_TOKEN", "ONLINE_API_KEY", "ONLINE_OAUTH_TOKEN", "ONLINE_API_TOKEN"]),
no_log=True,
aliases=["oauth_token"],
),
api_url=dict(
fallback=(env_fallback, ["ONLINE_API_URL"]), default="https://api.online.net", aliases=["base_url"]
),
api_timeout=dict(type="int", default=30, aliases=["timeout"]),
validate_certs=dict(default=True, type="bool"),
)
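A minimal sketch of a module wiring this spec into the Online client defined below (the concrete modules set self.name to pick the API collection; values illustrative):

from ansible.module_utils.basic import AnsibleModule

module = AnsibleModule(argument_spec=online_argument_spec(), supports_check_mode=True)
api = Online(module)
api.name = "server"  # get_resources() then fetches /server
servers = api.get_resources()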
class OnlineException(Exception):
def __init__(self, message):
self.message = message
class Response:
def __init__(self, resp, info):
self.body = None
if resp:
@@ -56,26 +60,26 @@ class Response:
class Online:
def __init__(self, module):
self.module = module
self.headers = {
'Authorization': f"Bearer {self.module.params.get('api_token')}",
'User-Agent': self.get_user_agent_string(module),
'Content-type': 'application/json',
"Authorization": f"Bearer {self.module.params.get('api_token')}",
"User-Agent": self.get_user_agent_string(module),
"Content-type": "application/json",
}
self.name = None
def get_resources(self):
results = self.get(f'/{self.name}')
results = self.get(f"/{self.name}")
if not results.ok:
raise OnlineException(
f"Error fetching {self.name} ({self.module.params.get('api_url')}/{self.name}) [{results.status_code}: {results.json['message']}]")
f"Error fetching {self.name} ({self.module.params.get('api_url')}/{self.name}) [{results.status_code}: {results.json['message']}]"
)
return results.json
def _url_builder(self, path):
if path[0] == '/':
if path[0] == "/":
path = path[1:]
return f"{self.module.params.get('api_url')}/{path}"
@@ -87,13 +91,17 @@ class Online:
self.headers.update(headers)
resp, info = fetch_url(
self.module, url, data=data, headers=self.headers, method=method,
timeout=self.module.params.get('api_timeout')
self.module,
url,
data=data,
headers=self.headers,
method=method,
timeout=self.module.params.get("api_timeout"),
)
# Exceptions in fetch_url may result in a status -1; this ensures a proper error is reported to the user in all cases
if info['status'] == -1:
self.module.fail_json(msg=info['msg'])
if info["status"] == -1:
self.module.fail_json(msg=info["msg"])
return Response(resp, info)
@@ -102,16 +110,16 @@ class Online:
return f"ansible {module.ansible_version} Python {sys.version.split(' ', 1)[0]}"
def get(self, path, data=None, headers=None):
return self.send('GET', path, data, headers)
return self.send("GET", path, data, headers)
def put(self, path, data=None, headers=None):
return self.send('PUT', path, data, headers)
return self.send("PUT", path, data, headers)
def post(self, path, data=None, headers=None):
return self.send('POST', path, data, headers)
return self.send("POST", path, data, headers)
def delete(self, path, data=None, headers=None):
return self.send('DELETE', path, data, headers)
return self.send("DELETE", path, data, headers)
def patch(self, path, data=None, headers=None):
return self.send("PATCH", path, data, headers)

View File

@@ -13,7 +13,19 @@ from os import environ
from ansible.module_utils.basic import AnsibleModule
IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS']
IMAGE_STATES = [
"INIT",
"READY",
"USED",
"DISABLED",
"LOCKED",
"ERROR",
"CLONE",
"DELETE",
"USED_PERS",
"LOCKED_USED",
"LOCKED_USED_PERS",
]
HAS_PYONE = True
try:
@@ -29,8 +41,10 @@ except ImportError:
# There are either lists of dictionaries (length > 1) or just dictionaries.
def flatten(to_flatten, extract=False):
"""Flattens nested lists (with optional value extraction)."""
def recurse(to_flatten):
return sum(map(recurse, to_flatten), []) if isinstance(to_flatten, list) else [to_flatten]
value = recurse(to_flatten)
if extract and len(value) == 1:
return value[0]
@@ -41,6 +55,7 @@ def flatten(to_flatten, extract=False):
# It renders JSON-like template representation into OpenNebula's template syntax (string).
def render(to_render):
"""Converts dictionary to OpenNebula template."""
def recurse(to_render):
for key, value in sorted(to_render.items()):
if value is None:
@@ -53,11 +68,12 @@ def render(to_render):
yield f"{key}=[{','.join(recurse(item))}]"
continue
if isinstance(value, str):
_value = value.replace('\\', '\\\\').replace('"', '\\"')
_value = value.replace("\\", "\\\\").replace('"', '\\"')
yield f'{key}="{_value}"'
continue
yield f'{key}="{value}"'
return '\n'.join(recurse(to_render))
return "\n".join(recurse(to_render))
class OpenNebulaModule:
@@ -68,26 +84,27 @@ class OpenNebulaModule:
"""
common_args = dict(
api_url=dict(type='str', aliases=['api_endpoint'], default=environ.get("ONE_URL")),
api_username=dict(type='str', default=environ.get("ONE_USERNAME")),
api_password=dict(type='str', no_log=True, aliases=['api_token'], default=environ.get("ONE_PASSWORD")),
validate_certs=dict(default=True, type='bool'),
wait_timeout=dict(type='int', default=300),
api_url=dict(type="str", aliases=["api_endpoint"], default=environ.get("ONE_URL")),
api_username=dict(type="str", default=environ.get("ONE_USERNAME")),
api_password=dict(type="str", no_log=True, aliases=["api_token"], default=environ.get("ONE_PASSWORD")),
validate_certs=dict(default=True, type="bool"),
wait_timeout=dict(type="int", default=300),
)
def __init__(self, argument_spec, supports_check_mode=False, mutually_exclusive=None, required_one_of=None, required_if=None):
def __init__(
self, argument_spec, supports_check_mode=False, mutually_exclusive=None, required_one_of=None, required_if=None
):
module_args = OpenNebulaModule.common_args.copy()
module_args.update(argument_spec)
self.module = AnsibleModule(argument_spec=module_args,
supports_check_mode=supports_check_mode,
mutually_exclusive=mutually_exclusive,
required_one_of=required_one_of,
required_if=required_if)
self.result = dict(changed=False,
original_message='',
message='')
self.module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=supports_check_mode,
mutually_exclusive=mutually_exclusive,
required_one_of=required_one_of,
required_if=required_if,
)
self.result = dict(changed=False, original_message="", message="")
self.one = self.create_one_client()
self.resolved_parameters = self.resolve_parameters()
@@ -101,7 +118,7 @@ class OpenNebulaModule:
"""
# Context required to skip SSL validation; old Python versions won't validate anyway.
if hasattr(ssl, '_create_unverified_context'):
if hasattr(ssl, "_create_unverified_context"):
no_ssl_validation_context = ssl._create_unverified_context()
else:
no_ssl_validation_context = None
@@ -144,7 +161,7 @@ class OpenNebulaModule:
Args:
msg: human readable failure reason.
"""
if hasattr(self, 'one'):
if hasattr(self, "one"):
self.close_one_client()
self.module.fail_json(msg=msg)
@@ -153,7 +170,7 @@ class OpenNebulaModule:
Utility exit method, will ensure pyone is properly closed before exiting.
"""
if hasattr(self, 'one'):
if hasattr(self, "one"):
self.close_one_client()
self.module.exit_json(**self.result)
@@ -169,11 +186,11 @@ class OpenNebulaModule:
resolved_params = dict(self.module.params)
if 'cluster_name' in self.module.params:
if "cluster_name" in self.module.params:
clusters = self.one.clusterpool.info()
for cluster in clusters.CLUSTER:
if cluster.NAME == self.module.params.get('cluster_name'):
resolved_params['cluster_id'] = cluster.ID
if cluster.NAME == self.module.params.get("cluster_name"):
resolved_params["cluster_id"] = cluster.ID
return resolved_params
@@ -196,14 +213,14 @@ class OpenNebulaModule:
return self.resolved_parameters.get(name)
def get_host_by_name(self, name):
'''
"""
Returns a host given its name.
Args:
name: the name of the host
Returns: the host object or None if the host is absent.
'''
"""
hosts = self.one.hostpool.info()
for h in hosts.HOST:
if h.NAME == name:
@@ -226,14 +243,14 @@ class OpenNebulaModule:
return None
def get_template_by_name(self, name):
'''
"""
Returns a template given its name.
Args:
name: the name of the template
Returns: the template object or None if the host is absent.
'''
"""
templates = self.one.templatepool.info()
for t in templates.TEMPLATE:
if t.NAME == name:
@@ -262,7 +279,7 @@ class OpenNebulaModule:
if isinstance(value, dict):
self.cast_template(template[key])
elif isinstance(value, list):
template[key] = ', '.join(value)
template[key] = ", ".join(value)
elif not isinstance(value, str):
template[key] = str(value)
@@ -290,9 +307,16 @@ class OpenNebulaModule:
return True
return not (desired == intersection)
def wait_for_state(self, element_name, state, state_name, target_states,
invalid_states=None, transition_states=None,
wait_timeout=None):
def wait_for_state(
self,
element_name,
state,
state_name,
target_states,
invalid_states=None,
transition_states=None,
wait_timeout=None,
):
"""
Args:
element_name: the name of the object we are waiting for: HOST, VM, etc.
@@ -313,11 +337,11 @@ class OpenNebulaModule:
current_state = state()
if current_state in invalid_states:
self.fail(f'invalid {element_name} state {state_name(current_state)}')
self.fail(f"invalid {element_name} state {state_name(current_state)}")
if transition_states:
if current_state not in transition_states:
self.fail(f'invalid {element_name} transition state {state_name(current_state)}')
self.fail(f"invalid {element_name} transition state {state_name(current_state)}")
if current_state in target_states:
return True
@@ -353,17 +377,17 @@ class OpenNebulaModule:
"""
list_of_id = []
if element == 'VMS':
if element == "VMS":
image_list = image.VMS
if element == 'CLONES':
if element == "CLONES":
image_list = image.CLONES
if element == 'APP_CLONES':
if element == "APP_CLONES":
image_list = image.APP_CLONES
for iter in image_list.ID:
list_of_id.append(
# These are optional, so first check for presence
getattr(iter, 'ID', 'Null'),
getattr(iter, "ID", "Null"),
)
return list_of_id
@@ -374,16 +398,18 @@ class OpenNebulaModule:
list_of_snapshots = []
for iter in image.SNAPSHOTS.SNAPSHOT:
list_of_snapshots.append({
'date': iter['DATE'],
'parent': iter['PARENT'],
'size': iter['SIZE'],
# These are optional, so first check for presence
'allow_orhans': getattr(image.SNAPSHOTS, 'ALLOW_ORPHANS', 'Null'),
'children': getattr(iter, 'CHILDREN', 'Null'),
'active': getattr(iter, 'ACTIVE', 'Null'),
'name': getattr(iter, 'NAME', 'Null'),
})
list_of_snapshots.append(
{
"date": iter["DATE"],
"parent": iter["PARENT"],
"size": iter["SIZE"],
# These are optional, so first check for presence
"allow_orhans": getattr(image.SNAPSHOTS, "ALLOW_ORPHANS", "Null"),
"children": getattr(iter, "CHILDREN", "Null"),
"active": getattr(iter, "ACTIVE", "Null"),
"name": getattr(iter, "NAME", "Null"),
}
)
return list_of_snapshots
def get_image_info(self, image):
@@ -393,43 +419,43 @@ class OpenNebulaModule:
Returns: a copy of the parameters that includes the resolved parameters.
"""
info = {
'id': image.ID,
'name': image.NAME,
'state': IMAGE_STATES[image.STATE],
'running_vms': image.RUNNING_VMS,
'used': bool(image.RUNNING_VMS),
'user_name': image.UNAME,
'user_id': image.UID,
'group_name': image.GNAME,
'group_id': image.GID,
'permissions': {
'owner_u': image.PERMISSIONS.OWNER_U,
'owner_m': image.PERMISSIONS.OWNER_M,
'owner_a': image.PERMISSIONS.OWNER_A,
'group_u': image.PERMISSIONS.GROUP_U,
'group_m': image.PERMISSIONS.GROUP_M,
'group_a': image.PERMISSIONS.GROUP_A,
'other_u': image.PERMISSIONS.OTHER_U,
'other_m': image.PERMISSIONS.OTHER_M,
'other_a': image.PERMISSIONS.OTHER_A
"id": image.ID,
"name": image.NAME,
"state": IMAGE_STATES[image.STATE],
"running_vms": image.RUNNING_VMS,
"used": bool(image.RUNNING_VMS),
"user_name": image.UNAME,
"user_id": image.UID,
"group_name": image.GNAME,
"group_id": image.GID,
"permissions": {
"owner_u": image.PERMISSIONS.OWNER_U,
"owner_m": image.PERMISSIONS.OWNER_M,
"owner_a": image.PERMISSIONS.OWNER_A,
"group_u": image.PERMISSIONS.GROUP_U,
"group_m": image.PERMISSIONS.GROUP_M,
"group_a": image.PERMISSIONS.GROUP_A,
"other_u": image.PERMISSIONS.OTHER_U,
"other_m": image.PERMISSIONS.OTHER_M,
"other_a": image.PERMISSIONS.OTHER_A,
},
'type': image.TYPE,
'disk_type': image.DISK_TYPE,
'persistent': image.PERSISTENT,
'regtime': image.REGTIME,
'source': image.SOURCE,
'path': image.PATH,
'fstype': getattr(image, 'FSTYPE', 'Null'),
'size': image.SIZE,
'cloning_ops': image.CLONING_OPS,
'cloning_id': image.CLONING_ID,
'target_snapshot': image.TARGET_SNAPSHOT,
'datastore_id': image.DATASTORE_ID,
'datastore': image.DATASTORE,
'vms': self.get_image_list_id(image, 'VMS'),
'clones': self.get_image_list_id(image, 'CLONES'),
'app_clones': self.get_image_list_id(image, 'APP_CLONES'),
'snapshots': self.get_image_snapshots_list(image),
'template': image.TEMPLATE,
"type": image.TYPE,
"disk_type": image.DISK_TYPE,
"persistent": image.PERSISTENT,
"regtime": image.REGTIME,
"source": image.SOURCE,
"path": image.PATH,
"fstype": getattr(image, "FSTYPE", "Null"),
"size": image.SIZE,
"cloning_ops": image.CLONING_OPS,
"cloning_id": image.CLONING_ID,
"target_snapshot": image.TARGET_SNAPSHOT,
"datastore_id": image.DATASTORE_ID,
"datastore": image.DATASTORE,
"vms": self.get_image_list_id(image, "VMS"),
"clones": self.get_image_list_id(image, "CLONES"),
"app_clones": self.get_image_list_id(image, "APP_CLONES"),
"snapshots": self.get_image_snapshots_list(image),
"template": image.TEMPLATE,
}
return info

View File

@@ -14,6 +14,7 @@ import logging
import logging.config
import os
import tempfile
# (TODO: remove next line!)
from datetime import datetime # noqa: F401, pylint: disable=unused-import
from operator import eq
@@ -117,9 +118,7 @@ def get_common_arg_spec(supports_create=False, supports_wait=False):
if supports_wait:
common_args.update(
wait=dict(type="bool", default=True),
wait_timeout=dict(
type="int", default=MAX_WAIT_TIMEOUT_IN_SECONDS
),
wait_timeout=dict(type="int", default=MAX_WAIT_TIMEOUT_IN_SECONDS),
wait_until=dict(type="str"),
)
@@ -156,9 +155,7 @@ def get_oci_config(module, service_client_class=None):
if not config_file:
if "OCI_CONFIG_FILE" in os.environ:
config_file = os.environ["OCI_CONFIG_FILE"]
_debug(
f"Config file through OCI_CONFIG_FILE environment variable - {config_file}"
)
_debug(f"Config file through OCI_CONFIG_FILE environment variable - {config_file}")
else:
config_file = "~/.oci/config"
_debug(f"Config file (fallback) - {config_file} ")
@@ -170,9 +167,7 @@ def get_oci_config(module, service_client_class=None):
else:
config_profile = "DEFAULT"
try:
config = oci.config.from_file(
file_location=config_file, profile_name=config_profile
)
config = oci.config.from_file(file_location=config_file, profile_name=config_profile)
except (
ConfigFileNotFound,
InvalidConfig,
@@ -183,9 +178,7 @@ def get_oci_config(module, service_client_class=None):
# When auth_type is not instance_principal, config file is required
module.fail_json(msg=str(ex))
else:
_debug(
f"Ignore {ex} as the auth_type is set to instance_principal"
)
_debug(f"Ignore {ex} as the auth_type is set to instance_principal")
# if instance_principal auth is used, an empty 'config' map is used below.
config["additional_user_agent"] = f"Oracle-Ansible/{__version__}"
@@ -234,24 +227,16 @@ def get_oci_config(module, service_client_class=None):
)
# Redirect calls to home region for IAM service.
do_not_redirect = module.params.get(
"do_not_redirect_to_home_region", False
) or os.environ.get("OCI_IDENTITY_DO_NOT_REDIRECT_TO_HOME_REGION")
do_not_redirect = module.params.get("do_not_redirect_to_home_region", False) or os.environ.get(
"OCI_IDENTITY_DO_NOT_REDIRECT_TO_HOME_REGION"
)
if service_client_class == IdentityClient and not do_not_redirect:
_debug("Region passed for module invocation - {0} ".format(config["region"]))
identity_client = IdentityClient(config)
region_subscriptions = identity_client.list_region_subscriptions(
config["tenancy"]
).data
region_subscriptions = identity_client.list_region_subscriptions(config["tenancy"]).data
# Replace the region in the config with the home region.
[config["region"]] = [
rs.region_name for rs in region_subscriptions if rs.is_home_region is True
]
_debug(
"Setting region in the config to home region - {0} ".format(
config["region"]
)
)
[config["region"]] = [rs.region_name for rs in region_subscriptions if rs.is_home_region is True]
_debug("Setting region in the config to home region - {0} ".format(config["region"]))
return config
@@ -282,9 +267,7 @@ def create_service_client(module, service_client_class):
try:
oci.config.validate_config(config, **kwargs)
except oci.exceptions.InvalidConfig as ic:
module.fail_json(
msg=f"Invalid OCI configuration. Exception: {ic}"
)
module.fail_json(msg=f"Invalid OCI configuration. Exception: {ic}")
# Create service client class with the signer
client = service_client_class(config, **kwargs)
@@ -294,43 +277,31 @@ def create_service_client(module, service_client_class):
def _is_instance_principal_auth(module):
# check if auth type is overridden via module params
instance_principal_auth = (
"auth_type" in module.params
and module.params["auth_type"] == "instance_principal"
)
instance_principal_auth = "auth_type" in module.params and module.params["auth_type"] == "instance_principal"
if not instance_principal_auth:
instance_principal_auth = (
"OCI_ANSIBLE_AUTH_TYPE" in os.environ
and os.environ["OCI_ANSIBLE_AUTH_TYPE"] == "instance_principal"
"OCI_ANSIBLE_AUTH_TYPE" in os.environ and os.environ["OCI_ANSIBLE_AUTH_TYPE"] == "instance_principal"
)
return instance_principal_auth
def _merge_auth_option(
config, module, module_option_name, env_var_name, config_attr_name
):
def _merge_auth_option(config, module, module_option_name, env_var_name, config_attr_name):
"""Merge the values for an authentication attribute from ansible module options and
environment variables with the values specified in a configuration file"""
_debug(f"Merging {module_option_name}")
auth_attribute = module.params.get(module_option_name)
_debug(
f"\t Ansible module option {module_option_name} = {auth_attribute}"
)
_debug(f"\t Ansible module option {module_option_name} = {auth_attribute}")
if not auth_attribute:
if env_var_name in os.environ:
auth_attribute = os.environ[env_var_name]
_debug(
f"\t Environment variable {env_var_name} = {auth_attribute}"
)
_debug(f"\t Environment variable {env_var_name} = {auth_attribute}")
# An authentication attribute has been provided through an env-variable or an ansible
# option and must override the corresponding attribute's value specified in the
# config file [profile].
if auth_attribute:
_debug(
f"Updating config attribute {config_attr_name} -> {auth_attribute} "
)
_debug(f"Updating config attribute {config_attr_name} -> {auth_attribute} ")
config.update({config_attr_name: auth_attribute})
@@ -425,9 +396,7 @@ def setup_logging(
return logging
def check_and_update_attributes(
target_instance, attr_name, input_value, existing_value, changed
):
def check_and_update_attributes(target_instance, attr_name, input_value, existing_value, changed):
"""
This function checks the difference between two resource attributes of literal types and sets the attribute
value in the target instance type holding the attribute.
@@ -460,7 +429,6 @@ def check_and_update_resource(
wait_applicable=True,
states=None,
):
"""
This function handles update operation on a resource. It checks whether update is required and accordingly returns
the resource and the changed status.
@@ -486,9 +454,7 @@ def check_and_update_resource(
"""
try:
result = dict(changed=False)
attributes_to_update, resource = get_attr_to_update(
get_fn, kwargs_get, module, update_attributes
)
attributes_to_update, resource = get_attr_to_update(get_fn, kwargs_get, module, update_attributes)
if attributes_to_update:
kwargs_update = get_kwargs_update(
@@ -501,9 +467,7 @@ def check_and_update_resource(
resource = call_with_backoff(update_fn, **kwargs_update).data
if wait_applicable:
if client is None:
module.fail_json(
msg="wait_applicable is True, but client is not specified."
)
module.fail_json(msg="wait_applicable is True, but client is not specified.")
resource = wait_for_resource_lifecycle_state(
client, module, True, kwargs_get, get_fn, None, resource, states
)
@@ -528,10 +492,7 @@ def get_kwargs_update(
update_object = param()
for key in update_object.attribute_map:
if key in attributes_to_update:
if (
sub_attributes_of_update_model
and key in sub_attributes_of_update_model
):
if sub_attributes_of_update_model and key in sub_attributes_of_update_model:
setattr(update_object, key, sub_attributes_of_update_model[key])
else:
setattr(update_object, key, module.params[key])
@@ -601,9 +562,9 @@ def get_attr_to_update(get_fn, kwargs_get, module, update_attributes):
unequal_list_attr = (
isinstance(resources_attr_value, list) or isinstance(user_provided_attr_value, list)
) and not are_lists_equal(user_provided_attr_value, resources_attr_value)
unequal_attr = not isinstance(resources_attr_value, list) and to_dict(
resources_attr_value
) != to_dict(user_provided_attr_value)
unequal_attr = not isinstance(resources_attr_value, list) and to_dict(resources_attr_value) != to_dict(
user_provided_attr_value
)
if unequal_list_attr or unequal_attr:
# only update if the user has explicitly provided a value for this attribute
# otherwise, no update is necessary because the user hasn't expressed a particular
@@ -621,9 +582,7 @@ def get_taggable_arg_spec(supports_create=False, supports_wait=False):
defined tags.
"""
tag_arg_spec = get_common_arg_spec(supports_create, supports_wait)
tag_arg_spec.update(
dict(freeform_tags=dict(type="dict"), defined_tags=dict(type="dict"))
)
tag_arg_spec.update(dict(freeform_tags=dict(type="dict"), defined_tags=dict(type="dict")))
return tag_arg_spec
@@ -723,15 +682,11 @@ def check_and_create_resource(
result = dict()
attributes_to_consider = _get_attributes_to_consider(
exclude_attributes, model, module
)
attributes_to_consider = _get_attributes_to_consider(exclude_attributes, model, module)
if "defined_tags" not in default_attribute_values:
default_attribute_values["defined_tags"] = {}
resource_matched = None
_debug(
f"Trying to find a match within {len(existing_resources)} existing resources"
)
_debug(f"Trying to find a match within {len(existing_resources)} existing resources")
for resource in existing_resources:
if _is_resource_active(resource, dead_states):
@@ -888,24 +843,17 @@ def does_existing_resource_match_user_inputs(
# If the user has not explicitly provided the value for attr and attr is in exclude_list, we can
# consider this as a 'pass'. For example, if an attribute 'display_name' is not specified by user and
# that attribute is in the 'exclude_list' according to the module author(Not User), then exclude
if (
exclude_attributes.get(attr) is None
and resources_value_for_attr is not None
):
if exclude_attributes.get(attr) is None and resources_value_for_attr is not None:
if module.argument_spec.get(attr):
attribute_with_default_metadata = module.argument_spec.get(attr)
default_attribute_value = attribute_with_default_metadata.get(
"default", None
)
default_attribute_value = attribute_with_default_metadata.get("default", None)
if default_attribute_value is not None:
if existing_resource[attr] != default_attribute_value:
return False
# Check if attr has a value that is not default. For example, a custom `security_list_id`
# is assigned to the subnet's attribute `security_list_ids`. If the attribute is assigned a
# value that is not the default, then it must be considered a mismatch and false returned.
elif not is_attr_assigned_default(
default_attribute_values, attr, existing_resource[attr]
):
elif not is_attr_assigned_default(default_attribute_values, attr, existing_resource[attr]):
return False
else:
@@ -999,24 +947,17 @@ def check_if_user_value_matches_resources_attr(
if isinstance(exclude_attributes.get(attribute_name), dict):
exclude_attributes = exclude_attributes.get(attribute_name)
if isinstance(resources_value_for_attr, list) or isinstance(
user_provided_value_for_attr, list
):
if isinstance(resources_value_for_attr, list) or isinstance(user_provided_value_for_attr, list):
# Perform a deep equivalence check for a List attribute
if exclude_attributes.get(attribute_name):
return
if (
user_provided_value_for_attr is None
and default_attribute_values.get(attribute_name) is not None
):
if user_provided_value_for_attr is None and default_attribute_values.get(attribute_name) is not None:
user_provided_value_for_attr = default_attribute_values.get(attribute_name)
if resources_value_for_attr is None and user_provided_value_for_attr is None:
return
if (
resources_value_for_attr is None or user_provided_value_for_attr is None
):
if resources_value_for_attr is None or user_provided_value_for_attr is None:
res[0] = False
return
@@ -1028,17 +969,10 @@ def check_if_user_value_matches_resources_attr(
res[0] = False
return
if (
user_provided_value_for_attr
and isinstance(user_provided_value_for_attr[0], dict)
):
if user_provided_value_for_attr and isinstance(user_provided_value_for_attr[0], dict):
# Process a list of dict
sorted_user_provided_value_for_attr = sort_list_of_dictionary(
user_provided_value_for_attr
)
sorted_resources_value_for_attr = sort_list_of_dictionary(
resources_value_for_attr
)
sorted_user_provided_value_for_attr = sort_list_of_dictionary(user_provided_value_for_attr)
sorted_resources_value_for_attr = sort_list_of_dictionary(resources_value_for_attr)
else:
sorted_user_provided_value_for_attr = sorted(user_provided_value_for_attr)
@@ -1046,9 +980,7 @@ def check_if_user_value_matches_resources_attr(
# Walk through the sorted list values of the resource's value for this attribute, and compare against user
# provided values.
for index, resources_value_for_attr_part in enumerate(
sorted_resources_value_for_attr
):
for index, resources_value_for_attr_part in enumerate(sorted_resources_value_for_attr):
check_if_user_value_matches_resources_attr(
attribute_name,
resources_value_for_attr_part,
@@ -1064,10 +996,7 @@ def check_if_user_value_matches_resources_attr(
if not resources_value_for_attr and user_provided_value_for_attr:
res[0] = False
for key in resources_value_for_attr:
if (
user_provided_value_for_attr is not None
and user_provided_value_for_attr
):
if user_provided_value_for_attr is not None and user_provided_value_for_attr:
check_if_user_value_matches_resources_attr(
key,
resources_value_for_attr.get(key),
@@ -1096,17 +1025,12 @@ def check_if_user_value_matches_resources_attr(
)
elif resources_value_for_attr != user_provided_value_for_attr:
if (
exclude_attributes.get(attribute_name) is None
and default_attribute_values.get(attribute_name) is not None
):
if exclude_attributes.get(attribute_name) is None and default_attribute_values.get(attribute_name) is not None:
# As the user has not specified a value for an optional attribute, if the existing resource's
# current state has a DEFAULT value for that attribute, we must not consider this incongruence
# an issue and continue with other checks. If the existing resource's value for the attribute
# is not the default value, then the existing resource is not a match.
if not is_attr_assigned_default(
default_attribute_values, attribute_name, resources_value_for_attr
):
if not is_attr_assigned_default(default_attribute_values, attribute_name, resources_value_for_attr):
res[0] = False
elif user_provided_value_for_attr is not None:
res[0] = False
@@ -1123,9 +1047,7 @@ def are_dicts_equal(
# User has not provided a value for the map option. In this case, the user hasn't expressed an intent around
# this optional attribute. Check if existing_resource_dict matches default.
# For example, source_details attribute in volume is optional and does not have any defaults.
return is_attr_assigned_default(
default_attribute_values, option_name, existing_resource_dict
)
return is_attr_assigned_default(default_attribute_values, option_name, existing_resource_dict)
# If the existing resource has an empty dict, while the user has provided entries, dicts are not equal
if not existing_resource_dict and user_provided_dict:
@@ -1145,9 +1067,7 @@ def are_dicts_equal(
# If sub_attr not provided by user, check if the sub-attribute value of existing resource matches default value.
else:
if not should_dict_attr_be_excluded(option_name, sub_attr, exclude_list):
default_value_for_dict_attr = default_attribute_values.get(
option_name, None
)
default_value_for_dict_attr = default_attribute_values.get(option_name, None)
if default_value_for_dict_attr:
# if a default value for the sub-attr was provided by the module author, fail if the existing
# resource's value for the sub-attr is not the default
@@ -1173,7 +1093,7 @@ def are_dicts_equal(
def should_dict_attr_be_excluded(map_option_name, option_key, exclude_list):
"""An entry for the Exclude list for excluding a map's key is specified as a dict with the map option name as the
key, and the value as a list of keys to be excluded within that map. For example, if the keys "k1" and "k2" of a map
option named "m1" needs to be excluded, the exclude list must have an entry {'m1': ['k1','k2']} """
option named "m1" needs to be excluded, the exclude list must have an entry {'m1': ['k1','k2']}"""
for exclude_item in exclude_list:
if isinstance(exclude_item, dict):
if map_option_name in exclude_item:
@@ -1356,21 +1276,13 @@ def wait_for_resource_lifecycle_state(
# 'Authorization failed or requested resource not found', 'status': 404}.
# This is because it takes few seconds for the permissions on a compartment to be ready.
# Wait for few seconds before attempting a get call on compartment.
_debug(
"Pausing execution for permission on the newly created compartment to be ready."
)
_debug("Pausing execution for permission on the newly created compartment to be ready.")
time.sleep(15)
if kwargs_get:
_debug(
f"Waiting for resource to reach READY state. get_args: {kwargs_get}"
)
_debug(f"Waiting for resource to reach READY state. get_args: {kwargs_get}")
response_get = call_with_backoff(get_fn, **kwargs_get)
else:
_debug(
"Waiting for resource with id {0} to reach READY state.".format(
resource["id"]
)
)
_debug("Waiting for resource with id {0} to reach READY state.".format(resource["id"]))
response_get = call_with_backoff(get_fn, **{get_param: resource["id"]})
if states is None:
states = module.params.get("wait_until") or DEFAULT_READY_STATES
@@ -1379,9 +1291,7 @@ def wait_for_resource_lifecycle_state(
client,
response_get,
evaluate_response=lambda r: r.data.lifecycle_state in states,
max_wait_seconds=module.params.get(
"wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS
),
max_wait_seconds=module.params.get("wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS),
).data
)
return resource
@@ -1390,28 +1300,20 @@ def wait_for_resource_lifecycle_state(
def wait_on_work_request(client, response, module):
try:
if module.params.get("wait", None):
_debug(
f"Waiting for work request with id {response.data.id} to reach SUCCEEDED state."
)
_debug(f"Waiting for work request with id {response.data.id} to reach SUCCEEDED state.")
wait_response = oci.wait_until(
client,
response,
evaluate_response=lambda r: r.data.status == "SUCCEEDED",
max_wait_seconds=module.params.get(
"wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS
),
max_wait_seconds=module.params.get("wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS),
)
else:
_debug(
f"Waiting for work request with id {response.data.id} to reach ACCEPTED state."
)
_debug(f"Waiting for work request with id {response.data.id} to reach ACCEPTED state.")
wait_response = oci.wait_until(
client,
response,
evaluate_response=lambda r: r.data.status == "ACCEPTED",
max_wait_seconds=module.params.get(
"wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS
),
max_wait_seconds=module.params.get("wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS),
)
except MaximumWaitTimeExceeded as ex:
_debug(str(ex))
@@ -1461,12 +1363,8 @@ def delete_and_wait(
response = call_with_backoff(delete_fn, **kwargs_delete)
if process_work_request:
wr_id = response.headers.get("opc-work-request-id")
get_wr_response = call_with_backoff(
client.get_work_request, work_request_id=wr_id
)
result["work_request"] = to_dict(
wait_on_work_request(client, get_wr_response, module)
)
get_wr_response = call_with_backoff(client.get_work_request, work_request_id=wr_id)
result["work_request"] = to_dict(wait_on_work_request(client, get_wr_response, module))
# Set changed to True as work request has been created to delete the resource.
result["changed"] = True
resource = to_dict(call_with_backoff(get_fn, **kwargs_get).data)
@@ -1476,19 +1374,13 @@ def delete_and_wait(
if wait_applicable and module.params.get("wait", None):
if states is None:
states = (
module.params.get("wait_until")
or DEFAULT_TERMINATED_STATES
)
states = module.params.get("wait_until") or DEFAULT_TERMINATED_STATES
try:
wait_response = oci.wait_until(
client,
get_fn(**kwargs_get),
evaluate_response=lambda r: r.data.lifecycle_state
in states,
max_wait_seconds=module.params.get(
"wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS
),
evaluate_response=lambda r: r.data.lifecycle_state in states,
max_wait_seconds=module.params.get("wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS),
succeed_on_not_found=True,
)
except MaximumWaitTimeExceeded as ex:
@@ -1513,17 +1405,13 @@ def delete_and_wait(
result[resource_type] = resource
else:
_debug(
f"Resource {resource_type} with {kwargs_get} already deleted. So returning changed=False"
)
_debug(f"Resource {resource_type} with {kwargs_get} already deleted. So returning changed=False")
except ServiceError as ex:
# DNS API throws a 400 InvalidParameter when a zone id is provided for zone_name_or_id and if the zone
# resource is not available, instead of the expected 404. So working around this for now.
if isinstance(client, oci.dns.DnsClient):
if ex.status == 400 and ex.code == "InvalidParameter":
_debug(
f"Resource {resource_type} with {kwargs_get} already deleted. So returning changed=False"
)
_debug(f"Resource {resource_type} with {kwargs_get} already deleted. So returning changed=False")
elif ex.status != 404:
module.fail_json(msg=ex.message)
result[resource_type] = dict()
@@ -1673,9 +1561,7 @@ def generate_subclass(parent_class):
"__eq__": generic_eq,
}
subclass_name = "GeneratedSub" + parent_class.__name__
generated_sub_class = type(
subclass_name, (parent_class,), dict_of_method_in_subclass
)
generated_sub_class = type(subclass_name, (parent_class,), dict_of_method_in_subclass)
return generated_sub_class
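# A note on the three-argument type() call above: it is the dynamic
# equivalent of a class statement. For a hypothetical parent class Vcn
# (name illustrative only) it yields GeneratedSubVcn, a Vcn subclass whose
# __eq__ is the injected generic_eq.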
@@ -1689,15 +1575,11 @@ def get_hashed_object_list(class_type, object_with_values, attributes_class_type
return None
hashed_class_instances = []
for object_with_value in object_with_values:
hashed_class_instances.append(
get_hashed_object(class_type, object_with_value, attributes_class_type)
)
hashed_class_instances.append(get_hashed_object(class_type, object_with_value, attributes_class_type))
return hashed_class_instances
def get_hashed_object(
class_type, object_with_value, attributes_class_type=None, supported_attributes=None
):
def get_hashed_object(class_type, object_with_value, attributes_class_type=None, supported_attributes=None):
"""
Convert any class instance into a hashable one so that the
instances are eligible for various comparison
@@ -1718,9 +1600,7 @@ def get_hashed_object(
hashed_class_instance = HashedClass()
if supported_attributes:
class_attributes = list(
set(hashed_class_instance.attribute_map) & set(supported_attributes)
)
class_attributes = list(set(hashed_class_instance.attribute_map) & set(supported_attributes))
else:
class_attributes = hashed_class_instance.attribute_map
@@ -1729,17 +1609,13 @@ def get_hashed_object(
if attributes_class_type:
for attribute_class_type in attributes_class_type:
if isinstance(attribute_value, attribute_class_type):
attribute_value = get_hashed_object(
attribute_class_type, attribute_value
)
attribute_value = get_hashed_object(attribute_class_type, attribute_value)
hashed_class_instance.__setattr__(attribute, attribute_value)
return hashed_class_instance
def update_class_type_attr_difference(
update_class_details, existing_instance, attr_name, attr_class, input_attr_value
):
def update_class_type_attr_difference(update_class_details, existing_instance, attr_name, attr_class, input_attr_value):
"""
Checks the difference and updates an attribute which is represented by a class
instance. Not applicable if the attribute type is a primitive value.
@@ -1757,9 +1633,7 @@ def update_class_type_attr_difference(
"""
changed = False
# Here existing attribute values is an instance
existing_attr_value = get_hashed_object(
attr_class, getattr(existing_instance, attr_name)
)
existing_attr_value = get_hashed_object(attr_class, getattr(existing_instance, attr_name))
if input_attr_value is None:
update_class_details.__setattr__(attr_name, existing_attr_value)
else:
@@ -1791,9 +1665,7 @@ def get_existing_resource(target_fn, module, **kwargs):
return existing_resource
def get_attached_instance_info(
module, lookup_attached_instance, list_attachments_fn, list_attachments_args
):
def get_attached_instance_info(module, lookup_attached_instance, list_attachments_fn, list_attachments_args):
config = get_oci_config(module)
identity_client = create_service_client(module, IdentityClient)
@@ -1802,18 +1674,14 @@ def get_attached_instance_info(
if lookup_attached_instance:
# Get all the compartments in the tenancy
compartments = to_dict(
identity_client.list_compartments(
config.get("tenancy"), compartment_id_in_subtree=True
).data
identity_client.list_compartments(config.get("tenancy"), compartment_id_in_subtree=True).data
)
# For each compartment, get the volume attachments for the compartment_id with the other args in
# list_attachments_args.
for compartment in compartments:
list_attachments_args["compartment_id"] = compartment["id"]
try:
volume_attachments += list_all_resources(
list_attachments_fn, **list_attachments_args
)
volume_attachments += list_all_resources(list_attachments_fn, **list_attachments_args)
# Pass ServiceError due to authorization issue in accessing volume attachments of a compartment
except ServiceError as ex:
@@ -1821,9 +1689,7 @@ def get_attached_instance_info(
pass
else:
volume_attachments = list_all_resources(
list_attachments_fn, **list_attachments_args
)
volume_attachments = list_all_resources(list_attachments_fn, **list_attachments_args)
volume_attachments = to_dict(volume_attachments)
# volume_attachments has attachments in DETACHING or DETACHED state. Return the volume attachment in ATTACHING or
@@ -1864,15 +1730,11 @@ def check_and_return_component_list_difference(
return existing_components, changed
def get_component_list_difference(
input_component_list, existing_components, purge_components, delete_components=False
):
def get_component_list_difference(input_component_list, existing_components, purge_components, delete_components=False):
if delete_components:
if existing_components is None:
return None, False
component_differences = set(existing_components).intersection(
set(input_component_list)
)
component_differences = set(existing_components).intersection(set(input_component_list))
if component_differences:
return list(set(existing_components) - component_differences), True
else:
@@ -1880,16 +1742,12 @@ def get_component_list_difference(
if existing_components is None:
return input_component_list, True
if purge_components:
components_differences = set(input_component_list).symmetric_difference(
set(existing_components)
)
components_differences = set(input_component_list).symmetric_difference(set(existing_components))
if components_differences:
return input_component_list, True
components_differences = set(input_component_list).difference(
set(existing_components)
)
components_differences = set(input_component_list).difference(set(existing_components))
if components_differences:
return list(components_differences) + existing_components, True
return None, False
@@ -1900,9 +1758,7 @@ def write_to_file(path, content):
dest_file.write(content)
def get_target_resource_from_list(
module, list_resource_fn, target_resource_id=None, **kwargs
):
def get_target_resource_from_list(module, list_resource_fn, target_resource_id=None, **kwargs):
"""
Returns a resource filtered by identifier from a list of resources. This method should be
used as an alternative to the 'get resource' method when 'get resource' is not provided by
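# A minimal sketch of the oci.wait_until pattern reformatted in the hunks
# above, assuming the OCI Python SDK and a client/GET-response pair from a
# real service call; the function name and timeout value are illustrative.
import oci

def wait_for_states(client, response, states, timeout=1200):
    # Poll the GET response until the resource reaches one of the given
    # lifecycle states or the timeout elapses, as
    # wait_for_resource_lifecycle_state does above.
    return oci.wait_until(
        client,
        response,
        evaluate_response=lambda r: r.data.lifecycle_state in states,
        max_wait_seconds=timeout,
    ).data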
View File
@@ -24,22 +24,26 @@ _state_map = {
def fmt_resource_type(value):
return [":".join(value[k] for k in ['resource_standard', 'resource_provider', 'resource_name'] if value.get(k) is not None)]
return [
":".join(
value[k] for k in ["resource_standard", "resource_provider", "resource_name"] if value.get(k) is not None
)
]
def fmt_resource_operation(value):
cmd = []
for op in value:
cmd.append("op")
cmd.append(op.get('operation_action'))
for operation_option in op.get('operation_option'):
cmd.append(op.get("operation_action"))
for operation_option in op.get("operation_option"):
cmd.append(operation_option)
return cmd
def fmt_resource_argument(value):
return ['--group' if value['argument_action'] == 'group' else value['argument_action']] + value['argument_option']
return ["--group" if value["argument_action"] == "group" else value["argument_action"]] + value["argument_option"]
def get_pacemaker_maintenance_mode(runner):
@@ -51,7 +55,7 @@ def get_pacemaker_maintenance_mode(runner):
def pacemaker_runner(module, **kwargs):
runner_command = ['pcs']
runner_command = ["pcs"]
runner = CmdRunner(
module,
command=runner_command,
@@ -74,6 +78,6 @@ def pacemaker_runner(module, **kwargs):
version=cmd_runner_fmt.as_fixed("--version"),
output_format=cmd_runner_fmt.as_opt_eq_val("--output-format"),
),
**kwargs
**kwargs,
)
return runner
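# A usage sketch for the runner above, assuming CmdRunner's context API from
# community.general: calling the runner with format names yields a context
# whose run() receives the corresponding values. `module` is a hypothetical
# AnsibleModule instance; "version" maps to the fixed "--version" flag.
runner = pacemaker_runner(module)
with runner("version") as ctx:
    rc, out, err = ctx.run()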
View File
@@ -12,27 +12,27 @@ from ansible_collections.community.general.plugins.module_utils.cmd_runner impor
pipx_common_argspec = {
"global": dict(type='bool', default=False),
"executable": dict(type='path'),
"global": dict(type="bool", default=False),
"executable": dict(type="path"),
}
_state_map = dict(
install='install',
install_all='install-all',
present='install',
uninstall='uninstall',
absent='uninstall',
uninstall_all='uninstall-all',
inject='inject',
uninject='uninject',
upgrade='upgrade',
upgrade_shared='upgrade-shared',
upgrade_all='upgrade-all',
reinstall='reinstall',
reinstall_all='reinstall-all',
pin='pin',
unpin='unpin',
install="install",
install_all="install-all",
present="install",
uninstall="uninstall",
absent="uninstall",
uninstall_all="uninstall-all",
inject="inject",
uninject="uninject",
upgrade="upgrade",
upgrade_shared="upgrade-shared",
upgrade_all="upgrade-all",
reinstall="reinstall",
reinstall_all="reinstall-all",
pin="pin",
unpin="unpin",
)
@@ -46,15 +46,15 @@ def pipx_runner(module, command, **kwargs):
inject_packages=cmd_runner_fmt.as_list(),
force=cmd_runner_fmt.as_bool("--force"),
include_injected=cmd_runner_fmt.as_bool("--include-injected"),
index_url=cmd_runner_fmt.as_opt_val('--index-url'),
python=cmd_runner_fmt.as_opt_val('--python'),
index_url=cmd_runner_fmt.as_opt_val("--index-url"),
python=cmd_runner_fmt.as_opt_val("--python"),
system_site_packages=cmd_runner_fmt.as_bool("--system-site-packages"),
_list=cmd_runner_fmt.as_fixed(['list', '--include-injected', '--json']),
_list=cmd_runner_fmt.as_fixed(["list", "--include-injected", "--json"]),
editable=cmd_runner_fmt.as_bool("--editable"),
pip_args=cmd_runner_fmt.as_opt_eq_val('--pip-args'),
suffix=cmd_runner_fmt.as_opt_val('--suffix'),
pip_args=cmd_runner_fmt.as_opt_eq_val("--pip-args"),
suffix=cmd_runner_fmt.as_opt_val("--suffix"),
spec_metadata=cmd_runner_fmt.as_list(),
version=cmd_runner_fmt.as_fixed('--version'),
version=cmd_runner_fmt.as_fixed("--version"),
)
arg_formats["global"] = cmd_runner_fmt.as_bool("--global")
@@ -62,23 +62,23 @@ def pipx_runner(module, command, **kwargs):
module,
command=command,
arg_formats=arg_formats,
environ_update={'USE_EMOJI': '0', 'PIPX_USE_EMOJI': '0'},
environ_update={"USE_EMOJI": "0", "PIPX_USE_EMOJI": "0"},
check_rc=True,
**kwargs
**kwargs,
)
return runner
def _make_entry(venv_name, venv, include_injected, include_deps):
entry = {
'name': venv_name,
'version': venv['metadata']['main_package']['package_version'],
'pinned': venv['metadata']['main_package'].get('pinned'),
"name": venv_name,
"version": venv["metadata"]["main_package"]["package_version"],
"pinned": venv["metadata"]["main_package"].get("pinned"),
}
if include_injected:
entry['injected'] = {k: v['package_version'] for k, v in venv['metadata']['injected_packages'].items()}
entry["injected"] = {k: v["package_version"] for k, v in venv["metadata"]["injected_packages"].items()}
if include_deps:
entry['dependencies'] = list(venv['metadata']['main_package']['app_paths_of_dependencies'])
entry["dependencies"] = list(venv["metadata"]["main_package"]["app_paths_of_dependencies"])
return entry
@@ -89,7 +89,7 @@ def make_process_dict(include_injected, include_deps=False):
results = {}
raw_data = json.loads(out)
for venv_name, venv in raw_data['venvs'].items():
for venv_name, venv in raw_data["venvs"].items():
results[venv_name] = _make_entry(venv_name, venv, include_injected, include_deps)
return results, raw_data
@@ -111,9 +111,6 @@ def make_process_list(mod_helper, **kwargs):
if kwargs.get("include_raw"):
mod_helper.vars.raw_output = raw_data
return [
entry
for name, entry in res_dict.items()
if name == kwargs.get("name")
]
return [entry for name, entry in res_dict.items() if name == kwargs.get("name")]
return process_list
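# A sketch of how the processor above plugs into a runner built by
# pipx_runner, assuming CmdRunner's output_process contract of
# (rc, out, err); the command path is illustrative. "_list" expands to
# ["list", "--include-injected", "--json"], so `out` is the JSON that
# _make_entry consumes.
runner = pipx_runner(module, ["pipx"])
process = make_process_dict(include_injected=True)
with runner("_list", output_process=process) as ctx:
    results, raw_data = ctx.run()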
View File
@@ -37,7 +37,7 @@ class PackageRequirement:
return name, None
# Quick check for simple package names
if not any(c in name for c in '>=<!~[]'):
if not any(c in name for c in ">=<!~[]"):
return name.strip(), None
deps.validate(self.module, "packaging")
View File
@@ -15,9 +15,9 @@ _PUPPET_PATH_PREFIX = ["/opt/puppetlabs/bin"]
def get_facter_dir():
if os.getuid() == 0:
return '/etc/facter/facts.d'
return "/etc/facter/facts.d"
else:
return os.path.expanduser('~/.facter/facts.d')
return os.path.expanduser("~/.facter/facts.d")
def _puppet_cmd(module):
@@ -32,23 +32,19 @@ def ensure_agent_enabled(module):
command="puppet",
path_prefix=_PUPPET_PATH_PREFIX,
arg_formats=dict(
_agent_disabled=cmd_runner_fmt.as_fixed(['config', 'print', 'agent_disabled_lockfile']),
_agent_disabled=cmd_runner_fmt.as_fixed(["config", "print", "agent_disabled_lockfile"]),
),
check_rc=False,
)
rc, stdout, stderr = runner("_agent_disabled").run()
if os.path.exists(stdout.strip()):
module.fail_json(
msg="Puppet agent is administratively disabled.",
disabled=True)
module.fail_json(msg="Puppet agent is administratively disabled.", disabled=True)
elif rc != 0:
module.fail_json(
msg="Puppet agent state could not be determined.")
module.fail_json(msg="Puppet agent state could not be determined.")
def puppet_runner(module):
# To keep backward compatibility, allow running with the `timeout` CLI command.
# If this can be replaced with the ansible `timeout` parameter in a playbook,
# then this function could be removed.
@@ -80,10 +76,19 @@ def puppet_runner(module):
command=_prepare_base_cmd(),
path_prefix=_PUPPET_PATH_PREFIX,
arg_formats=dict(
_agent_fixed=cmd_runner_fmt.as_fixed([
"agent", "--onetime", "--no-daemonize", "--no-usecacheonfailure",
"--no-splay", "--detailed-exitcodes", "--verbose", "--color", "0",
]),
_agent_fixed=cmd_runner_fmt.as_fixed(
[
"agent",
"--onetime",
"--no-daemonize",
"--no-usecacheonfailure",
"--no-splay",
"--detailed-exitcodes",
"--verbose",
"--color",
"0",
]
),
_apply_fixed=cmd_runner_fmt.as_fixed(["apply", "--detailed-exitcodes"]),
puppetmaster=cmd_runner_fmt.as_opt_val("--server"),
show_diff=cmd_runner_fmt.as_bool("--show-diff"),

View File
class PythonRunner(CmdRunner):
def __init__(self, module, command, arg_formats=None, default_args_order=(),
check_rc=False, force_lang="C", path_prefix=None, environ_update=None,
python="python", venv=None):
def __init__(
self,
module,
command,
arg_formats=None,
default_args_order=(),
check_rc=False,
force_lang="C",
path_prefix=None,
environ_update=None,
python="python",
venv=None,
):
self.python = python
self.venv = venv
self.has_venv = venv is not None
if os.path.isabs(python) or '/' in python:
if os.path.isabs(python) or "/" in python:
self.python = python
elif self.has_venv:
if path_prefix is None:
@@ -30,5 +40,6 @@ class PythonRunner(CmdRunner):
python_cmd = [self.python] + _ensure_list(command)
super().__init__(module, python_cmd, arg_formats, default_args_order,
check_rc, force_lang, path_prefix, environ_update)
super().__init__(
module, python_cmd, arg_formats, default_args_order, check_rc, force_lang, path_prefix, environ_update
)
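# A usage sketch for PythonRunner, assuming the same CmdRunner context API;
# the pip invocation, interpreter, and arg format are illustrative only,
# and `module` is a hypothetical AnsibleModule instance.
from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt

runner = PythonRunner(
    module,
    ["-m", "pip"],
    arg_formats=dict(version=cmd_runner_fmt.as_fixed("--version")),
    python="python3",
)
with runner("version") as ctx:
    rc, out, err = ctx.run()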
File diff suppressed because it is too large

View File
@@ -13,6 +13,7 @@ REDIS_IMP_ERR = None
try:
from redis import Redis
from redis import __version__ as redis_version
HAS_REDIS_PACKAGE = True
REDIS_IMP_ERR = None
except ImportError:
@@ -21,6 +22,7 @@ except ImportError:
try:
import certifi
HAS_CERTIFI_PACKAGE = True
CERTIFI_IMPORT_ERROR = None
except ImportError:
@@ -32,65 +34,63 @@ def fail_imports(module, needs_certifi=True):
errors = []
traceback = []
if not HAS_REDIS_PACKAGE:
errors.append(missing_required_lib('redis'))
errors.append(missing_required_lib("redis"))
traceback.append(REDIS_IMP_ERR)
if not HAS_CERTIFI_PACKAGE and needs_certifi:
errors.append(missing_required_lib('certifi'))
errors.append(missing_required_lib("certifi"))
traceback.append(CERTIFI_IMPORT_ERROR)
if errors:
module.fail_json(msg='\n'.join(errors), traceback='\n'.join(traceback))
module.fail_json(msg="\n".join(errors), traceback="\n".join(traceback))
def redis_auth_argument_spec(tls_default=True):
return dict(
login_host=dict(type='str',
default='localhost',),
login_user=dict(type='str'),
login_password=dict(type='str',
no_log=True
),
login_port=dict(type='int', default=6379),
tls=dict(type='bool',
default=tls_default),
validate_certs=dict(type='bool',
default=True
),
ca_certs=dict(type='str'),
client_cert_file=dict(type='str'),
client_key_file=dict(type='str'),
login_host=dict(
type="str",
default="localhost",
),
login_user=dict(type="str"),
login_password=dict(type="str", no_log=True),
login_port=dict(type="int", default=6379),
tls=dict(type="bool", default=tls_default),
validate_certs=dict(type="bool", default=True),
ca_certs=dict(type="str"),
client_cert_file=dict(type="str"),
client_key_file=dict(type="str"),
)
def redis_auth_params(module):
login_host = module.params['login_host']
login_user = module.params['login_user']
login_password = module.params['login_password']
login_port = module.params['login_port']
tls = module.params['tls']
validate_certs = 'required' if module.params['validate_certs'] else None
ca_certs = module.params['ca_certs']
login_host = module.params["login_host"]
login_user = module.params["login_user"]
login_password = module.params["login_password"]
login_port = module.params["login_port"]
tls = module.params["tls"]
validate_certs = "required" if module.params["validate_certs"] else None
ca_certs = module.params["ca_certs"]
if tls and ca_certs is None:
ca_certs = str(certifi.where())
client_cert_file = module.params['client_cert_file']
client_key_file = module.params['client_key_file']
if tuple(map(int, redis_version.split('.'))) < (3, 4, 0) and login_user is not None:
module.fail_json(
msg='The option `username` in only supported with redis >= 3.4.0.')
params = {'host': login_host,
'port': login_port,
'password': login_password,
'ssl_ca_certs': ca_certs,
'ssl_certfile': client_cert_file,
'ssl_keyfile': client_key_file,
'ssl_cert_reqs': validate_certs,
'ssl': tls}
client_cert_file = module.params["client_cert_file"]
client_key_file = module.params["client_key_file"]
if tuple(map(int, redis_version.split("."))) < (3, 4, 0) and login_user is not None:
module.fail_json(msg="The option `username` in only supported with redis >= 3.4.0.")
params = {
"host": login_host,
"port": login_port,
"password": login_password,
"ssl_ca_certs": ca_certs,
"ssl_certfile": client_cert_file,
"ssl_keyfile": client_key_file,
"ssl_cert_reqs": validate_certs,
"ssl": tls,
}
if login_user is not None:
params['username'] = login_user
params["username"] = login_user
return params
class RedisAnsible:
'''Base class for Redis module'''
"""Base class for Redis module"""
def __init__(self, module):
self.module = module
@@ -100,5 +100,5 @@ class RedisAnsible:
try:
return Redis(**redis_auth_params(self.module))
except Exception as e:
self.module.fail_json(msg=f'{e}')
self.module.fail_json(msg=f"{e}")
return None
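# A minimal sketch composing the helpers above in a module: the spec keys
# come from redis_auth_argument_spec(), and redis_auth_params() returns the
# keyword arguments that redis.Redis accepts, as RedisAnsible does above.
from ansible.module_utils.basic import AnsibleModule

module = AnsibleModule(argument_spec=redis_auth_argument_spec())
fail_imports(module)
conn = Redis(**redis_auth_params(module))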
View File
@@ -14,8 +14,10 @@
from __future__ import annotations
import traceback
try:
from pylxca import connect, disconnect
HAS_PYLXCA = True
except ImportError:
HAS_PYLXCA = False
@@ -59,12 +61,11 @@ def setup_conn(module):
"""
lxca_con = None
try:
lxca_con = connect(module.params['auth_url'],
module.params['login_user'],
module.params['login_password'],
"True")
lxca_con = connect(
module.params["auth_url"], module.params["login_user"], module.params["login_password"], "True"
)
except Exception as exception:
error_msg = '; '.join(exception.args)
error_msg = "; ".join(exception.args)
module.fail_json(msg=error_msg, exception=traceback.format_exc())
return lxca_con
View File
@@ -1,4 +1,3 @@
# Copyright (c) 2021, Phillipe Smith <phsmithcc@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
@@ -12,16 +11,18 @@ from ansible.module_utils.common.text.converters import to_native
def api_argument_spec():
'''
"""
Creates an argument spec that can be used with any module
that will be requesting content via Rundeck API
'''
"""
api_argument_spec = url_argument_spec()
api_argument_spec.update(dict(
url=dict(required=True, type="str"),
api_version=dict(type="int", default=39),
api_token=dict(required=True, type="str", no_log=True)
))
api_argument_spec.update(
dict(
url=dict(required=True, type="str"),
api_version=dict(type="int", default=39),
api_token=dict(required=True, type="str", no_log=True),
)
)
return api_argument_spec
@@ -59,21 +60,18 @@ def api_request(module, endpoint, data=None, method="GET", content_type="applica
headers={
"Content-Type": content_type,
"Accept": "application/json",
"X-Rundeck-Auth-Token": module.params["api_token"]
}
"X-Rundeck-Auth-Token": module.params["api_token"],
},
)
if info["status"] == 403:
module.fail_json(msg="Token authorization failed",
execution_info=json.loads(info["body"]))
module.fail_json(msg="Token authorization failed", execution_info=json.loads(info["body"]))
elif info["status"] == 404:
return None, info
elif info["status"] == 409:
module.fail_json(msg="Job executions limit reached",
execution_info=json.loads(info["body"]))
module.fail_json(msg="Job executions limit reached", execution_info=json.loads(info["body"]))
elif info["status"] >= 500:
module.fail_json(msg="Rundeck API error",
execution_info=json.loads(info["body"]))
module.fail_json(msg="Rundeck API error", execution_info=json.loads(info["body"]))
try:
content = response.read()
@@ -84,14 +82,6 @@ def api_request(module, endpoint, data=None, method="GET", content_type="applica
json_response = json.loads(content)
return json_response, info
except AttributeError as error:
module.fail_json(
msg="Rundeck API request error",
exception=to_native(error),
execution_info=info
)
module.fail_json(msg="Rundeck API request error", exception=to_native(error), execution_info=info)
except ValueError as error:
module.fail_json(
msg="No valid JSON response",
exception=to_native(error),
execution_info=content
)
module.fail_json(msg="No valid JSON response", exception=to_native(error), execution_info=content)
View File
@@ -1,4 +1,3 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
@@ -50,7 +49,7 @@ def mapping_profile(string):
if in_table_c12(c):
# map non-ASCII space characters
# (that can be mapped) to Unicode space
tmp.append(' ')
tmp.append(" ")
else:
tmp.append(c)
@@ -67,7 +66,7 @@ def is_ral_string(string):
# RandALCat character MUST be the last character of the string.
if in_table_d1(string[0]):
if not in_table_d1(string[-1]):
raise ValueError('RFC3454: incorrect bidirectional RandALCat string.')
raise ValueError("RFC3454: incorrect bidirectional RandALCat string.")
return True
return False
@@ -95,41 +94,41 @@ def prohibited_output_profile(string):
# If a string contains any RandALCat characters,
# The string MUST NOT contain any LCat character:
is_prohibited_bidi_ch = in_table_d2
bidi_table = 'D.2'
bidi_table = "D.2"
else:
# Forbid RandALCat characters in LCat string:
is_prohibited_bidi_ch = in_table_d1
bidi_table = 'D.1'
bidi_table = "D.1"
RFC = 'RFC4013'
RFC = "RFC4013"
for c in string:
# RFC4013 2.3. Prohibited Output:
if in_table_c12(c):
raise ValueError(f'{RFC}: prohibited non-ASCII space characters that cannot be replaced (C.1.2).')
raise ValueError(f"{RFC}: prohibited non-ASCII space characters that cannot be replaced (C.1.2).")
if in_table_c21_c22(c):
raise ValueError(f'{RFC}: prohibited control characters (C.2.1).')
raise ValueError(f"{RFC}: prohibited control characters (C.2.1).")
if in_table_c3(c):
raise ValueError(f'{RFC}: prohibited private Use characters (C.3).')
raise ValueError(f"{RFC}: prohibited private Use characters (C.3).")
if in_table_c4(c):
raise ValueError(f'{RFC}: prohibited non-character code points (C.4).')
raise ValueError(f"{RFC}: prohibited non-character code points (C.4).")
if in_table_c5(c):
raise ValueError(f'{RFC}: prohibited surrogate code points (C.5).')
raise ValueError(f"{RFC}: prohibited surrogate code points (C.5).")
if in_table_c6(c):
raise ValueError(f'{RFC}: prohibited inappropriate for plain text characters (C.6).')
raise ValueError(f"{RFC}: prohibited inappropriate for plain text characters (C.6).")
if in_table_c7(c):
raise ValueError(f'{RFC}: prohibited inappropriate for canonical representation characters (C.7).')
raise ValueError(f"{RFC}: prohibited inappropriate for canonical representation characters (C.7).")
if in_table_c8(c):
raise ValueError(f'{RFC}: prohibited change display properties / deprecated characters (C.8).')
raise ValueError(f"{RFC}: prohibited change display properties / deprecated characters (C.8).")
if in_table_c9(c):
raise ValueError(f'{RFC}: prohibited tagging characters (C.9).')
raise ValueError(f"{RFC}: prohibited tagging characters (C.9).")
# RFC4013, 2.4. Bidirectional Characters:
if is_prohibited_bidi_ch(c):
raise ValueError(f'{RFC}: prohibited bidi characters ({bidi_table}).')
raise ValueError(f"{RFC}: prohibited bidi characters ({bidi_table}).")
# RFC4013, 2.5. Unassigned Code Points:
if in_table_a1(c):
raise ValueError(f'{RFC}: prohibited unassigned code points (A.1).')
raise ValueError(f"{RFC}: prohibited unassigned code points (A.1).")
def saslprep(string):
@@ -151,16 +150,16 @@ def saslprep(string):
# comprised of characters from the Unicode [Unicode] character set."
# Validate the string is a Unicode string
if not is_unicode_str(string):
raise TypeError(f'input must be of type str, not {type(string)}')
raise TypeError(f"input must be of type str, not {type(string)}")
# RFC4013: 2.1. Mapping.
string = mapping_profile(string)
# RFC4013: 2.2. Normalization.
# "This profile specifies using Unicode normalization form KC."
string = normalize('NFKC', string)
string = normalize("NFKC", string)
if not string:
return ''
return ""
# RFC4013: 2.3. Prohibited Output.
# RFC4013: 2.4. Bidirectional Characters.

View File
SCALEWAY_SECRET_IMP_ERR: str | None = None
try:
from passlib.hash import argon2
HAS_SCALEWAY_SECRET_PACKAGE = True
except Exception:
SCALEWAY_SECRET_IMP_ERR = traceback.format_exc()
@@ -30,12 +31,18 @@ except Exception:
def scaleway_argument_spec():
return dict(
api_token=dict(required=True, fallback=(env_fallback, ['SCW_TOKEN', 'SCW_API_KEY', 'SCW_OAUTH_TOKEN', 'SCW_API_TOKEN']),
no_log=True, aliases=['oauth_token']),
api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://api.scaleway.com', aliases=['base_url']),
api_timeout=dict(type='int', default=30, aliases=['timeout']),
query_parameters=dict(type='dict', default={}),
validate_certs=dict(default=True, type='bool'),
api_token=dict(
required=True,
fallback=(env_fallback, ["SCW_TOKEN", "SCW_API_KEY", "SCW_OAUTH_TOKEN", "SCW_API_TOKEN"]),
no_log=True,
aliases=["oauth_token"],
),
api_url=dict(
fallback=(env_fallback, ["SCW_API_URL"]), default="https://api.scaleway.com", aliases=["base_url"]
),
api_timeout=dict(type="int", default=30, aliases=["timeout"]),
query_parameters=dict(type="dict", default={}),
validate_certs=dict(default=True, type="bool"),
)
@@ -48,47 +55,42 @@ def scaleway_waitable_resource_argument_spec():
def payload_from_object(scw_object):
return {
k: v
for k, v in scw_object.items()
if k != 'id' and v is not None
}
return {k: v for k, v in scw_object.items() if k != "id" and v is not None}
class ScalewayException(Exception):
def __init__(self, message):
self.message = message
# Specify a complete Link header, for validation purposes
R_LINK_HEADER = r'''<[^>]+>;\srel="(first|previous|next|last)"
(,<[^>]+>;\srel="(first|previous|next|last)")*'''
R_LINK_HEADER = r"""<[^>]+>;\srel="(first|previous|next|last)"
(,<[^>]+>;\srel="(first|previous|next|last)")*"""
# Specify a single relation, for iteration and string extraction purposes
R_RELATION = r'</?(?P<target_IRI>[^>]+)>; rel="(?P<relation>first|previous|next|last)"'
def parse_pagination_link(header):
if not re.match(R_LINK_HEADER, header, re.VERBOSE):
raise ScalewayException('Scaleway API answered with an invalid Link pagination header')
raise ScalewayException("Scaleway API answered with an invalid Link pagination header")
else:
relations = header.split(',')
relations = header.split(",")
parsed_relations = {}
rc_relation = re.compile(R_RELATION)
for relation in relations:
match = rc_relation.match(relation)
if not match:
raise ScalewayException('Scaleway API answered with an invalid relation in the Link pagination header')
raise ScalewayException("Scaleway API answered with an invalid relation in the Link pagination header")
data = match.groupdict()
parsed_relations[data['relation']] = data['target_IRI']
parsed_relations[data["relation"]] = data["target_IRI"]
return parsed_relations
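# Example: the header '<https://api.scaleway.com/servers?page=2>; rel="next"'
# matches R_RELATION above and yields
# {"next": "https://api.scaleway.com/servers?page=2"}.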
def filter_sensitive_attributes(container, attributes):
'''
"""
WARNING: This function is effectively private, **do not use it**!
It will be removed or renamed once changing its name no longer triggers a pylint bug.
'''
"""
for attr in attributes:
container[attr] = "SENSITIVE_VALUE"
@@ -100,8 +102,8 @@ class SecretVariables:
def ensure_scaleway_secret_package(module):
if not HAS_SCALEWAY_SECRET_PACKAGE:
module.fail_json(
msg=missing_required_lib("passlib[argon2]", url='https://passlib.readthedocs.io/en/stable/'),
exception=SCALEWAY_SECRET_IMP_ERR
msg=missing_required_lib("passlib[argon2]", url="https://passlib.readthedocs.io/en/stable/"),
exception=SCALEWAY_SECRET_IMP_ERR,
)
@staticmethod
@@ -110,8 +112,8 @@ class SecretVariables:
@staticmethod
def list_to_dict(source_list, hashed=False):
key_value = 'hashed_value' if hashed else 'value'
return {var['key']: var[key_value] for var in source_list}
key_value = "hashed_value" if hashed else "value"
return {var["key"]: var[key_value] for var in source_list}
@classmethod
def decode(cls, secrets_list, values_list):
@@ -140,7 +142,6 @@ def resource_attributes_should_be_changed(target, wished, verifiable_mutable_att
class Response:
def __init__(self, resp, info):
self.body = None
if resp:
@@ -168,32 +169,32 @@ class Response:
class Scaleway:
def __init__(self, module):
self.module = module
self.headers = {
'X-Auth-Token': self.module.params.get('api_token'),
'User-Agent': self.get_user_agent_string(module),
'Content-Type': 'application/json',
"X-Auth-Token": self.module.params.get("api_token"),
"User-Agent": self.get_user_agent_string(module),
"Content-Type": "application/json",
}
self.name = None
def get_resources(self):
results = self.get(f'/{self.name}')
results = self.get(f"/{self.name}")
if not results.ok:
raise ScalewayException(
f"Error fetching {self.name} ({self.module.params.get('api_url')}/{self.name}) [{results.status_code}: {results.json['message']}]")
f"Error fetching {self.name} ({self.module.params.get('api_url')}/{self.name}) [{results.status_code}: {results.json['message']}]"
)
return results.json.get(self.name)
def _url_builder(self, path, params):
d = self.module.params.get('query_parameters')
d = self.module.params.get("query_parameters")
if params is not None:
d.update(params)
query_string = urlencode(d, doseq=True)
if path[0] == '/':
if path[0] == "/":
path = path[1:]
return f"{self.module.params.get('api_url')}/{path}?{query_string}"
@@ -204,17 +205,21 @@ class Scaleway:
if headers is not None:
self.headers.update(headers)
if self.headers['Content-Type'] == "application/json":
if self.headers["Content-Type"] == "application/json":
data = self.module.jsonify(data)
resp, info = fetch_url(
self.module, url, data=data, headers=self.headers, method=method,
timeout=self.module.params.get('api_timeout')
self.module,
url,
data=data,
headers=self.headers,
method=method,
timeout=self.module.params.get("api_timeout"),
)
# Exceptions in fetch_url may result in a status of -1; this ensures a proper error is reported to the user in all cases
if info['status'] == -1:
self.module.fail_json(msg=info['msg'])
if info["status"] == -1:
self.module.fail_json(msg=info["msg"])
return Response(resp, info)
@@ -223,16 +228,16 @@ class Scaleway:
return f"ansible {module.ansible_version} Python {sys.version.split(' ', 1)[0]}"
def get(self, path, data=None, headers=None, params=None):
return self.send(method='GET', path=path, data=data, headers=headers, params=params)
return self.send(method="GET", path=path, data=data, headers=headers, params=params)
def put(self, path, data=None, headers=None, params=None):
return self.send(method='PUT', path=path, data=data, headers=headers, params=params)
return self.send(method="PUT", path=path, data=data, headers=headers, params=params)
def post(self, path, data=None, headers=None, params=None):
return self.send(method='POST', path=path, data=data, headers=headers, params=params)
return self.send(method="POST", path=path, data=data, headers=headers, params=params)
def delete(self, path, data=None, headers=None, params=None):
return self.send(method='DELETE', path=path, data=data, headers=headers, params=params)
return self.send(method="DELETE", path=path, data=data, headers=headers, params=params)
def patch(self, path, data=None, headers=None, params=None):
return self.send(method="PATCH", path=path, data=data, headers=headers, params=params)
@@ -251,7 +256,7 @@ class Scaleway:
return "absent"
if not response.ok:
msg = f'Error during state fetching: ({response.status_code}) {response.json}'
msg = f"Error during state fetching: ({response.status_code}) {response.json}"
self.module.fail_json(msg=msg)
try:
@@ -261,13 +266,13 @@ class Scaleway:
self.module.fail_json(msg=f"Could not fetch state in {response.json}")
def fetch_paginated_resources(self, resource_key, **pagination_kwargs):
response = self.get(
path=self.api_path,
params=pagination_kwargs)
response = self.get(path=self.api_path, params=pagination_kwargs)
status_code = response.status_code
if not response.ok:
self.module.fail_json(msg=f"Error getting {resource_key} [{response.status_code}: {response.json['message']}]")
self.module.fail_json(
msg=f"Error getting {resource_key} [{response.status_code}: {response.json['message']}]"
)
return response.json[resource_key]
@@ -278,10 +283,10 @@ class Scaleway:
while len(result) != 0:
result = self.fetch_paginated_resources(resource_key, **pagination_kwargs)
resources += result
if 'page' in pagination_kwargs:
pagination_kwargs['page'] += 1
if "page" in pagination_kwargs:
pagination_kwargs["page"] += 1
else:
pagination_kwargs['page'] = 2
pagination_kwargs["page"] = 2
return resources
@@ -315,95 +320,83 @@ class Scaleway:
SCALEWAY_LOCATION = {
'par1': {
'name': 'Paris 1',
'country': 'FR',
'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-1',
'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-1'
"par1": {
"name": "Paris 1",
"country": "FR",
"api_endpoint": "https://api.scaleway.com/instance/v1/zones/fr-par-1",
"api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/fr-par-1",
},
'EMEA-FR-PAR1': {
'name': 'Paris 1',
'country': 'FR',
'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-1',
'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-1'
"EMEA-FR-PAR1": {
"name": "Paris 1",
"country": "FR",
"api_endpoint": "https://api.scaleway.com/instance/v1/zones/fr-par-1",
"api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/fr-par-1",
},
'par2': {
'name': 'Paris 2',
'country': 'FR',
'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-2',
'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-2'
"par2": {
"name": "Paris 2",
"country": "FR",
"api_endpoint": "https://api.scaleway.com/instance/v1/zones/fr-par-2",
"api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/fr-par-2",
},
'EMEA-FR-PAR2': {
'name': 'Paris 2',
'country': 'FR',
'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-2',
'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-2'
"EMEA-FR-PAR2": {
"name": "Paris 2",
"country": "FR",
"api_endpoint": "https://api.scaleway.com/instance/v1/zones/fr-par-2",
"api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/fr-par-2",
},
'par3': {
'name': 'Paris 3',
'country': 'FR',
'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-3',
'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-3'
"par3": {
"name": "Paris 3",
"country": "FR",
"api_endpoint": "https://api.scaleway.com/instance/v1/zones/fr-par-3",
"api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/fr-par-3",
},
'ams1': {
'name': 'Amsterdam 1',
'country': 'NL',
'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-1',
'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-1'
"ams1": {
"name": "Amsterdam 1",
"country": "NL",
"api_endpoint": "https://api.scaleway.com/instance/v1/zones/nl-ams-1",
"api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/nl-ams-1",
},
'EMEA-NL-EVS': {
'name': 'Amsterdam 1',
'country': 'NL',
'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-1',
'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-1'
"EMEA-NL-EVS": {
"name": "Amsterdam 1",
"country": "NL",
"api_endpoint": "https://api.scaleway.com/instance/v1/zones/nl-ams-1",
"api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/nl-ams-1",
},
'ams2': {
'name': 'Amsterdam 2',
'country': 'NL',
'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-2',
'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-2'
"ams2": {
"name": "Amsterdam 2",
"country": "NL",
"api_endpoint": "https://api.scaleway.com/instance/v1/zones/nl-ams-2",
"api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/nl-ams-2",
},
'ams3': {
'name': 'Amsterdam 3',
'country': 'NL',
'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-3',
'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-3'
"ams3": {
"name": "Amsterdam 3",
"country": "NL",
"api_endpoint": "https://api.scaleway.com/instance/v1/zones/nl-ams-3",
"api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/nl-ams-3",
},
'waw1': {
'name': 'Warsaw 1',
'country': 'PL',
'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-1',
'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/pl-waw-1'
"waw1": {
"name": "Warsaw 1",
"country": "PL",
"api_endpoint": "https://api.scaleway.com/instance/v1/zones/pl-waw-1",
"api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/pl-waw-1",
},
'EMEA-PL-WAW1': {
'name': 'Warsaw 1',
'country': 'PL',
'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-1',
'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/pl-waw-1'
"EMEA-PL-WAW1": {
"name": "Warsaw 1",
"country": "PL",
"api_endpoint": "https://api.scaleway.com/instance/v1/zones/pl-waw-1",
"api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/pl-waw-1",
},
'waw2': {
'name': 'Warsaw 2',
'country': 'PL',
'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-2',
'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/pl-waw-2'
"waw2": {
"name": "Warsaw 2",
"country": "PL",
"api_endpoint": "https://api.scaleway.com/instance/v1/zones/pl-waw-2",
"api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/pl-waw-2",
},
'waw3': {
'name': 'Warsaw 3',
'country': 'PL',
'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-3',
'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/pl-waw-3'
"waw3": {
"name": "Warsaw 3",
"country": "PL",
"api_endpoint": "https://api.scaleway.com/instance/v1/zones/pl-waw-3",
"api_endpoint_vpc": "https://api.scaleway.com/vpc/v1/zones/pl-waw-3",
},
}
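# Example lookup against the table above; the legacy zone aliases (for
# instance "EMEA-FR-PAR1") resolve to the same endpoints as the short names.
endpoint = SCALEWAY_LOCATION["par1"]["api_endpoint"]
# -> "https://api.scaleway.com/instance/v1/zones/fr-par-1"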
View File
@@ -8,17 +8,17 @@ from ansible_collections.community.general.plugins.module_utils.cmd_runner impor
_alias_state_map = dict(
present='alias',
absent='unalias',
info='aliases',
present="alias",
absent="unalias",
info="aliases",
)
_state_map = dict(
present='install',
absent='remove',
enabled='enable',
disabled='disable',
refresh='refresh',
present="install",
absent="remove",
enabled="enable",
disabled="disable",
refresh="refresh",
)
@@ -29,20 +29,20 @@ def snap_runner(module, **kwargs):
arg_formats=dict(
state_alias=cmd_runner_fmt.as_map(_alias_state_map), # snap_alias only
name=cmd_runner_fmt.as_list(),
alias=cmd_runner_fmt.as_list(), # snap_alias only
alias=cmd_runner_fmt.as_list(), # snap_alias only
state=cmd_runner_fmt.as_map(_state_map),
_list=cmd_runner_fmt.as_fixed("list"),
_set=cmd_runner_fmt.as_fixed("set"),
get=cmd_runner_fmt.as_fixed(["get", "-d"]),
classic=cmd_runner_fmt.as_bool("--classic"),
channel=cmd_runner_fmt.as_func(lambda v: [] if v == 'stable' else ['--channel', f'{v}']),
channel=cmd_runner_fmt.as_func(lambda v: [] if v == "stable" else ["--channel", f"{v}"]),
options=cmd_runner_fmt.as_list(),
info=cmd_runner_fmt.as_fixed("info"),
dangerous=cmd_runner_fmt.as_bool("--dangerous"),
version=cmd_runner_fmt.as_fixed("version"),
),
check_rc=False,
**kwargs
**kwargs,
)
return runner
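# Example of the channel formatter above: "stable" contributes no CLI
# arguments (snap's default channel), while any other value expands, for
# example "candidate" -> ["--channel", "candidate"].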
View File
@@ -12,7 +12,7 @@ from ansible.module_utils.urls import fetch_url, basic_auth_header
class BitbucketHelper:
BITBUCKET_API_URL = 'https://api.bitbucket.org'
BITBUCKET_API_URL = "https://api.bitbucket.org"
def __init__(self, module):
self.module = module
@@ -21,58 +21,66 @@ class BitbucketHelper:
@staticmethod
def bitbucket_argument_spec():
return dict(
client_id=dict(type='str', fallback=(env_fallback, ['BITBUCKET_CLIENT_ID'])),
client_secret=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_CLIENT_SECRET'])),
client_id=dict(type="str", fallback=(env_fallback, ["BITBUCKET_CLIENT_ID"])),
client_secret=dict(type="str", no_log=True, fallback=(env_fallback, ["BITBUCKET_CLIENT_SECRET"])),
# TODO:
# - Rename user to username once current usage of username is removed
# - Alias user to username and deprecate it
user=dict(type='str', aliases=['username'], fallback=(env_fallback, ['BITBUCKET_USERNAME'])),
password=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_PASSWORD'])),
user=dict(type="str", aliases=["username"], fallback=(env_fallback, ["BITBUCKET_USERNAME"])),
password=dict(type="str", no_log=True, fallback=(env_fallback, ["BITBUCKET_PASSWORD"])),
)
@staticmethod
def bitbucket_required_one_of():
return [['client_id', 'client_secret', 'user', 'password']]
return [["client_id", "client_secret", "user", "password"]]
@staticmethod
def bitbucket_required_together():
return [['client_id', 'client_secret'], ['user', 'password']]
return [["client_id", "client_secret"], ["user", "password"]]
def fetch_access_token(self):
if self.module.params['client_id'] and self.module.params['client_secret']:
if self.module.params["client_id"] and self.module.params["client_secret"]:
headers = {
'Authorization': basic_auth_header(self.module.params['client_id'], self.module.params['client_secret']),
"Authorization": basic_auth_header(
self.module.params["client_id"], self.module.params["client_secret"]
),
}
info, content = self.request(
api_url='https://bitbucket.org/site/oauth2/access_token',
method='POST',
data='grant_type=client_credentials',
api_url="https://bitbucket.org/site/oauth2/access_token",
method="POST",
data="grant_type=client_credentials",
headers=headers,
)
if info['status'] == 200:
self.access_token = content['access_token']
if info["status"] == 200:
self.access_token = content["access_token"]
else:
self.module.fail_json(msg=f'Failed to retrieve access token: {info}')
self.module.fail_json(msg=f"Failed to retrieve access token: {info}")
def request(self, api_url, method, data=None, headers=None):
headers = headers or {}
if self.access_token:
headers.update({
'Authorization': f'Bearer {self.access_token}',
})
elif self.module.params['user'] and self.module.params['password']:
headers.update({
'Authorization': basic_auth_header(self.module.params['user'], self.module.params['password']),
})
headers.update(
{
"Authorization": f"Bearer {self.access_token}",
}
)
elif self.module.params["user"] and self.module.params["password"]:
headers.update(
{
"Authorization": basic_auth_header(self.module.params["user"], self.module.params["password"]),
}
)
if isinstance(data, dict):
data = self.module.jsonify(data)
headers.update({
'Content-type': 'application/json',
})
headers.update(
{
"Content-type": "application/json",
}
)
response, info = fetch_url(
module=self.module,
View File
@@ -13,7 +13,7 @@ import os
def determine_config_file(user, config_file):
if user:
config_file = os.path.join(os.path.expanduser(f'~{user}'), '.ssh', 'config')
config_file = os.path.join(os.path.expanduser(f"~{user}"), ".ssh", "config")
elif config_file is None:
config_file = '/etc/ssh/ssh_config'
config_file = "/etc/ssh/ssh_config"
return config_file
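# The three branches of determine_config_file, illustrated; note that a
# non-empty `user` takes precedence over an explicitly passed config_file.
determine_config_file("alice", None)  # -> "~alice/.ssh/config", tilde expanded
determine_config_file(None, None)  # -> "/etc/ssh/ssh_config"
determine_config_file(None, "/tmp/ssh_config")  # -> "/tmp/ssh_config", kept as-is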
View File
@@ -6,8 +6,7 @@ from __future__ import annotations
emc_vnx_argument_spec = {
'sp_address': dict(type='str', required=True),
'sp_user': dict(type='str', required=False, default='sysadmin'),
'sp_password': dict(type='str', required=False, default='sysadmin',
no_log=True),
"sp_address": dict(type="str", required=True),
"sp_user": dict(type="str", required=False, default="sysadmin"),
"sp_password": dict(type="str", required=False, default="sysadmin", no_log=True),
}
View File
@@ -11,7 +11,7 @@ from ansible.module_utils import basic
def convert_to_binary_multiple(size_with_unit):
if size_with_unit is None:
return -1
valid_units = ['MiB', 'GiB', 'TiB']
valid_units = ["MiB", "GiB", "TiB"]
valid_unit = False
for unit in valid_units:
if size_with_unit.strip().endswith(unit):
@@ -22,47 +22,24 @@ def convert_to_binary_multiple(size_with_unit):
if not valid_unit:
raise ValueError(f"{size_with_unit} does not have a valid unit. The unit must be one of {valid_units}")
size = size_with_unit.replace(" ", "").split('iB')[0]
size = size_with_unit.replace(" ", "").split("iB")[0]
size_kib = basic.human_to_bytes(size)
return int(size_kib / (1024 * 1024))
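# Worked example for the conversion above: "2 GiB" becomes "2G" after the
# space strip and split on "iB"; human_to_bytes("2G") == 2147483648 bytes
# (despite the size_kib name, the value is in bytes), and
# 2147483648 / (1024 * 1024) == 2048, i.e. the size expressed in MiB.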
storage_system_spec = {
"storage_system_ip": {
"required": True,
"type": "str"
},
"storage_system_username": {
"required": True,
"type": "str",
"no_log": True
},
"storage_system_password": {
"required": True,
"type": "str",
"no_log": True
},
"secure": {
"type": "bool",
"default": False
}
"storage_system_ip": {"required": True, "type": "str"},
"storage_system_username": {"required": True, "type": "str", "no_log": True},
"storage_system_password": {"required": True, "type": "str", "no_log": True},
"secure": {"type": "bool", "default": False},
}
def cpg_argument_spec():
spec = {
"state": {
"required": True,
"choices": ['present', 'absent'],
"type": 'str'
},
"cpg_name": {
"required": True,
"type": "str"
},
"domain": {
"type": "str"
},
"state": {"required": True, "choices": ["present", "absent"], "type": "str"},
"cpg_name": {"required": True, "type": "str"},
"domain": {"type": "str"},
"growth_increment": {
"type": "str",
},
@@ -72,23 +49,10 @@ def cpg_argument_spec():
"growth_warning": {
"type": "str",
},
"raid_type": {
"required": False,
"type": "str",
"choices": ['R0', 'R1', 'R5', 'R6']
},
"set_size": {
"required": False,
"type": "int"
},
"high_availability": {
"type": "str",
"choices": ['PORT', 'CAGE', 'MAG']
},
"disk_type": {
"type": "str",
"choices": ['FC', 'NL', 'SSD']
}
"raid_type": {"required": False, "type": "str", "choices": ["R0", "R1", "R5", "R6"]},
"set_size": {"required": False, "type": "int"},
"high_availability": {"type": "str", "choices": ["PORT", "CAGE", "MAG"]},
"disk_type": {"type": "str", "choices": ["FC", "NL", "SSD"]},
}
spec.update(storage_system_spec)
return spec
View File
@@ -22,11 +22,5 @@ def systemd_runner(module, command, **kwargs):
unit=cmd_runner_fmt.as_list(),
)
runner = CmdRunner(
module,
command=command,
arg_formats=arg_formats,
check_rc=True,
**kwargs
)
runner = CmdRunner(module, command=command, arg_formats=arg_formats, check_rc=True, **kwargs)
return runner
View File
@@ -1,4 +1,3 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
@@ -44,12 +43,12 @@ import re
__all__ = [
'ldap_search',
'config_registry',
'base_dn',
'uldap',
'umc_module_for_add',
'umc_module_for_edit',
"ldap_search",
"config_registry",
"base_dn",
"uldap",
"umc_module_for_add",
"umc_module_for_edit",
]
@@ -58,6 +57,7 @@ _singletons: dict[str, object] = {}
def ldap_module():
import ldap as orig_ldap
return orig_ldap
@@ -69,18 +69,18 @@ def _singleton(name: str, constructor):
def config_registry():
def construct():
import univention.config_registry
ucr = univention.config_registry.ConfigRegistry()
ucr.load()
return ucr
return _singleton('config_registry', construct)
return _singleton("config_registry", construct)
def base_dn():
return config_registry()['ldap/base']
return config_registry()["ldap/base"]
def uldap():
@@ -88,52 +88,59 @@ def uldap():
def construct():
try:
secret_file = open('/etc/ldap.secret', 'r')
bind_dn = f'cn=admin,{base_dn()}'
secret_file = open("/etc/ldap.secret", "r")
bind_dn = f"cn=admin,{base_dn()}"
except IOError: # pragma: no cover
secret_file = open('/etc/machine.secret', 'r')
secret_file = open("/etc/machine.secret", "r")
bind_dn = config_registry()["ldap/hostdn"]
pwd_line = secret_file.readline()
pwd = re.sub('\n', '', pwd_line)
pwd = re.sub("\n", "", pwd_line)
import univention.admin.uldap
return univention.admin.uldap.access(
host=config_registry()['ldap/master'],
host=config_registry()["ldap/master"],
base=base_dn(),
binddn=bind_dn,
bindpw=pwd,
start_tls=1,
)
return _singleton('uldap', construct)
return _singleton("uldap", construct)
def config():
def construct():
import univention.admin.config
return univention.admin.config.config()
return _singleton('config', construct)
return _singleton("config", construct)
def init_modules():
def construct():
import univention.admin.modules
univention.admin.modules.update()
return True
return _singleton('modules_initialized', construct)
return _singleton("modules_initialized", construct)
def position_base_dn():
def construct():
import univention.admin.uldap
return univention.admin.uldap.position(base_dn())
return _singleton('position_base_dn', construct)
return _singleton("position_base_dn", construct)
def ldap_dn_tree_parent(dn, count=1):
dn_array = dn.split(',')
dn_array = dn.split(",")
dn_array[0:count] = []
return ','.join(dn_array)
return ",".join(dn_array)
def ldap_search(filter, base=None, attr=None):
@@ -142,12 +149,7 @@ def ldap_search(filter, base=None, attr=None):
if base is None:
base = base_dn()
msgid = uldap().lo.lo.search(
base,
ldap_module().SCOPE_SUBTREE,
filterstr=filter,
attrlist=attr
)
msgid = uldap().lo.lo.search(base, ldap_module().SCOPE_SUBTREE, filterstr=filter, attrlist=attr)
# I used to have a try/finally here, but there seems to be a bug in Python
# which swallows the KeyboardInterrupt.
# The abandon call now doesn't make too much sense.
@@ -181,12 +183,13 @@ def module_by_name(module_name_):
def construct():
import univention.admin.modules
init_modules()
module = univention.admin.modules.get(module_name_)
univention.admin.modules.init(uldap(), position_base_dn(), module)
return module
return _singleton(f'module/{module_name_}', construct)
return _singleton(f"module/{module_name_}", construct)
def get_umc_admin_objects():
@@ -196,6 +199,7 @@ def get_umc_admin_objects():
are not loaded until this function is called.
"""
import univention.admin
return univention.admin.objects
@@ -226,14 +230,14 @@ def umc_module_for_add(module, container_dn, superordinate=None):
def umc_module_for_edit(module, object_dn, superordinate=None):
"""Returns an UMC module object prepared for editing an existing entry.
The module is a module specification according to the udm commandline.
Example values are:
* users/user
* shares/share
* groups/group
The module is a module specification according to the udm commandline.
Example values are:
* users/user
* shares/share
* groups/group
The object_dn MUST be the dn of the object itself, not the container!
"""
The object_dn MUST be the dn of the object itself, not the container!
"""
mod = module_by_name(module)
objects = get_umc_admin_objects()
@@ -241,14 +245,7 @@ def umc_module_for_edit(module, object_dn, superordinate=None):
position = position_base_dn()
position.setDn(ldap_dn_tree_parent(object_dn))
obj = objects.get(
mod,
config(),
uldap(),
position=position,
superordinate=superordinate,
dn=object_dn
)
obj = objects.get(mod, config(), uldap(), position=position, superordinate=superordinate, dn=object_dn)
obj.open()
return obj
@@ -257,21 +254,16 @@ def umc_module_for_edit(module, object_dn, superordinate=None):
def create_containers_and_parents(container_dn):
"""Create a container and if needed the parents containers"""
import univention.admin.uexceptions as uexcp
if not container_dn.startswith("cn="):
raise AssertionError()
try:
parent = ldap_dn_tree_parent(container_dn)
obj = umc_module_for_add(
'container/cn',
parent
)
obj['name'] = container_dn.split(',')[0].split('=')[1]
obj['description'] = "container created by import"
obj = umc_module_for_add("container/cn", parent)
obj["name"] = container_dn.split(",")[0].split("=")[1]
obj["description"] = "container created by import"
except uexcp.ldapError:
create_containers_and_parents(parent)
obj = umc_module_for_add(
'container/cn',
parent
)
obj['name'] = container_dn.split(',')[0].split('=')[1]
obj['description'] = "container created by import"
obj = umc_module_for_add("container/cn", parent)
obj["name"] = container_dn.split(",")[0].split("=")[1]
obj["description"] = "container created by import"
View File
@@ -19,7 +19,6 @@ from ansible.module_utils.urls import fetch_url
class UTMModuleConfigurationError(Exception):
def __init__(self, msg, **args):
super().__init__(self, msg)
self.msg = msg
@@ -37,21 +36,38 @@ class UTMModule(AnsibleModule):
See the other modules like utm_aaa_group for example.
"""
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
mutually_exclusive=None, required_together=None, required_one_of=None, add_file_common_args=False,
supports_check_mode=False, required_if=None):
def __init__(
self,
argument_spec,
bypass_checks=False,
no_log=False,
mutually_exclusive=None,
required_together=None,
required_one_of=None,
add_file_common_args=False,
supports_check_mode=False,
required_if=None,
):
default_specs = dict(
headers=dict(type='dict', required=False, default={}),
utm_host=dict(type='str', required=True),
utm_port=dict(type='int', default=4444),
utm_token=dict(type='str', required=True, no_log=True),
utm_protocol=dict(type='str', required=False, default="https", choices=["https", "http"]),
validate_certs=dict(type='bool', required=False, default=True),
state=dict(default='present', choices=['present', 'absent'])
headers=dict(type="dict", required=False, default={}),
utm_host=dict(type="str", required=True),
utm_port=dict(type="int", default=4444),
utm_token=dict(type="str", required=True, no_log=True),
utm_protocol=dict(type="str", required=False, default="https", choices=["https", "http"]),
validate_certs=dict(type="bool", required=False, default=True),
state=dict(default="present", choices=["present", "absent"]),
)
super().__init__(
self._merge_specs(default_specs, argument_spec),
bypass_checks,
no_log,
mutually_exclusive,
required_together,
required_one_of,
add_file_common_args,
supports_check_mode,
required_if,
)
super().__init__(self._merge_specs(default_specs, argument_spec), bypass_checks, no_log,
mutually_exclusive, required_together, required_one_of,
add_file_common_args, supports_check_mode, required_if)
def _merge_specs(self, default_specs, custom_specs):
result = default_specs.copy()
@@ -60,7 +76,6 @@ class UTMModule(AnsibleModule):
class UTM:
def __init__(self, module, endpoint, change_relevant_keys, info_only=False):
"""
Initialize UTM Class
@@ -71,16 +86,14 @@ class UTM:
"""
self.info_only = info_only
self.module = module
self.request_url = (
f"{module.params.get('utm_protocol')}://{module.params.get('utm_host')}:{module.params.get('utm_port')}/api/objects/{endpoint}/"
)
self.request_url = f"{module.params.get('utm_protocol')}://{module.params.get('utm_host')}:{module.params.get('utm_port')}/api/objects/{endpoint}/"
"""
The change_relevant_keys will be checked for changes to determine whether the object needs to be updated
"""
self.change_relevant_keys = change_relevant_keys
self.module.params['url_username'] = 'token'
self.module.params['url_password'] = module.params.get('utm_token')
self.module.params["url_username"] = "token"
self.module.params["url_password"] = module.params.get("utm_token")
if all(elem in self.change_relevant_keys for elem in module.params.keys()):
raise UTMModuleConfigurationError(
f"The keys {self.change_relevant_keys} to check are not in the modules keys:\n{list(module.params.keys())}"
@@ -89,9 +102,9 @@ class UTM:
def execute(self):
try:
if not self.info_only:
if self.module.params.get('state') == 'present':
if self.module.params.get("state") == "present":
self._add()
elif self.module.params.get('state') == 'absent':
elif self.module.params.get("state") == "absent":
self._remove()
else:
self._info()
@@ -125,19 +138,23 @@ class UTM:
else:
data_as_json_string = self.module.jsonify(self.module.params)
if result is None:
response, info = fetch_url(self.module, self.request_url, method="POST",
headers=combined_headers,
data=data_as_json_string)
response, info = fetch_url(
self.module, self.request_url, method="POST", headers=combined_headers, data=data_as_json_string
)
if info["status"] >= 400:
self.module.fail_json(msg=json.loads(info["body"]))
is_changed = True
result = self._clean_result(json.loads(response.read()))
else:
if self._is_object_changed(self.change_relevant_keys, self.module, result):
response, info = fetch_url(self.module, self.request_url + result['_ref'], method="PUT",
headers=combined_headers,
data=data_as_json_string)
if info['status'] >= 400:
response, info = fetch_url(
self.module,
self.request_url + result["_ref"],
method="PUT",
headers=combined_headers,
data=data_as_json_string,
)
if info["status"] >= 400:
self.module.fail_json(msg=json.loads(info["body"]))
is_changed = True
result = self._clean_result(json.loads(response.read()))
@@ -149,9 +166,9 @@ class UTM:
:return: A combined headers dict
"""
default_headers = {"Accept": "application/json", "Content-type": "application/json"}
if self.module.params.get('headers') is not None:
if self.module.params.get("headers") is not None:
result = default_headers.copy()
result.update(self.module.params.get('headers'))
result.update(self.module.params.get("headers"))
else:
result = default_headers
return result
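The merge above is plain copy-and-update dict semantics; a minimal standalone illustration (header values are made up):

default_headers = {"Accept": "application/json", "Content-type": "application/json"}
user_headers = {"Accept": "text/plain", "X-Custom": "1"}  # hypothetical module input

combined = default_headers.copy()
combined.update(user_headers)
# combined == {"Accept": "text/plain", "Content-type": "application/json", "X-Custom": "1"}
# user-supplied keys override the defaults; unset defaults survive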
@@ -163,9 +180,13 @@ class UTM:
is_changed = False
info, result = self._lookup_entry(self.module, self.request_url)
if result is not None:
response, info = fetch_url(self.module, self.request_url + result['_ref'], method="DELETE",
headers={"Accept": "application/json", "X-Restd-Err-Ack": "all"},
data=self.module.jsonify(self.module.params))
response, info = fetch_url(
self.module,
self.request_url + result["_ref"],
method="DELETE",
headers={"Accept": "application/json", "X-Restd-Err-Ack": "all"},
data=self.module.jsonify(self.module.params),
)
if info["status"] >= 400:
self.module.fail_json(msg=json.loads(info["body"]))
else:
@@ -183,7 +204,7 @@ class UTM:
result = None
if response is not None:
results = json.loads(response.read())
result = next((d for d in results if d['name'] == module.params.get('name')), None)
result = next((d for d in results if d["name"] == module.params.get("name")), None)
return info, result
def _clean_result(self, result):
@@ -192,14 +213,14 @@ class UTM:
:param result: The result from the query
:return: The modified result
"""
del result['utm_host']
del result['utm_port']
del result['utm_token']
del result['utm_protocol']
del result['validate_certs']
del result['url_username']
del result['url_password']
del result['state']
del result["utm_host"]
del result["utm_port"]
del result["utm_token"]
del result["utm_protocol"]
del result["validate_certs"]
del result["url_username"]
del result["url_password"]
del result["state"]
return result
def _is_object_changed(self, keys, module, result):
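The body of _is_object_changed lies outside this hunk; as a rough sketch (not necessarily the collection's exact implementation), the update decision above amounts to a key-by-key comparison:

def _is_object_changed(self, keys, module, result):
    # Sketch: flag a change as soon as any change-relevant module
    # parameter differs from the value stored on the UTM object.
    return any(module.params.get(key) != result.get(key) for key in keys)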

View File

@@ -94,7 +94,7 @@ class _Variable:
@property
def diff_result(self):
if self.diff and self.has_changed:
return {'before': self.initial_value, 'after': self.value}
return {"before": self.initial_value, "after": self.value}
return
def __str__(self):
@@ -105,7 +105,19 @@ class _Variable:
class VarDict:
reserved_names = ('__vars__', '_var', 'var', 'set_meta', 'get_meta', 'set', 'output', 'diff', 'facts', 'has_changed', 'as_dict')
reserved_names = (
"__vars__",
"_var",
"var",
"set_meta",
"get_meta",
"set",
"output",
"diff",
"facts",
"has_changed",
"as_dict",
)
def __init__(self):
self.__vars__ = dict()
@@ -123,7 +135,7 @@ class VarDict:
return getattr(super(), item)
def __setattr__(self, key, value):
if key == '__vars__':
if key == "__vars__":
super().__setattr__(key, value)
else:
self.set(key, value)
@@ -177,11 +189,13 @@ class VarDict:
return {n: v.value for n, v in self.__vars__.items() if v.output and v.is_visible(verbosity)}
def diff(self, verbosity=0):
diff_results = [(n, v.diff_result) for n, v in self.__vars__.items() if v.diff_result and v.is_visible(verbosity)]
diff_results = [
(n, v.diff_result) for n, v in self.__vars__.items() if v.diff_result and v.is_visible(verbosity)
]
if diff_results:
before = {n: dr['before'] for n, dr in diff_results}
after = {n: dr['after'] for n, dr in diff_results}
return {'before': before, 'after': after}
before = {n: dr["before"] for n, dr in diff_results}
after = {n: dr["after"] for n, dr in diff_results}
return {"before": before, "after": after}
return None
def facts(self, verbosity=0):
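The VarDict hunks above only change quoting and wrapping; for context, a sketch of typical usage (assuming set() accepts per-variable flags such as diff=, as the _Variable attributes suggest):

vars = VarDict()
vars.set("foo", "initial", diff=True)  # diff= is assumed from the _Variable attributes above
vars.foo = "changed"                   # attribute assignment routes through set()
vars.output()  # -> {"foo": "changed"}
vars.diff()    # -> {"before": {"foo": "initial"}, "after": {"foo": "changed"}}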

View File

@@ -1,4 +1,3 @@
# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

View File

@@ -19,76 +19,71 @@ VXOS_VERSION = None
def get_version(iocs_json):
if not iocs_json:
raise Exception('Invalid IOC json')
active = next((x for x in iocs_json if x['mgmtRole']), None)
raise Exception("Invalid IOC json")
active = next((x for x in iocs_json if x["mgmtRole"]), None)
if active is None:
raise Exception('Unable to detect active IOC')
ver = active['swVersion']
if ver[0] != 'v':
raise Exception('Illegal version string')
ver = ver[1:ver.find('-')]
ver = map(int, ver.split('.'))
raise Exception("Unable to detect active IOC")
ver = active["swVersion"]
if ver[0] != "v":
raise Exception("Illegal version string")
ver = ver[1 : ver.find("-")]
ver = map(int, ver.split("."))
return tuple(ver)
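A worked example of the version parsing above, with a made-up swVersion string:

sw_version = "v3.5.0-123"                  # hypothetical active IOC swVersion
ver = sw_version[1:sw_version.find("-")]   # "3.5.0"
tuple(map(int, ver.split(".")))            # (3, 5, 0)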
def get_array(module):
"""Return storage array object or fail"""
global VXOS_VERSION
array = module.params['array']
user = module.params.get('user', None)
password = module.params.get('password', None)
validate = module.params.get('validate_certs')
array = module.params["array"]
user = module.params.get("user", None)
password = module.params.get("password", None)
validate = module.params.get("validate_certs")
if not HAS_VEXATAPI:
module.fail_json(msg='vexatapi library is required for this module. '
'To install, use `pip install vexatapi`')
module.fail_json(msg="vexatapi library is required for this module. To install, use `pip install vexatapi`")
if user and password:
system = VexataAPIProxy(array, user, password, verify_cert=validate)
else:
module.fail_json(msg='The user/password are required to be passed in to '
'the module as arguments or by setting the '
'VEXATA_USER and VEXATA_PASSWORD environment variables.')
module.fail_json(
msg="The user/password are required to be passed in to "
"the module as arguments or by setting the "
"VEXATA_USER and VEXATA_PASSWORD environment variables."
)
try:
if system.test_connection():
VXOS_VERSION = get_version(system.iocs())
return system
else:
module.fail_json(msg='Test connection to array failed.')
module.fail_json(msg="Test connection to array failed.")
except Exception as e:
module.fail_json(msg=f'Vexata API access failed: {e}')
module.fail_json(msg=f"Vexata API access failed: {e}")
def argument_spec():
"""Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
return dict(
array=dict(type='str',
required=True),
user=dict(type='str',
fallback=(env_fallback, ['VEXATA_USER'])),
password=dict(type='str',
no_log=True,
fallback=(env_fallback, ['VEXATA_PASSWORD'])),
validate_certs=dict(type='bool',
required=False,
default=False),
array=dict(type="str", required=True),
user=dict(type="str", fallback=(env_fallback, ["VEXATA_USER"])),
password=dict(type="str", no_log=True, fallback=(env_fallback, ["VEXATA_PASSWORD"])),
validate_certs=dict(type="bool", required=False, default=False),
)
def required_together():
"""Return the default list used for the required_together argument to AnsibleModule"""
return [['user', 'password']]
return [["user", "password"]]
def size_to_MiB(size):
"""Convert a '<integer>[MGT]' string to MiB, return -1 on error."""
quant = size[:-1]
exponent = size[-1]
if not quant.isdigit() or exponent not in 'MGT':
if not quant.isdigit() or exponent not in "MGT":
return -1
quant = int(quant)
if exponent == 'G':
if exponent == "G":
quant <<= 10
elif exponent == 'T':
elif exponent == "T":
quant <<= 20
return quant
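The shifts above are the usual binary-unit steps (1 GiB = 1024 MiB, 1 TiB = 1024 * 1024 MiB); with the function above:

size_to_MiB("512M")  # -> 512
size_to_MiB("2G")    # -> 2 << 10 == 2048
size_to_MiB("1T")    # -> 1 << 20 == 1048576
size_to_MiB("10K")   # -> -1 (K is not an accepted exponent)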

View File

@@ -1,4 +1,3 @@
# Copyright (c) 2022 Western Digital Corporation
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
@@ -18,6 +17,7 @@ from ansible_collections.community.general.plugins.module_utils.redfish_utils im
class WdcRedfishUtils(RedfishUtils):
"""Extension to RedfishUtils to support WDC enclosures."""
# Status codes returned by WDC FW Update Status
UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE = 0
UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS = 1
@@ -41,19 +41,15 @@ class WdcRedfishUtils(RedfishUtils):
CHASSIS_LOCATE = "#Chassis.Locate"
CHASSIS_POWER_MODE = "#Chassis.PowerMode"
def __init__(self,
creds,
root_uris,
timeout,
module,
resource_id,
data_modification):
super().__init__(creds=creds,
root_uri=root_uris[0],
timeout=timeout,
module=module,
resource_id=resource_id,
data_modification=data_modification)
def __init__(self, creds, root_uris, timeout, module, resource_id, data_modification):
super().__init__(
creds=creds,
root_uri=root_uris[0],
timeout=timeout,
module=module,
resource_id=resource_id,
data_modification=data_modification,
)
# Update the root URI if we cannot perform a Redfish GET to the first one
self._set_root_uri(root_uris)
@@ -66,14 +62,14 @@ class WdcRedfishUtils(RedfishUtils):
for root_uri in root_uris:
uri = f"{root_uri}/redfish/v1"
response = self.get_request(uri)
if response['ret']:
if response["ret"]:
self.root_uri = root_uri
break
def _find_updateservice_resource(self):
"""Find the update service resource as well as additional WDC-specific resources."""
response = super()._find_updateservice_resource()
if not response['ret']:
if not response["ret"]:
return response
return self._find_updateservice_additional_uris()
@@ -87,47 +83,47 @@ class WdcRedfishUtils(RedfishUtils):
None if unable to determine.
"""
response = self.get_request(f"{self.root_uri}{self.service_root}Chassis/Enclosure")
if response['ret'] is False:
if response["ret"] is False:
return None
pattern = r".*-[A,B]"
data = response['data']
if 'EnclVersion' not in data:
enc_version = 'G1'
data = response["data"]
if "EnclVersion" not in data:
enc_version = "G1"
else:
enc_version = data['EnclVersion']
return re.match(pattern, data['SerialNumber']) is not None, enc_version
enc_version = data["EnclVersion"]
return re.match(pattern, data["SerialNumber"]) is not None, enc_version
def _find_updateservice_additional_uris(self):
"""Find & set WDC-specific update service URIs"""
response = self.get_request(self.root_uri + self._update_uri())
if response['ret'] is False:
if response["ret"] is False:
return response
data = response['data']
if 'Actions' not in data:
return {'ret': False, 'msg': 'Service does not support SimpleUpdate'}
if '#UpdateService.SimpleUpdate' not in data['Actions']:
return {'ret': False, 'msg': 'Service does not support SimpleUpdate'}
action = data['Actions']['#UpdateService.SimpleUpdate']
if 'target' not in action:
return {'ret': False, 'msg': 'Service does not support SimpleUpdate'}
self.simple_update_uri = action['target']
data = response["data"]
if "Actions" not in data:
return {"ret": False, "msg": "Service does not support SimpleUpdate"}
if "#UpdateService.SimpleUpdate" not in data["Actions"]:
return {"ret": False, "msg": "Service does not support SimpleUpdate"}
action = data["Actions"]["#UpdateService.SimpleUpdate"]
if "target" not in action:
return {"ret": False, "msg": "Service does not support SimpleUpdate"}
self.simple_update_uri = action["target"]
# Simple update status URI is not provided via GET /redfish/v1/UpdateService
# So we have to hard code it.
self.simple_update_status_uri = f"{self.simple_update_uri}/Status"
# FWActivate URI
if 'Oem' not in data['Actions']:
return {'ret': False, 'msg': 'Service does not support OEM operations'}
if 'WDC' not in data['Actions']['Oem']:
return {'ret': False, 'msg': 'Service does not support WDC operations'}
if '#UpdateService.FWActivate' not in data['Actions']['Oem']['WDC']:
return {'ret': False, 'msg': 'Service does not support FWActivate'}
action = data['Actions']['Oem']['WDC']['#UpdateService.FWActivate']
if 'target' not in action:
return {'ret': False, 'msg': 'Service does not support FWActivate'}
self.firmware_activate_uri = action['target']
return {'ret': True}
if "Oem" not in data["Actions"]:
return {"ret": False, "msg": "Service does not support OEM operations"}
if "WDC" not in data["Actions"]["Oem"]:
return {"ret": False, "msg": "Service does not support WDC operations"}
if "#UpdateService.FWActivate" not in data["Actions"]["Oem"]["WDC"]:
return {"ret": False, "msg": "Service does not support FWActivate"}
action = data["Actions"]["Oem"]["WDC"]["#UpdateService.FWActivate"]
if "target" not in action:
return {"ret": False, "msg": "Service does not support FWActivate"}
self.firmware_activate_uri = action["target"]
return {"ret": True}
def _simple_update_status_uri(self):
return self.simple_update_status_uri
@@ -142,39 +138,37 @@ class WdcRedfishUtils(RedfishUtils):
"""Issue Redfish HTTP GET to return the simple update status"""
result = {}
response = self.get_request(self.root_uri + self._simple_update_status_uri())
if response['ret'] is False:
if response["ret"] is False:
return response
result['ret'] = True
data = response['data']
result['entries'] = data
result["ret"] = True
data = response["data"]
result["entries"] = data
return result
def firmware_activate(self, update_opts):
"""Perform FWActivate using Redfish HTTP API."""
creds = update_opts.get('update_creds')
creds = update_opts.get("update_creds")
payload = {}
if creds:
if creds.get('username'):
payload["Username"] = creds.get('username')
if creds.get('password'):
payload["Password"] = creds.get('password')
if creds.get("username"):
payload["Username"] = creds.get("username")
if creds.get("password"):
payload["Password"] = creds.get("password")
# Make sure the service supports FWActivate
response = self.get_request(self.root_uri + self._update_uri())
if response['ret'] is False:
if response["ret"] is False:
return response
data = response['data']
if 'Actions' not in data:
return {'ret': False, 'msg': 'Service does not support FWActivate'}
data = response["data"]
if "Actions" not in data:
return {"ret": False, "msg": "Service does not support FWActivate"}
response = self.post_request(self.root_uri + self._firmware_activate_uri(), payload)
if response['ret'] is False:
if response["ret"] is False:
return response
return {'ret': True, 'changed': True,
'msg': "FWActivate requested"}
return {"ret": True, "changed": True, "msg": "FWActivate requested"}
def _get_bundle_version(self,
bundle_uri):
def _get_bundle_version(self, bundle_uri):
"""Get the firmware version from a bundle file, and whether or not it is multi-tenant.
Only supports HTTP at this time. Assumes URI exists and is a tarfile.
@@ -192,8 +186,7 @@ class WdcRedfishUtils(RedfishUtils):
and bundle generation. Either value will be None if unable to determine.
:rtype: str or None, bool or None
"""
bundle_temp_filename = fetch_file(module=self.module,
url=bundle_uri)
bundle_temp_filename = fetch_file(module=self.module, url=bundle_uri)
bundle_version = None
is_multi_tenant = None
gen = None
@@ -210,9 +203,9 @@ class WdcRedfishUtils(RedfishUtils):
# The DP firmware bundle is expected to have the value "DPG2"
# for cookie1 in the header
if cookie1 and (cookie1.decode("utf8") == "MMG2" or cookie1.decode("utf8") == "DPG2"):
file_name, ext = os.path.splitext(str(bundle_uri.rsplit('/', 1)[1]))
file_name, ext = os.path.splitext(str(bundle_uri.rsplit("/", 1)[1]))
# G2 bundle file name: Ultrastar-Data102_3000_SEP_1010-032_2.1.12
parsedFileName = file_name.split('_')
parsedFileName = file_name.split("_")
if len(parsedFileName) == 5:
bundle_version = parsedFileName[4]
# MM G2 is always single-tenant
@@ -237,7 +230,7 @@ class WdcRedfishUtils(RedfishUtils):
bin_file = tf.extractfile(bin_filename)
bin_file.seek(11)
byte_11 = bin_file.read(1)
is_multi_tenant = byte_11 == b'\x80'
is_multi_tenant = byte_11 == b"\x80"
gen = "G1"
return bundle_version, is_multi_tenant, gen
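For the G2 path above, the version falls out of the underscore-separated file name; using the sample name from the comment:

file_name = "Ultrastar-Data102_3000_SEP_1010-032_2.1.12"
parsed = file_name.split("_")
# ["Ultrastar-Data102", "3000", "SEP", "1010-032", "2.1.12"] -- five fields
bundle_version = parsed[4]  # "2.1.12"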
@@ -251,7 +244,7 @@ class WdcRedfishUtils(RedfishUtils):
:rtype: bool
"""
parsed_bundle_uri = urlparse(uri)
return parsed_bundle_uri.scheme.lower() in ['http', 'https']
return parsed_bundle_uri.scheme.lower() in ["http", "https"]
def update_and_activate(self, update_opts):
"""Update and activate the firmware in a single action.
@@ -262,12 +255,18 @@ class WdcRedfishUtils(RedfishUtils):
"""
# Convert credentials to standard HTTP format
if update_opts.get("update_creds") is not None and "username" in update_opts["update_creds"] and "password" in update_opts["update_creds"]:
if (
update_opts.get("update_creds") is not None
and "username" in update_opts["update_creds"]
and "password" in update_opts["update_creds"]
):
update_creds = update_opts["update_creds"]
parsed_url = urlparse(update_opts["update_image_uri"])
if update_creds:
original_netloc = parsed_url.netloc
parsed_url = parsed_url._replace(netloc=f"{update_creds.get('username')}:{update_creds.get('password')}@{original_netloc}")
parsed_url = parsed_url._replace(
netloc=f"{update_creds.get('username')}:{update_creds.get('password')}@{original_netloc}"
)
update_opts["update_image_uri"] = urlunparse(parsed_url)
del update_opts["update_creds"]
@@ -275,24 +274,19 @@ class WdcRedfishUtils(RedfishUtils):
bundle_uri = update_opts["update_image_uri"]
if not self.uri_is_http(bundle_uri):
return {
'ret': False,
'msg': 'Bundle URI must be HTTP or HTTPS'
}
return {"ret": False, "msg": "Bundle URI must be HTTP or HTTPS"}
# Make sure IOM is ready for update
result = self.get_simple_update_status()
if result['ret'] is False:
if result["ret"] is False:
return result
update_status = result['entries']
status_code = update_status['StatusCode']
status_description = update_status['Description']
if status_code not in [
self.UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE,
self.UPDATE_STATUS_CODE_FW_UPDATE_FAILED
]:
update_status = result["entries"]
status_code = update_status["StatusCode"]
status_description = update_status["Description"]
if status_code not in [self.UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE, self.UPDATE_STATUS_CODE_FW_UPDATE_FAILED]:
return {
'ret': False,
'msg': f'Target is not ready for FW update. Current status: {status_code} ({status_description})'}
"ret": False,
"msg": f"Target is not ready for FW update. Current status: {status_code} ({status_description})",
}
# Check the FW version in the bundle file, and compare it to what is already on the IOMs
@@ -300,8 +294,8 @@ class WdcRedfishUtils(RedfishUtils):
bundle_firmware_version, is_bundle_multi_tenant, bundle_gen = self._get_bundle_version(bundle_uri)
if bundle_firmware_version is None or is_bundle_multi_tenant is None or bundle_gen is None:
return {
'ret': False,
'msg': 'Unable to extract bundle version or multi-tenant status or generation from update image file'
"ret": False,
"msg": "Unable to extract bundle version or multi-tenant status or generation from update image file",
}
is_enclosure_multi_tenant, enclosure_gen = self._is_enclosure_multi_tenant_and_fetch_gen()
@@ -309,16 +303,13 @@ class WdcRedfishUtils(RedfishUtils):
# Verify that the bundle is correctly multi-tenant or not
if is_enclosure_multi_tenant != is_bundle_multi_tenant:
return {
'ret': False,
'msg': f'Enclosure multi-tenant is {is_enclosure_multi_tenant} but bundle multi-tenant is {is_bundle_multi_tenant}'
"ret": False,
"msg": f"Enclosure multi-tenant is {is_enclosure_multi_tenant} but bundle multi-tenant is {is_bundle_multi_tenant}",
}
# Verify that the bundle is compliant with the target enclosure
if enclosure_gen != bundle_gen:
return {
'ret': False,
'msg': f'Enclosure generation is {enclosure_gen} but bundle is of {bundle_gen}'
}
return {"ret": False, "msg": f"Enclosure generation is {enclosure_gen} but bundle is of {bundle_gen}"}
# Version number installed on IOMs
firmware_inventory = self.get_firmware_inventory()
@@ -334,27 +325,22 @@ class WdcRedfishUtils(RedfishUtils):
if is_enclosure_multi_tenant:
# For multi-tenant, only one of the IOMs will be affected by the firmware update,
# so see if that IOM already has the same firmware version as the bundle.
firmware_already_installed = bundle_firmware_version == self._get_installed_firmware_version_of_multi_tenant_system(
iom_a_firmware_version,
iom_b_firmware_version)
firmware_already_installed = (
bundle_firmware_version
== self._get_installed_firmware_version_of_multi_tenant_system(
iom_a_firmware_version, iom_b_firmware_version
)
)
else:
# For single-tenant, see if both IOMs already have the same firmware version as the bundle.
firmware_already_installed = bundle_firmware_version == iom_a_firmware_version == iom_b_firmware_version
# If this FW is already installed, return changed: False, and do not update the firmware.
if firmware_already_installed:
return {
'ret': True,
'changed': False,
'msg': f'Version {bundle_firmware_version} already installed'
}
return {"ret": True, "changed": False, "msg": f"Version {bundle_firmware_version} already installed"}
# Version numbers don't match the bundle -- proceed with update (unless we are in check mode)
if self.module.check_mode:
return {
'ret': True,
'changed': True,
'msg': 'Update not performed in check mode.'
}
return {"ret": True, "changed": True, "msg": "Update not performed in check mode."}
update_successful = False
retry_interval_seconds = 5
max_number_of_retries = 5
@@ -365,14 +351,14 @@ class WdcRedfishUtils(RedfishUtils):
retry_number += 1
result = self.simple_update(update_opts)
if result['ret'] is not True:
if result["ret"] is not True:
# Sometimes a timeout error is returned even though the update actually was requested.
# Check the update status to see if the update is in progress.
status_result = self.get_simple_update_status()
if status_result['ret'] is False:
if status_result["ret"] is False:
continue
update_status = status_result['entries']
status_code = update_status['StatusCode']
update_status = status_result["entries"]
status_code = update_status["StatusCode"]
if status_code != self.UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS:
# Update is not in progress -- retry until max number of retries
continue
@@ -393,20 +379,22 @@ class WdcRedfishUtils(RedfishUtils):
# to "update in progress"
status_codes_for_update_incomplete = [
self.UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS,
self.UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE
self.UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE,
]
iteration = 0
while status_code in status_codes_for_update_incomplete \
and datetime.datetime.now() - start_time < datetime.timedelta(minutes=max_wait_minutes):
while (
status_code in status_codes_for_update_incomplete
and datetime.datetime.now() - start_time < datetime.timedelta(minutes=max_wait_minutes)
):
if iteration != 0:
time.sleep(polling_interval_seconds)
iteration += 1
result = self.get_simple_update_status()
if result['ret'] is False:
if result["ret"] is False:
continue # We may get timeouts, just keep trying until we give up
update_status = result['entries']
status_code = update_status['StatusCode']
status_description = update_status['Description']
update_status = result["entries"]
status_code = update_status["StatusCode"]
status_description = update_status["Description"]
if status_code == self.UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS:
# Once it says update in progress, "ready for update" is no longer a valid status code
status_codes_for_update_incomplete = [self.UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS]
@@ -414,16 +402,14 @@ class WdcRedfishUtils(RedfishUtils):
# Update no longer in progress -- verify that it finished
if status_code != self.UPDATE_STATUS_CODE_FW_UPDATE_COMPLETED_WAITING_FOR_ACTIVATION:
return {
'ret': False,
'msg': f'Target is not ready for FW activation after update. Current status: {status_code} ({status_description})'}
"ret": False,
"msg": f"Target is not ready for FW activation after update. Current status: {status_code} ({status_description})",
}
self.firmware_activate(update_opts)
return {'ret': True, 'changed': True,
'msg': "Firmware updated and activation initiated."}
return {"ret": True, "changed": True, "msg": "Firmware updated and activation initiated."}
def _get_installed_firmware_version_of_multi_tenant_system(self,
iom_a_firmware_version,
iom_b_firmware_version):
def _get_installed_firmware_version_of_multi_tenant_system(self, iom_a_firmware_version, iom_b_firmware_version):
"""Return the version for the active IOM on a multi-tenant system.
Only call this on a multi-tenant system.
@@ -434,18 +420,18 @@ class WdcRedfishUtils(RedfishUtils):
# The one we are on will return valid data.
# The other will return an error with message "IOM Module A/B cannot be read"
which_iom_is_this = None
for iom_letter in ['A', 'B']:
for iom_letter in ["A", "B"]:
iom_uri = f"Chassis/IOModule{iom_letter}FRU"
response = self.get_request(self.root_uri + self.service_root + iom_uri)
if response['ret'] is False:
if response["ret"] is False:
continue
data = response['data']
data = response["data"]
if "Id" in data: # Assume if there is an "Id", it is valid
which_iom_is_this = iom_letter
break
if which_iom_is_this == 'A':
if which_iom_is_this == "A":
return iom_a_firmware_version
elif which_iom_is_this == 'B':
elif which_iom_is_this == "B":
return iom_b_firmware_version
else:
return None
@@ -459,11 +445,21 @@ class WdcRedfishUtils(RedfishUtils):
return None
if WdcRedfishUtils.WDC not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM]:
return None
if WdcRedfishUtils.CHASSIS_LOCATE not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC]:
if (
WdcRedfishUtils.CHASSIS_LOCATE
not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC]
):
return None
if WdcRedfishUtils.TARGET not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_LOCATE]:
if (
WdcRedfishUtils.TARGET
not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][
WdcRedfishUtils.CHASSIS_LOCATE
]
):
return None
return data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_LOCATE][WdcRedfishUtils.TARGET]
return data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_LOCATE][
WdcRedfishUtils.TARGET
]
@staticmethod
def _get_power_mode_uri(data):
@@ -474,41 +470,51 @@ class WdcRedfishUtils(RedfishUtils):
return None
if WdcRedfishUtils.WDC not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM]:
return None
if WdcRedfishUtils.CHASSIS_POWER_MODE not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC]:
if (
WdcRedfishUtils.CHASSIS_POWER_MODE
not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC]
):
return None
if WdcRedfishUtils.TARGET not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_POWER_MODE]:
if (
WdcRedfishUtils.TARGET
not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][
WdcRedfishUtils.CHASSIS_POWER_MODE
]
):
return None
return data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_POWER_MODE][WdcRedfishUtils.TARGET]
return data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][
WdcRedfishUtils.CHASSIS_POWER_MODE
][WdcRedfishUtils.TARGET]
def manage_indicator_led(self, command, resource_uri):
key = 'IndicatorLED'
key = "IndicatorLED"
payloads = {'IndicatorLedOn': 'On', 'IndicatorLedOff': 'Off'}
current_led_status_map = {'IndicatorLedOn': 'Blinking', 'IndicatorLedOff': 'Off'}
payloads = {"IndicatorLedOn": "On", "IndicatorLedOff": "Off"}
current_led_status_map = {"IndicatorLedOn": "Blinking", "IndicatorLedOff": "Off"}
result = {}
response = self.get_request(self.root_uri + resource_uri)
if response['ret'] is False:
if response["ret"] is False:
return response
result['ret'] = True
data = response['data']
result["ret"] = True
data = response["data"]
if key not in data:
return {'ret': False, 'msg': f"Key {key} not found"}
return {"ret": False, "msg": f"Key {key} not found"}
current_led_status = data[key]
if current_led_status == current_led_status_map[command]:
return {'ret': True, 'changed': False}
return {"ret": True, "changed": False}
led_locate_uri = self._get_led_locate_uri(data)
if led_locate_uri is None:
return {'ret': False, 'msg': 'LED locate URI not found.'}
return {"ret": False, "msg": "LED locate URI not found."}
if command in payloads.keys():
payload = {'LocateState': payloads[command]}
payload = {"LocateState": payloads[command]}
response = self.post_request(self.root_uri + led_locate_uri, payload)
if response['ret'] is False:
if response["ret"] is False:
return response
else:
return {'ret': False, 'msg': 'Invalid command'}
return {"ret": False, "msg": "Invalid command"}
return result
@@ -519,35 +525,38 @@ class WdcRedfishUtils(RedfishUtils):
if resource_uri is None:
resource_uri = self.chassis_uri
payloads = {'PowerModeNormal': 'Normal', 'PowerModeLow': 'Low'}
payloads = {"PowerModeNormal": "Normal", "PowerModeLow": "Low"}
requested_power_mode = payloads[command]
result = {}
response = self.get_request(self.root_uri + resource_uri)
if response['ret'] is False:
if response["ret"] is False:
return response
result['ret'] = True
data = response['data']
result["ret"] = True
data = response["data"]
# Make sure the response includes Oem.WDC.PowerMode, and get current power mode
power_mode = 'PowerMode'
if WdcRedfishUtils.OEM not in data or WdcRedfishUtils.WDC not in data[WdcRedfishUtils.OEM] or\
power_mode not in data[WdcRedfishUtils.OEM][WdcRedfishUtils.WDC]:
return {'ret': False, 'msg': 'Resource does not support Oem.WDC.PowerMode'}
power_mode = "PowerMode"
if (
WdcRedfishUtils.OEM not in data
or WdcRedfishUtils.WDC not in data[WdcRedfishUtils.OEM]
or power_mode not in data[WdcRedfishUtils.OEM][WdcRedfishUtils.WDC]
):
return {"ret": False, "msg": "Resource does not support Oem.WDC.PowerMode"}
current_power_mode = data[WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][power_mode]
if current_power_mode == requested_power_mode:
return {'ret': True, 'changed': False}
return {"ret": True, "changed": False}
power_mode_uri = self._get_power_mode_uri(data)
if power_mode_uri is None:
return {'ret': False, 'msg': 'Power Mode URI not found.'}
return {"ret": False, "msg": "Power Mode URI not found."}
if command in payloads.keys():
payload = {'PowerMode': payloads[command]}
payload = {"PowerMode": payloads[command]}
response = self.post_request(self.root_uri + power_mode_uri, payload)
if response['ret'] is False:
if response["ret"] is False:
return response
else:
return {'ret': False, 'msg': 'Invalid command'}
return {"ret": False, "msg": "Invalid command"}
return result

View File

@@ -11,15 +11,15 @@ from ansible_collections.community.general.plugins.module_utils.cmd_runner impor
def xdg_mime_runner(module, **kwargs):
return CmdRunner(
module,
command=['xdg-mime'],
command=["xdg-mime"],
arg_formats=dict(
default=cmd_runner_fmt.as_fixed('default'),
query=cmd_runner_fmt.as_fixed('query'),
default=cmd_runner_fmt.as_fixed("default"),
query=cmd_runner_fmt.as_fixed("query"),
mime_types=cmd_runner_fmt.as_list(),
handler=cmd_runner_fmt.as_list(),
version=cmd_runner_fmt.as_fixed('--version'),
version=cmd_runner_fmt.as_fixed("--version"),
),
**kwargs
**kwargs,
)
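A sketch of how the runner above is typically driven, assuming the usual community.general CmdRunner context-manager pattern:

runner = xdg_mime_runner(module)  # module is an AnsibleModule instance
with runner("query default mime_types") as ctx:
    rc, out, err = ctx.run(mime_types=["text/html"])
    # executes roughly: xdg-mime query default text/html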

View File

@@ -13,6 +13,7 @@ import traceback
XENAPI_IMP_ERR = None
try:
import XenAPI
HAS_XENAPI = True
except ImportError:
HAS_XENAPI = False
@@ -24,22 +25,19 @@ from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
def xenserver_common_argument_spec():
return dict(
hostname=dict(type='str',
aliases=['host', 'pool'],
default='localhost',
fallback=(env_fallback, ['XENSERVER_HOST']),
),
username=dict(type='str',
aliases=['user', 'admin'],
default='root',
fallback=(env_fallback, ['XENSERVER_USER'])),
password=dict(type='str',
aliases=['pass', 'pwd'],
no_log=True,
fallback=(env_fallback, ['XENSERVER_PASSWORD'])),
validate_certs=dict(type='bool',
default=True,
fallback=(env_fallback, ['XENSERVER_VALIDATE_CERTS'])),
hostname=dict(
type="str",
aliases=["host", "pool"],
default="localhost",
fallback=(env_fallback, ["XENSERVER_HOST"]),
),
username=dict(
type="str", aliases=["user", "admin"], default="root", fallback=(env_fallback, ["XENSERVER_USER"])
),
password=dict(
type="str", aliases=["pass", "pwd"], no_log=True, fallback=(env_fallback, ["XENSERVER_PASSWORD"])
),
validate_certs=dict(type="bool", default=True, fallback=(env_fallback, ["XENSERVER_VALIDATE_CERTS"])),
)
@@ -49,7 +47,7 @@ def xapi_to_module_vm_power_state(power_state):
"running": "poweredon",
"halted": "poweredoff",
"suspended": "suspended",
"paused": "paused"
"paused": "paused",
}
return module_power_state_map.get(power_state)
@@ -78,7 +76,7 @@ def is_valid_ip_addr(ip_addr):
Returns:
bool: True if string is valid IPv4 address, else False.
"""
ip_addr_split = ip_addr.split('.')
ip_addr_split = ip_addr.split(".")
if len(ip_addr_split) != 4:
return False
@@ -104,22 +102,24 @@ def is_valid_ip_netmask(ip_netmask):
Returns:
bool: True if string is valid IPv4 netmask, else False.
"""
ip_netmask_split = ip_netmask.split('.')
ip_netmask_split = ip_netmask.split(".")
if len(ip_netmask_split) != 4:
return False
valid_octet_values = ['0', '128', '192', '224', '240', '248', '252', '254', '255']
valid_octet_values = ["0", "128", "192", "224", "240", "248", "252", "254", "255"]
for ip_netmask_octet in ip_netmask_split:
if ip_netmask_octet not in valid_octet_values:
return False
if ip_netmask_split[0] != '255' and (ip_netmask_split[1] != '0' or ip_netmask_split[2] != '0' or ip_netmask_split[3] != '0'):
if ip_netmask_split[0] != "255" and (
ip_netmask_split[1] != "0" or ip_netmask_split[2] != "0" or ip_netmask_split[3] != "0"
):
return False
elif ip_netmask_split[1] != '255' and (ip_netmask_split[2] != '0' or ip_netmask_split[3] != '0'):
elif ip_netmask_split[1] != "255" and (ip_netmask_split[2] != "0" or ip_netmask_split[3] != "0"):
return False
elif ip_netmask_split[2] != '255' and ip_netmask_split[3] != '0':
elif ip_netmask_split[2] != "255" and ip_netmask_split[3] != "0":
return False
return True
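The elif chain above enforces that the netmask's set bits are contiguous from the left; for example:

is_valid_ip_netmask("255.255.240.0")  # True: 255s, one partial octet, then zeros
is_valid_ip_netmask("255.0.255.0")    # False: non-255 octet followed by a non-zero one
is_valid_ip_netmask("255.255.250.0")  # False: 250 is not a valid netmask octet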
@@ -163,7 +163,7 @@ def ip_prefix_to_netmask(ip_prefix, skip_check=False):
ip_prefix_valid = is_valid_ip_prefix(ip_prefix)
if ip_prefix_valid:
return '.'.join([str((0xffffffff << (32 - int(ip_prefix)) >> i) & 0xff) for i in [24, 16, 8, 0]])
return ".".join([str((0xFFFFFFFF << (32 - int(ip_prefix)) >> i) & 0xFF) for i in [24, 16, 8, 0]])
else:
return ""
@@ -201,7 +201,7 @@ def is_valid_ip6_addr(ip6_addr):
bool: True if string is valid IPv6 address, else False.
"""
ip6_addr = ip6_addr.lower()
ip6_addr_split = ip6_addr.split(':')
ip6_addr_split = ip6_addr.split(":")
if ip6_addr_split[0] == "":
ip6_addr_split.pop(0)
@@ -220,7 +220,7 @@ def is_valid_ip6_addr(ip6_addr):
if len(ip6_addr_split) != 8:
return False
ip6_addr_hextet_regex = re.compile('^[0-9a-f]{1,4}$')
ip6_addr_hextet_regex = re.compile("^[0-9a-f]{1,4}$")
for ip6_addr_hextet in ip6_addr_split:
if not bool(ip6_addr_hextet_regex.match(ip6_addr_hextet)):
@@ -337,63 +337,67 @@ def gather_vm_params(module, vm_ref):
# We need some params like affinity, VBDs, VIFs, VDIs etc. dereferenced.
# Affinity.
if vm_params['affinity'] != "OpaqueRef:NULL":
vm_affinity = xapi_session.xenapi.host.get_record(vm_params['affinity'])
vm_params['affinity'] = vm_affinity
if vm_params["affinity"] != "OpaqueRef:NULL":
vm_affinity = xapi_session.xenapi.host.get_record(vm_params["affinity"])
vm_params["affinity"] = vm_affinity
else:
vm_params['affinity'] = {}
vm_params["affinity"] = {}
# VBDs.
vm_vbd_params_list = [xapi_session.xenapi.VBD.get_record(vm_vbd_ref) for vm_vbd_ref in vm_params['VBDs']]
vm_vbd_params_list = [xapi_session.xenapi.VBD.get_record(vm_vbd_ref) for vm_vbd_ref in vm_params["VBDs"]]
# List of VBDs is usually sorted by userdevice but we sort just
# in case. We need this list sorted by userdevice so that we can
# make positional pairing with module.params['disks'].
vm_vbd_params_list = sorted(vm_vbd_params_list, key=lambda vm_vbd_params: int(vm_vbd_params['userdevice']))
vm_params['VBDs'] = vm_vbd_params_list
vm_vbd_params_list = sorted(vm_vbd_params_list, key=lambda vm_vbd_params: int(vm_vbd_params["userdevice"]))
vm_params["VBDs"] = vm_vbd_params_list
# VDIs.
for vm_vbd_params in vm_params['VBDs']:
if vm_vbd_params['VDI'] != "OpaqueRef:NULL":
vm_vdi_params = xapi_session.xenapi.VDI.get_record(vm_vbd_params['VDI'])
for vm_vbd_params in vm_params["VBDs"]:
if vm_vbd_params["VDI"] != "OpaqueRef:NULL":
vm_vdi_params = xapi_session.xenapi.VDI.get_record(vm_vbd_params["VDI"])
else:
vm_vdi_params = {}
vm_vbd_params['VDI'] = vm_vdi_params
vm_vbd_params["VDI"] = vm_vdi_params
# VIFs.
vm_vif_params_list = [xapi_session.xenapi.VIF.get_record(vm_vif_ref) for vm_vif_ref in vm_params['VIFs']]
vm_vif_params_list = [xapi_session.xenapi.VIF.get_record(vm_vif_ref) for vm_vif_ref in vm_params["VIFs"]]
# List of VIFs is usually sorted by device but we sort just
# in case. We need this list sorted by device so that we can
# make positional pairing with module.params['networks'].
vm_vif_params_list = sorted(vm_vif_params_list, key=lambda vm_vif_params: int(vm_vif_params['device']))
vm_params['VIFs'] = vm_vif_params_list
vm_vif_params_list = sorted(vm_vif_params_list, key=lambda vm_vif_params: int(vm_vif_params["device"]))
vm_params["VIFs"] = vm_vif_params_list
# Networks.
for vm_vif_params in vm_params['VIFs']:
if vm_vif_params['network'] != "OpaqueRef:NULL":
vm_network_params = xapi_session.xenapi.network.get_record(vm_vif_params['network'])
for vm_vif_params in vm_params["VIFs"]:
if vm_vif_params["network"] != "OpaqueRef:NULL":
vm_network_params = xapi_session.xenapi.network.get_record(vm_vif_params["network"])
else:
vm_network_params = {}
vm_vif_params['network'] = vm_network_params
vm_vif_params["network"] = vm_network_params
# Guest metrics.
if vm_params['guest_metrics'] != "OpaqueRef:NULL":
vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_params['guest_metrics'])
vm_params['guest_metrics'] = vm_guest_metrics
if vm_params["guest_metrics"] != "OpaqueRef:NULL":
vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_params["guest_metrics"])
vm_params["guest_metrics"] = vm_guest_metrics
else:
vm_params['guest_metrics'] = {}
vm_params["guest_metrics"] = {}
# Detect customization agent.
xenserver_version = get_xenserver_version(module)
if (xenserver_version[0] >= 7 and xenserver_version[1] >= 0 and vm_params.get('guest_metrics') and
"feature-static-ip-setting" in vm_params['guest_metrics']['other']):
vm_params['customization_agent'] = "native"
if (
xenserver_version[0] >= 7
and xenserver_version[1] >= 0
and vm_params.get("guest_metrics")
and "feature-static-ip-setting" in vm_params["guest_metrics"]["other"]
):
vm_params["customization_agent"] = "native"
else:
vm_params['customization_agent'] = "custom"
vm_params["customization_agent"] = "custom"
except XenAPI.Failure as f:
module.fail_json(msg=f"XAPI ERROR: {f.details}")
@@ -420,88 +424,90 @@ def gather_vm_facts(module, vm_params):
# Gather facts.
vm_facts = {
"state": xapi_to_module_vm_power_state(vm_params['power_state'].lower()),
"name": vm_params['name_label'],
"name_desc": vm_params['name_description'],
"uuid": vm_params['uuid'],
"is_template": vm_params['is_a_template'],
"folder": vm_params['other_config'].get('folder', ''),
"state": xapi_to_module_vm_power_state(vm_params["power_state"].lower()),
"name": vm_params["name_label"],
"name_desc": vm_params["name_description"],
"uuid": vm_params["uuid"],
"is_template": vm_params["is_a_template"],
"folder": vm_params["other_config"].get("folder", ""),
"hardware": {
"num_cpus": int(vm_params['VCPUs_max']),
"num_cpu_cores_per_socket": int(vm_params['platform'].get('cores-per-socket', '1')),
"memory_mb": int(int(vm_params['memory_dynamic_max']) / 1048576),
"num_cpus": int(vm_params["VCPUs_max"]),
"num_cpu_cores_per_socket": int(vm_params["platform"].get("cores-per-socket", "1")),
"memory_mb": int(int(vm_params["memory_dynamic_max"]) / 1048576),
},
"disks": [],
"cdrom": {},
"networks": [],
"home_server": vm_params['affinity'].get('name_label', ''),
"domid": vm_params['domid'],
"platform": vm_params['platform'],
"other_config": vm_params['other_config'],
"xenstore_data": vm_params['xenstore_data'],
"customization_agent": vm_params['customization_agent'],
"home_server": vm_params["affinity"].get("name_label", ""),
"domid": vm_params["domid"],
"platform": vm_params["platform"],
"other_config": vm_params["other_config"],
"xenstore_data": vm_params["xenstore_data"],
"customization_agent": vm_params["customization_agent"],
}
for vm_vbd_params in vm_params['VBDs']:
if vm_vbd_params['type'] == "Disk":
vm_disk_sr_params = xapi_session.xenapi.SR.get_record(vm_vbd_params['VDI']['SR'])
for vm_vbd_params in vm_params["VBDs"]:
if vm_vbd_params["type"] == "Disk":
vm_disk_sr_params = xapi_session.xenapi.SR.get_record(vm_vbd_params["VDI"]["SR"])
vm_disk_params = {
"size": int(vm_vbd_params['VDI']['virtual_size']),
"name": vm_vbd_params['VDI']['name_label'],
"name_desc": vm_vbd_params['VDI']['name_description'],
"sr": vm_disk_sr_params['name_label'],
"sr_uuid": vm_disk_sr_params['uuid'],
"os_device": vm_vbd_params['device'],
"vbd_userdevice": vm_vbd_params['userdevice'],
"size": int(vm_vbd_params["VDI"]["virtual_size"]),
"name": vm_vbd_params["VDI"]["name_label"],
"name_desc": vm_vbd_params["VDI"]["name_description"],
"sr": vm_disk_sr_params["name_label"],
"sr_uuid": vm_disk_sr_params["uuid"],
"os_device": vm_vbd_params["device"],
"vbd_userdevice": vm_vbd_params["userdevice"],
}
vm_facts['disks'].append(vm_disk_params)
elif vm_vbd_params['type'] == "CD":
if vm_vbd_params['empty']:
vm_facts['cdrom'].update(type="none")
vm_facts["disks"].append(vm_disk_params)
elif vm_vbd_params["type"] == "CD":
if vm_vbd_params["empty"]:
vm_facts["cdrom"].update(type="none")
else:
vm_facts['cdrom'].update(type="iso")
vm_facts['cdrom'].update(iso_name=vm_vbd_params['VDI']['name_label'])
vm_facts["cdrom"].update(type="iso")
vm_facts["cdrom"].update(iso_name=vm_vbd_params["VDI"]["name_label"])
for vm_vif_params in vm_params['VIFs']:
vm_guest_metrics_networks = vm_params['guest_metrics'].get('networks', {})
for vm_vif_params in vm_params["VIFs"]:
vm_guest_metrics_networks = vm_params["guest_metrics"].get("networks", {})
vm_network_params = {
"name": vm_vif_params['network']['name_label'],
"mac": vm_vif_params['MAC'],
"vif_device": vm_vif_params['device'],
"mtu": vm_vif_params['MTU'],
"ip": vm_guest_metrics_networks.get(f"{vm_vif_params['device']}/ip", ''),
"name": vm_vif_params["network"]["name_label"],
"mac": vm_vif_params["MAC"],
"vif_device": vm_vif_params["device"],
"mtu": vm_vif_params["MTU"],
"ip": vm_guest_metrics_networks.get(f"{vm_vif_params['device']}/ip", ""),
"prefix": "",
"netmask": "",
"gateway": "",
"ip6": [vm_guest_metrics_networks[ipv6]
for ipv6 in sorted(vm_guest_metrics_networks.keys())
if ipv6.startswith(f"{vm_vif_params['device']}/ipv6/")],
"ip6": [
vm_guest_metrics_networks[ipv6]
for ipv6 in sorted(vm_guest_metrics_networks.keys())
if ipv6.startswith(f"{vm_vif_params['device']}/ipv6/")
],
"prefix6": "",
"gateway6": "",
}
if vm_params['customization_agent'] == "native":
if vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]:
vm_network_params['prefix'] = vm_vif_params['ipv4_addresses'][0].split('/')[1]
vm_network_params['netmask'] = ip_prefix_to_netmask(vm_network_params['prefix'])
if vm_params["customization_agent"] == "native":
if vm_vif_params["ipv4_addresses"] and vm_vif_params["ipv4_addresses"][0]:
vm_network_params["prefix"] = vm_vif_params["ipv4_addresses"][0].split("/")[1]
vm_network_params["netmask"] = ip_prefix_to_netmask(vm_network_params["prefix"])
vm_network_params['gateway'] = vm_vif_params['ipv4_gateway']
vm_network_params["gateway"] = vm_vif_params["ipv4_gateway"]
if vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]:
vm_network_params['prefix6'] = vm_vif_params['ipv6_addresses'][0].split('/')[1]
if vm_vif_params["ipv6_addresses"] and vm_vif_params["ipv6_addresses"][0]:
vm_network_params["prefix6"] = vm_vif_params["ipv6_addresses"][0].split("/")[1]
vm_network_params['gateway6'] = vm_vif_params['ipv6_gateway']
vm_network_params["gateway6"] = vm_vif_params["ipv6_gateway"]
elif vm_params['customization_agent'] == "custom":
vm_xenstore_data = vm_params['xenstore_data']
elif vm_params["customization_agent"] == "custom":
vm_xenstore_data = vm_params["xenstore_data"]
for f in ['prefix', 'netmask', 'gateway', 'prefix6', 'gateway6']:
for f in ["prefix", "netmask", "gateway", "prefix6", "gateway6"]:
vm_network_params[f] = vm_xenstore_data.get(f"vm-data/networks/{vm_vif_params['device']}/{f}", "")
vm_facts['networks'].append(vm_network_params)
vm_facts["networks"].append(vm_network_params)
return vm_facts
@@ -535,7 +541,7 @@ def set_vm_power_state(module, vm_ref, power_state, timeout=300):
xapi_session = XAPI.connect(module)
power_state = power_state.replace('_', '').replace('-', '').lower()
power_state = power_state.replace("_", "").replace("-", "").lower()
vm_power_state_resulting = module_to_xapi_vm_power_state(power_state)
state_changed = False
@@ -697,7 +703,7 @@ def wait_for_vm_ip_address(module, vm_ref, timeout=300):
# consistent with module VM power states.
vm_power_state = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower())
if vm_power_state != 'poweredon':
if vm_power_state != "poweredon":
module.fail_json(msg=f"Cannot wait for VM IP address when VM is in state '{vm_power_state}'!")
interval = 2
@@ -714,7 +720,7 @@ def wait_for_vm_ip_address(module, vm_ref, timeout=300):
if vm_guest_metrics_ref != "OpaqueRef:NULL":
vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_guest_metrics_ref)
vm_ips = vm_guest_metrics['networks']
vm_ips = vm_guest_metrics["networks"]
if "0/ip" in vm_ips:
break
@@ -749,7 +755,10 @@ def get_xenserver_version(module):
host_ref = xapi_session.xenapi.session.get_this_host(xapi_session._session)
try:
xenserver_version = [int(version_number) for version_number in xapi_session.xenapi.host.get_software_version(host_ref)['product_version'].split('.')]
xenserver_version = [
int(version_number)
for version_number in xapi_session.xenapi.host.get_software_version(host_ref)["product_version"].split(".")
]
except ValueError:
xenserver_version = [0, 0, 0]
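For illustration, with a made-up product_version:

product_version = "8.2.1"   # hypothetical host.get_software_version() value
[int(n) for n in product_version.split(".")]  # -> [8, 2, 1]
# a non-numeric version such as "unknown" raises ValueError, giving the [0, 0, 0] fallback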
@@ -758,6 +767,7 @@ def get_xenserver_version(module):
class XAPI:
"""Class for XAPI session management."""
_xapi_session = None
@classmethod
@@ -779,15 +789,15 @@ class XAPI:
if cls._xapi_session is not None:
return cls._xapi_session
hostname = module.params['hostname']
username = module.params['username']
password = module.params['password']
ignore_ssl = not module.params['validate_certs']
hostname = module.params["hostname"]
username = module.params["username"]
password = module.params["password"]
ignore_ssl = not module.params["validate_certs"]
if hostname == 'localhost':
if hostname == "localhost":
cls._xapi_session = XenAPI.xapi_local()
username = ''
password = ''
username = ""
password = ""
else:
# If scheme is not specified we default to http:// because https://
# is problematic in most setups.
@@ -806,10 +816,10 @@ class XAPI:
cls._xapi_session = XenAPI.Session(hostname)
if not password:
password = ''
password = ""
try:
cls._xapi_session.login_with_password(username, password, ANSIBLE_VERSION, 'Ansible')
cls._xapi_session.login_with_password(username, password, ANSIBLE_VERSION, "Ansible")
except XenAPI.Failure as f:
module.fail_json(msg=f"Unable to log on to XenServer at {hostname} as {username}: {f.details}")

View File

@@ -12,16 +12,16 @@ from ansible_collections.community.general.plugins.module_utils.cmd_runner impor
def _values_fmt(values, value_types):
result = []
for value, value_type in zip(values, value_types):
if value_type == 'bool':
value = 'true' if boolean(value) else 'false'
result.extend(['--type', f'{value_type}', '--set', f'{value}'])
if value_type == "bool":
value = "true" if boolean(value) else "false"
result.extend(["--type", f"{value_type}", "--set", f"{value}"])
return result
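_values_fmt pairs each value with its declared type and emits matching --type/--set flags; for example:

_values_fmt(["true", "10"], ["bool", "int"])
# -> ["--type", "bool", "--set", "true", "--type", "int", "--set", "10"]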
def xfconf_runner(module, **kwargs):
runner = CmdRunner(
module,
command='xfconf-query',
command="xfconf-query",
arg_formats=dict(
channel=cmd_runner_fmt.as_opt_val("--channel"),
property=cmd_runner_fmt.as_opt_val("--property"),
@@ -32,7 +32,7 @@ def xfconf_runner(module, **kwargs):
values_and_types=_values_fmt,
version=cmd_runner_fmt.as_fixed("--version"),
),
**kwargs
**kwargs,
)
return runner